Coverage for python / lsst / cp / pipe / cpLinearitySolve.py: 10%
873 statements
« prev ^ index » next coverage.py v7.13.5, created at 2026-04-24 08:52 +0000
« prev ^ index » next coverage.py v7.13.5, created at 2026-04-24 08:52 +0000
1# This file is part of cp_pipe.
2#
3# Developed for the LSST Data Management System.
4# This product includes software developed by the LSST Project
5# (https://www.lsst.org).
6# See the COPYRIGHT file at the top-level directory of this distribution
7# for details of code ownership.
8#
9# This program is free software: you can redistribute it and/or modify
10# it under the terms of the GNU General Public License as published by
11# the Free Software Foundation, either version 3 of the License, or
12# (at your option) any later version.
13#
14# This program is distributed in the hope that it will be useful,
15# but WITHOUT ANY WARRANTY; without even the implied warranty of
16# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17# GNU General Public License for more details.
18#
19# You should have received a copy of the GNU General Public License
20# along with this program. If not, see <https://www.gnu.org/licenses/>.
21#
# Public API of this module; the DoubleSpline task/config are defined
# further down in the file.
__all__ = [
    "LinearitySolveTask",
    "LinearitySolveConfig",
    "LinearityDoubleSplineSolveTask",
    "LinearityDoubleSplineSolveConfig",
]
30import copy
31import logging
32import numpy as np
33import esutil
34from scipy.stats import median_abs_deviation
35from scipy.interpolate import Akima1DInterpolator
37import lsst.afw.image as afwImage
38import lsst.pipe.base as pipeBase
39import lsst.pipe.base.connectionTypes as cT
40import lsst.pex.config as pexConfig
42from lsstDebug import getDebugFrame
43from lsst.ip.isr import (Linearizer, IsrProvenance)
45from .utils import (funcPolynomial, irlsFit, AstierSplineLinearityFitter,
46 extractCalibDate)
def ptcLookup(datasetType, registry, quantumDataId, collections):
    """Butler lookup function to allow PTC to be found.

    Parameters
    ----------
    datasetType : `lsst.daf.butler.DatasetType`
        Dataset type to look up.
    registry : `lsst.daf.butler.Registry`
        Registry for the data repository being searched.
    quantumDataId : `lsst.daf.butler.DataCoordinate`
        Data ID for the quantum of the task this dataset will be passed to.
        This must include an "instrument" key, and should also include any
        keys that are present in ``datasetType.dimensions``.  If it has an
        ``exposure`` or ``visit`` key, that's a sign that this function is
        not actually needed, as those come with the temporal information that
        would allow a real validity-range lookup.
    collections : `lsst.daf.butler.registry.CollectionSearch`
        Collections passed by the user when generating a QuantumGraph.  Ignored
        by this function (see notes below).

    Returns
    -------
    refs : `list` [ `DatasetRef` ]
        A zero- or single-element list containing the matching
        dataset, if one was found.

    Raises
    ------
    RuntimeError
        Raised if more than one PTC reference is found.
    """
    refs = list(registry.queryDatasets(datasetType, dataId=quantumDataId, collections=collections,
                                       findFirst=False))
    if len(refs) >= 2:
        # BUG FIX: the exception was previously constructed but never
        # raised, so an ambiguous multi-PTC match was silently returned.
        raise RuntimeError("Too many PTC connections found. Incorrect collections supplied?")

    return refs
class LinearitySolveConnections(pipeBase.PipelineTaskConnections,
                                dimensions=("instrument", "detector")):
    # Dummy exposure input; run() only checks that at least one was
    # supplied (deferLoad=True, so pixels are never read by this task).
    dummy = cT.Input(
        name="raw",
        doc="Dummy exposure.",
        storageClass='Exposure',
        dimensions=("instrument", "exposure", "detector"),
        multiple=True,
        deferLoad=True,
    )

    camera = cT.PrerequisiteInput(
        name="camera",
        doc="Camera Geometry definition.",
        storageClass="Camera",
        dimensions=("instrument", ),
        isCalibration=True,
    )

    # The PTC is found via the custom lookup function because this
    # quantum has no exposure/visit dimension for a validity-range lookup.
    inputPtc = cT.PrerequisiteInput(
        name="ptc",
        doc="Input PTC dataset.",
        storageClass="PhotonTransferCurveDataset",
        dimensions=("instrument", "detector"),
        isCalibration=True,
        lookupFunction=ptcLookup,
    )

    # Alternative PTC input, used when config.useLinearizerPtc is True.
    inputLinearizerPtc = cT.Input(
        name="linearizerPtc",
        doc="Input linearizer PTC dataset.",
        storageClass="PhotonTransferCurveDataset",
        dimensions=("instrument", "detector"),
        isCalibration=True,
    )

    # Only kept when config.applyPhotodiodeCorrection is True (see __init__).
    inputPhotodiodeCorrection = cT.Input(
        name="pdCorrection",
        doc="Input photodiode correction.",
        storageClass="IsrCalib",
        dimensions=("instrument", ),
        isCalibration=True,
    )

    # Only kept when config.useFocalPlaneNormalization is True (see __init__).
    inputNormalization = cT.Input(
        name="cpLinearizerPtcNormalization",
        doc="Focal-plane normalization table.",
        storageClass="ArrowAstropy",
        dimensions=("instrument",),
        isCalibration=True,
    )

    outputLinearizer = cT.Output(
        name="linearity",
        doc="Output linearity measurements.",
        storageClass="Linearizer",
        dimensions=("instrument", "detector"),
        isCalibration=True,
    )

    def __init__(self, *, config=None):
        """Remove connections that the given configuration does not use."""
        super().__init__(config=config)

        if not config.applyPhotodiodeCorrection:
            del self.inputPhotodiodeCorrection

        # Exactly one of inputPtc / inputLinearizerPtc is retained; run()
        # always receives the surviving one as "inputPtc" (see runQuantum).
        if config.useLinearizerPtc:
            del self.inputPtc
        else:
            del self.inputLinearizerPtc

        if not config.useFocalPlaneNormalization:
            del self.inputNormalization
class LinearitySolveConfig(pipeBase.PipelineTaskConfig,
                           pipelineConnections=LinearitySolveConnections):
    """Configuration for solving the linearity from PTC dataset.
    """
    # --- Fit model selection ---
    linearityType = pexConfig.ChoiceField(
        dtype=str,
        doc="Type of linearizer to construct.",
        default="Squared",
        allowed={
            "LookupTable": "Create a lookup table solution.",
            "Polynomial": "Create an arbitrary polynomial solution.",
            "Squared": "Create a single order squared solution.",
            "Spline": "Create a spline based solution.",
            "None": "Create a dummy solution.",
        }
    )
    polynomialOrder = pexConfig.RangeField(
        dtype=int,
        doc="Degree of polynomial to fit. Must be at least 2.",
        default=3,
        min=2,
    )
    splineKnots = pexConfig.Field(
        dtype=int,
        doc="Number of spline knots to use in fit.",
        default=10,
    )

    trimmedState = pexConfig.Field(
        dtype=bool,
        doc="Will this linearizer be used on trimmed data?",
        default=True,
    )

    # --- Signal-range cuts and clipping ---
    maxLookupTableAdu = pexConfig.Field(
        dtype=int,
        doc="Maximum DN value for a LookupTable linearizer.",
        default=2**18,
    )
    maxLinearAdu = pexConfig.Field(
        dtype=float,
        doc="Maximum adu value to use to estimate linear term; not used with spline fits.",
        default=20000.0,
    )
    minLinearAdu = pexConfig.Field(
        dtype=float,
        doc="Minimum adu value to use to estimate linear term.",
        default=30.0,
    )
    nSigmaClipLinear = pexConfig.Field(
        dtype=float,
        doc="Maximum deviation from linear solution for Poissonian noise.",
        default=5.0,
    )
    ignorePtcMask = pexConfig.Field(
        dtype=bool,
        doc="Ignore the expIdMask set by the PTC solver?",
        default=False,
        deprecated="This field is no longer used. Will be removed after v28.",
    )
    maxFracLinearityDeviation = pexConfig.Field(
        dtype=float,
        doc="Maximum fraction deviation from raw linearity to compute "
            "linearityTurnoff and linearityMaxSignal.",
        # TODO: DM-46811 investigate if this can be raised to 0.05.
        default=0.01,
    )
    minSignalFitLinearityTurnoff = pexConfig.Field(
        dtype=float,
        doc="Minimum signal to compute raw linearity slope for linearityTurnoff.",
        default=1000.0,
    )

    # --- Photodiode handling ---
    usePhotodiode = pexConfig.Field(
        dtype=bool,
        doc="Use the photodiode info instead of the raw expTimes?",
        default=False,
    )
    applyPhotodiodeCorrection = pexConfig.Field(
        dtype=bool,
        doc="Calculate and apply a correction to the photodiode readings?",
        default=False,
    )
    minPhotodiodeCurrent = pexConfig.Field(
        dtype=float,
        doc="Minimum value to trust photodiode signals.",
        default=0.0,
    )

    # --- Grouping of flat pairs by illumination setting ---
    doAutoGrouping = pexConfig.Field(
        dtype=bool,
        doc="Do automatic group detection? Cannot be True if splineGroupingColumn is also set. "
            "The automatic group detection will use the ratio of signal to exposure time (if "
            "autoGroupingUseExptime is True) or photodiode (if False) to determine which "
            "flat pairs were taken with different illumination settings.",
        default=False,
    )
    autoGroupingUseExptime = pexConfig.Field(
        dtype=bool,
        doc="Use exposure time to determine automatic grouping. Used if doAutoGrouping=True.",
        default=True,
    )
    autoGroupingThreshold = pexConfig.Field(
        dtype=float,
        doc="Minimum relative jump from sorted conversion values to determine a group.",
        default=0.1,
    )
    autoGroupingMaxSignalFraction = pexConfig.Field(
        dtype=float,
        doc="Only do auto-grouping when the signal is this fraction of the maximum signal. "
            "All exposures with signal higher than this threshold will be put into the "
            "largest signal group. This config is needed if the input PTC goes beyond "
            "the linearity turnoff.",
        default=0.9,
    )
    splineGroupingColumn = pexConfig.Field(
        dtype=str,
        doc="Column to use for grouping together points for Spline mode, to allow "
            "for different proportionality constants. If None, then grouping will "
            "only be done if doAutoGrouping is True.",
        default=None,
        optional=True,
    )
    splineGroupingMinPoints = pexConfig.Field(
        dtype=int,
        doc="Minimum number of linearity points to allow grouping together points "
            "for Spline mode with splineGroupingColumn. This configuration is here "
            "to prevent misuse of the Spline code to avoid over-fitting.",
        default=100,
    )

    # --- Spline fit iteration and nuisance-parameter controls ---
    splineFitMinIter = pexConfig.Field(
        dtype=int,
        doc="Minimum number of iterations for spline fit.",
        default=3,
    )
    splineFitMaxIter = pexConfig.Field(
        dtype=int,
        doc="Maximum number of iterations for spline fit.",
        default=20,
    )
    splineFitMaxRejectionPerIteration = pexConfig.Field(
        dtype=int,
        doc="Maximum number of rejections per iteration for spline fit.",
        default=5,
    )
    doSplineFitOffset = pexConfig.Field(
        dtype=bool,
        doc="Fit a scattered light offset in the spline fit.",
        default=True,
    )
    doSplineFitWeights = pexConfig.Field(
        dtype=bool,
        doc="Fit linearity weight parameters in the spline fit.",
        default=False,
    )
    splineFitWeightParsStart = pexConfig.ListField(
        dtype=float,
        doc="Starting parameters for weight fit, if doSplineFitWeights=True. "
            "Parameters are such that sigma = sqrt(par[0]**2. + par[1]**2./mu)."
            "If doSplineFitWeights=False then these are used as-is; otherwise "
            "they are used as the initial values for fitting these parameters.",
        length=2,
        default=[1.0, 0.0],
    )
    doSplineFitTemperature = pexConfig.Field(
        dtype=bool,
        doc="Fit temperature coefficient in spline fit?",
        default=False,
    )
    splineFitTemperatureColumn = pexConfig.Field(
        dtype=str,
        doc="Name of the temperature column to use when fitting temperature "
            "coefficients in spline fit; this must not be None if "
            "doSplineFitTemperature is True.",
        default=None,
        optional=True,
    )
    doSplineFitTemporal = pexConfig.Field(
        dtype=bool,
        doc="Fit a linear temporal parameter coefficient in spline fit?",
        default=False,
    )
    useLinearizerPtc = pexConfig.Field(
        dtype=bool,
        doc="Use a linearizer ptc in a single pipeline?",
        default=False,
    )
    useFocalPlaneNormalization = pexConfig.Field(
        dtype=bool,
        doc="Use focal-plane normalization in addition to/instead of photodiode? "
            "(Only used with spline fitting).",
        default=False,
    )

    def validate(self):
        """Check cross-field consistency of the configuration.

        Raises
        ------
        ValueError
            Raised if doSplineFitTemperature is set without a temperature
            column, if both grouping mechanisms are requested, or if
            auto-grouping has no usable grouping quantity.
        """
        super().validate()

        if self.doSplineFitTemperature and self.splineFitTemperatureColumn is None:
            raise ValueError("Must set splineFitTemperatureColumn if doSplineFitTemperature is True.")

        if self.doAutoGrouping and self.splineGroupingColumn is not None:
            raise ValueError("Must not set doAutoGrouping and also splineGroupingColumn")

        if self.doAutoGrouping:
            if not self.autoGroupingUseExptime and not self.usePhotodiode:
                raise ValueError("If doAutoGrouping is True and autoGroupingUseExptime is False, then "
                                 "usePhotodiode must be True.")
369class LinearitySolveTask(pipeBase.PipelineTask):
370 """Fit the linearity from the PTC dataset.
371 """
373 ConfigClass = LinearitySolveConfig
374 _DefaultName = 'cpLinearitySolve'
    def runQuantum(self, butlerQC, inputRefs, outputRefs):
        """Ensure that the input and output dimensions are passed along.

        Parameters
        ----------
        butlerQC : `lsst.daf.butler.QuantumContext`
            Butler to operate on.
        inputRefs : `lsst.pipe.base.InputQuantizedConnection`
            Input data refs to load.
        outputRefs : `lsst.pipe.base.OutputQuantizedConnection`
            Output data refs to persist.
        """
        inputs = butlerQC.get(inputRefs)

        # Use the dimensions to set calib/provenance information.

        if self.config.useLinearizerPtc:
            # Rename the linearizer-PTC input so that run() always sees a
            # single "inputPtc" keyword regardless of configuration.
            inputs["inputDims"] = dict(inputRefs.inputLinearizerPtc.dataId.required)
            inputs["inputPtc"] = inputs["inputLinearizerPtc"]
            del inputs["inputLinearizerPtc"]
        else:
            inputs["inputDims"] = dict(inputRefs.inputPtc.dataId.required)

        # Add calibration provenance info to header.
        kwargs = dict()
        reference = getattr(inputRefs, "inputPtc", None)

        # NOTE(review): when useLinearizerPtc is True, inputRefs has no
        # "inputPtc" attribute, so the PTC_* headers are not recorded in
        # that mode — confirm this is intended.
        if reference is not None and hasattr(reference, "run"):
            runKey = "PTC_RUN"
            runValue = reference.run
            idKey = "PTC_UUID"
            idValue = str(reference.id)
            dateKey = "PTC_DATE"
            calib = inputs.get("inputPtc", None)
            dateValue = extractCalibDate(calib)

            kwargs[runKey] = runValue
            kwargs[idKey] = idValue
            kwargs[dateKey] = dateValue

            self.log.info("Using " + str(reference.run))

        outputs = self.run(**inputs)
        # setDate=False: keep the calibration date set inside run().
        outputs.outputLinearizer.updateMetadata(setDate=False, **kwargs)

        butlerQC.put(outputs, outputRefs)
    def run(self, inputPtc, dummy, camera, inputDims,
            inputPhotodiodeCorrection=None, inputNormalization=None):
        """Fit non-linearity to PTC data, returning the correct Linearizer
        object.

        Parameters
        ----------
        inputPtc : `lsst.ip.isr.PtcDataset`
            Pre-measured PTC dataset.
        dummy : `lsst.afw.image.Exposure`
            The exposure used to select the appropriate PTC dataset.
            In almost all circumstances, one of the input exposures
            used to generate the PTC dataset is the best option.
        camera : `lsst.afw.cameraGeom.Camera`
            Camera geometry.
        inputDims : `lsst.daf.butler.DataCoordinate` or `dict`
            DataIds to use to populate the output calibration.
        inputPhotodiodeCorrection :
            `lsst.ip.isr.PhotodiodeCorrection`, optional
            Pre-measured photodiode correction used in the case when
            applyPhotodiodeCorrection=True.
        inputNormalization : `astropy.table.Table`, optional
            Focal plane normalization table to use if
            useFocalPlaneNormalization is True.

        Returns
        -------
        results : `lsst.pipe.base.Struct`
            The results struct containing:

            ``outputLinearizer``
                Final linearizer calibration (`lsst.ip.isr.Linearizer`).
            ``outputProvenance``
                Provenance data for the new calibration
                (`lsst.ip.isr.IsrProvenance`).

        Notes
        -----
        This task currently fits only polynomial-defined corrections,
        where the correction coefficients are defined such that:
        :math:`corrImage = uncorrImage + \\sum_i c_i uncorrImage^(2 + i)`
        These :math:`c_i` are defined in terms of the direct polynomial fit:
        :math:`meanVector ~ P(x=timeVector) = \\sum_j k_j x^j`
        such that :math:`c_(j-2) = -k_j/(k_1^j)` in units of DN^(1-j) (c.f.,
        Eq. 37 of 2003.05978). The `config.polynomialOrder` or
        `config.splineKnots` define the maximum order of :math:`x^j` to fit.
        As :math:`k_0` and :math:`k_1` are degenerate with bias level and gain,
        they are not included in the non-linearity correction.
        """
        if len(dummy) == 0:
            self.log.warning("No dummy exposure found.")

        detector = camera[inputDims['detector']]
        if self.config.linearityType == 'LookupTable':
            # One table row per amplifier; filled in during the amp loop.
            table = np.zeros((len(detector), self.config.maxLookupTableAdu), dtype=np.float32)
            tableIndex = 0
        else:
            table = None
            tableIndex = None  # This will fail if we increment it.

        # Initialize the linearizer.
        linearizer = Linearizer(detector=detector, table=table, log=self.log)
        linearizer.updateMetadataFromExposures([inputPtc])

        if self.config.usePhotodiode and self.config.applyPhotodiodeCorrection:
            abscissaCorrections = inputPhotodiodeCorrection.abscissaCorrections

        # Per-pair group indices; groups separate flat pairs taken with
        # different illumination settings (see config.doAutoGrouping).
        groupingValues = _determineInputGroups(
            inputPtc,
            self.config.doAutoGrouping,
            self.config.autoGroupingUseExptime,
            self.config.autoGroupingMaxSignalFraction,
            self.config.autoGroupingThreshold,
            self.config.splineGroupingColumn,
            self.config.minPhotodiodeCurrent,
        )

        if self.config.linearityType == 'Spline':
            if self.config.doSplineFitTemperature:
                if self.config.splineFitTemperatureColumn not in inputPtc.auxValues:
                    raise ValueError("Config requests fitting temperature coefficient for "
                                     f"{self.config.splineFitTemperatureColumn} but this column "
                                     "is not available in inputPtc.auxValues.")
                temperatureValues = inputPtc.auxValues[self.config.splineFitTemperatureColumn]
            else:
                temperatureValues = None

            # We set this to have a value to fill the bad amps.
            fitOrder = self.config.splineKnots
        else:
            fitOrder = self.config.polynomialOrder

        for i, amp in enumerate(detector):
            ampName = amp.getName()

            # Save the input gains
            linearizer.inputGain[ampName] = inputPtc.gain[ampName]
            linearizer.inputTurnoff[ampName] = inputPtc.ptcTurnoff[ampName]

            if ampName in inputPtc.badAmps:
                linearizer = self.fillBadAmp(linearizer, fitOrder, inputPtc, amp)
                self.log.warning("Amp %s in detector %s has no usable PTC information. Skipping!",
                                 ampName, detector.getName())
                continue

            # Check for too few points.
            if self.config.linearityType == "Spline" \
                    and self.config.splineGroupingColumn is not None \
                    and len(inputPtc.inputExpIdPairs[ampName]) < self.config.splineGroupingMinPoints:
                raise RuntimeError(
                    "The input PTC has too few points to reliably run with PD grouping. "
                    "The recommended course of action is to set splineGroupingColumn to None. "
                    "If you really know what you are doing, you may reduce "
                    "config.splineGroupingMinPoints.")

            # We start with all finite values.
            mask = np.isfinite(inputPtc.rawMeans[ampName])

            if self.config.linearityType == "Spline" and temperatureValues is not None:
                mask &= np.isfinite(temperatureValues)

            if self.config.usePhotodiode:
                modExpTimes = inputPtc.photoCharges[ampName].copy()

                # Make sure any exposure pairs that do not have photodiode data
                # are masked.
                mask[~np.isfinite(modExpTimes)] = False

                # Make sure any photodiode measurements below the configured
                # minimum are masked.
                mask[modExpTimes < self.config.minPhotodiodeCurrent] = False

                # Get the photodiode correction.
                if self.config.applyPhotodiodeCorrection:
                    for j, pair in enumerate(inputPtc.inputExpIdPairs[ampName]):
                        try:
                            correction = abscissaCorrections[str(pair)]
                        except KeyError:
                            # Pairs without a stored correction are left
                            # uncorrected rather than rejected.
                            correction = 0.0
                        modExpTimes[j] += correction
                inputAbscissa = modExpTimes
            else:
                inputAbscissa = inputPtc.rawExpTimes[ampName].copy()

            # Normalize if configured.
            inputNorm = np.ones_like(inputAbscissa)
            if self.config.useFocalPlaneNormalization:
                exposures = np.asarray(inputPtc.inputExpIdPairs[ampName])[:, 0]
                a, b = esutil.numpy_util.match(exposures, inputNormalization["exposure"])
                inputNorm[a] = inputNormalization["normalization"][b]
                inputAbscissa *= inputNorm

            # Compute linearityTurnoff and linearitySignalMax.
            turnoffMask = inputPtc.expIdMask[ampName].copy()
            turnoffMask &= mask

            turnoffIndex, turnoff, maxSignal, _ = _computeTurnoffAndMax(
                inputAbscissa,
                inputPtc.rawMeans[ampName],
                turnoffMask,
                groupingValues,
                ampName=ampName,
                minSignalFitLinearityTurnoff=self.config.minSignalFitLinearityTurnoff,
                maxFracLinearityDeviation=self.config.maxFracLinearityDeviation,
                log=self.log,
                use_all_for_normalization=True,
            )

            if np.isnan(turnoff):
                # This is a bad amp, with no linearizer.
                linearizer = self.fillBadAmp(linearizer, fitOrder, inputPtc, amp)
                self.log.warning("Amp %s in detector %s has no usable linearizer information. Skipping!",
                                 ampName, detector.getName())
                continue

            linearizer.linearityTurnoff[ampName] = turnoff
            linearizer.linearityMaxSignal[ampName] = maxSignal

            inputOrdinate = inputPtc.rawMeans[ampName].copy()

            # Record the (pre-fit) inputs for downstream inspection.
            linearizer.inputAbscissa[ampName] = inputAbscissa.copy()
            linearizer.inputOrdinate[ampName] = inputOrdinate.copy()
            linearizer.inputGroupingIndex[ampName] = groupingValues.copy()
            linearizer.inputNormalization[ampName] = inputNorm.copy()

            if self.config.linearityType != 'Spline':
                mask &= (inputOrdinate < self.config.maxLinearAdu)
            else:
                # For spline fits, cut above the turnoff.
                self.log.info("Using linearityTurnoff of %.4f adu for amplifier %s", turnoff, ampName)
                extraMask = np.ones(len(inputOrdinate), dtype=bool)
                extraMask[turnoffIndex + 1:] = False
                mask &= extraMask

            mask &= (inputOrdinate > self.config.minLinearAdu)

            # Initial value for the mask.
            linearizer.inputMask[ampName] = mask.copy()

            if mask.sum() < 2:
                linearizer = self.fillBadAmp(linearizer, fitOrder, inputPtc, amp)
                self.log.warning("Amp %s in detector %s has not enough points for fit. Skipping!",
                                 ampName, detector.getName())
                continue

            if self.config.linearityType != 'Spline':
                linearFit, linearFitErr, chiSq, weights = irlsFit([0.0, 100.0], inputAbscissa[mask],
                                                                  inputOrdinate[mask], funcPolynomial)

                # Convert this proxy-to-flux fit into an expected linear flux
                linearOrdinate = linearFit[0] + linearFit[1] * inputAbscissa

                # Exclude low end outliers.
                # This is compared to the original values.
                threshold = self.config.nSigmaClipLinear * np.sqrt(abs(inputOrdinate))

                mask[np.abs(inputOrdinate - linearOrdinate) >= threshold] = False

                linearizer.inputMask[ampName] = mask.copy()

                if mask.sum() < 2:
                    linearizer = self.fillBadAmp(linearizer, fitOrder, inputPtc, amp)
                    self.log.warning("Amp %s in detector %s has not enough points in linear ordinate. "
                                     "Skipping!", ampName, detector.getName())
                    continue

                self.debugFit('linearFit', inputAbscissa, inputOrdinate, linearOrdinate, mask, ampName)

            # Do fits
            if self.config.linearityType in ['Polynomial', 'Squared', 'LookupTable']:
                polyFit = np.zeros(fitOrder + 1)
                polyFit[1] = 1.0
                polyFit, polyFitErr, chiSq, weights = irlsFit(polyFit, linearOrdinate[mask],
                                                              inputOrdinate[mask], funcPolynomial)

                # Truncate the polynomial fit to the squared term.
                # c_(j-2) = -k_j/(k_1^j), cf. Eq. 37 of 2003.05978.
                k1 = polyFit[1]
                linearityCoeffs = np.array(
                    [-coeff/(k1**order) for order, coeff in enumerate(polyFit)]
                )[2:]
                significant = np.where(np.abs(linearityCoeffs) > 1e-10)
                self.log.info("Significant polynomial fits: %s", significant)

                modelOrdinate = funcPolynomial(polyFit, linearOrdinate)

                self.debugFit(
                    'polyFit',
                    inputAbscissa[mask],
                    inputOrdinate[mask],
                    modelOrdinate[mask],
                    None,
                    ampName,
                )

                if self.config.linearityType == 'Squared':
                    # The first term is the squared term.
                    linearityCoeffs = linearityCoeffs[0: 1]
                elif self.config.linearityType == 'LookupTable':
                    # Use linear part to get time at which signal is
                    # maxAduForLookupTableLinearizer DN
                    tMax = (self.config.maxLookupTableAdu - polyFit[0])/polyFit[1]
                    timeRange = np.linspace(0, tMax, self.config.maxLookupTableAdu)
                    signalIdeal = polyFit[0] + polyFit[1]*timeRange
                    signalUncorrected = funcPolynomial(polyFit, timeRange)
                    lookupTableRow = signalIdeal - signalUncorrected  # LinearizerLookupTable has correction

                    linearizer.tableData[tableIndex, :] = lookupTableRow
                    # Coefficients for LookupTable are (row index, offset).
                    linearityCoeffs = np.array([tableIndex, 0])
                    tableIndex += 1
            elif self.config.linearityType in ['Spline']:
                # This is a spline fit with photodiode data based on a model
                # from Pierre Astier.
                # This model fits a spline with (optional) nuisance parameters
                # to allow for different linearity coefficients with different
                # photodiode settings. The minimization is a least-squares
                # fit with the residual of
                # Sum[(S(mu_i) + mu_i - O)/(k_j * D_i) - 1]**2, where S(mu_i)
                # is an Akima Spline function of mu_i, the observed flat-pair
                # mean; D_j is the photo-diode measurement corresponding to
                # that flat-pair; and k_j is a constant of proportionality
                # which is over index j as it is allowed to
                # be different based on different photodiode settings (e.g.
                # CCOBCURR); and O is a constant offset to allow for light
                # leaks (and is only fit if doSplineFitOffset=True). In
                # addition, if config.doSplineFitTemperature is True then
                # the fit will adjust mu such that
                # mu = mu_input*(1 + alpha*(T - T_ref))
                # and T_ref is taken as the median temperature of the run.
                # Finally, if config.doSplineFitTemporal is True then the
                # fit will further adjust mu such that
                # mu = mu_input*(1 + beta*(mjd - mjd_ref))
                # and mjd_ref is taken as the median mjd of the run.
                # Note that this fit is only valid if the input data
                # was taken with a randomly shuffled order of exposure
                # levels.

                # The fit has additional constraints to ensure that the spline
                # goes through the (0, 0) point, as well as a normalization
                # condition so that the average of the spline over the full
                # range is 0. The normalization ensures that the spline only
                # fits deviations from linearity, rather than the linear
                # function itself which is degenerate with the gain.

                # We want to make sure the top node is above the top value
                # to avoid edge issues with the top point.
                nodes = np.linspace(0.0, np.max(inputOrdinate[mask]) + 1.0, self.config.splineKnots)

                if temperatureValues is not None:
                    temperatureValuesScaled = temperatureValues - np.median(temperatureValues[mask])
                else:
                    temperatureValuesScaled = None

                if self.config.doSplineFitTemporal:
                    inputMjdScaled = inputPtc.inputExpPairMjdStartList[ampName].copy()
                    inputMjdScaled -= np.nanmedian(inputMjdScaled)
                else:
                    inputMjdScaled = None

                fitter = AstierSplineLinearityFitter(
                    nodes,
                    groupingValues,
                    inputAbscissa,
                    inputOrdinate,
                    mask=mask,
                    log=self.log,
                    fit_offset=self.config.doSplineFitOffset,
                    fit_weights=self.config.doSplineFitWeights,
                    weight_pars_start=self.config.splineFitWeightParsStart,
                    fit_temperature=self.config.doSplineFitTemperature,
                    temperature_scaled=temperatureValuesScaled,
                    max_signal_nearly_linear=inputPtc.ptcTurnoff[ampName],
                    fit_temporal=self.config.doSplineFitTemporal,
                    mjd_scaled=inputMjdScaled,
                )
                p0 = fitter.estimate_p0(use_all_for_normalization=True)
                pars = fitter.fit(
                    p0,
                    min_iter=self.config.splineFitMinIter,
                    max_iter=self.config.splineFitMaxIter,
                    max_rejection_per_iteration=self.config.splineFitMaxRejectionPerIteration,
                    n_sigma_clip=self.config.nSigmaClipLinear,
                )

                # Confirm that the first parameter is 0, and set it to
                # exactly zero.
                if not np.isclose(pars[0], 0):
                    raise RuntimeError("Programmer error! First spline parameter must "
                                       "be consistent with zero.")
                pars[0] = 0.0

                linearityChisq = fitter.compute_chisq_dof(pars)

                # Coefficients are the node positions followed by the
                # fitted spline values at those nodes.
                linearityCoeffs = np.concatenate([nodes, pars[fitter.par_indices["values"]]])
                linearFit = np.array([0.0, np.mean(pars[fitter.par_indices["groups"]])])

                # We must modify the inputOrdinate according to the
                # nuisance terms in the linearity fit for the residual
                # computation code to work properly.
                # The true mu (inputOrdinate) is given by
                # mu = mu_in * (1 + alpha*t_scale) * (1 + beta*mjd_scale)
                if self.config.doSplineFitTemperature:
                    inputOrdinate *= (1.0
                                      + pars[fitter.par_indices["temperature_coeff"]]*temperatureValuesScaled)
                if self.config.doSplineFitTemporal:
                    inputOrdinate *= (1.0
                                      + pars[fitter.par_indices["temporal_coeff"]]*inputMjdScaled)

                # We have to adjust the abscissa for the different groups.
                # This is because we need a corrected abscissa to get a
                # reasonable linear fit to look for residuals, particularly in
                # the case of significantly different signal-vs-photodiode or
                # signal-vs-exptime scalings for different groups. This then
                # becomes a multiplication by the relative scaling of the
                # different groups.
                for j, group_index in enumerate(fitter.group_indices):
                    inputAbscissa[group_index] *= (pars[fitter.par_indices["groups"][j]] / linearFit[1])

                # And remove the offset term.
                if self.config.doSplineFitOffset:
                    inputOrdinate -= pars[fitter.par_indices["offset"]]

                linearOrdinate = linearFit[1] * inputOrdinate

                # For the spline fit, reuse the "polyFit -> fitParams"
                # field to record the linear coefficients for the groups.
                # We additionally append the offset and weight_pars;
                # however these will be zero-length arrays if these were
                # not configured to be fit.
                polyFit = np.concatenate((
                    pars[fitter.par_indices["groups"]],
                    pars[fitter.par_indices["offset"]],
                    pars[fitter.par_indices["weight_pars"]],
                    pars[fitter.par_indices["temperature_coeff"]],
                    pars[fitter.par_indices["temporal_coeff"]],
                ))
                polyFitErr = np.zeros_like(polyFit)
                chiSq = linearityChisq

                # Update mask based on what the fitter rejected.
                mask = fitter.mask

                linearizer.inputMask[ampName] = mask.copy()
            else:
                # linearityType == "None": dummy solution.
                polyFit = np.zeros(1)
                polyFitErr = np.zeros(1)
                chiSq = np.nan
                linearityCoeffs = np.zeros(1)

            linearizer.linearityType[ampName] = self.config.linearityType
            linearizer.linearityCoeffs[ampName] = linearityCoeffs
            if self.config.trimmedState:
                linearizer.linearityBBox[ampName] = amp.getBBox()
            else:
                linearizer.linearityBBox[ampName] = amp.getRawBBox()
            linearizer.fitParams[ampName] = polyFit
            linearizer.fitParamsErr[ampName] = polyFitErr
            linearizer.fitChiSq[ampName] = chiSq
            linearizer.linearFit[ampName] = linearFit

            # Apply the freshly-fit linearizer to the (possibly adjusted)
            # ordinate values via a 1-row image to get the corrected model.
            image = afwImage.ImageF(len(inputOrdinate), 1)
            image.array[:, :] = inputOrdinate
            linearizeFunction = linearizer.getLinearityTypeByName(linearizer.linearityType[ampName])
            linearizeFunction()(
                image,
                **{'coeffs': linearizer.linearityCoeffs[ampName],
                   'table': linearizer.tableData,
                   'log': linearizer.log}
            )
            linearizeModel = image.array[0, :]

            # The residuals that we record are the final residuals compared to
            # a linear model, after everything has been (properly?) linearized.
            if mask.sum() < 2:
                self.log.warning("Amp %s in detector %s has not enough points in linear ordinate "
                                 "for residuals. Skipping!", ampName, detector.getName())
                residuals = np.full_like(linearizeModel, np.nan)
                residualsUnmasked = residuals.copy()
            else:
                postLinearFit, _, _, _ = irlsFit(
                    linearFit,
                    inputAbscissa[mask],
                    linearizeModel[mask],
                    funcPolynomial,
                )
                # When computing residuals, we only care about the slope of
                # the postLinearFit and not the intercept. The intercept
                # itself depends on a possibly unknown zero in the abscissa
                # (often photodiode) which may have an arbitrary value.
                residuals = linearizeModel - (postLinearFit[1] * inputAbscissa)
                residualsUnmasked = residuals.copy()
                # We set masked residuals to nan.
                residuals[~mask] = np.nan

            linearizer.fitResidualsUnmasked[ampName] = residualsUnmasked
            linearizer.fitResiduals[ampName] = residuals
            linearizer.fitResidualsModel[ampName] = linearizeModel.copy()

            # Robust scatter of the fractional residuals.
            finite = np.isfinite(residuals)
            if finite.sum() == 0:
                sigmad = np.nan
            else:
                sigmad = median_abs_deviation(residuals[finite]/inputOrdinate[finite], scale="normal")
            linearizer.fitResidualsSigmaMad[ampName] = sigmad

            self.debugFit(
                'solution',
                inputOrdinate[mask],
                linearOrdinate[mask],
                linearizeModel[mask],
                None,
                ampName,
            )

        self.fixupBadAmps(linearizer)

        linearizer.hasLinearity = True
        linearizer.validate()
        linearizer.updateMetadata(camera=camera, detector=detector, filterName='NONE')
        linearizer.updateMetadata(setDate=True, setCalibId=True)
        linearizer.updateMetadataFromExposures([inputPtc])
        provenance = IsrProvenance(calibType='linearizer')

        return pipeBase.Struct(
            outputLinearizer=linearizer,
            outputProvenance=provenance,
        )
905 def fillBadAmp(self, linearizer, fitOrder, inputPtc, amp):
906 # Need to fill linearizer with empty values
907 # if the amp is non-functional
908 ampName = amp.getName()
909 nEntries = 1
910 pEntries = 1
911 if self.config.linearityType in ['Polynomial']:
912 # We discard the first 2 entries in the polynomial.
913 nEntries = fitOrder + 1 - 2
914 pEntries = fitOrder + 1 - 2
915 elif self.config.linearityType in ['Spline']:
916 nEntries = fitOrder * 2
917 elif self.config.linearityType in ['Squared', 'None']:
918 nEntries = 1
919 pEntries = fitOrder + 1
920 elif self.config.linearityType in ['LookupTable']:
921 nEntries = 2
922 pEntries = fitOrder + 1
924 nPair = len(inputPtc.inputExpIdPairs[ampName])
926 linearizer.linearityType[ampName] = "None"
927 linearizer.linearityCoeffs[ampName] = np.zeros(nEntries)
928 if self.config.trimmedState:
929 linearizer.linearityBBox[ampName] = amp.getBBox()
930 else:
931 linearizer.linearityBBox[ampName] = amp.getRawBBox()
932 linearizer.fitParams[ampName] = np.zeros(pEntries)
933 linearizer.fitParamsErr[ampName] = np.zeros(pEntries)
934 linearizer.fitChiSq[ampName] = np.nan
935 linearizer.fitResiduals[ampName] = np.zeros(nPair)
936 linearizer.fitResidualsSigmaMad[ampName] = np.nan
937 linearizer.fitResidualsUnmasked[ampName] = np.zeros(nPair)
938 linearizer.fitResidualsModel[ampName] = np.zeros(nPair)
939 linearizer.linearFit[ampName] = np.zeros(2)
940 linearizer.linearityTurnoff[ampName] = np.nan
941 linearizer.linearityMaxSignal[ampName] = np.nan
942 linearizer.inputMask[ampName] = np.zeros(nPair, dtype=np.bool_)
943 linearizer.inputAbscissa[ampName] = np.zeros(nPair)
944 linearizer.inputOrdinate[ampName] = np.zeros(nPair)
945 linearizer.inputGroupingIndex[ampName] = np.zeros(nPair, dtype=np.int64)
946 linearizer.inputNormalization[ampName] = np.ones(nPair)
948 return linearizer
950 def fixupBadAmps(self, linearizer):
951 """Fix nan padding in bad amplifiers.
953 Parameters
954 ----------
955 linearizer : `lsst.ip.isr.Linearizer`
956 """
957 fitParamsMaxLen = 0
958 for ampName in linearizer.ampNames:
959 if (length := len(linearizer.fitParams[ampName])) > fitParamsMaxLen:
960 fitParamsMaxLen = length
962 for ampName in linearizer.ampNames:
963 if linearizer.linearityType[ampName] == "None":
964 # Bad amplifier.
965 linearizer.fitParams[ampName] = np.zeros(fitParamsMaxLen)
966 linearizer.fitParamsErr[ampName] = np.zeros(fitParamsMaxLen)
967 elif len(linearizer.fitParams[ampName]) != fitParamsMaxLen:
968 raise RuntimeError("Linearity has mismatched fitParams; check code/data.")
    def debugFit(self, stepname, xVector, yVector, yModel, mask, ampName):
        """Debug method for linearity fitting.

        Shows a two-panel log-scale scatter plot (data and residuals
        against the model) and then drops into a small interactive
        prompt. Does nothing unless a debug frame is enabled for
        ``stepname``.

        Parameters
        ----------
        stepname : `str`
            A label to use to check if we care to debug at a given
            line of code.
        xVector : `numpy.array`, (N,)
            The values to use as the independent variable in the
            linearity fit.
        yVector : `numpy.array`, (N,)
            The values to use as the dependent variable in the
            linearity fit.
        yModel : `numpy.array`, (N,)
            The values to use as the linearized result.
            NOTE(review): the residual panel computes
            ``yVector[mask] - yModel``, so ``yModel`` must match the
            length of the masked ``yVector`` (callers appear to pass
            pre-masked data with ``mask=None``) — confirm at call sites.
        mask : `numpy.array` [`bool`], (N,) , optional
            A mask to indicate which entries of ``xVector`` and
            ``yVector`` to keep.
        ampName : `str`
            Amplifier name to lookup linearity correction values.
        """
        # Only proceed when debugging is enabled for this step.
        frame = getDebugFrame(self._display, stepname)
        if frame:
            # Imported lazily so matplotlib is only required when debugging.
            import matplotlib.pyplot as plt
            fig, axs = plt.subplots(2)

            # With no mask supplied, treat every point as good.
            if mask is None:
                mask = np.ones_like(xVector, dtype=bool)

            fig.suptitle(f"{stepname} {ampName} {self.config.linearityType}")
            # Axis labels depend on which fitting stage is being inspected.
            if stepname == 'linearFit':
                axs[0].set_xlabel("Input Abscissa (time or mondiode)")
                axs[0].set_ylabel("Input Ordinate (flux)")
                axs[1].set_xlabel("Linear Ordinate (linear flux)")
                axs[1].set_ylabel("Flux Difference: (input - linear)")
            elif stepname in ('polyFit', 'splineFit'):
                axs[0].set_xlabel("Linear Abscissa (linear flux)")
                axs[0].set_ylabel("Input Ordinate (flux)")
                axs[1].set_xlabel("Linear Ordinate (linear flux)")
                axs[1].set_ylabel("Flux Difference: (input - full model fit)")
            elif stepname == 'solution':
                axs[0].set_xlabel("Input Abscissa (time or mondiode)")
                axs[0].set_ylabel("Linear Ordinate (linear flux)")
                axs[1].set_xlabel("Model flux (linear flux)")
                axs[1].set_ylabel("Flux Difference: (linear - model)")

            # Top panel: all data, with rejected (masked-out) points
            # overplotted as red crosses.
            axs[0].set_yscale('log')
            axs[0].set_xscale('log')
            axs[0].scatter(xVector, yVector)
            axs[0].scatter(xVector[~mask], yVector[~mask], c='red', marker='x')
            axs[1].set_xscale('log')

            # Bottom panel: residuals of the (masked) data against the model.
            axs[1].scatter(yModel, yVector[mask] - yModel)
            fig.tight_layout()
            fig.show()

            # Simple interactive loop: continue, open pdb, show help,
            # or exit the process entirely.
            prompt = "Press Enter or c to continue [chpx]..."
            while True:
                ans = input(prompt).lower()
                if ans in ("", " ", "c",):
                    break
                elif ans in ("p", ):
                    import pdb
                    pdb.set_trace()
                elif ans in ("h", ):
                    print("[h]elp [c]ontinue [p]db")
                elif ans in ('x', ):
                    exit()
            plt.close()
class LinearityDoubleSplineSolveConnections(
    pipeBase.PipelineTaskConnections,
    dimensions=("instrument", "detector"),
):
    """Connections for LinearityDoubleSplineSolveTask.

    Inputs are the linearizer PTC, binned exposure-pair tables, the
    camera geometry, and (optionally) a focal-plane normalization
    table; the output is a linearizer calibration.
    """
    # Deferred-load placeholder; not read by the task itself —
    # presumably only constrains quantum graph generation (TODO confirm).
    dummy = cT.Input(
        name="raw",
        doc="Dummy exposure.",
        storageClass='Exposure',
        dimensions=("instrument", "exposure", "detector"),
        multiple=True,
        deferLoad=True,
    )
    camera = cT.PrerequisiteInput(
        name="camera",
        doc="Camera Geometry definition.",
        storageClass="Camera",
        dimensions=("instrument", ),
        isCalibration=True,
    )
    inputLinearizerPtc = cT.Input(
        name="linearizerPtc",
        doc="Input linearizer PTC dataset.",
        storageClass="PhotonTransferCurveDataset",
        dimensions=("instrument", "detector"),
        isCalibration=True,
    )
    # Only required when config.useFocalPlaneNormalization is True;
    # otherwise deleted in __init__ below.
    inputNormalization = cT.Input(
        name="cpLinearizerPtcNormalization",
        doc="Focal-plane normalization table.",
        storageClass="ArrowAstropy",
        dimensions=("instrument",),
        isCalibration=True,
    )
    inputBinnedImagesHandles = cT.Input(
        name="cpPtcPairBinned",
        doc="Tabulated binned exposure pairs.",
        storageClass="ArrowAstropy",
        dimensions=("instrument", "exposure", "detector"),
        multiple=True,
        deferLoad=True,
    )
    outputLinearizer = cT.Output(
        name="linearity",
        doc="Output linearity measurements.",
        storageClass="Linearizer",
        dimensions=("instrument", "detector"),
        isCalibration=True,
    )

    def __init__(self, *, config=None):
        super().__init__(config=config)

        # Drop the normalization connection when it is not configured,
        # so the butler does not require that dataset.
        if not config.useFocalPlaneNormalization:
            del self.inputNormalization
class LinearityDoubleSplineSolveConfig(
    pipeBase.PipelineTaskConfig,
    pipelineConnections=LinearityDoubleSplineSolveConnections,
):
    """Configuration for the double-spline (relative + absolute)
    linearity solve task.

    Field-doc fixes in this revision: the stray "e" in the
    ``absoluteSplineNodeSize`` doc, a missing space in the
    ``relativeSplineLowThreshold`` doc ("(adu).If"), and references to
    nonexistent config names ``doSplineFitWeights`` /
    ``doSplineFitTemperature`` (the fields are
    ``doAbsoluteSplineFitWeights`` / ``doAbsoluteSplineFitTemperature``).
    """
    maxFracLinearityDeviation = pexConfig.Field(
        dtype=float,
        doc="Maximum fraction deviation from raw linearity to compute "
            "linearityTurnoff and linearityMaxSignal.",
        # TODO: DM-46811 investigate if this can be raised to 0.05.
        default=0.01,
    )
    minSignalFitLinearityTurnoff = pexConfig.Field(
        dtype=float,
        doc="Minimum signal to compute raw linearity slope for linearityTurnoff.",
        default=1000.0,
    )
    maxLinearityTurnoffRelativeToPtcTurnoff = pexConfig.Field(
        dtype=float,
        doc="Maximum fractional allowed linearity turnoff relative to the PTC turnoff. Used "
            "to keep extra-high odd values from contaminating the fit.",
        default=1.3,
    )
    maxNoiseReference = pexConfig.Field(
        dtype=float,
        doc="Maximum read noise (e-) in the PTC for an amp to be considered as a reference.",
        default=12.0,
    )
    usePhotodiode = pexConfig.Field(
        dtype=bool,
        doc="Use the photodiode info instead of the raw expTimes?",
        default=False,
    )
    minPhotodiodeCurrent = pexConfig.Field(
        dtype=float,
        doc="Minimum value to trust photodiode signals.",
        default=0.0,
    )
    doAutoGrouping = pexConfig.Field(
        dtype=bool,
        doc="Do automatic group detection? Cannot be True if splineGroupingColumn is also set. "
            "The automatic group detection will use the ratio of signal to exposure time (if "
            "autoGroupingUseExptime is True) or photodiode (if False) to determine which "
            "flat pairs were taken with different illumination settings.",
        default=False,
    )
    autoGroupingUseExptime = pexConfig.Field(
        dtype=bool,
        doc="Use exposure time to determine automatic grouping. Used if doAutoGrouping=True.",
        default=True,
    )
    autoGroupingThreshold = pexConfig.Field(
        dtype=float,
        doc="Minimum relative jump from sorted conversion values to determine a group.",
        default=0.1,
    )
    autoGroupingMaxSignalFraction = pexConfig.Field(
        dtype=float,
        doc="Only do auto-grouping when the signal is this fraction of the maximum signal. "
            "All exposures with signal higher than this threshold will be put into the "
            "largest signal group. This config is needed if the input PTC goes beyond "
            "the linearity turnoff.",
        default=0.9,
    )
    groupingColumn = pexConfig.Field(
        dtype=str,
        doc="Column to use for grouping together points, to allow "
            "for different proportionality constants. If None, then grouping will "
            "only be done if doAutoGrouping is True.",
        default=None,
        optional=True,
    )
    absoluteSplineMinimumSignalNode = pexConfig.Field(
        dtype=float,
        doc="Smallest node (above 0) for absolute spline (adu).",
        default=0.0,
    )
    absoluteSplineLowThreshold = pexConfig.Field(
        dtype=float,
        doc="Threshold for the low-level linearity nodes for absolute spline (adu). "
            "If this is below ``absoluteSplineMinimumSignalNode`` then the low "
            "level checks will be skipped.",
        default=0.0,
    )
    absoluteSplineLowNodeSize = pexConfig.Field(
        dtype=float,
        doc="Minimum size for low-level linearity nodes for absolute spline (adu).",
        default=2000.0,
    )
    absoluteSplineNodeSize = pexConfig.Field(
        dtype=float,
        # Fixed typo: "absoluteSplineLowThreshold e(adu)" -> "(adu)".
        doc="Minimum size for linearity nodes for absolute spline above absoluteSplineLowThreshold (adu); "
            "note that there will always be a node at the reference PTC turnoff.",
        default=3000.0,
    )
    absoluteSplineFitMinIter = pexConfig.Field(
        dtype=int,
        doc="Minimum number of iterations for absolute spline fit.",
        default=3,
    )
    absoluteSplineFitMaxIter = pexConfig.Field(
        dtype=int,
        doc="Maximum number of iterations for absolute spline fit.",
        default=20,
    )
    absoluteSplineFitMaxRejectionPerIteration = pexConfig.Field(
        dtype=int,
        doc="Maximum number of rejections per iteration for absolute spline fit.",
        default=5,
    )
    absoluteNSigmaClipLinear = pexConfig.Field(
        dtype=float,
        doc="Sigma-clipping for absolute spline solution.",
        default=5.0,
    )
    doAbsoluteSplineFitOffset = pexConfig.Field(
        dtype=bool,
        doc="Fit a scattered light offset in the spline fit.",
        default=True,
    )
    doAbsoluteSplineFitWeights = pexConfig.Field(
        dtype=bool,
        doc="Fit linearity weight parameters in the spline fit.",
        default=False,
    )
    absoluteSplineFitWeightParsStart = pexConfig.ListField(
        dtype=float,
        # Fixed references: the controlling field is
        # doAbsoluteSplineFitWeights, not doSplineFitWeights.
        doc="Starting parameters for weight fit, if doAbsoluteSplineFitWeights=True. "
            "Parameters are such that sigma = sqrt(par[0]**2. + par[1]**2./mu)."
            "If doAbsoluteSplineFitWeights=False then these are used as-is; otherwise "
            "they are used as the initial values for fitting these parameters.",
        length=2,
        default=[1.0, 0.0],
    )
    doAbsoluteSplineFitTemperature = pexConfig.Field(
        dtype=bool,
        doc="Fit temperature coefficient in spline fit?",
        default=False,
    )
    absoluteSplineFitTemperatureColumn = pexConfig.Field(
        dtype=str,
        # Fixed reference: the controlling field is
        # doAbsoluteSplineFitTemperature, not doSplineFitTemperature.
        doc="Name of the temperature column to use when fitting temperature "
            "coefficients in spline fit; this must not be None if "
            "doAbsoluteSplineFitTemperature is True.",
        default=None,
        optional=True,
    )
    doAbsoluteSplineFitTemporal = pexConfig.Field(
        dtype=bool,
        doc="Fit a linear temporal parameter coefficient in spline fit?",
        default=False,
    )
    useFocalPlaneNormalization = pexConfig.Field(
        dtype=bool,
        doc="Use focal-plane normalization in addition to/instead of photodiode? "
            "(Only used with for absolute spline fitting).",
        default=False,
    )
    relativeSplineReferenceCounts = pexConfig.Field(
        dtype=float,
        doc="Number of target counts (adu) to select a reference image for "
            "relative spline solution.",
        default=10000.0,
    )
    relativeSplineMinimumSignalNode = pexConfig.Field(
        dtype=float,
        doc="Smallest node (above 0) for relative spline (adu).",
        default=100.0,
    )
    relativeSplineLowThreshold = pexConfig.Field(
        dtype=float,
        # Fixed missing space so the concatenated doc does not read "(adu).If".
        doc="Threshold for the low-level linearity nodes for relative spline (adu). "
            "If this is below ``relativeSplineMinimumSignalNode`` then the low "
            "level checks will be skipped.",
        default=5000.0,
    )
    relativeSplineLowNodeSize = pexConfig.Field(
        dtype=float,
        doc="Minimum size for low-level linearity nodes for relative spline (adu).",
        default=750.0,
    )
    relativeSplineMidNodeSize = pexConfig.Field(
        dtype=float,
        doc="Minimum size for mid-level linearity nodes for relative spline (adu); "
            "this applies to counts between relativeSplineLowThreshold and the "
            "PTC turnoff.",
        default=5000.0,
    )
    relativeSplineHighNodeSize = pexConfig.Field(
        dtype=float,
        doc="Minimum size for high-level linearity nodes for relative spline (adu); "
            "this applies to counts between the PTC and linearity turnoffs.",
        default=2000.0,
    )
    relativeSplineFitMinIter = pexConfig.Field(
        dtype=int,
        doc="Minimum number of iterations for relative spline fit.",
        default=3,
    )
    relativeSplineFitMaxIter = pexConfig.Field(
        dtype=int,
        doc="Maximum number of iterations for relative spline fit.",
        default=20,
    )
    relativeSplineFitMaxRejectionPerIteration = pexConfig.Field(
        dtype=int,
        doc="Maximum number of rejections per iteration for relative spline fit.",
        default=5,
    )
    relativeNSigmaClipLinear = pexConfig.Field(
        dtype=float,
        doc="Sigma-clipping for relative spline solution.",
        default=5.0,
    )

    def validate(self):
        # Check cross-field consistency beyond per-field validation.
        super().validate()

        if self.doAbsoluteSplineFitTemperature and self.absoluteSplineFitTemperatureColumn is None:
            raise ValueError(
                "Must set absoluteSplineFitTemperatureColumn if doAbsoluteSplineFitTemperature is True.",
            )

        if self.doAutoGrouping and self.groupingColumn is not None:
            raise ValueError("Must not set doAutoGrouping and also groupingColumn")
        if self.doAutoGrouping:
            if not self.autoGroupingUseExptime and not self.usePhotodiode:
                raise ValueError("If doAutoGrouping is True and autoGroupingUseExptime is False, then "
                                 "usePhotodiode must be True.")
1328class LinearityDoubleSplineSolveTask(pipeBase.PipelineTask):
1329 ConfigClass = LinearityDoubleSplineSolveConfig
1330 _DefaultName = "cpLinearityDoubleSplineSolve"
1332 def runQuantum(self, butlerQC, inputRefs, outputRefs):
1333 # docstring inherited
1334 inputs = butlerQC.get(inputRefs)
1336 if self.config.useFocalPlaneNormalization:
1337 inputNormalization = inputs["inputNormalization"]
1338 else:
1339 inputNormalization = None
1341 # Add calibration provenance info to header.
1342 kwargs = dict()
1343 reference = getattr(inputRefs, "inputLinearizerPtc", None)
1345 if reference is not None and hasattr(reference, "run"):
1346 runKey = "PTC_RUN"
1347 runValue = reference.run
1348 idKey = "PTC_UUID"
1349 idValue = str(reference.id)
1350 dateKey = "PTC_DATE"
1351 calib = inputs.get("inputPtc", None)
1352 dateValue = extractCalibDate(calib)
1354 kwargs[runKey] = runValue
1355 kwargs[idKey] = idValue
1356 kwargs[dateKey] = dateValue
1358 self.log.info("Using " + str(reference.run))
1360 outputs = self.run(
1361 inputPtc=inputs["inputLinearizerPtc"],
1362 camera=inputs["camera"],
1363 inputBinnedImagesHandles=inputs["inputBinnedImagesHandles"],
1364 inputNormalization=inputNormalization,
1365 )
1366 outputs.outputLinearizer.updateMetadata(setDate=False, **kwargs)
1368 butlerQC.put(outputs, outputRefs)
1370 def run(
1371 self,
1372 *,
1373 inputPtc,
1374 camera,
1375 inputBinnedImagesHandles,
1376 inputNormalization,
1377 ):
1378 """Fit the double-spline relative/absolute linearity correction.
1380 Parameters
1381 ----------
1382 inputPtc : `lsst.ip.isr.PtcDataset`
1383 Pre-measured PTC dataset.
1384 camera : `lsst.afw.cameraGeom.Camera`
1385 Camera geometry.
1386 inputBinnedImagesHandles : `list` [`DeferredDatasetHandle`]
1387 Handles for input binned image pairs.
1388 inputNormalization : `astropy.table.Table`, optional
1389 Focal plane normalization table to use if
1390 useFocalPlaneNormalization is True.
1392 Returns
1393 -------
1394 results : `lsst.pipe.base.Struct`
1395 The results struct containing:
1397 ``outputLinearizer``
1398 Final linearizer calibration (`lsst.ip.isr.Linearizer`).
1399 ``outputProvenance``
1400 Provenance data for the new calibration
1401 (`lsst.ip.isr.IsrProvenance`).
1402 """
1403 detector = camera[inputPtc.metadata["DETECTOR"]]
1405 binnedImagesHandleDict = {
1406 handle.dataId["exposure"]: handle for handle in inputBinnedImagesHandles
1407 }
1409 linearizer = Linearizer(detector=detector, log=self.log)
1410 linearizer.updateMetadataFromExposures([inputPtc])
1412 groupingValues = _determineInputGroups(
1413 inputPtc,
1414 self.config.doAutoGrouping,
1415 self.config.autoGroupingUseExptime,
1416 self.config.autoGroupingMaxSignalFraction,
1417 self.config.autoGroupingThreshold,
1418 self.config.groupingColumn,
1419 self.config.minPhotodiodeCurrent,
1420 )
1422 if self.config.doAbsoluteSplineFitTemperature:
1423 if self.config.absoluteSplineFitTemperatureColumn not in inputPtc.auxValues:
1424 raise ValueError(
1425 "Config requests fitting temperature coefficient for "
1426 f"{self.config.splineFitTemperatureColumn} but this column "
1427 "is not available in inputPtc.auxValues.",
1428 )
1429 temperatureValues = inputPtc.auxValues[self.config.splineFitTemperatureColumn]
1430 else:
1431 temperatureValues = None
1433 # Fill the linearizer with empty values.
1434 firstAmp = None
1435 for ampName in inputPtc.ampNames:
1436 if ampName not in inputPtc.badAmps:
1437 firstAmp = ampName
1438 break
1439 if firstAmp is None:
1440 raise pipeBase.NoWorkFound("No valid amps in input PTC.")
1441 nExp = len(inputPtc.inputExpIdPairs[firstAmp]) * 2
1442 nAmp = len(inputPtc.ampNames)
1444 for amp in detector:
1445 ampName = amp.getName()
1447 linearizer.inputGain[ampName] = inputPtc.gain[ampName]
1448 linearizer.inputTurnoff[ampName] = inputPtc.ptcTurnoff[ampName]
1449 linearizer.linearityType[ampName] = "None"
1450 linearizer.linearityCoeffs[ampName] = np.zeros(1)
1451 # This is not used; kept for compatibility.
1452 linearizer.linearityBBox[ampName] = amp.getBBox()
1453 linearizer.fitParams[ampName] = np.zeros(1)
1454 linearizer.fitParamsErr[ampName] = np.zeros(1)
1455 linearizer.fitChiSq[ampName] = np.nan
1456 linearizer.fitResiduals[ampName] = np.zeros(nExp)
1457 linearizer.fitResidualsSigmaMad[ampName] = np.nan
1458 linearizer.fitResidualsUnmasked[ampName] = np.zeros(nExp)
1459 linearizer.fitResidualsModel[ampName] = np.zeros(nExp)
1460 linearizer.linearFit[ampName] = np.zeros(2)
1461 linearizer.linearityTurnoff[ampName] = np.nan
1462 linearizer.linearityMaxSignal[ampName] = np.nan
1463 linearizer.inputMask[ampName] = np.zeros(nExp, dtype=np.bool_)
1464 linearizer.inputAbscissa[ampName] = np.zeros(nExp)
1465 linearizer.inputOrdinate[ampName] = np.zeros(nExp)
1466 linearizer.inputGroupingIndex[ampName] = np.zeros(nExp, dtype=np.int64)
1467 linearizer.inputNormalization[ampName] = np.ones(nExp)
1469 linearizer.absoluteReferenceAmplifier = ""
1471 # Extract values in common, and per-amp.
1472 data = np.zeros(
1473 nExp,
1474 dtype=[
1475 ("exp_id", "i8"),
1476 ("exptime", "f8"),
1477 ("photocharge", "f8"),
1478 ("mjd", "f8"),
1479 ("raw_mean", ("f8", nAmp)),
1480 ("abscissa", "f8"),
1481 ("grouping", "i4"),
1482 # The following are computed in the relative scaling
1483 # measurements.
1484 ("ref_counts", "f8"),
1485 ("gain_ratio", ("f8", nAmp)),
1486 ],
1487 )
1489 data["exp_id"] = np.asarray(inputPtc.inputExpIdPairs[firstAmp]).ravel()
1490 data["exptime"] = np.repeat(inputPtc.rawExpTimes[firstAmp], 2)
1491 data["mjd"] = np.repeat(inputPtc.inputExpPairMjdStartList[firstAmp], 2)
1492 data["photocharge"] = np.repeat(inputPtc.photoCharges[firstAmp], 2)
1493 data["photocharge"][::2] -= inputPtc.photoChargeDeltas[firstAmp] / 2.
1494 data["photocharge"][1::2] += inputPtc.photoChargeDeltas[firstAmp] / 2.
1495 data["grouping"] = np.repeat(groupingValues, 2)
1497 for i, amp in enumerate(detector):
1498 ampName = amp.getName()
1500 data["raw_mean"][:, i] = np.repeat(inputPtc.rawMeans[ampName], 2)
1501 data["raw_mean"][::2, i] -= inputPtc.rawDeltas[ampName] / 2.
1502 data["raw_mean"][1::2, i] += inputPtc.rawDeltas[ampName] / 2.
1504 if self.config.usePhotodiode:
1505 data["abscissa"][:] = data["photocharge"]
1507 data["abscissa"][data["photocharge"] < self.config.minPhotodiodeCurrent] = np.nan
1508 else:
1509 data["abscissa"][:] = data["exptime"]
1511 # Normalize if configured.
1512 inputNorm = np.ones(nExp, dtype=np.float64)
1513 if self.config.useFocalPlaneNormalization:
1514 a, b = esutil.numpy_util.match(data["exp_id"], inputNormalization["exposure"])
1515 inputNorm[a] = inputNormalization["normalization"][b]
1516 data["abscissa"] *= inputNorm
1518 postTurnoffMasks = {}
1520 # Compute linearity turnoff for each amp.
1521 for i, amp in enumerate(detector):
1522 ampName = amp.getName()
1524 if ampName in inputPtc.badAmps:
1525 self.log.warning(
1526 "Amp %s in detector %s has no usable PTC information. Skipping!",
1527 ampName,
1528 detector.getName(),
1529 )
1530 continue
1532 mask = np.isfinite(data["raw_mean"][:, i])
1534 turnoffMask = np.repeat(inputPtc.expIdMask[ampName], 2)
1535 turnoffMask &= mask
1537 _, turnoff, maxSignal, goodPoints = _computeTurnoffAndMax(
1538 data["abscissa"],
1539 data["raw_mean"][:, i],
1540 turnoffMask,
1541 data["grouping"],
1542 ampName=ampName,
1543 minSignalFitLinearityTurnoff=self.config.minSignalFitLinearityTurnoff,
1544 maxFracLinearityDeviation=self.config.maxFracLinearityDeviation,
1545 log=self.log,
1546 maxTurnoff=inputPtc.ptcTurnoff[ampName] * self.config.maxLinearityTurnoffRelativeToPtcTurnoff,
1547 )
1549 # Use the goodPoints as an initial estimate of the mask
1550 # above the turnoff. But we only want to maintain the
1551 # "high end" outliers.
1552 postTurnoffMask = goodPoints
1553 postTurnoffMask[data["raw_mean"][:, i] < np.median(data["raw_mean"][goodPoints, i])] = True
1554 postTurnoffMasks[ampName] = postTurnoffMask
1556 if np.isnan(turnoff):
1557 # This is a bad amp, with no linearizer.
1558 self.log.warning(
1559 "Amp %s in detector %s has no usable linearizer information. Skipping!",
1560 ampName,
1561 detector.getName(),
1562 )
1563 continue
1565 linearizer.linearityTurnoff[ampName] = turnoff
1566 linearizer.linearityMaxSignal[ampName] = maxSignal
1568 self.log.info("Amplifier %s has a linearity turnoff of %.2f adu.", ampName, turnoff)
1570 # Choose the reference amplifier as the one with the largest
1571 # turnoff. This ensures that the absolute fit covers the full
1572 # range. We additionally confirm that the ptc turnoff is
1573 # finite for this amplifier.
1574 turnoffArray = np.asarray([linearizer.linearityTurnoff[ampName] for ampName in inputPtc.ampNames])
1575 # This is a possibly redundant check to make sure that a bad amp is
1576 # not chosen as a reference amp. We also check that a high noise
1577 # amp is not chosen as the reference amp.
1578 for i, ampName in enumerate(inputPtc.ampNames):
1579 if ampName in inputPtc.badAmps \
1580 or not np.isfinite(inputPtc.ptcTurnoff[ampName]) \
1581 or inputPtc.noise[ampName] > self.config.maxNoiseReference:
1582 turnoffArray[i] = np.nan
1584 if np.all(~np.isfinite(turnoffArray)):
1585 # Return the default blank linearizer.
1586 linearizer.hasLinearity = True
1587 linearizer.validate()
1588 linearizer.updateMetadata(camera=camera, detector=detector, filterName='NONE')
1589 linearizer.updateMetadata(setDate=True, setCalibId=True)
1590 linearizer.updateMetadataFromExposures([inputPtc])
1591 provenance = IsrProvenance(calibType='linearizer')
1593 return pipeBase.Struct(
1594 outputLinearizer=linearizer,
1595 outputProvenance=provenance,
1596 )
1598 refAmpIndex = np.argmax(np.nan_to_num(turnoffArray))
1599 refAmpName = inputPtc.ampNames[refAmpIndex]
1600 linearizer.absoluteReferenceAmplifier = refAmpName
1602 # Choose a reference image.
1603 refExpIndex = np.argmin(
1604 np.abs(
1605 np.nan_to_num(data["raw_mean"][:, refAmpIndex]) - self.config.relativeSplineReferenceCounts
1606 )
1607 )
1608 refExpId = data["exp_id"][refExpIndex]
1610 self.log.info(
1611 "Using exposure %d (%.2f adu in amp %s) as reference.",
1612 refExpId,
1613 data["raw_mean"][refExpIndex, refAmpIndex],
1614 refAmpName,
1615 )
1617 # We need to know if the reference exposure is the first or second
1618 # in the pair, because the binned are pairs.
1619 offset = refExpIndex % 2
1621 refBinned = binnedImagesHandleDict[data["exp_id"][refExpIndex - offset]].get()
1622 refBinned = copy.copy(refBinned)
1623 if offset == 0:
1624 refBinned["value"] = refBinned["value1"]
1625 else:
1626 refBinned["value"] = refBinned["value2"]
1628 # Scale reference according to reference amplifier.
1629 refScaling = np.median(refBinned["value"][refBinned["amp_index"] == refAmpIndex])
1630 refBinned["value"] /= refScaling
1632 # Get the invidual amp scalings.
1633 # These are the relative gains for the reference image.
1634 ampScalings = np.asarray(
1635 [
1636 np.median(refBinned["value"][refBinned["amp_index"] == ampIndex])
1637 for ampIndex in range(nAmp)
1638 ],
1639 )
1641 # Compute the gain ratios for every exposure.
1642 # The binned images are stored as pairs.
1643 self.log.info("Computing gain ratios for %d exposures.", len(data))
1644 for i in range(len(data)):
1645 expId = data["exp_id"][i]
1646 if (i % 2) == 0:
1647 binned = binnedImagesHandleDict[expId].get()
1648 binned["value"] = binned["value1"]
1649 else:
1650 binned["value"] = binned["value2"]
1652 binned["value"] /= refBinned["value"]
1654 gainRatios = np.asarray(
1655 [
1656 np.median(binned["value"][binned["amp_index"] == ampIndex])
1657 for ampIndex in range(nAmp)
1658 ]
1659 )
1660 ref_counts = gainRatios[refAmpIndex]
1661 gainRatios /= ref_counts
1663 data["ref_counts"][i] = ref_counts
1664 data["gain_ratio"][i, :] = gainRatios
1666 # We need to know which group has the largest size.
1667 groupAmplitudes = np.zeros(len(np.unique(data["grouping"])))
1668 for g in range(len(groupAmplitudes)):
1669 use = (data["grouping"] == g)
1670 groupAmplitudes[g] = np.nanmax(data["ref_counts"][use]) - np.nanmin(data["ref_counts"][use])
1671 maxAmplitudeGroup = np.argmax(groupAmplitudes)
1673 self.log.info("Illumination group %d has the largest signal amplitude.", maxAmplitudeGroup)
1675 # Compute relative linearization first.
1676 maxRelNodes = 0
1678 for i, amp in enumerate(detector):
1679 if i == refAmpIndex:
1680 continue
1682 ampName = amp.getName()
1684 ptcTurnoff = inputPtc.ptcTurnoff[ampName]
1685 linearityTurnoff = linearizer.linearityTurnoff[ampName]
1687 if not np.isfinite(ptcTurnoff) or not np.isfinite(linearityTurnoff):
1688 # This is a bad amp; skip it.
1689 continue
1691 if ptcTurnoff < self.config.relativeSplineLowThreshold:
1692 lowThreshold = 0.0
1693 else:
1694 lowThreshold = self.config.relativeSplineLowThreshold
1696 relAbscissa = data["ref_counts"] * ampScalings[i]
1697 relOrdinate = data["ref_counts"] * data["gain_ratio"][:, i] * ampScalings[i]
1699 # The mask here must exclude everything beyond the turnoff.
1700 # Note that we need to do this before we use the actual
1701 # turnoff to compute the nodes to avoid nodes going past the
1702 # data domain.
1703 relMask = (
1704 postTurnoffMasks[ampName]
1705 & np.isfinite(relAbscissa)
1706 & np.isfinite(relOrdinate)
1707 & (relOrdinate < linearityTurnoff)
1708 )
1710 # Make sure that the linearity turnoff used here does not
1711 # go beyond the max value of the relOrdinate
1712 relTurnoff = min(linearityTurnoff, np.max(relOrdinate[relMask]))
1714 relNodes = _noderator(
1715 lowThreshold,
1716 ptcTurnoff,
1717 relTurnoff,
1718 self.config.relativeSplineMinimumSignalNode,
1719 self.config.relativeSplineLowNodeSize,
1720 self.config.relativeSplineMidNodeSize,
1721 self.config.relativeSplineHighNodeSize,
1722 )
1724 self.log.info(
1725 "Relative linearity for amplifier %s using %d nodes from %.2f to %.2f counts.",
1726 ampName,
1727 len(relNodes),
1728 relNodes[0],
1729 relNodes[-1],
1730 )
1732 # Update the number of relative nodes to concatenation.
1733 if len(relNodes) > maxRelNodes:
1734 maxRelNodes = len(relNodes)
1736 linearizer.inputMask[ampName] = relMask.copy()
1737 linearizer.inputAbscissa[ampName] = relAbscissa.copy()
1738 linearizer.inputOrdinate[ampName] = relOrdinate.copy()
1739 linearizer.inputGroupingIndex[ampName] = data["grouping"].copy()
1740 linearizer.inputNormalization[ampName] = np.ones_like(relAbscissa)
1742 fitter = AstierSplineLinearityFitter(
1743 relNodes,
1744 data["grouping"],
1745 relAbscissa,
1746 relOrdinate,
1747 mask=relMask,
1748 fit_offset=False,
1749 fit_weights=False,
1750 fit_temperature=False,
1751 max_signal_nearly_linear=ptcTurnoff,
1752 fit_temporal=False,
1753 # Put a cap on the maximum correction in absolute value.
1754 max_frac_correction=np.inf,
1755 max_correction=10_000.0,
1756 )
1757 p0 = fitter.estimate_p0()
1758 pars = fitter.fit(
1759 p0,
1760 min_iter=self.config.relativeSplineFitMinIter,
1761 max_iter=self.config.relativeSplineFitMaxIter,
1762 max_rejection_per_iteration=self.config.relativeSplineFitMaxRejectionPerIteration,
1763 n_sigma_clip=self.config.relativeNSigmaClipLinear,
1764 )
1766 # Confirm that the first parameter is 0, and set it to
1767 # exactly zero.
1768 relValues = pars[fitter.par_indices["values"]]
1769 if not np.isclose(relValues[0], 0):
1770 raise RuntimeError("Programmer error! First spline parameter must "
1771 "be consistent with zero.")
1772 relValues[0] = 0.0
1774 if np.any(np.abs(pars[fitter.par_indices["values"]]) > 10_000.0):
1775 self.log.warning("Unconstrained nodes escaped containment; clipping.")
1776 lo = (pars[fitter.par_indices["values"]] < -10_000.0)
1777 if np.sum(lo) > 0:
1778 pars[fitter.par_indices["values"][lo]] = -10_000.0
1779 hi = (pars[fitter.par_indices["values"]] > 10_000.0)
1780 if np.sum(hi) > 0:
1781 pars[fitter.par_indices["values"][hi]] = 10_000.0
1783 relChisq = fitter.compute_chisq_dof(pars)
1785 # Our reference fit is always 1.0 slope.
1786 relLinearFit = np.array([0.0, 1.0])
1788 # Adjust the abscissa for different groups for residuals.
1789 for j, groupIndex in enumerate(fitter.group_indices):
1790 relAbscissa[groupIndex] *= (pars[fitter.par_indices["groups"][j]] / relLinearFit[1])
1792 relMask = fitter.mask
1794 # Record values in the linearizer.
1795 linearizer.linearityType[ampName] = "DoubleSpline"
1796 # Note that we have a placeholder for the number of nodes in
1797 # the absolute spline.
1798 linearizer.linearityCoeffs[ampName] = np.concatenate([[len(relNodes), 0], relNodes, relValues])
1799 linearizer.fitChiSq[ampName] = relChisq
1800 linearizer.linearFit[ampName] = relLinearFit
1802 # Compute residuals.
1803 spl = Akima1DInterpolator(relNodes, relValues, method="akima")
1804 relOffset = spl(np.clip(relOrdinate, relNodes[0], relNodes[-1]))
1805 relModel = relOrdinate - relOffset
1807 if relMask.sum() < 2:
1808 self.log.warning("Amp %s in detector %s has not enough points in linear ordinate "
1809 "for residuals. Skipping!", ampName, detector.getName())
1810 relResiduals = np.full_like(relModel, np.nan)
1811 relResidualsUnmasked = relResiduals.copy()
1812 else:
1813 postLinearFit, _, _, _ = irlsFit(
1814 relLinearFit,
1815 relAbscissa[relMask],
1816 relModel[relMask],
1817 funcPolynomial,
1818 )
1819 # When computing residuals, we only care about the slope of
1820 # the postLinearFit and not the intercept. The intercept
1821 # itself depends on a possibly unknown zero in the abscissa
1822 # (often photodiode) which may have an arbitrary value.
1823 relResiduals = relModel - (postLinearFit[1] * relAbscissa)
1824 relResidualsUnmasked = relResiduals.copy()
1825 # We set masked residuals to nan.
1826 relResiduals[~relMask] = np.nan
1828 linearizer.fitResidualsUnmasked[ampName] = relResidualsUnmasked
1829 linearizer.fitResiduals[ampName] = relResiduals
1830 linearizer.fitResidualsModel[ampName] = relModel.copy()
1832 finite = np.isfinite(relResiduals)
1833 if finite.sum() == 0:
1834 sigmad = np.nan
1835 else:
1836 sigmad = median_abs_deviation(relResiduals[finite]/relOrdinate[finite], scale="normal")
1837 linearizer.fitResidualsSigmaMad[ampName] = sigmad
1839 # Now compute absolute linearization.
1841 if temperatureValues is not None:
1842 temperatureValuesScaled = temperatureValues - np.median(temperatureValues)
1843 else:
1844 temperatureValuesScaled = None
1846 if self.config.doAbsoluteSplineFitTemporal:
1847 inputMjdScaled = data["mjd"].copy()
1848 inputMjdScaled -= np.nanmedian(inputMjdScaled)
1849 else:
1850 inputMjdScaled = None
1852 absAbscissa = data["abscissa"].copy()
1853 absOrdinate = data["ref_counts"].copy()
1855 # These are guaranteed to be finite (as checked previously).
1856 absPtcTurnoff = inputPtc.ptcTurnoff[refAmpName]
1857 absLinearityTurnoff = linearizer.linearityTurnoff[refAmpName]
1859 if absPtcTurnoff < self.config.absoluteSplineLowThreshold:
1860 lowThreshold = 0.0
1861 else:
1862 lowThreshold = self.config.absoluteSplineLowThreshold
1864 # The mask here must exclude everything beyond the turnoff.
1865 # Note that we need to do this before we use the actual
1866 # turnoff to compute the nodes to avoid nodes going past the
1867 # data domain.
1868 absMask = postTurnoffMasks[refAmpName] & np.isfinite(absAbscissa) & np.isfinite(absOrdinate)
1870 absLinearityTurnoff = min(absLinearityTurnoff, np.max(absOrdinate[absMask]))
1872 absNodes = _noderator(
1873 lowThreshold,
1874 absPtcTurnoff,
1875 absLinearityTurnoff,
1876 self.config.absoluteSplineMinimumSignalNode,
1877 self.config.absoluteSplineLowNodeSize,
1878 # The medium and high are matched for absolute spline.
1879 self.config.absoluteSplineNodeSize,
1880 self.config.absoluteSplineNodeSize,
1881 )
1883 self.log.info("Absolute linearity for using %d nodes.", len(absNodes))
1885 # We store the absolute residuals with the reference amplifier.
1886 linearizer.linearityType[refAmpName] = "DoubleSpline"
1887 linearizer.inputMask[refAmpName] = absMask.copy()
1888 linearizer.inputAbscissa[refAmpName] = absAbscissa.copy()
1889 linearizer.inputOrdinate[refAmpName] = absOrdinate.copy()
1890 linearizer.inputGroupingIndex[refAmpName] = data["grouping"].copy()
1891 linearizer.inputNormalization[refAmpName] = inputNorm.copy()
1893 fitter = AstierSplineLinearityFitter(
1894 absNodes,
1895 data["grouping"].copy(),
1896 absAbscissa,
1897 absOrdinate,
1898 mask=absMask,
1899 log=self.log,
1900 fit_offset=self.config.doAbsoluteSplineFitOffset,
1901 fit_weights=self.config.doAbsoluteSplineFitWeights,
1902 weight_pars_start=self.config.absoluteSplineFitWeightParsStart,
1903 fit_temperature=self.config.doAbsoluteSplineFitTemperature,
1904 temperature_scaled=temperatureValuesScaled,
1905 max_signal_nearly_linear=absPtcTurnoff,
1906 fit_temporal=self.config.doAbsoluteSplineFitTemporal,
1907 mjd_scaled=inputMjdScaled,
1908 )
1909 p0 = fitter.estimate_p0(use_all_for_normalization=True)
1910 pars = fitter.fit(
1911 p0,
1912 min_iter=self.config.absoluteSplineFitMinIter,
1913 max_iter=self.config.absoluteSplineFitMaxIter,
1914 max_rejection_per_iteration=self.config.absoluteSplineFitMaxRejectionPerIteration,
1915 n_sigma_clip=self.config.absoluteNSigmaClipLinear,
1916 n_outer_iter=2,
1917 )
1919 # Confirm that the first parameter is 0, and set it to
1920 # exactly zero.
1921 absValues = pars[fitter.par_indices["values"]]
1922 if not np.isclose(absValues[0], 0):
1923 raise RuntimeError("Programmer error! First spline parameter must "
1924 "be consistent with zero.")
1925 absValues[0] = 0.0
1927 # We need a place to store this.
1928 linearizer.fitChiSq[refAmpName] = fitter.compute_chisq_dof(pars)
1930 absLinearFit = np.array([0.0, np.mean(pars[fitter.par_indices["groups"]])])
1932 # We must modify the inputOrdinate according to the
1933 # nuisance terms in the linearity fit for the residual
1934 # computation code to work properly.
1935 # The true mu (inputOrdinate) is given by
1936 # mu = mu_in * (1 + alpha*t_scale) * (1 + beta*mjd_scale)
1937 if self.config.doAbsoluteSplineFitTemperature:
1938 absOrdinate *= (1.0
1939 + pars[fitter.par_indices["temperature_coeff"]]*temperatureValuesScaled)
1940 if self.config.doAbsoluteSplineFitTemporal:
1941 absOrdinate *= (1.0
1942 + pars[fitter.par_indices["temporal_coeff"]]*inputMjdScaled)
1944 # Adjust the abscissa for different groups for residuals.
1945 for j, groupIndex in enumerate(fitter.group_indices):
1946 absAbscissa[groupIndex] *= (pars[fitter.par_indices["groups"][j]] / absLinearFit[1])
1947 # And remove the offset term.
1948 if self.config.doAbsoluteSplineFitOffset:
1949 absOrdinate -= pars[fitter.par_indices["offset"]]
1951 absMask = fitter.mask
1953 # Compute residuals.
1954 spl = Akima1DInterpolator(absNodes, absValues, method="akima")
1955 absOffset = spl(np.clip(absOrdinate, absNodes[0], absNodes[-1]))
1956 absModel = absOrdinate - absOffset
1958 if absMask.sum() < 2:
1959 self.log.warning("Detector %s has not enough points in linear ordinate "
1960 "for residuals. Skipping!", detector.getName())
1961 # We have to KICK OUT HERE something is VERY wrong.
1962 absResiduals = np.full_like(absModel, np.nan)
1963 absResidualsUnmasked = relResiduals.copy()
1964 else:
1965 postLinearFit, _, _, _ = irlsFit(
1966 absLinearFit,
1967 absAbscissa[absMask],
1968 absModel[absMask],
1969 funcPolynomial,
1970 )
1971 # When computing residuals, we only care about the slope of
1972 # the postLinearFit and not the intercept. The intercept
1973 # itself depends on a possibly unknown zero in the abscissa
1974 # (often photodiode) which may have an arbitrary value.
1975 absResiduals = absModel - (postLinearFit[1] * absAbscissa)
1976 absResidualsUnmasked = absResiduals.copy()
1977 # We set masked residuals to nan.
1978 absResiduals[~absMask] = np.nan
1980 linearizer.fitResidualsUnmasked[refAmpName] = absResidualsUnmasked
1981 linearizer.fitResiduals[refAmpName] = absResiduals
1982 linearizer.fitResidualsModel[refAmpName] = absModel.copy()
1984 finite = np.isfinite(absResiduals)
1985 if finite.sum() == 0:
1986 sigmad = np.nan
1987 else:
1988 sigmad = median_abs_deviation(absResiduals[finite]/absOrdinate[finite], scale="normal")
1989 linearizer.fitResidualsSigmaMad[refAmpName] = sigmad
1991 # Record the absolute nodes and values in each individual amplifier,
1992 # along with extra padding for alignment.
1993 nAbsNodes = len(absNodes)
1994 for i, amp in enumerate(detector):
1995 ampName = amp.getName()
1997 coeffs = np.zeros(2 * nAbsNodes + 2 * maxRelNodes + 2)
1998 if ampName == refAmpName:
1999 # The reference amplifier only has the absolute spline.
2000 coeffs[1] = nAbsNodes
2001 coeffs[2: 2 + 2 * nAbsNodes] = np.concatenate([absNodes, absValues])
2002 else:
2003 nRelNodes = int(linearizer.linearityCoeffs[ampName][0])
2005 coeffs = np.zeros(2 * nAbsNodes + 2 * maxRelNodes + 2)
2006 coeffs[0] = nRelNodes
2007 coeffs[1] = nAbsNodes
2008 relStart = 2
2009 relEnd = relStart + 2 * nRelNodes
2010 coeffs[relStart: relEnd] = linearizer.linearityCoeffs[ampName][relStart: relEnd]
2011 absStart = relEnd
2012 absEnd = absStart + 2 * nAbsNodes
2013 coeffs[absStart: absEnd] = np.concatenate([absNodes, absValues])
2015 linearizer.linearityCoeffs[ampName] = coeffs
2017 linearizer.hasLinearity = True
2018 linearizer.validate()
2019 linearizer.updateMetadata(camera=camera, detector=detector, filterName='NONE')
2020 linearizer.updateMetadata(setDate=True, setCalibId=True)
2021 linearizer.updateMetadataFromExposures([inputPtc])
2022 provenance = IsrProvenance(calibType='linearizer')
2024 return pipeBase.Struct(
2025 outputLinearizer=linearizer,
2026 outputProvenance=provenance,
2027 )
2030def _determineInputGroups(
2031 ptc,
2032 doAutoGrouping,
2033 autoGroupingUseExptime,
2034 autoGroupingMaxSignalFraction,
2035 autoGroupingThreshold,
2036 groupingColumn,
2037 minPhotodiodeCurrent,
2038):
2039 """Determine input groups for linearity fit.
2041 If ``splineGroupingColumn`` is set, then grouping will be done
2042 based on this. Otherwise, if ``doAutoGrouping`` is False, then
2043 no grouping will be done. Finally, grouping will be done by measuring
2044 the ratio of signal to exposure time (if
2045 ``autoGroupingUseExptime`` is set; recommended) or photocharge.
2046 These are then clustered with a simple algorithm to split into groups.
2047 If the data was taking by varying exposure time at different
2048 illumination levels, this grouping is very robust as the clusters are
2049 very well separated.
2051 Parameters
2052 ----------
2053 ptc : `lsst.ip.isr.PhotonTransferCurveDataset`
2054 Input PTC to do grouping.
2055 doAutoGrouping : `bool`
2056 Do automatic grouping of pairs?
2057 autoGroupingUseExptime : `bool`
2058 Use exposure time for automatic grouping of pairs?
2059 autoGroupingMaxSignalFraction : `float`
2060 All exposures with signal higher than this threshold will
2061 be put into the largest signal group.
2062 autoGroupingThreshold : `float`
2063 Minimum relative jump from sorted values to determine a group.
2064 minPhotodiodeCurrent : `float`
2065 Minimum photodiode current if auto-grouping is used and
2066 autoGroupingUseExptime is False.
2067 splineGroupingColumn : `str` or `None`
2068 Column to be used for spline grouping (if doAutoGrouping is False).
2070 Returns
2071 -------
2072 groupingValues : `np.ndarray`
2073 Array of values that are unique for a given group.
2074 """
2075 nPair = np.asarray(ptc.inputExpIdPairs[ptc.ampNames[0]]).shape[0]
2076 groupingValues = np.zeros(nPair, dtype=np.int64)
2078 if not doAutoGrouping:
2079 if groupingColumn is not None:
2080 if groupingColumn not in ptc.auxValues:
2081 raise ValueError(f"Config requests grouping by {groupingColumn}, "
2082 "but this column is not available in ptc.auxValues.")
2084 uGroupValues = np.unique(ptc.auxValues[groupingColumn])
2085 for i, uGroupValue in enumerate(uGroupValues):
2086 groupingValues[ptc.auxValues[groupingColumn] == uGroupValue] = i
2087 else:
2088 means = np.zeros((nPair, len(ptc.ampNames)))
2089 exptimes = np.zeros_like(means)
2090 for i, ampName in enumerate(ptc.ampNames):
2091 means[:, i] = ptc.rawMeans[ampName] * ptc.gain[ampName]
2092 exptimes[:, i] = ptc.rawExpTimes[ampName]
2093 detMeans = np.nanmean(means, axis=1)
2094 detExptimes = np.nanmean(exptimes, axis=1)
2096 if autoGroupingUseExptime:
2097 abscissa = detExptimes
2098 else:
2099 abscissa = ptc.photoCharges[ptc.ampNames[0]].copy()
2100 # Set illegal photocharges to NaN.
2101 abscissa[abscissa < minPhotodiodeCurrent] = np.nan
2103 ratio = detMeans / abscissa
2104 ratio /= np.nanmedian(ratio)
2106 # Adjust those that are above threshold so they fall into the
2107 # largest group.
2108 above = (detMeans > autoGroupingMaxSignalFraction*np.nanmax(detMeans))
2109 maxIndex = np.argmax(detMeans[~above])
2110 ratio[above] = ratio[maxIndex]
2112 # The clustering of ratios into groups is performed with a simple
2113 # algorithm based on sorting and looking for the largest gaps.
2114 # See https://stackoverflow.com/a/18385795
2115 st = np.argsort(ratio)
2116 stratio = ratio[st]
2117 delta = stratio[1:] - stratio[0: -1]
2119 transitions, = np.where(delta > autoGroupingThreshold)
2120 # If there are no transitions then everything ends up in group 0.
2121 if len(transitions) > 0:
2122 ratioCuts = stratio[transitions] + autoGroupingThreshold/2.
2124 for i in range(len(transitions) + 1):
2125 if i == 0:
2126 inGroup, = np.where(ratio < ratioCuts[i])
2127 elif i == len(transitions):
2128 inGroup, = np.where(ratio > ratioCuts[i - 1])
2129 else:
2130 inGroup, = np.where((ratio > ratioCuts[i - 1]) & (ratio < ratioCuts[i]))
2131 groupingValues[inGroup] = i
2133 # Ensure out-of-range photoCharges/exptimes are in their own group.
2134 # These are masked later on.
2135 groupingValues[~np.isfinite(abscissa)] = -1
2136 # And put the high ones in the max group.
2137 groupingValues[above] = groupingValues[maxIndex]
2139 return groupingValues
def _computeTurnoffAndMax(
    abscissa,
    ordinate,
    initialMask,
    groupingValues,
    ampName="UNKNOWN",
    minSignalFitLinearityTurnoff=1000.0,
    maxFracLinearityDeviation=0.01,
    log=None,
    maxTurnoff=np.inf,
    use_all_for_normalization=False,
):
    """Compute the turnoff and max signal.

    Parameters
    ----------
    abscissa : `np.ndarray`
        Input x values, either photoCharges or exposure times.
        These should be cleaned of any non-finite values.
    ordinate : `np.ndarray`
        Input y values, the raw mean values for the amp.
        These should be cleaned of any non-finite values.
    initialMask : `np.ndarray`
        Mask to use for initial fit (usually from PTC).
    groupingValues : `np.ndarray`
        Array of values that are used to group different fits.
    ampName : `str`, optional
        Amplifier name (used for logging).
    minSignalFitLinearityTurnoff : `float`, optional
        Minimum signal to compute raw linearity slope for linearityTurnoff.
    maxFracLinearityDeviation : `float`, optional
        Maximum fraction deviation from raw linearity to compute turnoff.
    log : `logging.Logger`, optional
        Log object.
    maxTurnoff : `float`, optional
        Maximum turnoff allowed (will be set above PTC turnoff).
    use_all_for_normalization : `bool`, optional
        Use all the points (not just below turnoff) for normalization;
        this is for compatibility with the old linearizer fits.

    Returns
    -------
    turnoffIndex : `int`
        Fit turnoff index (keyed to raw input); -1 if every point was
        rejected and no turnoff could be determined.
    turnoff : `float`
        Fit turnoff value.
    maxSignal : `float`
        Fit maximum signal value.
    goodPoints : `np.ndarray`
        Mask of good points used in turnoff fit.
    """
    if log is None:
        log = logging.getLogger(__name__)

    # Follow eo_pipe:
    # https://github.com/lsst-camera-dh/eo_pipe/blob/6afa546569f622b8d604921e248200481c445730/python/lsst/eo/pipe/linearityPlotsTask.py#L50
    # Replacing flux with abscissa, Ne with ordinate.

    # Fit a line with the y-intercept fixed to zero, using the
    # signal counts Ne as the variance in the chi-square, i.e.,
    # chi2 = sum( (ordinate - aa*abscissa)**2/ordinate )
    # Minimizing chi2 wrt aa, gives
    # aa = sum(abscissa) / sum(abscissa**2/ordinate)

    # Start from the supplied mask and additionally reject points that
    # are too faint, beyond the allowed turnoff, or non-finite.
    fitMask = initialMask.copy()
    fitMask[ordinate < minSignalFitLinearityTurnoff] = False
    fitMask[ordinate > maxTurnoff] = False
    fitMask[~np.isfinite(abscissa) | ~np.isfinite(ordinate)] = False
    goodPoints = fitMask.copy()

    # Precompute the index arrays for each grouping value; each group
    # gets its own zero-intercept slope fit below.
    gValues = np.unique(groupingValues)
    groupIndicesList = []
    for gValue in gValues:
        use, = np.where(groupingValues == gValue)
        groupIndicesList.append(use)

    # Iteratively fit per-group slopes and reject the single worst
    # outlier each pass until the residuals define an adequate set of
    # good points (or we run out of points to fit).
    found = False
    firstIteration = True
    while (fitMask.sum() >= 4) and not found:
        residuals = np.zeros_like(ordinate)

        # NaN-out masked points so np.nansum effectively skips them.
        abscissaMasked = abscissa.copy()
        abscissaMasked[~fitMask] = np.nan
        ordinateMasked = ordinate.copy()
        ordinateMasked[~fitMask] = np.nan

        for i, groupIndices in enumerate(groupIndicesList):
            num = np.nansum(abscissaMasked[groupIndices])
            denom = np.nansum(abscissaMasked[groupIndices]**2./ordinateMasked[groupIndices])

            if num == 0.0 or denom == 0.0:
                # Every point in this group is masked; on the first pass
                # we attempt a recovery fit from the unmasked inputs.
                if firstIteration:
                    log.info(
                        "All points for %s were masked in linearity turnoff for group %d (first iteration).",
                        ampName,
                        i,
                    )
                    # We can try to recover this.
                    nTry = min(10, len(groupIndices))
                    num = np.nansum(abscissa[groupIndices][0: nTry])
                    denom = np.nansum(abscissa[groupIndices][0: nTry]**2./ordinate[groupIndices][0: nTry])
                    aa = num / denom
                else:
                    log.warning("All points masked in linearity turnoff for group %d.", i)
                    aa = np.nan
            else:
                aa = num / denom

            # Fractional deviation of each point from its group's line.
            residuals[groupIndices] = (ordinate[groupIndices] - aa*abscissa[groupIndices]) / \
                ordinate[groupIndices]

        # Use the residuals to compute the turnoff.
        if use_all_for_normalization:
            residuals -= np.nanmedian(residuals)
        else:
            # Only subtract off the median from the previously estimated
            # fitMask.
            residuals -= np.nanmedian(residuals[fitMask])

        goodPoints = (np.abs(residuals) < maxFracLinearityDeviation) & (ordinate < maxTurnoff)

        if goodPoints.sum() > 4:
            # This was an adequate fit.
            found = True
            turnoff = np.max(ordinate[goodPoints])
            turnoffIndex = np.where(np.isclose(ordinate, turnoff))[0][0]
        else:
            # This was a bad fit; remove the largest outlier.
            # Note: argmax runs over the masked subset, so the result is
            # mapped back to a full-array index via np.nonzero(fitMask).
            badIndex = np.argmax(np.abs(residuals)[fitMask])
            fitIndices, = np.nonzero(fitMask)
            fitMask[fitIndices[badIndex]] = False

        firstIteration = False

    if not found:
        # Could not find any reasonable value.
        log.warning(
            "Could not find a reasonable initial linear fit to compute linearity turnoff for "
            "amplifier %s; may need finer sampling of input data?",
            ampName,
        )
        if np.all(~fitMask):
            return -1, np.nan, np.nan, goodPoints

        # Fall back to the highest surviving point as the turnoff.
        turnoff = np.max(ordinate[fitMask])
        turnoffIndex = np.where(np.isclose(ordinate, turnoff))[0][0]

        # residuals may be undefined if the loop never ran; define it so
        # the length check below is valid.
        residuals = np.zeros(len(ordinate))

    # Fit the maximum signal.
    if turnoffIndex == (len(residuals) - 1):
        # This is the last point; we can't do a fit.
        # This is not a warning because we do not actually need this
        # value in practice.
        log.info(
            "No linearity turnoff detected for amplifier %s; try to increase the signal range.",
            ampName,
        )
        maxSignal = ordinate[turnoffIndex]
    else:
        # Take the median of the points near the top of the signal range
        # as the maximum signal estimate.
        maxSignalInitial = np.nanmax(ordinate)

        highFluxPoints = (np.nan_to_num(ordinate)
                          > (1.0 - maxFracLinearityDeviation)*maxSignalInitial)
        maxSignal = np.median(ordinate[highFluxPoints])

    return turnoffIndex, turnoff, maxSignal, goodPoints
2311def _noderator(turnoff0, turnoff1, turnoff2, minNode, lowNodeSize, midNodeSize, highNodeSize):
2312 """The "noderator" node-finder.
2314 Parameters
2315 ----------
2316 turnoff0 : `float`
2317 Zeroth turnoff value (e.g. expectation of low-level
2318 non-linearity threshold) (adu).
2319 turnoff1 : `float`
2320 First turnoff value (e.g. ptc turnoff) (adu).
2321 turnoff2 : `float`
2322 Second turnoff value (e.g. linearity turnoff) (adu).
2323 minNode : `float`
2324 Location to place the first node after 0.0 (if this is <= 0.0
2325 it will be ignored) (adu).
2326 lowNodeSize : `float`
2327 Minimum node size in the low-level non-linearity regime
2328 (below turnoff0) (adu).
2329 midNodeSize : `float`
2330 Minimum node size in the mid-level non-linearity regime
2331 (between turnoff0 and turnoff1) (adu).
2332 highNodeSize : `float`
2333 Minimum node size in the high-level non-linearity regime
2334 (between turnoff1 and turnoff2) (adu).
2336 Returns
2337 -------
2338 nodes : `np.ndarray`
2339 Array of node values (adu).
2340 """
2341 if turnoff0 > minNode:
2342 # At least 2 nodes (edges) in the low signal regime.
2343 nNodesLow = np.clip(int(np.ceil((turnoff0 - minNode) / lowNodeSize)), 2, None)
2344 midStart = turnoff0
2345 else:
2346 nNodesLow = 0
2347 midStart = 0.0
2348 # At least 5 nodes (akima minimum) in the mid signal regime.
2349 nNodesMid = np.clip(int(np.ceil((turnoff1 - midStart) / midNodeSize)), 5, None)
2350 if turnoff2 > turnoff1:
2351 # At least 2 nodes (edges) in the high signal regime.
2352 nNodesHigh = np.clip(int(np.ceil((turnoff2 - turnoff1) / highNodeSize)), 3, None)
2353 else:
2354 nNodesHigh = 0
2355 nodesLow = np.linspace(minNode, turnoff0, nNodesLow)
2356 nodesMid = np.linspace(midStart, turnoff1, nNodesMid)
2357 nodesHigh = np.linspace(turnoff1, turnoff2, nNodesHigh)
2359 # Make sure we do not duplicate nodes when concatenating.
2360 nodeList = []
2361 if nNodesLow > 1:
2362 nodeList.append([0.0])
2363 nodeList.append(nodesLow[:-1])
2364 if nNodesMid > 1:
2365 nodeList.append(nodesMid)
2366 if nNodesHigh > 1:
2367 nodeList.append(nodesHigh[1:])
2368 return np.concatenate(nodeList)