Coverage for python/lsst/cp/pipe/ptc/cpSolvePtcTask.py : 12%

Hot-keys on this page
r m x p toggle line displays
j k next/prev highlighted chunk
0 (zero) top of page
1 (one) first highlighted chunk
1# This file is part of cp_pipe.
2#
3# Developed for the LSST Data Management System.
4# This product includes software developed by the LSST Project
5# (https://www.lsst.org).
6# See the COPYRIGHT file at the top-level directory of this distribution
7# for details of code ownership.
8#
9# This program is free software: you can redistribute it and/or modify
10# it under the terms of the GNU General Public License as published by
11# the Free Software Foundation, either version 3 of the License, or
12# (at your option) any later version.
13#
14# This program is distributed in the hope that it will be useful,
15# but WITHOUT ANY WARRANTY; without even the implied warranty of
16# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17# GNU General Public License for more details.
18#
19# You should have received a copy of the GNU General Public License
20# along with this program. If not, see <https://www.gnu.org/licenses/>.
21#
22import numpy as np
23from collections import Counter
25import lsst.pex.config as pexConfig
26import lsst.pipe.base as pipeBase
27from lsst.cp.pipe.utils import (fitLeastSq, fitBootstrap, funcPolynomial, funcAstier)
29from scipy.optimize import least_squares
31import lsst.pipe.base.connectionTypes as cT
33from .astierCovPtcUtils import fitDataFullCovariance
35from lsst.ip.isr import PhotonTransferCurveDataset
37from lsst.cp.pipe._lookupStaticCalibration import lookupStaticCalibration
39import copy
# Public API of this module.
__all__ = ['PhotonTransferCurveSolveConfig', 'PhotonTransferCurveSolveTask']
class PhotonTransferCurveSolveConnections(pipeBase.PipelineTaskConnections,
                                          dimensions=("instrument", "detector")):
    """Connections for PhotonTransferCurveSolveTask.

    Per-exposure partial PTC datasets come in, the camera is a
    prerequisite input, and a single fitted PTC dataset per detector
    goes out.
    """
    inputCovariances = cT.Input(
        name="ptcCovariances",
        doc="Tuple with measured covariances from flats.",
        storageClass="PhotonTransferCurveDataset",
        dimensions=("instrument", "exposure", "detector"),
        multiple=True,
    )
    camera = cT.PrerequisiteInput(
        name="camera",
        doc="Camera the input data comes from.",
        storageClass="Camera",
        dimensions=("instrument",),
        isCalibration=True,
        # Static calibration: resolved via a lookup function rather than
        # a normal data-ID search.
        lookupFunction=lookupStaticCalibration,
    )
    outputPtcDataset = cT.Output(
        # NOTE(review): "ptcDatsetProposal" looks like a typo for
        # "ptcDatasetProposal", but this is a butler dataset-type name;
        # renaming it would break existing registries -- confirm before
        # ever changing it.
        name="ptcDatsetProposal",
        doc="Output proposed ptc dataset.",
        storageClass="PhotonTransferCurveDataset",
        dimensions=("instrument", "detector"),
        multiple=False,
        isCalibration=True,
    )
class PhotonTransferCurveSolveConfig(pipeBase.PipelineTaskConfig,
                                     pipelineConnections=PhotonTransferCurveSolveConnections):
    """Configuration for fitting measured covariances.

    The ``ptcFitType`` choice selects between a plain polynomial fit,
    the Astier+19 Eq. 16 exponential approximation, and the full
    Astier+19 Eq. 20 covariance model; the remaining fields control
    fit degrees, sigma-clipping, and the initial screening of
    non-linear or saturated points.
    """
    ptcFitType = pexConfig.ChoiceField(
        dtype=str,
        doc="Fit PTC to Eq. 16, Eq. 20 in Astier+19, or to a polynomial.",
        default="POLYNOMIAL",
        allowed={
            "POLYNOMIAL": "n-degree polynomial (use 'polynomialFitDegree' to set 'n').",
            "EXPAPPROXIMATION": "Approximation in Astier+19 (Eq. 16).",
            "FULLCOVARIANCE": "Full covariances model in Astier+19 (Eq. 20)"
        }
    )
    maximumRangeCovariancesAstier = pexConfig.Field(
        dtype=int,
        doc="Maximum range of covariances as in Astier+19",
        default=8,
    )
    sigmaClipFullFitCovariancesAstier = pexConfig.Field(
        dtype=float,
        doc="sigma clip for full model fit for FULLCOVARIANCE ptcFitType ",
        default=5.0,
    )
    maxIterFullFitCovariancesAstier = pexConfig.Field(
        dtype=int,
        doc="Maximum number of iterations in full model fit for FULLCOVARIANCE ptcFitType",
        default=3,
    )
    polynomialFitDegree = pexConfig.Field(
        dtype=int,
        doc="Degree of polynomial to fit the PTC, when 'ptcFitType'=POLYNOMIAL.",
        default=3,
    )
    sigmaCutPtcOutliers = pexConfig.Field(
        dtype=float,
        doc="Sigma cut for outlier rejection in PTC.",
        default=5.0,
    )
    maxIterationsPtcOutliers = pexConfig.Field(
        dtype=int,
        doc="Maximum number of iterations for outlier rejection in PTC.",
        default=2,
    )
    initialNonLinearityExclusionThresholdPositive = pexConfig.RangeField(
        dtype=float,
        doc="Initially exclude data points with a variance that are more than a factor of this from being"
            " linear in the positive direction, from the PTC fit. Note that these points will also be"
            " excluded from the non-linearity fit. This is done before the iterative outlier rejection,"
            " to allow an accurate determination of the sigmas for said iterative fit.",
        default=0.05,
        min=0.0,
        max=1.0,
    )
    initialNonLinearityExclusionThresholdNegative = pexConfig.RangeField(
        dtype=float,
        doc="Initially exclude data points with a variance that are more than a factor of this from being"
            " linear in the negative direction, from the PTC fit. Note that these points will also be"
            " excluded from the non-linearity fit. This is done before the iterative outlier rejection,"
            " to allow an accurate determination of the sigmas for said iterative fit.",
        default=0.25,
        min=0.0,
        max=1.0,
    )
    minMeanRatioTest = pexConfig.Field(
        dtype=float,
        doc="In the initial test to screen out bad points with a ratio test, points with low"
            " flux can get inadvertantly screened. This test only screens out points with flux"
            " above this value.",
        default=20000,
    )
    minVarPivotSearch = pexConfig.Field(
        dtype=float,
        doc="The code looks for a pivot signal point after which the variance starts decreasing at high-flux"
            " to exclude then form the PTC model fit. However, sometimes at low fluxes, the variance"
            " decreases slightly. Set this variable for the variance value, in ADU^2, after which the pivot "
            " should be sought.",
        default=10000,
    )
    doFitBootstrap = pexConfig.Field(
        dtype=bool,
        doc="Use bootstrap for the PTC fit parameters and errors?.",
        default=False,
    )
class PhotonTransferCurveSolveTask(pipeBase.PipelineTask,
                                   pipeBase.CmdLineTask):
    """Task to fit the PTC from flat covariances.

    This task assembles the list of individual per-exposure-pair PTC
    datasets produced upstream into one single final PTC
    dataset. The task fits the measured (co)variances to a polynomial
    model or to the models described in equations 16 and 20 of
    Astier+19 (referred to as ``POLYNOMIAL``, ``EXPAPPROXIMATION``,
    and ``FULLCOVARIANCE`` in the configuration options of the task,
    respectively). Parameters of interest such as the gain and noise
    are derived from the fits.

    Astier+19: "The Shape of the Photon Transfer Curve
    of CCD sensors", arXiv:1905.08677
    """

    ConfigClass = PhotonTransferCurveSolveConfig
    _DefaultName = 'cpPhotonTransferCurveSolve'

    def runQuantum(self, butlerQC, inputRefs, outputRefs):
        """Ensure that the input and output dimensions are passed along.

        Parameters
        ----------
        butlerQC : `~lsst.daf.butler.butlerQuantumContext.ButlerQuantumContext`
            Butler to operate on.
        inputRefs : `~lsst.pipe.base.connections.InputQuantizedConnection`
            Input data refs to load.
        outputRefs : `~lsst.pipe.base.connections.OutputQuantizedConnection`
            Output data refs to persist.
        """
        inputs = butlerQC.get(inputRefs)
        outputs = self.run(inputCovariances=inputs['inputCovariances'], camera=inputs['camera'])
        butlerQC.put(outputs, outputRefs)

    @staticmethod
    def _firstIfList(value):
        """Return ``value[0]`` if ``value`` is a list, else ``value``.

        Partial PTC datasets store per-amp scalar quantities either as
        bare scalars or as single-element lists; this normalizes both
        forms so ``run`` can append them uniformly.
        """
        return value[0] if isinstance(value, list) else value

    def run(self, inputCovariances, camera=None, inputExpList=None):
        """Fit measured covariances to different models.

        Parameters
        ----------
        inputCovariances : `list` [`lsst.ip.isr.PhotonTransferCurveDataset`]
            List of lsst.ip.isr.PhotonTransferCurveDataset datasets.
        camera : `lsst.afw.cameraGeom.Camera`, optional
            Input camera.
        inputExpList : `list` [`~lsst.afw.image.ExposureF`], optional
            List of exposures.

        Returns
        -------
        results : `lsst.pipe.base.Struct`
            The results struct containing:

            ``outputPtcDatset``
                Final PTC dataset, containing information such as the
                means, variances, and exposure times
                (`lsst.ip.isr.PhotonTransferCurveDataset`).
        """
        # Assemble partial PTC datasets into a single dataset.
        ampNames = np.unique(inputCovariances[0].ampNames)
        datasetPtc = PhotonTransferCurveDataset(ampNames, self.config.ptcFitType,
                                                self.config.maximumRangeCovariancesAstier)
        for partialPtcDataset in inputCovariances:
            # Placeholder datasets carry no measurements.
            if partialPtcDataset.ptcFitType == 'DUMMY':
                continue
            for ampName in ampNames:
                datasetPtc.inputExpIdPairs[ampName].append(partialPtcDataset.inputExpIdPairs[ampName])
                datasetPtc.rawExpTimes[ampName].append(
                    self._firstIfList(partialPtcDataset.rawExpTimes[ampName]))
                datasetPtc.rawMeans[ampName].append(
                    self._firstIfList(partialPtcDataset.rawMeans[ampName]))
                datasetPtc.rawVars[ampName].append(
                    self._firstIfList(partialPtcDataset.rawVars[ampName]))
                datasetPtc.expIdMask[ampName].append(
                    self._firstIfList(partialPtcDataset.expIdMask[ampName]))
                datasetPtc.covariances[ampName].append(np.array(partialPtcDataset.covariances[ampName][0]))
                datasetPtc.covariancesSqrtWeights[ampName].append(
                    np.array(partialPtcDataset.covariancesSqrtWeights[ampName][0]))
        # Sort arrays that are filled so far in the final dataset by
        # rawMeans index
        for ampName in ampNames:
            index = np.argsort(np.ravel(np.array(datasetPtc.rawMeans[ampName])))
            datasetPtc.inputExpIdPairs[ampName] = np.array(datasetPtc.inputExpIdPairs[ampName])[index]
            datasetPtc.rawExpTimes[ampName] = np.array(datasetPtc.rawExpTimes[ampName])[index]
            datasetPtc.rawMeans[ampName] = np.array(datasetPtc.rawMeans[ampName])[index]
            datasetPtc.rawVars[ampName] = np.array(datasetPtc.rawVars[ampName])[index]
            datasetPtc.expIdMask[ampName] = np.array(datasetPtc.expIdMask[ampName])[index]
            datasetPtc.covariances[ampName] = np.array(datasetPtc.covariances[ampName])[index]
            datasetPtc.covariancesSqrtWeights[ampName] = np.array(
                datasetPtc.covariancesSqrtWeights[ampName])[index]

        if self.config.ptcFitType == "FULLCOVARIANCE":
            # Calculate covariances and fit them, including the PTC,
            # to Astier+19 full model (Eq. 20) First, fit get the flat
            # pairs that are masked, fitting C_00 vs mu to the
            # EXPAPPROXIMATION model (Eq. 16 in Astier+19). The
            # points at these fluxes will also be masked when
            # calculating the other covariances, C_ij)
            # NOTE(review): copy.copy is shallow, so tempDatasetPtc
            # shares its per-amp dicts with datasetPtc -- confirm a
            # deep copy is not required here.
            tempDatasetPtc = copy.copy(datasetPtc)
            tempDatasetPtc.ptcFitType = "EXPAPPROXIMATION"
            tempDatasetPtc = self.fitPtc(tempDatasetPtc)
            for ampName in datasetPtc.ampNames:
                datasetPtc.expIdMask[ampName] = tempDatasetPtc.expIdMask[ampName]
            datasetPtc.fitType = "FULLCOVARIANCE"
            datasetPtc = self.fitCovariancesAstier(datasetPtc)
        # The other options are: self.config.ptcFitType in
        # ("EXPAPPROXIMATION", "POLYNOMIAL")
        else:
            # Fit the PTC to a polynomial or to Astier+19 exponential
            # approximation (Eq. 16). Fill up
            # PhotonTransferCurveDataset object.
            datasetPtc = self.fitPtc(datasetPtc)
        if inputExpList is not None:
            # It should be a list of exposures, to get the detector.
            detector = inputExpList[0].getDetector()
        else:
            detector = None
        datasetPtc.updateMetadata(setDate=True, camera=camera, detector=detector)

        return pipeBase.Struct(
            outputPtcDataset=datasetPtc,
        )

    def fitCovariancesAstier(self, dataset):
        """Fit measured flat covariances to full model in Astier+19.

        Parameters
        ----------
        dataset : `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset`
            The dataset containing information such as the means,
            (co)variances, and exposure times.

        Returns
        -------
        dataset : `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset`
            This is the same dataset as the input parameter, however,
            it has been modified to include information such as the
            fit vectors and the fit parameters. See the class
            `PhotonTransferCurveDatase`.
        """
        covFits, covFitsNoB = fitDataFullCovariance(dataset)
        dataset = self.getOutputPtcDataCovAstier(dataset, covFits, covFitsNoB)

        return dataset

    def getOutputPtcDataCovAstier(self, dataset, covFits, covFitsNoB):
        """Get output data for PhotonTransferCurveCovAstierDataset from CovFit
        objects.

        Parameters
        ----------
        dataset : `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset`
            The dataset containing information such as the means,
            variances and exposure times.
        covFits : `dict`
            Dictionary of CovFit objects, with amp names as keys.
        covFitsNoB : `dict`
            Dictionary of CovFit objects, with amp names as keys, and
            'b=0' in Eq. 20 of Astier+19.

        Returns
        -------
        dataset : `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset`
            This is the same dataset as the input parameter, however,
            it has been modified to include extra information such as
            the mask 1D array, gains, readout noise, measured signal,
            measured variance, modeled variance, a, and b coefficient
            matrices (see Astier+19) per amplifier. See the class
            `PhotonTransferCurveDatase`.
        """
        assert(len(covFits) == len(covFitsNoB))

        for amp in dataset.ampNames:
            lenInputTimes = len(dataset.rawExpTimes[amp])
            # Not used when ptcFitType is 'FULLCOVARIANCE'
            dataset.ptcFitPars[amp] = [np.nan]
            dataset.ptcFitParsError[amp] = [np.nan]
            dataset.ptcFitChiSq[amp] = np.nan
            if amp in covFits:
                fit = covFits[amp]
                fitNoB = covFitsNoB[amp]
                # Save full covariances, covariances models, and their weights
                # dataset.expIdMask is already full
                dataset.covariances[amp] = fit.cov
                dataset.covariancesModel[amp] = fit.evalCovModel()
                dataset.covariancesSqrtWeights[amp] = fit.sqrtW
                dataset.aMatrix[amp] = fit.getA()
                dataset.bMatrix[amp] = fit.getB()
                dataset.covariancesModelNoB[amp] = fitNoB.evalCovModel()
                dataset.aMatrixNoB[amp] = fitNoB.getA()

                # C_00 (i.e., the variance) vs mean signal, with the
                # mask applied by the fit.
                (meanVecFinal, varVecFinal, varVecModel,
                 wc, varMask) = fit.getFitData(0, 0, divideByMu=False)
                gain = fit.getGain()

                dataset.gain[amp] = gain
                dataset.gainErr[amp] = fit.getGainErr()
                dataset.noise[amp] = np.sqrt(fit.getRon())
                dataset.noiseErr[amp] = fit.getRonErr()
                dataset.finalVars[amp] = varVecFinal
                dataset.finalModelVars[amp] = varVecModel
                dataset.finalMeans[amp] = meanVecFinal

            else:
                # Bad amp
                # Entries need to have proper dimensions so read/write
                # with astropy.Table works.
                matrixSide = self.config.maximumRangeCovariancesAstier
                nanMatrix = np.full((matrixSide, matrixSide), np.nan)
                listNanMatrix = np.full((lenInputTimes, matrixSide, matrixSide), np.nan)

                dataset.covariances[amp] = listNanMatrix
                dataset.covariancesModel[amp] = listNanMatrix
                dataset.covariancesSqrtWeights[amp] = listNanMatrix
                dataset.aMatrix[amp] = nanMatrix
                dataset.bMatrix[amp] = nanMatrix
                dataset.covariancesModelNoB[amp] = listNanMatrix
                dataset.aMatrixNoB[amp] = nanMatrix

                dataset.expIdMask[amp] = np.repeat(np.nan, lenInputTimes)
                dataset.gain[amp] = np.nan
                dataset.gainErr[amp] = np.nan
                dataset.noise[amp] = np.nan
                dataset.noiseErr[amp] = np.nan
                dataset.finalVars[amp] = np.repeat(np.nan, lenInputTimes)
                dataset.finalModelVars[amp] = np.repeat(np.nan, lenInputTimes)
                dataset.finalMeans[amp] = np.repeat(np.nan, lenInputTimes)

        return dataset

    @staticmethod
    def _initialParsForPolynomial(order):
        """Return sensible starting parameters for a polynomial PTC fit.

        pars[0] is the noise term, pars[1] the inverse gain, and the
        higher-order terms start near zero.
        """
        assert(order >= 2)
        pars = np.zeros(order, dtype=float)
        pars[0] = 10
        pars[1] = 1
        pars[2:] = 0.0001
        return pars

    @staticmethod
    def _boundsForPolynomial(initialPars, lowers=None, uppers=None):
        """Return (lowers, uppers) bounds for a polynomial PTC fit.

        The lower bound of the linear term is forced to zero so the fit
        cannot produce a negative gain. The caller-provided lists are
        copied so they are never mutated in place.
        """
        # None (not []) as default avoids the shared-mutable-default
        # pitfall; empty lists still fall through to the infinite bounds.
        lowers = list(lowers) if lowers else [-np.inf for p in initialPars]
        uppers = list(uppers) if uppers else [np.inf for p in initialPars]
        lowers[1] = 0  # no negative gains
        return (lowers, uppers)

    @staticmethod
    def _boundsForAstier(initialPars, lowers=None, uppers=None):
        """Return (lowers, uppers) bounds for an EXPAPPROXIMATION PTC fit.

        Defaults to unbounded in both directions.
        """
        lowers = list(lowers) if lowers else [-np.inf for p in initialPars]
        uppers = list(uppers) if uppers else [np.inf for p in initialPars]
        return (lowers, uppers)

    @staticmethod
    def _getInitialGoodPoints(means, variances, maxDeviationPositive, maxDeviationNegative,
                              minMeanRatioTest, minVarPivotSearch):
        """Return a boolean array to mask bad points.

        Parameters
        ----------
        means : `numpy.array`
            Input array with mean signal values.
        variances : `numpy.array`
            Input array with variances at each mean value.
        maxDeviationPositive : `float`
            Maximum deviation from being constant for the variance/mean
            ratio, in the positive direction.
        maxDeviationNegative : `float`
            Maximum deviation from being constant for the variance/mean
            ratio, in the negative direction.
        minMeanRatioTest : `float`
            Minimum signal value (in ADU) after which to start examining
            the ratios var/mean.
        minVarPivotSearch : `float`
            Minimum variance point (in ADU^2) after which the pivot point
            where the variance starts decreasing should be sought.

        Returns
        -------
        goodPoints : `numpy.array` [`bool`]
            Boolean array to select good (`True`) and bad (`False`)
            points.

        Notes
        -----
        A linear function has a constant ratio, so find the median
        value of the ratios, and exclude the points that deviate from
        that by more than a factor of maxDeviationPositive/negative.
        Asymmetric deviations are supported as we expect the PTC to
        turn down as the flux increases, but sometimes it anomalously
        turns upwards just before turning over, which ruins the fits,
        so it is wise to be stricter about restricting positive
        outliers than negative ones.

        Too high and points that are so bad that fit will fail will be
        included. Too low and the non-linear points will be excluded,
        biasing the NL fit. This function also masks points after the
        variance starts decreasing.
        """
        assert(len(means) == len(variances))
        ratios = [b/a for (a, b) in zip(means, variances)]
        medianRatio = np.nanmedian(ratios)
        # Points below minMeanRatioTest are exempt from the ratio test
        # (deviation forced to 0.0 == always "good").
        ratioDeviations = [0.0 if a < minMeanRatioTest else (r/medianRatio)-1
                           for (a, r) in zip(means, ratios)]

        # so that it doesn't matter if the deviation is expressed as
        # positive or negative
        maxDeviationPositive = abs(maxDeviationPositive)
        maxDeviationNegative = -1. * abs(maxDeviationNegative)

        goodPoints = np.array([True if (r < maxDeviationPositive and r > maxDeviationNegative)
                               else False for r in ratioDeviations])

        # Eliminate points beyond which the variance decreases
        pivot = np.where(np.array(np.diff(variances)) < 0)[0]
        if len(pivot) > 0:
            # For small values, sometimes the variance decreases slightly
            # Only look when var > self.config.minVarPivotSearch
            pivot = [p for p in pivot if variances[p] > minVarPivotSearch]
            if len(pivot) > 0:
                pivot = np.min(pivot)
                goodPoints[pivot+1:len(goodPoints)] = False

        return goodPoints

    def _makeZeroSafe(self, array, substituteValue=1e-9):
        """Return a copy of ``array`` with exact zeros replaced.

        Zeros are replaced by ``substituteValue`` (and a warning is
        logged) so that later divisions by the variance vector cannot
        produce infinities.
        """
        array = np.array(array)
        nBad = Counter(np.ravel(array))[0]
        if nBad == 0:
            return array

        index, = np.where(array == 0)
        if len(index):
            msg = f"Found {nBad} zeros in array at elements {index}"
            self.log.warn(msg)

        array[index] = substituteValue

        return array

    def fitPtc(self, dataset):
        """Fit the photon transfer curve to a polynomial or to Astier+19
        approximation.

        Fit the photon transfer curve with either a polynomial of the order
        specified in the task config, or using the exponential approximation
        in Astier+19 (Eq. 16).

        Sigma clipping is performed iteratively for the fit, as well as an
        initial clipping of data points that are more than
        config.initialNonLinearityExclusionThreshold away from lying on a
        straight line. This other step is necessary because the photon transfer
        curve turns over catastrophically at very high flux (because saturation
        drops the variance to ~0) and these far outliers cause the initial fit
        to fail, meaning the sigma cannot be calculated to perform the
        sigma-clipping.

        Parameters
        ----------
        dataset : `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset`
            The dataset containing the means, variances and exposure times.

        Returns
        -------
        dataset : `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset`
            This is the same dataset as the input parameter, however,
            it has been modified to include information such as the
            fit vectors and the fit parameters. See the class
            `PhotonTransferCurveDatase`.

        Raises
        ------
        RuntimeError:
            Raises if dataset.ptcFitType is None or empty.
        """
        if dataset.ptcFitType:
            ptcFitType = dataset.ptcFitType
        else:
            raise RuntimeError("ptcFitType is None or empty in PTC dataset.")
        matrixSide = self.config.maximumRangeCovariancesAstier
        nanMatrix = np.empty((matrixSide, matrixSide))
        nanMatrix[:] = np.nan

        # Pre-fill the covariance-model entries (only meaningful for
        # FULLCOVARIANCE) with NaNs of the right shape.
        for amp in dataset.ampNames:
            lenInputTimes = len(dataset.rawExpTimes[amp])
            listNanMatrix = np.empty((lenInputTimes, matrixSide, matrixSide))
            listNanMatrix[:] = np.nan

            dataset.covariancesModel[amp] = listNanMatrix
            dataset.aMatrix[amp] = nanMatrix
            dataset.bMatrix[amp] = nanMatrix
            dataset.covariancesModelNoB[amp] = listNanMatrix
            dataset.aMatrixNoB[amp] = nanMatrix

        def errFunc(p, x, y):
            # Residuals for scipy.optimize.least_squares.
            return ptcFunc(p, x) - y

        sigmaCutPtcOutliers = self.config.sigmaCutPtcOutliers
        maxIterationsPtcOutliers = self.config.maxIterationsPtcOutliers

        for ampName in dataset.ampNames:
            timeVecOriginal = np.ravel(np.array(dataset.rawExpTimes[ampName]))
            meanVecOriginal = np.ravel(np.array(dataset.rawMeans[ampName]))
            varVecOriginal = np.ravel(np.array(dataset.rawVars[ampName]))
            varVecOriginal = self._makeZeroSafe(varVecOriginal)

            goodPoints = self._getInitialGoodPoints(meanVecOriginal, varVecOriginal,
                                                    self.config.initialNonLinearityExclusionThresholdPositive,
                                                    self.config.initialNonLinearityExclusionThresholdNegative,
                                                    self.config.minMeanRatioTest,
                                                    self.config.minVarPivotSearch)
            if not (goodPoints.any()):
                msg = (f"SERIOUS: All points in goodPoints: {goodPoints} are bad."
                       f"Setting {ampName} to BAD.")
                self.log.warn(msg)
                # Fill entries with NaNs
                self.fillBadAmp(dataset, ptcFitType, ampName)
                continue

            mask = goodPoints

            if ptcFitType == 'EXPAPPROXIMATION':
                ptcFunc = funcAstier
                parsIniPtc = [-1e-9, 1.0, 10.]  # a00, gain, noisei^2
                # lowers and uppers obtained from BOT data studies by
                # C. Lage (UC Davis, 11/2020).
                bounds = self._boundsForAstier(parsIniPtc, lowers=[-1e-4, 0.5, -2000],
                                               uppers=[1e-4, 2.5, 2000])
            if ptcFitType == 'POLYNOMIAL':
                ptcFunc = funcPolynomial
                parsIniPtc = self._initialParsForPolynomial(self.config.polynomialFitDegree + 1)
                bounds = self._boundsForPolynomial(parsIniPtc)

            # Before bootstrap fit, do an iterative fit to get rid of outliers.
            # Initialize pars/newMask so references after the loop are
            # defined even if maxIterationsPtcOutliers == 0.
            pars = parsIniPtc
            newMask = np.ones_like(mask, dtype=bool)
            count = 1
            while count <= maxIterationsPtcOutliers:
                # Note that application of the mask actually shrinks the array
                # to size rather than setting elements to zero (as we want) so
                # always update mask itself and re-apply to the original data
                meanTempVec = meanVecOriginal[mask]
                varTempVec = varVecOriginal[mask]
                res = least_squares(errFunc, parsIniPtc, bounds=bounds, args=(meanTempVec, varTempVec))
                pars = res.x

                # change this to the original from the temp because
                # the masks are ANDed meaning once a point is masked
                # it's always masked, and the masks must always be the
                # same length for broadcasting
                sigResids = (varVecOriginal - ptcFunc(pars, meanVecOriginal))/np.sqrt(varVecOriginal)
                newMask = np.array([True if np.abs(r) < sigmaCutPtcOutliers else False for r in sigResids])
                mask = mask & newMask
                if not (mask.any() and newMask.any()):
                    msg = (f"SERIOUS: All points in either mask: {mask} or newMask: {newMask} are bad. "
                           f"Setting {ampName} to BAD.")
                    self.log.warn(msg)
                    # Fill entries with NaNs
                    self.fillBadAmp(dataset, ptcFitType, ampName)
                    break
                nDroppedTotal = Counter(mask)[False]
                self.log.debug(f"Iteration {count}: discarded {nDroppedTotal} points in total for {ampName}")
                count += 1
                # objects should never shrink
                assert (len(mask) == len(timeVecOriginal) == len(meanVecOriginal) == len(varVecOriginal))
            if not (mask.any() and newMask.any()):
                continue
            dataset.expIdMask[ampName] = np.array(dataset.expIdMask[ampName])
            # store the final mask
            if len(dataset.expIdMask[ampName]):
                dataset.expIdMask[ampName] &= mask  # bitwise_and if there is already a mask
            else:
                dataset.expIdMask[ampName] = mask
            parsIniPtc = pars
            meanVecFinal = meanVecOriginal[mask]
            varVecFinal = varVecOriginal[mask]

            if Counter(mask)[False] > 0:
                self.log.info((f"Number of points discarded in PTC of amplifier {ampName}:"
                               f" {Counter(mask)[False]} out of {len(meanVecOriginal)}"))

            if (len(meanVecFinal) < len(parsIniPtc)):
                msg = (f"SERIOUS: Not enough data points ({len(meanVecFinal)}) compared to the number of "
                       f"parameters of the PTC model({len(parsIniPtc)}). Setting {ampName} to BAD.")
                self.log.warn(msg)
                # Fill entries with NaNs
                self.fillBadAmp(dataset, ptcFitType, ampName)
                continue
            # Fit the PTC
            if self.config.doFitBootstrap:
                parsFit, parsFitErr, reducedChiSqPtc = fitBootstrap(parsIniPtc, meanVecFinal,
                                                                    varVecFinal, ptcFunc,
                                                                    weightsY=1./np.sqrt(varVecFinal))
            else:
                parsFit, parsFitErr, reducedChiSqPtc = fitLeastSq(parsIniPtc, meanVecFinal,
                                                                  varVecFinal, ptcFunc,
                                                                  weightsY=1./np.sqrt(varVecFinal))
            dataset.ptcFitPars[ampName] = parsFit
            dataset.ptcFitParsError[ampName] = parsFitErr
            dataset.ptcFitChiSq[ampName] = reducedChiSqPtc
            # Masked variances (measured and modeled) and means. Need
            # to pad the array so astropy.Table does not crash (the
            # mask may vary per amp).
            padLength = len(dataset.rawExpTimes[ampName]) - len(varVecFinal)
            dataset.finalVars[ampName] = np.pad(varVecFinal, (0, padLength), 'constant',
                                                constant_values=np.nan)
            dataset.finalModelVars[ampName] = np.pad(ptcFunc(parsFit, meanVecFinal), (0, padLength),
                                                     'constant', constant_values=np.nan)
            dataset.finalMeans[ampName] = np.pad(meanVecFinal, (0, padLength), 'constant',
                                                 constant_values=np.nan)
            if ptcFitType == 'EXPAPPROXIMATION':
                ptcGain = parsFit[1]
                ptcGainErr = parsFitErr[1]
                ptcNoise = np.sqrt(np.fabs(parsFit[2]))
                ptcNoiseErr = 0.5*(parsFitErr[2]/np.fabs(parsFit[2]))*np.sqrt(np.fabs(parsFit[2]))
            if ptcFitType == 'POLYNOMIAL':
                ptcGain = 1./parsFit[1]
                ptcGainErr = np.fabs(1./parsFit[1])*(parsFitErr[1]/parsFit[1])
                ptcNoise = np.sqrt(np.fabs(parsFit[0]))*ptcGain
                ptcNoiseErr = (0.5*(parsFitErr[0]/np.fabs(parsFit[0]))*(np.sqrt(np.fabs(parsFit[0]))))*ptcGain
            dataset.gain[ampName] = ptcGain
            dataset.gainErr[ampName] = ptcGainErr
            dataset.noise[ampName] = ptcNoise
            dataset.noiseErr[ampName] = ptcNoiseErr

        if not len(dataset.ptcFitType) == 0:
            dataset.ptcFitType = ptcFitType
        if len(dataset.badAmps) == 0:
            # NOTE(review): a NaN vector as the "no bad amps" sentinel
            # looks odd but keeps array lengths consistent for
            # serialization -- confirm downstream readers expect this.
            dataset.badAmps = np.repeat(np.nan, len(list(dataset.rawExpTimes.values())[0]))

        return dataset

    def fillBadAmp(self, dataset, ptcFitType, ampName):
        """Fill the dataset with NaNs if there are not enough good points.

        Parameters
        ----------
        dataset : `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset`
            The dataset containing the means, variances and exposure times.
        ptcFitType : {'POLYNOMIAL', 'EXPAPPROXIMATION'}
            Fit a 'POLYNOMIAL' (degree: 'polynomialFitDegree') or
            'EXPAPPROXIMATION' (Eq. 16 of Astier+19) to the PTC.
        ampName : `str`
            Amplifier name.
        """
        dataset.badAmps.append(ampName)
        dataset.expIdMask[ampName] = np.repeat(False, len(dataset.rawExpTimes[ampName]))
        dataset.gain[ampName] = np.nan
        dataset.gainErr[ampName] = np.nan
        dataset.noise[ampName] = np.nan
        dataset.noiseErr[ampName] = np.nan
        dataset.ptcFitPars[ampName] = (np.repeat(np.nan, self.config.polynomialFitDegree + 1) if
                                       ptcFitType in ["POLYNOMIAL", ] else np.repeat(np.nan, 3))
        dataset.ptcFitParsError[ampName] = (np.repeat(np.nan, self.config.polynomialFitDegree + 1) if
                                            ptcFitType in ["POLYNOMIAL", ] else np.repeat(np.nan, 3))
        dataset.ptcFitChiSq[ampName] = np.nan
        dataset.finalVars[ampName] = np.repeat(np.nan, len(dataset.rawExpTimes[ampName]))
        dataset.finalModelVars[ampName] = np.repeat(np.nan, len(dataset.rawExpTimes[ampName]))
        dataset.finalMeans[ampName] = np.repeat(np.nan, len(dataset.rawExpTimes[ampName]))

        return
730 return