Coverage for python/lsst/cp/pipe/ptc.py : 11%

1# This file is part of cp_pipe.
2#
3# Developed for the LSST Data Management System.
4# This product includes software developed by the LSST Project
5# (https://www.lsst.org).
6# See the COPYRIGHT file at the top-level directory of this distribution
7# for details of code ownership.
8#
9# This program is free software: you can redistribute it and/or modify
10# it under the terms of the GNU General Public License as published by
11# the Free Software Foundation, either version 3 of the License, or
12# (at your option) any later version.
13#
14# This program is distributed in the hope that it will be useful,
15# but WITHOUT ANY WARRANTY; without even the implied warranty of
16# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17# GNU General Public License for more details.
18#
19# You should have received a copy of the GNU General Public License
20# along with this program. If not, see <https://www.gnu.org/licenses/>.
21#
23__all__ = ['MeasurePhotonTransferCurveTask',
24 'MeasurePhotonTransferCurveTaskConfig',
25 'PhotonTransferCurveDataset']
27import numpy as np
28import matplotlib.pyplot as plt
29from sqlite3 import OperationalError
30from collections import Counter
31from dataclasses import dataclass
33import lsst.afw.math as afwMath
34import lsst.pex.config as pexConfig
35import lsst.pipe.base as pipeBase
36from .utils import (NonexistentDatasetTaskDataIdContainer, PairedVisitListTaskRunner,
37 checkExpLengthEqual, fitLeastSq, fitBootstrap, funcPolynomial, funcAstier)
38from scipy.optimize import least_squares
40from lsst.ip.isr.linearize import Linearizer
41import datetime
43from .astierCovPtcUtils import (fftSize, CovFft, computeCovDirect, fitData)
46class MeasurePhotonTransferCurveTaskConfig(pexConfig.Config):
47 """Config class for photon transfer curve measurement task"""
48 ccdKey = pexConfig.Field(
49 dtype=str,
50 doc="The key by which to pull a detector from a dataId, e.g. 'ccd' or 'detector'.",
51 default='ccd',
52 )
53 ptcFitType = pexConfig.ChoiceField(
54 dtype=str,
55 doc="Fit PTC to approximation in Astier+19 (Equation 16) or to a polynomial.",
56 default="POLYNOMIAL",
57 allowed={
58 "POLYNOMIAL": "n-degree polynomial (use 'polynomialFitDegree' to set 'n').",
59 "EXPAPPROXIMATION": "Approximation in Astier+19 (Eq. 16).",
60 "FULLCOVARIANCE": "Full covariances model in Astier+19 (Eq. 20)"
61 }
62 )
63 maximumRangeCovariancesAstier = pexConfig.Field(
64 dtype=int,
65 doc="Maximum range of covariances as in Astier+19",
66 default=8,
67 )
68 covAstierRealSpace = pexConfig.Field(
69 dtype=bool,
70 doc="Calculate covariances in real space or via FFT? (see appendix A of Astier+19).",
71 default=False,
72 )
73 polynomialFitDegree = pexConfig.Field(
74 dtype=int,
75 doc="Degree of polynomial to fit the PTC, when 'ptcFitType'=POLYNOMIAL.",
76 default=3,
77 )
78 doCreateLinearizer = pexConfig.Field(
79 dtype=bool,
80 doc="Calculate non-linearity and persist linearizer?",
81 default=False,
82 )
83 linearizerType = pexConfig.ChoiceField(
84 dtype=str,
85 doc="Linearizer type, if doCreateLinearizer=True",
86 default="LINEARIZEPOLYNOMIAL",
87 allowed={
88 "LINEARIZEPOLYNOMIAL": "n-degree polynomial (use 'polynomialFitDegreeNonLinearity' to set 'n').",
89 "LINEARIZESQUARED": "c0 quadratic coefficient derived from coefficients of polynomiual fit",
90 "LOOKUPTABLE": "Loouk table formed from linear part of polynomial fit."
91 }
92 )
93 polynomialFitDegreeNonLinearity = pexConfig.Field(
94 dtype=int,
95 doc="If doCreateLinearizer, degree of polynomial to fit the meanSignal vs exposureTime" +
96 " curve to produce the table for LinearizeLookupTable.",
97 default=3,
98 )
99 binSize = pexConfig.Field(
100 dtype=int,
101 doc="Bin the image by this factor in both dimensions.",
102 default=1,
103 )
104 minMeanSignal = pexConfig.Field(
105 dtype=float,
106 doc="Minimum value (inclusive) of mean signal (in DN) above which to consider.",
107 default=0,
108 )
109 maxMeanSignal = pexConfig.Field(
110 dtype=float,
111 doc="Maximum value (inclusive) of mean signal (in DN) below which to consider.",
112 default=9e6,
113 )
114 initialNonLinearityExclusionThresholdPositive = pexConfig.RangeField(
115 dtype=float,
116 doc="Initially exclude data points with a variance that are more than a factor of this from being"
117 " linear in the positive direction, from the PTC fit. Note that these points will also be"
118 " excluded from the non-linearity fit. This is done before the iterative outlier rejection,"
119 " to allow an accurate determination of the sigmas for said iterative fit.",
120 default=0.12,
121 min=0.0,
122 max=1.0,
123 )
124 initialNonLinearityExclusionThresholdNegative = pexConfig.RangeField(
125 dtype=float,
126 doc="Initially exclude data points with a variance that are more than a factor of this from being"
127 " linear in the negative direction, from the PTC fit. Note that these points will also be"
128 " excluded from the non-linearity fit. This is done before the iterative outlier rejection,"
129 " to allow an accurate determination of the sigmas for said iterative fit.",
130 default=0.25,
131 min=0.0,
132 max=1.0,
133 )
134 sigmaCutPtcOutliers = pexConfig.Field(
135 dtype=float,
136 doc="Sigma cut for outlier rejection in PTC.",
137 default=5.0,
138 )
139 maskNameList = pexConfig.ListField(
140 dtype=str,
141 doc="Mask list to exclude from statistics calculations.",
142 default=['SUSPECT', 'BAD', 'NO_DATA'],
143 )
144 nSigmaClipPtc = pexConfig.Field(
145 dtype=float,
146 doc="Sigma cut for afwMath.StatisticsControl()",
147 default=5.5,
148 )
149 nIterSigmaClipPtc = pexConfig.Field(
150 dtype=int,
151 doc="Number of sigma-clipping iterations for afwMath.StatisticsControl()",
152 default=1,
153 )
154 maxIterationsPtcOutliers = pexConfig.Field(
155 dtype=int,
156 doc="Maximum number of iterations for outlier rejection in PTC.",
157 default=2,
158 )
159 doFitBootstrap = pexConfig.Field(
160 dtype=bool,
161 doc="Use bootstrap for the PTC fit parameters and errors?.",
162 default=False,
163 )
164 maxAduForLookupTableLinearizer = pexConfig.Field(
165 dtype=int,
166 doc="Maximum DN value for the LookupTable linearizer.",
167 default=2**18,
168 )
169 instrumentName = pexConfig.Field(
170 dtype=str,
171 doc="Instrument name.",
172 default='',
173 )
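
# Illustrative config-override sketch (not part of the task): values one might set
# in a command-line config override file, assuming the usual CmdLineTask conventions
# (e.g. passed via --configfile). The field names are the ones defined above; the
# values are placeholders, not recommendations.
#
#   config.ptcFitType = "EXPAPPROXIMATION"        # fit Eq. 16 of Astier+19
#   config.doCreateLinearizer = True
#   config.linearizerType = "LINEARIZEPOLYNOMIAL"
#   config.minMeanSignal = 100.0                  # DN
#   config.maxMeanSignal = 8.0e4                  # DN
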
176@dataclass
177class LinearityResidualsAndLinearizersDataset:
178 """A simple class to hold the output from the
179 `calculateLinearityResidualAndLinearizers` function.
180 """
181 # Normalized coefficients for polynomial NL correction
182 polynomialLinearizerCoefficients: list
183 # Normalized coefficient for quadratic polynomial NL correction (c0)
184 quadraticPolynomialLinearizerCoefficient: float
185 # LUT array row for the amplifier at hand
186 linearizerTableRow: list
187 meanSignalVsTimePolyFitPars: list
188 meanSignalVsTimePolyFitParsErr: list
189 meanSignalVsTimePolyFitReducedChiSq: float
192class PhotonTransferCurveDataset:
193 """A simple class to hold the output data from the PTC task.
195 The dataset is made up of a dictionary for each item, keyed by the
196 amplifiers' names, which must be supplied at construction time.
198 New items cannot be added to the class to avoid accidentally saving to the
199 wrong property, and the class can be frozen if desired.
201 inputVisitPairs records the visits used to produce the data.
202 When fitPtc() or fitCovariancesAstier() is run, a mask is built up, which is by definition
203 always the same length as inputVisitPairs, rawExpTimes, rawMeans
204 and rawVars, and is a list of bools, which are incrementally set to False
205 as points are discarded from the fits.
207 PTC fit parameters for polynomials are stored in a list in ascending order
208 of polynomial term, i.e. par[0]*x^0 + par[1]*x + par[2]*x^2 etc
209 with the length of the list corresponding to the order of the polynomial
210 plus one.
212 Parameters
213 ----------
214 ampNames : `list`
215 List with the names of the amplifiers of the detector at hand.
217 ptcFitType : `str`
218 Type of model fitted to the PTC: "POLYNOMIAL", "EXPAPPROXIMATION", or "FULLCOVARIANCE".
220 Returns
221 -------
222 `lsst.cp.pipe.ptc.PhotonTransferCurveDataset`
223 Output dataset from MeasurePhotonTransferCurveTask.
224 """
226 def __init__(self, ampNames, ptcFitType):
227 # add items to __dict__ directly because __setattr__ is overridden
229 # instance variables
230 self.__dict__["ptcFitType"] = ptcFitType
231 self.__dict__["ampNames"] = ampNames
232 self.__dict__["badAmps"] = []
234 # raw data variables
235 # visitMask is the mask produced after outlier rejection. The mask produced by "FULLCOVARIANCE"
236 # may differ from the one produced in the other two PTC fit types.
237 self.__dict__["inputVisitPairs"] = {ampName: [] for ampName in ampNames}
238 self.__dict__["visitMask"] = {ampName: [] for ampName in ampNames}
239 self.__dict__["rawExpTimes"] = {ampName: [] for ampName in ampNames}
240 self.__dict__["rawMeans"] = {ampName: [] for ampName in ampNames}
241 self.__dict__["rawVars"] = {ampName: [] for ampName in ampNames}
243 # Gain and noise
244 self.__dict__["gain"] = {ampName: -1. for ampName in ampNames}
245 self.__dict__["gainErr"] = {ampName: -1. for ampName in ampNames}
246 self.__dict__["noise"] = {ampName: -1. for ampName in ampNames}
247 self.__dict__["noiseErr"] = {ampName: -1. for ampName in ampNames}
249 # if ptcFitType in ["POLYNOMIAL", "EXPAPPROXIMATION"]
250 # fit information
251 self.__dict__["ptcFitPars"] = {ampName: [] for ampName in ampNames}
252 self.__dict__["ptcFitParsError"] = {ampName: [] for ampName in ampNames}
253 self.__dict__["ptcFitReducedChiSquared"] = {ampName: [] for ampName in ampNames}
255 # if ptcFitType in ["FULLCOVARIANCE"]
256 # "covariancesTuple" is a numpy recarray with entries of the form
257 # ['mu', 'i', 'j', 'var', 'cov', 'npix', 'ext', 'expTime', 'ampName']
258 # "covariancesFits" has CovFit objects that fit the measured covariances to Eq. 20 of Astier+19.
259 # In "covariancesFitsWithNoB", "b"=0 in the model described by Eq. 20 of Astier+19.
260 self.__dict__["covariancesTuple"] = {ampName: [] for ampName in ampNames}
261 self.__dict__["covariancesFitsWithNoB"] = {ampName: [] for ampName in ampNames}
262 self.__dict__["covariancesFits"] = {ampName: [] for ampName in ampNames}
263 self.__dict__["aMatrix"] = {ampName: [] for ampName in ampNames}
264 self.__dict__["bMatrix"] = {ampName: [] for ampName in ampNames}
266 # "final" means that the "raw" vectors above had "visitMask" applied.
267 self.__dict__["finalVars"] = {ampName: [] for ampName in ampNames}
268 self.__dict__["finalModelVars"] = {ampName: [] for ampName in ampNames}
269 self.__dict__["finalMeans"] = {ampName: [] for ampName in ampNames}
271 def __setattr__(self, attribute, value):
272 """Protect class attributes"""
273 if attribute not in self.__dict__:
274 raise AttributeError(f"{attribute} is not already a member of PhotonTransferCurveDataset, which"
275 " does not support setting of new attributes.")
276 else:
277 self.__dict__[attribute] = value
279 def getVisitsUsed(self, ampName):
280 """Get the visits used, i.e. not discarded, for a given amp.
282 If no mask has been created yet, all visits are returned.
283 """
284 if len(self.visitMask[ampName]) == 0:
285 return self.inputVisitPairs[ampName]
287 # if the mask exists it had better be the same length as the visitPairs
288 assert len(self.visitMask[ampName]) == len(self.inputVisitPairs[ampName])
290 pairs = self.inputVisitPairs[ampName]
291 mask = self.visitMask[ampName]
292 # cast to bool required because numpy
293 return [(v1, v2) for ((v1, v2), m) in zip(pairs, mask) if bool(m) is True]
295 def getGoodAmps(self):
296 return [amp for amp in self.ampNames if amp not in self.badAmps]
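
# Minimal usage sketch for PhotonTransferCurveDataset (amp names and visit numbers
# below are purely illustrative):
#
#   ptcData = PhotonTransferCurveDataset(['C00', 'C01'], 'POLYNOMIAL')
#   ptcData.inputVisitPairs['C00'] = [(123, 124), (125, 126)]
#   ptcData.getVisitsUsed('C00')         # no mask yet -> [(123, 124), (125, 126)]
#   ptcData.visitMask['C00'] = [True, False]
#   ptcData.getVisitsUsed('C00')         # -> [(123, 124)]
#   ptcData.newAttr = 1                  # raises AttributeError (new attributes are not allowed)
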
299class MeasurePhotonTransferCurveTask(pipeBase.CmdLineTask):
300 """A class to calculate, fit, and plot a PTC from a set of flat pairs.
302 The Photon Transfer Curve (var(signal) vs mean(signal)) is a standard tool
303 used in astronomical detector characterization (e.g., Janesick 2001,
304 Janesick 2007). If ptcFitType is "EXPAPPROXIMATION" or "POLYNOMIAL", this task calculates the
305 PTC from a series of pairs of flat-field images; each pair taken at identical exposure
306 times. The difference image of each pair is formed to eliminate fixed pattern noise,
307 and then the variance of the difference image and the mean of the average image
308 are used to produce the PTC. An n-degree polynomial or the approximation in Equation
309 16 of Astier+19 ("The Shape of the Photon Transfer Curve of CCD sensors",
310 arXiv:1905.08677) can be fitted to the PTC curve. These models include
311 parameters such as the gain (e/DN) and readout noise.
313 Linearizers to correct for signal-chain non-linearity are also calculated.
314 The `Linearizer` class, in general, can support per-amp linearizers, but in this
315 task this is not supported.
317 If ptcFitType is "FULLCOVARIANCE", the covariances of the difference images are calculated via the
318 DFT methods described in Astier+19 and the variances for the PTC are given by the cov[0,0] elements
319 at each signal level. The full model in Equation 20 of Astier+19 is fit to the PTC to get the gain
320 and the noise.
322 Parameters
323 ----------
325 *args: `list`
326 Positional arguments passed to the Task constructor. None used at this
327 time.
328 **kwargs: `dict`
329 Keyword arguments passed on to the Task constructor. None used at this
330 time.
332 """
334 RunnerClass = PairedVisitListTaskRunner
335 ConfigClass = MeasurePhotonTransferCurveTaskConfig
336 _DefaultName = "measurePhotonTransferCurve"
338 def __init__(self, *args, **kwargs):
339 pipeBase.CmdLineTask.__init__(self, *args, **kwargs)
340 plt.interactive(False) # stop windows popping up when plotting. When headless, use 'agg' backend too
341 self.config.validate()
342 self.config.freeze()
344 @classmethod
345 def _makeArgumentParser(cls):
346 """Augment argument parser for the MeasurePhotonTransferCurveTask."""
347 parser = pipeBase.ArgumentParser(name=cls._DefaultName)
348 parser.add_argument("--visit-pairs", dest="visitPairs", nargs="*",
349 help="Visit pairs to use. Each pair must be of the form INT,INT e.g. 123,456")
350 parser.add_id_argument("--id", datasetType="photonTransferCurveDataset",
351 ContainerClass=NonexistentDatasetTaskDataIdContainer,
352 help="The ccds to use, e.g. --id ccd=0..100")
353 return parser
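
    # Hypothetical command-line sketch (repository path, rerun name, and visit numbers
    # are placeholders; the executable name is assumed to follow the usual
    # <_DefaultName>.py convention):
    #
    #   measurePhotonTransferCurve.py /path/to/repo --rerun ptc \
    #       --id ccd=0 --visit-pairs 123,124 125,126 127,128
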
355 @pipeBase.timeMethod
356 def runDataRef(self, dataRef, visitPairs):
357 """Run the Photon Transfer Curve (PTC) measurement task.
359 For a dataRef (which is each detector here),
360 and given a list of visit pairs (postISR) at different exposure times,
361 measure the PTC.
363 Parameters
364 ----------
365 dataRef : list of lsst.daf.persistence.ButlerDataRef
366 dataRef for the detector for the visits to be fit.
368 visitPairs : `iterable` of `tuple` of `int`
369 Pairs of visit numbers to be processed together
370 """
372 # setup necessary objects
373 detNum = dataRef.dataId[self.config.ccdKey]
374 detector = dataRef.get('camera')[dataRef.dataId[self.config.ccdKey]]
375 # expand some missing fields that we need for lsstCam. This is a work-around
376 # for Gen2 problems that I (RHL) don't feel like solving. The calibs pipelines
377 # (which inherit from CalibTask) use addMissingKeys() to do basically the same thing
378 #
379 # Basically, the butler's trying to look up the fields in `raw_visit` which won't work
380 for name in dataRef.getButler().getKeys('bias'):
381 if name not in dataRef.dataId:
382 try:
383 dataRef.dataId[name] = \
384 dataRef.getButler().queryMetadata('raw', [name], detector=detNum)[0]
385 except OperationalError:
386 pass
388 amps = detector.getAmplifiers()
389 ampNames = [amp.getName() for amp in amps]
390 datasetPtc = PhotonTransferCurveDataset(ampNames, self.config.ptcFitType)
391 self.log.info('Measuring PTC using %s visits for detector %s' % (visitPairs, detector.getId()))
393 tupleRecords = []
394 allTags = []
395 for (v1, v2) in visitPairs:
396 # Get postISR exposures.
397 dataRef.dataId['expId'] = v1
398 exp1 = dataRef.get("postISRCCD", immediate=True)
399 dataRef.dataId['expId'] = v2
400 exp2 = dataRef.get("postISRCCD", immediate=True)
401 del dataRef.dataId['expId']
403 checkExpLengthEqual(exp1, exp2, v1, v2, raiseWithMessage=True)
404 expTime = exp1.getInfo().getVisitInfo().getExposureTime()
406 tupleRows = []
407 nAmpsNan = 0
408 for ampNumber, amp in enumerate(detector):
409 ampName = amp.getName()
410 # covAstier: (i, j, var (cov[0,0]), cov, npix)
411 doRealSpace = self.config.covAstierRealSpace
412 muDiff, varDiff, covAstier = self.measureMeanVarCov(exp1, exp2, region=amp.getBBox(),
413 covAstierRealSpace=doRealSpace)
414 if np.isnan(muDiff) or np.isnan(varDiff) or (covAstier is None):
415 msg = (f"NaN mean or var, or None cov in amp {ampNumber} in visit pair {v1}, {v2} "
416 "of detector {detNum}.")
417 self.log.warn(msg)
418 nAmpsNan += 1
419 continue
421 datasetPtc.rawExpTimes[ampName].append(expTime)
422 datasetPtc.rawMeans[ampName].append(muDiff)
423 datasetPtc.rawVars[ampName].append(varDiff)
424 datasetPtc.inputVisitPairs[ampName].append((v1, v2))
426 tupleRows += [(muDiff, ) + covRow + (ampNumber, expTime, ampName) for covRow in covAstier]
427 tags = ['mu', 'i', 'j', 'var', 'cov', 'npix', 'ext', 'expTime', 'ampName']
428 if nAmpsNan == len(ampNames):
429 msg = f"NaN mean in all amps of visit pair {v1}, {v2} of detector {detNum}."
430 self.log.warn(msg)
431 continue
432 allTags += tags
433 tupleRecords += tupleRows
434 covariancesWithTags = np.core.records.fromrecords(tupleRecords, names=allTags)
436 if self.config.ptcFitType in ["FULLCOVARIANCE", ]:
437 # Calculate covariances and fit them, including the PTC, to Astier+19 full model (Eq. 20)
438 datasetPtc = self.fitCovariancesAstier(datasetPtc, covariancesWithTags)
439 elif self.config.ptcFitType in ["EXPAPPROXIMATION", "POLYNOMIAL"]:
440 # Fit the PTC to a polynomial or to Astier+19 exponential approximation (Eq. 16)
441 # Fill up PhotonTransferCurveDataset object.
442 datasetPtc = self.fitPtc(datasetPtc, self.config.ptcFitType)
444 # Fit a polynomial to calculate non-linearity and persist linearizer.
445 if self.config.doCreateLinearizer:
446 numberAmps = len(amps)
447 numberAduValues = self.config.maxAduForLookupTableLinearizer
448 lookupTableArray = np.zeros((numberAmps, numberAduValues), dtype=np.float32)
450 # Fit (non)linearity of signal vs time curve.
451 # Fill up PhotonTransferCurveDataset object.
452 # Fill up array for LUT linearizer (tableArray).
453 # Produce coefficients for Polynomial and Squared linearizers.
454 # Build linearizer objects.
455 linearizer = self.fitNonLinearityAndBuildLinearizers(datasetPtc, detector,
456 tableArray=lookupTableArray,
457 log=self.log)
459 if self.config.linearizerType == "LINEARIZEPOLYNOMIAL":
460 linDataType = 'linearizePolynomial'
461 linMsg = "polynomial (coefficients for a polynomial correction)."
462 elif self.config.linearizerType == "LINEARIZESQUARED":
463 linDataType = 'linearizePolynomial'
464 linMsg = "squared (c0, derived from k_i coefficients of a polynomial fit)."
465 elif self.config.linearizerType == "LOOKUPTABLE":
466 linDataType = 'linearizePolynomial'
467 linMsg = "lookup table (linear component of polynomial fit)."
468 else:
469 raise RuntimeError("Invalid config.linearizerType {selg.config.linearizerType}. "
470 "Supported: 'LOOKUPTABLE', 'LINEARIZESQUARED', or 'LINEARIZEPOLYNOMIAL'")
472 butler = dataRef.getButler()
473 self.log.info(f"Writing linearizer: \n {linMsg}")
475 detName = detector.getName()
476 now = datetime.datetime.utcnow()
477 calibDate = now.strftime("%Y-%m-%d")
479 butler.put(linearizer, datasetType=linDataType, dataId={'detector': detNum,
480 'detectorName': detName, 'calibDate': calibDate})
482 self.log.info(f"Writing PTC data to {dataRef.getUri(write=True)}")
483 dataRef.put(datasetPtc, datasetType="photonTransferCurveDataset")
485 return pipeBase.Struct(exitStatus=0)
487 def fitCovariancesAstier(self, dataset, covariancesWithTagsArray):
488 """Fit measured flat covariances to full model in Astier+19.
490 Parameters
491 ----------
492 dataset : `lsst.cp.pipe.ptc.PhotonTransferCurveDataset`
493 The dataset containing information such as the means, variances and exposure times.
495 covariancesWithTagsArray : `numpy.recarray`
496 Tuple with at least (mu, cov, var, i, j, npix), where:
497 mu : 0.5*(mu1 + mu2), where:
498 mu1: mean value of flat1
499 mu2: mean value of flat2
500 cov: covariance value at lag(i, j)
501 var: variance(covariance value at lag(0, 0))
502 i: lag dimension
503 j: lag dimension
504 npix: number of pixels used for covariance calculation.
506 Returns
507 -------
508 dataset: `lsst.cp.pipe.ptc.PhotonTransferCurveDataset`
509 This is the same dataset as the input parameter, however, it has been modified
510 to include information such as the fit vectors and the fit parameters. See
511 the class `PhotonTransferCurveDataset`.
512 """
514 covFits, covFitsNoB = fitData(covariancesWithTagsArray, maxMu=self.config.maxMeanSignal,
515 r=self.config.maximumRangeCovariancesAstier)
517 dataset.covariancesTuple = covariancesWithTagsArray
518 dataset.covariancesFits = covFits
519 dataset.covariancesFitsWithNoB = covFitsNoB
520 dataset = self.getOutputPtcDataCovAstier(dataset, covFits)
522 return dataset
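
    # Sketch of how the resulting covariancesTuple recarray can be sliced afterwards
    # (field names are the tags defined in runDataRef; the amp name is illustrative):
    #
    #   tup = dataset.covariancesTuple
    #   ampSel = tup[tup['ampName'] == 'C00']
    #   lag00 = ampSel[(ampSel['i'] == 0) & (ampSel['j'] == 0)]
    #   # lag00['mu'] and lag00['var'] are the mean/variance points feeding the Eq. 20 fit.
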
524 def getOutputPtcDataCovAstier(self, dataset, covFits):
525 """Get output data for PhotonTransferCurveCovAstierDataset from CovFit objects.
527 Parameters
528 ----------
529 dataset : `lsst.cp.pipe.ptc.PhotonTransferCurveDataset`
530 The dataset containing information such as the means, variances and exposure times.
532 covFits: `dict`
533 Dictionary of CovFit objects, with amp names as keys.
535 Returns
536 -------
537 dataset : `lsst.cp.pipe.ptc.PhotonTransferCurveDataset`
538 This is the same dataset as the input parameter, however, it has been modified
539 to include extra information such as the mask 1D array, gains, readout noise, measured signal,
540 measured variance, modeled variance, a, and b coefficient matrices (see Astier+19) per amplifier.
541 See the class `PhotonTransferCurveDataset`.
542 """
544 for i, amp in enumerate(covFits):
545 fit = covFits[amp]
546 meanVecFinal, varVecFinal, varVecModel, wc = fit.getNormalizedFitData(0, 0, divideByMu=False)
547 gain = fit.getGain()
548 dataset.visitMask[amp] = fit.getMaskVar()
549 dataset.gain[amp] = gain
550 dataset.gainErr[amp] = fit.getGainErr()
551 dataset.noise[amp] = np.sqrt(np.fabs(fit.getRon()))
552 dataset.noiseErr[amp] = fit.getRonErr()
553 dataset.finalVars[amp].append(varVecFinal/(gain**2))
554 dataset.finalModelVars[amp].append(varVecModel/(gain**2))
555 dataset.finalMeans[amp].append(meanVecFinal/gain)
556 dataset.aMatrix[amp].append(fit.getA())
557 dataset.bMatrix[amp].append(fit.getB())
559 return dataset
561 def measureMeanVarCov(self, exposure1, exposure2, region=None, covAstierRealSpace=False):
562 """Calculate the mean of each of two exposures and the variance and covariance of their difference.
564 The variance is calculated via afwMath, and the covariance via the methods in Astier+19 (appendix A).
565 In theory, var = covariance[0,0]. This should be validated, and in the future, we may decide to just
566 keep one (covariance).
568 Parameters
569 ----------
570 exposure1 : `lsst.afw.image.exposure.exposure.ExposureF`
571 First exposure of flat field pair.
573 exposure2 : `lsst.afw.image.exposure.exposure.ExposureF`
574 Second exposure of flat field pair.
576 region : `lsst.geom.Box2I`, optional
577 Region of each exposure where to perform the calculations (e.g, an amplifier).
579 covAstierRealSpace : `bool`, optional
580 Should the covariances in Astier+19 be calculated in real space or via FFT?
581 See Appendix A of Astier+19.
583 Returns
584 -------
585 mu : `float` or `NaN`
586 0.5*(mu1 + mu2), where mu1 and mu2 are the clipped means of the regions in
587 both exposures. If either mu1 or mu2 is NaN, the returned value is NaN.
589 varDiff : `float` or `NaN`
590 Half of the clipped variance of the difference of the regions in the two input
591 exposures. If either mu1 or mu2 is NaN, the returned value is NaN.
593 covDiffAstier : `list` or `NaN`
594 List with tuples of the form (dx, dy, var, cov, npix), where:
595 dx : `int`
596 Lag in x
597 dy : `int`
598 Lag in y
599 var : `float`
600 Variance at (dx, dy).
601 cov : `float`
602 Covariance at (dx, dy).
603 nPix : `int`
604 Number of pixel pairs used to evaluate var and cov.
605 If either mu1 or mu2 is NaN, the returned value is NaN.
606 """
608 if region is not None:
609 im1Area = exposure1.maskedImage[region]
610 im2Area = exposure2.maskedImage[region]
611 else:
612 im1Area = exposure1.maskedImage
613 im2Area = exposure2.maskedImage
615 im1Area = afwMath.binImage(im1Area, self.config.binSize)
616 im2Area = afwMath.binImage(im2Area, self.config.binSize)
618 im1MaskVal = exposure1.getMask().getPlaneBitMask(self.config.maskNameList)
619 im1StatsCtrl = afwMath.StatisticsControl(self.config.nSigmaClipPtc,
620 self.config.nIterSigmaClipPtc,
621 im1MaskVal)
622 im1StatsCtrl.setNanSafe(True)
623 im1StatsCtrl.setAndMask(im1MaskVal)
625 im2MaskVal = exposure2.getMask().getPlaneBitMask(self.config.maskNameList)
626 im2StatsCtrl = afwMath.StatisticsControl(self.config.nSigmaClipPtc,
627 self.config.nIterSigmaClipPtc,
628 im2MaskVal)
629 im2StatsCtrl.setNanSafe(True)
630 im2StatsCtrl.setAndMask(im2MaskVal)
632 # Clipped mean of images; then average of mean.
633 mu1 = afwMath.makeStatistics(im1Area, afwMath.MEANCLIP, im1StatsCtrl).getValue()
634 mu2 = afwMath.makeStatistics(im2Area, afwMath.MEANCLIP, im2StatsCtrl).getValue()
635 if np.isnan(mu1) or np.isnan(mu2):
636 return np.nan, np.nan, None
637 mu = 0.5*(mu1 + mu2)
639 # Take difference of pairs
640 # symmetric formula: diff = (mu2*im1-mu1*im2)/(0.5*(mu1+mu2))
641 temp = im2Area.clone()
642 temp *= mu1
643 diffIm = im1Area.clone()
644 diffIm *= mu2
645 diffIm -= temp
646 diffIm /= mu
648 diffImMaskVal = diffIm.getMask().getPlaneBitMask(self.config.maskNameList)
649 diffImStatsCtrl = afwMath.StatisticsControl(self.config.nSigmaClipPtc,
650 self.config.nIterSigmaClipPtc,
651 diffImMaskVal)
652 diffImStatsCtrl.setNanSafe(True)
653 diffImStatsCtrl.setAndMask(diffImMaskVal)
655 varDiff = 0.5*(afwMath.makeStatistics(diffIm, afwMath.VARIANCECLIP, diffImStatsCtrl).getValue())
657 # Get the mask and identify good pixels as '1', and the rest as '0'.
658 w1 = np.where(im1Area.getMask().getArray() == 0, 1, 0)
659 w2 = np.where(im2Area.getMask().getArray() == 0, 1, 0)
661 w12 = w1*w2
662 wDiff = np.where(diffIm.getMask().getArray() == 0, 1, 0)
663 w = w12*wDiff
665 maxRangeCov = self.config.maximumRangeCovariancesAstier
666 if covAstierRealSpace:
667 covDiffAstier = computeCovDirect(diffIm.getImage().getArray(), w, maxRangeCov)
668 else:
669 shapeDiff = diffIm.getImage().getArray().shape
670 fftShape = (fftSize(shapeDiff[0] + maxRangeCov), fftSize(shapeDiff[1]+maxRangeCov))
671 c = CovFft(diffIm.getImage().getArray(), w, fftShape, maxRangeCov)
672 covDiffAstier = c.reportCovFft(maxRangeCov)
674 return mu, varDiff, covDiffAstier
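
    # Plain-numpy sketch of the statistics computed above (no masking or sigma-clipping;
    # im1 and im2 are hypothetical flat-field cutouts as arrays):
    #
    #   mu1, mu2 = im1.mean(), im2.mean()
    #   mu = 0.5*(mu1 + mu2)
    #   diff = (mu2*im1 - mu1*im2)/mu       # same symmetric difference as above
    #   varDiff = 0.5*diff.var()            # half the variance of the difference
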
676 def computeCovDirect(self, diffImage, weightImage, maxRange):
677 """Compute covariances of diffImage in real space.
679 For lags larger than ~25, it is slower than the FFT way.
680 Taken from https://github.com/PierreAstier/bfptc/
682 Parameters
683 ----------
684 diffImage : `numpy.array`
685 Image to compute the covariance of.
687 weightImage : `numpy.array`
688 Weight image of diffImage (1's and 0's for good and bad pixels, respectively).
690 maxRange : `int`
691 Last index of the covariance to be computed.
693 Returns
694 -------
695 outList : `list`
696 List with tuples of the form (dx, dy, var, cov, npix), where:
697 dx : `int`
698 Lag in x
699 dy : `int`
700 Lag in y
701 var : `float`
702 Variance at (dx, dy).
703 cov : `float`
704 Covariance at (dx, dy).
705 nPix : `int`
706 Number of pixel pairs used to evaluate var and cov.
707 """
708 outList = []
709 var = 0
710 # (dy,dx) = (0,0) has to be first
711 for dy in range(maxRange + 1):
712 for dx in range(0, maxRange + 1):
713 if (dx*dy > 0):
714 cov1, nPix1 = self.covDirectValue(diffImage, weightImage, dx, dy)
715 cov2, nPix2 = self.covDirectValue(diffImage, weightImage, dx, -dy)
716 cov = 0.5*(cov1 + cov2)
717 nPix = nPix1 + nPix2
718 else:
719 cov, nPix = self.covDirectValue(diffImage, weightImage, dx, dy)
720 if (dx == 0 and dy == 0):
721 var = cov
722 outList.append((dx, dy, var, cov, nPix))
724 return outList
726 def covDirectValue(self, diffImage, weightImage, dx, dy):
727 """Compute covariances of diffImage in real space at lag (dx, dy).
729 Taken from https://github.com/PierreAstier/bfptc/ (c.f., appendix of Astier+19).
731 Parameters
732 ----------
733 diffImage : `numpy.array`
734 Image to compute the covariance of.
736 weightImage : `numpy.array`
737 Weight image of diffImage (1's and 0's for good and bad pixels, respectively).
739 dx : `int`
740 Lag in x.
742 dy : `int`
743 Lag in y.
745 Returns
746 -------
747 cov : `float`
748 Covariance at (dx, dy)
750 nPix : `int`
751 Number of pixel pairs used to evaluate var and cov.
752 """
753 (nCols, nRows) = diffImage.shape
754 # switching both signs does not change anything:
755 # it just swaps im1 and im2 below
756 if (dx < 0):
757 (dx, dy) = (-dx, -dy)
758 # now, we have dx >0. We have to distinguish two cases
759 # depending on the sign of dy
760 if dy >= 0:
761 im1 = diffImage[dy:, dx:]
762 w1 = weightImage[dy:, dx:]
763 im2 = diffImage[:nCols - dy, :nRows - dx]
764 w2 = weightImage[:nCols - dy, :nRows - dx]
765 else:
766 im1 = diffImage[:nCols + dy, dx:]
767 w1 = weightImage[:nCols + dy, dx:]
768 im2 = diffImage[-dy:, :nRows - dx]
769 w2 = weightImage[-dy:, :nRows - dx]
770 # use the same mask for all 3 calculations
771 wAll = w1*w2
772 # do not use mean() because weightImage=0 pixels would then count
773 nPix = wAll.sum()
774 im1TimesW = im1*wAll
775 s1 = im1TimesW.sum()/nPix
776 s2 = (im2*wAll).sum()/nPix
777 p = (im1TimesW*im2).sum()/nPix
778 cov = p - s1*s2
780 return cov, nPix
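
    # Numeric sketch of the estimator above for a single lag, using plain numpy on
    # hypothetical shifted windows with w1*w2 == 1 everywhere:
    #
    #   im1 = np.array([1., 2., 3., 4.])
    #   im2 = np.array([2., 1., 4., 3.])
    #   s1, s2 = im1.mean(), im2.mean()         # 2.5, 2.5
    #   cov = (im1*im2).mean() - s1*s2          # (2 + 2 + 12 + 12)/4 - 6.25 = 0.75
    #
    # which is exactly p - s1*s2 with all weights equal to one.
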
782 def fitNonLinearityAndBuildLinearizers(self, datasetPtc, detector, tableArray=None, log=None):
783 """Fit non-linearity function and build linearizer objects.
785 Parameters
786 ----------
787 datasetPtc : `lsst.cp.pipe.ptc.PhotonTransferCurveDataset`
788 The dataset containing information such as the means, variances and exposure times.
791 detector : `lsst.afw.cameraGeom.Detector`
792 Detector object.
794 tableArray : `np.array`, optional
795 Optional. Look-up table array with size rows=nAmps and columns=DN values.
796 It will be modified in-place if supplied.
798 log : `lsst.log.Log`, optional
799 Logger to handle messages.
801 Returns
802 -------
803 linearizer : `lsst.ip.isr.Linearizer`
804 Linearizer object
805 """
807 # Fit NonLinearity
808 datasetNonLinearity = self.fitNonLinearity(datasetPtc, tableArray=tableArray)
810 # Produce linearizer
811 now = datetime.datetime.utcnow()
812 calibDate = now.strftime("%Y-%m-%d")
813 linType = self.config.linearizerType
815 if linType == "LOOKUPTABLE":
816 tableArray = tableArray
817 else:
818 tableArray = None
820 linearizer = self.buildLinearizerObject(datasetNonLinearity, detector, calibDate, linType,
821 instruName=self.config.instrumentName,
822 tableArray=tableArray,
823 log=log)
825 return linearizer
827 def fitNonLinearity(self, datasetPtc, tableArray=None):
828 """Fit a polynomial to signal vs effective time curve to calculate linearity and residuals.
830 Parameters
831 ----------
832 datasetPtc : `lsst.cp.pipe.ptc.PhotonTransferCurveDataset`
833 The dataset containing the means, variances and exposure times.
835 tableArray : `np.array`
836 Optional. Look-up table array with size rows=nAmps and columns=DN values.
837 It will be modified in-place if supplied.
839 Returns
840 -------
841 datasetNonLinearity : `dict`
842 Dictionary of `lsst.cp.pipe.ptc.LinearityResidualsAndLinearizersDataset`
843 dataclasses. Each one holds the output of `calculateLinearityResidualAndLinearizers` per
844 amplifier.
845 """
846 datasetNonLinearity = {ampName: [] for ampName in datasetPtc.ampNames}
847 for i, ampName in enumerate(datasetPtc.ampNames):
848 # If a mask is not found, use all points.
849 if (len(datasetPtc.visitMask[ampName]) == 0):
850 self.log.warn(f"Mask not found for {ampName} in non-linearity fit. Using all points.")
851 mask = np.repeat(True, len(datasetPtc.rawExpTimes[ampName]))
852 else:
853 mask = datasetPtc.visitMask[ampName]
855 timeVecFinal = np.array(datasetPtc.rawExpTimes[ampName])[mask]
856 meanVecFinal = np.array(datasetPtc.rawMeans[ampName])[mask]
858 # Non-linearity residuals (NL of mean vs time curve): percentage, and fit to a quadratic function
859 # In this case, len(parsIniNonLinearity) = 3 indicates that we want a quadratic fit
860 datasetLinRes = self.calculateLinearityResidualAndLinearizers(timeVecFinal, meanVecFinal)
862 # LinearizerLookupTable
863 if tableArray is not None:
864 tableArray[i, :] = datasetLinRes.linearizerTableRow
866 datasetNonLinearity[ampName] = datasetLinRes
868 return datasetNonLinearity
870 def calculateLinearityResidualAndLinearizers(self, exposureTimeVector, meanSignalVector):
871 """Calculate linearity residual and fit an n-order polynomial to the mean vs time curve
872 to produce corrections (deviation from linear part of polynomial) for a particular amplifier
873 to populate LinearizeLookupTable.
874 Use the coefficients of this fit to calculate the correction coefficients for LinearizePolynomial
875 and LinearizeSquared."
877 Parameters
878 ----------
880 exposureTimeVector : `list` of `float`
881 List of exposure times for each flat pair
883 meanSignalVector : `list` of `float`
884 List of mean signal from difference image of flat pairs
886 Returns
887 -------
888 dataset : `lsst.cp.pipe.ptc.LinearityResidualsAndLinearizersDataset`
889 The dataset containing the fit parameters, the NL correction coefficients, and the
890 LUT row for the amplifier at hand.
892 Notes
893 -----
894 dataset members:
896 dataset.polynomialLinearizerCoefficients : `list` of `float`
897 Coefficients for LinearizePolynomial, where corrImage = uncorrImage + sum_i c_i uncorrImage^(2 +
898 i).
899 c_(j-2) = -k_j/(k_1^j) with units DN^(1-j) (c.f., Eq. 37 of 2003.05978). The units of k_j are
900 DN/t^j, and they are fit from meanSignalVector = k0 + k1*exposureTimeVector +
901 k2*exposureTimeVector^2 + ... + kn*exposureTimeVector^n, with
902 n = "polynomialFitDegreeNonLinearity". k_0 and k_1 and degenerate with bias level and gain,
903 and are not used by the non-linearity correction. Therefore, j = 2...n in the above expression
904 (see `LinearizePolynomial` class in `linearize.py`.)
906 dataset.quadraticPolynomialLinearizerCoefficient : `float`
907 Coefficient for LinearizeSquared, where corrImage = uncorrImage + c0*uncorrImage^2.
908 c0 = -k2/(k1^2), where k1 and k2 are fit from
909 meanSignalVector = k0 + k1*exposureTimeVector + k2*exposureTimeVector^2 +...
910 + kn*exposureTimeVector^n, with n = "polynomialFitDegreeNonLinearity".
912 dataset.linearizerTableRow : `list` of `float`
913 One dimensional array with deviation from linear part of n-order polynomial fit
914 to mean vs time curve. This array will be one row (for the particular amplifier at hand)
915 of the table array for LinearizeLookupTable.
917 dataset.meanSignalVsTimePolyFitPars : `list` of `float`
918 Parameters from n-order polynomial fit to meanSignalVector vs exposureTimeVector.
920 dataset.meanSignalVsTimePolyFitParsErr : `list` of `float`
921 Parameter errors from n-order polynomial fit to meanSignalVector vs exposureTimeVector.
923 dataset.meanSignalVsTimePolyFitReducedChiSq : `float`
924 Reduced unweighted chi squared from polynomial fit to meanSignalVector vs exposureTimeVector.
925 """
927 # Lookup table linearizer
928 parsIniNonLinearity = self._initialParsForPolynomial(self.config.polynomialFitDegreeNonLinearity + 1)
929 if self.config.doFitBootstrap:
930 parsFit, parsFitErr, reducedChiSquaredNonLinearityFit = fitBootstrap(parsIniNonLinearity,
931 exposureTimeVector,
932 meanSignalVector,
933 funcPolynomial)
934 else:
935 parsFit, parsFitErr, reducedChiSquaredNonLinearityFit = fitLeastSq(parsIniNonLinearity,
936 exposureTimeVector,
937 meanSignalVector,
938 funcPolynomial)
940 # LinearizeLookupTable:
941 # Use linear part to get time at which signal is maxAduForLookupTableLinearizer DN
942 tMax = (self.config.maxAduForLookupTableLinearizer - parsFit[0])/parsFit[1]
943 timeRange = np.linspace(0, tMax, self.config.maxAduForLookupTableLinearizer)
944 signalIdeal = parsFit[0] + parsFit[1]*timeRange
945 signalUncorrected = funcPolynomial(parsFit, timeRange)
946 linearizerTableRow = signalIdeal - signalUncorrected # LinearizerLookupTable has corrections
947 # LinearizePolynomial and LinearizeSquared:
948 # Check that magnitude of higher order (>= 3) coefficients of the polyFit are small,
949 # i.e., less than threshold = 1e-10 (typical quadratic and cubic coefficients are ~1e-6
950 # and ~1e-12).
951 k1 = parsFit[1]
952 polynomialLinearizerCoefficients = []
953 for i, coefficient in enumerate(parsFit):
954 c = -coefficient/(k1**i)
955 polynomialLinearizerCoefficients.append(c)
956 if np.fabs(c) > 1e-10:
957 msg = f"Coefficient {c} in polynomial fit larger than threshold 1e-10."
958 self.log.warn(msg)
959 # Coefficient for LinearizeSquared. Called "c0" in linearize.py
960 c0 = polynomialLinearizerCoefficients[2]
962 dataset = LinearityResidualsAndLinearizersDataset([], None, [], [], [], None)
963 dataset.polynomialLinearizerCoefficients = polynomialLinearizerCoefficients
964 dataset.quadraticPolynomialLinearizerCoefficient = c0
965 dataset.linearizerTableRow = linearizerTableRow
966 dataset.meanSignalVsTimePolyFitPars = parsFit
967 dataset.meanSignalVsTimePolyFitParsErr = parsFitErr
968 dataset.meanSignalVsTimePolyFitReducedChiSq = reducedChiSquaredNonLinearityFit
970 return dataset
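
    # Worked sketch of the coefficient relations used above (numbers are illustrative,
    # not fitted values): for a quadratic response mu(t) = k0 + k1*t + k2*t**2 with
    # k1 = 5000 DN/s and k2 = -0.05 DN/s**2,
    #
    #   c0 = -k2/k1**2 = 0.05/5000.**2 = 2e-9    # LinearizeSquared coefficient
    #
    # and the lookup-table row holds signalIdeal - signalUncorrected, i.e.
    # -(k2*t**2) = +0.05*t**2 DN at time t, sampled on the timeRange grid.
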
972 def buildLinearizerObject(self, datasetNonLinearity, detector, calibDate, linearizerType, instruName='',
973 tableArray=None, log=None):
974 """Build linearizer object to persist.
976 Parameters
977 ----------
978 datasetNonLinearity : `dict`
979 Dictionary of `lsst.cp.pipe.ptc.LinearityResidualsAndLinearizersDataset` objects.
981 detector : `lsst.afw.cameraGeom.Detector`
982 Detector object
984 calibDate : `datetime.datetime`
985 Calibration date
987 linearizerType : `str`
988 'LOOKUPTABLE', 'LINEARIZESQUARED', or 'LINEARIZEPOLYNOMIAL'
990 instruName : `str`, optional
991 Instrument name
993 tableArray : `np.array`, optional
994 Look-up table array with size rows=nAmps and columns=DN values
996 log : `lsst.log.Log`, optional
997 Logger to handle messages
999 Returns
1000 -------
1001 linearizer : `lsst.ip.isr.Linearizer`
1002 Linearizer object
1003 """
1004 detName = detector.getName()
1005 detNum = detector.getId()
1006 if linearizerType == "LOOKUPTABLE":
1007 if tableArray is not None:
1008 linearizer = Linearizer(detector=detector, table=tableArray, log=log)
1009 else:
1010 raise RuntimeError("tableArray must be provided when creating a LookupTable linearizer")
1011 elif linearizerType in ("LINEARIZESQUARED", "LINEARIZEPOLYNOMIAL"):
1012 linearizer = Linearizer(log=log)
1013 else:
1014 raise RuntimeError("Invalid linearizerType {linearizerType} to build a Linearizer object. "
1015 "Supported: 'LOOKUPTABLE', 'LINEARIZESQUARED', or 'LINEARIZEPOLYNOMIAL'")
1016 for i, amp in enumerate(detector.getAmplifiers()):
1017 ampName = amp.getName()
1018 datasetNonLinAmp = datasetNonLinearity[ampName]
1019 if linearizerType == "LOOKUPTABLE":
1020 linearizer.linearityCoeffs[ampName] = [i, 0]
1021 linearizer.linearityType[ampName] = "LookupTable"
1022 elif linearizerType == "LINEARIZESQUARED":
1023 linearizer.fitParams[ampName] = datasetNonLinAmp.meanSignalVsTimePolyFitPars
1024 linearizer.fitParamsErr[ampName] = datasetNonLinAmp.meanSignalVsTimePolyFitParsErr
1025 linearizer.linearityFitReducedChiSquared[ampName] = (
1026 datasetNonLinAmp.meanSignalVsTimePolyFitReducedChiSq)
1027 linearizer.linearityCoeffs[ampName] = [
1028 datasetNonLinAmp.quadraticPolynomialLinearizerCoefficient]
1029 linearizer.linearityType[ampName] = "Squared"
1030 elif linearizerType == "LINEARIZEPOLYNOMIAL":
1031 linearizer.fitParams[ampName] = datasetNonLinAmp.meanSignalVsTimePolyFitPars
1032 linearizer.fitParamsErr[ampName] = datasetNonLinAmp.meanSignalVsTimePolyFitParsErr
1033 linearizer.linearityFitReducedChiSquared[ampName] = (
1034 datasetNonLinAmp.meanSignalVsTimePolyFitReducedChiSq)
1035 # Slice correction coefficients (starting at 2) for polynomial linearizer
1036 # (and squared linearizer above). The first and second are redundant with
1037 # the bias and gain, respectively, and are not used by LinearizePolynomial.
1038 polyLinCoeffs = np.array(datasetNonLinAmp.polynomialLinearizerCoefficients[2:])
1039 linearizer.linearityCoeffs[ampName] = polyLinCoeffs
1040 linearizer.linearityType[ampName] = "Polynomial"
1041 linearizer.linearityBBox[ampName] = amp.getBBox()
1042 linearizer.validate()
1043 calibId = f"detectorName={detName} detector={detNum} calibDate={calibDate} ccd={detNum} filter=NONE"
1045 try:
1046 raftName = detName.split("_")[0]
1047 calibId += f" raftName={raftName}"
1048 except Exception:
1049 raftname = "NONE"
1050 calibId += f" raftName={raftname}"
1052 serial = detector.getSerial()
1053 linearizer.updateMetadata(instrumentName=instruName, detectorId=f"{detNum}",
1054 calibId=calibId, serial=serial, detectorName=f"{detName}")
1056 return linearizer
1058 @staticmethod
1059 def _initialParsForPolynomial(order):
1060 assert(order >= 2)
1061 pars = np.zeros(order, dtype=np.float64)
1062 pars[0] = 10
1063 pars[1] = 1
1064 pars[2:] = 0.0001
1065 return pars
1067 @staticmethod
1068 def _boundsForPolynomial(initialPars):
1069 lowers = [np.NINF for p in initialPars]
1070 uppers = [np.inf for p in initialPars]
1071 lowers[1] = 0 # no negative gains
1072 return (lowers, uppers)
1074 @staticmethod
1075 def _boundsForAstier(initialPars):
1076 lowers = [np.NINF for p in initialPars]
1077 uppers = [np.inf for p in initialPars]
1078 return (lowers, uppers)
1080 @staticmethod
1081 def _getInitialGoodPoints(means, variances, maxDeviationPositive, maxDeviationNegative):
1082 """Return a boolean array to mask bad points.
1084 A linear function has a constant ratio, so find the median
1085 value of the ratios, and exclude the points that deviate
1086 from that by more than a factor of maxDeviationPositive/negative.
1087 Asymmetric deviations are supported as we expect the PTC to turn
1088 down as the flux increases, but sometimes it anomalously turns
1089 upwards just before turning over, which ruins the fits, so it
1090 is wise to be stricter about restricting positive outliers than
1091 negative ones.
1093 Too high and points that are so bad that the fit will fail will be included;
1094 too low and the non-linear points will be excluded, biasing the NL fit."""
1095 ratios = [b/a for (a, b) in zip(means, variances)]
1096 medianRatio = np.median(ratios)
1097 ratioDeviations = [(r/medianRatio)-1 for r in ratios]
1099 # so that it doesn't matter if the deviation is expressed as positive or negative
1100 maxDeviationPositive = abs(maxDeviationPositive)
1101 maxDeviationNegative = -1. * abs(maxDeviationNegative)
1103 goodPoints = np.array([True if (r < maxDeviationPositive and r > maxDeviationNegative)
1104 else False for r in ratioDeviations])
1105 return goodPoints
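
    # Numeric sketch of the ratio-based cut above (values are illustrative):
    #
    #   means     = [1., 2., 3., 4.]
    #   variances = [1., 2., 3., 8.]
    #   MeasurePhotonTransferCurveTask._getInitialGoodPoints(means, variances, 0.12, 0.25)
    #   # ratios [1, 1, 1, 2], medianRatio 1, deviations [0, 0, 0, 1]
    #   # -> array([ True,  True,  True, False]); only the last point is rejected.
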
1107 def _makeZeroSafe(self, array, warn=True, substituteValue=1e-9):
1108 """"""
1109 nBad = Counter(array)[0]
1110 if nBad == 0:
1111 return array
1113 if warn:
1114 msg = f"Found {nBad} zeros in array at elements {[x for x in np.where(array==0)[0]]}"
1115 self.log.warn(msg)
1117 array[array == 0] = substituteValue
1118 return array
1120 def fitPtc(self, dataset, ptcFitType):
1121 """Fit the photon transfer curve to a polynimial or to Astier+19 approximation.
1123 Fit the photon transfer curve with either a polynomial of the order
1124 specified in the task config, or using the Astier approximation.
1126 Sigma clipping is performed iteratively for the fit, as well as an
1127 initial clipping of data points that are more than
1128 config.initialNonLinearityExclusionThreshold away from lying on a
1129 straight line. This initial step is necessary because the photon transfer
1130 curve turns over catastrophically at very high flux (because saturation
1131 drops the variance to ~0) and these far outliers cause the initial fit
1132 to fail, meaning the sigma cannot be calculated to perform the
1133 sigma-clipping.
1135 Parameters
1136 ----------
1137 dataset : `lsst.cp.pipe.ptc.PhotonTransferCurveDataset`
1138 The dataset containing the means, variances and exposure times
1140 ptcFitType : `str`
1141 Fit a 'POLYNOMIAL' (degree: 'polynomialFitDegree') or
1142 'EXPAPPROXIMATION' (Eq. 16 of Astier+19) to the PTC
1144 Returns
1145 -------
1146 dataset: `lsst.cp.pipe.ptc.PhotonTransferCurveDataset`
1147 This is the same dataset as the input parameter, however, it has been modified
1148 to include information such as the fit vectors and the fit parameters. See
1149 the class `PhotonTransferCurveDataset`.
1150 """
1152 def errFunc(p, x, y):
1153 return ptcFunc(p, x) - y
1155 sigmaCutPtcOutliers = self.config.sigmaCutPtcOutliers
1156 maxIterationsPtcOutliers = self.config.maxIterationsPtcOutliers
1158 for i, ampName in enumerate(dataset.ampNames):
1159 timeVecOriginal = np.array(dataset.rawExpTimes[ampName])
1160 meanVecOriginal = np.array(dataset.rawMeans[ampName])
1161 varVecOriginal = np.array(dataset.rawVars[ampName])
1162 varVecOriginal = self._makeZeroSafe(varVecOriginal)
1164 mask = ((meanVecOriginal >= self.config.minMeanSignal) &
1165 (meanVecOriginal <= self.config.maxMeanSignal))
1167 goodPoints = self._getInitialGoodPoints(meanVecOriginal, varVecOriginal,
1168 self.config.initialNonLinearityExclusionThresholdPositive,
1169 self.config.initialNonLinearityExclusionThresholdNegative)
1170 mask = mask & goodPoints
1172 if ptcFitType == 'EXPAPPROXIMATION':
1173 ptcFunc = funcAstier
1174 parsIniPtc = [-1e-9, 1.0, 10.] # a00, gain, noise
1175 bounds = self._boundsForAstier(parsIniPtc)
1176 if ptcFitType == 'POLYNOMIAL':
1177 ptcFunc = funcPolynomial
1178 parsIniPtc = self._initialParsForPolynomial(self.config.polynomialFitDegree + 1)
1179 bounds = self._boundsForPolynomial(parsIniPtc)
1181 # Before bootstrap fit, do an iterative fit to get rid of outliers
1182 count = 1
1183 while count <= maxIterationsPtcOutliers:
1184 # Note that application of the mask actually shrinks the array
1185 # to size rather than setting elements to zero (as we want) so
1186 # always update mask itself and re-apply to the original data
1187 meanTempVec = meanVecOriginal[mask]
1188 varTempVec = varVecOriginal[mask]
1189 res = least_squares(errFunc, parsIniPtc, bounds=bounds, args=(meanTempVec, varTempVec))
1190 pars = res.x
1192 # change this to the original from the temp because the masks are ANDed
1193 # meaning once a point is masked it's always masked, and the masks must
1194 # always be the same length for broadcasting
1195 sigResids = (varVecOriginal - ptcFunc(pars, meanVecOriginal))/np.sqrt(varVecOriginal)
1196 newMask = np.array([True if np.abs(r) < sigmaCutPtcOutliers else False for r in sigResids])
1197 mask = mask & newMask
1199 nDroppedTotal = Counter(mask)[False]
1200 self.log.debug(f"Iteration {count}: discarded {nDroppedTotal} points in total for {ampName}")
1201 count += 1
1202 # objects should never shrink
1203 assert (len(mask) == len(timeVecOriginal) == len(meanVecOriginal) == len(varVecOriginal))
1205 dataset.visitMask[ampName] = mask # store the final mask
1206 parsIniPtc = pars
1207 meanVecFinal = meanVecOriginal[mask]
1208 varVecFinal = varVecOriginal[mask]
1210 if Counter(mask)[False] > 0:
1211 self.log.info((f"Number of points discarded in PTC of amplifier {ampName}:" +
1212 f" {Counter(mask)[False]} out of {len(meanVecOriginal)}"))
1214 if (len(meanVecFinal) < len(parsIniPtc)):
1215 msg = (f"\nSERIOUS: Not enough data points ({len(meanVecFinal)}) compared to the number of"
1216 f"parameters of the PTC model({len(parsIniPtc)}). Setting {ampName} to BAD.")
1217 self.log.warn(msg)
1218 # The first and second parameters of initial fit are discarded (bias and gain)
1219 # for the final NL coefficients
1220 dataset.badAmps.append(ampName)
1221 dataset.gain[ampName] = np.nan
1222 dataset.gainErr[ampName] = np.nan
1223 dataset.noise[ampName] = np.nan
1224 dataset.noiseErr[ampName] = np.nan
1225 dataset.ptcFitPars[ampName] = np.nan
1226 dataset.ptcFitParsError[ampName] = np.nan
1227 dataset.ptcFitReducedChiSquared[ampName] = np.nan
1228 continue
1230 # Fit the PTC
1231 if self.config.doFitBootstrap:
1232 parsFit, parsFitErr, reducedChiSqPtc = fitBootstrap(parsIniPtc, meanVecFinal,
1233 varVecFinal, ptcFunc)
1234 else:
1235 parsFit, parsFitErr, reducedChiSqPtc = fitLeastSq(parsIniPtc, meanVecFinal,
1236 varVecFinal, ptcFunc)
1237 dataset.ptcFitPars[ampName] = parsFit
1238 dataset.ptcFitParsError[ampName] = parsFitErr
1239 dataset.ptcFitReducedChiSquared[ampName] = reducedChiSqPtc
1241 if ptcFitType == 'EXPAPPROXIMATION':
1242 ptcGain = parsFit[1]
1243 ptcGainErr = parsFitErr[1]
1244 ptcNoise = np.sqrt(np.fabs(parsFit[2]))
1245 ptcNoiseErr = 0.5*(parsFitErr[2]/np.fabs(parsFit[2]))*np.sqrt(np.fabs(parsFit[2]))
1246 if ptcFitType == 'POLYNOMIAL':
1247 ptcGain = 1./parsFit[1]
1248 ptcGainErr = np.fabs(1./parsFit[1])*(parsFitErr[1]/parsFit[1])
1249 ptcNoise = np.sqrt(np.fabs(parsFit[0]))*ptcGain
1250 ptcNoiseErr = (0.5*(parsFitErr[0]/np.fabs(parsFit[0]))*(np.sqrt(np.fabs(parsFit[0]))))*ptcGain
1251 dataset.gain[ampName] = ptcGain
1252 dataset.gainErr[ampName] = ptcGainErr
1253 dataset.noise[ampName] = ptcNoise
1254 dataset.noiseErr[ampName] = ptcNoiseErr
1255 if not len(dataset.ptcFitType) == 0:
1256 dataset.ptcFitType = ptcFitType
1258 return dataset
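
    # Worked sketch of how gain and noise fall out of a POLYNOMIAL fit (numbers are
    # illustrative): for var(mu) ~= c0 + c1*mu with parsFit = [36., 0.7, -2e-6, ...],
    #
    #   ptcGain  = 1./parsFit[1]                          # ~1.43 e-/DN
    #   ptcNoise = np.sqrt(np.fabs(parsFit[0]))*ptcGain   # ~8.6 e- rms
    #
    # matching the relations applied in the loop above; for EXPAPPROXIMATION the gain
    # and noise come directly from parsFit[1] and sqrt(|parsFit[2]|).
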