Coverage for python/lsst/cp/pipe/ptc.py : 11%

1# This file is part of cp_pipe.
2#
3# Developed for the LSST Data Management System.
4# This product includes software developed by the LSST Project
5# (https://www.lsst.org).
6# See the COPYRIGHT file at the top-level directory of this distribution
7# for details of code ownership.
8#
9# This program is free software: you can redistribute it and/or modify
10# it under the terms of the GNU General Public License as published by
11# the Free Software Foundation, either version 3 of the License, or
12# (at your option) any later version.
13#
14# This program is distributed in the hope that it will be useful,
15# but WITHOUT ANY WARRANTY; without even the implied warranty of
16# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17# GNU General Public License for more details.
18#
19# You should have received a copy of the GNU General Public License
20# along with this program. If not, see <https://www.gnu.org/licenses/>.
21#
23__all__ = ['MeasurePhotonTransferCurveTask',
24 'MeasurePhotonTransferCurveTaskConfig',
25 'PhotonTransferCurveDataset']
27import numpy as np
28import matplotlib.pyplot as plt
29from sqlite3 import OperationalError
30from collections import Counter
31from dataclasses import dataclass
33import lsst.afw.math as afwMath
34import lsst.pex.config as pexConfig
35import lsst.pipe.base as pipeBase
36from .utils import (NonexistentDatasetTaskDataIdContainer, PairedVisitListTaskRunner,
37 checkExpLengthEqual, fitLeastSq, fitBootstrap, funcPolynomial, funcAstier)
38from scipy.optimize import least_squares
40from lsst.ip.isr.linearize import Linearizer
41import datetime
43from .astierCovPtcUtils import (fftSize, CovFft, computeCovDirect, fitData)
46class MeasurePhotonTransferCurveTaskConfig(pexConfig.Config):
47 """Config class for photon transfer curve measurement task"""
48 ccdKey = pexConfig.Field(
49 dtype=str,
50 doc="The key by which to pull a detector from a dataId, e.g. 'ccd' or 'detector'.",
51 default='ccd',
52 )
53 ptcFitType = pexConfig.ChoiceField(
54 dtype=str,
55 doc="Fit PTC to approximation in Astier+19 (Equation 16) or to a polynomial.",
56 default="POLYNOMIAL",
57 allowed={
58 "POLYNOMIAL": "n-degree polynomial (use 'polynomialFitDegree' to set 'n').",
59 "EXPAPPROXIMATION": "Approximation in Astier+19 (Eq. 16).",
60 "FULLCOVARIANCE": "Full covariances model in Astier+19 (Eq. 20)"
61 }
62 )
63 sigmaClipFullFitCovariancesAstier = pexConfig.Field(
64 dtype=float,
65 doc="sigma clip for full model fit for FULLCOVARIANCE ptcFitType ",
66 default=5.0,
67 )
68 maxIterFullFitCovariancesAstier = pexConfig.Field(
69 dtype=int,
70 doc="Maximum number of iterations in full model fit for FULLCOVARIANCE ptcFitType",
71 default=3,
72 )
73 maximumRangeCovariancesAstier = pexConfig.Field(
74 dtype=int,
75 doc="Maximum range of covariances as in Astier+19",
76 default=8,
77 )
78 covAstierRealSpace = pexConfig.Field(
79 dtype=bool,
80 doc="Calculate covariances in real space or via FFT? (see appendix A of Astier+19).",
81 default=False,
82 )
83 polynomialFitDegree = pexConfig.Field(
84 dtype=int,
85 doc="Degree of polynomial to fit the PTC, when 'ptcFitType'=POLYNOMIAL.",
86 default=3,
87 )
88 doCreateLinearizer = pexConfig.Field(
89 dtype=bool,
90 doc="Calculate non-linearity and persist linearizer?",
91 default=False,
92 )
93 linearizerType = pexConfig.ChoiceField(
94 dtype=str,
95 doc="Linearizer type, if doCreateLinearizer=True",
96 default="LINEARIZEPOLYNOMIAL",
97 allowed={
98 "LINEARIZEPOLYNOMIAL": "n-degree polynomial (use 'polynomialFitDegreeNonLinearity' to set 'n').",
99 "LINEARIZESQUARED": "c0 quadratic coefficient derived from coefficients of polynomiual fit",
100 "LOOKUPTABLE": "Loouk table formed from linear part of polynomial fit."
101 }
102 )
103 polynomialFitDegreeNonLinearity = pexConfig.Field(
104 dtype=int,
105 doc="If doCreateLinearizer, degree of polynomial to fit the meanSignal vs exposureTime" +
106 " curve to produce the table for LinearizeLookupTable.",
107 default=3,
108 )
109 binSize = pexConfig.Field(
110 dtype=int,
111 doc="Bin the image by this factor in both dimensions.",
112 default=1,
113 )
114 minMeanSignal = pexConfig.Field(
115 dtype=float,
116 doc="Minimum value (inclusive) of mean signal (in DN) above which to consider.",
117 default=0,
118 )
119 maxMeanSignal = pexConfig.Field(
120 dtype=float,
121 doc="Maximum value (inclusive) of mean signal (in DN) below which to consider.",
122 default=9e6,
123 )
124 initialNonLinearityExclusionThresholdPositive = pexConfig.RangeField(
125 dtype=float,
126 doc="Initially exclude data points with a variance that are more than a factor of this from being"
127 " linear in the positive direction, from the PTC fit. Note that these points will also be"
128 " excluded from the non-linearity fit. This is done before the iterative outlier rejection,"
129 " to allow an accurate determination of the sigmas for said iterative fit.",
130 default=0.12,
131 min=0.0,
132 max=1.0,
133 )
134 initialNonLinearityExclusionThresholdNegative = pexConfig.RangeField(
135 dtype=float,
136 doc="Initially exclude data points with a variance that are more than a factor of this from being"
137 " linear in the negative direction, from the PTC fit. Note that these points will also be"
138 " excluded from the non-linearity fit. This is done before the iterative outlier rejection,"
139 " to allow an accurate determination of the sigmas for said iterative fit.",
140 default=0.25,
141 min=0.0,
142 max=1.0,
143 )
144 sigmaCutPtcOutliers = pexConfig.Field(
145 dtype=float,
146 doc="Sigma cut for outlier rejection in PTC.",
147 default=5.0,
148 )
149 maskNameList = pexConfig.ListField(
150 dtype=str,
151 doc="Mask list to exclude from statistics calculations.",
152 default=['SUSPECT', 'BAD', 'NO_DATA'],
153 )
154 nSigmaClipPtc = pexConfig.Field(
155 dtype=float,
156 doc="Sigma cut for afwMath.StatisticsControl()",
157 default=5.5,
158 )
159 nIterSigmaClipPtc = pexConfig.Field(
160 dtype=int,
161 doc="Number of sigma-clipping iterations for afwMath.StatisticsControl()",
162 default=1,
163 )
164 maxIterationsPtcOutliers = pexConfig.Field(
165 dtype=int,
166 doc="Maximum number of iterations for outlier rejection in PTC.",
167 default=2,
168 )
169 doFitBootstrap = pexConfig.Field(
170 dtype=bool,
171 doc="Use bootstrap for the PTC fit parameters and errors?.",
172 default=False,
173 )
174 maxAduForLookupTableLinearizer = pexConfig.Field(
175 dtype=int,
176 doc="Maximum DN value for the LookupTable linearizer.",
177 default=2**18,
178 )
179 instrumentName = pexConfig.Field(
180 dtype=str,
181 doc="Instrument name.",
182 default='',
183 )
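# A brief, illustrative override sketch for this config (the field values below are
# hypothetical examples; a standard pex_config override file would set them the same
# way through the in-scope `config` object):
#
#     config.ptcFitType = "EXPAPPROXIMATION"
#     config.maximumRangeCovariancesAstier = 8
#     config.doCreateLinearizer = True
#     config.linearizerType = "LINEARIZEPOLYNOMIAL"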
186@dataclass
187class LinearityResidualsAndLinearizersDataset:
188 """A simple class to hold the output from the
189 `calculateLinearityResidualAndLinearizers` function.
190 """
191 # Normalized coefficients for polynomial NL correction
192 polynomialLinearizerCoefficients: list
193 # Normalized coefficient for quadratic polynomial NL correction (c0)
194 quadraticPolynomialLinearizerCoefficient: float
195 # LUT array row for the amplifier at hand
196 linearizerTableRow: list
197 meanSignalVsTimePolyFitPars: list
198 meanSignalVsTimePolyFitParsErr: list
199 meanSignalVsTimePolyFitReducedChiSq: float
202class PhotonTransferCurveDataset:
203 """A simple class to hold the output data from the PTC task.
205 The dataset is made up of a dictionary for each item, keyed by the
206 amplifiers' names, which must be supplied at construction time.
208 New items cannot be added to the class, to prevent accidentally saving to the
209 wrong property, and the class can be frozen if desired.
211 inputVisitPairs records the visits used to produce the data.
212 When fitPtc() or fitCovariancesAstier() is run, a mask is built up, which is by definition
213 always the same length as inputVisitPairs, rawExpTimes, rawMeans
214 and rawVars, and is a list of bools, which are incrementally set to False
215 as points are discarded from the fits.
217 PTC fit parameters for polynomials are stored in a list in ascending order
218 of polynomial term, i.e. par[0]*x^0 + par[1]*x + par[2]*x^2 etc
219 with the length of the list corresponding to the order of the polynomial
220 plus one.
222 Parameters
223 ----------
224 ampNames : `list`
225 List with the names of the amplifiers of the detector at hand.
227 ptcFitType : `str`
228 Type of model fitted to the PTC: "POLYNOMIAL", "EXPAPPROXIMATION", or "FULLCOVARIANCE".
230 Returns
231 -------
232 `lsst.cp.pipe.ptc.PhotonTransferCurveDataset`
233 Output dataset from MeasurePhotonTransferCurveTask.
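Examples
--------
A minimal, illustrative construction (the amplifier names and visit numbers below
are hypothetical):

>>> ptcData = PhotonTransferCurveDataset(['C00', 'C01'], 'POLYNOMIAL')
>>> ptcData.inputVisitPairs['C00'] = [(123, 124), (125, 126)]
>>> ptcData.visitMask['C00'] = [True, False]
>>> ptcData.getVisitsUsed('C00')
[(123, 124)]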
234 """
236 def __init__(self, ampNames, ptcFitType):
237 # add items to __dict__ directly because __setattr__ is overridden
239 # instance variables
240 self.__dict__["ptcFitType"] = ptcFitType
241 self.__dict__["ampNames"] = ampNames
242 self.__dict__["badAmps"] = []
244 # raw data variables
245 # visitMask is the mask produced after outlier rejection. The mask produced by "FULLCOVARIANCE"
246 # may differ from the one produced in the other two PTC fit types.
247 self.__dict__["inputVisitPairs"] = {ampName: [] for ampName in ampNames}
248 self.__dict__["visitMask"] = {ampName: [] for ampName in ampNames}
249 self.__dict__["rawExpTimes"] = {ampName: [] for ampName in ampNames}
250 self.__dict__["rawMeans"] = {ampName: [] for ampName in ampNames}
251 self.__dict__["rawVars"] = {ampName: [] for ampName in ampNames}
253 # Gain and noise
254 self.__dict__["gain"] = {ampName: -1. for ampName in ampNames}
255 self.__dict__["gainErr"] = {ampName: -1. for ampName in ampNames}
256 self.__dict__["noise"] = {ampName: -1. for ampName in ampNames}
257 self.__dict__["noiseErr"] = {ampName: -1. for ampName in ampNames}
259 # if ptcFitType in ["POLYNOMIAL", "EXPAPPROXIMATION"]
260 # fit information
261 self.__dict__["ptcFitPars"] = {ampName: [] for ampName in ampNames}
262 self.__dict__["ptcFitParsError"] = {ampName: [] for ampName in ampNames}
263 self.__dict__["ptcFitReducedChiSquared"] = {ampName: [] for ampName in ampNames}
265 # if ptcFitType in ["FULLCOVARIANCE"]
266 # "covariancesTuple" is a numpy recarray with entries of the form
267 # ['mu', 'i', 'j', 'var', 'cov', 'npix', 'ext', 'expTime', 'ampName']
268 # "covariancesFits" has CovFit objects that fit the measured covariances to Eq. 20 of Astier+19.
269 # In "covariancesFitsWithNoB", "b"=0 in the model described by Eq. 20 of Astier+19.
270 self.__dict__["covariancesTuple"] = {ampName: [] for ampName in ampNames}
271 self.__dict__["covariancesFitsWithNoB"] = {ampName: [] for ampName in ampNames}
272 self.__dict__["covariancesFits"] = {ampName: [] for ampName in ampNames}
273 self.__dict__["aMatrix"] = {ampName: [] for ampName in ampNames}
274 self.__dict__["bMatrix"] = {ampName: [] for ampName in ampNames}
276 # "final" means that the "raw" vectors above had "visitMask" applied.
277 self.__dict__["finalVars"] = {ampName: [] for ampName in ampNames}
278 self.__dict__["finalModelVars"] = {ampName: [] for ampName in ampNames}
279 self.__dict__["finalMeans"] = {ampName: [] for ampName in ampNames}
281 def __setattr__(self, attribute, value):
282 """Protect class attributes"""
283 if attribute not in self.__dict__:
284 raise AttributeError(f"{attribute} is not already a member of PhotonTransferCurveDataset, which"
285 " does not support setting of new attributes.")
286 else:
287 self.__dict__[attribute] = value
289 def getVisitsUsed(self, ampName):
290 """Get the visits used, i.e. not discarded, for a given amp.
292 If no mask has been created yet, all visits are returned.
293 """
294 if len(self.visitMask[ampName]) == 0:
295 return self.inputVisitPairs[ampName]
297 # if the mask exists it had better be the same length as the visitPairs
298 assert len(self.visitMask[ampName]) == len(self.inputVisitPairs[ampName])
300 pairs = self.inputVisitPairs[ampName]
301 mask = self.visitMask[ampName]
302 # cast to bool required because numpy
303 return [(v1, v2) for ((v1, v2), m) in zip(pairs, mask) if bool(m) is True]
305 def getGoodAmps(self):
306 return [amp for amp in self.ampNames if amp not in self.badAmps]
309class MeasurePhotonTransferCurveTask(pipeBase.CmdLineTask):
310 """A class to calculate, fit, and plot a PTC from a set of flat pairs.
312 The Photon Transfer Curve (var(signal) vs mean(signal)) is a standard tool
313 used in astronomical detector characterization (e.g., Janesick 2001,
314 Janesick 2007). If ptcFitType is "EXPAPPROXIMATION" or "POLYNOMIAL", this task calculates the
315 PTC from a series of pairs of flat-field images, each pair taken at the same exposure
316 time. The difference image of each pair is formed to eliminate fixed pattern noise,
317 and then the variance of the difference image and the mean of the average image
318 are used to produce the PTC. An n-degree polynomial or the approximation in Equation
319 16 of Astier+19 ("The Shape of the Photon Transfer Curve of CCD sensors",
320 arXiv:1905.08677) can be fitted to the PTC curve. These models include
321 parameters such as the gain (e/DN) and readout noise.
323 Linearizers to correct for signal-chain non-linearity are also calculated.
324 The `Linearizer` class, in general, can support per-amp linearizers, but this
325 task does not use that capability.
327 If ptcFitType is "FULLCOVARIANCE", the covariances of the difference images are calculated via the
328 DFT methods described in Astier+19 and the variances for the PTC are given by the cov[0,0] elements
329 at each signal level. The full model in Equation 20 of Astier+19 is fit to the PTC to get the gain
330 and the noise.
332 Parameters
333 ----------
335 *args: `list`
336 Positional arguments passed to the Task constructor. None used at this
337 time.
338 **kwargs: `dict`
339 Keyword arguments passed on to the Task constructor. None used at this
340 time.
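Notes
-----
For reference, the "EXPAPPROXIMATION" model of Eq. 16 in Astier+19, up to the
conventions adopted in `funcAstier`, relates the PTC variance to the mean signal
mu as

    var(mu) = 1/(2*g*g*a00)*(exp(2*a00*g*mu) - 1) + n/(g*g),

where g is the gain (e/DN), a00 parametrizes the brighter-fatter turnover of the
PTC, and n carries the squared readout noise in electrons (the reported noise is
sqrt(|n|), as extracted in `fitPtc`).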
342 """
344 RunnerClass = PairedVisitListTaskRunner
345 ConfigClass = MeasurePhotonTransferCurveTaskConfig
346 _DefaultName = "measurePhotonTransferCurve"
348 def __init__(self, *args, **kwargs):
349 pipeBase.CmdLineTask.__init__(self, *args, **kwargs)
350 plt.interactive(False) # stop windows popping up when plotting. When headless, use 'agg' backend too
351 self.config.validate()
352 self.config.freeze()
354 @classmethod
355 def _makeArgumentParser(cls):
356 """Augment argument parser for the MeasurePhotonTransferCurveTask."""
357 parser = pipeBase.ArgumentParser(name=cls._DefaultName)
358 parser.add_argument("--visit-pairs", dest="visitPairs", nargs="*",
359 help="Visit pairs to use. Each pair must be of the form INT,INT e.g. 123,456")
360 parser.add_id_argument("--id", datasetType="photonTransferCurveDataset",
361 ContainerClass=NonexistentDatasetTaskDataIdContainer,
362 help="The ccds to use, e.g. --id ccd=0..100")
363 return parser
365 @pipeBase.timeMethod
366 def runDataRef(self, dataRef, visitPairs):
367 """Run the Photon Transfer Curve (PTC) measurement task.
369 For a dataRef (which is each detector here),
370 and given a list of visit pairs (postISR) at different exposure times,
371 measure the PTC.
373 Parameters
374 ----------
375 dataRef : `lsst.daf.persistence.ButlerDataRef`
376 Data reference for the detector for the visits to be fit.
378 visitPairs : `iterable` of `tuple` of `int`
379 Pairs of visit numbers to be processed together.
380 """
382 # setup necessary objects
383 detNum = dataRef.dataId[self.config.ccdKey]
384 detector = dataRef.get('camera')[dataRef.dataId[self.config.ccdKey]]
385 # expand some missing fields that we need for lsstCam. This is a work-around
386 # for Gen2 problems that I (RHL) don't feel like solving. The calibs pipelines
387 # (which inherit from CalibTask) use addMissingKeys() to do basically the same thing
388 #
389 # Basically, the butler's trying to look up the fields in `raw_visit` which won't work
390 for name in dataRef.getButler().getKeys('bias'):
391 if name not in dataRef.dataId:
392 try:
393 dataRef.dataId[name] = \
394 dataRef.getButler().queryMetadata('raw', [name], detector=detNum)[0]
395 except OperationalError:
396 pass
398 amps = detector.getAmplifiers()
399 ampNames = [amp.getName() for amp in amps]
400 datasetPtc = PhotonTransferCurveDataset(ampNames, self.config.ptcFitType)
401 self.log.info('Measuring PTC using %s visits for detector %s' % (visitPairs, detector.getId()))
403 tupleRecords = []
404 allTags = []
405 for (v1, v2) in visitPairs:
406 # Get postISR exposures.
407 dataRef.dataId['expId'] = v1
408 exp1 = dataRef.get("postISRCCD", immediate=True)
409 dataRef.dataId['expId'] = v2
410 exp2 = dataRef.get("postISRCCD", immediate=True)
411 del dataRef.dataId['expId']
413 checkExpLengthEqual(exp1, exp2, v1, v2, raiseWithMessage=True)
414 expTime = exp1.getInfo().getVisitInfo().getExposureTime()
415 tupleRows = []
416 nAmpsNan = 0
417 for ampNumber, amp in enumerate(detector):
418 ampName = amp.getName()
419 # covAstier: (i, j, var (cov[0,0]), cov, npix)
420 doRealSpace = self.config.covAstierRealSpace
421 muDiff, varDiff, covAstier = self.measureMeanVarCov(exp1, exp2, region=amp.getBBox(),
422 covAstierRealSpace=doRealSpace)
423 if np.isnan(muDiff) or np.isnan(varDiff) or (covAstier is None):
424 msg = (f"NaN mean or var, or None cov in amp {ampNumber} in visit pair {v1}, {v2} "
425 "of detector {detNum}.")
426 self.log.warn(msg)
427 nAmpsNan += 1
428 continue
429 tags = ['mu', 'i', 'j', 'var', 'cov', 'npix', 'ext', 'expTime', 'ampName']
430 if (muDiff <= self.config.minMeanSignal) or (muDiff >= self.config.maxMeanSignal):
431 continue
432 datasetPtc.rawExpTimes[ampName].append(expTime)
433 datasetPtc.rawMeans[ampName].append(muDiff)
434 datasetPtc.rawVars[ampName].append(varDiff)
435 datasetPtc.inputVisitPairs[ampName].append((v1, v2))
437 tupleRows += [(muDiff, ) + covRow + (ampNumber, expTime, ampName) for covRow in covAstier]
438 if nAmpsNan == len(ampNames):
439 msg = f"NaN mean in all amps of visit pair {v1}, {v2} of detector {detNum}."
440 self.log.warn(msg)
441 continue
442 allTags += tags
443 tupleRecords += tupleRows
444 covariancesWithTags = np.core.records.fromrecords(tupleRecords, names=allTags)
446 if self.config.ptcFitType in ["FULLCOVARIANCE", ]:
447 # Calculate covariances and fit them, including the PTC, to Astier+19 full model (Eq. 20)
448 datasetPtc = self.fitCovariancesAstier(datasetPtc, covariancesWithTags)
449 elif self.config.ptcFitType in ["EXPAPPROXIMATION", "POLYNOMIAL"]:
450 # Fit the PTC to a polynomial or to Astier+19 exponential approximation (Eq. 16)
451 # Fill up PhotonTransferCurveDataset object.
452 datasetPtc = self.fitPtc(datasetPtc, self.config.ptcFitType)
454 # Fit a polynomial to calculate non-linearity and persist linearizer.
455 if self.config.doCreateLinearizer:
456 numberAmps = len(amps)
457 numberAduValues = self.config.maxAduForLookupTableLinearizer
458 lookupTableArray = np.zeros((numberAmps, numberAduValues), dtype=np.float32)
460 # Fit (non)linearity of signal vs time curve.
461 # Fill up PhotonTransferCurveDataset object.
462 # Fill up array for LUT linearizer (tableArray).
463 # Produce coefficients for Polynomial and Squared linearizers.
464 # Build linearizer objects.
465 linearizer = self.fitNonLinearityAndBuildLinearizers(datasetPtc, detector,
466 tableArray=lookupTableArray,
467 log=self.log)
469 if self.config.linearizerType == "LINEARIZEPOLYNOMIAL":
470 linDataType = 'linearizePolynomial'
471 linMsg = "polynomial (coefficients for a polynomial correction)."
472 elif self.config.linearizerType == "LINEARIZESQUARED":
473 linDataType = 'linearizePolynomial'
474 linMsg = "squared (c0, derived from k_i coefficients of a polynomial fit)."
475 elif self.config.linearizerType == "LOOKUPTABLE":
476 linDataType = 'linearizePolynomial'
477 linMsg = "lookup table (linear component of polynomial fit)."
478 else:
479 raise RuntimeError("Invalid config.linearizerType {selg.config.linearizerType}. "
480 "Supported: 'LOOKUPTABLE', 'LINEARIZESQUARED', or 'LINEARIZEPOLYNOMIAL'")
482 butler = dataRef.getButler()
483 self.log.info(f"Writing linearizer: \n {linMsg}")
485 detName = detector.getName()
486 now = datetime.datetime.utcnow()
487 calibDate = now.strftime("%Y-%m-%d")
489 butler.put(linearizer, datasetType=linDataType, dataId={'detector': detNum,
490 'detectorName': detName, 'calibDate': calibDate})
492 self.log.info(f"Writing PTC data to {dataRef.getUri(write=True)}")
493 dataRef.put(datasetPtc, datasetType="photonTransferCurveDataset")
495 return pipeBase.Struct(exitStatus=0)
497 def fitCovariancesAstier(self, dataset, covariancesWithTagsArray):
498 """Fit measured flat covariances to full model in Astier+19.
500 Parameters
501 ----------
502 dataset : `lsst.cp.pipe.ptc.PhotonTransferCurveDataset`
503 The dataset containing information such as the means, variances and exposure times.
505 covariancesWithTagsArray : `numpy.recarray`
506 Tuple with at least (mu, cov, var, i, j, npix), where:
507 mu : 0.5*(mu1 + mu2), where:
508 mu1: mean value of flat1
509 mu2: mean value of flat2
510 cov: covariance value at lag(i, j)
511 var: variance(covariance value at lag(0, 0))
512 i: lag dimension
513 j: lag dimension
514 npix: number of pixels used for covariance calculation.
516 Returns
517 -------
518 dataset: `lsst.cp.pipe.ptc.PhotonTransferCurveDataset`
519 This is the same dataset as the input parameter, but it has been modified
520 to include information such as the fit vectors and the fit parameters. See
521 the class `PhotonTransferCurveDataset`.
522 """
524 covFits, covFitsNoB = fitData(covariancesWithTagsArray, maxMu=self.config.maxMeanSignal,
525 r=self.config.maximumRangeCovariancesAstier,
526 nSigmaFullFit=self.config.sigmaClipFullFitCovariancesAstier,
527 maxIterFullFit=self.config.maxIterFullFitCovariancesAstier)
529 dataset.covariancesTuple = covariancesWithTagsArray
530 dataset.covariancesFits = covFits
531 dataset.covariancesFitsWithNoB = covFitsNoB
532 dataset = self.getOutputPtcDataCovAstier(dataset, covFits)
534 return dataset
536 def getOutputPtcDataCovAstier(self, dataset, covFits):
537 """Get output data for PhotonTransferCurveCovAstierDataset from CovFit objects.
539 Parameters
540 ----------
541 dataset : `lsst.cp.pipe.ptc.PhotonTransferCurveDataset`
542 The dataset containing information such as the means, variances and exposure times.
544 covFits: `dict`
545 Dictionary of CovFit objects, with amp names as keys.
547 Returns
548 -------
549 dataset : `lsst.cp.pipe.ptc.PhotonTransferCurveDataset`
550 This is the same dataset as the input parameter, but it has been modified
551 to include extra information such as the mask 1D array, gains, readout noise, measured signal,
552 measured variance, modeled variance, a, and b coefficient matrices (see Astier+19) per amplifier.
553 See the class `PhotonTransferCurveDataset`.
554 """
556 for i, amp in enumerate(covFits):
557 fit = covFits[amp]
558 (meanVecFinal, varVecFinal, varVecModel,
559 wc, varMask) = fit.getFitData(0, 0, divideByMu=False, returnMasked=True)
560 gain = fit.getGain()
561 dataset.visitMask[amp] = varMask
562 dataset.gain[amp] = gain
563 dataset.gainErr[amp] = fit.getGainErr()
564 dataset.noise[amp] = np.sqrt(np.fabs(fit.getRon()))
565 dataset.noiseErr[amp] = fit.getRonErr()
566 dataset.finalVars[amp].append(varVecFinal/(gain**2))
567 dataset.finalModelVars[amp].append(varVecModel/(gain**2))
568 dataset.finalMeans[amp].append(meanVecFinal/gain)
569 dataset.aMatrix[amp].append(fit.getA())
570 dataset.bMatrix[amp].append(fit.getB())
572 return dataset
574 def measureMeanVarCov(self, exposure1, exposure2, region=None, covAstierRealSpace=False):
575 """Calculate the mean of each of two exposures and the variance and covariance of their difference.
577 The variance is calculated via afwMath, and the covariance via the methods in Astier+19 (appendix A).
578 In theory, var = covariance[0,0]. This should be validated, and in the future, we may decide to just
579 keep one (covariance).
581 Parameters
582 ----------
583 exposure1 : `lsst.afw.image.exposure.exposure.ExposureF`
584 First exposure of flat field pair.
586 exposure2 : `lsst.afw.image.exposure.exposure.ExposureF`
587 Second exposure of flat field pair.
589 region : `lsst.geom.Box2I`, optional
590 Region of each exposure where to perform the calculations (e.g, an amplifier).
592 covAstierRealSpace : `bool`, optional
593 Should the covariances in Astier+19 be calculated in real space or via FFT?
594 See Appendix A of Astier+19.
596 Returns
597 -------
598 mu : `float` or `NaN`
599 0.5*(mu1 + mu2), where mu1, and mu2 are the clipped means of the regions in
600 both exposures. If either mu1 or mu2 is NaN, the returned value is NaN.
602 varDiff : `float` or `NaN`
603 Half of the clipped variance of the difference of the regions in the two input
604 exposures. If either mu1 or mu2 is NaN, the returned value is NaN.
606 covDiffAstier : `list` or `NaN`
607 List with tuples of the form (dx, dy, var, cov, npix), where:
608 dx : `int`
609 Lag in x
610 dy : `int`
611 Lag in y
612 var : `float`
613 Variance at (dx, dy).
614 cov : `float`
615 Covariance at (dx, dy).
616 nPix : `int`
617 Number of pixel pairs used to evaluate var and cov.
618 If either mu1 or mu2 is NaN, the returned value is NaN.
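Notes
-----
The means mu1 and mu2 are the sigma-clipped means of each region. The difference
image is built with the symmetric weighting used in the code below,

    diffIm = (mu2*im1 - mu1*im2)/(0.5*(mu1 + mu2)),

and varDiff is half of the sigma-clipped variance of diffIm, which (approximately)
estimates the variance of a single flat at mean level mu = 0.5*(mu1 + mu2).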
619 """
621 if region is not None:
622 im1Area = exposure1.maskedImage[region]
623 im2Area = exposure2.maskedImage[region]
624 else:
625 im1Area = exposure1.maskedImage
626 im2Area = exposure2.maskedImage
628 if self.config.binSize > 1:
629 im1Area = afwMath.binImage(im1Area, self.config.binSize)
630 im2Area = afwMath.binImage(im2Area, self.config.binSize)
632 im1MaskVal = exposure1.getMask().getPlaneBitMask(self.config.maskNameList)
633 im1StatsCtrl = afwMath.StatisticsControl(self.config.nSigmaClipPtc,
634 self.config.nIterSigmaClipPtc,
635 im1MaskVal)
636 im1StatsCtrl.setNanSafe(True)
637 im1StatsCtrl.setAndMask(im1MaskVal)
639 im2MaskVal = exposure2.getMask().getPlaneBitMask(self.config.maskNameList)
640 im2StatsCtrl = afwMath.StatisticsControl(self.config.nSigmaClipPtc,
641 self.config.nIterSigmaClipPtc,
642 im2MaskVal)
643 im2StatsCtrl.setNanSafe(True)
644 im2StatsCtrl.setAndMask(im2MaskVal)
646 # Clipped mean of images; then average of mean.
647 mu1 = afwMath.makeStatistics(im1Area, afwMath.MEANCLIP, im1StatsCtrl).getValue()
648 mu2 = afwMath.makeStatistics(im2Area, afwMath.MEANCLIP, im2StatsCtrl).getValue()
649 if np.isnan(mu1) or np.isnan(mu2):
650 return np.nan, np.nan, None
651 mu = 0.5*(mu1 + mu2)
653 # Take difference of pairs
654 # symmetric formula: diff = (mu2*im1-mu1*im2)/(0.5*(mu1+mu2))
655 temp = im2Area.clone()
656 temp *= mu1
657 diffIm = im1Area.clone()
658 diffIm *= mu2
659 diffIm -= temp
660 diffIm /= mu
662 diffImMaskVal = diffIm.getMask().getPlaneBitMask(self.config.maskNameList)
663 diffImStatsCtrl = afwMath.StatisticsControl(self.config.nSigmaClipPtc,
664 self.config.nIterSigmaClipPtc,
665 diffImMaskVal)
666 diffImStatsCtrl.setNanSafe(True)
667 diffImStatsCtrl.setAndMask(diffImMaskVal)
669 varDiff = 0.5*(afwMath.makeStatistics(diffIm, afwMath.VARIANCECLIP, diffImStatsCtrl).getValue())
671 # Get the mask and identify good pixels as '1', and the rest as '0'.
672 w1 = np.where(im1Area.getMask().getArray() == 0, 1, 0)
673 w2 = np.where(im2Area.getMask().getArray() == 0, 1, 0)
675 w12 = w1*w2
676 wDiff = np.where(diffIm.getMask().getArray() == 0, 1, 0)
677 w = w12*wDiff
679 maxRangeCov = self.config.maximumRangeCovariancesAstier
680 if covAstierRealSpace:
681 covDiffAstier = computeCovDirect(diffIm.getImage().getArray(), w, maxRangeCov)
682 else:
683 shapeDiff = diffIm.getImage().getArray().shape
684 fftShape = (fftSize(shapeDiff[0] + maxRangeCov), fftSize(shapeDiff[1]+maxRangeCov))
685 c = CovFft(diffIm.getImage().getArray(), w, fftShape, maxRangeCov)
686 covDiffAstier = c.reportCovFft(maxRangeCov)
688 return mu, varDiff, covDiffAstier
690 def computeCovDirect(self, diffImage, weightImage, maxRange):
691 """Compute covariances of diffImage in real space.
693 For lags larger than ~25, it is slower than the FFT way.
694 Taken from https://github.com/PierreAstier/bfptc/
696 Parameters
697 ----------
698 diffImage : `numpy.array`
699 Image to compute the covariance of.
701 weightImage : `numpy.array`
702 Weight image of diffImage (1's and 0's for good and bad pixels, respectively).
704 maxRange : `int`
705 Last index of the covariance to be computed.
707 Returns
708 -------
709 outList : `list`
710 List with tuples of the form (dx, dy, var, cov, npix), where:
711 dx : `int`
712 Lag in x
713 dy : `int`
714 Lag in y
715 var : `float`
716 Variance at (dx, dy).
717 cov : `float`
718 Covariance at (dx, dy).
719 nPix : `int`
720 Number of pixel pairs used to evaluate var and cov.
721 """
722 outList = []
723 var = 0
724 # (dy,dx) = (0,0) has to be first
725 for dy in range(maxRange + 1):
726 for dx in range(0, maxRange + 1):
727 if (dx*dy > 0):
728 cov1, nPix1 = self.covDirectValue(diffImage, weightImage, dx, dy)
729 cov2, nPix2 = self.covDirectValue(diffImage, weightImage, dx, -dy)
730 cov = 0.5*(cov1 + cov2)
731 nPix = nPix1 + nPix2
732 else:
733 cov, nPix = self.covDirectValue(diffImage, weightImage, dx, dy)
734 if (dx == 0 and dy == 0):
735 var = cov
736 outList.append((dx, dy, var, cov, nPix))
738 return outList
740 def covDirectValue(self, diffImage, weightImage, dx, dy):
741 """Compute covariances of diffImage in real space at lag (dx, dy).
743 Taken from https://github.com/PierreAstier/bfptc/ (c.f., appendix of Astier+19).
745 Parameters
746 ----------
747 diffImage : `numpy.array`
748 Image to compute the covariance of.
750 weightImage : `numpy.array`
751 Weight image of diffImage (1's and 0's for good and bad pixels, respectively).
753 dx : `int`
754 Lag in x.
756 dy : `int`
757 Lag in y.
759 Returns
760 -------
761 cov : `float`
762 Covariance at (dx, dy)
764 nPix : `int`
765 Number of pixel pairs used to evaluate var and cov.
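Notes
-----
In terms of the combined weight mask wAll = w1*w2 and nPix = wAll.sum(), the
estimator computed below is

    cov = (wAll*im1*im2).sum()/nPix - ((wAll*im1).sum()/nPix)*((wAll*im2).sum()/nPix),

i.e. the weighted mean of the product of the two shifted copies of diffImage minus
the product of their weighted means.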
766 """
767 (nCols, nRows) = diffImage.shape
768 # switching both signs does not change anything:
769 # it just swaps im1 and im2 below
770 if (dx < 0):
771 (dx, dy) = (-dx, -dy)
772 # now, we have dx >0. We have to distinguish two cases
773 # depending on the sign of dy
774 if dy >= 0:
775 im1 = diffImage[dy:, dx:]
776 w1 = weightImage[dy:, dx:]
777 im2 = diffImage[:nCols - dy, :nRows - dx]
778 w2 = weightImage[:nCols - dy, :nRows - dx]
779 else:
780 im1 = diffImage[:nCols + dy, dx:]
781 w1 = weightImage[:nCols + dy, dx:]
782 im2 = diffImage[-dy:, :nRows - dx]
783 w2 = weightImage[-dy:, :nRows - dx]
784 # use the same mask for all 3 calculations
785 wAll = w1*w2
786 # do not use mean() because weightImage=0 pixels would then count
787 nPix = wAll.sum()
788 im1TimesW = im1*wAll
789 s1 = im1TimesW.sum()/nPix
790 s2 = (im2*wAll).sum()/nPix
791 p = (im1TimesW*im2).sum()/nPix
792 cov = p - s1*s2
794 return cov, nPix
796 def fitNonLinearityAndBuildLinearizers(self, datasetPtc, detector, tableArray=None, log=None):
797 """Fit non-linearity function and build linearizer objects.
799 Parameters
800 ----------
801 datasetPtc : `lsst.cp.pipe.ptc.PhotonTransferCurveDataset`
802 The dataset containing information such as the means, variances and exposure times.
805 detector : `lsst.afw.cameraGeom.Detector`
806 Detector object.
808 tableArray : `np.array`, optional
809 Look-up table array with size rows=nAmps and columns=DN values.
810 It will be modified in-place if supplied.
812 log : `lsst.log.Log`, optional
813 Logger to handle messages.
815 Returns
816 -------
817 linearizer : `lsst.ip.isr.Linearizer`
818 Linearizer object
819 """
821 # Fit NonLinearity
822 datasetNonLinearity = self.fitNonLinearity(datasetPtc, tableArray=tableArray)
824 # Produce linearizer
825 now = datetime.datetime.utcnow()
826 calibDate = now.strftime("%Y-%m-%d")
827 linType = self.config.linearizerType
829 if linType != "LOOKUPTABLE":
830 tableArray = None
834 linearizer = self.buildLinearizerObject(datasetNonLinearity, detector, calibDate, linType,
835 instruName=self.config.instrumentName,
836 tableArray=tableArray,
837 log=log)
839 return linearizer
841 def fitNonLinearity(self, datasetPtc, tableArray=None):
842 """Fit a polynomial to signal vs effective time curve to calculate linearity and residuals.
844 Parameters
845 ----------
846 datasetPtc : `lsst.cp.pipe.ptc.PhotonTransferCurveDataset`
847 The dataset containing the means, variances and exposure times.
849 tableArray : `np.array`, optional
850 Look-up table array with size rows=nAmps and columns=DN values.
851 It will be modified in-place if supplied.
853 Returns
854 -------
855 datasetNonLinearity : `dict`
856 Dictionary of `lsst.cp.pipe.ptc.LinearityResidualsAndLinearizersDataset`
857 dataclasses. Each one holds the output of `calculateLinearityResidualAndLinearizers` per
858 amplifier.
859 """
860 datasetNonLinearity = {ampName: [] for ampName in datasetPtc.ampNames}
861 for i, ampName in enumerate(datasetPtc.ampNames):
862 # If a mask is not found, use all points.
863 if (len(datasetPtc.visitMask[ampName]) == 0):
864 self.log.warn(f"Mask not found for {ampName} in non-linearity fit. Using all points.")
865 mask = np.repeat(True, len(datasetPtc.rawExpTimes[ampName]))
866 else:
867 mask = datasetPtc.visitMask[ampName]
869 timeVecFinal = np.array(datasetPtc.rawExpTimes[ampName])[mask]
870 meanVecFinal = np.array(datasetPtc.rawMeans[ampName])[mask]
872 # Non-linearity residuals (NL of the mean vs time curve), from an n-order polynomial fit
873 # with n = config.polynomialFitDegreeNonLinearity (see calculateLinearityResidualAndLinearizers).
874 datasetLinRes = self.calculateLinearityResidualAndLinearizers(timeVecFinal, meanVecFinal)
876 # LinearizerLookupTable
877 if tableArray is not None:
878 tableArray[i, :] = datasetLinRes.linearizerTableRow
880 datasetNonLinearity[ampName] = datasetLinRes
882 return datasetNonLinearity
884 def calculateLinearityResidualAndLinearizers(self, exposureTimeVector, meanSignalVector):
885 """Calculate linearity residual and fit an n-order polynomial to the mean vs time curve
886 to produce corrections (deviation from linear part of polynomial) for a particular amplifier
887 to populate LinearizeLookupTable.
888 Use the coefficients of this fit to calculate the correction coefficients for LinearizePolynomial
889 and LinearizeSquared.
891 Parameters
892 ----------
894 exposureTimeVector: `list` of `float`
895 List of exposure times for each flat pair
897 meanSignalVector: `list` of `float`
898 List of mean signal from the difference image of flat pairs.
900 Returns
901 -------
902 dataset : `lsst.cp.pipe.ptc.LinearityResidualsAndLinearizersDataset`
903 The dataset containing the fit parameters, the NL correction coefficients, and the
904 LUT row for the amplifier at hand.
906 Notes
907 -----
908 dataset members:
910 dataset.polynomialLinearizerCoefficients : `list` of `float`
911 Coefficients for LinearizePolynomial, where corrImage = uncorrImage + sum_i c_i uncorrImage^(2 +
912 i).
913 c_(j-2) = -k_j/(k_1^j) with units DN^(1-j) (c.f., Eq. 37 of 2003.05978). The units of k_j are
914 DN/t^j, and they are fit from meanSignalVector = k0 + k1*exposureTimeVector +
915 k2*exposureTimeVector^2 + ... + kn*exposureTimeVector^n, with
916 n = "polynomialFitDegreeNonLinearity". k_0 and k_1 and degenerate with bias level and gain,
917 and are not used by the non-linearity correction. Therefore, j = 2...n in the above expression
918 (see `LinearizePolynomial` class in `linearize.py`.)
920 dataset.quadraticPolynomialLinearizerCoefficient : `float`
921 Coefficient for LinearizeSquared, where corrImage = uncorrImage + c0*uncorrImage^2.
922 c0 = -k2/(k1^2), where k1 and k2 are fit from
923 meanSignalVector = k0 + k1*exposureTimeVector + k2*exposureTimeVector^2 +...
924 + kn*exposureTimeVector^n, with n = "polynomialFitDegreeNonLinearity".
926 dataset.linearizerTableRow : `list` of `float`
927 One dimensional array with deviation from linear part of n-order polynomial fit
928 to mean vs time curve. This array will be one row (for the particular amplifier at hand)
929 of the table array for LinearizeLookupTable.
931 dataset.meanSignalVsTimePolyFitPars : `list` of `float`
932 Parameters from n-order polynomial fit to meanSignalVector vs exposureTimeVector.
934 dataset.meanSignalVsTimePolyFitParsErr : `list` of `float`
935 Errors of the parameters from the n-order polynomial fit to meanSignalVector vs exposureTimeVector.
937 dataset.meanSignalVsTimePolyFitReducedChiSq : `float`
938 Reduced unweighted chi squared from polynomial fit to meanSignalVector vs exposureTimeVector.
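As a purely illustrative example of the LinearizeSquared coefficient: for
hypothetical fit values k1 = 1000 DN/s and k2 = -0.5 DN/s^2, the correction is
c0 = -k2/(k1^2) = 5e-7 DN^-1.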
939 """
941 # Lookup table linearizer
942 parsIniNonLinearity = self._initialParsForPolynomial(self.config.polynomialFitDegreeNonLinearity + 1)
943 if self.config.doFitBootstrap:
944 (parsFit, parsFitErr,
945 reducedChiSquaredNonLinearityFit) = fitBootstrap(parsIniNonLinearity,
946 exposureTimeVector,
947 meanSignalVector,
948 funcPolynomial,
949 weightsY=1./np.sqrt(meanSignalVector))
950 else:
951 (parsFit, parsFitErr,
952 reducedChiSquaredNonLinearityFit) = fitLeastSq(parsIniNonLinearity,
953 exposureTimeVector,
954 meanSignalVector,
955 funcPolynomial,
956 weightsY=1./np.sqrt(meanSignalVector))
958 # LinearizeLookupTable:
959 # Use linear part to get time at which signal is maxAduForLookupTableLinearizer DN
960 tMax = (self.config.maxAduForLookupTableLinearizer - parsFit[0])/parsFit[1]
961 timeRange = np.linspace(0, tMax, self.config.maxAduForLookupTableLinearizer)
962 signalIdeal = parsFit[0] + parsFit[1]*timeRange
963 signalUncorrected = funcPolynomial(parsFit, timeRange)
964 linearizerTableRow = signalIdeal - signalUncorrected # LinearizerLookupTable has corrections
965 # LinearizePolynomial and LinearizeSquared:
966 # Check that magnitude of higher order (>= 3) coefficients of the polyFit are small,
967 # i.e., less than threshold = 1e-10 (typical quadratic and cubic coefficients are ~1e-6
968 # and ~1e-12).
969 k1 = parsFit[1]
970 polynomialLinearizerCoefficients = []
971 for i, coefficient in enumerate(parsFit):
972 c = -coefficient/(k1**i)
973 polynomialLinearizerCoefficients.append(c)
974 if np.fabs(c) > 1e-10:
975 msg = f"Coefficient {c} in polynomial fit larger than threshold 1e-10."
976 self.log.warn(msg)
977 # Coefficient for LinearizeSquared. Called "c0" in linearize.py
978 c0 = polynomialLinearizerCoefficients[2]
980 dataset = LinearityResidualsAndLinearizersDataset([], None, [], [], [], None)
981 dataset.polynomialLinearizerCoefficients = polynomialLinearizerCoefficients
982 dataset.quadraticPolynomialLinearizerCoefficient = c0
983 dataset.linearizerTableRow = linearizerTableRow
984 dataset.meanSignalVsTimePolyFitPars = parsFit
985 dataset.meanSignalVsTimePolyFitParsErr = parsFitErr
986 dataset.meanSignalVsTimePolyFitReducedChiSq = reducedChiSquaredNonLinearityFit
988 return dataset
990 def buildLinearizerObject(self, datasetNonLinearity, detector, calibDate, linearizerType, instruName='',
991 tableArray=None, log=None):
992 """Build linearizer object to persist.
994 Parameters
995 ----------
996 datasetNonLinearity : `dict`
997 Dictionary of `lsst.cp.pipe.ptc.LinearityResidualsAndLinearizersDataset` objects.
999 detector : `lsst.afw.cameraGeom.Detector`
1000 Detector object
1002 calibDate : `datetime.datetime`
1003 Calibration date
1005 linearizerType : `str`
1006 'LOOKUPTABLE', 'LINEARIZESQUARED', or 'LINEARIZEPOLYNOMIAL'
1008 instruName : `str`, optional
1009 Instrument name
1011 tableArray : `np.array`, optional
1012 Look-up table array with size rows=nAmps and columns=DN values
1014 log : `lsst.log.Log`, optional
1015 Logger to handle messages
1017 Returns
1018 -------
1019 linearizer : `lsst.ip.isr.Linearizer`
1020 Linearizer object
1021 """
1022 detName = detector.getName()
1023 detNum = detector.getId()
1024 if linearizerType == "LOOKUPTABLE":
1025 if tableArray is not None:
1026 linearizer = Linearizer(detector=detector, table=tableArray, log=log)
1027 else:
1028 raise RuntimeError("tableArray must be provided when creating a LookupTable linearizer")
1029 elif linearizerType in ("LINEARIZESQUARED", "LINEARIZEPOLYNOMIAL"):
1030 linearizer = Linearizer(log=log)
1031 else:
1032 raise RuntimeError(f"Invalid linearizerType {linearizerType} to build a Linearizer object. "
1033 "Supported: 'LOOKUPTABLE', 'LINEARIZESQUARED', or 'LINEARIZEPOLYNOMIAL'")
1034 for i, amp in enumerate(detector.getAmplifiers()):
1035 ampName = amp.getName()
1036 datasetNonLinAmp = datasetNonLinearity[ampName]
1037 if linearizerType == "LOOKUPTABLE":
1038 linearizer.linearityCoeffs[ampName] = [i, 0]
1039 linearizer.linearityType[ampName] = "LookupTable"
1040 elif linearizerType == "LINEARIZESQUARED":
1041 linearizer.fitParams[ampName] = datasetNonLinAmp.meanSignalVsTimePolyFitPars
1042 linearizer.fitParamsErr[ampName] = datasetNonLinAmp.meanSignalVsTimePolyFitParsErr
1043 linearizer.linearityFitReducedChiSquared[ampName] = (
1044 datasetNonLinAmp.meanSignalVsTimePolyFitReducedChiSq)
1045 linearizer.linearityCoeffs[ampName] = [
1046 datasetNonLinAmp.quadraticPolynomialLinearizerCoefficient]
1047 linearizer.linearityType[ampName] = "Squared"
1048 elif linearizerType == "LINEARIZEPOLYNOMIAL":
1049 linearizer.fitParams[ampName] = datasetNonLinAmp.meanSignalVsTimePolyFitPars
1050 linearizer.fitParamsErr[ampName] = datasetNonLinAmp.meanSignalVsTimePolyFitParsErr
1051 linearizer.linearityFitReducedChiSquared[ampName] = (
1052 datasetNonLinAmp.meanSignalVsTimePolyFitReducedChiSq)
1053 # Slice correction coefficients (starting at 2) for polynomial linearizer
1054 # (and squared linearizer above). The first and second are redundant with
1055 # the bias and gain, respectively, and are not used by LinearizePolynomial.
1056 polyLinCoeffs = np.array(datasetNonLinAmp.polynomialLinearizerCoefficients[2:])
1057 linearizer.linearityCoeffs[ampName] = polyLinCoeffs
1058 linearizer.linearityType[ampName] = "Polynomial"
1059 linearizer.linearityBBox[ampName] = amp.getBBox()
1060 linearizer.validate()
1061 calibId = f"detectorName={detName} detector={detNum} calibDate={calibDate} ccd={detNum} filter=NONE"
1063 try:
1064 raftName = detName.split("_")[0]
1065 calibId += f" raftName={raftName}"
1066 except Exception:
1067 raftname = "NONE"
1068 calibId += f" raftName={raftname}"
1070 serial = detector.getSerial()
1071 linearizer.updateMetadata(instrumentName=instruName, detectorId=f"{detNum}",
1072 calibId=calibId, serial=serial, detectorName=f"{detName}")
1074 return linearizer
1076 @staticmethod
1077 def _initialParsForPolynomial(order):
1078 assert(order >= 2)
1079 pars = np.zeros(order, dtype=np.float64)
1080 pars[0] = 10
1081 pars[1] = 1
1082 pars[2:] = 0.0001
1083 return pars
1085 @staticmethod
1086 def _boundsForPolynomial(initialPars):
1087 lowers = [-np.inf for p in initialPars]
1088 uppers = [np.inf for p in initialPars]
1089 lowers[1] = 0 # no negative gains
1090 return (lowers, uppers)
1092 @staticmethod
1093 def _boundsForAstier(initialPars):
1094 lowers = [-np.inf for p in initialPars]
1095 uppers = [np.inf for p in initialPars]
1096 return (lowers, uppers)
1098 @staticmethod
1099 def _getInitialGoodPoints(means, variances, maxDeviationPositive, maxDeviationNegative):
1100 """Return a boolean array to mask bad points.
1102 A linear function has a constant ratio, so find the median
1103 value of the ratios, and exclude the points that deviate
1104 from that by more than a factor of maxDeviationPositive/negative.
1105 Asymmetric deviations are supported as we expect the PTC to turn
1106 down as the flux increases, but sometimes it anomalously turns
1107 upwards just before turning over, which ruins the fits, so it
1108 is wise to be stricter about restricting positive outliers than
1109 negative ones.
1111 If the thresholds are too high, points so bad that the fit will fail are included;
1112 if too low, genuinely non-linear points are excluded, biasing the NL fit."""
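# Illustrative example with hypothetical values: means = [1., 2., 3., 4.] and
# variances = [1., 2., 3., 8.] give ratios [1, 1, 1, 2], a median ratio of 1 and
# deviations [0, 0, 0, 1]; with the default thresholds (0.12, 0.25) the returned
# mask is [True, True, True, False].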
1113 ratios = [b/a for (a, b) in zip(means, variances)]
1114 medianRatio = np.median(ratios)
1115 ratioDeviations = [(r/medianRatio)-1 for r in ratios]
1117 # so that it doesn't matter if the deviation is expressed as positive or negative
1118 maxDeviationPositive = abs(maxDeviationPositive)
1119 maxDeviationNegative = -1. * abs(maxDeviationNegative)
1121 goodPoints = np.array([True if (r < maxDeviationPositive and r > maxDeviationNegative)
1122 else False for r in ratioDeviations])
1123 return goodPoints
1125 def _makeZeroSafe(self, array, warn=True, substituteValue=1e-9):
1126 """"""
1127 nBad = Counter(array)[0]
1128 if nBad == 0:
1129 return array
1131 if warn:
1132 msg = f"Found {nBad} zeros in array at elements {[x for x in np.where(array==0)[0]]}"
1133 self.log.warn(msg)
1135 array[array == 0] = substituteValue
1136 return array
1138 def fitPtc(self, dataset, ptcFitType):
1139 """Fit the photon transfer curve to a polynimial or to Astier+19 approximation.
1141 Fit the photon transfer curve with either a polynomial of the order
1142 specified in the task config, or using the Astier approximation.
1144 Sigma clipping is performed iteratively for the fit, as well as an
1145 initial clipping of data points that are more than
1146 config.initialNonLinearityExclusionThreshold away from lying on a
1147 straight line. This other step is necessary because the photon transfer
1148 curve turns over catastrophically at very high flux (because saturation
1149 drops the variance to ~0) and these far outliers cause the initial fit
1150 to fail, meaning the sigma cannot be calculated to perform the
1151 sigma-clipping.
1153 Parameters
1154 ----------
1155 dataset : `lsst.cp.pipe.ptc.PhotonTransferCurveDataset`
1156 The dataset containing the means, variances and exposure times
1158 ptcFitType : `str`
1159 Fit a 'POLYNOMIAL' (degree: 'polynomialFitDegree') or
1160 'EXPAPPROXIMATION' (Eq. 16 of Astier+19) to the PTC
1162 Returns
1163 -------
1164 dataset: `lsst.cp.pipe.ptc.PhotonTransferCurveDataset`
1165 This is the same dataset as the input parameter, but it has been modified
1166 to include information such as the fit vectors and the fit parameters. See
1167 the class `PhotonTransferCurveDataset`.
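Notes
-----
The gain and noise stored in the dataset are derived from the fitted parameters:
for 'EXPAPPROXIMATION', gain = pars[1] and noise = sqrt(|pars[2]|) (so pars[2]
carries the squared noise term); for 'POLYNOMIAL', gain = 1/pars[1] and
noise = sqrt(|pars[0]|)*gain.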
1168 """
1170 def errFunc(p, x, y):
1171 return ptcFunc(p, x) - y
1173 sigmaCutPtcOutliers = self.config.sigmaCutPtcOutliers
1174 maxIterationsPtcOutliers = self.config.maxIterationsPtcOutliers
1176 for i, ampName in enumerate(dataset.ampNames):
1177 timeVecOriginal = np.array(dataset.rawExpTimes[ampName])
1178 meanVecOriginal = np.array(dataset.rawMeans[ampName])
1179 varVecOriginal = np.array(dataset.rawVars[ampName])
1180 varVecOriginal = self._makeZeroSafe(varVecOriginal)
1182 mask = ((meanVecOriginal >= self.config.minMeanSignal) &
1183 (meanVecOriginal <= self.config.maxMeanSignal))
1185 goodPoints = self._getInitialGoodPoints(meanVecOriginal, varVecOriginal,
1186 self.config.initialNonLinearityExclusionThresholdPositive,
1187 self.config.initialNonLinearityExclusionThresholdNegative)
1188 mask = mask & goodPoints
1190 if ptcFitType == 'EXPAPPROXIMATION':
1191 ptcFunc = funcAstier
1192 parsIniPtc = [-1e-9, 1.0, 10.] # a00, gain, noise
1193 bounds = self._boundsForAstier(parsIniPtc)
1194 if ptcFitType == 'POLYNOMIAL':
1195 ptcFunc = funcPolynomial
1196 parsIniPtc = self._initialParsForPolynomial(self.config.polynomialFitDegree + 1)
1197 bounds = self._boundsForPolynomial(parsIniPtc)
1199 # Before bootstrap fit, do an iterative fit to get rid of outliers
1200 count = 1
1201 while count <= maxIterationsPtcOutliers:
1202 # Note that application of the mask actually shrinks the array
1203 # to size rather than setting elements to zero (as we want) so
1204 # always update mask itself and re-apply to the original data
1205 meanTempVec = meanVecOriginal[mask]
1206 varTempVec = varVecOriginal[mask]
1207 res = least_squares(errFunc, parsIniPtc, bounds=bounds, args=(meanTempVec, varTempVec))
1208 pars = res.x
1210 # change this to the original from the temp because the masks are ANDed
1211 # meaning once a point is masked it's always masked, and the masks must
1212 # always be the same length for broadcasting
1213 sigResids = (varVecOriginal - ptcFunc(pars, meanVecOriginal))/np.sqrt(varVecOriginal)
1214 newMask = np.array([True if np.abs(r) < sigmaCutPtcOutliers else False for r in sigResids])
1215 mask = mask & newMask
1217 nDroppedTotal = Counter(mask)[False]
1218 self.log.debug(f"Iteration {count}: discarded {nDroppedTotal} points in total for {ampName}")
1219 count += 1
1220 # objects should never shrink
1221 assert (len(mask) == len(timeVecOriginal) == len(meanVecOriginal) == len(varVecOriginal))
1223 dataset.visitMask[ampName] = mask # store the final mask
1224 parsIniPtc = pars
1225 meanVecFinal = meanVecOriginal[mask]
1226 varVecFinal = varVecOriginal[mask]
1228 if Counter(mask)[False] > 0:
1229 self.log.info((f"Number of points discarded in PTC of amplifier {ampName}:" +
1230 f" {Counter(mask)[False]} out of {len(meanVecOriginal)}"))
1232 if (len(meanVecFinal) < len(parsIniPtc)):
1233 msg = (f"\nSERIOUS: Not enough data points ({len(meanVecFinal)}) compared to the number of"
1234 f"parameters of the PTC model({len(parsIniPtc)}). Setting {ampName} to BAD.")
1235 self.log.warn(msg)
1236 # The first and second parameters of initial fit are discarded (bias and gain)
1237 # for the final NL coefficients
1238 dataset.badAmps.append(ampName)
1239 dataset.gain[ampName] = np.nan
1240 dataset.gainErr[ampName] = np.nan
1241 dataset.noise[ampName] = np.nan
1242 dataset.noiseErr[ampName] = np.nan
1243 dataset.ptcFitPars[ampName] = np.nan
1244 dataset.ptcFitParsError[ampName] = np.nan
1245 dataset.ptcFitReducedChiSquared[ampName] = np.nan
1246 continue
1248 # Fit the PTC
1249 if self.config.doFitBootstrap:
1250 parsFit, parsFitErr, reducedChiSqPtc = fitBootstrap(parsIniPtc, meanVecFinal,
1251 varVecFinal, ptcFunc,
1252 weightsY=1./np.sqrt(varVecFinal))
1253 else:
1254 parsFit, parsFitErr, reducedChiSqPtc = fitLeastSq(parsIniPtc, meanVecFinal,
1255 varVecFinal, ptcFunc,
1256 weightsY=1./np.sqrt(varVecFinal))
1257 dataset.ptcFitPars[ampName] = parsFit
1258 dataset.ptcFitParsError[ampName] = parsFitErr
1259 dataset.ptcFitReducedChiSquared[ampName] = reducedChiSqPtc
1261 if ptcFitType == 'EXPAPPROXIMATION':
1262 ptcGain = parsFit[1]
1263 ptcGainErr = parsFitErr[1]
1264 ptcNoise = np.sqrt(np.fabs(parsFit[2]))
1265 ptcNoiseErr = 0.5*(parsFitErr[2]/np.fabs(parsFit[2]))*np.sqrt(np.fabs(parsFit[2]))
1266 if ptcFitType == 'POLYNOMIAL':
1267 ptcGain = 1./parsFit[1]
1268 ptcGainErr = np.fabs(1./parsFit[1])*(parsFitErr[1]/parsFit[1])
1269 ptcNoise = np.sqrt(np.fabs(parsFit[0]))*ptcGain
1270 ptcNoiseErr = (0.5*(parsFitErr[0]/np.fabs(parsFit[0]))*(np.sqrt(np.fabs(parsFit[0]))))*ptcGain
1271 dataset.gain[ampName] = ptcGain
1272 dataset.gainErr[ampName] = ptcGainErr
1273 dataset.noise[ampName] = ptcNoise
1274 dataset.noiseErr[ampName] = ptcNoiseErr
1275 if len(dataset.ptcFitType) != 0:
1276 dataset.ptcFitType = ptcFitType
1278 return dataset