Coverage for python/lsst/cp/pipe/ptc.py : 11%

1# This file is part of cp_pipe.
2#
3# Developed for the LSST Data Management System.
4# This product includes software developed by the LSST Project
5# (https://www.lsst.org).
6# See the COPYRIGHT file at the top-level directory of this distribution
7# for details of code ownership.
8#
9# This program is free software: you can redistribute it and/or modify
10# it under the terms of the GNU General Public License as published by
11# the Free Software Foundation, either version 3 of the License, or
12# (at your option) any later version.
13#
14# This program is distributed in the hope that it will be useful,
15# but WITHOUT ANY WARRANTY; without even the implied warranty of
16# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17# GNU General Public License for more details.
18#
19# You should have received a copy of the GNU General Public License
20# along with this program. If not, see <https://www.gnu.org/licenses/>.
21#
23__all__ = ['MeasurePhotonTransferCurveTask',
24 'MeasurePhotonTransferCurveTaskConfig',
25 'PhotonTransferCurveDataset']
27import numpy as np
28import matplotlib.pyplot as plt
29from sqlite3 import OperationalError
30from collections import Counter
31from dataclasses import dataclass
33import lsst.afw.math as afwMath
34import lsst.pex.config as pexConfig
35import lsst.pipe.base as pipeBase
36from .utils import (NonexistentDatasetTaskDataIdContainer, PairedVisitListTaskRunner,
37 checkExpLengthEqual, fitLeastSq, fitBootstrap, funcPolynomial, funcAstier)
38from scipy.optimize import least_squares
40from lsst.ip.isr.linearize import Linearizer
41import datetime
43from .astierCovPtcUtils import (fftSize, CovFft, computeCovDirect, fitData)
46class MeasurePhotonTransferCurveTaskConfig(pexConfig.Config):
47 """Config class for photon transfer curve measurement task"""
48 ccdKey = pexConfig.Field(
49 dtype=str,
50 doc="The key by which to pull a detector from a dataId, e.g. 'ccd' or 'detector'.",
51 default='ccd',
52 )
53 ptcFitType = pexConfig.ChoiceField(
54 dtype=str,
55 doc="Fit PTC to approximation in Astier+19 (Equation 16) or to a polynomial.",
56 default="POLYNOMIAL",
57 allowed={
58 "POLYNOMIAL": "n-degree polynomial (use 'polynomialFitDegree' to set 'n').",
59 "EXPAPPROXIMATION": "Approximation in Astier+19 (Eq. 16).",
60 "FULLCOVARIANCE": "Full covariances model in Astier+19 (Eq. 20)"
61 }
62 )
63 sigmaClipFullFitCovariancesAstier = pexConfig.Field(
64 dtype=float,
65 doc="sigma clip for full model fit for FULLCOVARIANCE ptcFitType ",
66 default=5.0,
67 )
68 maxIterFullFitCovariancesAstier = pexConfig.Field(
69 dtype=int,
70 doc="Maximum number of iterations in full model fit for FULLCOVARIANCE ptcFitType",
71 default=3,
72 )
73 maximumRangeCovariancesAstier = pexConfig.Field(
74 dtype=int,
75 doc="Maximum range of covariances as in Astier+19",
76 default=8,
77 )
78 covAstierRealSpace = pexConfig.Field(
79 dtype=bool,
80 doc="Calculate covariances in real space or via FFT? (see appendix A of Astier+19).",
81 default=False,
82 )
83 polynomialFitDegree = pexConfig.Field(
84 dtype=int,
85 doc="Degree of polynomial to fit the PTC, when 'ptcFitType'=POLYNOMIAL.",
86 default=3,
87 )
88 doCreateLinearizer = pexConfig.Field(
89 dtype=bool,
90 doc="Calculate non-linearity and persist linearizer?",
91 default=False,
92 )
93 linearizerType = pexConfig.ChoiceField(
94 dtype=str,
95 doc="Linearizer type, if doCreateLinearizer=True",
96 default="LINEARIZEPOLYNOMIAL",
97 allowed={
98 "LINEARIZEPOLYNOMIAL": "n-degree polynomial (use 'polynomialFitDegreeNonLinearity' to set 'n').",
99 "LINEARIZESQUARED": "c0 quadratic coefficient derived from coefficients of polynomiual fit",
100 "LOOKUPTABLE": "Loouk table formed from linear part of polynomial fit."
101 }
102 )
103 polynomialFitDegreeNonLinearity = pexConfig.Field(
104 dtype=int,
105 doc="If doCreateLinearizer, degree of polynomial to fit the meanSignal vs exposureTime" +
106 " curve to produce the table for LinearizeLookupTable.",
107 default=3,
108 )
109 binSize = pexConfig.Field(
110 dtype=int,
111 doc="Bin the image by this factor in both dimensions.",
112 default=1,
113 )
114 minMeanSignal = pexConfig.Field(
115 dtype=float,
116 doc="Minimum value (inclusive) of mean signal (in DN) above which to consider.",
117 default=0,
118 )
119 maxMeanSignal = pexConfig.Field(
120 dtype=float,
121 doc="Maximum value (inclusive) of mean signal (in DN) below which to consider.",
122 default=9e6,
123 )
124 initialNonLinearityExclusionThresholdPositive = pexConfig.RangeField(
125 dtype=float,
126 doc="Initially exclude data points with a variance that are more than a factor of this from being"
127 " linear in the positive direction, from the PTC fit. Note that these points will also be"
128 " excluded from the non-linearity fit. This is done before the iterative outlier rejection,"
129 " to allow an accurate determination of the sigmas for said iterative fit.",
130 default=0.12,
131 min=0.0,
132 max=1.0,
133 )
134 initialNonLinearityExclusionThresholdNegative = pexConfig.RangeField(
135 dtype=float,
136 doc="Initially exclude data points with a variance that are more than a factor of this from being"
137 " linear in the negative direction, from the PTC fit. Note that these points will also be"
138 " excluded from the non-linearity fit. This is done before the iterative outlier rejection,"
139 " to allow an accurate determination of the sigmas for said iterative fit.",
140 default=0.25,
141 min=0.0,
142 max=1.0,
143 )
144 sigmaCutPtcOutliers = pexConfig.Field(
145 dtype=float,
146 doc="Sigma cut for outlier rejection in PTC.",
147 default=5.0,
148 )
149 maskNameList = pexConfig.ListField(
150 dtype=str,
151 doc="Mask list to exclude from statistics calculations.",
152 default=['SUSPECT', 'BAD', 'NO_DATA'],
153 )
154 nSigmaClipPtc = pexConfig.Field(
155 dtype=float,
156 doc="Sigma cut for afwMath.StatisticsControl()",
157 default=5.5,
158 )
159 nIterSigmaClipPtc = pexConfig.Field(
160 dtype=int,
161 doc="Number of sigma-clipping iterations for afwMath.StatisticsControl()",
162 default=1,
163 )
164 maxIterationsPtcOutliers = pexConfig.Field(
165 dtype=int,
166 doc="Maximum number of iterations for outlier rejection in PTC.",
167 default=2,
168 )
169 doFitBootstrap = pexConfig.Field(
170 dtype=bool,
171 doc="Use bootstrap for the PTC fit parameters and errors?.",
172 default=False,
173 )
174 maxAduForLookupTableLinearizer = pexConfig.Field(
175 dtype=int,
176 doc="Maximum DN value for the LookupTable linearizer.",
177 default=2**18,
178 )
179 instrumentName = pexConfig.Field(
180 dtype=str,
181 doc="Instrument name.",
182 default='',
183 )
186@dataclass
187class LinearityResidualsAndLinearizersDataset:
188 """A simple class to hold the output from the
189 `calculateLinearityResidualAndLinearizers` function.
190 """
191 # Normalized coefficients for polynomial NL correction
192 polynomialLinearizerCoefficients: list
193 # Normalized coefficient for quadratic polynomial NL correction (c0)
194 quadraticPolynomialLinearizerCoefficient: float
195 # LUT array row for the amplifier at hand
196 linearizerTableRow: list
197 meanSignalVsTimePolyFitPars: list
198 meanSignalVsTimePolyFitParsErr: list
199 meanSignalVsTimePolyFitReducedChiSq: float
202class PhotonTransferCurveDataset:
203 """A simple class to hold the output data from the PTC task.
205 The dataset is made up of a dictionary for each item, keyed by the
206 amplifiers' names, which must be supplied at construction time.
208 New items cannot be added to the class, to prevent accidentally saving to the
209 wrong property, and the class can be frozen if desired.
211 inputVisitPairs records the visits used to produce the data.
212 When fitPtc() or fitCovariancesAstier() is run, a mask is built up, which is by definition
213 always the same length as inputVisitPairs, rawExpTimes, rawMeans
214 and rawVars, and is a list of bools, which are incrementally set to False
215 as points are discarded from the fits.
217 PTC fit parameters for polynomials are stored in a list in ascending order
218 of polynomial term, i.e. par[0]*x^0 + par[1]*x + par[2]*x^2 etc
219 with the length of the list corresponding to the order of the polynomial
220 plus one.
222 Parameters
223 ----------
224 ampNames : `list`
225 List with the names of the amplifiers of the detector at hand.
227 ptcFitType : `str`
228 Type of model fitted to the PTC: "POLYNOMIAL", "EXPAPPROXIMATION", or "FULLCOVARIANCE".
230 Returns
231 -------
232 `lsst.cp.pipe.ptc.PhotonTransferCurveDataset`
233 Output dataset from MeasurePhotonTransferCurveTask.
234 """
236 def __init__(self, ampNames, ptcFitType):
237 # add items to __dict__ directly because __setattr__ is overridden
239 # instance variables
240 self.__dict__["ptcFitType"] = ptcFitType
241 self.__dict__["ampNames"] = ampNames
242 self.__dict__["badAmps"] = []
244 # raw data variables
245 # visitMask is the mask produced after outlier rejection. The mask produced by "FULLCOVARIANCE"
246 # may differ from the one produced in the other two PTC fit types.
247 self.__dict__["inputVisitPairs"] = {ampName: [] for ampName in ampNames}
248 self.__dict__["visitMask"] = {ampName: [] for ampName in ampNames}
249 self.__dict__["rawExpTimes"] = {ampName: [] for ampName in ampNames}
250 self.__dict__["rawMeans"] = {ampName: [] for ampName in ampNames}
251 self.__dict__["rawVars"] = {ampName: [] for ampName in ampNames}
253 # Gain and noise
254 self.__dict__["gain"] = {ampName: -1. for ampName in ampNames}
255 self.__dict__["gainErr"] = {ampName: -1. for ampName in ampNames}
256 self.__dict__["noise"] = {ampName: -1. for ampName in ampNames}
257 self.__dict__["noiseErr"] = {ampName: -1. for ampName in ampNames}
259 # if ptcFitType in ["POLYNOMIAL", "EXPAPPROXIMATION"]
260 # fit information
261 self.__dict__["ptcFitPars"] = {ampName: [] for ampName in ampNames}
262 self.__dict__["ptcFitParsError"] = {ampName: [] for ampName in ampNames}
263 self.__dict__["ptcFitReducedChiSquared"] = {ampName: [] for ampName in ampNames}
265 # if ptcFitType in ["FULLCOVARIANCE"]
266 # "covariancesTuple" is a numpy recarray with entries of the form
267 # ['mu', 'i', 'j', 'var', 'cov', 'npix', 'ext', 'expTime', 'ampName']
268 # "covariancesFits" has CovFit objects that fit the measured covariances to Eq. 20 of Astier+19.
269 # In "covariancesFitsWithNoB", "b"=0 in the model described by Eq. 20 of Astier+19.
270 self.__dict__["covariancesTuple"] = {ampName: [] for ampName in ampNames}
271 self.__dict__["covariancesFitsWithNoB"] = {ampName: [] for ampName in ampNames}
272 self.__dict__["covariancesFits"] = {ampName: [] for ampName in ampNames}
273 self.__dict__["aMatrix"] = {ampName: [] for ampName in ampNames}
274 self.__dict__["bMatrix"] = {ampName: [] for ampName in ampNames}
276 # "final" means that the "raw" vectors above had "visitMask" applied.
277 self.__dict__["finalVars"] = {ampName: [] for ampName in ampNames}
278 self.__dict__["finalModelVars"] = {ampName: [] for ampName in ampNames}
279 self.__dict__["finalMeans"] = {ampName: [] for ampName in ampNames}
281 def __setattr__(self, attribute, value):
282 """Protect class attributes"""
283 if attribute not in self.__dict__:
284 raise AttributeError(f"{attribute} is not already a member of PhotonTransferCurveDataset, which"
285 " does not support setting of new attributes.")
286 else:
287 self.__dict__[attribute] = value
289 def getVisitsUsed(self, ampName):
290 """Get the visits used, i.e. not discarded, for a given amp.
292 If no mask has been created yet, all visits are returned.
293 """
294 if len(self.visitMask[ampName]) == 0:
295 return self.inputVisitPairs[ampName]
297 # if the mask exists it had better be the same length as the visitPairs
298 assert len(self.visitMask[ampName]) == len(self.inputVisitPairs[ampName])
300 pairs = self.inputVisitPairs[ampName]
301 mask = self.visitMask[ampName]
302 # cast to bool required because numpy
303 return [(v1, v2) for ((v1, v2), m) in zip(pairs, mask) if bool(m) is True]
305 def getGoodAmps(self):
306 return [amp for amp in self.ampNames if amp not in self.badAmps]
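# Illustrative sketch, not part of ptc.py: the class docstring above notes that polynomial
# PTC fit parameters are stored in ascending order (par[0]*x^0 + par[1]*x + par[2]*x^2 ...).
# Such a list can be evaluated directly with numpy (already imported as np at the top of this
# module); the parameter and signal values below are made up for illustration only.
examplePtcPars = [120.0, 1.4, -2.0e-6]      # hypothetical [p0, p1, p2], ascending order
exampleMu = np.linspace(100., 50000., 5)    # hypothetical mean signal levels (DN)
exampleVar = np.polynomial.polynomial.polyval(exampleMu, examplePtcPars)  # expects ascending coefficients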
309class MeasurePhotonTransferCurveTask(pipeBase.CmdLineTask):
310 """A class to calculate, fit, and plot a PTC from a set of flat pairs.
312 The Photon Transfer Curve (var(signal) vs mean(signal)) is a standard tool
313 used in the characterization of astronomical detectors (e.g., Janesick 2001,
314 Janesick 2007). If ptcFitType is "EXPAPPROXIMATION" or "POLYNOMIAL", this task calculates the
315 PTC from a series of pairs of flat-field images; each pair taken at identical exposure
316 times. The difference image of each pair is formed to eliminate fixed pattern noise,
317 and then the variance of the difference image and the mean of the average image
318 are used to produce the PTC. An n-degree polynomial or the approximation in Equation
319 16 of Astier+19 ("The Shape of the Photon Transfer Curve of CCD sensors",
320 arXiv:1905.08677) can be fitted to the PTC curve. These models include
321 parameters such as the gain (e/DN) and readout noise.
323 Linearizers to correct for signal-chain non-linearity are also calculated.
324 The `Linearizer` class can, in general, support per-amp linearizers, but this
325 task does not support that.
327 If ptcFitType is "FULLCOVARIANCE", the covariances of the difference images are calculated via the
328 DFT methods described in Astier+19 and the variances for the PTC are given by the cov[0,0] elements
329 at each signal level. The full model in Equation 20 of Astier+19 is fit to the PTC to get the gain
330 and the noise.
332 Parameters
333 ----------
335 *args: `list`
336 Positional arguments passed to the Task constructor. None used at this
337 time.
338 **kwargs: `dict`
339 Keyword arguments passed on to the Task constructor. None used at this
340 time.
342 """
344 RunnerClass = PairedVisitListTaskRunner
345 ConfigClass = MeasurePhotonTransferCurveTaskConfig
346 _DefaultName = "measurePhotonTransferCurve"
348 def __init__(self, *args, **kwargs):
349 pipeBase.CmdLineTask.__init__(self, *args, **kwargs)
350 plt.interactive(False) # stop windows popping up when plotting. When headless, use 'agg' backend too
351 self.config.validate()
352 self.config.freeze()
354 @classmethod
355 def _makeArgumentParser(cls):
356 """Augment argument parser for the MeasurePhotonTransferCurveTask."""
357 parser = pipeBase.ArgumentParser(name=cls._DefaultName)
358 parser.add_argument("--visit-pairs", dest="visitPairs", nargs="*",
359 help="Visit pairs to use. Each pair must be of the form INT,INT e.g. 123,456")
360 parser.add_id_argument("--id", datasetType="photonTransferCurveDataset",
361 ContainerClass=NonexistentDatasetTaskDataIdContainer,
362 help="The ccds to use, e.g. --id ccd=0..100")
363 return parser
365 @pipeBase.timeMethod
366 def runDataRef(self, dataRef, visitPairs):
367 """Run the Photon Transfer Curve (PTC) measurement task.
369 For a dataRef (which is each detector here),
370 and given a list of visit pairs (postISR) at different exposure times,
371 measure the PTC.
373 Parameters
374 ----------
375 dataRef : list of lsst.daf.persistence.ButlerDataRef
376 dataRef for the detector for the visits to be fit.
378 visitPairs : `iterable` of `tuple` of `int`
379 Pairs of visit numbers to be processed together
380 """
382 # setup necessary objects
383 detNum = dataRef.dataId[self.config.ccdKey]
384 detector = dataRef.get('camera')[dataRef.dataId[self.config.ccdKey]]
385 # expand some missing fields that we need for lsstCam. This is a work-around
386 # for Gen2 problems that I (RHL) don't feel like solving. The calibs pipelines
387 # (which inherit from CalibTask) use addMissingKeys() to do basically the same thing
388 #
389 # Basically, the butler's trying to look up the fields in `raw_visit` which won't work
390 for name in dataRef.getButler().getKeys('bias'):
391 if name not in dataRef.dataId:
392 try:
393 dataRef.dataId[name] = \
394 dataRef.getButler().queryMetadata('raw', [name], detector=detNum)[0]
395 except OperationalError:
396 pass
398 amps = detector.getAmplifiers()
399 ampNames = [amp.getName() for amp in amps]
400 datasetPtc = PhotonTransferCurveDataset(ampNames, self.config.ptcFitType)
401 self.log.info('Measuring PTC using %s visits for detector %s' % (visitPairs, detector.getId()))
403 tupleRecords = []
404 allTags = []
405 for (v1, v2) in visitPairs:
406 # Get postISR exposures.
407 try:
408 dataRef.dataId['expId'] = v1
409 exp1 = dataRef.get("postISRCCD", immediate=True)
410 dataRef.dataId['expId'] = v2
411 exp2 = dataRef.get("postISRCCD", immediate=True)
412 except RuntimeError:
413 self.log.warn(f"postISR exposure for either expId {v1} or expId {v2} could not be retreived. "
414 "Ignoring flat pair.")
415 continue
416 del dataRef.dataId['expId']
418 checkExpLengthEqual(exp1, exp2, v1, v2, raiseWithMessage=True)
419 expTime = exp1.getInfo().getVisitInfo().getExposureTime()
420 tupleRows = []
421 nAmpsNan = 0
422 for ampNumber, amp in enumerate(detector):
423 ampName = amp.getName()
424 # covAstier: (i, j, var (cov[0,0]), cov, npix)
425 doRealSpace = self.config.covAstierRealSpace
426 muDiff, varDiff, covAstier = self.measureMeanVarCov(exp1, exp2, region=amp.getBBox(),
427 covAstierRealSpace=doRealSpace)
428 if np.isnan(muDiff) or np.isnan(varDiff) or (covAstier is None):
429 msg = (f"NaN mean or var, or None cov in amp {ampNumber} in visit pair {v1}, {v2} "
430 "of detector {detNum}.")
431 self.log.warn(msg)
432 nAmpsNan += 1
433 continue
434 tags = ['mu', 'i', 'j', 'var', 'cov', 'npix', 'ext', 'expTime', 'ampName']
435 if (muDiff <= self.config.minMeanSignal) or (muDiff >= self.config.maxMeanSignal):
436 continue
437 datasetPtc.rawExpTimes[ampName].append(expTime)
438 datasetPtc.rawMeans[ampName].append(muDiff)
439 datasetPtc.rawVars[ampName].append(varDiff)
440 datasetPtc.inputVisitPairs[ampName].append((v1, v2))
442 tupleRows += [(muDiff, ) + covRow + (ampNumber, expTime, ampName) for covRow in covAstier]
443 if nAmpsNan == len(ampNames):
444 msg = f"NaN mean in all amps of visit pair {v1}, {v2} of detector {detNum}."
445 self.log.warn(msg)
446 continue
447 allTags += tags
448 tupleRecords += tupleRows
449 covariancesWithTags = np.core.records.fromrecords(tupleRecords, names=allTags)
451 if self.config.ptcFitType in ["FULLCOVARIANCE", ]:
452 # Calculate covariances and fit them, including the PTC, to Astier+19 full model (Eq. 20)
453 datasetPtc = self.fitCovariancesAstier(datasetPtc, covariancesWithTags)
454 elif self.config.ptcFitType in ["EXPAPPROXIMATION", "POLYNOMIAL"]:
455 # Fit the PTC to a polynomial or to Astier+19 exponential approximation (Eq. 16)
456 # Fill up PhotonTransferCurveDataset object.
457 datasetPtc = self.fitPtc(datasetPtc, self.config.ptcFitType)
459 # Fit a polynomial to calculate non-linearity and persist linearizer.
460 if self.config.doCreateLinearizer:
461 numberAmps = len(amps)
462 numberAduValues = self.config.maxAduForLookupTableLinearizer
463 lookupTableArray = np.zeros((numberAmps, numberAduValues), dtype=np.float32)
465 # Fit (non)linearity of signal vs time curve.
466 # Fill up PhotonTransferCurveDataset object.
467 # Fill up array for LUT linearizer (tableArray).
468 # Produce coefficients for Polynomial and Squared linearizers.
469 # Build linearizer objects.
470 linearizer = self.fitNonLinearityAndBuildLinearizers(datasetPtc, detector,
471 tableArray=lookupTableArray,
472 log=self.log)
474 if self.config.linearizerType == "LINEARIZEPOLYNOMIAL":
475 linDataType = 'linearizePolynomial'
476 linMsg = "polynomial (coefficients for a polynomial correction)."
477 elif self.config.linearizerType == "LINEARIZESQUARED":
478 linDataType = 'linearizePolynomial'
479 linMsg = "squared (c0, derived from k_i coefficients of a polynomial fit)."
480 elif self.config.linearizerType == "LOOKUPTABLE":
481 linDataType = 'linearizePolynomial'
482 linMsg = "lookup table (linear component of polynomial fit)."
483 else:
484 raise RuntimeError("Invalid config.linearizerType {selg.config.linearizerType}. "
485 "Supported: 'LOOKUPTABLE', 'LINEARIZESQUARED', or 'LINEARIZEPOLYNOMIAL'")
487 butler = dataRef.getButler()
488 self.log.info(f"Writing linearizer: \n {linMsg}")
490 detName = detector.getName()
491 now = datetime.datetime.utcnow()
492 calibDate = now.strftime("%Y-%m-%d")
494 butler.put(linearizer, datasetType=linDataType, dataId={'detector': detNum,
495 'detectorName': detName, 'calibDate': calibDate})
497 self.log.info(f"Writing PTC data to {dataRef.getUri(write=True)}")
498 dataRef.put(datasetPtc, datasetType="photonTransferCurveDataset")
500 return pipeBase.Struct(exitStatus=0)
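# Illustrative sketch, not part of ptc.py: how the per-amp covariance rows assembled in
# runDataRef become a tagged recarray via np.core.records.fromrecords. The rows below are
# made up; the tag names match the ones used above.
exampleTags = ['mu', 'i', 'j', 'var', 'cov', 'npix', 'ext', 'expTime', 'ampName']
exampleRows = [(1000.0, 0, 0, 980.5, 980.5, 4000000, 0, 30.0, 'C00'),
               (1000.0, 0, 1, 980.5, 1.2, 3996000, 0, 30.0, 'C00')]
exampleRecords = np.core.records.fromrecords(exampleRows, names=exampleTags)
# Fields are then selected by tag, e.g. exampleRecords['mu'] or exampleRecords['cov'].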
502 def fitCovariancesAstier(self, dataset, covariancesWithTagsArray):
503 """Fit measured flat covariances to full model in Astier+19.
505 Parameters
506 ----------
507 dataset : `lsst.cp.pipe.ptc.PhotonTransferCurveDataset`
508 The dataset containing information such as the means, variances and exposure times.
510 covariancesWithTagsArray : `numpy.recarray`
511 Tuple with at least (mu, cov, var, i, j, npix), where:
512 mu : 0.5*(mu1 + mu2), where:
513 mu1: mean value of flat1
514 mu2: mean value of flat2
515 cov: covariance value at lag(i, j)
516 var: variance(covariance value at lag(0, 0))
517 i: lag dimension
518 j: lag dimension
519 npix: number of pixels used for covariance calculation.
521 Returns
522 -------
523 dataset: `lsst.cp.pipe.ptc.PhotonTransferCurveDataset`
524 This is the same dataset as the input parameter; however, it has been modified
525 to include information such as the fit vectors and the fit parameters. See
526 the class `PhotonTransferCurveDataset`.
527 """
529 covFits, covFitsNoB = fitData(covariancesWithTagsArray, maxMu=self.config.maxMeanSignal,
530 r=self.config.maximumRangeCovariancesAstier,
531 nSigmaFullFit=self.config.sigmaClipFullFitCovariancesAstier,
532 maxIterFullFit=self.config.maxIterFullFitCovariancesAstier)
534 dataset.covariancesTuple = covariancesWithTagsArray
535 dataset.covariancesFits = covFits
536 dataset.covariancesFitsWithNoB = covFitsNoB
537 dataset = self.getOutputPtcDataCovAstier(dataset, covFits)
539 return dataset
541 def getOutputPtcDataCovAstier(self, dataset, covFits):
542 """Get output data for PhotonTransferCurveCovAstierDataset from CovFit objects.
544 Parameters
545 ----------
546 dataset : `lsst.cp.pipe.ptc.PhotonTransferCurveDataset`
547 The dataset containing information such as the means, variances and exposure times.
549 covFits: `dict`
550 Dictionary of CovFit objects, with amp names as keys.
552 Returns
553 -------
554 dataset : `lsst.cp.pipe.ptc.PhotonTransferCurveDataset`
555 This is the same dataset as the input parameter; however, it has been modified
556 to include extra information such as the mask 1D array, gains, readout noise, measured signal,
557 measured variance, modeled variance, a, and b coefficient matrices (see Astier+19) per amplifier.
558 See the class `PhotonTransferCurveDataset`.
559 """
561 for i, amp in enumerate(covFits):
562 fit = covFits[amp]
563 (meanVecFinal, varVecFinal, varVecModel,
564 wc, varMask) = fit.getFitData(0, 0, divideByMu=False, returnMasked=True)
565 gain = fit.getGain()
566 dataset.visitMask[amp] = varMask
567 dataset.gain[amp] = gain
568 dataset.gainErr[amp] = fit.getGainErr()
569 dataset.noise[amp] = np.sqrt(np.fabs(fit.getRon()))
570 dataset.noiseErr[amp] = fit.getRonErr()
571 dataset.finalVars[amp].append(varVecFinal/(gain**2))
572 dataset.finalModelVars[amp].append(varVecModel/(gain**2))
573 dataset.finalMeans[amp].append(meanVecFinal/gain)
574 dataset.aMatrix[amp].append(fit.getA())
575 dataset.bMatrix[amp].append(fit.getB())
577 return dataset
579 def measureMeanVarCov(self, exposure1, exposure2, region=None, covAstierRealSpace=False):
580 """Calculate the mean of each of two exposures and the variance and covariance of their difference.
582 The variance is calculated via afwMath, and the covariance via the methods in Astier+19 (appendix A).
583 In theory, var = covariance[0,0]. This should be validated, and in the future, we may decide to just
584 keep one (covariance).
586 Parameters
587 ----------
588 exposure1 : `lsst.afw.image.exposure.exposure.ExposureF`
589 First exposure of flat field pair.
591 exposure2 : `lsst.afw.image.exposure.exposure.ExposureF`
592 Second exposure of flat field pair.
594 region : `lsst.geom.Box2I`, optional
595 Region of each exposure where to perform the calculations (e.g, an amplifier).
597 covAstierRealSpace : `bool`, optional
598 Should the covariances in Astier+19 be calculated in real space or via FFT?
599 See Appendix A of Astier+19.
601 Returns
602 -------
603 mu : `float` or `NaN`
604 0.5*(mu1 + mu2), where mu1 and mu2 are the clipped means of the regions in
605 both exposures. If either mu1 or mu2 are NaN's, the returned value is NaN.
607 varDiff : `float` or `NaN`
608 Half of the clipped variance of the difference of the regions in the two input
609 exposures. If either mu1 or mu2 are NaN's, the returned value is NaN.
611 covDiffAstier : `list` or `NaN`
612 List with tuples of the form (dx, dy, var, cov, npix), where:
613 dx : `int`
614 Lag in x
615 dy : `int`
616 Lag in y
617 var : `float`
618 Variance at (dx, dy).
619 cov : `float`
620 Covariance at (dx, dy).
621 nPix : `int`
622 Number of pixel pairs used to evaluate var and cov.
623 If either mu1 or mu2 are NaN's, the returned value is NaN.
624 """
626 if region is not None:
627 im1Area = exposure1.maskedImage[region]
628 im2Area = exposure2.maskedImage[region]
629 else:
630 im1Area = exposure1.maskedImage
631 im2Area = exposure2.maskedImage
633 if self.config.binSize > 1:
634 im1Area = afwMath.binImage(im1Area, self.config.binSize)
635 im2Area = afwMath.binImage(im2Area, self.config.binSize)
637 im1MaskVal = exposure1.getMask().getPlaneBitMask(self.config.maskNameList)
638 im1StatsCtrl = afwMath.StatisticsControl(self.config.nSigmaClipPtc,
639 self.config.nIterSigmaClipPtc,
640 im1MaskVal)
641 im1StatsCtrl.setNanSafe(True)
642 im1StatsCtrl.setAndMask(im1MaskVal)
644 im2MaskVal = exposure2.getMask().getPlaneBitMask(self.config.maskNameList)
645 im2StatsCtrl = afwMath.StatisticsControl(self.config.nSigmaClipPtc,
646 self.config.nIterSigmaClipPtc,
647 im2MaskVal)
648 im2StatsCtrl.setNanSafe(True)
649 im2StatsCtrl.setAndMask(im2MaskVal)
651 # Clipped mean of images; then average of mean.
652 mu1 = afwMath.makeStatistics(im1Area, afwMath.MEANCLIP, im1StatsCtrl).getValue()
653 mu2 = afwMath.makeStatistics(im2Area, afwMath.MEANCLIP, im2StatsCtrl).getValue()
654 if np.isnan(mu1) or np.isnan(mu2):
655 return np.nan, np.nan, None
656 mu = 0.5*(mu1 + mu2)
658 # Take difference of pairs
659 # symmetric formula: diff = (mu2*im1-mu1*im2)/(0.5*(mu1+mu2))
660 temp = im2Area.clone()
661 temp *= mu1
662 diffIm = im1Area.clone()
663 diffIm *= mu2
664 diffIm -= temp
665 diffIm /= mu
667 diffImMaskVal = diffIm.getMask().getPlaneBitMask(self.config.maskNameList)
668 diffImStatsCtrl = afwMath.StatisticsControl(self.config.nSigmaClipPtc,
669 self.config.nIterSigmaClipPtc,
670 diffImMaskVal)
671 diffImStatsCtrl.setNanSafe(True)
672 diffImStatsCtrl.setAndMask(diffImMaskVal)
674 varDiff = 0.5*(afwMath.makeStatistics(diffIm, afwMath.VARIANCECLIP, diffImStatsCtrl).getValue())
676 # Get the mask and identify good pixels as '1', and the rest as '0'.
677 w1 = np.where(im1Area.getMask().getArray() == 0, 1, 0)
678 w2 = np.where(im2Area.getMask().getArray() == 0, 1, 0)
680 w12 = w1*w2
681 wDiff = np.where(diffIm.getMask().getArray() == 0, 1, 0)
682 w = w12*wDiff
684 maxRangeCov = self.config.maximumRangeCovariancesAstier
685 if covAstierRealSpace:
686 covDiffAstier = computeCovDirect(diffIm.getImage().getArray(), w, maxRangeCov)
687 else:
688 shapeDiff = diffIm.getImage().getArray().shape
689 fftShape = (fftSize(shapeDiff[0] + maxRangeCov), fftSize(shapeDiff[1]+maxRangeCov))
690 c = CovFft(diffIm.getImage().getArray(), w, fftShape, maxRangeCov)
691 covDiffAstier = c.reportCovFft(maxRangeCov)
693 return mu, varDiff, covDiffAstier
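# Illustrative numpy-only sketch, not part of ptc.py: the statistic computed by
# measureMeanVarCov above, without the afwMath clipping and masking. The synthetic
# Poisson flats below stand in for a real postISR flat pair.
exampleRng = np.random.RandomState(42)
exampleFlat1 = exampleRng.poisson(10000., size=(200, 200)).astype(float)
exampleFlat2 = exampleRng.poisson(10000., size=(200, 200)).astype(float)
exampleMu1, exampleMu2 = exampleFlat1.mean(), exampleFlat2.mean()
exampleMuPair = 0.5*(exampleMu1 + exampleMu2)
exampleDiff = (exampleMu2*exampleFlat1 - exampleMu1*exampleFlat2)/exampleMuPair  # symmetric formula used above
exampleVarDiff = 0.5*exampleDiff.var()   # half the variance of the difference image
# For ideal Poisson flats in DN at unit gain, exampleVarDiff is close to exampleMuPair.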
695 def computeCovDirect(self, diffImage, weightImage, maxRange):
696 """Compute covariances of diffImage in real space.
698 For lags larger than ~25, it is slower than the FFT way.
699 Taken from https://github.com/PierreAstier/bfptc/
701 Parameters
702 ----------
703 diffImage : `numpy.array`
704 Image to compute the covariance of.
706 weightImage : `numpy.array`
707 Weight image of diffImage (1's and 0's for good and bad pixels, respectively).
709 maxRange : `int`
710 Last index of the covariance to be computed.
712 Returns
713 -------
714 outList : `list`
715 List with tuples of the form (dx, dy, var, cov, npix), where:
716 dx : `int`
717 Lag in x
718 dy : `int`
719 Lag in y
720 var : `float`
721 Variance at (dx, dy).
722 cov : `float`
723 Covariance at (dx, dy).
724 nPix : `int`
725 Number of pixel pairs used to evaluate var and cov.
726 """
727 outList = []
728 var = 0
729 # (dy,dx) = (0,0) has to be first
730 for dy in range(maxRange + 1):
731 for dx in range(0, maxRange + 1):
732 if (dx*dy > 0):
733 cov1, nPix1 = self.covDirectValue(diffImage, weightImage, dx, dy)
734 cov2, nPix2 = self.covDirectValue(diffImage, weightImage, dx, -dy)
735 cov = 0.5*(cov1 + cov2)
736 nPix = nPix1 + nPix2
737 else:
738 cov, nPix = self.covDirectValue(diffImage, weightImage, dx, dy)
739 if (dx == 0 and dy == 0):
740 var = cov
741 outList.append((dx, dy, var, cov, nPix))
743 return outList
745 def covDirectValue(self, diffImage, weightImage, dx, dy):
746 """Compute covariances of diffImage in real space at lag (dx, dy).
748 Taken from https://github.com/PierreAstier/bfptc/ (c.f., appendix of Astier+19).
750 Parameters
751 ----------
752 diffImage : `numpy.array`
753 Image to compute the covariance of.
755 weightImage : `numpy.array`
756 Weight image of diffImage (1's and 0's for good and bad pixels, respectively).
758 dx : `int`
759 Lag in x.
761 dy : `int`
762 Lag in y.
764 Returns
765 -------
766 cov : `float`
767 Covariance at (dx, dy)
769 nPix : `int`
770 Number of pixel pairs used to evaluate var and cov.
771 """
772 (nCols, nRows) = diffImage.shape
773 # switching both signs does not change anything:
774 # it just swaps im1 and im2 below
775 if (dx < 0):
776 (dx, dy) = (-dx, -dy)
777 # now, we have dx >0. We have to distinguish two cases
778 # depending on the sign of dy
779 if dy >= 0:
780 im1 = diffImage[dy:, dx:]
781 w1 = weightImage[dy:, dx:]
782 im2 = diffImage[:nCols - dy, :nRows - dx]
783 w2 = weightImage[:nCols - dy, :nRows - dx]
784 else:
785 im1 = diffImage[:nCols + dy, dx:]
786 w1 = weightImage[:nCols + dy, dx:]
787 im2 = diffImage[-dy:, :nRows - dx]
788 w2 = weightImage[-dy:, :nRows - dx]
789 # use the same mask for all 3 calculations
790 wAll = w1*w2
791 # do not use mean() because weightImage=0 pixels would then count
792 nPix = wAll.sum()
793 im1TimesW = im1*wAll
794 s1 = im1TimesW.sum()/nPix
795 s2 = (im2*wAll).sum()/nPix
796 p = (im1TimesW*im2).sum()/nPix
797 cov = p - s1*s2
799 return cov, nPix
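# Illustrative check, not part of ptc.py: with unit weights and lag (dx, dy) = (0, 0),
# the covariance formula above (p - s1*s2) reduces to the population variance of the image.
exampleImg = np.random.RandomState(0).normal(0., 5., size=(100, 100))
exampleW = np.ones_like(exampleImg)
exampleNPix = exampleW.sum()
exampleS1 = (exampleImg*exampleW).sum()/exampleNPix
exampleS2 = (exampleImg*exampleW).sum()/exampleNPix
exampleP = (exampleImg*exampleImg*exampleW).sum()/exampleNPix
assert np.isclose(exampleP - exampleS1*exampleS2, exampleImg.var())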
801 def fitNonLinearityAndBuildLinearizers(self, datasetPtc, detector, tableArray=None, log=None):
802 """Fit non-linearity function and build linearizer objects.
804 Parameters
805 ----------
806 datasetPtc : `lsst.cp.pipe.ptc.PhotonTransferCurveDataset`
807 The dataset containing information such as the means, variances and exposure times.
810 detector : `lsst.afw.cameraGeom.Detector`
811 Detector object.
813 tableArray : `np.array`, optional
814 Optional. Look-up table array with size rows=nAmps and columns=DN values.
815 It will be modified in-place if supplied.
817 log : `lsst.log.Log`, optional
818 Logger to handle messages.
820 Returns
821 -------
822 linearizer : `lsst.ip.isr.Linearizer`
823 Linearizer object
824 """
826 # Fit NonLinearity
827 datasetNonLinearity = self.fitNonLinearity(datasetPtc, tableArray=tableArray)
829 # Produce linearizer
830 now = datetime.datetime.utcnow()
831 calibDate = now.strftime("%Y-%m-%d")
832 linType = self.config.linearizerType
834 if linType == "LOOKUPTABLE":
835 tableArray = tableArray
836 else:
837 tableArray = None
839 linearizer = self.buildLinearizerObject(datasetNonLinearity, detector, calibDate, linType,
840 instruName=self.config.instrumentName,
841 tableArray=tableArray,
842 log=log)
844 return linearizer
846 def fitNonLinearity(self, datasetPtc, tableArray=None):
847 """Fit a polynomial to signal vs effective time curve to calculate linearity and residuals.
849 Parameters
850 ----------
851 datasetPtc : `lsst.cp.pipe.ptc.PhotonTransferCurveDataset`
852 The dataset containing the means, variances and exposure times.
854 tableArray : `np.array`
855 Optional. Look-up table array with size rows=nAmps and columns=DN values.
856 It will be modified in-place if supplied.
858 Returns
859 -------
860 datasetNonLinearity : `dict`
861 Dictionary of `lsst.cp.pipe.ptc.LinearityResidualsAndLinearizersDataset`
862 dataclasses. Each one holds the output of `calculateLinearityResidualAndLinearizers` per
863 amplifier.
864 """
865 datasetNonLinearity = {ampName: [] for ampName in datasetPtc.ampNames}
866 for i, ampName in enumerate(datasetPtc.ampNames):
867 # If a mask is not found, use all points.
868 if (len(datasetPtc.visitMask[ampName]) == 0):
869 self.log.warn(f"Mask not found for {ampName} in non-linearity fit. Using all points.")
870 mask = np.repeat(True, len(datasetPtc.rawExpTimes[ampName]))
871 else:
872 mask = datasetPtc.visitMask[ampName]
874 timeVecFinal = np.array(datasetPtc.rawExpTimes[ampName])[mask]
875 meanVecFinal = np.array(datasetPtc.rawMeans[ampName])[mask]
877 # Non-linearity residuals (NL of mean vs time curve): percentage, and fit to a quadratic function
878 # In this case, len(parsIniNonLinearity) = 3 indicates that we want a quadratic fit
879 datasetLinRes = self.calculateLinearityResidualAndLinearizers(timeVecFinal, meanVecFinal)
881 # LinearizerLookupTable
882 if tableArray is not None:
883 tableArray[i, :] = datasetLinRes.linearizerTableRow
885 datasetNonLinearity[ampName] = datasetLinRes
887 return datasetNonLinearity
889 def calculateLinearityResidualAndLinearizers(self, exposureTimeVector, meanSignalVector):
890 """Calculate linearity residual and fit an n-order polynomial to the mean vs time curve
891 to produce corrections (deviation from linear part of polynomial) for a particular amplifier
892 to populate LinearizeLookupTable.
893 Use the coefficients of this fit to calculate the correction coefficients for LinearizePolynomial
894 and LinearizeSquared.
896 Parameters
897 ----------
899 exposureTimeVector: `list` of `float`
900 List of exposure times for each flat pair
902 meanSignalVector: `list` of `float`
903 List of mean signal from difference image of flat pairs
905 Returns
906 -------
907 dataset : `lsst.cp.pipe.ptc.LinearityResidualsAndLinearizersDataset`
908 The dataset containing the fit parameters, the NL correction coefficients, and the
909 LUT row for the amplifier at hand.
911 Notes
912 -----
913 dataset members:
915 dataset.polynomialLinearizerCoefficients : `list` of `float`
916 Coefficients for LinearizePolynomial, where corrImage = uncorrImage + sum_i c_i uncorrImage^(2 +
917 i).
918 c_(j-2) = -k_j/(k_1^j) with units DN^(1-j) (c.f., Eq. 37 of 2003.05978). The units of k_j are
919 DN/t^j, and they are fit from meanSignalVector = k0 + k1*exposureTimeVector +
920 k2*exposureTimeVector^2 + ... + kn*exposureTimeVector^n, with
921 n = "polynomialFitDegreeNonLinearity". k_0 and k_1 and degenerate with bias level and gain,
922 and are not used by the non-linearity correction. Therefore, j = 2...n in the above expression
923 (see `LinearizePolynomial` class in `linearize.py`.)
925 dataset.quadraticPolynomialLinearizerCoefficient : `float`
926 Coefficient for LinearizeSquared, where corrImage = uncorrImage + c0*uncorrImage^2.
927 c0 = -k2/(k1^2), where k1 and k2 are fit from
928 meanSignalVector = k0 + k1*exposureTimeVector + k2*exposureTimeVector^2 +...
929 + kn*exposureTimeVector^n, with n = "polynomialFitDegreeNonLinearity".
931 dataset.linearizerTableRow : `list` of `float`
932 One dimensional array with deviation from linear part of n-order polynomial fit
933 to mean vs time curve. This array will be one row (for the particular amplifier at hand)
934 of the table array for LinearizeLookupTable.
936 dataset.meanSignalVsTimePolyFitPars : `list` of `float`
937 Parameters from n-order polynomial fit to meanSignalVector vs exposureTimeVector.
939 dataset.meanSignalVsTimePolyFitParsErr : `list` of `float`
940 Errors of the parameters from the n-order polynomial fit to meanSignalVector vs exposureTimeVector.
942 dataset.meanSignalVsTimePolyFitReducedChiSq : `float`
943 Reduced unweighted chi squared from polynomial fit to meanSignalVector vs exposureTimeVector.
944 """
946 # Lookup table linearizer
947 parsIniNonLinearity = self._initialParsForPolynomial(self.config.polynomialFitDegreeNonLinearity + 1)
948 if self.config.doFitBootstrap:
949 (parsFit, parsFitErr,
950 reducedChiSquaredNonLinearityFit) = fitBootstrap(parsIniNonLinearity,
951 exposureTimeVector,
952 meanSignalVector,
953 funcPolynomial,
954 weightsY=1./np.sqrt(meanSignalVector))
955 else:
956 (parsFit, parsFitErr,
957 reducedChiSquaredNonLinearityFit) = fitLeastSq(parsIniNonLinearity,
958 exposureTimeVector,
959 meanSignalVector,
960 funcPolynomial,
961 weightsY=1./np.sqrt(meanSignalVector))
963 # LinearizeLookupTable:
964 # Use linear part to get time at which signal is maxAduForLookupTableLinearizer DN
965 tMax = (self.config.maxAduForLookupTableLinearizer - parsFit[0])/parsFit[1]
966 timeRange = np.linspace(0, tMax, self.config.maxAduForLookupTableLinearizer)
967 signalIdeal = parsFit[0] + parsFit[1]*timeRange
968 signalUncorrected = funcPolynomial(parsFit, timeRange)
969 linearizerTableRow = signalIdeal - signalUncorrected # LinearizerLookupTable has corrections
970 # LinearizePolynomial and LinearizeSquared:
971 # Check that magnitude of higher order (>= 3) coefficients of the polyFit are small,
972 # i.e., less than threshold = 1e-10 (typical quadratic and cubic coefficients are ~1e-6
973 # and ~1e-12).
974 k1 = parsFit[1]
975 polynomialLinearizerCoefficients = []
976 for i, coefficient in enumerate(parsFit):
977 c = -coefficient/(k1**i)
978 polynomialLinearizerCoefficients.append(c)
979 if np.fabs(c) > 1e-10:
980 msg = f"Coefficient {c} in polynomial fit larger than threshold 1e-10."
981 self.log.warn(msg)
982 # Coefficient for LinearizeSquared. Called "c0" in linearize.py
983 c0 = polynomialLinearizerCoefficients[2]
985 dataset = LinearityResidualsAndLinearizersDataset([], None, [], [], [], None)
986 dataset.polynomialLinearizerCoefficients = polynomialLinearizerCoefficients
987 dataset.quadraticPolynomialLinearizerCoefficient = c0
988 dataset.linearizerTableRow = linearizerTableRow
989 dataset.meanSignalVsTimePolyFitPars = parsFit
990 dataset.meanSignalVsTimePolyFitParsErr = parsFitErr
991 dataset.meanSignalVsTimePolyFitReducedChiSq = reducedChiSquaredNonLinearityFit
993 return dataset
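# Illustrative sketch, not part of ptc.py: the linearizer algebra documented and implemented
# above, on a synthetic quadratic signal-vs-time curve and with plain numpy instead of
# fitLeastSq/fitBootstrap. All values are made up.
exampleTimes = np.linspace(1., 100., 50)
exampleKTrue = [10., 500., -0.05]                      # hypothetical k0, k1, k2
exampleSignal = exampleKTrue[0] + exampleKTrue[1]*exampleTimes + exampleKTrue[2]*exampleTimes**2
exampleKFit = np.polynomial.polynomial.polyfit(exampleTimes, exampleSignal, 2)  # ascending: k0, k1, k2
exampleC0 = -exampleKFit[2]/(exampleKFit[1]**2)        # LinearizeSquared coefficient, c0 = -k2/k1^2
exampleMaxAdu = 2**18
exampleTMax = (exampleMaxAdu - exampleKFit[0])/exampleKFit[1]
exampleTimeRange = np.linspace(0., exampleTMax, exampleMaxAdu)
exampleIdeal = exampleKFit[0] + exampleKFit[1]*exampleTimeRange
exampleUncorrected = np.polynomial.polynomial.polyval(exampleTimeRange, exampleKFit)
exampleLutRow = exampleIdeal - exampleUncorrected      # one row of the LinearizeLookupTable array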
995 def buildLinearizerObject(self, datasetNonLinearity, detector, calibDate, linearizerType, instruName='',
996 tableArray=None, log=None):
997 """Build linearizer object to persist.
999 Parameters
1000 ----------
1001 datasetNonLinearity : `dict`
1002 Dictionary of `lsst.cp.pipe.ptc.LinearityResidualsAndLinearizersDataset` objects.
1004 detector : `lsst.afw.cameraGeom.Detector`
1005 Detector object
1007 calibDate : `datetime.datetime`
1008 Calibration date
1010 linearizerType : `str`
1011 'LOOKUPTABLE', 'LINEARIZESQUARED', or 'LINEARIZEPOLYNOMIAL'
1013 instruName : `str`, optional
1014 Instrument name
1016 tableArray : `np.array`, optional
1017 Look-up table array with size rows=nAmps and columns=DN values
1019 log : `lsst.log.Log`, optional
1020 Logger to handle messages
1022 Returns
1023 -------
1024 linearizer : `lsst.ip.isr.Linearizer`
1025 Linearizer object
1026 """
1027 detName = detector.getName()
1028 detNum = detector.getId()
1029 if linearizerType == "LOOKUPTABLE":
1030 if tableArray is not None:
1031 linearizer = Linearizer(detector=detector, table=tableArray, log=log)
1032 else:
1033 raise RuntimeError("tableArray must be provided when creating a LookupTable linearizer")
1034 elif linearizerType in ("LINEARIZESQUARED", "LINEARIZEPOLYNOMIAL"):
1035 linearizer = Linearizer(log=log)
1036 else:
1037 raise RuntimeError(f"Invalid linearizerType {linearizerType} to build a Linearizer object. "
1038 "Supported: 'LOOKUPTABLE', 'LINEARIZESQUARED', or 'LINEARIZEPOLYNOMIAL'")
1039 for i, amp in enumerate(detector.getAmplifiers()):
1040 ampName = amp.getName()
1041 datasetNonLinAmp = datasetNonLinearity[ampName]
1042 if linearizerType == "LOOKUPTABLE":
1043 linearizer.linearityCoeffs[ampName] = [i, 0]
1044 linearizer.linearityType[ampName] = "LookupTable"
1045 elif linearizerType == "LINEARIZESQUARED":
1046 linearizer.fitParams[ampName] = datasetNonLinAmp.meanSignalVsTimePolyFitPars
1047 linearizer.fitParamsErr[ampName] = datasetNonLinAmp.meanSignalVsTimePolyFitParsErr
1048 linearizer.linearityFitReducedChiSquared[ampName] = (
1049 datasetNonLinAmp.meanSignalVsTimePolyFitReducedChiSq)
1050 linearizer.linearityCoeffs[ampName] = [
1051 datasetNonLinAmp.quadraticPolynomialLinearizerCoefficient]
1052 linearizer.linearityType[ampName] = "Squared"
1053 elif linearizerType == "LINEARIZEPOLYNOMIAL":
1054 linearizer.fitParams[ampName] = datasetNonLinAmp.meanSignalVsTimePolyFitPars
1055 linearizer.fitParamsErr[ampName] = datasetNonLinAmp.meanSignalVsTimePolyFitParsErr
1056 linearizer.linearityFitReducedChiSquared[ampName] = (
1057 datasetNonLinAmp.meanSignalVsTimePolyFitReducedChiSq)
1058 # Slice correction coefficients (starting at 2) for polynomial linearizer
1059 # (and squared linearizer above). The first and second are redundant with
1060 # the bias and gain, respectively, and are not used by LinearizePolynomial.
1061 polyLinCoeffs = np.array(datasetNonLinAmp.polynomialLinearizerCoefficients[2:])
1062 linearizer.linearityCoeffs[ampName] = polyLinCoeffs
1063 linearizer.linearityType[ampName] = "Polynomial"
1064 linearizer.linearityBBox[ampName] = amp.getBBox()
1065 linearizer.validate()
1066 calibId = f"detectorName={detName} detector={detNum} calibDate={calibDate} ccd={detNum} filter=NONE"
1068 try:
1069 raftName = detName.split("_")[0]
1070 calibId += f" raftName={raftName}"
1071 except Exception:
1072 raftname = "NONE"
1073 calibId += f" raftName={raftname}"
1075 serial = detector.getSerial()
1076 linearizer.updateMetadata(instrumentName=instruName, detectorId=f"{detNum}",
1077 calibId=calibId, serial=serial, detectorName=f"{detName}")
1079 return linearizer
1081 @staticmethod
1082 def _initialParsForPolynomial(order):
1083 assert(order >= 2)
1084 pars = np.zeros(order, dtype=np.float64)
1085 pars[0] = 10
1086 pars[1] = 1
1087 pars[2:] = 0.0001
1088 return pars
1090 @staticmethod
1091 def _boundsForPolynomial(initialPars):
1092 lowers = [np.NINF for p in initialPars]
1093 uppers = [np.inf for p in initialPars]
1094 lowers[1] = 0 # no negative gains
1095 return (lowers, uppers)
1097 @staticmethod
1098 def _boundsForAstier(initialPars):
1099 lowers = [np.NINF for p in initialPars]
1100 uppers = [np.inf for p in initialPars]
1101 return (lowers, uppers)
1103 @staticmethod
1104 def _getInitialGoodPoints(means, variances, maxDeviationPositive, maxDeviationNegative):
1105 """Return a boolean array to mask bad points.
1107 A linear function has a constant ratio, so find the median
1108 value of the ratios, and exclude the points that deviate
1109 from that by more than a factor of maxDeviationPositive/negative.
1110 Asymmetric deviations are supported as we expect the PTC to turn
1111 down as the flux increases, but sometimes it anomalously turns
1112 upwards just before turning over, which ruins the fits, so it
1113 is wise to be stricter about restricting positive outliers than
1114 negative ones.
1116 Too high, and points that are so bad that the fit will fail will be included.
1117 Too low, and the non-linear points will be excluded, biasing the NL fit."""
1118 ratios = [b/a for (a, b) in zip(means, variances)]
1119 medianRatio = np.median(ratios)
1120 ratioDeviations = [(r/medianRatio)-1 for r in ratios]
1122 # so that it doesn't matter if the deviation is expressed as positive or negative
1123 maxDeviationPositive = abs(maxDeviationPositive)
1124 maxDeviationNegative = -1. * abs(maxDeviationNegative)
1126 goodPoints = np.array([True if (r < maxDeviationPositive and r > maxDeviationNegative)
1127 else False for r in ratioDeviations])
1128 return goodPoints
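# Illustrative sketch, not part of ptc.py: the median-ratio masking implemented above,
# applied to a synthetic PTC with one turned-over (saturated) point. Values are made up;
# the thresholds are the config defaults (0.12 positive, 0.25 negative).
exampleMeans = np.array([1000., 5000., 10000., 50000., 90000.])
exampleVars = np.array([1430., 7150., 14300., 71500., 5000.])  # last point has collapsed variance
exampleRatios = exampleVars/exampleMeans
exampleDeviations = exampleRatios/np.median(exampleRatios) - 1.
exampleGood = (exampleDeviations < 0.12) & (exampleDeviations > -0.25)
# exampleGood is [True, True, True, True, False]: the saturated point is rejected.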
1130 def _makeZeroSafe(self, array, warn=True, substituteValue=1e-9):
1131 """"""
1132 nBad = Counter(array)[0]
1133 if nBad == 0:
1134 return array
1136 if warn:
1137 msg = f"Found {nBad} zeros in array at elements {[x for x in np.where(array==0)[0]]}"
1138 self.log.warn(msg)
1140 array[array == 0] = substituteValue
1141 return array
1143 def fitPtc(self, dataset, ptcFitType):
1144 """Fit the photon transfer curve to a polynimial or to Astier+19 approximation.
1146 Fit the photon transfer curve with either a polynomial of the order
1147 specified in the task config, or using the Astier approximation.
1149 Sigma clipping is performed iteratively for the fit, as well as an
1150 initial clipping of data points that are more than
1151 config.initialNonLinearityExclusionThreshold away from lying on a
1152 straight line. This other step is necessary because the photon transfer
1153 curve turns over catastrophically at very high flux (because saturation
1154 drops the variance to ~0) and these far outliers cause the initial fit
1155 to fail, meaning the sigma cannot be calculated to perform the
1156 sigma-clipping.
1158 Parameters
1159 ----------
1160 dataset : `lsst.cp.pipe.ptc.PhotonTransferCurveDataset`
1161 The dataset containing the means, variances and exposure times
1163 ptcFitType : `str`
1164 Fit a 'POLYNOMIAL' (degree: 'polynomialFitDegree') or
1165 'EXPAPPROXIMATION' (Eq. 16 of Astier+19) to the PTC
1167 Returns
1168 -------
1169 dataset: `lsst.cp.pipe.ptc.PhotonTransferCurveDataset`
1170 This is the same dataset as the input parameter; however, it has been modified
1171 to include information such as the fit vectors and the fit parameters. See
1172 the class `PhotonTransferCurveDataset`.
1173 """
1175 def errFunc(p, x, y):
1176 return ptcFunc(p, x) - y
1178 sigmaCutPtcOutliers = self.config.sigmaCutPtcOutliers
1179 maxIterationsPtcOutliers = self.config.maxIterationsPtcOutliers
1181 for i, ampName in enumerate(dataset.ampNames):
1182 timeVecOriginal = np.array(dataset.rawExpTimes[ampName])
1183 meanVecOriginal = np.array(dataset.rawMeans[ampName])
1184 varVecOriginal = np.array(dataset.rawVars[ampName])
1185 varVecOriginal = self._makeZeroSafe(varVecOriginal)
1187 mask = ((meanVecOriginal >= self.config.minMeanSignal) &
1188 (meanVecOriginal <= self.config.maxMeanSignal))
1190 goodPoints = self._getInitialGoodPoints(meanVecOriginal, varVecOriginal,
1191 self.config.initialNonLinearityExclusionThresholdPositive,
1192 self.config.initialNonLinearityExclusionThresholdNegative)
1193 mask = mask & goodPoints
1195 if ptcFitType == 'EXPAPPROXIMATION':
1196 ptcFunc = funcAstier
1197 parsIniPtc = [-1e-9, 1.0, 10.] # a00, gain, noise
1198 bounds = self._boundsForAstier(parsIniPtc)
1199 if ptcFitType == 'POLYNOMIAL':
1200 ptcFunc = funcPolynomial
1201 parsIniPtc = self._initialParsForPolynomial(self.config.polynomialFitDegree + 1)
1202 bounds = self._boundsForPolynomial(parsIniPtc)
1204 # Before bootstrap fit, do an iterative fit to get rid of outliers
1205 count = 1
1206 while count <= maxIterationsPtcOutliers:
1207 # Note that application of the mask actually shrinks the array
1208 # to size rather than setting elements to zero (as we want) so
1209 # always update mask itself and re-apply to the original data
1210 meanTempVec = meanVecOriginal[mask]
1211 varTempVec = varVecOriginal[mask]
1212 res = least_squares(errFunc, parsIniPtc, bounds=bounds, args=(meanTempVec, varTempVec))
1213 pars = res.x
1215 # change this to the original from the temp because the masks are ANDed
1216 # meaning once a point is masked it's always masked, and the masks must
1217 # always be the same length for broadcasting
1218 sigResids = (varVecOriginal - ptcFunc(pars, meanVecOriginal))/np.sqrt(varVecOriginal)
1219 newMask = np.array([True if np.abs(r) < sigmaCutPtcOutliers else False for r in sigResids])
1220 mask = mask & newMask
1222 nDroppedTotal = Counter(mask)[False]
1223 self.log.debug(f"Iteration {count}: discarded {nDroppedTotal} points in total for {ampName}")
1224 count += 1
1225 # objects should never shrink
1226 assert (len(mask) == len(timeVecOriginal) == len(meanVecOriginal) == len(varVecOriginal))
1228 dataset.visitMask[ampName] = mask # store the final mask
1229 parsIniPtc = pars
1230 meanVecFinal = meanVecOriginal[mask]
1231 varVecFinal = varVecOriginal[mask]
1233 if Counter(mask)[False] > 0:
1234 self.log.info((f"Number of points discarded in PTC of amplifier {ampName}:" +
1235 f" {Counter(mask)[False]} out of {len(meanVecOriginal)}"))
1237 if (len(meanVecFinal) < len(parsIniPtc)):
1238 msg = (f"\nSERIOUS: Not enough data points ({len(meanVecFinal)}) compared to the number of"
1239 f"parameters of the PTC model({len(parsIniPtc)}). Setting {ampName} to BAD.")
1240 self.log.warn(msg)
1241 # The first and second parameters of initial fit are discarded (bias and gain)
1242 # for the final NL coefficients
1243 dataset.badAmps.append(ampName)
1244 dataset.gain[ampName] = np.nan
1245 dataset.gainErr[ampName] = np.nan
1246 dataset.noise[ampName] = np.nan
1247 dataset.noiseErr[ampName] = np.nan
1248 dataset.ptcFitPars[ampName] = np.nan
1249 dataset.ptcFitParsError[ampName] = np.nan
1250 dataset.ptcFitReducedChiSquared[ampName] = np.nan
1251 continue
1253 # Fit the PTC
1254 if self.config.doFitBootstrap:
1255 parsFit, parsFitErr, reducedChiSqPtc = fitBootstrap(parsIniPtc, meanVecFinal,
1256 varVecFinal, ptcFunc,
1257 weightsY=1./np.sqrt(varVecFinal))
1258 else:
1259 parsFit, parsFitErr, reducedChiSqPtc = fitLeastSq(parsIniPtc, meanVecFinal,
1260 varVecFinal, ptcFunc,
1261 weightsY=1./np.sqrt(varVecFinal))
1262 dataset.ptcFitPars[ampName] = parsFit
1263 dataset.ptcFitParsError[ampName] = parsFitErr
1264 dataset.ptcFitReducedChiSquared[ampName] = reducedChiSqPtc
1266 if ptcFitType == 'EXPAPPROXIMATION':
1267 ptcGain = parsFit[1]
1268 ptcGainErr = parsFitErr[1]
1269 ptcNoise = np.sqrt(np.fabs(parsFit[2]))
1270 ptcNoiseErr = 0.5*(parsFitErr[2]/np.fabs(parsFit[2]))*np.sqrt(np.fabs(parsFit[2]))
1271 if ptcFitType == 'POLYNOMIAL':
1272 ptcGain = 1./parsFit[1]
1273 ptcGainErr = np.fabs(1./parsFit[1])*(parsFitErr[1]/parsFit[1])
1274 ptcNoise = np.sqrt(np.fabs(parsFit[0]))*ptcGain
1275 ptcNoiseErr = (0.5*(parsFitErr[0]/np.fabs(parsFit[0]))*(np.sqrt(np.fabs(parsFit[0]))))*ptcGain
1276 dataset.gain[ampName] = ptcGain
1277 dataset.gainErr[ampName] = ptcGainErr
1278 dataset.noise[ampName] = ptcNoise
1279 dataset.noiseErr[ampName] = ptcNoiseErr
1280 if not len(dataset.ptcFitType) == 0:
1281 dataset.ptcFitType = ptcFitType
1283 return dataset