Coverage for python/lsst/cp/pipe/ptc.py : 11%

1# This file is part of cp_pipe.
2#
3# Developed for the LSST Data Management System.
4# This product includes software developed by the LSST Project
5# (https://www.lsst.org).
6# See the COPYRIGHT file at the top-level directory of this distribution
7# for details of code ownership.
8#
9# This program is free software: you can redistribute it and/or modify
10# it under the terms of the GNU General Public License as published by
11# the Free Software Foundation, either version 3 of the License, or
12# (at your option) any later version.
13#
14# This program is distributed in the hope that it will be useful,
15# but WITHOUT ANY WARRANTY; without even the implied warranty of
16# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17# GNU General Public License for more details.
18#
19# You should have received a copy of the GNU General Public License
20# along with this program. If not, see <https://www.gnu.org/licenses/>.
21#
23__all__ = ['MeasurePhotonTransferCurveTask',
24 'MeasurePhotonTransferCurveTaskConfig',
25 'PhotonTransferCurveDataset']
27import numpy as np
28import matplotlib.pyplot as plt
29from sqlite3 import OperationalError
30from collections import Counter
31from dataclasses import dataclass
33import lsst.afw.math as afwMath
34import lsst.pex.config as pexConfig
35import lsst.pipe.base as pipeBase
36from .utils import (NonexistentDatasetTaskDataIdContainer, PairedVisitListTaskRunner,
37 checkExpLengthEqual, fitLeastSq, fitBootstrap, funcPolynomial, funcAstier)
38from scipy.optimize import least_squares
40from lsst.ip.isr.linearize import Linearizer
41import datetime
43from .astierCovPtcUtils import (fftSize, CovFft, computeCovDirect, fitData)
46class MeasurePhotonTransferCurveTaskConfig(pexConfig.Config):
47 """Config class for photon transfer curve measurement task"""
48 ccdKey = pexConfig.Field(
49 dtype=str,
50 doc="The key by which to pull a detector from a dataId, e.g. 'ccd' or 'detector'.",
51 default='ccd',
52 )
53 ptcFitType = pexConfig.ChoiceField(
54 dtype=str,
55 doc="Fit PTC to approximation in Astier+19 (Equation 16) or to a polynomial.",
56 default="POLYNOMIAL",
57 allowed={
58 "POLYNOMIAL": "n-degree polynomial (use 'polynomialFitDegree' to set 'n').",
59 "EXPAPPROXIMATION": "Approximation in Astier+19 (Eq. 16).",
60 "FULLCOVARIANCE": "Full covariances model in Astier+19 (Eq. 20)"
61 }
62 )
63 sigmaClipFullFitCovariancesAstier = pexConfig.Field(
64 dtype=float,
65 doc="sigma clip for full model fit for FULLCOVARIANCE ptcFitType ",
66 default=5.0,
67 )
68 maxIterFullFitCovariancesAstier = pexConfig.Field(
69 dtype=int,
70 doc="Maximum number of iterations in full model fit for FULLCOVARIANCE ptcFitType",
71 default=3,
72 )
73 maximumRangeCovariancesAstier = pexConfig.Field(
74 dtype=int,
75 doc="Maximum range of covariances as in Astier+19",
76 default=8,
77 )
78 covAstierRealSpace = pexConfig.Field(
79 dtype=bool,
80 doc="Calculate covariances in real space or via FFT? (see appendix A of Astier+19).",
81 default=False,
82 )
83 polynomialFitDegree = pexConfig.Field(
84 dtype=int,
85 doc="Degree of polynomial to fit the PTC, when 'ptcFitType'=POLYNOMIAL.",
86 default=3,
87 )
88 doCreateLinearizer = pexConfig.Field(
89 dtype=bool,
90 doc="Calculate non-linearity and persist linearizer?",
91 default=False,
92 )
93 linearizerType = pexConfig.ChoiceField(
94 dtype=str,
95 doc="Linearizer type, if doCreateLinearizer=True",
96 default="LINEARIZEPOLYNOMIAL",
97 allowed={
98 "LINEARIZEPOLYNOMIAL": "n-degree polynomial (use 'polynomialFitDegreeNonLinearity' to set 'n').",
99 "LINEARIZESQUARED": "c0 quadratic coefficient derived from coefficients of polynomiual fit",
100 "LOOKUPTABLE": "Loouk table formed from linear part of polynomial fit."
101 }
102 )
103 polynomialFitDegreeNonLinearity = pexConfig.Field(
104 dtype=int,
105 doc="If doCreateLinearizer, degree of polynomial to fit the meanSignal vs exposureTime" +
106 " curve to produce the table for LinearizeLookupTable.",
107 default=3,
108 )
109 binSize = pexConfig.Field(
110 dtype=int,
111 doc="Bin the image by this factor in both dimensions.",
112 default=1,
113 )
114 minMeanSignal = pexConfig.Field(
115 dtype=float,
116 doc="Minimum value (inclusive) of mean signal (in DN) above which to consider.",
117 default=0,
118 )
119 maxMeanSignal = pexConfig.Field(
120 dtype=float,
121 doc="Maximum value (inclusive) of mean signal (in DN) below which to consider.",
122 default=9e6,
123 )
124 initialNonLinearityExclusionThresholdPositive = pexConfig.RangeField(
125 dtype=float,
126 doc="Initially exclude data points with a variance that are more than a factor of this from being"
127 " linear in the positive direction, from the PTC fit. Note that these points will also be"
128 " excluded from the non-linearity fit. This is done before the iterative outlier rejection,"
129 " to allow an accurate determination of the sigmas for said iterative fit.",
130 default=0.12,
131 min=0.0,
132 max=1.0,
133 )
134 initialNonLinearityExclusionThresholdNegative = pexConfig.RangeField(
135 dtype=float,
136 doc="Initially exclude data points with a variance that are more than a factor of this from being"
137 " linear in the negative direction, from the PTC fit. Note that these points will also be"
138 " excluded from the non-linearity fit. This is done before the iterative outlier rejection,"
139 " to allow an accurate determination of the sigmas for said iterative fit.",
140 default=0.25,
141 min=0.0,
142 max=1.0,
143 )
144 sigmaCutPtcOutliers = pexConfig.Field(
145 dtype=float,
146 doc="Sigma cut for outlier rejection in PTC.",
147 default=5.0,
148 )
149 maskNameList = pexConfig.ListField(
150 dtype=str,
151 doc="Mask list to exclude from statistics calculations.",
152 default=['SUSPECT', 'BAD', 'NO_DATA'],
153 )
154 nSigmaClipPtc = pexConfig.Field(
155 dtype=float,
156 doc="Sigma cut for afwMath.StatisticsControl()",
157 default=5.5,
158 )
159 nIterSigmaClipPtc = pexConfig.Field(
160 dtype=int,
161 doc="Number of sigma-clipping iterations for afwMath.StatisticsControl()",
162 default=1,
163 )
164 maxIterationsPtcOutliers = pexConfig.Field(
165 dtype=int,
166 doc="Maximum number of iterations for outlier rejection in PTC.",
167 default=2,
168 )
169 doFitBootstrap = pexConfig.Field(
170 dtype=bool,
171 doc="Use bootstrap for the PTC fit parameters and errors?.",
172 default=False,
173 )
174 maxAduForLookupTableLinearizer = pexConfig.Field(
175 dtype=int,
176 doc="Maximum DN value for the LookupTable linearizer.",
177 default=2**18,
178 )
179 instrumentName = pexConfig.Field(
180 dtype=str,
181 doc="Instrument name.",
182 default='',
183 )
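# A minimal configuration sketch (values are illustrative, not recommended defaults)
# showing how the fields above can be overridden before running the task.
def _exampleConfigOverride():
    """Illustrative only: override a few MeasurePhotonTransferCurveTaskConfig fields."""
    config = MeasurePhotonTransferCurveTaskConfig()
    config.ptcFitType = "EXPAPPROXIMATION"  # Astier+19 Eq. 16 instead of a polynomial
    config.doCreateLinearizer = True
    config.linearizerType = "LINEARIZEPOLYNOMIAL"
    config.maximumRangeCovariancesAstier = 8
    config.validate()  # pexConfig checks ChoiceField/RangeField constraints
    return config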
186@dataclass
187class LinearityResidualsAndLinearizersDataset:
188 """A simple class to hold the output from the
189 `calculateLinearityResidualAndLinearizers` function.
190 """
191 # Normalized coefficients for polynomial NL correction
192 polynomialLinearizerCoefficients: list
193 # Normalized coefficient for quadratic polynomial NL correction (c0)
194 quadraticPolynomialLinearizerCoefficient: float
195 # LUT array row for the amplifier at hand
196 linearizerTableRow: list
197 meanSignalVsTimePolyFitPars: list
198 meanSignalVsTimePolyFitParsErr: list
199 meanSignalVsTimePolyFitReducedChiSq: float
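# A small sketch (dummy values, illustrative only) of how this container is filled;
# it mirrors the positional construction used in calculateLinearityResidualAndLinearizers.
def _exampleLinearityDataset():
    """Illustrative only: populate the dataclass with placeholder values."""
    return LinearityResidualsAndLinearizersDataset(
        polynomialLinearizerCoefficients=[-10., -1., -1e-9],
        quadraticPolynomialLinearizerCoefficient=-1e-9,
        linearizerTableRow=[0.0]*10,
        meanSignalVsTimePolyFitPars=[10., 1000., 1e-3],
        meanSignalVsTimePolyFitParsErr=[0.1, 1., 1e-5],
        meanSignalVsTimePolyFitReducedChiSq=1.2,
    )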
202class PhotonTransferCurveDataset:
203 """A simple class to hold the output data from the PTC task.
205 The dataset is made up of a dictionary for each item, keyed by the
206 amplifiers' names, which must be supplied at construction time.
208 New items cannot be added to the class, to prevent accidentally assigning to the
209 wrong property, and the class can be frozen if desired.
211 inputVisitPairs records the visits used to produce the data.
212 When fitPtc() or fitCovariancesAstier() is run, a mask is built up, which is by definition
213 always the same length as inputVisitPairs, rawExpTimes, rawMeans
214 and rawVars, and is a list of bools, which are incrementally set to False
215 as points are discarded from the fits.
217 PTC fit parameters for polynomials are stored in a list in ascending order
218 of polynomial term, i.e. par[0]*x^0 + par[1]*x + par[2]*x^2 etc
219 with the length of the list corresponding to the order of the polynomial
220 plus one.
222 Parameters
223 ----------
224 ampNames : `list`
225 List with the names of the amplifiers of the detector at hand.
227 ptcFitType : `str`
228 Type of model fitted to the PTC: "POLYNOMIAL", "EXPAPPROXIMATION", or "FULLCOVARIANCE".
230 Returns
231 -------
232 `lsst.cp.pipe.ptc.PhotonTransferCurveDataset`
233 Output dataset from MeasurePhotonTransferCurveTask.
234 """
236 def __init__(self, ampNames, ptcFitType):
237 # add items to __dict__ directly because __setattr__ is overridden
239 # instance variables
240 self.__dict__["ptcFitType"] = ptcFitType
241 self.__dict__["ampNames"] = ampNames
242 self.__dict__["badAmps"] = []
244 # raw data variables
245 # visitMask is the mask produced after outlier rejection. The mask produced by "FULLCOVARIANCE"
246 # may differ from the one produced in the other two PTC fit types.
247 self.__dict__["inputVisitPairs"] = {ampName: [] for ampName in ampNames}
248 self.__dict__["visitMask"] = {ampName: [] for ampName in ampNames}
249 self.__dict__["rawExpTimes"] = {ampName: [] for ampName in ampNames}
250 self.__dict__["rawMeans"] = {ampName: [] for ampName in ampNames}
251 self.__dict__["rawVars"] = {ampName: [] for ampName in ampNames}
253 # Gain and noise
254 self.__dict__["gain"] = {ampName: -1. for ampName in ampNames}
255 self.__dict__["gainErr"] = {ampName: -1. for ampName in ampNames}
256 self.__dict__["noise"] = {ampName: -1. for ampName in ampNames}
257 self.__dict__["noiseErr"] = {ampName: -1. for ampName in ampNames}
259 # if ptcFitType in ["POLYNOMIAL", "EXPAPPROXIMATION"]
260 # fit information
261 self.__dict__["ptcFitPars"] = {ampName: [] for ampName in ampNames}
262 self.__dict__["ptcFitParsError"] = {ampName: [] for ampName in ampNames}
263 self.__dict__["ptcFitReducedChiSquared"] = {ampName: [] for ampName in ampNames}
265 # if ptcFitType in ["FULLCOVARIANCE"]
266 # "covariancesTuple" is a numpy recarray with entries of the form
267 # ['mu', 'i', 'j', 'var', 'cov', 'npix', 'ext', 'expTime', 'ampName']
268 # "covariancesFits" has CovFit objects that fit the measured covariances to Eq. 20 of Astier+19.
269 # In "covariancesFitsWithNoB", "b"=0 in the model described by Eq. 20 of Astier+19.
270 self.__dict__["covariancesTuple"] = {ampName: [] for ampName in ampNames}
271 self.__dict__["covariancesFitsWithNoB"] = {ampName: [] for ampName in ampNames}
272 self.__dict__["covariancesFits"] = {ampName: [] for ampName in ampNames}
273 self.__dict__["aMatrix"] = {ampName: [] for ampName in ampNames}
274 self.__dict__["bMatrix"] = {ampName: [] for ampName in ampNames}
276 # "final" means that the "raw" vectors above had "visitMask" applied.
277 self.__dict__["finalVars"] = {ampName: [] for ampName in ampNames}
278 self.__dict__["finalModelVars"] = {ampName: [] for ampName in ampNames}
279 self.__dict__["finalMeans"] = {ampName: [] for ampName in ampNames}
281 def __setattr__(self, attribute, value):
282 """Protect class attributes"""
283 if attribute not in self.__dict__:
284 raise AttributeError(f"{attribute} is not already a member of PhotonTransferCurveDataset, which"
285 " does not support setting of new attributes.")
286 else:
287 self.__dict__[attribute] = value
289 def getVisitsUsed(self, ampName):
290 """Get the visits used, i.e. not discarded, for a given amp.
292 If no mask has been created yet, all visits are returned.
293 """
294 if len(self.visitMask[ampName]) == 0:
295 return self.inputVisitPairs[ampName]
297 # if the mask exists it had better be the same length as the visitPairs
298 assert len(self.visitMask[ampName]) == len(self.inputVisitPairs[ampName])
300 pairs = self.inputVisitPairs[ampName]
301 mask = self.visitMask[ampName]
302 # cast to bool required because numpy
303 return [(v1, v2) for ((v1, v2), m) in zip(pairs, mask) if bool(m) is True]
305 def getGoodAmps(self):
306 return [amp for amp in self.ampNames if amp not in self.badAmps]
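# A minimal usage sketch of PhotonTransferCurveDataset (hypothetical amplifier names,
# not tied to any real detector), illustrating the frozen-attribute behaviour.
def _examplePtcDataset():
    """Illustrative only: build a dataset and exercise its attribute protection."""
    dataset = PhotonTransferCurveDataset(ampNames=["C00", "C01"], ptcFitType="POLYNOMIAL")
    dataset.rawMeans["C00"].append(1000.0)  # existing per-amp lists can be filled in
    dataset.inputVisitPairs["C00"].append((123, 124))
    try:
        dataset.newAttribute = 42  # rejected by __setattr__
    except AttributeError:
        pass
    return dataset.getVisitsUsed("C00")  # no mask yet, so all input pairs are returned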
309class MeasurePhotonTransferCurveTask(pipeBase.CmdLineTask):
310 """A class to calculate, fit, and plot a PTC from a set of flat pairs.
312 The Photon Transfer Curve (var(signal) vs mean(signal)) is a standard tool
313 used in the characterization of astronomical detectors (e.g., Janesick 2001,
314 Janesick 2007). If ptcFitType is "EXPAPPROXIMATION" or "POLYNOMIAL", this task calculates the
315 PTC from a series of pairs of flat-field images; each pair taken at identical exposure
316 times. The difference image of each pair is formed to eliminate fixed pattern noise,
317 and then the variance of the difference image and the mean of the average image
318 are used to produce the PTC. An n-degree polynomial or the approximation in Equation
319 16 of Astier+19 ("The Shape of the Photon Transfer Curve of CCD sensors",
320 arXiv:1905.08677) can be fitted to the PTC curve. These models include
321 parameters such as the gain (e/DN) and readout noise.
323 Linearizers to correct for signal-chain non-linearity are also calculated.
324 The `Linearizer` class, in general, can support per-amp linearizers, but in this
325 task this is not supported.
327 If ptcFitType is "FULLCOVARIANCE", the covariances of the difference images are calculated via the
328 DFT methods described in Astier+19 and the variances for the PTC are given by the cov[0,0] elements
329 at each signal level. The full model in Equation 20 of Astier+19 is fit to the PTC to get the gain
330 and the noise.
332 Parameters
333 ----------
335 *args: `list`
336 Positional arguments passed to the Task constructor. None used at this
337 time.
338 **kwargs: `dict`
339 Keyword arguments passed on to the Task constructor. None used at this
340 time.
342 """
344 RunnerClass = PairedVisitListTaskRunner
345 ConfigClass = MeasurePhotonTransferCurveTaskConfig
346 _DefaultName = "measurePhotonTransferCurve"
348 def __init__(self, *args, **kwargs):
349 pipeBase.CmdLineTask.__init__(self, *args, **kwargs)
350 plt.interactive(False) # stop windows popping up when plotting. When headless, use 'agg' backend too
351 self.config.validate()
352 self.config.freeze()
354 @classmethod
355 def _makeArgumentParser(cls):
356 """Augment argument parser for the MeasurePhotonTransferCurveTask."""
357 parser = pipeBase.ArgumentParser(name=cls._DefaultName)
358 parser.add_argument("--visit-pairs", dest="visitPairs", nargs="*",
359 help="Visit pairs to use. Each pair must be of the form INT,INT e.g. 123,456")
360 parser.add_id_argument("--id", datasetType="photonTransferCurveDataset",
361 ContainerClass=NonexistentDatasetTaskDataIdContainer,
362 help="The ccds to use, e.g. --id ccd=0..100")
363 return parser
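    # Example command-line invocation (repository path, rerun name, and visit numbers are
    # illustrative only; the executable name assumes the standard cp_pipe bin script):
    #   measurePhotonTransferCurve.py REPO --rerun ptc --id ccd=0 \
    #       --visit-pairs 123,124 125,126 -c ptcFitType=EXPAPPROXIMATION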
365 @pipeBase.timeMethod
366 def runDataRef(self, dataRef, visitPairs):
367 """Run the Photon Transfer Curve (PTC) measurement task.
369 For a dataRef (which is each detector here),
370 and given a list of visit pairs (postISR) at different exposure times,
371 measure the PTC.
373 Parameters
374 ----------
375 dataRef : `lsst.daf.persistence.ButlerDataRef`
376 dataRef for the detector for the visits to be fit.
378 visitPairs : `iterable` of `tuple` of `int`
379 Pairs of visit numbers to be processed together
380 """
382 # setup necessary objects
383 detNum = dataRef.dataId[self.config.ccdKey]
384 detector = dataRef.get('camera')[dataRef.dataId[self.config.ccdKey]]
385 # expand some missing fields that we need for lsstCam. This is a work-around
386 # for Gen2 problems that I (RHL) don't feel like solving. The calibs pipelines
387 # (which inherit from CalibTask) use addMissingKeys() to do basically the same thing
388 #
389 # Basically, the butler's trying to look up the fields in `raw_visit` which won't work
390 for name in dataRef.getButler().getKeys('bias'):
391 if name not in dataRef.dataId:
392 try:
393 dataRef.dataId[name] = \
394 dataRef.getButler().queryMetadata('raw', [name], detector=detNum)[0]
395 except OperationalError:
396 pass
398 amps = detector.getAmplifiers()
399 ampNames = [amp.getName() for amp in amps]
400 datasetPtc = PhotonTransferCurveDataset(ampNames, self.config.ptcFitType)
401 self.log.info('Measuring PTC using %s visits for detector %s' % (visitPairs, detector.getId()))
403 tupleRecords = []
404 allTags = []
405 for (v1, v2) in visitPairs:
406 # Get postISR exposures.
407 dataRef.dataId['expId'] = v1
408 exp1 = dataRef.get("postISRCCD", immediate=True)
409 dataRef.dataId['expId'] = v2
410 exp2 = dataRef.get("postISRCCD", immediate=True)
411 del dataRef.dataId['expId']
413 checkExpLengthEqual(exp1, exp2, v1, v2, raiseWithMessage=True)
414 expTime = exp1.getInfo().getVisitInfo().getExposureTime()
415 tupleRows = []
416 nAmpsNan = 0
417 for ampNumber, amp in enumerate(detector):
418 ampName = amp.getName()
419 # covAstier: (i, j, var (cov[0,0]), cov, npix)
420 doRealSpace = self.config.covAstierRealSpace
421 muDiff, varDiff, covAstier = self.measureMeanVarCov(exp1, exp2, region=amp.getBBox(),
422 covAstierRealSpace=doRealSpace)
423 if np.isnan(muDiff) or np.isnan(varDiff) or (covAstier is None):
424 msg = (f"NaN mean or var, or None cov in amp {ampNumber} in visit pair {v1}, {v2} "
425 "of detector {detNum}.")
426 self.log.warn(msg)
427 nAmpsNan += 1
428 continue
429 tags = ['mu', 'i', 'j', 'var', 'cov', 'npix', 'ext', 'expTime', 'ampName']
430 if (muDiff <= self.config.minMeanSignal) or (muDiff >= self.config.maxMeanSignal):
431 continue
432 datasetPtc.rawExpTimes[ampName].append(expTime)
433 datasetPtc.rawMeans[ampName].append(muDiff)
434 datasetPtc.rawVars[ampName].append(varDiff)
435 datasetPtc.inputVisitPairs[ampName].append((v1, v2))
437 tupleRows += [(muDiff, ) + covRow + (ampNumber, expTime, ampName) for covRow in covAstier]
438 if nAmpsNan == len(ampNames):
439 msg = f"NaN mean in all amps of visit pair {v1}, {v2} of detector {detNum}."
440 self.log.warn(msg)
441 continue
442 allTags += tags
443 tupleRecords += tupleRows
444 covariancesWithTags = np.core.records.fromrecords(tupleRecords, names=allTags)
446 if self.config.ptcFitType in ["FULLCOVARIANCE", ]:
447 # Calculate covariances and fit them, including the PTC, to Astier+19 full model (Eq. 20)
448 datasetPtc = self.fitCovariancesAstier(datasetPtc, covariancesWithTags)
449 elif self.config.ptcFitType in ["EXPAPPROXIMATION", "POLYNOMIAL"]:
450 # Fit the PTC to a polynomial or to Astier+19 exponential approximation (Eq. 16)
451 # Fill up PhotonTransferCurveDataset object.
452 datasetPtc = self.fitPtc(datasetPtc, self.config.ptcFitType)
454 # Fit a polynomial to calculate non-linearity and persist linearizer.
455 if self.config.doCreateLinearizer:
456 numberAmps = len(amps)
457 numberAduValues = self.config.maxAduForLookupTableLinearizer
458 lookupTableArray = np.zeros((numberAmps, numberAduValues), dtype=np.float32)
460 # Fit (non)linearity of signal vs time curve.
461 # Fill up PhotonTransferCurveDataset object.
462 # Fill up array for LUT linearizer (tableArray).
463 # Produce coefficients for Polynomial and Squared linearizers.
464 # Build linearizer objects.
465 linearizer = self.fitNonLinearityAndBuildLinearizers(datasetPtc, detector,
466 tableArray=lookupTableArray,
467 log=self.log)
469 if self.config.linearizerType == "LINEARIZEPOLYNOMIAL":
470 linDataType = 'linearizePolynomial'
471 linMsg = "polynomial (coefficients for a polynomial correction)."
472 elif self.config.linearizerType == "LINEARIZESQUARED":
473 linDataType = 'linearizePolynomial'
474 linMsg = "squared (c0, derived from k_i coefficients of a polynomial fit)."
475 elif self.config.linearizerType == "LOOKUPTABLE":
476 linDataType = 'linearizePolynomial'
477 linMsg = "lookup table (linear component of polynomial fit)."
478 else:
479 raise RuntimeError(f"Invalid config.linearizerType {self.config.linearizerType}. "
480 "Supported: 'LOOKUPTABLE', 'LINEARIZESQUARED', or 'LINEARIZEPOLYNOMIAL'")
482 butler = dataRef.getButler()
483 self.log.info(f"Writing linearizer: \n {linMsg}")
485 detName = detector.getName()
486 now = datetime.datetime.utcnow()
487 calibDate = now.strftime("%Y-%m-%d")
489 butler.put(linearizer, datasetType=linDataType, dataId={'detector': detNum,
490 'detectorName': detName, 'calibDate': calibDate})
492 self.log.info(f"Writing PTC data to {dataRef.getUri(write=True)}")
493 dataRef.put(datasetPtc, datasetType="photonTransferCurveDataset")
495 return pipeBase.Struct(exitStatus=0)
497 def fitCovariancesAstier(self, dataset, covariancesWithTagsArray):
498 """Fit measured flat covariances to full model in Astier+19.
500 Parameters
501 ----------
502 dataset : `lsst.cp.pipe.ptc.PhotonTransferCurveDataset`
503 The dataset containing information such as the means, variances and exposure times.
505 covariancesWithTagsArray : `numpy.recarray`
506 Tuple with at least (mu, cov, var, i, j, npix), where:
507 mu : 0.5*(mu1 + mu2), where:
508 mu1: mean value of flat1
509 mu2: mean value of flat2
510 cov: covariance value at lag(i, j)
511 var: variance(covariance value at lag(0, 0))
512 i: lag dimension
513 j: lag dimension
514 npix: number of pixels used for covariance calculation.
516 Returns
517 -------
518 dataset: `lsst.cp.pipe.ptc.PhotonTransferCurveDataset`
519 This is the same dataset as the input parameter, however, it has been modified
520 to include information such as the fit vectors and the fit parameters. See
521 the class `PhotonTransferCurveDataset`.
522 """
524 covFits, covFitsNoB = fitData(covariancesWithTagsArray, maxMu=self.config.maxMeanSignal,
525 r=self.config.maximumRangeCovariancesAstier,
526 nSigmaFullFit=self.config.sigmaClipFullFitCovariancesAstier,
527 maxIterFullFit=self.config.maxIterFullFitCovariancesAstier)
529 dataset.covariancesTuple = covariancesWithTagsArray
530 dataset.covariancesFits = covFits
531 dataset.covariancesFitsWithNoB = covFitsNoB
532 dataset = self.getOutputPtcDataCovAstier(dataset, covFits)
534 return dataset
536 def getOutputPtcDataCovAstier(self, dataset, covFits):
537 """Get output data for PhotonTransferCurveCovAstierDataset from CovFit objects.
539 Parameters
540 ----------
541 dataset : `lsst.cp.pipe.ptc.PhotonTransferCurveDataset`
542 The dataset containing information such as the means, variances and exposure times.
544 covFits: `dict`
545 Dictionary of CovFit objects, with amp names as keys.
547 Returns
548 -------
549 dataset : `lsst.cp.pipe.ptc.PhotonTransferCurveDataset`
550 This is the same dataset as the input parameter, however, it has been modified
551 to include extra information such as the mask 1D array, gains, readout noise, measured signal,
552 measured variance, modeled variance, a, and b coefficient matrices (see Astier+19) per amplifier.
553 See the class `PhotonTransferCurveDataset`.
554 """
556 for i, amp in enumerate(covFits):
557 fit = covFits[amp]
558 (meanVecFinal, varVecFinal, varVecModel,
559 wc, varMask) = fit.getFitData(0, 0, divideByMu=False, returnMasked=True)
560 gain = fit.getGain()
561 dataset.visitMask[amp] = varMask
562 dataset.gain[amp] = gain
563 dataset.gainErr[amp] = fit.getGainErr()
564 dataset.noise[amp] = np.sqrt(np.fabs(fit.getRon()))
565 dataset.noiseErr[amp] = fit.getRonErr()
566 dataset.finalVars[amp].append(varVecFinal/(gain**2))
567 dataset.finalModelVars[amp].append(varVecModel/(gain**2))
568 dataset.finalMeans[amp].append(meanVecFinal/gain)
569 dataset.aMatrix[amp].append(fit.getA())
570 dataset.bMatrix[amp].append(fit.getB())
572 return dataset
574 def measureMeanVarCov(self, exposure1, exposure2, region=None, covAstierRealSpace=False):
575 """Calculate the mean of each of two exposures and the variance and covariance of their difference.
577 The variance is calculated via afwMath, and the covariance via the methods in Astier+19 (appendix A).
578 In theory, var = covariance[0,0]. This should be validated, and in the future, we may decide to just
579 keep one (covariance).
581 Parameters
582 ----------
583 exposure1 : `lsst.afw.image.exposure.exposure.ExposureF`
584 First exposure of flat field pair.
586 exposure2 : `lsst.afw.image.exposure.exposure.ExposureF`
587 Second exposure of flat field pair.
589 region : `lsst.geom.Box2I`, optional
590 Region of each exposure where to perform the calculations (e.g, an amplifier).
592 covAstierRealSpace : `bool`, optional
593 Should the covariances in Astier+19 be calculated in real space or via FFT?
594 See Appendix A of Astier+19.
596 Returns
597 -------
598 mu : `float` or `NaN`
599 0.5*(mu1 + mu2), where mu1 and mu2 are the clipped means of the regions in
600 both exposures. If either mu1 or mu2 is NaN, the returned value is NaN.
602 varDiff : `float` or `NaN`
603 Half of the clipped variance of the difference of the regions in the two input
604 exposures. If either mu1 or mu2 is NaN, the returned value is NaN.
606 covDiffAstier : `list` or `None`
607 List with tuples of the form (dx, dy, var, cov, npix), where:
608 dx : `int`
609 Lag in x
610 dy : `int`
611 Lag in y
612 var : `float`
613 Variance at (dx, dy).
614 cov : `float`
615 Covariance at (dx, dy).
616 nPix : `int`
617 Number of pixel pairs used to evaluate var and cov.
618 If either mu1 or mu2 is NaN, the returned value is None.
619 """
621 if region is not None:
622 im1Area = exposure1.maskedImage[region]
623 im2Area = exposure2.maskedImage[region]
624 else:
625 im1Area = exposure1.maskedImage
626 im2Area = exposure2.maskedImage
628 im1Area = afwMath.binImage(im1Area, self.config.binSize)
629 im2Area = afwMath.binImage(im2Area, self.config.binSize)
631 im1MaskVal = exposure1.getMask().getPlaneBitMask(self.config.maskNameList)
632 im1StatsCtrl = afwMath.StatisticsControl(self.config.nSigmaClipPtc,
633 self.config.nIterSigmaClipPtc,
634 im1MaskVal)
635 im1StatsCtrl.setNanSafe(True)
636 im1StatsCtrl.setAndMask(im1MaskVal)
638 im2MaskVal = exposure2.getMask().getPlaneBitMask(self.config.maskNameList)
639 im2StatsCtrl = afwMath.StatisticsControl(self.config.nSigmaClipPtc,
640 self.config.nIterSigmaClipPtc,
641 im2MaskVal)
642 im2StatsCtrl.setNanSafe(True)
643 im2StatsCtrl.setAndMask(im2MaskVal)
645 # Clipped mean of images; then average of mean.
646 mu1 = afwMath.makeStatistics(im1Area, afwMath.MEANCLIP, im1StatsCtrl).getValue()
647 mu2 = afwMath.makeStatistics(im2Area, afwMath.MEANCLIP, im2StatsCtrl).getValue()
648 if np.isnan(mu1) or np.isnan(mu2):
649 return np.nan, np.nan, None
650 mu = 0.5*(mu1 + mu2)
652 # Take difference of pairs
653 # symmetric formula: diff = (mu2*im1-mu1*im2)/(0.5*(mu1+mu2))
654 temp = im2Area.clone()
655 temp *= mu1
656 diffIm = im1Area.clone()
657 diffIm *= mu2
658 diffIm -= temp
659 diffIm /= mu
661 diffImMaskVal = diffIm.getMask().getPlaneBitMask(self.config.maskNameList)
662 diffImStatsCtrl = afwMath.StatisticsControl(self.config.nSigmaClipPtc,
663 self.config.nIterSigmaClipPtc,
664 diffImMaskVal)
665 diffImStatsCtrl.setNanSafe(True)
666 diffImStatsCtrl.setAndMask(diffImMaskVal)
668 varDiff = 0.5*(afwMath.makeStatistics(diffIm, afwMath.VARIANCECLIP, diffImStatsCtrl).getValue())
670 # Get the mask and identify good pixels as '1', and the rest as '0'.
671 w1 = np.where(im1Area.getMask().getArray() == 0, 1, 0)
672 w2 = np.where(im2Area.getMask().getArray() == 0, 1, 0)
674 w12 = w1*w2
675 wDiff = np.where(diffIm.getMask().getArray() == 0, 1, 0)
676 w = w12*wDiff
678 maxRangeCov = self.config.maximumRangeCovariancesAstier
679 if covAstierRealSpace:
680 covDiffAstier = computeCovDirect(diffIm.getImage().getArray(), w, maxRangeCov)
681 else:
682 shapeDiff = diffIm.getImage().getArray().shape
683 fftShape = (fftSize(shapeDiff[0] + maxRangeCov), fftSize(shapeDiff[1]+maxRangeCov))
684 c = CovFft(diffIm.getImage().getArray(), w, fftShape, maxRangeCov)
685 covDiffAstier = c.reportCovFft(maxRangeCov)
687 return mu, varDiff, covDiffAstier
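    # A compact numpy-only sketch (illustrative, not part of the Task API) of the
    # statistics computed above, without masking, clipping, or binning: the symmetric
    # difference diff = (mu2*im1 - mu1*im2)/(0.5*(mu1 + mu2)) removes fixed pattern
    # noise, and half its variance estimates the PTC variance at mean 0.5*(mu1 + mu2).
    @staticmethod
    def _exampleMeanVarSketch(array1, array2):
        """Illustrative only: unclipped version of the mean/variance computed above."""
        mu1, mu2 = np.mean(array1), np.mean(array2)
        mu = 0.5*(mu1 + mu2)
        diffIm = (mu2*array1 - mu1*array2)/mu
        return mu, 0.5*np.var(diffIm)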
689 def computeCovDirect(self, diffImage, weightImage, maxRange):
690 """Compute covariances of diffImage in real space.
692 For lags larger than ~25, it is slower than the FFT way.
693 Taken from https://github.com/PierreAstier/bfptc/
695 Parameters
696 ----------
697 diffImage : `numpy.array`
698 Image to compute the covariance of.
700 weightImage : `numpy.array`
701 Weight image of diffImage (1's and 0's for good and bad pixels, respectively).
703 maxRange : `int`
704 Last index of the covariance to be computed.
706 Returns
707 -------
708 outList : `list`
709 List with tuples of the form (dx, dy, var, cov, npix), where:
710 dx : `int`
711 Lag in x
712 dy : `int`
713 Lag in y
714 var : `float`
715 Variance at (dx, dy).
716 cov : `float`
717 Covariance at (dx, dy).
718 nPix : `int`
719 Number of pixel pairs used to evaluate var and cov.
720 """
721 outList = []
722 var = 0
723 # (dy,dx) = (0,0) has to be first
724 for dy in range(maxRange + 1):
725 for dx in range(0, maxRange + 1):
726 if (dx*dy > 0):
727 cov1, nPix1 = self.covDirectValue(diffImage, weightImage, dx, dy)
728 cov2, nPix2 = self.covDirectValue(diffImage, weightImage, dx, -dy)
729 cov = 0.5*(cov1 + cov2)
730 nPix = nPix1 + nPix2
731 else:
732 cov, nPix = self.covDirectValue(diffImage, weightImage, dx, dy)
733 if (dx == 0 and dy == 0):
734 var = cov
735 outList.append((dx, dy, var, cov, nPix))
737 return outList
739 def covDirectValue(self, diffImage, weightImage, dx, dy):
740 """Compute covariances of diffImage in real space at lag (dx, dy).
742 Taken from https://github.com/PierreAstier/bfptc/ (c.f., appendix of Astier+19).
744 Parameters
745 ----------
746 diffImage : `numpy.array`
747 Image to compute the covariance of.
749 weightImage : `numpy.array`
750 Weight image of diffImage (1's and 0's for good and bad pixels, respectively).
752 dx : `int`
753 Lag in x.
755 dy : `int`
756 Lag in y.
758 Returns
759 -------
760 cov : `float`
761 Covariance at (dx, dy)
763 nPix : `int`
764 Number of pixel pairs used to evaluate var and cov.
765 """
766 (nCols, nRows) = diffImage.shape
767 # switching both signs does not change anything:
768 # it just swaps im1 and im2 below
769 if (dx < 0):
770 (dx, dy) = (-dx, -dy)
771 # now, we have dx >0. We have to distinguish two cases
772 # depending on the sign of dy
773 if dy >= 0:
774 im1 = diffImage[dy:, dx:]
775 w1 = weightImage[dy:, dx:]
776 im2 = diffImage[:nCols - dy, :nRows - dx]
777 w2 = weightImage[:nCols - dy, :nRows - dx]
778 else:
779 im1 = diffImage[:nCols + dy, dx:]
780 w1 = weightImage[:nCols + dy, dx:]
781 im2 = diffImage[-dy:, :nRows - dx]
782 w2 = weightImage[-dy:, :nRows - dx]
783 # use the same mask for all 3 calculations
784 wAll = w1*w2
785 # do not use mean() because weightImage=0 pixels would then count
786 nPix = wAll.sum()
787 im1TimesW = im1*wAll
788 s1 = im1TimesW.sum()/nPix
789 s2 = (im2*wAll).sum()/nPix
790 p = (im1TimesW*im2).sum()/nPix
791 cov = p - s1*s2
793 return cov, nPix
795 def fitNonLinearityAndBuildLinearizers(self, datasetPtc, detector, tableArray=None, log=None):
796 """Fit non-linearity function and build linearizer objects.
798 Parameters
799 ----------
800 datasetPtc : `lsst.cp.pipe.ptc.PhotonTransferCurveDataset`
801 The dataset containing information such as the means, variances and exposure times.
804 detector : `lsst.afw.cameraGeom.Detector`
805 Detector object.
807 tableArray : `np.array`, optional
808 Optional. Look-up table array with size rows=nAmps and columns=DN values.
809 It will be modified in-place if supplied.
811 log : `lsst.log.Log`, optional
812 Logger to handle messages.
814 Returns
815 -------
816 linearizer : `lsst.ip.isr.Linearizer`
817 Linearizer object
818 """
820 # Fit NonLinearity
821 datasetNonLinearity = self.fitNonLinearity(datasetPtc, tableArray=tableArray)
823 # Produce linearizer
824 now = datetime.datetime.utcnow()
825 calibDate = now.strftime("%Y-%m-%d")
826 linType = self.config.linearizerType
828 if linType == "LOOKUPTABLE":
829 tableArray = tableArray
830 else:
831 tableArray = None
833 linearizer = self.buildLinearizerObject(datasetNonLinearity, detector, calibDate, linType,
834 instruName=self.config.instrumentName,
835 tableArray=tableArray,
836 log=log)
838 return linearizer
840 def fitNonLinearity(self, datasetPtc, tableArray=None):
841 """Fit a polynomial to signal vs effective time curve to calculate linearity and residuals.
843 Parameters
844 ----------
845 datasetPtc : `lsst.cp.pipe.ptc.PhotonTransferCurveDataset`
846 The dataset containing the means, variances and exposure times.
848 tableArray : `np.array`
849 Optional. Look-up table array with size rows=nAmps and columns=DN values.
850 It will be modified in-place if supplied.
852 Returns
853 -------
854 datasetNonLinearity : `dict`
855 Dictionary of `lsst.cp.pipe.ptc.LinearityResidualsAndLinearizersDataset`
856 dataclasses. Each one holds the output of `calculateLinearityResidualAndLinearizers` per
857 amplifier.
858 """
859 datasetNonLinearity = {ampName: [] for ampName in datasetPtc.ampNames}
860 for i, ampName in enumerate(datasetPtc.ampNames):
861 # If a mask is not found, use all points.
862 if (len(datasetPtc.visitMask[ampName]) == 0):
863 self.log.warn(f"Mask not found for {ampName} in non-linearity fit. Using all points.")
864 mask = np.repeat(True, len(datasetPtc.rawExpTimes[ampName]))
865 else:
866 mask = datasetPtc.visitMask[ampName]
868 timeVecFinal = np.array(datasetPtc.rawExpTimes[ampName])[mask]
869 meanVecFinal = np.array(datasetPtc.rawMeans[ampName])[mask]
871 # Non-linearity residuals (deviation of the mean vs time curve from its linear part).
872 # The polynomial degree of the fit is set by config.polynomialFitDegreeNonLinearity.
873 datasetLinRes = self.calculateLinearityResidualAndLinearizers(timeVecFinal, meanVecFinal)
875 # LinearizerLookupTable
876 if tableArray is not None:
877 tableArray[i, :] = datasetLinRes.linearizerTableRow
879 datasetNonLinearity[ampName] = datasetLinRes
881 return datasetNonLinearity
883 def calculateLinearityResidualAndLinearizers(self, exposureTimeVector, meanSignalVector):
884 """Calculate linearity residual and fit an n-order polynomial to the mean vs time curve
885 to produce corrections (deviation from linear part of polynomial) for a particular amplifier
886 to populate LinearizeLookupTable.
887 Use the coefficients of this fit to calculate the correction coefficients for LinearizePolynomial
888 and LinearizeSquared.
890 Parameters
891 ----------
893 exposureTimeVector: `list` of `float`
894 List of exposure times for each flat pair
896 meanSignalVector: `list` of `float`
897 List of mean signals from the difference image of flat pairs
899 Returns
900 -------
901 dataset : `lsst.cp.pipe.ptc.LinearityResidualsAndLinearizersDataset`
902 The dataset containing the fit parameters, the NL correction coefficients, and the
903 LUT row for the amplifier at hand.
905 Notes
906 -----
907 dataset members:
909 dataset.polynomialLinearizerCoefficients : `list` of `float`
910 Coefficients for LinearizePolynomial, where corrImage = uncorrImage + sum_i c_i uncorrImage^(2 +
911 i).
912 c_(j-2) = -k_j/(k_1^j) with units DN^(1-j) (c.f., Eq. 37 of 2003.05978). The units of k_j are
913 DN/t^j, and they are fit from meanSignalVector = k0 + k1*exposureTimeVector +
914 k2*exposureTimeVector^2 + ... + kn*exposureTimeVector^n, with
915 n = "polynomialFitDegreeNonLinearity". k_0 and k_1 and degenerate with bias level and gain,
916 and are not used by the non-linearity correction. Therefore, j = 2...n in the above expression
917 (see `LinearizePolynomial` class in `linearize.py`.)
919 dataset.quadraticPolynomialLinearizerCoefficient : `float`
920 Coefficient for LinearizeSquared, where corrImage = uncorrImage + c0*uncorrImage^2.
921 c0 = -k2/(k1^2), where k1 and k2 are fit from
922 meanSignalVector = k0 + k1*exposureTimeVector + k2*exposureTimeVector^2 +...
923 + kn*exposureTimeVector^n, with n = "polynomialFitDegreeNonLinearity".
925 dataset.linearizerTableRow : `list` of `float`
926 One dimensional array with deviation from linear part of n-order polynomial fit
927 to mean vs time curve. This array will be one row (for the particular amplifier at hand)
928 of the table array for LinearizeLookupTable.
930 dataset.meanSignalVsTimePolyFitPars : `list` of `float`
931 Parameters from n-order polynomial fit to meanSignalVector vs exposureTimeVector.
933 dataset.meanSignalVsTimePolyFitParsErr : `list` of `float`
934 Errors of the parameters from the n-order polynomial fit to meanSignalVector vs exposureTimeVector.
936 dataset.meanSignalVsTimePolyFitReducedChiSq : `float`
937 Reduced unweighted chi squared from polynomial fit to meanSignalVector vs exposureTimeVector.
938 """
940 # Lookup table linearizer
941 parsIniNonLinearity = self._initialParsForPolynomial(self.config.polynomialFitDegreeNonLinearity + 1)
942 if self.config.doFitBootstrap:
943 parsFit, parsFitErr, reducedChiSquaredNonLinearityFit = fitBootstrap(parsIniNonLinearity,
944 exposureTimeVector,
945 meanSignalVector,
946 funcPolynomial)
947 else:
948 parsFit, parsFitErr, reducedChiSquaredNonLinearityFit = fitLeastSq(parsIniNonLinearity,
949 exposureTimeVector,
950 meanSignalVector,
951 funcPolynomial)
953 # LinearizeLookupTable:
954 # Use linear part to get time at which signal is maxAduForLookupTableLinearizer DN
955 tMax = (self.config.maxAduForLookupTableLinearizer - parsFit[0])/parsFit[1]
956 timeRange = np.linspace(0, tMax, self.config.maxAduForLookupTableLinearizer)
957 signalIdeal = parsFit[0] + parsFit[1]*timeRange
958 signalUncorrected = funcPolynomial(parsFit, timeRange)
959 linearizerTableRow = signalIdeal - signalUncorrected # LinearizerLookupTable has corrections
960 # LinearizePolynomial and LinearizeSquared:
961 # Check that magnitude of higher order (>= 3) coefficients of the polyFit are small,
962 # i.e., less than threshold = 1e-10 (typical quadratic and cubic coefficients are ~1e-6
963 # and ~1e-12).
964 k1 = parsFit[1]
965 polynomialLinearizerCoefficients = []
966 for i, coefficient in enumerate(parsFit):
967 c = -coefficient/(k1**i)
968 polynomialLinearizerCoefficients.append(c)
969 if i >= 3 and np.fabs(c) > 1e-10:
970 msg = f"Coefficient {c} in polynomial fit larger than threshold 1e-10."
971 self.log.warn(msg)
972 # Coefficient for LinearizeSquared. Called "c0" in linearize.py
973 c0 = polynomialLinearizerCoefficients[2]
975 dataset = LinearityResidualsAndLinearizersDataset([], None, [], [], [], None)
976 dataset.polynomialLinearizerCoefficients = polynomialLinearizerCoefficients
977 dataset.quadraticPolynomialLinearizerCoefficient = c0
978 dataset.linearizerTableRow = linearizerTableRow
979 dataset.meanSignalVsTimePolyFitPars = parsFit
980 dataset.meanSignalVsTimePolyFitParsErr = parsFitErr
981 dataset.meanSignalVsTimePolyFitReducedChiSq = reducedChiSquaredNonLinearityFit
983 return dataset
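    # A worked sketch (made-up k values, not part of the production path) of the
    # coefficient conversions performed above: for meanSignal = k0 + k1*t + k2*t^2,
    # LinearizeSquared uses c0 = -k2/k1^2 and LinearizePolynomial uses
    # c_(j-2) = -k_j/k_1^j for j >= 2.
    @staticmethod
    def _exampleLinearizerCoefficients():
        """Illustrative only: convert polynomial fit parameters to correction coefficients."""
        parsFit = np.array([10., 1000., -1e-3])  # made-up k0 (DN), k1 (DN/s), k2 (DN/s^2)
        k1 = parsFit[1]
        coefficients = [-k/(k1**j) for j, k in enumerate(parsFit)]
        c0 = coefficients[2]  # -k2/k1^2 = 1e-9 for the values above
        return coefficients[2:], c0  # LinearizePolynomial keeps only j >= 2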
985 def buildLinearizerObject(self, datasetNonLinearity, detector, calibDate, linearizerType, instruName='',
986 tableArray=None, log=None):
987 """Build linearizer object to persist.
989 Parameters
990 ----------
991 datasetNonLinearity : `dict`
992 Dictionary of `lsst.cp.pipe.ptc.LinearityResidualsAndLinearizersDataset` objects.
994 detector : `lsst.afw.cameraGeom.Detector`
995 Detector object
997 calibDate : `str`
998 Calibration date (e.g., "YYYY-MM-DD")
1000 linearizerType : `str`
1001 'LOOKUPTABLE', 'LINEARIZESQUARED', or 'LINEARIZEPOLYNOMIAL'
1003 instruName : `str`, optional
1004 Instrument name
1006 tableArray : `np.array`, optional
1007 Look-up table array with size rows=nAmps and columns=DN values
1009 log : `lsst.log.Log`, optional
1010 Logger to handle messages
1012 Returns
1013 -------
1014 linearizer : `lsst.ip.isr.Linearizer`
1015 Linearizer object
1016 """
1017 detName = detector.getName()
1018 detNum = detector.getId()
1019 if linearizerType == "LOOKUPTABLE":
1020 if tableArray is not None:
1021 linearizer = Linearizer(detector=detector, table=tableArray, log=log)
1022 else:
1023 raise RuntimeError("tableArray must be provided when creating a LookupTable linearizer")
1024 elif linearizerType in ("LINEARIZESQUARED", "LINEARIZEPOLYNOMIAL"):
1025 linearizer = Linearizer(log=log)
1026 else:
1027 raise RuntimeError(f"Invalid linearizerType {linearizerType} to build a Linearizer object. "
1028 "Supported: 'LOOKUPTABLE', 'LINEARIZESQUARED', or 'LINEARIZEPOLYNOMIAL'")
1029 for i, amp in enumerate(detector.getAmplifiers()):
1030 ampName = amp.getName()
1031 datasetNonLinAmp = datasetNonLinearity[ampName]
1032 if linearizerType == "LOOKUPTABLE":
1033 linearizer.linearityCoeffs[ampName] = [i, 0]
1034 linearizer.linearityType[ampName] = "LookupTable"
1035 elif linearizerType == "LINEARIZESQUARED":
1036 linearizer.fitParams[ampName] = datasetNonLinAmp.meanSignalVsTimePolyFitPars
1037 linearizer.fitParamsErr[ampName] = datasetNonLinAmp.meanSignalVsTimePolyFitParsErr
1038 linearizer.linearityFitReducedChiSquared[ampName] = (
1039 datasetNonLinAmp.meanSignalVsTimePolyFitReducedChiSq)
1040 linearizer.linearityCoeffs[ampName] = [
1041 datasetNonLinAmp.quadraticPolynomialLinearizerCoefficient]
1042 linearizer.linearityType[ampName] = "Squared"
1043 elif linearizerType == "LINEARIZEPOLYNOMIAL":
1044 linearizer.fitParams[ampName] = datasetNonLinAmp.meanSignalVsTimePolyFitPars
1045 linearizer.fitParamsErr[ampName] = datasetNonLinAmp.meanSignalVsTimePolyFitParsErr
1046 linearizer.linearityFitReducedChiSquared[ampName] = (
1047 datasetNonLinAmp.meanSignalVsTimePolyFitReducedChiSq)
1048 # Slice correction coefficients (starting at 2) for the polynomial linearizer
1049 # (and the squared linearizer above). The first and second are redundant with
1050 # the bias and gain, respectively, and are not used by LinearizePolynomial.
1051 polyLinCoeffs = np.array(datasetNonLinAmp.polynomialLinearizerCoefficients[2:])
1052 linearizer.linearityCoeffs[ampName] = polyLinCoeffs
1053 linearizer.linearityType[ampName] = "Polynomial"
1054 linearizer.linearityBBox[ampName] = amp.getBBox()
1055 linearizer.validate()
1056 calibId = f"detectorName={detName} detector={detNum} calibDate={calibDate} ccd={detNum} filter=NONE"
1058 try:
1059 raftName = detName.split("_")[0]
1060 calibId += f" raftName={raftName}"
1061 except Exception:
1062 raftname = "NONE"
1063 calibId += f" raftName={raftname}"
1065 serial = detector.getSerial()
1066 linearizer.updateMetadata(instrumentName=instruName, detectorId=f"{detNum}",
1067 calibId=calibId, serial=serial, detectorName=f"{detName}")
1069 return linearizer
1071 @staticmethod
1072 def _initialParsForPolynomial(order):
1073 assert(order >= 2)
1074 pars = np.zeros(order, dtype=float)
1075 pars[0] = 10
1076 pars[1] = 1
1077 pars[2:] = 0.0001
1078 return pars
1080 @staticmethod
1081 def _boundsForPolynomial(initialPars):
1082 lowers = [-np.inf for p in initialPars]
1083 uppers = [np.inf for p in initialPars]
1084 lowers[1] = 0 # no negative gains
1085 return (lowers, uppers)
1087 @staticmethod
1088 def _boundsForAstier(initialPars):
1089 lowers = [-np.inf for p in initialPars]
1090 uppers = [np.inf for p in initialPars]
1091 return (lowers, uppers)
1093 @staticmethod
1094 def _getInitialGoodPoints(means, variances, maxDeviationPositive, maxDeviationNegative):
1095 """Return a boolean array to mask bad points.
1097 A linear function has a constant ratio, so find the median
1098 value of the ratios, and exclude the points that deviate
1099 from that by more than a factor of maxDeviationPositive/negative.
1100 Asymmetric deviations are supported as we expect the PTC to turn
1101 down as the flux increases, but sometimes it anomalously turns
1102 upwards just before turning over, which ruins the fits, so it
1103 is wise to be stricter about restricting positive outliers than
1104 negative ones.
1106 If the thresholds are too high, points bad enough to make the fit fail are included;
1107 if too low, genuinely non-linear points are excluded, biasing the NL fit."""
1108 ratios = [b/a for (a, b) in zip(means, variances)]
1109 medianRatio = np.median(ratios)
1110 ratioDeviations = [(r/medianRatio)-1 for r in ratios]
1112 # so that it doesn't matter if the deviation is expressed as positive or negative
1113 maxDeviationPositive = abs(maxDeviationPositive)
1114 maxDeviationNegative = -1. * abs(maxDeviationNegative)
1116 goodPoints = np.array([True if (r < maxDeviationPositive and r > maxDeviationNegative)
1117 else False for r in ratioDeviations])
1118 return goodPoints
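    # Worked example of the ratio-based rejection above (synthetic numbers):
    # means [1, 2, 3, 4] with variances [1, 2, 3, 8] give ratios [1, 1, 1, 2], so the
    # last point deviates by +100% from the median ratio and is rejected for any
    # positive threshold below 1.0:
    #   _getInitialGoodPoints([1, 2, 3, 4], [1, 2, 3, 8], 0.12, 0.25)
    #   -> array([ True,  True,  True, False])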
1120 def _makeZeroSafe(self, array, warn=True, substituteValue=1e-9):
1121 """"""
1122 nBad = Counter(array)[0]
1123 if nBad == 0:
1124 return array
1126 if warn:
1127 msg = f"Found {nBad} zeros in array at elements {[x for x in np.where(array==0)[0]]}"
1128 self.log.warn(msg)
1130 array[array == 0] = substituteValue
1131 return array
1133 def fitPtc(self, dataset, ptcFitType):
1134 """Fit the photon transfer curve to a polynimial or to Astier+19 approximation.
1136 Fit the photon transfer curve with either a polynomial of the order
1137 specified in the task config, or using the Astier approximation.
1139 Sigma clipping is performed iteratively for the fit, as well as an
1140 initial clipping of data points that are more than
1141 config.initialNonLinearityExclusionThreshold away from lying on a
1142 straight line. This other step is necessary because the photon transfer
1143 curve turns over catastrophically at very high flux (because saturation
1144 drops the variance to ~0) and these far outliers cause the initial fit
1145 to fail, meaning the sigma cannot be calculated to perform the
1146 sigma-clipping.
1148 Parameters
1149 ----------
1150 dataset : `lsst.cp.pipe.ptc.PhotonTransferCurveDataset`
1151 The dataset containing the means, variances and exposure times
1153 ptcFitType : `str`
1154 Fit a 'POLYNOMIAL' (degree: 'polynomialFitDegree') or
1155 'EXPAPPROXIMATION' (Eq. 16 of Astier+19) to the PTC
1157 Returns
1158 -------
1159 dataset: `lsst.cp.pipe.ptc.PhotonTransferCurveDataset`
1160 This is the same dataset as the input parameter, however, it has been modified
1161 to include information such as the fit vectors and the fit parameters. See
1162 the class `PhotonTransferCurveDataset`.
1163 """
1165 def errFunc(p, x, y):
1166 return ptcFunc(p, x) - y
1168 sigmaCutPtcOutliers = self.config.sigmaCutPtcOutliers
1169 maxIterationsPtcOutliers = self.config.maxIterationsPtcOutliers
1171 for i, ampName in enumerate(dataset.ampNames):
1172 timeVecOriginal = np.array(dataset.rawExpTimes[ampName])
1173 meanVecOriginal = np.array(dataset.rawMeans[ampName])
1174 varVecOriginal = np.array(dataset.rawVars[ampName])
1175 varVecOriginal = self._makeZeroSafe(varVecOriginal)
1177 mask = ((meanVecOriginal >= self.config.minMeanSignal) &
1178 (meanVecOriginal <= self.config.maxMeanSignal))
1180 goodPoints = self._getInitialGoodPoints(meanVecOriginal, varVecOriginal,
1181 self.config.initialNonLinearityExclusionThresholdPositive,
1182 self.config.initialNonLinearityExclusionThresholdNegative)
1183 mask = mask & goodPoints
1185 if ptcFitType == 'EXPAPPROXIMATION':
1186 ptcFunc = funcAstier
1187 parsIniPtc = [-1e-9, 1.0, 10.] # a00, gain, noise
1188 bounds = self._boundsForAstier(parsIniPtc)
1189 if ptcFitType == 'POLYNOMIAL':
1190 ptcFunc = funcPolynomial
1191 parsIniPtc = self._initialParsForPolynomial(self.config.polynomialFitDegree + 1)
1192 bounds = self._boundsForPolynomial(parsIniPtc)
1194 # Before bootstrap fit, do an iterative fit to get rid of outliers
1195 count = 1
1196 while count <= maxIterationsPtcOutliers:
1197 # Note that application of the mask actually shrinks the array
1198 # to size rather than setting elements to zero (as we want) so
1199 # always update mask itself and re-apply to the original data
1200 meanTempVec = meanVecOriginal[mask]
1201 varTempVec = varVecOriginal[mask]
1202 res = least_squares(errFunc, parsIniPtc, bounds=bounds, args=(meanTempVec, varTempVec))
1203 pars = res.x
1205 # change this to the original from the temp because the masks are ANDed
1206 # meaning once a point is masked it's always masked, and the masks must
1207 # always be the same length for broadcasting
1208 sigResids = (varVecOriginal - ptcFunc(pars, meanVecOriginal))/np.sqrt(varVecOriginal)
1209 newMask = np.array([True if np.abs(r) < sigmaCutPtcOutliers else False for r in sigResids])
1210 mask = mask & newMask
1212 nDroppedTotal = Counter(mask)[False]
1213 self.log.debug(f"Iteration {count}: discarded {nDroppedTotal} points in total for {ampName}")
1214 count += 1
1215 # objects should never shrink
1216 assert (len(mask) == len(timeVecOriginal) == len(meanVecOriginal) == len(varVecOriginal))
1218 dataset.visitMask[ampName] = mask # store the final mask
1219 parsIniPtc = pars
1220 meanVecFinal = meanVecOriginal[mask]
1221 varVecFinal = varVecOriginal[mask]
1223 if Counter(mask)[False] > 0:
1224 self.log.info((f"Number of points discarded in PTC of amplifier {ampName}:" +
1225 f" {Counter(mask)[False]} out of {len(meanVecOriginal)}"))
1227 if (len(meanVecFinal) < len(parsIniPtc)):
1228 msg = (f"\nSERIOUS: Not enough data points ({len(meanVecFinal)}) compared to the number of"
1229 f"parameters of the PTC model({len(parsIniPtc)}). Setting {ampName} to BAD.")
1230 self.log.warn(msg)
1231 # The first and second parameters of initial fit are discarded (bias and gain)
1232 # for the final NL coefficients
1233 dataset.badAmps.append(ampName)
1234 dataset.gain[ampName] = np.nan
1235 dataset.gainErr[ampName] = np.nan
1236 dataset.noise[ampName] = np.nan
1237 dataset.noiseErr[ampName] = np.nan
1238 dataset.ptcFitPars[ampName] = np.nan
1239 dataset.ptcFitParsError[ampName] = np.nan
1240 dataset.ptcFitReducedChiSquared[ampName] = np.nan
1241 continue
1243 # Fit the PTC
1244 if self.config.doFitBootstrap:
1245 parsFit, parsFitErr, reducedChiSqPtc = fitBootstrap(parsIniPtc, meanVecFinal,
1246 varVecFinal, ptcFunc)
1247 else:
1248 parsFit, parsFitErr, reducedChiSqPtc = fitLeastSq(parsIniPtc, meanVecFinal,
1249 varVecFinal, ptcFunc)
1250 dataset.ptcFitPars[ampName] = parsFit
1251 dataset.ptcFitParsError[ampName] = parsFitErr
1252 dataset.ptcFitReducedChiSquared[ampName] = reducedChiSqPtc
1254 if ptcFitType == 'EXPAPPROXIMATION':
1255 ptcGain = parsFit[1]
1256 ptcGainErr = parsFitErr[1]
1257 ptcNoise = np.sqrt(np.fabs(parsFit[2]))
1258 ptcNoiseErr = 0.5*(parsFitErr[2]/np.fabs(parsFit[2]))*np.sqrt(np.fabs(parsFit[2]))
1259 if ptcFitType == 'POLYNOMIAL':
1260 ptcGain = 1./parsFit[1]
1261 ptcGainErr = np.fabs(1./parsFit[1])*(parsFitErr[1]/parsFit[1])
1262 ptcNoise = np.sqrt(np.fabs(parsFit[0]))*ptcGain
1263 ptcNoiseErr = (0.5*(parsFitErr[0]/np.fabs(parsFit[0]))*(np.sqrt(np.fabs(parsFit[0]))))*ptcGain
1264 dataset.gain[ampName] = ptcGain
1265 dataset.gainErr[ampName] = ptcGainErr
1266 dataset.noise[ampName] = ptcNoise
1267 dataset.noiseErr[ampName] = ptcNoiseErr
1268 if not len(dataset.ptcFitType) == 0:
1269 dataset.ptcFitType = ptcFitType
1271 return dataset
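# A short synthetic-data sketch (illustrative only; real data requires a Butler
# repository and postISRCCD exposures) of the least_squares step used inside fitPtc
# for the POLYNOMIAL case, reusing the module's funcPolynomial and the Task's helpers.
def _examplePolynomialPtcFit():
    """Illustrative only: fit a synthetic PTC with the same machinery as fitPtc."""
    rng = np.random.RandomState(42)
    gain, readNoise = 1.5, 5.0  # made-up e/DN and DN values
    mu = np.linspace(100., 50000., 30)
    var = readNoise**2 + mu/gain + rng.normal(scale=5., size=mu.size)  # ideal linear PTC + scatter

    def errFunc(p, x, y):
        return funcPolynomial(p, x) - y

    parsIni = MeasurePhotonTransferCurveTask._initialParsForPolynomial(3)
    bounds = MeasurePhotonTransferCurveTask._boundsForPolynomial(parsIni)
    res = least_squares(errFunc, parsIni, bounds=bounds, args=(mu, var))
    return res.x, 1./res.x[1]  # same gain convention as fitPtc uses for POLYNOMIAL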