import matplotlib.pyplot as plt
from collections import Counter

from .utils import (fitLeastSq, fitBootstrap, funcPolynomial, funcAstier)
from scipy.optimize import least_squares

from .astierCovPtcUtils import (fftSize, CovFft, computeCovDirect, fitData)
from .linearity import LinearitySolveTask
from .photodiode import getBOTphotodiodeData

from lsst.pipe.tasks.getRepositoryData import DataRefListRunner

__all__ = ['MeasurePhotonTransferCurveTask', 'MeasurePhotonTransferCurveTaskConfig']
46 """Config class for photon transfer curve measurement task"""
47 ccdKey = pexConfig.Field(
49 doc=
"The key by which to pull a detector from a dataId, e.g. 'ccd' or 'detector'.",
52 ptcFitType = pexConfig.ChoiceField(
54 doc=
"Fit PTC to Eq. 16, Eq. 20 in Astier+19, or to a polynomial.",
57 "POLYNOMIAL":
"n-degree polynomial (use 'polynomialFitDegree' to set 'n').",
58 "EXPAPPROXIMATION":
"Approximation in Astier+19 (Eq. 16).",
59 "FULLCOVARIANCE":
"Full covariances model in Astier+19 (Eq. 20)"
sigmaClipFullFitCovariancesAstier = pexConfig.Field(
    doc="Sigma clip for the full model fit for the FULLCOVARIANCE ptcFitType.",
maxIterFullFitCovariancesAstier = pexConfig.Field(
    doc="Maximum number of iterations in the full model fit for the FULLCOVARIANCE ptcFitType.",
maximumRangeCovariancesAstier = pexConfig.Field(
    doc="Maximum range of covariances as in Astier+19.",
covAstierRealSpace = pexConfig.Field(
    doc="Calculate covariances in real space or via FFT? (see appendix A of Astier+19).",
polynomialFitDegree = pexConfig.Field(
    doc="Degree of polynomial to fit the PTC, when 'ptcFitType'=POLYNOMIAL.",
linearity = pexConfig.ConfigurableField(
    target=LinearitySolveTask,
    doc="Task to solve the linearity.",
)
doCreateLinearizer = pexConfig.Field(
    doc="Calculate non-linearity and persist linearizer?",
binSize = pexConfig.Field(
    doc="Bin the image by this factor in both dimensions.",
minMeanSignal = pexConfig.Field(
    doc="Minimum value (inclusive) of the mean signal (in DN) to consider.",
maxMeanSignal = pexConfig.Field(
    doc="Maximum value (inclusive) of the mean signal (in DN) to consider.",
initialNonLinearityExclusionThresholdPositive = pexConfig.RangeField(
    doc="Initially exclude data points with a variance that is more than a factor of this from being"
        " linear in the positive direction, from the PTC fit. Note that these points will also be"
        " excluded from the non-linearity fit. This is done before the iterative outlier rejection,"
        " to allow an accurate determination of the sigmas for said iterative fit.",
initialNonLinearityExclusionThresholdNegative = pexConfig.RangeField(
    doc="Initially exclude data points with a variance that is more than a factor of this from being"
        " linear in the negative direction, from the PTC fit. Note that these points will also be"
        " excluded from the non-linearity fit. This is done before the iterative outlier rejection,"
        " to allow an accurate determination of the sigmas for said iterative fit.",
sigmaCutPtcOutliers = pexConfig.Field(
    doc="Sigma cut for outlier rejection in PTC.",
maskNameList = pexConfig.ListField(
    doc="Mask list to exclude from statistics calculations.",
    default=['SUSPECT', 'BAD', 'NO_DATA'],
nSigmaClipPtc = pexConfig.Field(
    doc="Sigma cut for afwMath.StatisticsControl().",
nIterSigmaClipPtc = pexConfig.Field(
    doc="Number of sigma-clipping iterations for afwMath.StatisticsControl().",
maxIterationsPtcOutliers = pexConfig.Field(
    doc="Maximum number of iterations for outlier rejection in PTC.",
doFitBootstrap = pexConfig.Field(
    doc="Use bootstrap for the PTC fit parameters and errors?",
doPhotodiode = pexConfig.Field(
    doc="Apply a correction based on the photodiode readings if available?",
photodiodeDataPath = pexConfig.Field(
    doc="Gen2 only: path to locate the photodiode data files.",
instrumentName = pexConfig.Field(
    doc="Instrument name.",
181 """A class to calculate, fit, and plot a PTC from a set of flat pairs.
183 The Photon Transfer Curve (var(signal) vs mean(signal)) is a standard tool
184 used in astronomical detectors characterization (e.g., Janesick 2001,
185 Janesick 2007). If ptcFitType is "EXPAPPROXIMATION" or "POLYNOMIAL", this task calculates the
186 PTC from a series of pairs of flat-field images; each pair taken at identical exposure
187 times. The difference image of each pair is formed to eliminate fixed pattern noise,
188 and then the variance of the difference image and the mean of the average image
189 are used to produce the PTC. An n-degree polynomial or the approximation in Equation
190 16 of Astier+19 ("The Shape of the Photon Transfer Curve of CCD sensors",
191 arXiv:1905.08677) can be fitted to the PTC curve. These models include
192 parameters such as the gain (e/DN) and readout noise.
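
For reference, the "EXPAPPROXIMATION" model (Eq. 16 of Astier+19) has
approximately the form

    var(mu) = 0.5/(a00*g^2)*(exp(2*a00*g*mu) - 1) + n00/g^2,

with g the gain (e/DN), a00 the leading brighter-fatter coefficient, and n00
the noise term. This is a sketch for orientation only; the function used in the
fit is funcAstier, imported from .utils.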
Linearizers to correct for signal-chain non-linearity are also calculated.
The `Linearizer` class, in general, can support per-amp linearizers, but in this
task this is not supported.

If ptcFitType is "FULLCOVARIANCE", the covariances of the difference images are
calculated via the DFT methods described in Astier+19 and the variances for the
PTC are given by the cov[0,0] elements at each signal level. The full model in
Equation 20 of Astier+19 is fit to the PTC to get the gain

Positional arguments passed to the Task constructor. None used at this
time.

Keyword arguments passed on to the Task constructor. None used at this
time.
"""
RunnerClass = DataRefListRunner
ConfigClass = MeasurePhotonTransferCurveTaskConfig
_DefaultName = "measurePhotonTransferCurve"

def __init__(self, *args, **kwargs):
    pipeBase.CmdLineTask.__init__(self, *args, **kwargs)
    self.makeSubtask("linearity")
    plt.interactive(False)
    self.config.validate()
228 """Run the Photon Transfer Curve (PTC) measurement task.
230 For a dataRef (which is each detector here),
231 and given a list of exposure pairs (postISR) at different exposure times,
236 dataRefList : `list` [`lsst.daf.peristence.ButlerDataRef`]
237 Data references for exposures for detectors to process.
239 if len(dataRefList) < 2:
240 raise RuntimeError(
"Insufficient inputs to combine.")
dataRef = dataRefList[0]

detNum = dataRef.dataId[self.config.ccdKey]
camera = dataRef.get('camera')
detector = camera[dataRef.dataId[self.config.ccdKey]]

amps = detector.getAmplifiers()
ampNames = [amp.getName() for amp in amps]
datasetPtc = PhotonTransferCurveDataset(ampNames, self.config.ptcFitType)

for (exp1, exp2) in expPairs.values():
    id1 = exp1.getInfo().getVisitInfo().getExposureId()
    id2 = exp2.getInfo().getVisitInfo().getExposureId()
    expIds.append((id1, id2))

self.log.info(f"Measuring PTC using {expIds} exposures for detector {detector.getId()}")
if self.config.doPhotodiode:
    for (expId1, expId2) in expIds:
        for i, expId in enumerate([expId1, expId2]):
            dataRef.dataId['expId'] = expId//1000
            if self.config.photodiodeDataPath:
                charges[i] = photodiodeData.getCharge()
            self.log.warn(f"No photodiode data found for {expId}")
        for ampName in ampNames:
            datasetPtc.photoCharge[ampName].append((charges[0], charges[1]))
else:
    for ampName in ampNames:
        datasetPtc.photoCharge[ampName] = np.repeat(np.nan, len(expIds))

for ampName in ampNames:
    datasetPtc.inputExpIdPairs[ampName] = expIds
for expTime, (exp1, exp2) in expPairs.items():
    expId1 = exp1.getInfo().getVisitInfo().getExposureId()
    expId2 = exp2.getInfo().getVisitInfo().getExposureId()

    for ampNumber, amp in enumerate(detector):
        ampName = amp.getName()

        doRealSpace = self.config.covAstierRealSpace
        muDiff, varDiff, covAstier = self.measureMeanVarCov(exp1, exp2, region=amp.getBBox(),
                                                            covAstierRealSpace=doRealSpace)
        datasetPtc.rawExpTimes[ampName].append(expTime)
        datasetPtc.rawMeans[ampName].append(muDiff)
        datasetPtc.rawVars[ampName].append(varDiff)

        if np.isnan(muDiff) or np.isnan(varDiff) or (covAstier is None):
            msg = (f"NaN mean or var, or None cov in amp {ampName} in exposure pair {expId1},"
                   f" {expId2} of detector {detNum}.")
        tags = ['mu', 'i', 'j', 'var', 'cov', 'npix', 'ext', 'expTime', 'ampName']
        if (muDiff <= self.config.minMeanSignal) or (muDiff >= self.config.maxMeanSignal):
            continue

        tupleRows += [(muDiff, ) + covRow + (ampNumber, expTime, ampName) for covRow in covAstier]

    if nAmpsNan == len(ampNames):
        msg = f"NaN mean in all amps of exposure pair {expId1}, {expId2} of detector {detNum}."

    tupleRecords += tupleRows
covariancesWithTags = np.core.records.fromrecords(tupleRecords, names=allTags)
if self.config.ptcFitType in ["FULLCOVARIANCE", ]:
elif self.config.ptcFitType in ["EXPAPPROXIMATION", "POLYNOMIAL"]:
    datasetPtc = self.fitPtc(datasetPtc, self.config.ptcFitType)
detName = detector.getName()
now = datetime.datetime.utcnow()
calibDate = now.strftime("%Y-%m-%d")
butler = dataRef.getButler()

datasetPtc.updateMetadata(setDate=True, camera=camera, detector=detector)

if self.config.doCreateLinearizer:
    dimensions = {'camera': camera.getName(), 'detector': detector.getId()}
    linearityResults = self.linearity.run(datasetPtc, camera, dimensions)
    linearizer = linearityResults.outputLinearizer

    self.log.info("Writing linearizer:")

    detName = detector.getName()
    now = datetime.datetime.utcnow()
    calibDate = now.strftime("%Y-%m-%d")

    butler.put(linearizer, datasetType='linearizer',
               dataId={'detector': detNum, 'detectorName': detName, 'calibDate': calibDate})

self.log.info("Writing PTC data.")
butler.put(datasetPtc, datasetType='photonTransferCurveDataset',
           dataId={'detector': detNum, 'detectorName': detName, 'calibDate': calibDate})

return pipeBase.Struct(exitStatus=0)
374 """Produce a list of flat pairs indexed by exposure time.
378 dataRefList : `list` [`lsst.daf.peristence.ButlerDataRef`]
379 Data references for exposures for detectors to process.
383 flatPairs : `dict` [`float`, `lsst.afw.image.exposure.exposure.ExposureF`]
384 Dictionary that groups flat-field exposures that have the same exposure time (seconds).
388 We use the difference of one pair of flat-field images taken at the same exposure time when
389 calculating the PTC to reduce Fixed Pattern Noise. If there are > 2 flat-field images with the
390 same exposure time, the first two are kept and the rest discarded.
for dataRef in dataRefList:
    tempFlat = dataRef.get("postISRCCD")
    self.log.warn("postISR exposure could not be retrieved. Ignoring flat.")
    expDate = tempFlat.getInfo().getVisitInfo().getDate().get()
    expDict.setdefault(expDate, tempFlat)
sortedExps = {k: expDict[k] for k in sorted(expDict)}

for exp in sortedExps:
    tempFlat = sortedExps[exp]
    expTime = tempFlat.getInfo().getVisitInfo().getExposureTime()
    listAtExpTime = flatPairs.setdefault(expTime, [])
    if len(listAtExpTime) >= 2:
        self.log.warn(f"Already found 2 exposures at expTime {expTime}. "
                      f"Ignoring exposure {tempFlat.getInfo().getVisitInfo().getExposureId()}")
    else:
        listAtExpTime.append(tempFlat)
for (key, value) in flatPairs.items():
    if len(value) < 2:
        keysToDrop.append(key)

for key in keysToDrop:
    self.log.warn(f"Only one exposure found at expTime {key}. Dropping exposure "
                  f"{flatPairs[key][0].getInfo().getVisitInfo().getExposureId()}.")
429 """Fit measured flat covariances to full model in Astier+19.
433 dataset : `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset`
434 The dataset containing information such as the means, variances and exposure times.
436 covariancesWithTagsArray : `numpy.recarray`
437 Tuple with at least (mu, cov, var, i, j, npix), where:
438 mu : 0.5*(m1 + m2), where:
439 mu1: mean value of flat1
440 mu2: mean value of flat2
441 cov: covariance value at lag(i, j)
442 var: variance(covariance value at lag(0, 0))
445 npix: number of pixels used for covariance calculation.
449 dataset: `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset`
450 This is the same dataset as the input paramter, however, it has been modified
451 to include information such as the fit vectors and the fit parameters. See
452 the class `PhotonTransferCurveDatase`.
455 covFits, covFitsNoB =
fitData(covariancesWithTagsArray, maxMu=self.config.maxMeanSignal,
456 r=self.config.maximumRangeCovariancesAstier,
457 nSigmaFullFit=self.config.sigmaClipFullFitCovariancesAstier,
458 maxIterFullFit=self.config.maxIterFullFitCovariancesAstier)
465 """Get output data for PhotonTransferCurveCovAstierDataset from CovFit objects.
469 dataset : `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset`
470 The dataset containing information such as the means, variances and exposure times.
473 Dictionary of CovFit objects, with amp names as keys.
476 Dictionary of CovFit objects, with amp names as keys, and 'b=0' in Eq. 20 of Astier+19.
480 dataset : `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset`
481 This is the same dataset as the input paramter, however, it has been modified
482 to include extra information such as the mask 1D array, gains, reoudout noise, measured signal,
483 measured variance, modeled variance, a, and b coefficient matrices (see Astier+19) per amplifier.
484 See the class `PhotonTransferCurveDatase`.
assert(len(covFits) == len(covFitsNoB))

for i, amp in enumerate(dataset.ampNames):
    lenInputTimes = len(dataset.rawExpTimes[amp])
    dataset.ptcFitPars[amp] = np.nan
    dataset.ptcFitParsError[amp] = np.nan
    dataset.ptcFitChiSq[amp] = np.nan

    fit = covFits[amp]
    fitNoB = covFitsNoB[amp]

    dataset.covariances[amp] = fit.cov
    dataset.covariancesModel[amp] = fit.evalCovModel()
    dataset.covariancesSqrtWeights[amp] = fit.sqrtW
    dataset.aMatrix[amp] = fit.getA()
    dataset.bMatrix[amp] = fit.getB()
    dataset.covariancesNoB[amp] = fitNoB.cov
    dataset.covariancesModelNoB[amp] = fitNoB.evalCovModel()
    dataset.covariancesSqrtWeightsNoB[amp] = fitNoB.sqrtW
    dataset.aMatrixNoB[amp] = fitNoB.getA()

    (meanVecFinal, varVecFinal, varVecModel,
     wc, varMask) = fit.getFitData(0, 0, divideByMu=False, returnMasked=True)
    gain = fit.getGain()

    dataset.expIdMask[amp] = varMask
    dataset.gain[amp] = gain
    dataset.gainErr[amp] = fit.getGainErr()
    dataset.noise[amp] = np.sqrt(fit.getRon())
    dataset.noiseErr[amp] = fit.getRonErr()

    padLength = lenInputTimes - len(varVecFinal)
    dataset.finalVars[amp] = np.pad(varVecFinal/(gain**2), (0, padLength),
                                    'constant', constant_values=np.nan)
    dataset.finalModelVars[amp] = np.pad(varVecModel/(gain**2), (0, padLength),
                                         'constant', constant_values=np.nan)
    dataset.finalMeans[amp] = np.pad(meanVecFinal/gain, (0, padLength),
                                     'constant', constant_values=np.nan)
    matrixSide = self.config.maximumRangeCovariancesAstier
    nanMatrix = np.full((matrixSide, matrixSide), np.nan)
    listNanMatrix = np.full((lenInputTimes, matrixSide, matrixSide), np.nan)

    dataset.covariances[amp] = listNanMatrix
    dataset.covariancesModel[amp] = listNanMatrix
    dataset.covariancesSqrtWeights[amp] = listNanMatrix
    dataset.aMatrix[amp] = nanMatrix
    dataset.bMatrix[amp] = nanMatrix
    dataset.covariancesNoB[amp] = listNanMatrix
    dataset.covariancesModelNoB[amp] = listNanMatrix
    dataset.covariancesSqrtWeightsNoB[amp] = listNanMatrix
    dataset.aMatrixNoB[amp] = nanMatrix

    dataset.expIdMask[amp] = np.repeat(np.nan, lenInputTimes)
    dataset.gain[amp] = np.nan
    dataset.gainErr[amp] = np.nan
    dataset.noise[amp] = np.nan
    dataset.noiseErr[amp] = np.nan
    dataset.finalVars[amp] = np.repeat(np.nan, lenInputTimes)
    dataset.finalModelVars[amp] = np.repeat(np.nan, lenInputTimes)
    dataset.finalMeans[amp] = np.repeat(np.nan, lenInputTimes)
553 """Calculate the mean of each of two exposures and the variance and covariance of their difference.
555 The variance is calculated via afwMath, and the covariance via the methods in Astier+19 (appendix A).
556 In theory, var = covariance[0,0]. This should be validated, and in the future, we may decide to just
557 keep one (covariance).
561 exposure1 : `lsst.afw.image.exposure.exposure.ExposureF`
562 First exposure of flat field pair.
564 exposure2 : `lsst.afw.image.exposure.exposure.ExposureF`
565 Second exposure of flat field pair.
567 region : `lsst.geom.Box2I`, optional
568 Region of each exposure where to perform the calculations (e.g, an amplifier).
570 covAstierRealSpace : `bool`, optional
571 Should the covariannces in Astier+19 be calculated in real space or via FFT?
572 See Appendix A of Astier+19.
576 mu : `float` or `NaN`
577 0.5*(mu1 + mu2), where mu1, and mu2 are the clipped means of the regions in
578 both exposures. If either mu1 or m2 are NaN's, the returned value is NaN.
580 varDiff : `float` or `NaN`
581 Half of the clipped variance of the difference of the regions inthe two input
582 exposures. If either mu1 or m2 are NaN's, the returned value is NaN.
584 covDiffAstier : `list` or `NaN`
585 List with tuples of the form (dx, dy, var, cov, npix), where:
591 Variance at (dx, dy).
593 Covariance at (dx, dy).
595 Number of pixel pairs used to evaluate var and cov.
596 If either mu1 or m2 are NaN's, the returned value is NaN.
if region is not None:
    im1Area = exposure1.maskedImage[region]
    im2Area = exposure2.maskedImage[region]
else:
    im1Area = exposure1.maskedImage
    im2Area = exposure2.maskedImage

if self.config.binSize > 1:
    im1Area = afwMath.binImage(im1Area, self.config.binSize)
    im2Area = afwMath.binImage(im2Area, self.config.binSize)

im1MaskVal = exposure1.getMask().getPlaneBitMask(self.config.maskNameList)
im1StatsCtrl = afwMath.StatisticsControl(self.config.nSigmaClipPtc,
                                         self.config.nIterSigmaClipPtc,
                                         im1MaskVal)
im1StatsCtrl.setNanSafe(True)
im1StatsCtrl.setAndMask(im1MaskVal)

im2MaskVal = exposure2.getMask().getPlaneBitMask(self.config.maskNameList)
im2StatsCtrl = afwMath.StatisticsControl(self.config.nSigmaClipPtc,
                                         self.config.nIterSigmaClipPtc,
                                         im2MaskVal)
im2StatsCtrl.setNanSafe(True)
im2StatsCtrl.setAndMask(im2MaskVal)

mu1 = afwMath.makeStatistics(im1Area, afwMath.MEANCLIP, im1StatsCtrl).getValue()
mu2 = afwMath.makeStatistics(im2Area, afwMath.MEANCLIP, im2StatsCtrl).getValue()
if np.isnan(mu1) or np.isnan(mu2):
    return np.nan, np.nan, None
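# Mean signal of the pair; this assignment follows directly from the Returns
# description above (mu = 0.5*(mu1 + mu2)).
mu = 0.5*(mu1 + mu2)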
temp = im2Area.clone()
diffIm = im1Area.clone()
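# (Sketch of the intent here, not verbatim code: the pair difference is formed
# so that fixed-pattern noise cancels, roughly
# diffIm = (mu2*im1Area - mu1*im2Area)/mu.)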
diffImMaskVal = diffIm.getMask().getPlaneBitMask(self.config.maskNameList)
diffImStatsCtrl = afwMath.StatisticsControl(self.config.nSigmaClipPtc,
                                            self.config.nIterSigmaClipPtc,
                                            diffImMaskVal)
diffImStatsCtrl.setNanSafe(True)
diffImStatsCtrl.setAndMask(diffImMaskVal)

varDiff = 0.5*(afwMath.makeStatistics(diffIm, afwMath.VARIANCECLIP, diffImStatsCtrl).getValue())

w1 = np.where(im1Area.getMask().getArray() == 0, 1, 0)
w2 = np.where(im2Area.getMask().getArray() == 0, 1, 0)

wDiff = np.where(diffIm.getMask().getArray() == 0, 1, 0)
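# Combined good-pixel weight used by the covariance code below (an assumption of
# this sketch: weights multiply, so a pixel must be unmasked in im1, im2, and
# the difference image).
w = w1*w2*wDiff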
maxRangeCov = self.config.maximumRangeCovariancesAstier
if covAstierRealSpace:
    covDiffAstier = computeCovDirect(diffIm.getImage().getArray(), w, maxRangeCov)
else:
    shapeDiff = diffIm.getImage().getArray().shape
    fftShape = (fftSize(shapeDiff[0] + maxRangeCov), fftSize(shapeDiff[1] + maxRangeCov))
    c = CovFft(diffIm.getImage().getArray(), w, fftShape, maxRangeCov)
    covDiffAstier = c.reportCovFft(maxRangeCov)

return mu, varDiff, covDiffAstier
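# Illustrative call (a sketch, not part of the original module): given two
# postISR flat exposures of the same exposure time and an amplifier bounding
# box, the per-amp statistics are obtained as in the run method above:
#
#     mu, varDiff, covs = self.measureMeanVarCov(exp1, exp2, region=amp.getBBox(),
#                                                covAstierRealSpace=False)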
669 """Compute covariances of diffImage in real space.
671 For lags larger than ~25, it is slower than the FFT way.
672 Taken from https://github.com/PierreAstier/bfptc/
676 diffImage : `numpy.array`
677 Image to compute the covariance of.
679 weightImage : `numpy.array`
680 Weight image of diffImage (1's and 0's for good and bad pixels, respectively).
683 Last index of the covariance to be computed.
688 List with tuples of the form (dx, dy, var, cov, npix), where:
694 Variance at (dx, dy).
696 Covariance at (dx, dy).
698 Number of pixel pairs used to evaluate var and cov.
for dy in range(maxRange + 1):
    for dx in range(0, maxRange + 1):
        cov1, nPix1 = self.covDirectValue(diffImage, weightImage, dx, dy)
        cov2, nPix2 = self.covDirectValue(diffImage, weightImage, dx, -dy)
        cov = 0.5*(cov1 + cov2)

        if (dx == 0 and dy == 0):
            var = cov
        outList.append((dx, dy, var, cov, nPix))
719 """Compute covariances of diffImage in real space at lag (dx, dy).
721 Taken from https://github.com/PierreAstier/bfptc/ (c.f., appendix of Astier+19).
725 diffImage : `numpy.array`
726 Image to compute the covariance of.
728 weightImage : `numpy.array`
729 Weight image of diffImage (1's and 0's for good and bad pixels, respectively).
740 Covariance at (dx, dy)
743 Number of pixel pairs used to evaluate var and cov.
(nCols, nRows) = diffImage.shape

(dx, dy) = (-dx, -dy)

if dy >= 0:
    im1 = diffImage[dy:, dx:]
    w1 = weightImage[dy:, dx:]
    im2 = diffImage[:nCols - dy, :nRows - dx]
    w2 = weightImage[:nCols - dy, :nRows - dx]
else:
    im1 = diffImage[:nCols + dy, dx:]
    w1 = weightImage[:nCols + dy, dx:]
    im2 = diffImage[-dy:, :nRows - dx]
    w2 = weightImage[-dy:, :nRows - dx]

s1 = im1TimesW.sum()/nPix
s2 = (im2*wAll).sum()/nPix
p = (im1TimesW*im2).sum()/nPix
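# Assumed definitions for this sketch: wAll is the combined weight image
# (w1*w2), im1TimesW is im1*wAll, and nPix is wAll.sum(). The weighted
# covariance then follows from the standard estimator (cf. Astier+19 appendix):
#
#     cov = p - s1*s2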
def _initialParsForPolynomial(order):
    pars = np.zeros(order, dtype=float)

def _boundsForPolynomial(initialPars):
    lowers = [-np.inf for p in initialPars]
    uppers = [np.inf for p in initialPars]
    return (lowers, uppers)

def _boundsForAstier(initialPars):
    lowers = [-np.inf for p in initialPars]
    uppers = [np.inf for p in initialPars]
    return (lowers, uppers)
def _getInitialGoodPoints(means, variances, maxDeviationPositive, maxDeviationNegative):
    """Return a boolean array to mask bad points.

    means : `numpy.array`
        Input array with mean signal values.

    variances : `numpy.array`
        Input array with variances at each mean value.

    maxDeviationPositive : `float`
        Maximum deviation from being constant for the variance/mean
        ratio, in the positive direction.

    maxDeviationNegative : `float`
        Maximum deviation from being constant for the variance/mean
        ratio, in the negative direction.

    goodPoints : `numpy.array` [`bool`]
        Boolean array to select good (`True`) and bad (`False`) points.

    A linear function has a constant ratio, so find the median
    value of the ratios, and exclude the points that deviate
    from that by more than a factor of maxDeviationPositive/negative.
    Asymmetric deviations are supported as we expect the PTC to turn
    down as the flux increases, but sometimes it anomalously turns
    upwards just before turning over, which ruins the fits, so it
    is wise to be stricter about restricting positive outliers than
    negative ones.

    Too high, and points that are so bad that the fit will fail will be included;
    too low, and the non-linear points will be excluded, biasing the NL fit."""
    assert(len(means) == len(variances))
    ratios = [b/a for (a, b) in zip(means, variances)]
    medianRatio = np.nanmedian(ratios)
    ratioDeviations = [(r/medianRatio)-1 for r in ratios]

    maxDeviationPositive = abs(maxDeviationPositive)
    maxDeviationNegative = -1. * abs(maxDeviationNegative)

    goodPoints = np.array([True if (r < maxDeviationPositive and r > maxDeviationNegative)
                           else False for r in ratioDeviations])
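    # Worked example (illustrative only, not real data): for
    # means = [1000., 2000., 3000., 4000.] and variances = [900., 1800., 2700., 900.],
    # the variance/mean ratios are [0.9, 0.9, 0.9, 0.225], the median ratio is 0.9,
    # and the deviations are [0, 0, 0, -0.75]; with a negative threshold of, say,
    # 0.25, the last (turned-over) point is flagged as bad.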
def _makeZeroSafe(self, array, warn=True, substituteValue=1e-9):
    nBad = Counter(array)[0]

    msg = f"Found {nBad} zeros in array at elements {[x for x in np.where(array==0)[0]]}"

    array[array == 0] = substituteValue
863 """Fit the photon transfer curve to a polynimial or to Astier+19 approximation.
865 Fit the photon transfer curve with either a polynomial of the order
866 specified in the task config, or using the Astier approximation.
868 Sigma clipping is performed iteratively for the fit, as well as an
869 initial clipping of data points that are more than
870 config.initialNonLinearityExclusionThreshold away from lying on a
871 straight line. This other step is necessary because the photon transfer
872 curve turns over catastrophically at very high flux (because saturation
873 drops the variance to ~0) and these far outliers cause the initial fit
874 to fail, meaning the sigma cannot be calculated to perform the
879 dataset : `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset`
880 The dataset containing the means, variances and exposure times
883 Fit a 'POLYNOMIAL' (degree: 'polynomialFitDegree') or
884 'EXPAPPROXIMATION' (Eq. 16 of Astier+19) to the PTC
888 dataset: `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset`
889 This is the same dataset as the input paramter, however, it has been modified
890 to include information such as the fit vectors and the fit parameters. See
891 the class `PhotonTransferCurveDatase`.
matrixSide = self.config.maximumRangeCovariancesAstier
nanMatrix = np.empty((matrixSide, matrixSide))
nanMatrix[:] = np.nan

for amp in dataset.ampNames:
    lenInputTimes = len(dataset.rawExpTimes[amp])
    listNanMatrix = np.empty((lenInputTimes, matrixSide, matrixSide))
    listNanMatrix[:] = np.nan

    dataset.covariances[amp] = listNanMatrix
    dataset.covariancesModel[amp] = listNanMatrix
    dataset.covariancesSqrtWeights[amp] = listNanMatrix
    dataset.aMatrix[amp] = nanMatrix
    dataset.bMatrix[amp] = nanMatrix
    dataset.covariancesNoB[amp] = listNanMatrix
    dataset.covariancesModelNoB[amp] = listNanMatrix
    dataset.covariancesSqrtWeightsNoB[amp] = listNanMatrix
    dataset.aMatrixNoB[amp] = nanMatrix
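# errFunc below is the residual vector handed to scipy.optimize.least_squares in
# the iterative outlier-rejection loop: least_squares minimizes the sum of
# squared residuals ptcFunc(p, mean) - var.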
def errFunc(p, x, y):
    return ptcFunc(p, x) - y

sigmaCutPtcOutliers = self.config.sigmaCutPtcOutliers
maxIterationsPtcOutliers = self.config.maxIterationsPtcOutliers
for i, ampName in enumerate(dataset.ampNames):
    timeVecOriginal = np.array(dataset.rawExpTimes[ampName])
    meanVecOriginal = np.array(dataset.rawMeans[ampName])
    varVecOriginal = np.array(dataset.rawVars[ampName])

    mask = ((meanVecOriginal >= self.config.minMeanSignal) &
            (meanVecOriginal <= self.config.maxMeanSignal))

    goodPoints = self._getInitialGoodPoints(meanVecOriginal, varVecOriginal,
                                            self.config.initialNonLinearityExclusionThresholdPositive,
                                            self.config.initialNonLinearityExclusionThresholdNegative)
    if not (mask.any() and goodPoints.any()):
        msg = (f"\nSERIOUS: All points in either mask: {mask} or goodPoints: {goodPoints} are bad."
               f" Setting {ampName} to BAD.")

        dataset.badAmps.append(ampName)
        dataset.expIdMask[ampName] = np.repeat(np.nan, len(dataset.rawExpTimes[ampName]))
        dataset.gain[ampName] = np.nan
        dataset.gainErr[ampName] = np.nan
        dataset.noise[ampName] = np.nan
        dataset.noiseErr[ampName] = np.nan
        dataset.ptcFitPars[ampName] = (np.repeat(np.nan, self.config.polynomialFitDegree + 1)
                                       if ptcFitType in ["POLYNOMIAL", ] else np.repeat(np.nan, 3))
        dataset.ptcFitParsError[ampName] = (np.repeat(np.nan, self.config.polynomialFitDegree + 1)
                                            if ptcFitType in ["POLYNOMIAL", ] else np.repeat(np.nan, 3))
        dataset.ptcFitChiSq[ampName] = np.nan
        dataset.finalVars[ampName] = np.repeat(np.nan, len(dataset.rawExpTimes[ampName]))
        dataset.finalModelVars[ampName] = np.repeat(np.nan, len(dataset.rawExpTimes[ampName]))
        dataset.finalMeans[ampName] = np.repeat(np.nan, len(dataset.rawExpTimes[ampName]))
        continue
    mask = mask & goodPoints

    if ptcFitType == 'EXPAPPROXIMATION':
        ptcFunc = funcAstier
        parsIniPtc = [-1e-9, 1.0, 10.]

    if ptcFitType == 'POLYNOMIAL':
        ptcFunc = funcPolynomial

    while count <= maxIterationsPtcOutliers:
        meanTempVec = meanVecOriginal[mask]
        varTempVec = varVecOriginal[mask]
        res = least_squares(errFunc, parsIniPtc, bounds=bounds, args=(meanTempVec, varTempVec))
        pars = res.x

        sigResids = (varVecOriginal - ptcFunc(pars, meanVecOriginal))/np.sqrt(varVecOriginal)
        newMask = np.array([True if np.abs(r) < sigmaCutPtcOutliers else False for r in sigResids])
        mask = mask & newMask
        if not (mask.any() and newMask.any()):
            msg = (f"\nSERIOUS: All points in either mask: {mask} or newMask: {newMask} are bad. "
                   f"Setting {ampName} to BAD.")

            dataset.badAmps.append(ampName)
            dataset.expIdMask[ampName] = np.repeat(np.nan, len(dataset.rawExpTimes[ampName]))
            dataset.gain[ampName] = np.nan
            dataset.gainErr[ampName] = np.nan
            dataset.noise[ampName] = np.nan
            dataset.noiseErr[ampName] = np.nan
            dataset.ptcFitPars[ampName] = (np.repeat(np.nan, self.config.polynomialFitDegree + 1)
                                           if ptcFitType in ["POLYNOMIAL", ]
                                           else np.repeat(np.nan, 3))
            dataset.ptcFitParsError[ampName] = (np.repeat(np.nan, self.config.polynomialFitDegree + 1)
                                                if ptcFitType in ["POLYNOMIAL", ]
                                                else np.repeat(np.nan, 3))
            dataset.ptcFitChiSq[ampName] = np.nan
            dataset.finalVars[ampName] = np.repeat(np.nan, len(dataset.rawExpTimes[ampName]))
            dataset.finalModelVars[ampName] = np.repeat(np.nan, len(dataset.rawExpTimes[ampName]))
            dataset.finalMeans[ampName] = np.repeat(np.nan, len(dataset.rawExpTimes[ampName]))

        nDroppedTotal = Counter(mask)[False]
        self.log.debug(f"Iteration {count}: discarded {nDroppedTotal} points in total for {ampName}")
    assert (len(mask) == len(timeVecOriginal) == len(meanVecOriginal) == len(varVecOriginal))

    if not (mask.any() and newMask.any()):
        continue
    dataset.expIdMask[ampName] = mask

    meanVecFinal = meanVecOriginal[mask]
    varVecFinal = varVecOriginal[mask]

    if Counter(mask)[False] > 0:
        self.log.info((f"Number of points discarded in PTC of amplifier {ampName}:"
                       f" {Counter(mask)[False]} out of {len(meanVecOriginal)}"))
    if (len(meanVecFinal) < len(parsIniPtc)):
        msg = (f"\nSERIOUS: Not enough data points ({len(meanVecFinal)}) compared to the number of"
               f" parameters of the PTC model ({len(parsIniPtc)}). Setting {ampName} to BAD.")

        dataset.badAmps.append(ampName)
        dataset.expIdMask[ampName] = np.repeat(np.nan, len(dataset.rawExpTimes[ampName]))
        dataset.gain[ampName] = np.nan
        dataset.gainErr[ampName] = np.nan
        dataset.noise[ampName] = np.nan
        dataset.noiseErr[ampName] = np.nan
        dataset.ptcFitPars[ampName] = (np.repeat(np.nan, self.config.polynomialFitDegree + 1)
                                       if ptcFitType in ["POLYNOMIAL", ] else np.repeat(np.nan, 3))
        dataset.ptcFitParsError[ampName] = (np.repeat(np.nan, self.config.polynomialFitDegree + 1)
                                            if ptcFitType in ["POLYNOMIAL", ] else np.repeat(np.nan, 3))
        dataset.ptcFitChiSq[ampName] = np.nan
        dataset.finalVars[ampName] = np.repeat(np.nan, len(dataset.rawExpTimes[ampName]))
        dataset.finalModelVars[ampName] = np.repeat(np.nan, len(dataset.rawExpTimes[ampName]))
        dataset.finalMeans[ampName] = np.repeat(np.nan, len(dataset.rawExpTimes[ampName]))
        continue
    if self.config.doFitBootstrap:
        parsFit, parsFitErr, reducedChiSqPtc = fitBootstrap(parsIniPtc, meanVecFinal,
                                                            varVecFinal, ptcFunc,
                                                            weightsY=1./np.sqrt(varVecFinal))
    else:
        parsFit, parsFitErr, reducedChiSqPtc = fitLeastSq(parsIniPtc, meanVecFinal,
                                                          varVecFinal, ptcFunc,
                                                          weightsY=1./np.sqrt(varVecFinal))
    dataset.ptcFitPars[ampName] = parsFit
    dataset.ptcFitParsError[ampName] = parsFitErr
    dataset.ptcFitChiSq[ampName] = reducedChiSqPtc

    padLength = len(dataset.rawExpTimes[ampName]) - len(varVecFinal)
    dataset.finalVars[ampName] = np.pad(varVecFinal, (0, padLength),
                                        'constant', constant_values=np.nan)
    dataset.finalModelVars[ampName] = np.pad(ptcFunc(parsFit, meanVecFinal), (0, padLength),
                                             'constant', constant_values=np.nan)
    dataset.finalMeans[ampName] = np.pad(meanVecFinal, (0, padLength),
                                         'constant', constant_values=np.nan)
    if ptcFitType == 'EXPAPPROXIMATION':
        ptcGain = parsFit[1]
        ptcGainErr = parsFitErr[1]
        ptcNoise = np.sqrt(np.fabs(parsFit[2]))
        ptcNoiseErr = 0.5*(parsFitErr[2]/np.fabs(parsFit[2]))*np.sqrt(np.fabs(parsFit[2]))
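    # For the POLYNOMIAL fit below, the PTC model is var(mu) = c0 + c1*mu + ...,
    # so the gain estimate is 1/c1 and the noise estimate is sqrt(|c0|)*gain;
    # the error expressions are first-order propagation of the fit errors.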
    if ptcFitType == 'POLYNOMIAL':
        ptcGain = 1./parsFit[1]
        ptcGainErr = np.fabs(1./parsFit[1])*(parsFitErr[1]/parsFit[1])
        ptcNoise = np.sqrt(np.fabs(parsFit[0]))*ptcGain
        ptcNoiseErr = (0.5*(parsFitErr[0]/np.fabs(parsFit[0]))*(np.sqrt(np.fabs(parsFit[0]))))*ptcGain

    dataset.gain[ampName] = ptcGain
    dataset.gainErr[ampName] = ptcGainErr
    dataset.noise[ampName] = ptcNoise
    dataset.noiseErr[ampName] = ptcNoiseErr

if not len(dataset.ptcFitType) == 0:
    dataset.ptcFitType = ptcFitType
if len(dataset.badAmps) == 0:
    dataset.badAmps = np.repeat(np.nan, len(list(dataset.rawExpTimes.values())[0]))