import numpy as np
import matplotlib.pyplot as plt
from collections import Counter
import datetime

import lsst.afw.math as afwMath
import lsst.pex.config as pexConfig
import lsst.pipe.base as pipeBase
from .utils import (fitLeastSq, fitBootstrap, funcPolynomial, funcAstier)
from scipy.optimize import least_squares

from .astierCovPtcUtils import (fftSize, CovFft, computeCovDirect, fitData)
from .linearity import LinearitySolveTask
from .photodiode import getBOTphotodiodeData

from lsst.ip.isr import PhotonTransferCurveDataset

from lsst.pipe.tasks.getRepositoryData import DataRefListRunner
__all__ = ['MeasurePhotonTransferCurveTask', 'MeasurePhotonTransferCurveTaskConfig']
46 """Config class for photon transfer curve measurement task"""
47 ccdKey = pexConfig.Field(
49 doc=
"The key by which to pull a detector from a dataId, e.g. 'ccd' or 'detector'.",
52 ptcFitType = pexConfig.ChoiceField(
54 doc=
"Fit PTC to Eq. 16, Eq. 20 in Astier+19, or to a polynomial.",
57 "POLYNOMIAL":
"n-degree polynomial (use 'polynomialFitDegree' to set 'n').",
58 "EXPAPPROXIMATION":
"Approximation in Astier+19 (Eq. 16).",
59 "FULLCOVARIANCE":
"Full covariances model in Astier+19 (Eq. 20)"
62 sigmaClipFullFitCovariancesAstier = pexConfig.Field(
64 doc=
"sigma clip for full model fit for FULLCOVARIANCE ptcFitType ",
67 maxIterFullFitCovariancesAstier = pexConfig.Field(
69 doc=
"Maximum number of iterations in full model fit for FULLCOVARIANCE ptcFitType",
72 maximumRangeCovariancesAstier = pexConfig.Field(
74 doc=
"Maximum range of covariances as in Astier+19",
77 covAstierRealSpace = pexConfig.Field(
79 doc=
"Calculate covariances in real space or via FFT? (see appendix A of Astier+19).",
82 polynomialFitDegree = pexConfig.Field(
84 doc=
"Degree of polynomial to fit the PTC, when 'ptcFitType'=POLYNOMIAL.",
87 linearity = pexConfig.ConfigurableField(
88 target=LinearitySolveTask,
89 doc=
"Task to solve the linearity."
    doCreateLinearizer = pexConfig.Field(
        doc="Calculate non-linearity and persist linearizer?",
    )
    binSize = pexConfig.Field(
        doc="Bin the image by this factor in both dimensions.",
    )
    minMeanSignal = pexConfig.Field(
        doc="Minimum value (inclusive) of the mean signal (in DN) to consider.",
    )
    maxMeanSignal = pexConfig.Field(
        doc="Maximum value (inclusive) of the mean signal (in DN) to consider.",
    )
    initialNonLinearityExclusionThresholdPositive = pexConfig.RangeField(
        doc="Initially exclude data points with a variance that are more than a factor of this from being"
            " linear in the positive direction, from the PTC fit. Note that these points will also be"
            " excluded from the non-linearity fit. This is done before the iterative outlier rejection,"
            " to allow an accurate determination of the sigmas for said iterative fit.",
    )
    initialNonLinearityExclusionThresholdNegative = pexConfig.RangeField(
        doc="Initially exclude data points with a variance that are more than a factor of this from being"
            " linear in the negative direction, from the PTC fit. Note that these points will also be"
            " excluded from the non-linearity fit. This is done before the iterative outlier rejection,"
            " to allow an accurate determination of the sigmas for said iterative fit.",
    )
    sigmaCutPtcOutliers = pexConfig.Field(
        doc="Sigma cut for outlier rejection in PTC.",
    )
    maskNameList = pexConfig.ListField(
        doc="Mask list to exclude from statistics calculations.",
        default=['SUSPECT', 'BAD', 'NO_DATA'],
    )
    nSigmaClipPtc = pexConfig.Field(
        doc="Sigma cut for afwMath.StatisticsControl()",
    )
    nIterSigmaClipPtc = pexConfig.Field(
        doc="Number of sigma-clipping iterations for afwMath.StatisticsControl()",
    )
    maxIterationsPtcOutliers = pexConfig.Field(
        doc="Maximum number of iterations for outlier rejection in PTC.",
    )
    doFitBootstrap = pexConfig.Field(
        doc="Use bootstrap for the PTC fit parameters and errors?",
    )
    doPhotodiode = pexConfig.Field(
        doc="Apply a correction based on the photodiode readings if available?",
    )
    photodiodeDataPath = pexConfig.Field(
        doc="Gen2 only: path to locate the photodiode data files.",
    )
    instrumentName = pexConfig.Field(
        doc="Instrument name.",
    )
181 """A class to calculate, fit, and plot a PTC from a set of flat pairs.
183 The Photon Transfer Curve (var(signal) vs mean(signal)) is a standard tool
184 used in astronomical detectors characterization (e.g., Janesick 2001,
185 Janesick 2007). If ptcFitType is "EXPAPPROXIMATION" or "POLYNOMIAL", this task calculates the
186 PTC from a series of pairs of flat-field images; each pair taken at identical exposure
187 times. The difference image of each pair is formed to eliminate fixed pattern noise,
188 and then the variance of the difference image and the mean of the average image
189 are used to produce the PTC. An n-degree polynomial or the approximation in Equation
190 16 of Astier+19 ("The Shape of the Photon Transfer Curve of CCD sensors",
191 arXiv:1905.08677) can be fitted to the PTC curve. These models include
192 parameters such as the gain (e/DN) and readout noise.
194 Linearizers to correct for signal-chain non-linearity are also calculated.
195 The `Linearizer` class, in general, can support per-amp linearizers, but in this
196 task this is not supported.
198 If ptcFitType is "FULLCOVARIANCE", the covariances of the difference images are calculated via the
199 DFT methods described in Astier+19 and the variances for the PTC are given by the cov[0,0] elements
200 at each signal level. The full model in Equation 20 of Astier+19 is fit to the PTC to get the gain
207 Positional arguments passed to the Task constructor. None used at this
210 Keyword arguments passed on to the Task constructor. None used at this
    RunnerClass = DataRefListRunner
    ConfigClass = MeasurePhotonTransferCurveTaskConfig
    _DefaultName = "measurePhotonTransferCurve"

    def __init__(self, *args, **kwargs):
        pipeBase.CmdLineTask.__init__(self, *args, **kwargs)
        self.makeSubtask("linearity")
        plt.interactive(False)  # stop windows popping up when plotting
        self.config.validate()
228 """Run the Photon Transfer Curve (PTC) measurement task.
230 For a dataRef (which is each detector here),
231 and given a list of exposure pairs (postISR) at different exposure times,
236 dataRefList : `list` [`lsst.daf.peristence.ButlerDataRef`]
237 Data references for exposures for detectors to process.
239 if len(dataRefList) < 2:
240 raise RuntimeError(
"Insufficient inputs to combine.")
        dataRef = dataRefList[0]

        detNum = dataRef.dataId[self.config.ccdKey]
        camera = dataRef.get('camera')
        detector = camera[dataRef.dataId[self.config.ccdKey]]

        amps = detector.getAmplifiers()
        ampNames = [amp.getName() for amp in amps]
        datasetPtc = PhotonTransferCurveDataset(ampNames, self.config.ptcFitType)
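        # Group the input flats into pairs by exposure time and record the
        # exposure IDs of every pair; the IDs are stored in the output dataset
        # and are also used for the photodiode lookup below.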
        expPairs = self.makePairs(dataRefList)
        expIds = []
        for (exp1, exp2) in expPairs.values():
            id1 = exp1.getInfo().getVisitInfo().getExposureId()
            id2 = exp2.getInfo().getVisitInfo().getExposureId()
            expIds.append((id1, id2))
        self.log.info(f"Measuring PTC using {expIds} exposures for detector {detector.getId()}")
        if self.config.doPhotodiode:
            for (expId1, expId2) in expIds:
                charges = [-1, -1]  # sentinel values; overwritten if photodiode data are found
                for i, expId in enumerate([expId1, expId2]):
                    # //1000 is a Gen2-only workaround: the exposure ID has the
                    # zero-padded detector number appended, which must be
                    # stripped to match the expId in the registry.
                    dataRef.dataId['expId'] = expId//1000
                    if self.config.photodiodeDataPath:
                        photodiodeData = getBOTphotodiodeData(dataRef, self.config.photodiodeDataPath)
                    else:
                        photodiodeData = getBOTphotodiodeData(dataRef)
                    if photodiodeData:
                        charges[i] = photodiodeData.getCharge()
                    else:
                        self.log.warn(f"No photodiode data found for {expId}")

                for ampName in ampNames:
                    datasetPtc.photoCharge[ampName].append((charges[0], charges[1]))
        else:
            for ampName in ampNames:
                datasetPtc.photoCharge[ampName] = np.repeat(np.nan, len(expIds))
        for ampName in ampNames:
            datasetPtc.inputExpIdPairs[ampName] = expIds
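        # Measure the mean signal, the variance of the difference image, and
        # the covariances (Astier+19) for every flat pair and amplifier.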
        tupleRecords = []
        allTags = []
        for expTime, (exp1, exp2) in expPairs.items():
            expId1 = exp1.getInfo().getVisitInfo().getExposureId()
            expId2 = exp2.getInfo().getVisitInfo().getExposureId()
            nAmpsNan = 0
            tupleRows = []
            for ampNumber, amp in enumerate(detector):
                ampName = amp.getName()
                doRealSpace = self.config.covAstierRealSpace
                muDiff, varDiff, covAstier = self.measureMeanVarCov(exp1, exp2, region=amp.getBBox(),
                                                                    covAstierRealSpace=doRealSpace)
                datasetPtc.rawExpTimes[ampName].append(expTime)
                datasetPtc.rawMeans[ampName].append(muDiff)
                datasetPtc.rawVars[ampName].append(varDiff)

                if np.isnan(muDiff) or np.isnan(varDiff) or (covAstier is None):
                    msg = (f"NaN mean or var, or None cov in amp {ampName} in exposure pair {expId1},"
                           f" {expId2} of detector {detNum}.")
                    self.log.warn(msg)
                    nAmpsNan += 1
                    continue

                tags = ['mu', 'i', 'j', 'var', 'cov', 'npix', 'ext', 'expTime', 'ampName']
                if (muDiff <= self.config.minMeanSignal) or (muDiff >= self.config.maxMeanSignal):
                    continue

                tupleRows += [(muDiff, ) + covRow + (ampNumber, expTime, ampName) for covRow in covAstier]

            if nAmpsNan == len(ampNames):
                msg = f"NaN mean in all amps of exposure pair {expId1}, {expId2} of detector {detNum}."
                self.log.warn(msg)
                continue

            allTags = tags
            tupleRecords += tupleRows

        covariancesWithTags = np.core.records.fromrecords(tupleRecords, names=allTags)
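        # Fit the requested PTC model. FULLCOVARIANCE fits the full covariance
        # record array (Eq. 20 of Astier+19); EXPAPPROXIMATION and POLYNOMIAL
        # fit only the variance-vs-mean relation.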
        if self.config.ptcFitType in ["FULLCOVARIANCE", ]:
            datasetPtc = self.fitCovariancesAstier(datasetPtc, covariancesWithTags)
        elif self.config.ptcFitType in ["EXPAPPROXIMATION", "POLYNOMIAL"]:
            datasetPtc = self.fitPtc(datasetPtc, self.config.ptcFitType)
        detName = detector.getName()
        now = datetime.datetime.utcnow()
        calibDate = now.strftime("%Y-%m-%d")
        butler = dataRef.getButler()

        datasetPtc.updateMetadata(setDate=True, camera=camera, detector=detector)
        if self.config.doCreateLinearizer:
            dimensions = {'camera': camera.getName(), 'detector': detector.getId()}
            linearityResults = self.linearity.run(datasetPtc, camera, dimensions)
            linearizer = linearityResults.outputLinearizer

            self.log.info("Writing linearizer:")
            butler.put(linearizer, datasetType='Linearizer',
                       dataId={'detector': detNum, 'detectorName': detName, 'calibDate': calibDate})

        self.log.info("Writing PTC data.")
        butler.put(datasetPtc, datasetType='photonTransferCurveDataset',
                   dataId={'detector': detNum, 'detectorName': detName, 'calibDate': calibDate})

        return pipeBase.Struct(exitStatus=0)
369 """Produce a list of flat pairs indexed by exposure time.
373 dataRefList : `list` [`lsst.daf.peristence.ButlerDataRef`]
374 Data references for exposures for detectors to process.
378 flatPairs : `dict` [`float`, `lsst.afw.image.exposure.exposure.ExposureF`]
379 Dictionary that groups flat-field exposures that have the same exposure time (seconds).
383 We use the difference of one pair of flat-field images taken at the same exposure time when
384 calculating the PTC to reduce Fixed Pattern Noise. If there are > 2 flat-field images with the
385 same exposure time, the first two are kept and the rest discarded.
390 for dataRef
in dataRefList:
392 tempFlat = dataRef.get(
"postISRCCD")
394 self.log.warn(
"postISR exposure could not be retrieved. Ignoring flat.")
396 expDate = tempFlat.getInfo().getVisitInfo().getDate().get()
397 expDict.setdefault(expDate, tempFlat)
398 sortedExps = {k: expDict[k]
for k
in sorted(expDict)}
401 for exp
in sortedExps:
402 tempFlat = sortedExps[exp]
403 expTime = tempFlat.getInfo().getVisitInfo().getExposureTime()
404 listAtExpTime = flatPairs.setdefault(expTime, [])
405 if len(listAtExpTime) >= 2:
406 self.log.warn(f
"Already found 2 exposures at expTime {expTime}. "
407 f
"Ignoring exposure {tempFlat.getInfo().getVisitInfo().getExposureId()}")
409 listAtExpTime.append(tempFlat)
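        # A PTC point needs two flats at the same exposure time; drop any
        # exposure time for which only a single flat was found.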
        keysToDrop = []
        for (key, value) in flatPairs.items():
            if len(value) < 2:
                keysToDrop.append(key)

        for key in keysToDrop:
            self.log.warn(f"Only one exposure found at expTime {key}. Dropping exposure "
                          f"{flatPairs[key][0].getInfo().getVisitInfo().getExposureId()}.")
            flatPairs.pop(key)

        return flatPairs
424 """Fit measured flat covariances to full model in Astier+19.
428 dataset : `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset`
429 The dataset containing information such as the means, variances and exposure times.
431 covariancesWithTagsArray : `numpy.recarray`
432 Tuple with at least (mu, cov, var, i, j, npix), where:
433 mu : 0.5*(m1 + m2), where:
434 mu1: mean value of flat1
435 mu2: mean value of flat2
436 cov: covariance value at lag(i, j)
437 var: variance(covariance value at lag(0, 0))
440 npix: number of pixels used for covariance calculation.
444 dataset: `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset`
445 This is the same dataset as the input paramter, however, it has been modified
446 to include information such as the fit vectors and the fit parameters. See
447 the class `PhotonTransferCurveDatase`.
        covFits, covFitsNoB = fitData(covariancesWithTagsArray, maxMu=self.config.maxMeanSignal,
                                      r=self.config.maximumRangeCovariancesAstier,
                                      nSigmaFullFit=self.config.sigmaClipFullFitCovariancesAstier,
                                      maxIterFullFit=self.config.maxIterFullFitCovariancesAstier)

        dataset = self.getOutputPtcDataCovAstier(dataset, covFits, covFitsNoB)

        return dataset
460 """Get output data for PhotonTransferCurveCovAstierDataset from CovFit objects.
464 dataset : `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset`
465 The dataset containing information such as the means, variances and exposure times.
468 Dictionary of CovFit objects, with amp names as keys.
471 Dictionary of CovFit objects, with amp names as keys, and 'b=0' in Eq. 20 of Astier+19.
475 dataset : `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset`
476 This is the same dataset as the input paramter, however, it has been modified
477 to include extra information such as the mask 1D array, gains, reoudout noise, measured signal,
478 measured variance, modeled variance, a, and b coefficient matrices (see Astier+19) per amplifier.
479 See the class `PhotonTransferCurveDatase`.
        assert(len(covFits) == len(covFitsNoB))

        for i, amp in enumerate(dataset.ampNames):
            lenInputTimes = len(dataset.rawExpTimes[amp])
            dataset.ptcFitPars[amp] = np.nan
            dataset.ptcFitParsError[amp] = np.nan
            dataset.ptcFitChiSq[amp] = np.nan
            if amp in covFits:
                fit = covFits[amp]
                fitNoB = covFitsNoB[amp]

                dataset.covariances[amp] = fit.cov
                dataset.covariancesModel[amp] = fit.evalCovModel()
                dataset.covariancesSqrtWeights[amp] = fit.sqrtW
                dataset.aMatrix[amp] = fit.getA()
                dataset.bMatrix[amp] = fit.getB()
                dataset.covariancesNoB[amp] = fitNoB.cov
                dataset.covariancesModelNoB[amp] = fitNoB.evalCovModel()
                dataset.covariancesSqrtWeightsNoB[amp] = fitNoB.sqrtW
                dataset.aMatrixNoB[amp] = fitNoB.getA()

                (meanVecFinal, varVecFinal, varVecModel,
                    wc, varMask) = fit.getFitData(0, 0, divideByMu=False, returnMasked=True)
                gain = fit.getGain()
                dataset.expIdMask[amp] = varMask
                dataset.gain[amp] = gain
                dataset.gainErr[amp] = fit.getGainErr()
                dataset.noise[amp] = np.sqrt(fit.getRon())
                dataset.noiseErr[amp] = fit.getRonErr()
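                # Pad the masked vectors with NaN so that every per-amp output
                # array has the same length as the input exposure list.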
                padLength = lenInputTimes - len(varVecFinal)
                dataset.finalVars[amp] = np.pad(varVecFinal/(gain**2), (0, padLength),
                                                'constant', constant_values=np.nan)
                dataset.finalModelVars[amp] = np.pad(varVecModel/(gain**2), (0, padLength),
                                                     'constant', constant_values=np.nan)
                dataset.finalMeans[amp] = np.pad(meanVecFinal/gain, (0, padLength),
                                                 'constant', constant_values=np.nan)
            else:
                # No fit exists for this amplifier; fill all outputs with NaN.
                matrixSide = self.config.maximumRangeCovariancesAstier
                nanMatrix = np.full((matrixSide, matrixSide), np.nan)
                listNanMatrix = np.full((lenInputTimes, matrixSide, matrixSide), np.nan)

                dataset.covariances[amp] = listNanMatrix
                dataset.covariancesModel[amp] = listNanMatrix
                dataset.covariancesSqrtWeights[amp] = listNanMatrix
                dataset.aMatrix[amp] = nanMatrix
                dataset.bMatrix[amp] = nanMatrix
                dataset.covariancesNoB[amp] = listNanMatrix
                dataset.covariancesModelNoB[amp] = listNanMatrix
                dataset.covariancesSqrtWeightsNoB[amp] = listNanMatrix
                dataset.aMatrixNoB[amp] = nanMatrix

                dataset.expIdMask[amp] = np.repeat(np.nan, lenInputTimes)
                dataset.gain[amp] = np.nan
                dataset.gainErr[amp] = np.nan
                dataset.noise[amp] = np.nan
                dataset.noiseErr[amp] = np.nan
                dataset.finalVars[amp] = np.repeat(np.nan, lenInputTimes)
                dataset.finalModelVars[amp] = np.repeat(np.nan, lenInputTimes)
                dataset.finalMeans[amp] = np.repeat(np.nan, lenInputTimes)

        return dataset
548 """Calculate the mean of each of two exposures and the variance and covariance of their difference.
550 The variance is calculated via afwMath, and the covariance via the methods in Astier+19 (appendix A).
551 In theory, var = covariance[0,0]. This should be validated, and in the future, we may decide to just
552 keep one (covariance).
556 exposure1 : `lsst.afw.image.exposure.exposure.ExposureF`
557 First exposure of flat field pair.
559 exposure2 : `lsst.afw.image.exposure.exposure.ExposureF`
560 Second exposure of flat field pair.
562 region : `lsst.geom.Box2I`, optional
563 Region of each exposure where to perform the calculations (e.g, an amplifier).
565 covAstierRealSpace : `bool`, optional
566 Should the covariannces in Astier+19 be calculated in real space or via FFT?
567 See Appendix A of Astier+19.
571 mu : `float` or `NaN`
572 0.5*(mu1 + mu2), where mu1, and mu2 are the clipped means of the regions in
573 both exposures. If either mu1 or m2 are NaN's, the returned value is NaN.
575 varDiff : `float` or `NaN`
576 Half of the clipped variance of the difference of the regions inthe two input
577 exposures. If either mu1 or m2 are NaN's, the returned value is NaN.
579 covDiffAstier : `list` or `NaN`
580 List with tuples of the form (dx, dy, var, cov, npix), where:
586 Variance at (dx, dy).
588 Covariance at (dx, dy).
590 Number of pixel pairs used to evaluate var and cov.
591 If either mu1 or m2 are NaN's, the returned value is NaN.
        if region is not None:
            im1Area = exposure1.maskedImage[region]
            im2Area = exposure2.maskedImage[region]
        else:
            im1Area = exposure1.maskedImage
            im2Area = exposure2.maskedImage

        if self.config.binSize > 1:
            im1Area = afwMath.binImage(im1Area, self.config.binSize)
            im2Area = afwMath.binImage(im2Area, self.config.binSize)

        im1MaskVal = exposure1.getMask().getPlaneBitMask(self.config.maskNameList)
        im1StatsCtrl = afwMath.StatisticsControl(self.config.nSigmaClipPtc,
                                                 self.config.nIterSigmaClipPtc,
                                                 im1MaskVal)
        im1StatsCtrl.setNanSafe(True)
        im1StatsCtrl.setAndMask(im1MaskVal)
        im2MaskVal = exposure2.getMask().getPlaneBitMask(self.config.maskNameList)
        im2StatsCtrl = afwMath.StatisticsControl(self.config.nSigmaClipPtc,
                                                 self.config.nIterSigmaClipPtc,
                                                 im2MaskVal)
        im2StatsCtrl.setNanSafe(True)
        im2StatsCtrl.setAndMask(im2MaskVal)

        mu1 = afwMath.makeStatistics(im1Area, afwMath.MEANCLIP, im1StatsCtrl).getValue()
        mu2 = afwMath.makeStatistics(im2Area, afwMath.MEANCLIP, im2StatsCtrl).getValue()
        if np.isnan(mu1) or np.isnan(mu2):
            return np.nan, np.nan, None
        mu = 0.5*(mu1 + mu2)

        # Symmetric difference of the pair: diff = (mu2*im1 - mu1*im2)/mu,
        # which removes fixed pattern noise.
        temp = im2Area.clone()
        temp *= mu1
        diffIm = im1Area.clone()
        diffIm *= mu2
        diffIm -= temp
        diffIm /= mu
        diffImMaskVal = diffIm.getMask().getPlaneBitMask(self.config.maskNameList)
        diffImStatsCtrl = afwMath.StatisticsControl(self.config.nSigmaClipPtc,
                                                    self.config.nIterSigmaClipPtc,
                                                    diffImMaskVal)
        diffImStatsCtrl.setNanSafe(True)
        diffImStatsCtrl.setAndMask(diffImMaskVal)

        varDiff = 0.5*(afwMath.makeStatistics(diffIm, afwMath.VARIANCECLIP, diffImStatsCtrl).getValue())
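        # Build weight images for the covariance calculation: 1 for pixels that
        # are unmasked in both flats and in the difference image, 0 otherwise.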
        w1 = np.where(im1Area.getMask().getArray() == 0, 1, 0)
        w2 = np.where(im2Area.getMask().getArray() == 0, 1, 0)
        w12 = w1*w2
        wDiff = np.where(diffIm.getMask().getArray() == 0, 1, 0)
        w = w12*wDiff
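        # Compute the covariances of the difference image up to lag
        # maximumRangeCovariancesAstier, either directly in real space or via
        # FFT (Astier+19, appendix A). The FFT size is padded by the maximum
        # lag to avoid circular-correlation wrap-around.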
        maxRangeCov = self.config.maximumRangeCovariancesAstier
        if covAstierRealSpace:
            covDiffAstier = computeCovDirect(diffIm.getImage().getArray(), w, maxRangeCov)
        else:
            shapeDiff = diffIm.getImage().getArray().shape
            fftShape = (fftSize(shapeDiff[0] + maxRangeCov), fftSize(shapeDiff[1] + maxRangeCov))
            c = CovFft(diffIm.getImage().getArray(), w, fftShape, maxRangeCov)
            covDiffAstier = c.reportCovFft(maxRangeCov)

        return mu, varDiff, covDiffAstier
664 """Compute covariances of diffImage in real space.
666 For lags larger than ~25, it is slower than the FFT way.
667 Taken from https://github.com/PierreAstier/bfptc/
671 diffImage : `numpy.array`
672 Image to compute the covariance of.
674 weightImage : `numpy.array`
675 Weight image of diffImage (1's and 0's for good and bad pixels, respectively).
678 Last index of the covariance to be computed.
683 List with tuples of the form (dx, dy, var, cov, npix), where:
689 Variance at (dx, dy).
691 Covariance at (dx, dy).
693 Number of pixel pairs used to evaluate var and cov.
        outList = []
        var = 0
        # (dx, dy) = (0, 0) has to be computed first so that var is defined.
        for dy in range(maxRange + 1):
            for dx in range(0, maxRange + 1):
                if (dx*dy > 0):
                    cov1, nPix1 = self.covDirectValue(diffImage, weightImage, dx, dy)
                    cov2, nPix2 = self.covDirectValue(diffImage, weightImage, dx, -dy)
                    cov = 0.5*(cov1 + cov2)
                    nPix = nPix1 + nPix2
                else:
                    cov, nPix = self.covDirectValue(diffImage, weightImage, dx, dy)
                if (dx == 0 and dy == 0):
                    var = cov
                outList.append((dx, dy, var, cov, nPix))

        return outList
714 """Compute covariances of diffImage in real space at lag (dx, dy).
716 Taken from https://github.com/PierreAstier/bfptc/ (c.f., appendix of Astier+19).
720 diffImage : `numpy.array`
721 Image to compute the covariance of.
723 weightImage : `numpy.array`
724 Weight image of diffImage (1's and 0's for good and bad pixels, respectively).
735 Covariance at (dx, dy)
738 Number of pixel pairs used to evaluate var and cov.
740 (nCols, nRows) = diffImage.shape
744 (dx, dy) = (-dx, -dy)
748 im1 = diffImage[dy:, dx:]
749 w1 = weightImage[dy:, dx:]
750 im2 = diffImage[:nCols - dy, :nRows - dx]
751 w2 = weightImage[:nCols - dy, :nRows - dx]
753 im1 = diffImage[:nCols + dy, dx:]
754 w1 = weightImage[:nCols + dy, dx:]
755 im2 = diffImage[-dy:, :nRows - dx]
756 w2 = weightImage[-dy:, :nRows - dx]
762 s1 = im1TimesW.sum()/nPix
763 s2 = (im2*wAll).sum()/nPix
764 p = (im1TimesW*im2).sum()/nPix
    @staticmethod
    def _initialParsForPolynomial(order):
        pars = np.zeros(order, dtype=np.float64)
    @staticmethod
    def _boundsForPolynomial(initialPars):
        lowers = [-np.inf for p in initialPars]
        uppers = [np.inf for p in initialPars]
        return (lowers, uppers)
    @staticmethod
    def _boundsForAstier(initialPars):
        lowers = [-np.inf for p in initialPars]
        uppers = [np.inf for p in initialPars]
        return (lowers, uppers)
    @staticmethod
    def _getInitialGoodPoints(means, variances, maxDeviationPositive, maxDeviationNegative):
        """Return a boolean array to mask bad points.

        Parameters
        ----------
        means : `numpy.array`
            Input array with mean signal values.
        variances : `numpy.array`
            Input array with variances at each mean value.
        maxDeviationPositive : `float`
            Maximum deviation from being constant for the variance/mean
            ratio, in the positive direction.
        maxDeviationNegative : `float`
            Maximum deviation from being constant for the variance/mean
            ratio, in the negative direction.

        Returns
        -------
        goodPoints : `numpy.array` [`bool`]
            Boolean array to select good (`True`) and bad (`False`) points.

        Notes
        -----
        A linear function has a constant ratio, so find the median
        value of the ratios, and exclude the points that deviate
        from that by more than a factor of maxDeviationPositive/negative.
        Asymmetric deviations are supported as we expect the PTC to turn
        down as the flux increases, but sometimes it anomalously turns
        upwards just before turning over, which ruins the fits, so it
        is wise to be stricter about restricting positive outliers than
        negative ones.

        If the thresholds are too high, points that are so bad that the fit will fail
        will be included; if too low, the non-linear points will be excluded, biasing
        the NL fit.
        """
        assert(len(means) == len(variances))
        ratios = [b/a for (a, b) in zip(means, variances)]
        medianRatio = np.nanmedian(ratios)
        ratioDeviations = [(r/medianRatio)-1 for r in ratios]

        maxDeviationPositive = abs(maxDeviationPositive)
        maxDeviationNegative = -1. * abs(maxDeviationNegative)

        goodPoints = np.array([True if (r < maxDeviationPositive and r > maxDeviationNegative)
                               else False for r in ratioDeviations])
        return goodPoints
    def _makeZeroSafe(self, array, warn=True, substituteValue=1e-9):
        nBad = Counter(array)[0]
        if nBad == 0:
            return array
        if warn:
            msg = f"Found {nBad} zeros in array at elements {[x for x in np.where(array==0)[0]]}"
            self.log.warn(msg)
        array[array == 0] = substituteValue
        return array
858 """Fit the photon transfer curve to a polynimial or to Astier+19 approximation.
860 Fit the photon transfer curve with either a polynomial of the order
861 specified in the task config, or using the Astier approximation.
863 Sigma clipping is performed iteratively for the fit, as well as an
864 initial clipping of data points that are more than
865 config.initialNonLinearityExclusionThreshold away from lying on a
866 straight line. This other step is necessary because the photon transfer
867 curve turns over catastrophically at very high flux (because saturation
868 drops the variance to ~0) and these far outliers cause the initial fit
869 to fail, meaning the sigma cannot be calculated to perform the
874 dataset : `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset`
875 The dataset containing the means, variances and exposure times
878 Fit a 'POLYNOMIAL' (degree: 'polynomialFitDegree') or
879 'EXPAPPROXIMATION' (Eq. 16 of Astier+19) to the PTC
883 dataset: `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset`
884 This is the same dataset as the input paramter, however, it has been modified
885 to include information such as the fit vectors and the fit parameters. See
886 the class `PhotonTransferCurveDatase`.
        matrixSide = self.config.maximumRangeCovariancesAstier
        nanMatrix = np.empty((matrixSide, matrixSide))
        nanMatrix[:] = np.nan

        for amp in dataset.ampNames:
            lenInputTimes = len(dataset.rawExpTimes[amp])
            listNanMatrix = np.empty((lenInputTimes, matrixSide, matrixSide))
            listNanMatrix[:] = np.nan

            dataset.covariances[amp] = listNanMatrix
            dataset.covariancesModel[amp] = listNanMatrix
            dataset.covariancesSqrtWeights[amp] = listNanMatrix
            dataset.aMatrix[amp] = nanMatrix
            dataset.bMatrix[amp] = nanMatrix
            dataset.covariancesNoB[amp] = listNanMatrix
            dataset.covariancesModelNoB[amp] = listNanMatrix
            dataset.covariancesSqrtWeightsNoB[amp] = listNanMatrix
            dataset.aMatrixNoB[amp] = nanMatrix
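        # Residual function minimized by scipy.optimize.least_squares:
        # model minus data for the currently selected PTC function.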
        def errFunc(p, x, y):
            return ptcFunc(p, x) - y

        sigmaCutPtcOutliers = self.config.sigmaCutPtcOutliers
        maxIterationsPtcOutliers = self.config.maxIterationsPtcOutliers
        for i, ampName in enumerate(dataset.ampNames):
            timeVecOriginal = np.array(dataset.rawExpTimes[ampName])
            meanVecOriginal = np.array(dataset.rawMeans[ampName])
            varVecOriginal = np.array(dataset.rawVars[ampName])

            mask = ((meanVecOriginal >= self.config.minMeanSignal) &
                    (meanVecOriginal <= self.config.maxMeanSignal))

            goodPoints = self._getInitialGoodPoints(meanVecOriginal, varVecOriginal,
                                                    self.config.initialNonLinearityExclusionThresholdPositive,
                                                    self.config.initialNonLinearityExclusionThresholdNegative)
            if not (mask.any() and goodPoints.any()):
                msg = (f"\nSERIOUS: All points in either mask: {mask} or goodPoints: {goodPoints} are bad."
                       f" Setting {ampName} to BAD.")
                self.log.warn(msg)
                dataset.badAmps.append(ampName)
                dataset.expIdMask[ampName] = np.repeat(np.nan, len(dataset.rawExpTimes[ampName]))
                dataset.gain[ampName] = np.nan
                dataset.gainErr[ampName] = np.nan
                dataset.noise[ampName] = np.nan
                dataset.noiseErr[ampName] = np.nan
                dataset.ptcFitPars[ampName] = (np.repeat(np.nan, self.config.polynomialFitDegree + 1)
                                               if ptcFitType in ["POLYNOMIAL", ] else np.repeat(np.nan, 3))
                dataset.ptcFitParsError[ampName] = (np.repeat(np.nan, self.config.polynomialFitDegree + 1)
                                                    if ptcFitType in ["POLYNOMIAL", ] else np.repeat(np.nan, 3))
                dataset.ptcFitChiSq[ampName] = np.nan
                dataset.finalVars[ampName] = np.repeat(np.nan, len(dataset.rawExpTimes[ampName]))
                dataset.finalModelVars[ampName] = np.repeat(np.nan, len(dataset.rawExpTimes[ampName]))
                dataset.finalMeans[ampName] = np.repeat(np.nan, len(dataset.rawExpTimes[ampName]))
                continue

            mask = mask & goodPoints
            if ptcFitType == 'EXPAPPROXIMATION':
                ptcFunc = funcAstier
                parsIniPtc = [-1e-9, 1.0, 10.]
                bounds = self._boundsForAstier(parsIniPtc)
            if ptcFitType == 'POLYNOMIAL':
                ptcFunc = funcPolynomial
                parsIniPtc = self._initialParsForPolynomial(self.config.polynomialFitDegree + 1)
                bounds = self._boundsForPolynomial(parsIniPtc)
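            # Iteratively fit the PTC and reject outliers: at each iteration,
            # points whose residuals, normalized by the square root of the
            # measured variance, exceed sigmaCutPtcOutliers are dropped.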
            count = 1
            while count <= maxIterationsPtcOutliers:
                # Note that applying the mask shrinks the arrays to the size of the masked data.
                meanTempVec = meanVecOriginal[mask]
                varTempVec = varVecOriginal[mask]
                res = least_squares(errFunc, parsIniPtc, bounds=bounds, args=(meanTempVec, varTempVec))
                pars = res.x

                sigResids = (varVecOriginal - ptcFunc(pars, meanVecOriginal))/np.sqrt(varVecOriginal)
                newMask = np.array([True if np.abs(r) < sigmaCutPtcOutliers else False for r in sigResids])
                mask = mask & newMask
                if not (mask.any() and newMask.any()):
                    msg = (f"\nSERIOUS: All points in either mask: {mask} or newMask: {newMask} are bad. "
                           f"Setting {ampName} to BAD.")
                    self.log.warn(msg)
                    dataset.badAmps.append(ampName)
                    dataset.expIdMask[ampName] = np.repeat(np.nan, len(dataset.rawExpTimes[ampName]))
                    dataset.gain[ampName] = np.nan
                    dataset.gainErr[ampName] = np.nan
                    dataset.noise[ampName] = np.nan
                    dataset.noiseErr[ampName] = np.nan
                    dataset.ptcFitPars[ampName] = (np.repeat(np.nan, self.config.polynomialFitDegree + 1)
                                                   if ptcFitType in ["POLYNOMIAL", ] else
                                                   np.repeat(np.nan, 3))
                    dataset.ptcFitParsError[ampName] = (np.repeat(np.nan, self.config.polynomialFitDegree + 1)
                                                        if ptcFitType in ["POLYNOMIAL", ] else
                                                        np.repeat(np.nan, 3))
                    dataset.ptcFitChiSq[ampName] = np.nan
                    dataset.finalVars[ampName] = np.repeat(np.nan, len(dataset.rawExpTimes[ampName]))
                    dataset.finalModelVars[ampName] = np.repeat(np.nan, len(dataset.rawExpTimes[ampName]))
                    dataset.finalMeans[ampName] = np.repeat(np.nan, len(dataset.rawExpTimes[ampName]))
                    break

                nDroppedTotal = Counter(mask)[False]
                self.log.debug(f"Iteration {count}: discarded {nDroppedTotal} points in total for {ampName}")
                count += 1
            assert (len(mask) == len(timeVecOriginal) == len(meanVecOriginal) == len(varVecOriginal))

            if not (mask.any() and newMask.any()):
                continue
            dataset.expIdMask[ampName] = mask

            meanVecFinal = meanVecOriginal[mask]
            varVecFinal = varVecOriginal[mask]

            if Counter(mask)[False] > 0:
                self.log.info((f"Number of points discarded in PTC of amplifier {ampName}:" +
                               f" {Counter(mask)[False]} out of {len(meanVecOriginal)}"))
            if (len(meanVecFinal) < len(parsIniPtc)):
                msg = (f"\nSERIOUS: Not enough data points ({len(meanVecFinal)}) compared to the number of "
                       f"parameters of the PTC model ({len(parsIniPtc)}). Setting {ampName} to BAD.")
                self.log.warn(msg)
                dataset.badAmps.append(ampName)
                dataset.expIdMask[ampName] = np.repeat(np.nan, len(dataset.rawExpTimes[ampName]))
                dataset.gain[ampName] = np.nan
                dataset.gainErr[ampName] = np.nan
                dataset.noise[ampName] = np.nan
                dataset.noiseErr[ampName] = np.nan
                dataset.ptcFitPars[ampName] = (np.repeat(np.nan, self.config.polynomialFitDegree + 1)
                                               if ptcFitType in ["POLYNOMIAL", ] else np.repeat(np.nan, 3))
                dataset.ptcFitParsError[ampName] = (np.repeat(np.nan, self.config.polynomialFitDegree + 1)
                                                    if ptcFitType in ["POLYNOMIAL", ] else np.repeat(np.nan, 3))
                dataset.ptcFitChiSq[ampName] = np.nan
                dataset.finalVars[ampName] = np.repeat(np.nan, len(dataset.rawExpTimes[ampName]))
                dataset.finalModelVars[ampName] = np.repeat(np.nan, len(dataset.rawExpTimes[ampName]))
                dataset.finalMeans[ampName] = np.repeat(np.nan, len(dataset.rawExpTimes[ampName]))
                continue
            if self.config.doFitBootstrap:
                parsFit, parsFitErr, reducedChiSqPtc = fitBootstrap(parsIniPtc, meanVecFinal,
                                                                    varVecFinal, ptcFunc,
                                                                    weightsY=1./np.sqrt(varVecFinal))
            else:
                parsFit, parsFitErr, reducedChiSqPtc = fitLeastSq(parsIniPtc, meanVecFinal,
                                                                  varVecFinal, ptcFunc,
                                                                  weightsY=1./np.sqrt(varVecFinal))
            dataset.ptcFitPars[ampName] = parsFit
            dataset.ptcFitParsError[ampName] = parsFitErr
            dataset.ptcFitChiSq[ampName] = reducedChiSqPtc
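            # Pad the final vectors with NaN so that all per-amp arrays keep
            # the length of the input exposure list.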
            padLength = len(dataset.rawExpTimes[ampName]) - len(varVecFinal)
            dataset.finalVars[ampName] = np.pad(varVecFinal, (0, padLength),
                                                'constant', constant_values=np.nan)
            dataset.finalModelVars[ampName] = np.pad(ptcFunc(parsFit, meanVecFinal), (0, padLength),
                                                     'constant', constant_values=np.nan)
            dataset.finalMeans[ampName] = np.pad(meanVecFinal, (0, padLength),
                                                 'constant', constant_values=np.nan)
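            # Extract gain and read noise from the fitted parameters: for
            # EXPAPPROXIMATION (Eq. 16 of Astier+19) the gain is parsFit[1] and
            # the noise is sqrt(|parsFit[2]|); for POLYNOMIAL (var = c0 + c1*mu + ...)
            # the gain is 1/c1 and the noise in electrons is sqrt(|c0|)*gain.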
            if ptcFitType == 'EXPAPPROXIMATION':
                ptcGain = parsFit[1]
                ptcGainErr = parsFitErr[1]
                ptcNoise = np.sqrt(np.fabs(parsFit[2]))
                ptcNoiseErr = 0.5*(parsFitErr[2]/np.fabs(parsFit[2]))*np.sqrt(np.fabs(parsFit[2]))
            if ptcFitType == 'POLYNOMIAL':
                ptcGain = 1./parsFit[1]
                ptcGainErr = np.fabs(1./parsFit[1])*(parsFitErr[1]/parsFit[1])
                ptcNoise = np.sqrt(np.fabs(parsFit[0]))*ptcGain
                ptcNoiseErr = (0.5*(parsFitErr[0]/np.fabs(parsFit[0]))*(np.sqrt(np.fabs(parsFit[0]))))*ptcGain
            dataset.gain[ampName] = ptcGain
            dataset.gainErr[ampName] = ptcGainErr
            dataset.noise[ampName] = ptcNoise
            dataset.noiseErr[ampName] = ptcNoiseErr

        if not len(dataset.ptcFitType) == 0:
            dataset.ptcFitType = ptcFitType
        if len(dataset.badAmps) == 0:
            dataset.badAmps = np.repeat(np.nan, len(list(dataset.rawExpTimes.values())[0]))

        return dataset