23 from collections
import Counter
29 from scipy.optimize
import least_squares
33 from .astierCovPtcUtils
import fitData
# Public API of this module.
__all__ = ['PhotonTransferCurveSolveConfig',
           'PhotonTransferCurveSolveTask']
46 dimensions=(
"instrument",
"detector")):
47 inputCovariances = cT.Input(
48 name=
"ptcCovariances",
49 doc=
"Tuple with measured covariances from flats.",
50 storageClass=
"PhotonTransferCurveDataset",
51 dimensions=(
"instrument",
"exposure",
"detector"),
54 camera = cT.PrerequisiteInput(
56 doc=
"Camera the input data comes from.",
57 storageClass=
"Camera",
58 dimensions=(
"instrument",),
60 lookupFunction=lookupStaticCalibration,
62 outputPtcDataset = cT.Output(
63 name=
"ptcDatsetProposal",
64 doc=
"Output proposed ptc dataset.",
65 storageClass=
"PhotonTransferCurveDataset",
66 dimensions=(
"instrument",
"detector"),
73 pipelineConnections=PhotonTransferCurveSolveConnections):
74 """Configuration for fitting measured covariances.
76 ptcFitType = pexConfig.ChoiceField(
78 doc=
"Fit PTC to Eq. 16, Eq. 20 in Astier+19, or to a polynomial.",
81 "POLYNOMIAL":
"n-degree polynomial (use 'polynomialFitDegree' to set 'n').",
82 "EXPAPPROXIMATION":
"Approximation in Astier+19 (Eq. 16).",
83 "FULLCOVARIANCE":
"Full covariances model in Astier+19 (Eq. 20)"
86 maximumRangeCovariancesAstier = pexConfig.Field(
88 doc=
"Maximum range of covariances as in Astier+19",
91 sigmaClipFullFitCovariancesAstier = pexConfig.Field(
93 doc=
"sigma clip for full model fit for FULLCOVARIANCE ptcFitType ",
96 maxIterFullFitCovariancesAstier = pexConfig.Field(
98 doc=
"Maximum number of iterations in full model fit for FULLCOVARIANCE ptcFitType",
101 polynomialFitDegree = pexConfig.Field(
103 doc=
"Degree of polynomial to fit the PTC, when 'ptcFitType'=POLYNOMIAL.",
106 sigmaCutPtcOutliers = pexConfig.Field(
108 doc=
"Sigma cut for outlier rejection in PTC.",
111 maxIterationsPtcOutliers = pexConfig.Field(
113 doc=
"Maximum number of iterations for outlier rejection in PTC.",
116 initialNonLinearityExclusionThresholdPositive = pexConfig.RangeField(
118 doc=
"Initially exclude data points with a variance that are more than a factor of this from being"
119 " linear in the positive direction, from the PTC fit. Note that these points will also be"
120 " excluded from the non-linearity fit. This is done before the iterative outlier rejection,"
121 " to allow an accurate determination of the sigmas for said iterative fit.",
126 initialNonLinearityExclusionThresholdNegative = pexConfig.RangeField(
128 doc=
"Initially exclude data points with a variance that are more than a factor of this from being"
129 " linear in the negative direction, from the PTC fit. Note that these points will also be"
130 " excluded from the non-linearity fit. This is done before the iterative outlier rejection,"
131 " to allow an accurate determination of the sigmas for said iterative fit.",
136 minMeanRatioTest = pexConfig.Field(
138 doc=
"In the initial test to screen out bad points with a ratio test, points with low"
139 " flux can get inadvertently screened. This test only screens out points with flux"
140 " above this value.",
143 minVarPivotSearch = pexConfig.Field(
145 doc=
"The code looks for a pivot signal point after which the variance starts decreasing at high-flux"
146 " to exclude them from the PTC model fit. However, sometimes at low fluxes, the variance"
147 " decreases slightly. Set this variable for the variance value, in ADU^2, after which the pivot "
148 " should be sought.",
151 doFitBootstrap = pexConfig.Field(
153 doc=
"Use bootstrap for the PTC fit parameters and errors?.",
159 pipeBase.CmdLineTask):
160 """Task to fit the PTC from flat covariances.
161 This task assembles the list of individual PTC datasets produced
162 by `PhotonTransferCurveSolveTask` into one single final PTC dataset.
163 The task fits the measured (co)variances to a polynomial model or to
164 the models described in equations 16 and 20 of Astier+19
165 (referred to as `POLYNOMIAL`, `EXPAPPROXIMATION`, and `FULLCOVARIANCE`
166 in the configuration options of the task, respectively). Parameters
167 of interest such as the gain and noise are derived from the fits.
169 Astier+19: "The Shape of the Photon Transfer Curve
170 of CCD sensors", arXiv:1905.08677
# Config class consumed by this task.
ConfigClass = PhotonTransferCurveSolveConfig
# Default label used for this task in pipelines / command-line processing.
_DefaultName = 'cpPhotonTransferCurveSolve'
def runQuantum(self, butlerQC, inputRefs, outputRefs):
    """Ensure that the input and output dimensions are passed along.

    Parameters
    ----------
    butlerQC : `~lsst.daf.butler.butlerQuantumContext.ButlerQuantumContext`
        Butler to operate on.
    inputRefs : `~lsst.pipe.base.connections.InputQuantizedConnection`
        Input data refs to load.
    outputRefs : `~lsst.pipe.base.connections.OutputQuantizedConnection`
        Output data refs to persist.
    """
    inputs = butlerQC.get(inputRefs)
    # Adapt the butler quantum interface to the plain-python `run`
    # signature; all real work happens in `run`.
    outputs = self.run(inputCovariances=inputs['inputCovariances'], camera=inputs['camera'])
    butlerQC.put(outputs, outputRefs)
def run(self, inputCovariances, camera=None, inputExpList=None):
    """Fit measured covariances to different models.

    Parameters
    ----------
    inputCovariances : `list` [`lsst.ip.isr.PhotonTransferCurveDataset`]
        List of lsst.ip.isr.PhotonTransferCurveDataset datasets.
    camera : `lsst.afw.cameraGeom.Camera`, optional
        Input camera.
    inputExpList : `list` [`~lsst.afw.image.exposure.exposure.ExposureF`], optional
        List of input exposures; if provided, the detector is taken
        from the first one.

    Returns
    -------
    results : `lsst.pipe.base.Struct`
        The results struct containing:
        ``outputPtcDataset`` : `lsst.ip.isr.PhotonTransferCurveDataset`
            Final PTC dataset, containing information such as the means,
            variances, and exposure times.
    """
    # Assemble one combined dataset out of the per-exposure-pair partials.
    ampNames = np.unique(inputCovariances[0].ampNames)
    datasetPtc = PhotonTransferCurveDataset(ampNames, self.config.ptcFitType,
                                            self.config.maximumRangeCovariancesAstier)
    for partialPtcDataset in inputCovariances:
        # Dummy datasets contain no measurements; skip them.
        if partialPtcDataset.ptcFitType == 'DUMMY':
            continue
        for ampName in ampNames:
            datasetPtc.inputExpIdPairs[ampName].append(partialPtcDataset.inputExpIdPairs[ampName])
            if type(partialPtcDataset.rawExpTimes[ampName]) is list:
                datasetPtc.rawExpTimes[ampName].append(partialPtcDataset.rawExpTimes[ampName][0])
            else:
                datasetPtc.rawExpTimes[ampName].append(partialPtcDataset.rawExpTimes[ampName])
            if type(partialPtcDataset.rawMeans[ampName]) is list:
                datasetPtc.rawMeans[ampName].append(partialPtcDataset.rawMeans[ampName][0])
            else:
                datasetPtc.rawMeans[ampName].append(partialPtcDataset.rawMeans[ampName])
            if type(partialPtcDataset.rawVars[ampName]) is list:
                datasetPtc.rawVars[ampName].append(partialPtcDataset.rawVars[ampName][0])
            else:
                datasetPtc.rawVars[ampName].append(partialPtcDataset.rawVars[ampName])
            datasetPtc.covariances[ampName].append(np.array(partialPtcDataset.covariances[ampName][0]))
            datasetPtc.covariancesSqrtWeights[ampName].append(
                np.array(partialPtcDataset.covariancesSqrtWeights[ampName][0]))
    # Sort every per-amp vector by increasing mean signal.
    for ampName in ampNames:
        index = np.argsort(np.ravel(np.array(datasetPtc.rawMeans[ampName])))
        datasetPtc.inputExpIdPairs[ampName] = np.array(datasetPtc.inputExpIdPairs[ampName])[index]
        datasetPtc.rawExpTimes[ampName] = np.array(datasetPtc.rawExpTimes[ampName])[index]
        datasetPtc.rawMeans[ampName] = np.array(datasetPtc.rawMeans[ampName])[index]
        datasetPtc.rawVars[ampName] = np.array(datasetPtc.rawVars[ampName])[index]
        datasetPtc.covariances[ampName] = np.array(datasetPtc.covariances[ampName])[index]
        datasetPtc.covariancesSqrtWeights[ampName] = np.array(
            datasetPtc.covariancesSqrtWeights[ampName])[index]

    if self.config.ptcFitType == "FULLCOVARIANCE":
        # Full model of Astier+19 (Eq. 20). First run a preliminary
        # EXPAPPROXIMATION fit to obtain the flat-pair outlier masks,
        # then fit the full covariance model with those masks applied.
        tempDatasetPtc = copy.copy(datasetPtc)
        tempDatasetPtc.ptcFitType = "EXPAPPROXIMATION"
        tempDatasetPtc = self.fitPtc(tempDatasetPtc)
        for ampName in datasetPtc.ampNames:
            datasetPtc.expIdMask[ampName] = tempDatasetPtc.expIdMask[ampName]
        datasetPtc.fitType = "FULLCOVARIANCE"
        # NOTE(review): reconstructed line — the FULLCOVARIANCE branch must
        # actually run the full-covariance fit.
        datasetPtc = self.fitCovariancesAstier(datasetPtc)
    else:
        # Standard fits: POLYNOMIAL or EXPAPPROXIMATION (Eq. 16, Astier+19).
        datasetPtc = self.fitPtc(datasetPtc)
    if inputExpList is not None:
        # List of exposures provided: take the detector from the first one.
        detector = inputExpList[0].getDetector()
    else:
        detector = None
    datasetPtc.updateMetadata(setDate=True, camera=camera, detector=detector)

    return pipeBase.Struct(
        outputPtcDataset=datasetPtc,
    )
def fitCovariancesAstier(self, dataset):
    """Fit measured flat covariances to full model in Astier+19.

    Parameters
    ----------
    dataset : `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset`
        The dataset containing information such as the means, (co)variances,
        and exposure times.

    Returns
    -------
    dataset : `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset`
        This is the same dataset as the input parameter, however, it has been
        modified to include information such as the fit vectors and the fit
        parameters. See the class `PhotonTransferCurveDataset`.
    """
    covFits, covFitsNoB = fitData(dataset,
                                  r=self.config.maximumRangeCovariancesAstier)
    # Copy the fit results (model covariances, a/b matrices, gains, noise)
    # back into the PTC dataset.
    dataset = self.getOutputPtcDataCovAstier(dataset, covFits, covFitsNoB)
    return dataset
def getOutputPtcDataCovAstier(self, dataset, covFits, covFitsNoB):
    """Get output data for PhotonTransferCurveCovAstierDataset from CovFit objects.

    Parameters
    ----------
    dataset : `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset`
        The dataset containing information such as the means, variances
        and exposure times.
    covFits : `dict`
        Dictionary of CovFit objects, with amp names as keys.
    covFitsNoB : `dict`
        Dictionary of CovFit objects, with amp names as keys, and 'b=0' in
        Eq. 20 of Astier+19.

    Returns
    -------
    dataset : `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset`
        This is the same dataset as the input parameter, however, it has been
        modified to include extra information such as the mask 1D array, gains,
        readout noise, measured signal, measured variance, modeled variance,
        a, and b coefficient matrices (see Astier+19) per amplifier. See the
        class `PhotonTransferCurveDataset`.
    """
    assert(len(covFits) == len(covFitsNoB))

    for amp in dataset.ampNames:
        lenInputTimes = len(dataset.rawExpTimes[amp])
        # These entries are not used when ptcFitType is 'FULLCOVARIANCE'.
        dataset.ptcFitPars[amp] = [np.nan]
        dataset.ptcFitParsError[amp] = [np.nan]
        dataset.ptcFitChiSq[amp] = np.nan
        if amp in covFits:
            fit = covFits[amp]
            fitNoB = covFitsNoB[amp]
            # Save measured and modeled covariances, and their weights.
            dataset.covariances[amp] = fit.cov
            dataset.covariancesModel[amp] = fit.evalCovModel()
            dataset.covariancesSqrtWeights[amp] = fit.sqrtW
            dataset.aMatrix[amp] = fit.getA()
            dataset.bMatrix[amp] = fit.getB()
            dataset.covariancesModelNoB[amp] = fitNoB.evalCovModel()
            dataset.aMatrixNoB[amp] = fitNoB.getA()

            # The (0, 0) lag of the covariance is the PTC variance itself.
            (meanVecFinal, varVecFinal, varVecModel,
                wc, varMask) = fit.getFitData(0, 0, divideByMu=False)
            gain = fit.getGain()

            dataset.gain[amp] = gain
            dataset.gainErr[amp] = fit.getGainErr()
            dataset.noise[amp] = np.sqrt(fit.getRon())
            dataset.noiseErr[amp] = fit.getRonErr()
            dataset.finalVars[amp] = varVecFinal
            dataset.finalModelVars[amp] = varVecModel
            dataset.finalMeans[amp] = meanVecFinal
        else:
            # Bad amplifier: no fit available, fill everything with NaN.
            matrixSide = self.config.maximumRangeCovariancesAstier
            nanMatrix = np.full((matrixSide, matrixSide), np.nan)
            listNanMatrix = np.full((lenInputTimes, matrixSide, matrixSide), np.nan)

            dataset.covariances[amp] = listNanMatrix
            dataset.covariancesModel[amp] = listNanMatrix
            dataset.covariancesSqrtWeights[amp] = listNanMatrix
            dataset.aMatrix[amp] = nanMatrix
            dataset.bMatrix[amp] = nanMatrix
            dataset.covariancesModelNoB[amp] = listNanMatrix
            dataset.aMatrixNoB[amp] = nanMatrix

            dataset.expIdMask[amp] = np.repeat(np.nan, lenInputTimes)
            dataset.gain[amp] = np.nan
            dataset.gainErr[amp] = np.nan
            dataset.noise[amp] = np.nan
            dataset.noiseErr[amp] = np.nan
            dataset.finalVars[amp] = np.repeat(np.nan, lenInputTimes)
            dataset.finalModelVars[amp] = np.repeat(np.nan, lenInputTimes)
            dataset.finalMeans[amp] = np.repeat(np.nan, lenInputTimes)

    return dataset
380 def _initialParsForPolynomial(order):
382 pars = np.zeros(order, dtype=np.float)
389 def _boundsForPolynomial(initialPars, lowers=[], uppers=[]):
391 lowers = [np.NINF
for p
in initialPars]
393 uppers = [np.inf
for p
in initialPars]
395 return (lowers, uppers)
398 def _boundsForAstier(initialPars, lowers=[], uppers=[]):
400 lowers = [np.NINF
for p
in initialPars]
402 uppers = [np.inf
for p
in initialPars]
403 return (lowers, uppers)
406 def _getInitialGoodPoints(means, variances, maxDeviationPositive, maxDeviationNegative,
407 minMeanRatioTest, minVarPivotSearch):
408 """Return a boolean array to mask bad points.
412 means : `numpy.array`
413 Input array with mean signal values.
414 variances : `numpy.array`
415 Input array with variances at each mean value.
416 maxDeviationPositive : `float`
417 Maximum deviation from being constant for the variance/mean
418 ratio, in the positive direction.
419 maxDeviationNegative : `float`
420 Maximum deviation from being constant for the variance/mean
421 ratio, in the negative direction.
422 minMeanRatioTest : `float`
423 Minimum signal value (in ADU) after which to start examining
425 minVarPivotSearch : `float`
426 Minimum variance point (in ADU^2) after which the pivot point
427 wher the variance starts decreasing should be sought.
431 goodPoints : `numpy.array` [`bool`]
432 Boolean array to select good (`True`) and bad (`False`)
437 A linear function has a constant ratio, so find the median
438 value of the ratios, and exclude the points that deviate
439 from that by more than a factor of maxDeviationPositive/negative.
440 Asymmetric deviations are supported as we expect the PTC to turn
441 down as the flux increases, but sometimes it anomalously turns
442 upwards just before turning over, which ruins the fits, so it
443 is wise to be stricter about restricting positive outliers than
445 Too high and points that are so bad that fit will fail will be included
446 Too low and the non-linear points will be excluded, biasing the NL fit.
447 This function also masks points after the variance starts decreasing.
450 assert(len(means) == len(variances))
451 ratios = [b/a
for (a, b)
in zip(means, variances)]
452 medianRatio = np.nanmedian(ratios)
453 ratioDeviations = [0.0
if a < minMeanRatioTest
else (r/medianRatio)-1
454 for (a, r)
in zip(means, ratios)]
457 maxDeviationPositive = abs(maxDeviationPositive)
458 maxDeviationNegative = -1. * abs(maxDeviationNegative)
460 goodPoints = np.array([
True if (r < maxDeviationPositive
and r > maxDeviationNegative)
461 else False for r
in ratioDeviations])
464 pivot = np.where(np.array(np.diff(variances)) < 0)[0]
468 pivot = [p
for p
in pivot
if variances[p] > minVarPivotSearch]
470 pivot = np.min(pivot)
471 goodPoints[pivot+1:len(goodPoints)] =
False
475 def _makeZeroSafe(self, array, substituteValue=1e-9):
477 array = np.array(array)
478 nBad = Counter(np.ravel(array))[0]
482 index, = np.where(array == 0)
484 msg = f
"Found {nBad} zeros in array at elements {index}"
487 array[index] = substituteValue
def fitPtc(self, dataset):
    """Fit the photon transfer curve to a polynomial or to Astier+19 approximation.

    Fit the photon transfer curve with either a polynomial of the order
    specified in the task config, or using the exponential approximation
    in Astier+19 (Eq. 16).

    Sigma clipping is performed iteratively for the fit, as well as an
    initial clipping of data points that are more than
    config.initialNonLinearityExclusionThreshold away from lying on a
    straight line. This other step is necessary because the photon transfer
    curve turns over catastrophically at very high flux (because saturation
    drops the variance to ~0) and these far outliers cause the initial fit
    to fail, meaning the sigma cannot be calculated to perform the
    sigma-clipping.

    Parameters
    ----------
    dataset : `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset`
        The dataset containing the means, variances and exposure times.

    Returns
    -------
    dataset : `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset`
        This is the same dataset as the input parameter, however, it has been
        modified to include information such as the fit vectors and the fit
        parameters. See the class `PhotonTransferCurveDataset`.

    Raises
    ------
    RuntimeError
        Raises if dataset.ptcFitType is None or empty.
    """
    if dataset.ptcFitType:
        ptcFitType = dataset.ptcFitType
    else:
        # Fixed message typo: "None of empty" -> "None or empty".
        raise RuntimeError("ptcFitType is None or empty in PTC dataset.")
    matrixSide = self.config.maximumRangeCovariancesAstier
    nanMatrix = np.empty((matrixSide, matrixSide))
    nanMatrix[:] = np.nan

    # Pre-fill the covariance-model entries with NaN; they are only
    # produced by the FULLCOVARIANCE path.
    for amp in dataset.ampNames:
        lenInputTimes = len(dataset.rawExpTimes[amp])
        listNanMatrix = np.empty((lenInputTimes, matrixSide, matrixSide))
        listNanMatrix[:] = np.nan

        dataset.covariancesModel[amp] = listNanMatrix
        dataset.aMatrix[amp] = nanMatrix
        dataset.bMatrix[amp] = nanMatrix
        dataset.covariancesModelNoB[amp] = listNanMatrix
        dataset.aMatrixNoB[amp] = nanMatrix

    def errFunc(p, x, y):
        # Residuals for `scipy.optimize.least_squares`.
        return ptcFunc(p, x) - y

    sigmaCutPtcOutliers = self.config.sigmaCutPtcOutliers
    maxIterationsPtcOutliers = self.config.maxIterationsPtcOutliers

    for ampName in dataset.ampNames:
        timeVecOriginal = np.ravel(np.array(dataset.rawExpTimes[ampName]))
        meanVecOriginal = np.ravel(np.array(dataset.rawMeans[ampName]))
        varVecOriginal = np.ravel(np.array(dataset.rawVars[ampName]))
        varVecOriginal = self._makeZeroSafe(varVecOriginal)

        goodPoints = self._getInitialGoodPoints(meanVecOriginal, varVecOriginal,
                                                self.config.initialNonLinearityExclusionThresholdPositive,
                                                self.config.initialNonLinearityExclusionThresholdNegative,
                                                self.config.minMeanRatioTest,
                                                self.config.minVarPivotSearch)
        if not (goodPoints.any()):
            msg = (f"SERIOUS: All points in goodPoints: {goodPoints} are bad. "
                   f"Setting {ampName} to BAD.")
            self.log.warn(msg)
            # Fill entries with NaNs
            self.fillBadAmp(dataset, ptcFitType, ampName)
            continue

        mask = goodPoints
        # Guard against NameError below when maxIterationsPtcOutliers < 1
        # (the while loop would never bind newMask).
        newMask = goodPoints

        if ptcFitType == 'EXPAPPROXIMATION':
            ptcFunc = funcAstier
            parsIniPtc = [-1e-9, 1.0, 10.]  # a00, gain, noise
            bounds = self._boundsForAstier(parsIniPtc, lowers=[-1e-4, 0.5, -2000],
                                           uppers=[1e-4, 2.5, 2000])
        if ptcFitType == 'POLYNOMIAL':
            ptcFunc = funcPolynomial
            parsIniPtc = self._initialParsForPolynomial(self.config.polynomialFitDegree + 1)
            bounds = self._boundsForPolynomial(parsIniPtc)

        # Iteratively fit and sigma-clip outliers before the final fit.
        count = 1
        while count <= maxIterationsPtcOutliers:
            # Applying the mask shrinks the arrays, so always keep the
            # cumulative mask and re-apply it to the original data.
            meanTempVec = meanVecOriginal[mask]
            varTempVec = varVecOriginal[mask]
            res = least_squares(errFunc, parsIniPtc, bounds=bounds, args=(meanTempVec, varTempVec))
            pars = res.x

            # Variance-weighted residuals evaluated on the *unmasked* data
            # so previously rejected points may re-enter the fit.
            sigResids = (varVecOriginal - ptcFunc(pars, meanVecOriginal))/np.sqrt(varVecOriginal)
            newMask = np.array([np.abs(r) < sigmaCutPtcOutliers for r in sigResids])
            mask = mask & newMask
            if not (mask.any() and newMask.any()):
                msg = (f"SERIOUS: All points in either mask: {mask} or newMask: {newMask} are bad. "
                       f"Setting {ampName} to BAD.")
                self.log.warn(msg)
                # Fill entries with NaNs
                self.fillBadAmp(dataset, ptcFitType, ampName)
                break
            nDroppedTotal = Counter(mask)[False]
            self.log.debug(f"Iteration {count}: discarded {nDroppedTotal} points in total for {ampName}")
            count += 1
            # The masks are cumulative; the arrays themselves never shrink.
            assert (len(mask) == len(timeVecOriginal) == len(meanVecOriginal) == len(varVecOriginal))

        if not (mask.any() and newMask.any()):
            # Amp was flagged as bad inside the loop above.
            continue
        dataset.expIdMask[ampName] = mask  # store the final mask
        parsIniPtc = pars
        meanVecFinal = meanVecOriginal[mask]
        varVecFinal = varVecOriginal[mask]

        if Counter(mask)[False] > 0:
            self.log.info((f"Number of points discarded in PTC of amplifier {ampName}:"
                           f" {Counter(mask)[False]} out of {len(meanVecOriginal)}"))

        if (len(meanVecFinal) < len(parsIniPtc)):
            msg = (f"SERIOUS: Not enough data points ({len(meanVecFinal)}) compared to the number of "
                   f"parameters of the PTC model({len(parsIniPtc)}). Setting {ampName} to BAD.")
            self.log.warn(msg)
            # Fill entries with NaNs
            self.fillBadAmp(dataset, ptcFitType, ampName)
            continue
        # Final fit of the PTC.
        if self.config.doFitBootstrap:
            parsFit, parsFitErr, reducedChiSqPtc = fitBootstrap(parsIniPtc, meanVecFinal,
                                                                varVecFinal, ptcFunc,
                                                                weightsY=1./np.sqrt(varVecFinal))
        else:
            parsFit, parsFitErr, reducedChiSqPtc = fitLeastSq(parsIniPtc, meanVecFinal,
                                                              varVecFinal, ptcFunc,
                                                              weightsY=1./np.sqrt(varVecFinal))
        dataset.ptcFitPars[ampName] = parsFit
        dataset.ptcFitParsError[ampName] = parsFitErr
        dataset.ptcFitChiSq[ampName] = reducedChiSqPtc
        # Pad the rejected positions with NaN so the final vectors keep the
        # length of the input (number of flat pairs).
        padLength = len(dataset.rawExpTimes[ampName]) - len(varVecFinal)
        dataset.finalVars[ampName] = np.pad(varVecFinal, (0, padLength), 'constant',
                                            constant_values=np.nan)
        dataset.finalModelVars[ampName] = np.pad(ptcFunc(parsFit, meanVecFinal), (0, padLength),
                                                 'constant', constant_values=np.nan)
        dataset.finalMeans[ampName] = np.pad(meanVecFinal, (0, padLength), 'constant',
                                             constant_values=np.nan)
        if ptcFitType == 'EXPAPPROXIMATION':
            # Astier+19 Eq. 16 parameters: [a00, gain, noise^2].
            ptcGain = parsFit[1]
            ptcGainErr = parsFitErr[1]
            ptcNoise = np.sqrt(np.fabs(parsFit[2]))
            ptcNoiseErr = 0.5*(parsFitErr[2]/np.fabs(parsFit[2]))*np.sqrt(np.fabs(parsFit[2]))
        if ptcFitType == 'POLYNOMIAL':
            # For the polynomial fit the linear coefficient is 1/gain.
            ptcGain = 1./parsFit[1]
            ptcGainErr = np.fabs(1./parsFit[1])*(parsFitErr[1]/parsFit[1])
            ptcNoise = np.sqrt(np.fabs(parsFit[0]))*ptcGain
            ptcNoiseErr = (0.5*(parsFitErr[0]/np.fabs(parsFit[0]))*(np.sqrt(np.fabs(parsFit[0]))))*ptcGain
        dataset.gain[ampName] = ptcGain
        dataset.gainErr[ampName] = ptcGainErr
        dataset.noise[ampName] = ptcNoise
        dataset.noiseErr[ampName] = ptcNoiseErr
    if not len(dataset.ptcFitType) == 0:
        dataset.ptcFitType = ptcFitType
    if len(dataset.badAmps) == 0:
        dataset.badAmps = np.repeat(np.nan, len(list(dataset.rawExpTimes.values())[0]))

    return dataset
def fillBadAmp(self, dataset, ptcFitType, ampName):
    """Fill the dataset with NaNs if there are not enough good points.

    Parameters
    ----------
    dataset : `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset`
        The dataset containing the means, variances and exposure times.
    ptcFitType : `str`
        Fit a 'POLYNOMIAL' (degree: 'polynomialFitDegree') or
        'EXPAPPROXIMATION' (Eq. 16 of Astier+19) to the PTC.
    ampName : `str`
        Amplifier name.
    """
    dataset.badAmps.append(ampName)
    # Number of flat pairs for this amp; every per-pair vector gets this
    # length (hoisted out of the repeated len() calls below).
    nPoints = len(dataset.rawExpTimes[ampName])
    dataset.expIdMask[ampName] = np.repeat(False, nPoints)
    dataset.gain[ampName] = np.nan
    dataset.gainErr[ampName] = np.nan
    dataset.noise[ampName] = np.nan
    dataset.noiseErr[ampName] = np.nan
    # POLYNOMIAL fits have polynomialFitDegree+1 parameters; the
    # EXPAPPROXIMATION model (Eq. 16 of Astier+19) has 3.
    nPars = (self.config.polynomialFitDegree + 1 if ptcFitType in ["POLYNOMIAL", ]
             else 3)
    dataset.ptcFitPars[ampName] = np.repeat(np.nan, nPars)
    dataset.ptcFitParsError[ampName] = np.repeat(np.nan, nPars)
    dataset.ptcFitChiSq[ampName] = np.nan
    dataset.finalVars[ampName] = np.repeat(np.nan, nPoints)
    dataset.finalModelVars[ampName] = np.repeat(np.nan, nPoints)
    dataset.finalMeans[ampName] = np.repeat(np.nan, nPoints)
def _boundsForPolynomial(initialPars, lowers=[], uppers=[])
def runQuantum(self, butlerQC, inputRefs, outputRefs)
def fitCovariancesAstier(self, dataset)
def run(self, inputCovariances, camera=None, inputExpList=None)
def _makeZeroSafe(self, array, substituteValue=1e-9)
def _boundsForAstier(initialPars, lowers=[], uppers=[])
def fitPtc(self, dataset)
def _initialParsForPolynomial(order)
def _getInitialGoodPoints(means, variances, maxDeviationPositive, maxDeviationNegative, minMeanRatioTest, minVarPivotSearch)
def getOutputPtcDataCovAstier(self, dataset, covFits, covFitsNoB)
def fillBadAmp(self, dataset, ptcFitType, ampName)
def fitData(dataset, r=8)
def fitBootstrap(initialParams, dataX, dataY, function, weightsY=None, confidenceSigma=1.)
def fitLeastSq(initialParams, dataX, dataY, function, weightsY=None)