Coverage for python/lsst/cp/pipe/ptc/cpSolvePtcTask.py: 11%
458 statements
« prev ^ index » next coverage.py v7.3.3, created at 2023-12-15 13:14 +0000
1# This file is part of cp_pipe.
2#
3# Developed for the LSST Data Management System.
4# This product includes software developed by the LSST Project
5# (https://www.lsst.org).
6# See the COPYRIGHT file at the top-level directory of this distribution
7# for details of code ownership.
8#
9# This program is free software: you can redistribute it and/or modify
10# it under the terms of the GNU General Public License as published by
11# the Free Software Foundation, either version 3 of the License, or
12# (at your option) any later version.
13#
14# This program is distributed in the hope that it will be useful,
15# but WITHOUT ANY WARRANTY; without even the implied warranty of
16# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17# GNU General Public License for more details.
18#
19# You should have received a copy of the GNU General Public License
20# along with this program. If not, see <https://www.gnu.org/licenses/>.
21#
22import numpy as np
23from collections import Counter
25import lsst.pex.config as pexConfig
26import lsst.pipe.base as pipeBase
27from lsst.cp.pipe.utils import (fitLeastSq, fitBootstrap, funcPolynomial, funcAstier, symmetrize)
29from scipy.signal import fftconvolve
30from scipy.optimize import least_squares
31from itertools import groupby
32from operator import itemgetter
34import lsst.pipe.base.connectionTypes as cT
36from lsst.ip.isr import PhotonTransferCurveDataset
38import copy
# Public API of this module.
__all__ = ['PhotonTransferCurveSolveConfig', 'PhotonTransferCurveSolveTask']
class PhotonTransferCurveSolveConnections(pipeBase.PipelineTaskConnections,
                                          dimensions=("instrument", "detector")):
    """Connections for PhotonTransferCurveSolveTask: per-exposure partial
    PTC datasets in, assembled per-detector PTC dataset out.
    """
    inputCovariances = cT.Input(
        name="ptcCovariances",
        doc="Tuple with measured covariances from flats.",
        storageClass="PhotonTransferCurveDataset",
        dimensions=("instrument", "exposure", "detector"),
        isCalibration=True,
        multiple=True,
    )
    camera = cT.PrerequisiteInput(
        name="camera",
        doc="Camera the input data comes from.",
        storageClass="Camera",
        dimensions=("instrument",),
        isCalibration=True,
    )
    outputPtcDataset = cT.Output(
        # NOTE(review): "ptcDatsetProposal" looks like a typo of
        # "ptcDatasetProposal", but it is a persisted dataset-type name;
        # renaming it would break existing pipelines/repositories.
        name="ptcDatsetProposal",
        doc="Output proposed ptc dataset.",
        storageClass="PhotonTransferCurveDataset",
        dimensions=("instrument", "detector"),
        multiple=False,
        isCalibration=True,
    )
class PhotonTransferCurveSolveConfig(pipeBase.PipelineTaskConfig,
                                     pipelineConnections=PhotonTransferCurveSolveConnections):
    """Configuration for fitting measured covariances.
    """
    # Which PTC model to fit; see the task docstring for details.
    ptcFitType = pexConfig.ChoiceField(
        dtype=str,
        doc="Fit PTC to Eq. 16, Eq. 20 in Astier+19, or to a polynomial.",
        default="POLYNOMIAL",
        allowed={
            "POLYNOMIAL": "n-degree polynomial (use 'polynomialFitDegree' to set 'n').",
            "EXPAPPROXIMATION": "Approximation in Astier+19 (Eq. 16).",
            "FULLCOVARIANCE": "Full covariances model in Astier+19 (Eq. 20)"
        }
    )
    # Per-amp signal cuts; the key 'ALL_AMPS' applies one value to every amp.
    minMeanSignal = pexConfig.DictField(
        keytype=str,
        itemtype=float,
        doc="Minimum values (inclusive) of mean signal (in ADU) per amp to use."
        " The same cut is applied to all amps if this parameter [`dict`] is passed as "
        " {'ALL_AMPS': value}",
        default={'ALL_AMPS': 0.0},
    )
    maxMeanSignal = pexConfig.DictField(
        keytype=str,
        itemtype=float,
        doc="Maximum values (inclusive) of mean signal (in ADU) below which to consider, per amp."
        " The same cut is applied to all amps if this dictionary is of the form"
        " {'ALL_AMPS': value}",
        default={'ALL_AMPS': 1e6},
    )
    maximumRangeCovariancesAstier = pexConfig.Field(
        dtype=int,
        doc="Maximum range of covariances as in Astier+19",
        default=8,
    )
    sigmaClipFullFitCovariancesAstier = pexConfig.Field(
        dtype=float,
        doc="sigma clip for full model fit for FULLCOVARIANCE ptcFitType ",
        default=5.0,
    )
    maxIterFullFitCovariancesAstier = pexConfig.Field(
        dtype=int,
        doc="Maximum number of iterations in full model fit for FULLCOVARIANCE ptcFitType",
        default=3,
    )
    polynomialFitDegree = pexConfig.Field(
        dtype=int,
        doc="Degree of polynomial to fit the PTC, when 'ptcFitType'=POLYNOMIAL.",
        default=3,
    )
    # PTC turnoff selection: legacy variance-pivot search vs. KS-test p-value cut.
    doLegacyTurnoffSelection = pexConfig.Field(
        dtype=bool,
        doc="Use 'legacy' computation for PTC turnoff selection. If set "
        "to False, then the KS test p-value selection will be used instead.",
        default=False,
    )
    sigmaCutPtcOutliers = pexConfig.Field(
        dtype=float,
        doc="Sigma cut for outlier rejection in PTC.",
        default=5.0,
    )
    maxIterationsPtcOutliers = pexConfig.RangeField(
        dtype=int,
        doc="Maximum number of iterations for outlier rejection in PTC.",
        default=2,
        min=0
    )
    maxSignalInitialPtcOutlierFit = pexConfig.Field(
        dtype=float,
        doc="Maximum signal considered for intial outlier fit. This should be below "
        "the PTC turnoff to ensure accurate outlier rejection. If "
        "scaleMaxSignalInitialPtcOutlierFit=True then the units are electrons; "
        "otherwise ADU.",
        default=50_000.,
    )
    scaleMaxSignalInitialPtcOutlierFit = pexConfig.Field(
        dtype=bool,
        doc="Scale maxSignalInitialPtcOutlierFit by approximate gain? If yes then "
        "maxSignalInitialPtcOutlierFit is assumed to have units of electrons, "
        "otherwise ADU.",
        default=True,
    )
    minVarPivotSearch = pexConfig.Field(
        dtype=float,
        doc="The code looks for a pivot signal point after which the variance starts decreasing at high-flux"
        " to exclude then from the PTC model fit. However, sometimes at low fluxes, the variance"
        " decreases slightly. Set this variable for the variance value, in ADU^2, after which the pivot "
        " should be sought. Only used if doLegacyTurnoffSelection is True.",
        default=10000,
    )
    consecutivePointsVarDecreases = pexConfig.RangeField(
        dtype=int,
        doc="Required number of consecutive points/fluxes in the PTC where the variance "
        "decreases in order to find a first estimate of the PTC turn-off. "
        "Only used if doLegacyTurnoffSelection is True.",
        default=2,
        min=2
    )
    ksTestMinPvalue = pexConfig.Field(
        dtype=float,
        doc="Minimum value of the Gaussian histogram KS test p-value to be used in PTC fit. "
        "Only used if doLegacyTurnoffSelection is False.",
        default=0.01,
    )
    doFitBootstrap = pexConfig.Field(
        dtype=bool,
        doc="Use bootstrap for the PTC fit parameters and errors?.",
        default=False,
    )
    binSize = pexConfig.Field(
        dtype=int,
        doc="Bin the image by this factor in both dimensions.",
        default=1,
    )
class PhotonTransferCurveSolveTask(pipeBase.PipelineTask):
    """Task to fit the PTC from flat covariances.

    The first task of the PTC measurement pipeline,
    ``PhotonTransferCurveMeasureTask`` (and assumed to have been run
    before this task), produced a list of
    `~lsst.ip.isr.PhotonTransferCurveDataset` objects. Each dataset
    contains the mean signal and covariances of the
    difference image of the flat-field images taken at
    the same exposure time. The list also contains dummy
    datasets (with no measurements), whose purpose is to have
    the input and output dimensions of ``PhotonTransferCurveMeasureTask``
    match.

    This task, ``PhotonTransferCurveSolveTask``, assembles the list
    of individual PTC datasets produced
    by ``PhotonTransferCurveMeasureTask`` into one single final PTC
    dataset, discarding the dummy dataset as appropriate.
    The task fits the measured (co)variances to one of three models:
    a polynomial model of a given order, or the models described
    in equations 16 and 20 of Astier+19. These options are referred
    to as ``POLYNOMIAL``, ``EXPAPPROXIMATION``, and ``FULLCOVARIANCE``,
    respectively, in the configuration options of the task.
    Parameters of interest such as the gain and noise are derived
    from the fits. The ``FULLCOVARIANCE`` model is fitted to the
    full covariance data (as opposed to the other two models, which
    are fit to the variance vs mean measurements only).

    Astier+19: "The Shape of the Photon Transfer Curve
    of CCD sensors", arXiv:1905.08677
    """

    ConfigClass = PhotonTransferCurveSolveConfig
    _DefaultName = 'cpPhotonTransferCurveSolve'
223 def runQuantum(self, butlerQC, inputRefs, outputRefs):
224 """Ensure that the input and output dimensions are passed along.
226 Parameters
227 ----------
228 butlerQC : `~lsst.daf.butler.butlerQuantumContext.ButlerQuantumContext`
229 Butler to operate on.
230 inputRefs : `~lsst.pipe.base.connections.InputQuantizedConnection`
231 Input data refs to load.
232 ouptutRefs : `~lsst.pipe.base.connections.OutputQuantizedConnection`
233 Output data refs to persist.
234 """
235 inputs = butlerQC.get(inputRefs)
236 detId = inputRefs.inputCovariances[0].dataId['detector']
237 outputs = self.run(inputCovariances=inputs['inputCovariances'], camera=inputs['camera'], detId=detId)
238 butlerQC.put(outputs, outputRefs)
    def run(self, inputCovariances, camera=None, detId=0):
        """Fit measured covariances to different models.

        Parameters
        ----------
        inputCovariances : `list` [`lsst.ip.isr.PhotonTransferCurveDataset`]
            List of lsst.ip.isr.PhotonTransferCurveDataset datasets.
        camera : `lsst.afw.cameraGeom.Camera`, optional
            Input camera.
        detId : `int`
            Detector ID to locate the detector in the camera and
            populate the `lsst.ip.isr.PhotonTransferCurveDataset`
            metadata.

        Returns
        -------
        results : `lsst.pipe.base.Struct`
            The resulting structure contains:

            ``outputPtcDataset``
                Final PTC dataset, containing information such as the
                means, variances, and exposure times
                (`lsst.ip.isr.PhotonTransferCurveDataset`).
        """
        # Find the ampNames from a non-dummy ptc.
        ampNames = []
        for partialPtcDataset in inputCovariances:
            if partialPtcDataset.ptcFitType != 'DUMMY':
                ampNames = partialPtcDataset.ampNames
                break

        # Each amp may have a different min and max ADU signal
        # specified in the config.
        maxMeanSignalDict = {ampName: 1e6 for ampName in ampNames}
        minMeanSignalDict = {ampName: 0.0 for ampName in ampNames}
        for ampName in ampNames:
            # 'ALL_AMPS' takes precedence over a per-amp entry.
            if 'ALL_AMPS' in self.config.maxMeanSignal:
                maxMeanSignalDict[ampName] = self.config.maxMeanSignal['ALL_AMPS']
            elif ampName in self.config.maxMeanSignal:
                maxMeanSignalDict[ampName] = self.config.maxMeanSignal[ampName]

            if 'ALL_AMPS' in self.config.minMeanSignal:
                minMeanSignalDict[ampName] = self.config.minMeanSignal['ALL_AMPS']
            elif ampName in self.config.minMeanSignal:
                minMeanSignalDict[ampName] = self.config.minMeanSignal[ampName]

        # Assemble individual PTC datasets into a single PTC dataset.
        datasetPtc = PhotonTransferCurveDataset(ampNames=ampNames,
                                                ptcFitType=self.config.ptcFitType,
                                                covMatrixSide=self.config.maximumRangeCovariancesAstier)
        for partialPtcDataset in inputCovariances:
            # Ignore dummy datasets
            if partialPtcDataset.ptcFitType == 'DUMMY':
                continue
            for ampName in ampNames:
                # The partial dataset consists of lists of values for each
                # quantity. In the case of the input exposure pairs, this is a
                # list of tuples. In all cases we only want the first
                # (and only) element of the list.
                datasetPtc.inputExpIdPairs[ampName].append(partialPtcDataset.inputExpIdPairs[ampName][0])
                datasetPtc.rawExpTimes[ampName] = np.append(datasetPtc.rawExpTimes[ampName],
                                                            partialPtcDataset.rawExpTimes[ampName][0])
                datasetPtc.rawMeans[ampName] = np.append(datasetPtc.rawMeans[ampName],
                                                         partialPtcDataset.rawMeans[ampName][0])
                datasetPtc.rawVars[ampName] = np.append(datasetPtc.rawVars[ampName],
                                                        partialPtcDataset.rawVars[ampName][0])
                datasetPtc.photoCharges[ampName] = np.append(datasetPtc.photoCharges[ampName],
                                                             partialPtcDataset.photoCharges[ampName][0])
                datasetPtc.histVars[ampName] = np.append(datasetPtc.histVars[ampName],
                                                         partialPtcDataset.histVars[ampName][0])
                datasetPtc.histChi2Dofs[ampName] = np.append(datasetPtc.histChi2Dofs[ampName],
                                                             partialPtcDataset.histChi2Dofs[ampName][0])
                datasetPtc.kspValues[ampName] = np.append(datasetPtc.kspValues[ampName],
                                                          partialPtcDataset.kspValues[ampName][0])
                # Covariances are (covMatrixSide, covMatrixSide) per pair;
                # flatten, append, and restore the (nPairs, M, M) shape.
                datasetPtc.covariances[ampName] = np.append(
                    datasetPtc.covariances[ampName].ravel(),
                    partialPtcDataset.covariances[ampName].ravel()
                ).reshape(
                    (
                        len(datasetPtc.rawExpTimes[ampName]),
                        datasetPtc.covMatrixSide,
                        datasetPtc.covMatrixSide,
                    )
                )
                datasetPtc.covariancesSqrtWeights[ampName] = np.append(
                    datasetPtc.covariancesSqrtWeights[ampName].ravel(),
                    partialPtcDataset.covariancesSqrtWeights[ampName].ravel()
                ).reshape(
                    (
                        len(datasetPtc.rawExpTimes[ampName]),
                        datasetPtc.covMatrixSide,
                        datasetPtc.covMatrixSide,
                    )
                )

                # Apply min/max masking.
                rawMean = partialPtcDataset.rawMeans[ampName][0]
                rawVar = partialPtcDataset.rawVars[ampName][0]
                expIdMask = partialPtcDataset.expIdMask[ampName][0]
                if (rawMean <= minMeanSignalDict[ampName]) or (rawMean >= maxMeanSignalDict[ampName]) \
                        or not np.isfinite(rawMean) or not np.isfinite(rawVar):
                    expIdMask = False

                # Additionally reject points failing the Gaussian-histogram
                # KS test when the legacy turnoff selection is disabled.
                kspValue = partialPtcDataset.kspValues[ampName][0]
                if not self.config.doLegacyTurnoffSelection and \
                        kspValue < self.config.ksTestMinPvalue:
                    expIdMask = False

                datasetPtc.expIdMask[ampName] = np.append(datasetPtc.expIdMask[ampName], expIdMask)

            # Carry along any auxiliary values attached to the partials.
            for key, value in partialPtcDataset.auxValues.items():
                if key in datasetPtc.auxValues:
                    datasetPtc.auxValues[key] = np.append(datasetPtc.auxValues[key], value)
                else:
                    datasetPtc.auxValues[key] = value

        # Sort arrays that are filled so far in the final dataset by
        # rawMeans index.
        # First compute the mean across all the amps to make sure that they are
        # all sorted the same way.
        detectorMeans = np.zeros(len(datasetPtc.inputExpIdPairs[ampNames[0]]))

        for i in range(len(detectorMeans)):
            arr = np.array([datasetPtc.rawMeans[ampName][i] for ampName in ampNames])
            good, = (np.isfinite(arr)).nonzero()
            # If every amp is non-finite for this pair, keep NaN so the
            # pair sorts to the end under argsort.
            if good.size == 0:
                detectorMeans[i] = np.nan
            else:
                detectorMeans[i] = np.mean(arr[good])

        index = np.argsort(detectorMeans)

        # Apply the common sort order to every per-amp array.
        for ampName in ampNames:
            datasetPtc.inputExpIdPairs[ampName] = np.array(
                datasetPtc.inputExpIdPairs[ampName]
            )[index].tolist()
            datasetPtc.rawExpTimes[ampName] = datasetPtc.rawExpTimes[ampName][index]
            datasetPtc.rawMeans[ampName] = datasetPtc.rawMeans[ampName][index]
            datasetPtc.rawVars[ampName] = datasetPtc.rawVars[ampName][index]
            datasetPtc.photoCharges[ampName] = datasetPtc.photoCharges[ampName][index]
            datasetPtc.histVars[ampName] = datasetPtc.histVars[ampName][index]
            datasetPtc.histChi2Dofs[ampName] = datasetPtc.histChi2Dofs[ampName][index]
            datasetPtc.kspValues[ampName] = datasetPtc.kspValues[ampName][index]
            datasetPtc.expIdMask[ampName] = datasetPtc.expIdMask[ampName][index]
            datasetPtc.covariances[ampName] = datasetPtc.covariances[ampName][index]
            datasetPtc.covariancesSqrtWeights[ampName] = datasetPtc.covariancesSqrtWeights[ampName][index]
        for key, value in datasetPtc.auxValues.items():
            datasetPtc.auxValues[key] = value[index]

        if self.config.ptcFitType == "FULLCOVARIANCE":
            # Fit the measured covariances vs mean signal to
            # the Astier+19 full model (Eq. 20). Before that
            # do a preliminary fit to the variance (C_00) vs mean
            # signal (mu) curve using the EXPAPPROXIMATION model
            # (Eq. 16 in Astier+19) in order to
            # get the flat pairs that are masked. The
            # points at these fluxes will also be masked when
            # calculating the other elements of the covariance
            # matrix, C_ij, i!=j).

            # Preliminary fit, using a temp dataset to get the mask
            tempDatasetPtc = copy.copy(datasetPtc)
            tempDatasetPtc.ptcFitType = "EXPAPPROXIMATION"
            tempDatasetPtc = self.fitMeasurementsToModel(tempDatasetPtc)

            # Fit "FULLCOVARIANCE", using the mask obtained from the
            # previous fit.
            for ampName in datasetPtc.ampNames:
                datasetPtc.expIdMask[ampName] = tempDatasetPtc.expIdMask[ampName]
            # NOTE(review): this sets an attribute named `fitType`, while
            # fitMeasurementsToModel reads `ptcFitType` (already
            # 'FULLCOVARIANCE' from the config at construction) — confirm
            # this attribute name is intentional.
            datasetPtc.fitType = "FULLCOVARIANCE"
            datasetPtc = self.fitMeasurementsToModel(datasetPtc)
        # The other options are: self.config.ptcFitType in
        # ("EXPAPPROXIMATION", "POLYNOMIAL")
        else:
            # Fit the PTC to a polynomial or to Astier+19 exponential
            # approximation (Eq. 16). Fill up
            # PhotonTransferCurveDataset object.
            datasetPtc = self.fitMeasurementsToModel(datasetPtc)

        if camera:
            detector = camera[detId]
        else:
            detector = None
        datasetPtc.updateMetadataFromExposures(inputCovariances)
        datasetPtc.updateMetadata(setDate=True, camera=camera, detector=detector)

        return pipeBase.Struct(
            outputPtcDataset=datasetPtc,
        )
429 def fitMeasurementsToModel(self, dataset):
430 """Fit the measured covariances vs mean signal to a
431 polynomial or one of the models in Astier+19
432 (Eq. 16 or Eq.20).
434 Parameters
435 ----------
436 dataset : `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset`
437 The dataset containing information such as the means,
438 (co)variances, and exposure times.
440 Returns
441 -------
442 dataset : `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset`
443 This is the same dataset as the input parameter, however,
444 it has been modified to include information such as the
445 fit vectors and the fit parameters. See the class
446 `PhotonTransferCurveDatase`.
447 """
448 fitType = dataset.ptcFitType
449 if fitType in ["FULLCOVARIANCE", ]:
450 # This model uses the full covariance matrix in the fit.
451 # The PTC is technically defined as variance vs signal,
452 # with variance = Cov_00
453 dataset = self.fitDataFullCovariance(dataset)
454 elif fitType in ["POLYNOMIAL", "EXPAPPROXIMATION"]:
455 # The PTC is technically defined as variance vs signal
456 dataset = self.fitPtc(dataset)
457 else:
458 raise RuntimeError(
459 f"Fitting option {fitType} not one of "
460 "'POLYNOMIAL', 'EXPAPPROXIMATION', or 'FULLCOVARIANCE'"
461 )
463 return dataset
    def fitDataFullCovariance(self, dataset):
        """Fit measured flat covariances to the full model in
        Astier+19 (Eq. 20).

        Parameters
        ----------
        dataset : `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset`
            The dataset containing information such as the means,
            (co)variances, and exposure times.

        Returns
        -------
        dataset : `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset`
            This is the same dataset as the input parameter, however,
            it has been modified to include information such as the
            fit vectors and the fit parameters. See the class
            `PhotonTransferCurveDatase`.

        Notes
        -----
        The parameters of the full model for C_ij(mu) ("C_ij" and "mu"
        in ADU^2 and ADU, respectively) in Astier+19 (Eq. 20) are:

        - "a" coefficients (r by r matrix), units: 1/e
        - "b" coefficients (r by r matrix), units: 1/e
        - noise matrix (r by r matrix), units: e^2
        - gain, units: e/ADU

        "b" appears in Eq. 20 only through the "ab" combination, which
        is defined in this code as "c=ab".

        Total number of parameters: #entries(a) + #entries(c) + #entries(noise)
        + 1. This is equivalent to r^2 + r^2 + r^2 + 1, where "r" is the
        maximum lag considered for the covariances calculation, and the
        extra "1" is the gain. If "b" is 0, then "c" is 0, and len(pInit) will
        have r^2 fewer entries.
        """
        matrixSide = self.config.maximumRangeCovariancesAstier
        # Number of entries in each (matrixSide x matrixSide) parameter block.
        lenParams = matrixSide*matrixSide

        for ampName in dataset.ampNames:
            lenInputTimes = len(dataset.rawExpTimes[ampName])
            # Not used when ptcFitType is 'FULLCOVARIANCE'
            dataset.ptcFitPars[ampName] = np.array([np.nan])
            dataset.ptcFitParsError[ampName] = np.array([np.nan])
            dataset.ptcFitChiSq[ampName] = np.nan

            if ampName in dataset.badAmps:
                # Bad amp: fill every output with NaNs of the right shape.
                # Entries need to have proper dimensions so read/write
                # with astropy.Table works.
                nanMatrix = np.full((matrixSide, matrixSide), np.nan)
                listNanMatrix = np.full((lenInputTimes, matrixSide, matrixSide), np.nan)
                dataset.covariancesModel[ampName] = listNanMatrix
                dataset.covariancesSqrtWeights[ampName] = listNanMatrix
                dataset.aMatrix[ampName] = nanMatrix
                dataset.bMatrix[ampName] = nanMatrix
                dataset.covariancesModelNoB[ampName] = listNanMatrix
                dataset.aMatrixNoB[ampName] = nanMatrix
                dataset.noiseMatrix[ampName] = nanMatrix
                dataset.noiseMatrixNoB[ampName] = nanMatrix

                dataset.expIdMask[ampName] = np.repeat(False, lenInputTimes)
                dataset.gain[ampName] = np.nan
                dataset.gainErr[ampName] = np.nan
                dataset.noise[ampName] = np.nan
                dataset.noiseErr[ampName] = np.nan
                dataset.finalVars[ampName] = np.repeat(np.nan, lenInputTimes)
                dataset.finalModelVars[ampName] = np.repeat(np.nan, lenInputTimes)
                dataset.finalMeans[ampName] = np.repeat(np.nan, lenInputTimes)
                continue

            muAtAmp = dataset.rawMeans[ampName]
            maskAtAmp = dataset.expIdMask[ampName]
            # An empty mask means nothing was rejected: use all points.
            if len(maskAtAmp) == 0:
                maskAtAmp = np.repeat(True, len(muAtAmp))

            muAtAmpMasked = muAtAmp[maskAtAmp]
            covAtAmp = dataset.covariances[ampName]
            covAtAmpMasked = np.nan_to_num(covAtAmp)[maskAtAmp]
            covSqrtWeightsAtAmp = dataset.covariancesSqrtWeights[ampName]
            covSqrtWeightsAtAmpMasked = np.nan_to_num(covSqrtWeightsAtAmp)[maskAtAmp]

            # Initial fit, to approximate parameters, with c=0
            a0, c0, noise0, gain0 = self.initialFitFullCovariance(
                muAtAmpMasked,
                covAtAmpMasked,
                covSqrtWeightsAtAmpMasked
            )

            # Fit full model (Eq. 20 of Astier+19) and same model with
            # b=0 (c=0 in this code)
            pInit = np.concatenate((a0.ravel(), c0.ravel(), noise0.ravel(), np.array(gain0)), axis=None)
            functionsDict = {'fullModel': self.funcFullCovarianceModel,
                             'fullModelNoB': self.funcFullCovarianceModelNoB}
            fitResults = {'fullModel': {'a': [], 'c': [], 'noise': [], 'gain': [], 'paramsErr': []},
                          'fullModelNoB': {'a': [], 'c': [], 'noise': [], 'gain': [], 'paramsErr': []}}
            for key in functionsDict:
                params, paramsErr, _ = fitLeastSq(pInit, muAtAmpMasked,
                                                  covAtAmpMasked.ravel(), functionsDict[key],
                                                  weightsY=covSqrtWeightsAtAmpMasked.ravel())
                # Unpack the flat parameter vector: a, c, noise blocks
                # of lenParams entries each, then the scalar gain.
                a = params[:lenParams].reshape((matrixSide, matrixSide))
                c = params[lenParams:2*lenParams].reshape((matrixSide, matrixSide))
                noise = params[2*lenParams:3*lenParams].reshape((matrixSide, matrixSide))
                gain = params[-1]

                fitResults[key]['a'] = a
                fitResults[key]['c'] = c
                fitResults[key]['noise'] = noise
                fitResults[key]['gain'] = gain
                fitResults[key]['paramsErr'] = paramsErr

            # Put the information in the PTC dataset

            # Not used when ptcFitType is 'FULLCOVARIANCE'
            dataset.ptcFitPars[ampName] = np.array([np.nan])
            dataset.ptcFitParsError[ampName] = np.array([np.nan])
            dataset.ptcFitChiSq[ampName] = np.nan

            # Save full covariances, covariances models, and their weights.
            # dataset.expIdMask is already full, but needs to be
            # converted to bool.
            dataset.expIdMask[ampName] = np.array(dataset.expIdMask[ampName], dtype=bool)
            dataset.covariances[ampName] = covAtAmp
            # We evaluate the covariance model everywhere, even the
            # masked amps.
            dataset.covariancesModel[ampName] = self.evalCovModel(muAtAmp,
                                                                  fitResults['fullModel']['a'],
                                                                  fitResults['fullModel']['c'],
                                                                  fitResults['fullModel']['noise'],
                                                                  fitResults['fullModel']['gain'])
            dataset.covariancesSqrtWeights[ampName] = covSqrtWeightsAtAmp
            dataset.aMatrix[ampName] = fitResults['fullModel']['a']
            # b is recovered from the fitted c = a*b combination.
            dataset.bMatrix[ampName] = fitResults['fullModel']['c']/fitResults['fullModel']['a']
            dataset.covariancesModelNoB[ampName] = self.evalCovModel(muAtAmp,
                                                                     fitResults['fullModelNoB']['a'],
                                                                     fitResults['fullModelNoB']['c'],
                                                                     fitResults['fullModelNoB']['noise'],
                                                                     fitResults['fullModelNoB']['gain'],
                                                                     setBtoZero=True)
            dataset.aMatrixNoB[ampName] = fitResults['fullModelNoB']['a']
            dataset.gain[ampName] = fitResults['fullModel']['gain']
            dataset.gainErr[ampName] = fitResults['fullModel']['paramsErr'][-1]
            readoutNoise = fitResults['fullModel']['noise'][0][0]
            readoutNoiseSqrt = np.sqrt(np.fabs(readoutNoise))
            dataset.noise[ampName] = readoutNoise
            readoutNoiseSigma = fitResults['fullModel']['paramsErr'][2*lenParams]
            # Error propagation for sqrt: sigma(sqrt(x)) = 0.5*sigma(x)/sqrt(|x|).
            dataset.noiseErr[ampName] = 0.5*(readoutNoiseSigma/np.fabs(readoutNoise))*readoutNoiseSqrt
            dataset.noiseMatrix[ampName] = fitResults['fullModel']['noise']
            dataset.noiseMatrixNoB[ampName] = fitResults['fullModelNoB']['noise']

            # The final PTC vectors: measured and modeled variance (C_00)
            # and the mean signal, unmasked.
            dataset.finalVars[ampName] = covAtAmp[:, 0, 0]
            dataset.finalModelVars[ampName] = dataset.covariancesModel[ampName][:, 0, 0]
            dataset.finalMeans[ampName] = muAtAmp

        return dataset
    def initialFitFullCovariance(self, mu, cov, sqrtW):
        """ Performs a crude parabolic fit of the data in order to start
        the full fit close to the solution, setting b=0 (c=0) in Eq. 20
        of Astier+19.

        Parameters
        ----------
        mu : `numpy.array`, (N,)
            Signal `mu` (ADU)
        cov : `numpy.array`, (N, M, M)
            Covariance arrays of size `(M, M)` (with
            `M = config.maximumRangeCovariancesAstier`),
            indexed by mean signal `mu`.
        sqrtW : `numpy.array`, (N, M, M)
            Covariance weights, defined as 1./sqrt(Variances)

        Returns
        -------
        a : `numpy.array`, (M, M)
            "a" parameter per flux in Eq. 20 of Astier+19.
        c : `numpy.array`, (M, M)
            "c"="ab" parameter per flux in Eq. 20 of Astier+19.
        noise : `numpy.array`, (M, M)
            "noise" parameter per flux in Eq. 20 of Astier+19.
        gain : `float`
            Amplifier gain (e/ADU)
        """
        matrixSide = self.config.maximumRangeCovariancesAstier

        # Initialize fit parameters
        a = np.zeros((matrixSide, matrixSide))
        c = np.zeros((matrixSide, matrixSide))
        noise = np.zeros((matrixSide, matrixSide))
        gain = 1.

        # iterate the fit to account for higher orders
        # the chi2 does not necessarily go down, so one could
        # stop when it increases
        oldChi2 = 1e30
        for _ in range(5):
            model = np.nan_to_num(self.evalCovModel(mu, a, c, noise, gain, setBtoZero=True))
            # loop on lags
            for i in range(matrixSide):
                for j in range(matrixSide):
                    # fit a parabola for a given lag to the residuals of
                    # the current model, and accumulate the corrections.
                    parsFit = np.polyfit(mu, cov[:, i, j] - model[:, i, j],
                                         2, w=sqrtW[:, i, j])
                    # model equation (Eq. 20) in Astier+19, with c=a*b=0:
                    a[i, j] += parsFit[0]
                    noise[i, j] += parsFit[2]
                    if i + j == 0:
                        # The linear term at lag (0, 0) carries 1/gain;
                        # fold the correction into the running estimate.
                        gain = 1./(1/gain+parsFit[1])
            weightedRes = (model - cov)*sqrtW
            chi2 = (weightedRes.flatten()**2).sum()
            # Stop as soon as chi2 stops improving.
            if chi2 > oldChi2:
                break
            oldChi2 = chi2

        return a, c, noise, gain
682 def funcFullCovarianceModel(self, params, x):
683 """Model to fit covariances from flat fields; Equation 20 of
684 Astier+19.
686 Parameters
687 ----------
688 params : `list`
689 Parameters of the model: aMatrix, CMatrix, noiseMatrix,
690 gain (e/ADU).
691 x : `numpy.array`, (N,)
692 Signal `mu` (ADU)
694 Returns
695 -------
696 y : `numpy.array`, (N,)
697 Covariance matrix.
698 """
699 matrixSide = self.config.maximumRangeCovariancesAstier
700 lenParams = matrixSide*matrixSide
701 aMatrix = params[:lenParams].reshape((matrixSide, matrixSide))
702 cMatrix = params[lenParams:2*lenParams].reshape((matrixSide, matrixSide))
703 noiseMatrix = params[2*lenParams:3*lenParams].reshape((matrixSide, matrixSide))
704 gain = params[-1]
706 return self.evalCovModel(x, aMatrix, cMatrix, noiseMatrix, gain).flatten()
708 def funcFullCovarianceModelNoB(self, params, x):
709 """Model to fit covariances from flat fields; Equation 20 of
710 Astier+19, with b=0 (equivalent to c=a*b=0 in this code).
712 Parameters
713 ----------
714 params : `list`
715 Parameters of the model: aMatrix, CMatrix, noiseMatrix,
716 gain (e/ADU).
717 x : `numpy.array`, (N,)
718 Signal mu (ADU)
720 Returns
721 -------
722 y : `numpy.array`, (N,)
723 Covariance matrix.
724 """
725 matrixSide = self.config.maximumRangeCovariancesAstier
726 lenParams = matrixSide*matrixSide
727 aMatrix = params[:lenParams].reshape((matrixSide, matrixSide))
728 cMatrix = params[lenParams:2*lenParams].reshape((matrixSide, matrixSide))
729 noiseMatrix = params[2*lenParams:3*lenParams].reshape((matrixSide, matrixSide))
730 gain = params[-1]
732 return self.evalCovModel(x, aMatrix, cMatrix, noiseMatrix, gain, setBtoZero=True).flatten()
    def evalCovModel(self, mu, aMatrix, cMatrix, noiseMatrix, gain, setBtoZero=False):
        """Computes full covariances model (Eq. 20 of Astier+19).

        Parameters
        ----------
        mu : `numpy.array`, (N,)
            List of mean signals.
        aMatrix : `numpy.array`, (M, M)
            "a" parameter per flux in Eq. 20 of Astier+19.
        cMatrix : `numpy.array`, (M, M)
            "c"="ab" parameter per flux in Eq. 20 of Astier+19.
        noiseMatrix : `numpy.array`, (M, M)
            "noise" parameter per flux in Eq. 20 of Astier+19.
        gain : `float`
            Amplifier gain (e/ADU)
        setBtoZero : `bool`, optional
            Set "b" parameter in full model (see Astier+19) to zero.

        Returns
        -------
        covModel : `numpy.array`, (N, M, M)
            Covariances model.

        Notes
        -----
        By default, computes the covModel for the mu's stored(self.mu).
        Returns cov[Nmu, M, M]. The variance for the PTC is
        cov[:, 0, 0]. mu and cov are in ADUs and ADUs squared. To use
        electrons for both, the gain should be set to 1. This routine
        implements the model in Astier+19 (1905.08677).
        The parameters of the full model for C_ij(mu) ("C_ij" and "mu"
        in ADU^2 and ADU, respectively) in Astier+19 (Eq. 20) are:

        - "a" coefficients (M by M matrix), units: 1/e
        - "b" coefficients (M by M matrix), units: 1/e
        - noise matrix (M by M matrix), units: e^2
        - gain, units: e/ADU

        "b" appears in Eq. 20 only through the "ab" combination, which
        is defined in this code as "c=ab".
        """
        matrixSide = self.config.maximumRangeCovariancesAstier
        sa = (matrixSide, matrixSide)
        # pad a with zeros and symmetrize, so that the convolutions
        # below see the full (mirrored) kernel rather than one quadrant
        aEnlarged = np.zeros((int(sa[0]*1.5)+1, int(sa[1]*1.5)+1))
        aEnlarged[0:sa[0], 0:sa[1]] = aMatrix
        aSym = symmetrize(aEnlarged)
        # pad c with zeros and symmetrize
        cEnlarged = np.zeros((int(sa[0]*1.5)+1, int(sa[1]*1.5)+1))
        cEnlarged[0:sa[0], 0:sa[1]] = cMatrix
        cSym = symmetrize(cEnlarged)
        # a*a, a*a*a, and a*c convolution terms of Eq. 20.
        a2 = fftconvolve(aSym, aSym, mode='same')
        a3 = fftconvolve(a2, aSym, mode='same')
        ac = fftconvolve(aSym, cSym, mode='same')
        # Center of the symmetrized kernel, used to crop back to (M, M).
        (xc, yc) = np.unravel_index(np.abs(aSym).argmax(), a2.shape)

        a1 = aMatrix[np.newaxis, :, :]
        a2 = a2[np.newaxis, xc:xc + matrixSide, yc:yc + matrixSide]
        a3 = a3[np.newaxis, xc:xc + matrixSide, yc:yc + matrixSide]
        ac = ac[np.newaxis, xc:xc + matrixSide, yc:yc + matrixSide]
        c1 = cMatrix[np.newaxis, ::]

        # assumes that mu is 1d
        bigMu = mu[:, np.newaxis, np.newaxis]*gain
        # c(=a*b in Astier+19) also has a contribution to the last
        # term, that is absent for now.
        if setBtoZero:
            c1 = np.zeros_like(c1)
            ac = np.zeros_like(ac)
        # Eq. 20 of Astier+19, evaluated for all mu at once (in ADU^2).
        covModel = (bigMu/(gain*gain)*(a1*bigMu+2./3.*(bigMu*bigMu)*(a2 + c1)
                    + (1./3.*a3 + 5./6.*ac)*(bigMu*bigMu*bigMu)) + noiseMatrix[np.newaxis, :, :]/gain**2)
        # add the Poisson term, and the read out noise (variance)
        covModel[:, 0, 0] += mu/gain

        return covModel
810 # EXPAPPROXIMATION and POLYNOMIAL fit methods
811 @staticmethod
812 def _initialParsForPolynomial(order):
813 assert order >= 2
814 pars = np.zeros(order, dtype=float)
815 pars[0] = 10
816 pars[1] = 1
817 pars[2:] = 0.0001
818 return pars
820 @staticmethod
821 def _boundsForPolynomial(initialPars, lowers=[], uppers=[]):
822 if not len(lowers):
823 lowers = [np.NINF for p in initialPars]
824 if not len(uppers):
825 uppers = [np.inf for p in initialPars]
826 lowers[1] = 0 # no negative gains
827 return (lowers, uppers)
829 @staticmethod
830 def _boundsForAstier(initialPars, lowers=[], uppers=[]):
831 if not len(lowers):
832 lowers = [np.NINF for p in initialPars]
833 if not len(uppers):
834 uppers = [np.inf for p in initialPars]
835 return (lowers, uppers)
837 @staticmethod
838 def _getInitialGoodPoints(means, variances, minVarPivotSearch, consecutivePointsVarDecreases):
839 """Return a boolean array to mask bad points.
841 Parameters
842 ----------
843 means : `numpy.array`
844 Input array with mean signal values.
845 variances : `numpy.array`
846 Input array with variances at each mean value.
847 minVarPivotSearch : `float`
848 The variance (in ADU^2), above which, the point
849 of decreasing variance should be sought.
850 consecutivePointsVarDecreases : `int`
851 Required number of consecutive points/fluxes
852 in the PTC where the variance
853 decreases in order to find a first
854 estimate of the PTC turn-off.
856 Returns
857 ------
858 goodPoints : `numpy.array` [`bool`]
859 Boolean array to select good (`True`) and bad (`False`)
860 points.
862 Notes
863 -----
864 Eliminate points beyond which the variance decreases.
865 """
866 goodPoints = np.ones_like(means, dtype=bool)
867 # Variances are sorted and should monotonically increase
868 pivotList = np.where(np.array(np.diff(variances)) < 0)[0]
869 if len(pivotList) > 0:
870 # For small values, sometimes the variance decreases slightly
871 # Only look when var > self.config.minVarPivotSearch
872 pivotList = [p for p in pivotList if variances[p] > minVarPivotSearch]
873 # Require that the varince decreases during
874 # consecutivePointsVarDecreases
875 # consecutive points. This will give a first
876 # estimate of the PTC turn-off, which
877 # may be updated (reduced) further in the code.
878 if len(pivotList) > 1:
879 # enumerate(pivotList) creates tuples (index, value), for
880 # each value in pivotList. The lambda function subtracts
881 # each value from the index.
882 # groupby groups elements by equal key value.
883 for k, g in groupby(enumerate(pivotList), lambda x: x[0]-x[1]):
884 group = (map(itemgetter(1), g))
885 # Form groups of consecute values from pivotList
886 group = list(map(int, group))
887 # values in pivotList are indices where np.diff(variances)
888 # is negative, i.e., where the variance starts decreasing.
889 # Find the first group of consecutive numbers when
890 # variance decreases.
891 if len(group) >= consecutivePointsVarDecreases:
892 pivotIndex = np.min(group)
893 goodPoints[pivotIndex+1:] = False
894 break
896 # Finally, we filter out any infinities or NaNs.
897 goodPoints[(~np.isfinite(means)) | (~np.isfinite(variances))] = False
899 return goodPoints
901 def _makeZeroSafe(self, array, substituteValue=1e-9):
902 """"""
903 array = np.array(array)
904 nBad = Counter(np.ravel(array))[0]
905 if nBad == 0:
906 return array
908 index, = np.where(array == 0)
909 if len(index):
910 msg = f"Found {nBad} zeros in array at elements {index}"
911 self.log.warning(msg)
913 array[index] = substituteValue
915 return array
917 def fitPtc(self, dataset):
918 """Fit the photon transfer curve to a polynomial or to the
919 Astier+19 approximation (Eq. 16).
921 Fit the photon transfer curve with either a polynomial of
922 the order specified in the task config, or using the
923 exponential approximation in Astier+19 (Eq. 16).
925 Sigma clipping is performed iteratively for the fit, as
926 well as an initial clipping of data points that are more
927 than `config.initialNonLinearityExclusionThreshold` away
928 from lying on a straight line. This other step is necessary
929 because the photon transfer curve turns over catastrophically
930 at very high flux (because saturation
931 drops the variance to ~0) and these far outliers cause the
932 initial fit to fail, meaning the sigma cannot be calculated
933 to perform the sigma-clipping.
935 Parameters
936 ----------
937 dataset : `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset`
938 The dataset containing the means, variances and
939 exposure times.
941 Returns
942 -------
943 dataset : `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset`
944 This is the same dataset as the input parameter, however,
945 it has been modified to include information such as the
946 fit vectors and the fit parameters. See the class
947 `PhotonTransferCurveDatase`.
949 Raises
950 ------
951 RuntimeError
952 Raised if dataset.ptcFitType is None or empty.
953 """
954 if dataset.ptcFitType:
955 ptcFitType = dataset.ptcFitType
956 else:
957 raise RuntimeError("ptcFitType is None of empty in PTC dataset.")
958 matrixSide = self.config.maximumRangeCovariancesAstier
959 nanMatrix = np.empty((matrixSide, matrixSide))
960 nanMatrix[:] = np.nan
962 for amp in dataset.ampNames:
963 lenInputTimes = len(dataset.rawExpTimes[amp])
964 listNanMatrix = np.empty((lenInputTimes, matrixSide, matrixSide))
965 listNanMatrix[:] = np.nan
967 dataset.covariancesModel[amp] = listNanMatrix
968 dataset.aMatrix[amp] = nanMatrix
969 dataset.bMatrix[amp] = nanMatrix
970 dataset.covariancesModelNoB[amp] = listNanMatrix
971 dataset.aMatrixNoB[amp] = nanMatrix
972 dataset.noiseMatrix[amp] = nanMatrix
973 dataset.noiseMatrixNoB[amp] = nanMatrix
975 def errFunc(p, x, y):
976 return ptcFunc(p, x) - y
978 sigmaCutPtcOutliers = self.config.sigmaCutPtcOutliers
979 maxIterationsPtcOutliers = self.config.maxIterationsPtcOutliers
981 for i, ampName in enumerate(dataset.ampNames):
982 meanVecOriginal = dataset.rawMeans[ampName].copy()
983 varVecOriginal = dataset.rawVars[ampName].copy()
984 varVecOriginal = self._makeZeroSafe(varVecOriginal)
986 if self.config.doLegacyTurnoffSelection:
987 # Discard points when the variance starts to decrease after two
988 # consecutive signal levels
989 goodPoints = self._getInitialGoodPoints(meanVecOriginal, varVecOriginal,
990 self.config.minVarPivotSearch,
991 self.config.consecutivePointsVarDecreases)
992 else:
993 goodPoints = dataset.expIdMask[ampName]
995 # Check if all points are bad from the 'cpExtractPtcTask'
996 initialExpIdMask = dataset.expIdMask[ampName]
998 if not (goodPoints.any() and initialExpIdMask.any()):
999 msg = (f"SERIOUS: All points in goodPoints: {goodPoints} or "
1000 f"in initialExpIdMask: {initialExpIdMask} are bad."
1001 f"Setting {ampName} to BAD.")
1002 self.log.warning(msg)
1003 # Fill entries with NaNs
1004 self.fillBadAmp(dataset, ptcFitType, ampName)
1005 continue
1007 mask = goodPoints
1009 if ptcFitType == 'EXPAPPROXIMATION':
1010 ptcFunc = funcAstier
1011 parsIniPtc = [-1e-9, 1.0, 10.] # a00, gain, noise^2
1012 # lowers and uppers obtained from BOT data studies by
1013 # C. Lage (UC Davis, 11/2020).
1014 if self.config.binSize > 1:
1015 bounds = self._boundsForAstier(parsIniPtc)
1016 else:
1017 bounds = self._boundsForAstier(parsIniPtc, lowers=[-1e-4, 0.1, -2000],
1018 uppers=[1e-4, 10.0, 2000])
1019 if ptcFitType == 'POLYNOMIAL':
1020 ptcFunc = funcPolynomial
1021 parsIniPtc = self._initialParsForPolynomial(self.config.polynomialFitDegree + 1)
1022 bounds = self._boundsForPolynomial(parsIniPtc)
1024 # We perform an initial (unweighted) fit of variance vs signal
1025 # (after initial KS test or post-drop selection) to look for
1026 # outliers, particularly at the high-flux end. The initial fit
1027 # is performed only for points that are guaranteed to be below
1028 # the PTC turnoff and then extrapolated to ensure that high
1029 # flux points that have abnormal variance values can be properly
1030 # rejected in this phase without biasing the initial fit.
1031 # This algorithm was initially developed by Seth Digel for
1032 # the EO Testing pipeline.
1034 if self.config.scaleMaxSignalInitialPtcOutlierFit:
1035 approxGain = np.nanmedian(meanVecOriginal/varVecOriginal)
1036 maxADUInitialPtcOutlierFit = self.config.maxSignalInitialPtcOutlierFit/approxGain
1037 self.log.info(
1038 "Using approximate gain %.3f and ADU signal cutoff of %.1f for amplifier %s",
1039 approxGain,
1040 maxADUInitialPtcOutlierFit,
1041 ampName,
1042 )
1043 else:
1044 maxADUInitialPtcOutlierFit = self.config.maxSignalInitialPtcOutlierFit
1046 if maxIterationsPtcOutliers == 0:
1047 # We are not doing any outlier rejection here, but we do want
1048 # an initial fit.
1049 res = least_squares(
1050 errFunc,
1051 parsIniPtc,
1052 bounds=bounds,
1053 args=(meanVecOriginal[mask], varVecOriginal[mask]),
1054 )
1055 pars = res.x
1056 newMask = mask.copy()
1057 else:
1058 newMask = (mask & (meanVecOriginal <= maxADUInitialPtcOutlierFit))
1060 count = 0
1061 lastMask = mask.copy()
1062 while count < maxIterationsPtcOutliers:
1063 res = least_squares(
1064 errFunc,
1065 parsIniPtc,
1066 bounds=bounds,
1067 args=(meanVecOriginal[newMask], varVecOriginal[newMask]),
1068 )
1069 pars = res.x
1071 sigResids = (varVecOriginal - ptcFunc(pars, meanVecOriginal))/np.sqrt(varVecOriginal)
1072 # The new mask includes points where the residuals are
1073 # finite, are less than the cut, and include the original
1074 # mask of known points that should not be used.
1075 newMask = (
1076 np.isfinite(sigResids)
1077 & (np.abs(np.nan_to_num(sigResids)) < sigmaCutPtcOutliers)
1078 & mask
1079 )
1080 if np.count_nonzero(newMask) == 0:
1081 msg = (f"SERIOUS: All points after outlier rejection are bad. "
1082 f"Setting {ampName} to BAD.")
1083 self.log.warning(msg)
1084 # Fill entries with NaNs
1085 self.fillBadAmp(dataset, ptcFitType, ampName)
1086 break
1088 self.log.debug(
1089 "Iteration %d: Removed %d points in total for %s.",
1090 count,
1091 np.count_nonzero(mask) - np.count_nonzero(newMask),
1092 ampName,
1093 )
1095 # If the mask hasn't changed then break out.
1096 if np.all(newMask == lastMask):
1097 self.log.debug("Convergence at iteration %d; breaking loop for %s.", count, ampName)
1098 break
1100 lastMask = newMask.copy()
1102 count += 1
1104 # Set the mask to the new mask
1105 mask = newMask.copy()
1107 if not mask.any():
1108 # We hae already filled the bad amp above, so continue.
1109 continue
1111 dataset.expIdMask[ampName] = mask
1113 parsIniPtc = pars
1114 meanVecFinal = meanVecOriginal[mask]
1115 varVecFinal = varVecOriginal[mask]
1117 # Save the maximum point after outlier detection as the
1118 # PTC turnoff point.
1119 dataset.ptcTurnoff[ampName] = meanVecFinal[-1]
1121 if Counter(mask)[False] > 0:
1122 self.log.info("Number of points discarded in PTC of amplifier %s:"
1123 " %d out of %d", ampName, Counter(mask)[False], len(meanVecOriginal))
1125 if (len(meanVecFinal) < len(parsIniPtc)):
1126 msg = (f"SERIOUS: Not enough data points ({len(meanVecFinal)}) compared to the number of "
1127 f"parameters of the PTC model({len(parsIniPtc)}). Setting {ampName} to BAD.")
1128 self.log.warning(msg)
1129 # Fill entries with NaNs
1130 self.fillBadAmp(dataset, ptcFitType, ampName)
1131 continue
1132 # Fit the PTC.
1133 # The variance of the variance is Var(v)=2*v^2/Npix. This is
1134 # already calculated in `makeCovArray` of CpPtcExtract.
1135 # dataset.covariancesSqrtWeights[ampName][:,0,0]
1136 # has 1/sqrt(Var(v)).
1137 weightsY = dataset.covariancesSqrtWeights[ampName][:, 0, 0][mask]
1138 if self.config.doFitBootstrap:
1139 parsFit, parsFitErr, reducedChiSqPtc = fitBootstrap(parsIniPtc, meanVecFinal,
1140 varVecFinal, ptcFunc,
1141 weightsY=weightsY)
1142 else:
1143 parsFit, parsFitErr, reducedChiSqPtc = fitLeastSq(parsIniPtc, meanVecFinal,
1144 varVecFinal, ptcFunc,
1145 weightsY=weightsY)
1146 dataset.ptcFitPars[ampName] = parsFit
1147 dataset.ptcFitParsError[ampName] = parsFitErr
1148 dataset.ptcFitChiSq[ampName] = reducedChiSqPtc
1150 dataset.finalVars[ampName] = varVecOriginal
1151 dataset.finalVars[ampName][~mask] = np.nan
1152 dataset.finalModelVars[ampName] = ptcFunc(parsFit, meanVecOriginal)
1153 dataset.finalModelVars[ampName][~mask] = np.nan
1154 dataset.finalMeans[ampName] = meanVecOriginal
1155 dataset.finalMeans[ampName][~mask] = np.nan
1157 if ptcFitType == 'EXPAPPROXIMATION':
1158 ptcGain = parsFit[1]
1159 ptcGainErr = parsFitErr[1]
1160 ptcNoise = np.sqrt(np.fabs(parsFit[2]))
1161 ptcNoiseErr = 0.5*(parsFitErr[2]/np.fabs(parsFit[2]))*np.sqrt(np.fabs(parsFit[2]))
1162 if ptcFitType == 'POLYNOMIAL':
1163 ptcGain = 1./parsFit[1]
1164 ptcGainErr = np.fabs(1./parsFit[1])*(parsFitErr[1]/parsFit[1])
1165 ptcNoise = np.sqrt(np.fabs(parsFit[0]))*ptcGain
1166 ptcNoiseErr = (0.5*(parsFitErr[0]/np.fabs(parsFit[0]))*(np.sqrt(np.fabs(parsFit[0]))))*ptcGain
1167 dataset.gain[ampName] = ptcGain
1168 dataset.gainErr[ampName] = ptcGainErr
1169 dataset.noise[ampName] = ptcNoise
1170 dataset.noiseErr[ampName] = ptcNoiseErr
1172 if not len(dataset.ptcFitType) == 0:
1173 dataset.ptcFitType = ptcFitType
1174 if len(dataset.badAmps) == 0:
1175 dataset.badAmps = []
1177 return dataset
1179 def fillBadAmp(self, dataset, ptcFitType, ampName):
1180 """Fill the dataset with NaNs if there are not enough
1181 good points.
1183 Parameters
1184 ----------
1185 dataset : `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset`
1186 The dataset containing the means, variances and
1187 exposure times.
1188 ptcFitType : {'POLYNOMIAL', 'EXPAPPROXIMATION'}
1189 Fit a 'POLYNOMIAL' (degree: 'polynomialFitDegree') or
1190 'EXPAPPROXIMATION' (Eq. 16 of Astier+19) to the PTC.
1191 ampName : `str`
1192 Amplifier name.
1193 """
1194 dataset.badAmps.append(ampName)
1195 dataset.expIdMask[ampName] = np.repeat(False, len(dataset.rawExpTimes[ampName]))
1196 dataset.gain[ampName] = np.nan
1197 dataset.gainErr[ampName] = np.nan
1198 dataset.noise[ampName] = np.nan
1199 dataset.noiseErr[ampName] = np.nan
1200 dataset.ptcFitPars[ampName] = (np.repeat(np.nan, self.config.polynomialFitDegree + 1) if
1201 ptcFitType in ["POLYNOMIAL", ] else np.repeat(np.nan, 3))
1202 dataset.ptcFitParsError[ampName] = (np.repeat(np.nan, self.config.polynomialFitDegree + 1) if
1203 ptcFitType in ["POLYNOMIAL", ] else np.repeat(np.nan, 3))
1204 dataset.ptcFitChiSq[ampName] = np.nan
1205 dataset.ptcTurnoff[ampName] = np.nan
1206 dataset.finalVars[ampName] = np.repeat(np.nan, len(dataset.rawExpTimes[ampName]))
1207 dataset.finalModelVars[ampName] = np.repeat(np.nan, len(dataset.rawExpTimes[ampName]))
1208 dataset.finalMeans[ampName] = np.repeat(np.nan, len(dataset.rawExpTimes[ampName]))
1210 return