Coverage for python/lsst/cp/pipe/ptc/cpSolvePtcTask.py: 11%
453 statements
« prev ^ index » next coverage.py v7.3.0, created at 2023-08-16 12:07 +0000
« prev ^ index » next coverage.py v7.3.0, created at 2023-08-16 12:07 +0000
1# This file is part of cp_pipe.
2#
3# Developed for the LSST Data Management System.
4# This product includes software developed by the LSST Project
5# (https://www.lsst.org).
6# See the COPYRIGHT file at the top-level directory of this distribution
7# for details of code ownership.
8#
9# This program is free software: you can redistribute it and/or modify
10# it under the terms of the GNU General Public License as published by
11# the Free Software Foundation, either version 3 of the License, or
12# (at your option) any later version.
13#
14# This program is distributed in the hope that it will be useful,
15# but WITHOUT ANY WARRANTY; without even the implied warranty of
16# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17# GNU General Public License for more details.
18#
19# You should have received a copy of the GNU General Public License
20# along with this program. If not, see <https://www.gnu.org/licenses/>.
21#
22import numpy as np
23from collections import Counter
25import lsst.pex.config as pexConfig
26import lsst.pipe.base as pipeBase
27from lsst.cp.pipe.utils import (fitLeastSq, fitBootstrap, funcPolynomial, funcAstier, symmetrize)
29from scipy.signal import fftconvolve
30from scipy.optimize import least_squares
31from itertools import groupby
32from operator import itemgetter
34import lsst.pipe.base.connectionTypes as cT
36from lsst.ip.isr import PhotonTransferCurveDataset
38from lsst.cp.pipe._lookupStaticCalibration import lookupStaticCalibration
40import copy
43__all__ = ['PhotonTransferCurveSolveConfig', 'PhotonTransferCurveSolveTask']
class PhotonTransferCurveSolveConnections(pipeBase.PipelineTaskConnections,
                                          dimensions=("instrument", "detector")):
    """Connections for PhotonTransferCurveSolveTask.

    The per-exposure partial PTC datasets (dimensions include
    ``exposure``) are gathered and solved into a single per-detector
    PTC dataset (dimensions ``instrument``, ``detector``).
    """
    inputCovariances = cT.Input(
        name="ptcCovariances",
        doc="Tuple with measured covariances from flats.",
        storageClass="PhotonTransferCurveDataset",
        dimensions=("instrument", "exposure", "detector"),
        isCalibration=True,
        # One partial dataset per input exposure.
        multiple=True,
    )
    camera = cT.PrerequisiteInput(
        name="camera",
        doc="Camera the input data comes from.",
        storageClass="Camera",
        dimensions=("instrument",),
        isCalibration=True,
        lookupFunction=lookupStaticCalibration,
    )
    outputPtcDataset = cT.Output(
        # NOTE(review): "ptcDatsetProposal" looks like a typo for
        # "ptcDatasetProposal", but dataset type names are persisted in
        # the butler registry; confirm downstream consumers before
        # renaming.
        name="ptcDatsetProposal",
        doc="Output proposed ptc dataset.",
        storageClass="PhotonTransferCurveDataset",
        dimensions=("instrument", "detector"),
        multiple=False,
        isCalibration=True,
    )
class PhotonTransferCurveSolveConfig(pipeBase.PipelineTaskConfig,
                                     pipelineConnections=PhotonTransferCurveSolveConnections):
    """Configuration for fitting measured covariances.
    """
    # Model selection.
    ptcFitType = pexConfig.ChoiceField(
        dtype=str,
        doc="Fit PTC to Eq. 16, Eq. 20 in Astier+19, or to a polynomial.",
        default="POLYNOMIAL",
        allowed={
            "POLYNOMIAL": "n-degree polynomial (use 'polynomialFitDegree' to set 'n').",
            "EXPAPPROXIMATION": "Approximation in Astier+19 (Eq. 16).",
            "FULLCOVARIANCE": "Full covariances model in Astier+19 (Eq. 20)"
        }
    )
    # Per-amplifier signal-range cuts; the key 'ALL_AMPS' applies a
    # single value to every amplifier.
    minMeanSignal = pexConfig.DictField(
        keytype=str,
        itemtype=float,
        doc="Minimum values (inclusive) of mean signal (in ADU) per amp to use."
            " The same cut is applied to all amps if this parameter [`dict`] is passed as "
            " {'ALL_AMPS': value}",
        default={'ALL_AMPS': 0.0},
    )
    maxMeanSignal = pexConfig.DictField(
        keytype=str,
        itemtype=float,
        doc="Maximum values (inclusive) of mean signal (in ADU) below which to consider, per amp."
            " The same cut is applied to all amps if this dictionary is of the form"
            " {'ALL_AMPS': value}",
        default={'ALL_AMPS': 1e6},
    )
    # FULLCOVARIANCE (Astier+19 Eq. 20) fit controls.
    maximumRangeCovariancesAstier = pexConfig.Field(
        dtype=int,
        doc="Maximum range of covariances as in Astier+19",
        default=8,
    )
    sigmaClipFullFitCovariancesAstier = pexConfig.Field(
        dtype=float,
        doc="sigma clip for full model fit for FULLCOVARIANCE ptcFitType ",
        default=5.0,
    )
    maxIterFullFitCovariancesAstier = pexConfig.Field(
        dtype=int,
        doc="Maximum number of iterations in full model fit for FULLCOVARIANCE ptcFitType",
        default=3,
    )
    # POLYNOMIAL/EXPAPPROXIMATION fit and outlier-rejection controls.
    polynomialFitDegree = pexConfig.Field(
        dtype=int,
        doc="Degree of polynomial to fit the PTC, when 'ptcFitType'=POLYNOMIAL.",
        default=3,
    )
    doLegacyTurnoffSelection = pexConfig.Field(
        dtype=bool,
        doc="Use 'legacy' computation for PTC turnoff selection. If set "
            "to False, then the KS test p-value selection will be used instead.",
        default=False,
    )
    sigmaCutPtcOutliers = pexConfig.Field(
        dtype=float,
        doc="Sigma cut for outlier rejection in PTC.",
        default=5.0,
    )
    maxIterationsPtcOutliers = pexConfig.RangeField(
        dtype=int,
        doc="Maximum number of iterations for outlier rejection in PTC.",
        default=2,
        min=0
    )
    maxSignalInitialPtcOutlierFit = pexConfig.Field(
        dtype=float,
        doc="Maximum signal considered for intial outlier fit. This should be below "
            "the PTC turnoff to ensure accurate outlier rejection.",
        default=30_000.,
    )
    # Legacy (pivot-search) turnoff selection parameters.
    minVarPivotSearch = pexConfig.Field(
        dtype=float,
        doc="The code looks for a pivot signal point after which the variance starts decreasing at high-flux"
            " to exclude then from the PTC model fit. However, sometimes at low fluxes, the variance"
            " decreases slightly. Set this variable for the variance value, in ADU^2, after which the pivot "
            " should be sought. Only used if doLegacyTurnoffSelection is True.",
        default=10000,
    )
    consecutivePointsVarDecreases = pexConfig.RangeField(
        dtype=int,
        doc="Required number of consecutive points/fluxes in the PTC where the variance "
            "decreases in order to find a first estimate of the PTC turn-off. "
            "Only used if doLegacyTurnoffSelection is True.",
        default=2,
        min=2
    )
    # KS-test turnoff selection parameter.
    ksTestMinPvalue = pexConfig.Field(
        dtype=float,
        doc="Minimum value of the Gaussian histogram KS test p-value to be used in PTC fit. "
            "Only used if doLegacyTurnoffSelection is False.",
        default=0.01,
    )
    doFitBootstrap = pexConfig.Field(
        dtype=bool,
        doc="Use bootstrap for the PTC fit parameters and errors?.",
        default=False,
    )
    binSize = pexConfig.Field(
        dtype=int,
        doc="Bin the image by this factor in both dimensions.",
        default=1,
    )
182class PhotonTransferCurveSolveTask(pipeBase.PipelineTask):
183 """Task to fit the PTC from flat covariances.
185 The first task of the PTC measurement pipeline,
186 ``PhotonTransferCurveMeasureTask`` (and assumed to have been run
187 before this task), produced a list of
188 `~lsst.ip.isr.PhotonTransferCurveDataset` objects. Each dataset
189 contains the mean signal and covariances of the
190 difference image of the flat-field images taken at
191 the same exposure time. The list also contains dummy
192 datasets (with no measurements), whose purpose is to have
193 the input and output dimensions of ``PhotonTransferCurveMeasureTask``
194 match.
196 This task, ``PhotonTransferCurveSolveTask``, assembles the list
197 of individual PTC datasets produced
198 by ``PhotonTransferCurveMeasureTask`` into one single final PTC
199 dataset, discarding the dummy datasets as appropriate.
200 The task fits the measured (co)variances to one of three models:
201 a polynomial model of a given order, or the models described
202 in equations 16 and 20 of Astier+19. These options are referred
203 to as ``POLYNOMIAL``, ``EXPAPPROXIMATION``, and ``FULLCOVARIANCE``
204 in the configuration options of the task, respectively.
205 Parameters of interest such as the gain and noise are derived
206 from the fits. The ``FULLCOVARIANCE`` model is fitted to the
207 full covariance data (as opposed to the other two models, which
208 are fit to the variance vs mean measurements only).
210 Astier+19: "The Shape of the Photon Transfer Curve
211 of CCD sensors", arXiv:1905.08677
212 """
214 ConfigClass = PhotonTransferCurveSolveConfig
215 _DefaultName = 'cpPhotonTransferCurveSolve'
    def runQuantum(self, butlerQC, inputRefs, outputRefs):
        """Ensure that the input and output dimensions are passed along.

        Parameters
        ----------
        butlerQC : `~lsst.daf.butler.butlerQuantumContext.ButlerQuantumContext`
            Butler to operate on.
        inputRefs : `~lsst.pipe.base.connections.InputQuantizedConnection`
            Input data refs to load.
        outputRefs : `~lsst.pipe.base.connections.OutputQuantizedConnection`
            Output data refs to persist.
        """
        inputs = butlerQC.get(inputRefs)
        # All input covariances belong to the same detector (the task's
        # dimensions are instrument+detector), so the first ref's dataId
        # identifies the detector for the solved PTC.
        detId = inputRefs.inputCovariances[0].dataId['detector']
        outputs = self.run(inputCovariances=inputs['inputCovariances'], camera=inputs['camera'], detId=detId)
        butlerQC.put(outputs, outputRefs)
    def run(self, inputCovariances, camera=None, detId=0):
        """Fit measured covariances to different models.

        Parameters
        ----------
        inputCovariances : `list` [`lsst.ip.isr.PhotonTransferCurveDataset`]
            List of lsst.ip.isr.PhotonTransferCurveDataset datasets.
        camera : `lsst.afw.cameraGeom.Camera`, optional
            Input camera.
        detId : `int`
            Detector ID to locate the detector in the camera and
            populate the `lsst.ip.isr.PhotonTransferCurveDataset`
            metadata.

        Returns
        -------
        results : `lsst.pipe.base.Struct`
            The resulting structure contains:

            ``outputPtcDataset``
                Final PTC dataset, containing information such as the
                means, variances, and exposure times
                (`lsst.ip.isr.PhotonTransferCurveDataset`).
        """
        # Find the ampNames from a non-dummy ptc.
        ampNames = []
        for partialPtcDataset in inputCovariances:
            if partialPtcDataset.ptcFitType != 'DUMMY':
                ampNames = partialPtcDataset.ampNames
                break

        # Each amp may have a different min and max ADU signal
        # specified in the config.
        maxMeanSignalDict = {ampName: 1e6 for ampName in ampNames}
        minMeanSignalDict = {ampName: 0.0 for ampName in ampNames}
        for ampName in ampNames:
            # 'ALL_AMPS' takes precedence over a per-amp entry.
            if 'ALL_AMPS' in self.config.maxMeanSignal:
                maxMeanSignalDict[ampName] = self.config.maxMeanSignal['ALL_AMPS']
            elif ampName in self.config.maxMeanSignal:
                maxMeanSignalDict[ampName] = self.config.maxMeanSignal[ampName]

            if 'ALL_AMPS' in self.config.minMeanSignal:
                minMeanSignalDict[ampName] = self.config.minMeanSignal['ALL_AMPS']
            elif ampName in self.config.minMeanSignal:
                minMeanSignalDict[ampName] = self.config.minMeanSignal[ampName]

        # Assemble individual PTC datasets into a single PTC dataset.
        datasetPtc = PhotonTransferCurveDataset(ampNames=ampNames,
                                                ptcFitType=self.config.ptcFitType,
                                                covMatrixSide=self.config.maximumRangeCovariancesAstier)
        for partialPtcDataset in inputCovariances:
            # Ignore dummy datasets
            if partialPtcDataset.ptcFitType == 'DUMMY':
                continue
            for ampName in ampNames:
                # The partial dataset consists of lists of values for each
                # quantity. In the case of the input exposure pairs, this is a
                # list of tuples. In all cases we only want the first
                # (and only) element of the list.
                datasetPtc.inputExpIdPairs[ampName].append(partialPtcDataset.inputExpIdPairs[ampName][0])
                datasetPtc.rawExpTimes[ampName] = np.append(datasetPtc.rawExpTimes[ampName],
                                                            partialPtcDataset.rawExpTimes[ampName][0])
                datasetPtc.rawMeans[ampName] = np.append(datasetPtc.rawMeans[ampName],
                                                         partialPtcDataset.rawMeans[ampName][0])
                datasetPtc.rawVars[ampName] = np.append(datasetPtc.rawVars[ampName],
                                                        partialPtcDataset.rawVars[ampName][0])
                datasetPtc.photoCharges[ampName] = np.append(datasetPtc.photoCharges[ampName],
                                                             partialPtcDataset.photoCharges[ampName][0])
                datasetPtc.histVars[ampName] = np.append(datasetPtc.histVars[ampName],
                                                         partialPtcDataset.histVars[ampName][0])
                datasetPtc.histChi2Dofs[ampName] = np.append(datasetPtc.histChi2Dofs[ampName],
                                                             partialPtcDataset.histChi2Dofs[ampName][0])
                datasetPtc.kspValues[ampName] = np.append(datasetPtc.kspValues[ampName],
                                                          partialPtcDataset.kspValues[ampName][0])
                # Covariances are (nPairs, side, side); grow along the
                # first axis by flattening, appending, and reshaping.
                datasetPtc.covariances[ampName] = np.append(
                    datasetPtc.covariances[ampName].ravel(),
                    partialPtcDataset.covariances[ampName].ravel()
                ).reshape(
                    (
                        len(datasetPtc.rawExpTimes[ampName]),
                        datasetPtc.covMatrixSide,
                        datasetPtc.covMatrixSide,
                    )
                )
                datasetPtc.covariancesSqrtWeights[ampName] = np.append(
                    datasetPtc.covariancesSqrtWeights[ampName].ravel(),
                    partialPtcDataset.covariancesSqrtWeights[ampName].ravel()
                ).reshape(
                    (
                        len(datasetPtc.rawExpTimes[ampName]),
                        datasetPtc.covMatrixSide,
                        datasetPtc.covMatrixSide,
                    )
                )

                # Apply min/max masking.
                rawMean = partialPtcDataset.rawMeans[ampName][0]
                rawVar = partialPtcDataset.rawVars[ampName][0]
                expIdMask = partialPtcDataset.expIdMask[ampName][0]
                if (rawMean <= minMeanSignalDict[ampName]) or (rawMean >= maxMeanSignalDict[ampName]) \
                        or not np.isfinite(rawMean) or not np.isfinite(rawVar):
                    expIdMask = False

                # KS-test-based selection, unless legacy turnoff
                # selection is configured.
                kspValue = partialPtcDataset.kspValues[ampName][0]
                if not self.config.doLegacyTurnoffSelection and \
                        kspValue < self.config.ksTestMinPvalue:
                    expIdMask = False

                datasetPtc.expIdMask[ampName] = np.append(datasetPtc.expIdMask[ampName], expIdMask)

            for key, value in partialPtcDataset.auxValues.items():
                if key in datasetPtc.auxValues:
                    datasetPtc.auxValues[key] = np.append(datasetPtc.auxValues[key], value)
                else:
                    datasetPtc.auxValues[key] = value

        # Sort arrays that are filled so far in the final dataset by
        # rawMeans index.
        # First compute the mean across all the amps to make sure that they are
        # all sorted the same way.
        detectorMeans = np.zeros(len(datasetPtc.inputExpIdPairs[ampNames[0]]))

        for i in range(len(detectorMeans)):
            arr = np.array([datasetPtc.rawMeans[ampName][i] for ampName in ampNames])
            good, = (np.isfinite(arr)).nonzero()
            if good.size == 0:
                detectorMeans[i] = np.nan
            else:
                detectorMeans[i] = np.mean(arr[good])

        index = np.argsort(detectorMeans)

        # Apply the single sort order to every per-amp array so all
        # amps stay aligned.
        for ampName in ampNames:
            datasetPtc.inputExpIdPairs[ampName] = np.array(
                datasetPtc.inputExpIdPairs[ampName]
            )[index].tolist()
            datasetPtc.rawExpTimes[ampName] = datasetPtc.rawExpTimes[ampName][index]
            datasetPtc.rawMeans[ampName] = datasetPtc.rawMeans[ampName][index]
            datasetPtc.rawVars[ampName] = datasetPtc.rawVars[ampName][index]
            datasetPtc.photoCharges[ampName] = datasetPtc.photoCharges[ampName][index]
            datasetPtc.histVars[ampName] = datasetPtc.histVars[ampName][index]
            datasetPtc.histChi2Dofs[ampName] = datasetPtc.histChi2Dofs[ampName][index]
            datasetPtc.kspValues[ampName] = datasetPtc.kspValues[ampName][index]
            datasetPtc.expIdMask[ampName] = datasetPtc.expIdMask[ampName][index]
            datasetPtc.covariances[ampName] = datasetPtc.covariances[ampName][index]
            datasetPtc.covariancesSqrtWeights[ampName] = datasetPtc.covariancesSqrtWeights[ampName][index]
        for key, value in datasetPtc.auxValues.items():
            datasetPtc.auxValues[key] = value[index]

        if self.config.ptcFitType == "FULLCOVARIANCE":
            # Fit the measured covariances vs mean signal to
            # the Astier+19 full model (Eq. 20). Before that
            # do a preliminary fit to the variance (C_00) vs mean
            # signal (mu) curve using the EXPAPPROXIMATION model
            # (Eq. 16 in Astier+19) in order to
            # get the flat pairs that are masked. The
            # points at these fluxes will also be masked when
            # calculating the other elements of the covariance
            # matrix, C_ij, i!=j).

            # Preliminary fit, using a temp dataset to get the mask
            tempDatasetPtc = copy.copy(datasetPtc)
            tempDatasetPtc.ptcFitType = "EXPAPPROXIMATION"
            tempDatasetPtc = self.fitMeasurementsToModel(tempDatasetPtc)

            # "FULLCOVARIANCE", using the mask obtained from the
            # previous fit.
            for ampName in datasetPtc.ampNames:
                datasetPtc.expIdMask[ampName] = tempDatasetPtc.expIdMask[ampName]
            # NOTE(review): this assigns ``fitType``, not ``ptcFitType``;
            # looks like a typo, but ``datasetPtc.ptcFitType`` is already
            # "FULLCOVARIANCE" here (set at construction), so the fit
            # below still uses the intended model — confirm before fixing.
            datasetPtc.fitType = "FULLCOVARIANCE"
            datasetPtc = self.fitMeasurementsToModel(datasetPtc)
        # The other options are: self.config.ptcFitType in
        # ("EXPAPPROXIMATION", "POLYNOMIAL")
        else:
            # Fit the PTC to a polynomial or to Astier+19 exponential
            # approximation (Eq. 16). Fill up
            # PhotonTransferCurveDataset object.
            datasetPtc = self.fitMeasurementsToModel(datasetPtc)

        if camera:
            detector = camera[detId]
        else:
            detector = None
        datasetPtc.updateMetadataFromExposures(inputCovariances)
        datasetPtc.updateMetadata(setDate=True, camera=camera, detector=detector)

        return pipeBase.Struct(
            outputPtcDataset=datasetPtc,
        )
423 def fitMeasurementsToModel(self, dataset):
424 """Fit the measured covariances vs mean signal to a
425 polynomial or one of the models in Astier+19
426 (Eq. 16 or Eq.20).
428 Parameters
429 ----------
430 dataset : `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset`
431 The dataset containing information such as the means,
432 (co)variances, and exposure times.
434 Returns
435 -------
436 dataset : `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset`
437 This is the same dataset as the input parameter, however,
438 it has been modified to include information such as the
439 fit vectors and the fit parameters. See the class
440 `PhotonTransferCurveDatase`.
441 """
442 fitType = dataset.ptcFitType
443 if fitType in ["FULLCOVARIANCE", ]:
444 # This model uses the full covariance matrix in the fit.
445 # The PTC is technically defined as variance vs signal,
446 # with variance = Cov_00
447 dataset = self.fitDataFullCovariance(dataset)
448 elif fitType in ["POLYNOMIAL", "EXPAPPROXIMATION"]:
449 # The PTC is technically defined as variance vs signal
450 dataset = self.fitPtc(dataset)
451 else:
452 raise RuntimeError(
453 f"Fitting option {fitType} not one of "
454 "'POLYNOMIAL', 'EXPAPPROXIMATION', or 'FULLCOVARIANCE'"
455 )
457 return dataset
    def fitDataFullCovariance(self, dataset):
        """Fit measured flat covariances to the full model in
        Astier+19 (Eq. 20).

        Parameters
        ----------
        dataset : `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset`
            The dataset containing information such as the means,
            (co)variances, and exposure times.

        Returns
        -------
        dataset : `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset`
            This is the same dataset as the input parameter, however,
            it has been modified to include information such as the
            fit vectors and the fit parameters. See the class
            `PhotonTransferCurveDataset`.

        Notes
        -----
        The parameters of the full model for C_ij(mu) ("C_ij" and "mu"
        in ADU^2 and ADU, respectively) in Astier+19 (Eq. 20) are:

        - "a" coefficients (r by r matrix), units: 1/e
        - "b" coefficients (r by r matrix), units: 1/e
        - noise matrix (r by r matrix), units: e^2
        - gain, units: e/ADU

        "b" appears in Eq. 20 only through the "ab" combination, which
        is defined in this code as "c=ab".

        Total number of parameters: #entries(a) + #entries(c) + #entries(noise)
        + 1. This is equivalent to r^2 + r^2 + r^2 + 1, where "r" is the
        maximum lag considered for the covariances calculation, and the
        extra "1" is the gain. If "b" is 0, then "c" is 0, and len(pInit) will
        have r^2 fewer entries.
        """
        matrixSide = self.config.maximumRangeCovariancesAstier
        lenParams = matrixSide*matrixSide

        for ampName in dataset.ampNames:
            lenInputTimes = len(dataset.rawExpTimes[ampName])
            # Not used when ptcFitType is 'FULLCOVARIANCE'
            dataset.ptcFitPars[ampName] = np.array([np.nan])
            dataset.ptcFitParsError[ampName] = np.array([np.nan])
            dataset.ptcFitChiSq[ampName] = np.nan

            if ampName in dataset.badAmps:
                # Bad amp
                # Entries need to have proper dimensions so read/write
                # with astropy.Table works.
                nanMatrix = np.full((matrixSide, matrixSide), np.nan)
                listNanMatrix = np.full((lenInputTimes, matrixSide, matrixSide), np.nan)
                dataset.covariancesModel[ampName] = listNanMatrix
                dataset.covariancesSqrtWeights[ampName] = listNanMatrix
                dataset.aMatrix[ampName] = nanMatrix
                dataset.bMatrix[ampName] = nanMatrix
                dataset.covariancesModelNoB[ampName] = listNanMatrix
                dataset.aMatrixNoB[ampName] = nanMatrix
                dataset.noiseMatrix[ampName] = nanMatrix
                dataset.noiseMatrixNoB[ampName] = nanMatrix

                dataset.expIdMask[ampName] = np.repeat(False, lenInputTimes)
                dataset.gain[ampName] = np.nan
                dataset.gainErr[ampName] = np.nan
                dataset.noise[ampName] = np.nan
                dataset.noiseErr[ampName] = np.nan
                dataset.finalVars[ampName] = np.repeat(np.nan, lenInputTimes)
                dataset.finalModelVars[ampName] = np.repeat(np.nan, lenInputTimes)
                dataset.finalMeans[ampName] = np.repeat(np.nan, lenInputTimes)
                continue

            muAtAmp = dataset.rawMeans[ampName]
            maskAtAmp = dataset.expIdMask[ampName]
            # An empty mask means no preliminary selection: use all points.
            if len(maskAtAmp) == 0:
                maskAtAmp = np.repeat(True, len(muAtAmp))

            muAtAmpMasked = muAtAmp[maskAtAmp]
            covAtAmp = dataset.covariances[ampName]
            # nan_to_num: keep the fitter numerically safe; masked-out
            # points carry zero weight anyway.
            covAtAmpMasked = np.nan_to_num(covAtAmp)[maskAtAmp]
            covSqrtWeightsAtAmp = dataset.covariancesSqrtWeights[ampName]
            covSqrtWeightsAtAmpMasked = np.nan_to_num(covSqrtWeightsAtAmp)[maskAtAmp]

            # Initial fit, to approximate parameters, with c=0
            a0, c0, noise0, gain0 = self.initialFitFullCovariance(
                muAtAmpMasked,
                covAtAmpMasked,
                covSqrtWeightsAtAmpMasked
            )

            # Fit full model (Eq. 20 of Astier+19) and same model with
            # b=0 (c=0 in this code)
            pInit = np.concatenate((a0.ravel(), c0.ravel(), noise0.ravel(), np.array(gain0)), axis=None)
            functionsDict = {'fullModel': self.funcFullCovarianceModel,
                             'fullModelNoB': self.funcFullCovarianceModelNoB}
            fitResults = {'fullModel': {'a': [], 'c': [], 'noise': [], 'gain': [], 'paramsErr': []},
                          'fullModelNoB': {'a': [], 'c': [], 'noise': [], 'gain': [], 'paramsErr': []}}
            for key in functionsDict:
                params, paramsErr, _ = fitLeastSq(pInit, muAtAmpMasked,
                                                  covAtAmpMasked.ravel(), functionsDict[key],
                                                  weightsY=covSqrtWeightsAtAmpMasked.ravel())
                # Flat parameter vector layout: [a (r^2), c (r^2),
                # noise (r^2), gain].
                a = params[:lenParams].reshape((matrixSide, matrixSide))
                c = params[lenParams:2*lenParams].reshape((matrixSide, matrixSide))
                noise = params[2*lenParams:3*lenParams].reshape((matrixSide, matrixSide))
                gain = params[-1]

                fitResults[key]['a'] = a
                fitResults[key]['c'] = c
                fitResults[key]['noise'] = noise
                fitResults[key]['gain'] = gain
                fitResults[key]['paramsErr'] = paramsErr

            # Put the information in the PTC dataset

            # Not used when ptcFitType is 'FULLCOVARIANCE'
            dataset.ptcFitPars[ampName] = np.array([np.nan])
            dataset.ptcFitParsError[ampName] = np.array([np.nan])
            dataset.ptcFitChiSq[ampName] = np.nan

            # Save full covariances, covariances models, and their weights.
            # dataset.expIdMask is already full, but needs to be
            # converted to bool.
            dataset.expIdMask[ampName] = np.array(dataset.expIdMask[ampName], dtype=bool)
            dataset.covariances[ampName] = covAtAmp
            # We evaluate the covariance model everywhere, even the
            # masked amps.
            dataset.covariancesModel[ampName] = self.evalCovModel(muAtAmp,
                                                                  fitResults['fullModel']['a'],
                                                                  fitResults['fullModel']['c'],
                                                                  fitResults['fullModel']['noise'],
                                                                  fitResults['fullModel']['gain'])
            dataset.covariancesSqrtWeights[ampName] = covSqrtWeightsAtAmp
            dataset.aMatrix[ampName] = fitResults['fullModel']['a']
            # b = c/a by definition (c = a*b).
            dataset.bMatrix[ampName] = fitResults['fullModel']['c']/fitResults['fullModel']['a']
            dataset.covariancesModelNoB[ampName] = self.evalCovModel(muAtAmp,
                                                                     fitResults['fullModelNoB']['a'],
                                                                     fitResults['fullModelNoB']['c'],
                                                                     fitResults['fullModelNoB']['noise'],
                                                                     fitResults['fullModelNoB']['gain'],
                                                                     setBtoZero=True)
            dataset.aMatrixNoB[ampName] = fitResults['fullModelNoB']['a']
            dataset.gain[ampName] = fitResults['fullModel']['gain']
            dataset.gainErr[ampName] = fitResults['fullModel']['paramsErr'][-1]
            # Read noise is the (0, 0) element of the noise matrix.
            readoutNoise = fitResults['fullModel']['noise'][0][0]
            readoutNoiseSqrt = np.sqrt(np.fabs(readoutNoise))
            dataset.noise[ampName] = readoutNoise
            readoutNoiseSigma = fitResults['fullModel']['paramsErr'][2*lenParams]
            # Error propagation: sigma(sqrt(x)) = 0.5*sigma(x)/sqrt(x).
            dataset.noiseErr[ampName] = 0.5*(readoutNoiseSigma/np.fabs(readoutNoise))*readoutNoiseSqrt
            dataset.noiseMatrix[ampName] = fitResults['fullModel']['noise']
            dataset.noiseMatrixNoB[ampName] = fitResults['fullModelNoB']['noise']

            dataset.finalVars[ampName] = covAtAmp[:, 0, 0]
            dataset.finalModelVars[ampName] = dataset.covariancesModel[ampName][:, 0, 0]
            dataset.finalMeans[ampName] = muAtAmp

        return dataset
    def initialFitFullCovariance(self, mu, cov, sqrtW):
        """Performs a crude parabolic fit of the data in order to start
        the full fit close to the solution, setting b=0 (c=0) in Eq. 20
        of Astier+19.

        Parameters
        ----------
        mu : `numpy.array`, (N,)
            Signal `mu` (ADU)
        cov : `numpy.array`, (N, M, M)
            Covariance arrays of size `(M, M)` (with
            `M = config.maximumRangeCovariancesAstier`),
            indexed by mean signal `mu`.
        sqrtW : `numpy.array`, (N, M, M)
            Covariance weights, defined as 1./sqrt(Variances)

        Returns
        -------
        a : `numpy.array`, (M, M)
            "a" parameter per flux in Eq. 20 of Astier+19.
        c : `numpy.array`, (M, M)
            "c"="ab" parameter per flux in Eq. 20 of Astier+19.
        noise : `numpy.array`, (M, M)
            "noise" parameter per flux in Eq. 20 of Astier+19.
        gain : `float`
            Amplifier gain (e/ADU)
        """
        matrixSide = self.config.maximumRangeCovariancesAstier

        # Initialize fit parameters
        a = np.zeros((matrixSide, matrixSide))
        c = np.zeros((matrixSide, matrixSide))
        noise = np.zeros((matrixSide, matrixSide))
        gain = 1.

        # iterate the fit to account for higher orders
        # the chi2 does not necessarily go down, so one could
        # stop when it increases
        oldChi2 = 1e30
        for _ in range(5):
            # Residuals relative to the current (b=0) model; each pass
            # refines the parameters with a quadratic fit per lag.
            model = np.nan_to_num(self.evalCovModel(mu, a, c, noise, gain, setBtoZero=True))
            # loop on lags
            for i in range(matrixSide):
                for j in range(matrixSide):
                    # fit a parabola for a given lag
                    parsFit = np.polyfit(mu, cov[:, i, j] - model[:, i, j],
                                         2, w=sqrtW[:, i, j])
                    # model equation (Eq. 20) in Astier+19, with c=a*b=0:
                    a[i, j] += parsFit[0]
                    noise[i, j] += parsFit[2]
                    if i + j == 0:
                        # The linear term of the variance (lag 0,0)
                        # updates the gain estimate.
                        gain = 1./(1/gain+parsFit[1])
            weightedRes = (model - cov)*sqrtW
            chi2 = (weightedRes.flatten()**2).sum()
            # Stop iterating once the chi2 stops improving.
            if chi2 > oldChi2:
                break
            oldChi2 = chi2

        return a, c, noise, gain
676 def funcFullCovarianceModel(self, params, x):
677 """Model to fit covariances from flat fields; Equation 20 of
678 Astier+19.
680 Parameters
681 ----------
682 params : `list`
683 Parameters of the model: aMatrix, CMatrix, noiseMatrix,
684 gain (e/ADU).
685 x : `numpy.array`, (N,)
686 Signal `mu` (ADU)
688 Returns
689 -------
690 y : `numpy.array`, (N,)
691 Covariance matrix.
692 """
693 matrixSide = self.config.maximumRangeCovariancesAstier
694 lenParams = matrixSide*matrixSide
695 aMatrix = params[:lenParams].reshape((matrixSide, matrixSide))
696 cMatrix = params[lenParams:2*lenParams].reshape((matrixSide, matrixSide))
697 noiseMatrix = params[2*lenParams:3*lenParams].reshape((matrixSide, matrixSide))
698 gain = params[-1]
700 return self.evalCovModel(x, aMatrix, cMatrix, noiseMatrix, gain).flatten()
702 def funcFullCovarianceModelNoB(self, params, x):
703 """Model to fit covariances from flat fields; Equation 20 of
704 Astier+19, with b=0 (equivalent to c=a*b=0 in this code).
706 Parameters
707 ----------
708 params : `list`
709 Parameters of the model: aMatrix, CMatrix, noiseMatrix,
710 gain (e/ADU).
711 x : `numpy.array`, (N,)
712 Signal mu (ADU)
714 Returns
715 -------
716 y : `numpy.array`, (N,)
717 Covariance matrix.
718 """
719 matrixSide = self.config.maximumRangeCovariancesAstier
720 lenParams = matrixSide*matrixSide
721 aMatrix = params[:lenParams].reshape((matrixSide, matrixSide))
722 cMatrix = params[lenParams:2*lenParams].reshape((matrixSide, matrixSide))
723 noiseMatrix = params[2*lenParams:3*lenParams].reshape((matrixSide, matrixSide))
724 gain = params[-1]
726 return self.evalCovModel(x, aMatrix, cMatrix, noiseMatrix, gain, setBtoZero=True).flatten()
    def evalCovModel(self, mu, aMatrix, cMatrix, noiseMatrix, gain, setBtoZero=False):
        """Computes full covariances model (Eq. 20 of Astier+19).

        Parameters
        ----------
        mu : `numpy.array`, (N,)
            List of mean signals.
        aMatrix : `numpy.array`, (M, M)
            "a" parameter per flux in Eq. 20 of Astier+19.
        cMatrix : `numpy.array`, (M, M)
            "c"="ab" parameter per flux in Eq. 20 of Astier+19.
        noiseMatrix : `numpy.array`, (M, M)
            "noise" parameter per flux in Eq. 20 of Astier+19.
        gain : `float`
            Amplifier gain (e/ADU)
        setBtoZero : `bool`, optional
            Set "b" parameter in full model (see Astier+19) to zero.

        Returns
        -------
        covModel : `numpy.array`, (N, M, M)
            Covariances model.

        Notes
        -----
        By default, computes the covModel for the mu's stored(self.mu).
        Returns cov[Nmu, M, M]. The variance for the PTC is
        cov[:, 0, 0]. mu and cov are in ADUs and ADUs squared. To use
        electrons for both, the gain should be set to 1. This routine
        implements the model in Astier+19 (1905.08677).
        The parameters of the full model for C_ij(mu) ("C_ij" and "mu"
        in ADU^2 and ADU, respectively) in Astier+19 (Eq. 20) are:

        - "a" coefficients (M by M matrix), units: 1/e
        - "b" coefficients (M by M matrix), units: 1/e
        - noise matrix (M by M matrix), units: e^2
        - gain, units: e/ADU

        "b" appears in Eq. 20 only through the "ab" combination, which
        is defined in this code as "c=ab".
        """
        matrixSide = self.config.maximumRangeCovariancesAstier
        sa = (matrixSide, matrixSide)
        # pad a with zeros and symmetrize
        aEnlarged = np.zeros((int(sa[0]*1.5)+1, int(sa[1]*1.5)+1))
        aEnlarged[0:sa[0], 0:sa[1]] = aMatrix
        aSym = symmetrize(aEnlarged)
        # pad c with zeros and symmetrize
        cEnlarged = np.zeros((int(sa[0]*1.5)+1, int(sa[1]*1.5)+1))
        cEnlarged[0:sa[0], 0:sa[1]] = cMatrix
        cSym = symmetrize(cEnlarged)
        # Discrete convolutions implement the a*a, a*a*a, and a*c terms
        # of Eq. 20 (fftconvolve on the symmetrized, zero-padded grids).
        a2 = fftconvolve(aSym, aSym, mode='same')
        a3 = fftconvolve(a2, aSym, mode='same')
        ac = fftconvolve(aSym, cSym, mode='same')
        # Locate the center (peak of |aSym|) to crop the convolutions
        # back to the (M, M) quadrant.
        (xc, yc) = np.unravel_index(np.abs(aSym).argmax(), a2.shape)

        a1 = aMatrix[np.newaxis, :, :]
        a2 = a2[np.newaxis, xc:xc + matrixSide, yc:yc + matrixSide]
        a3 = a3[np.newaxis, xc:xc + matrixSide, yc:yc + matrixSide]
        ac = ac[np.newaxis, xc:xc + matrixSide, yc:yc + matrixSide]
        c1 = cMatrix[np.newaxis, ::]

        # assumes that mu is 1d
        bigMu = mu[:, np.newaxis, np.newaxis]*gain
        # c(=a*b in Astier+19) also has a contribution to the last
        # term, that is absent for now.
        if setBtoZero:
            c1 = np.zeros_like(c1)
            ac = np.zeros_like(ac)
        covModel = (bigMu/(gain*gain)*(a1*bigMu+2./3.*(bigMu*bigMu)*(a2 + c1)
                    + (1./3.*a3 + 5./6.*ac)*(bigMu*bigMu*bigMu)) + noiseMatrix[np.newaxis, :, :]/gain**2)
        # add the Poisson term, and the read out noise (variance)
        covModel[:, 0, 0] += mu/gain

        return covModel
804 # EXPAPPROXIMATION and POLYNOMIAL fit methods
805 @staticmethod
806 def _initialParsForPolynomial(order):
807 assert order >= 2
808 pars = np.zeros(order, dtype=float)
809 pars[0] = 10
810 pars[1] = 1
811 pars[2:] = 0.0001
812 return pars
814 @staticmethod
815 def _boundsForPolynomial(initialPars, lowers=[], uppers=[]):
816 if not len(lowers):
817 lowers = [np.NINF for p in initialPars]
818 if not len(uppers):
819 uppers = [np.inf for p in initialPars]
820 lowers[1] = 0 # no negative gains
821 return (lowers, uppers)
823 @staticmethod
824 def _boundsForAstier(initialPars, lowers=[], uppers=[]):
825 if not len(lowers):
826 lowers = [np.NINF for p in initialPars]
827 if not len(uppers):
828 uppers = [np.inf for p in initialPars]
829 return (lowers, uppers)
831 @staticmethod
832 def _getInitialGoodPoints(means, variances, minVarPivotSearch, consecutivePointsVarDecreases):
833 """Return a boolean array to mask bad points.
835 Parameters
836 ----------
837 means : `numpy.array`
838 Input array with mean signal values.
839 variances : `numpy.array`
840 Input array with variances at each mean value.
841 minVarPivotSearch : `float`
842 The variance (in ADU^2), above which, the point
843 of decreasing variance should be sought.
844 consecutivePointsVarDecreases : `int`
845 Required number of consecutive points/fluxes
846 in the PTC where the variance
847 decreases in order to find a first
848 estimate of the PTC turn-off.
850 Returns
851 ------
852 goodPoints : `numpy.array` [`bool`]
853 Boolean array to select good (`True`) and bad (`False`)
854 points.
856 Notes
857 -----
858 Eliminate points beyond which the variance decreases.
859 """
860 goodPoints = np.ones_like(means, dtype=bool)
861 # Variances are sorted and should monotonically increase
862 pivotList = np.where(np.array(np.diff(variances)) < 0)[0]
863 if len(pivotList) > 0:
864 # For small values, sometimes the variance decreases slightly
865 # Only look when var > self.config.minVarPivotSearch
866 pivotList = [p for p in pivotList if variances[p] > minVarPivotSearch]
867 # Require that the varince decreases during
868 # consecutivePointsVarDecreases
869 # consecutive points. This will give a first
870 # estimate of the PTC turn-off, which
871 # may be updated (reduced) further in the code.
872 if len(pivotList) > 1:
873 # enumerate(pivotList) creates tuples (index, value), for
874 # each value in pivotList. The lambda function subtracts
875 # each value from the index.
876 # groupby groups elements by equal key value.
877 for k, g in groupby(enumerate(pivotList), lambda x: x[0]-x[1]):
878 group = (map(itemgetter(1), g))
879 # Form groups of consecute values from pivotList
880 group = list(map(int, group))
881 # values in pivotList are indices where np.diff(variances)
882 # is negative, i.e., where the variance starts decreasing.
883 # Find the first group of consecutive numbers when
884 # variance decreases.
885 if len(group) >= consecutivePointsVarDecreases:
886 pivotIndex = np.min(group)
887 goodPoints[pivotIndex+1:] = False
888 break
890 # Finally, we filter out any infinities or NaNs.
891 goodPoints[(~np.isfinite(means)) | (~np.isfinite(variances))] = False
893 return goodPoints
895 def _makeZeroSafe(self, array, substituteValue=1e-9):
896 """"""
897 array = np.array(array)
898 nBad = Counter(np.ravel(array))[0]
899 if nBad == 0:
900 return array
902 index, = np.where(array == 0)
903 if len(index):
904 msg = f"Found {nBad} zeros in array at elements {index}"
905 self.log.warning(msg)
907 array[index] = substituteValue
909 return array
911 def fitPtc(self, dataset):
912 """Fit the photon transfer curve to a polynomial or to the
913 Astier+19 approximation (Eq. 16).
915 Fit the photon transfer curve with either a polynomial of
916 the order specified in the task config, or using the
917 exponential approximation in Astier+19 (Eq. 16).
919 Sigma clipping is performed iteratively for the fit, as
920 well as an initial clipping of data points that are more
921 than `config.initialNonLinearityExclusionThreshold` away
922 from lying on a straight line. This other step is necessary
923 because the photon transfer curve turns over catastrophically
924 at very high flux (because saturation
925 drops the variance to ~0) and these far outliers cause the
926 initial fit to fail, meaning the sigma cannot be calculated
927 to perform the sigma-clipping.
929 Parameters
930 ----------
931 dataset : `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset`
932 The dataset containing the means, variances and
933 exposure times.
935 Returns
936 -------
937 dataset : `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset`
938 This is the same dataset as the input parameter, however,
939 it has been modified to include information such as the
940 fit vectors and the fit parameters. See the class
941 `PhotonTransferCurveDatase`.
943 Raises
944 ------
945 RuntimeError
946 Raised if dataset.ptcFitType is None or empty.
947 """
948 if dataset.ptcFitType:
949 ptcFitType = dataset.ptcFitType
950 else:
951 raise RuntimeError("ptcFitType is None of empty in PTC dataset.")
952 matrixSide = self.config.maximumRangeCovariancesAstier
953 nanMatrix = np.empty((matrixSide, matrixSide))
954 nanMatrix[:] = np.nan
956 for amp in dataset.ampNames:
957 lenInputTimes = len(dataset.rawExpTimes[amp])
958 listNanMatrix = np.empty((lenInputTimes, matrixSide, matrixSide))
959 listNanMatrix[:] = np.nan
961 dataset.covariancesModel[amp] = listNanMatrix
962 dataset.aMatrix[amp] = nanMatrix
963 dataset.bMatrix[amp] = nanMatrix
964 dataset.covariancesModelNoB[amp] = listNanMatrix
965 dataset.aMatrixNoB[amp] = nanMatrix
966 dataset.noiseMatrix[amp] = nanMatrix
967 dataset.noiseMatrixNoB[amp] = nanMatrix
969 def errFunc(p, x, y):
970 return ptcFunc(p, x) - y
972 sigmaCutPtcOutliers = self.config.sigmaCutPtcOutliers
973 maxIterationsPtcOutliers = self.config.maxIterationsPtcOutliers
975 for i, ampName in enumerate(dataset.ampNames):
976 meanVecOriginal = dataset.rawMeans[ampName].copy()
977 varVecOriginal = dataset.rawVars[ampName].copy()
978 varVecOriginal = self._makeZeroSafe(varVecOriginal)
980 if self.config.doLegacyTurnoffSelection:
981 # Discard points when the variance starts to decrease after two
982 # consecutive signal levels
983 goodPoints = self._getInitialGoodPoints(meanVecOriginal, varVecOriginal,
984 self.config.minVarPivotSearch,
985 self.config.consecutivePointsVarDecreases)
986 else:
987 goodPoints = dataset.expIdMask[ampName]
989 # Check if all points are bad from the 'cpExtractPtcTask'
990 initialExpIdMask = dataset.expIdMask[ampName]
992 if not (goodPoints.any() and initialExpIdMask.any()):
993 msg = (f"SERIOUS: All points in goodPoints: {goodPoints} or "
994 f"in initialExpIdMask: {initialExpIdMask} are bad."
995 f"Setting {ampName} to BAD.")
996 self.log.warning(msg)
997 # Fill entries with NaNs
998 self.fillBadAmp(dataset, ptcFitType, ampName)
999 continue
1001 mask = goodPoints
1003 if ptcFitType == 'EXPAPPROXIMATION':
1004 ptcFunc = funcAstier
1005 parsIniPtc = [-1e-9, 1.0, 10.] # a00, gain, noise^2
1006 # lowers and uppers obtained from BOT data studies by
1007 # C. Lage (UC Davis, 11/2020).
1008 if self.config.binSize > 1:
1009 bounds = self._boundsForAstier(parsIniPtc)
1010 else:
1011 bounds = self._boundsForAstier(parsIniPtc, lowers=[-1e-4, 0.5, -2000],
1012 uppers=[1e-4, 2.5, 2000])
1013 if ptcFitType == 'POLYNOMIAL':
1014 ptcFunc = funcPolynomial
1015 parsIniPtc = self._initialParsForPolynomial(self.config.polynomialFitDegree + 1)
1016 bounds = self._boundsForPolynomial(parsIniPtc)
1018 # We perform an initial (unweighted) fit of variance vs signal
1019 # (after initial KS test or post-drop selection) to look for
1020 # outliers, particularly at the high-flux end. The initial fit
1021 # is performed only for points that are guaranteed to be below
1022 # the PTC turnoff and then extrapolated to ensure that high
1023 # flux points that have abnormal variance values can be properly
1024 # rejected in this phase without biasing the initial fit.
1025 # This algorithm was initially developed by Seth Digel for
1026 # the EO Testing pipeline.
1028 if maxIterationsPtcOutliers == 0:
1029 # We are not doing any outlier rejection here, but we do want
1030 # an initial fit.
1031 res = least_squares(
1032 errFunc,
1033 parsIniPtc,
1034 bounds=bounds,
1035 args=(meanVecOriginal[mask], varVecOriginal[mask]),
1036 )
1037 pars = res.x
1038 newMask = mask.copy()
1039 else:
1040 newMask = (mask & (meanVecOriginal <= self.config.maxSignalInitialPtcOutlierFit))
1042 count = 0
1043 lastMask = mask.copy()
1044 while count < maxIterationsPtcOutliers:
1045 res = least_squares(
1046 errFunc,
1047 parsIniPtc,
1048 bounds=bounds,
1049 args=(meanVecOriginal[newMask], varVecOriginal[newMask]),
1050 )
1051 pars = res.x
1053 sigResids = (varVecOriginal - ptcFunc(pars, meanVecOriginal))/np.sqrt(varVecOriginal)
1054 # The new mask includes points where the residuals are
1055 # finite, are less than the cut, and include the original
1056 # mask of known points that should not be used.
1057 newMask = (
1058 np.isfinite(sigResids)
1059 & (np.abs(np.nan_to_num(sigResids)) < sigmaCutPtcOutliers)
1060 & mask
1061 )
1062 if np.count_nonzero(newMask) == 0:
1063 msg = (f"SERIOUS: All points after outlier rejection are bad. "
1064 f"Setting {ampName} to BAD.")
1065 self.log.warning(msg)
1066 # Fill entries with NaNs
1067 self.fillBadAmp(dataset, ptcFitType, ampName)
1068 break
1070 self.log.debug(
1071 "Iteration %d: Removed %d points in total for %s.",
1072 count,
1073 np.count_nonzero(mask) - np.count_nonzero(newMask),
1074 ampName,
1075 )
1077 # If the mask hasn't changed then break out.
1078 if np.all(newMask == lastMask):
1079 self.log.debug("Convergence at iteration %d; breaking loop for %s.", count, ampName)
1080 break
1082 lastMask = newMask.copy()
1084 count += 1
1086 # Set the mask to the new mask
1087 mask = newMask.copy()
1089 if not mask.any():
1090 # We hae already filled the bad amp above, so continue.
1091 continue
1093 dataset.expIdMask[ampName] = mask
1095 parsIniPtc = pars
1096 meanVecFinal = meanVecOriginal[mask]
1097 varVecFinal = varVecOriginal[mask]
1099 # Save the maximum point after outlier detection as the
1100 # PTC turnoff point.
1101 dataset.ptcTurnoff[ampName] = meanVecFinal[-1]
1103 if Counter(mask)[False] > 0:
1104 self.log.info("Number of points discarded in PTC of amplifier %s:"
1105 " %d out of %d", ampName, Counter(mask)[False], len(meanVecOriginal))
1107 if (len(meanVecFinal) < len(parsIniPtc)):
1108 msg = (f"SERIOUS: Not enough data points ({len(meanVecFinal)}) compared to the number of "
1109 f"parameters of the PTC model({len(parsIniPtc)}). Setting {ampName} to BAD.")
1110 self.log.warning(msg)
1111 # Fill entries with NaNs
1112 self.fillBadAmp(dataset, ptcFitType, ampName)
1113 continue
1114 # Fit the PTC.
1115 # The variance of the variance is Var(v)=2*v^2/Npix. This is
1116 # already calculated in `makeCovArray` of CpPtcExtract.
1117 # dataset.covariancesSqrtWeights[ampName][:,0,0]
1118 # has 1/sqrt(Var(v)).
1119 weightsY = dataset.covariancesSqrtWeights[ampName][:, 0, 0][mask]
1120 if self.config.doFitBootstrap:
1121 parsFit, parsFitErr, reducedChiSqPtc = fitBootstrap(parsIniPtc, meanVecFinal,
1122 varVecFinal, ptcFunc,
1123 weightsY=weightsY)
1124 else:
1125 parsFit, parsFitErr, reducedChiSqPtc = fitLeastSq(parsIniPtc, meanVecFinal,
1126 varVecFinal, ptcFunc,
1127 weightsY=weightsY)
1128 dataset.ptcFitPars[ampName] = parsFit
1129 dataset.ptcFitParsError[ampName] = parsFitErr
1130 dataset.ptcFitChiSq[ampName] = reducedChiSqPtc
1132 dataset.finalVars[ampName] = varVecOriginal
1133 dataset.finalVars[ampName][~mask] = np.nan
1134 dataset.finalModelVars[ampName] = ptcFunc(parsFit, meanVecOriginal)
1135 dataset.finalModelVars[ampName][~mask] = np.nan
1136 dataset.finalMeans[ampName] = meanVecOriginal
1137 dataset.finalMeans[ampName][~mask] = np.nan
1139 if ptcFitType == 'EXPAPPROXIMATION':
1140 ptcGain = parsFit[1]
1141 ptcGainErr = parsFitErr[1]
1142 ptcNoise = np.sqrt(np.fabs(parsFit[2]))
1143 ptcNoiseErr = 0.5*(parsFitErr[2]/np.fabs(parsFit[2]))*np.sqrt(np.fabs(parsFit[2]))
1144 if ptcFitType == 'POLYNOMIAL':
1145 ptcGain = 1./parsFit[1]
1146 ptcGainErr = np.fabs(1./parsFit[1])*(parsFitErr[1]/parsFit[1])
1147 ptcNoise = np.sqrt(np.fabs(parsFit[0]))*ptcGain
1148 ptcNoiseErr = (0.5*(parsFitErr[0]/np.fabs(parsFit[0]))*(np.sqrt(np.fabs(parsFit[0]))))*ptcGain
1149 dataset.gain[ampName] = ptcGain
1150 dataset.gainErr[ampName] = ptcGainErr
1151 dataset.noise[ampName] = ptcNoise
1152 dataset.noiseErr[ampName] = ptcNoiseErr
1154 if not len(dataset.ptcFitType) == 0:
1155 dataset.ptcFitType = ptcFitType
1156 if len(dataset.badAmps) == 0:
1157 dataset.badAmps = []
1159 return dataset
1161 def fillBadAmp(self, dataset, ptcFitType, ampName):
1162 """Fill the dataset with NaNs if there are not enough
1163 good points.
1165 Parameters
1166 ----------
1167 dataset : `lsst.ip.isr.ptcDataset.PhotonTransferCurveDataset`
1168 The dataset containing the means, variances and
1169 exposure times.
1170 ptcFitType : {'POLYNOMIAL', 'EXPAPPROXIMATION'}
1171 Fit a 'POLYNOMIAL' (degree: 'polynomialFitDegree') or
1172 'EXPAPPROXIMATION' (Eq. 16 of Astier+19) to the PTC.
1173 ampName : `str`
1174 Amplifier name.
1175 """
1176 dataset.badAmps.append(ampName)
1177 dataset.expIdMask[ampName] = np.repeat(False, len(dataset.rawExpTimes[ampName]))
1178 dataset.gain[ampName] = np.nan
1179 dataset.gainErr[ampName] = np.nan
1180 dataset.noise[ampName] = np.nan
1181 dataset.noiseErr[ampName] = np.nan
1182 dataset.ptcFitPars[ampName] = (np.repeat(np.nan, self.config.polynomialFitDegree + 1) if
1183 ptcFitType in ["POLYNOMIAL", ] else np.repeat(np.nan, 3))
1184 dataset.ptcFitParsError[ampName] = (np.repeat(np.nan, self.config.polynomialFitDegree + 1) if
1185 ptcFitType in ["POLYNOMIAL", ] else np.repeat(np.nan, 3))
1186 dataset.ptcFitChiSq[ampName] = np.nan
1187 dataset.ptcTurnoff[ampName] = np.nan
1188 dataset.finalVars[ampName] = np.repeat(np.nan, len(dataset.rawExpTimes[ampName]))
1189 dataset.finalModelVars[ampName] = np.repeat(np.nan, len(dataset.rawExpTimes[ampName]))
1190 dataset.finalMeans[ampName] = np.repeat(np.nan, len(dataset.rawExpTimes[ampName]))
1192 return