Coverage for python/lsst/cp/pipe/ptc/cpExtractPtcTask.py: 11%
310 statements
1# This file is part of cp_pipe.
2#
3# Developed for the LSST Data Management System.
4# This product includes software developed by the LSST Project
5# (https://www.lsst.org).
6# See the COPYRIGHT file at the top-level directory of this distribution
7# for details of code ownership.
8#
9# This program is free software: you can redistribute it and/or modify
10# it under the terms of the GNU General Public License as published by
11# the Free Software Foundation, either version 3 of the License, or
12# (at your option) any later version.
13#
14# This program is distributed in the hope that it will be useful,
15# but WITHOUT ANY WARRANTY; without even the implied warranty of
16# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17# GNU General Public License for more details.
18#
19# You should have received a copy of the GNU General Public License
20# along with this program. If not, see <https://www.gnu.org/licenses/>.
21#
22import numpy as np
23from lmfit.models import GaussianModel
24import scipy.stats
25import warnings
27import lsst.afw.math as afwMath
28import lsst.pex.config as pexConfig
29import lsst.pipe.base as pipeBase
30from lsst.cp.pipe.utils import (arrangeFlatsByExpTime, arrangeFlatsByExpId,
31 arrangeFlatsByExpFlux, sigmaClipCorrection,
32 CovFastFourierTransform)
34import lsst.pipe.base.connectionTypes as cT
36from lsst.ip.isr import PhotonTransferCurveDataset
37from lsst.ip.isr import IsrTask
39__all__ = ['PhotonTransferCurveExtractConfig', 'PhotonTransferCurveExtractTask']
42class PhotonTransferCurveExtractConnections(pipeBase.PipelineTaskConnections,
43 dimensions=("instrument", "detector")):
45 inputExp = cT.Input(
46 name="ptcInputExposurePairs",
47 doc="Input post-ISR processed exposure pairs (flats) to "
48 "measure covariances from.",
49 storageClass="Exposure",
50 dimensions=("instrument", "exposure", "detector"),
51 multiple=True,
52 deferLoad=True,
53 )
54 taskMetadata = cT.Input(
55 name="isr_metadata",
56 doc="Input task metadata to extract statistics from.",
57 storageClass="TaskMetadata",
58 dimensions=("instrument", "exposure", "detector"),
59 multiple=True,
60 )
61 outputCovariances = cT.Output(
62 name="ptcCovariances",
63 doc="Extracted flat (co)variances.",
64 storageClass="PhotonTransferCurveDataset",
65 dimensions=("instrument", "exposure", "detector"),
66 isCalibration=True,
67 multiple=True,
68 )
71class PhotonTransferCurveExtractConfig(pipeBase.PipelineTaskConfig,
72 pipelineConnections=PhotonTransferCurveExtractConnections):
73 """Configuration for the measurement of covariances from flats.
74 """
75 matchExposuresType = pexConfig.ChoiceField(
76 dtype=str,
77 doc="Match input exposures by time, flux, or expId",
78 default='TIME',
79 allowed={
80 "TIME": "Match exposures by exposure time.",
81 "FLUX": "Match exposures by target flux. Use header keyword"
82 " in matchExposuresByFluxKeyword to find the flux.",
83 "EXPID": "Match exposures by exposure ID."
84 }
85 )
86 matchExposuresByFluxKeyword = pexConfig.Field(
87 dtype=str,
88 doc="Header keyword for flux if matchExposuresType is FLUX.",
89 default='CCOBFLUX',
90 )
91 maximumRangeCovariancesAstier = pexConfig.Field(
92 dtype=int,
93 doc="Maximum range of covariances as in Astier+19",
94 default=8,
95 )
96 binSize = pexConfig.Field(
97 dtype=int,
98 doc="Bin the image by this factor in both dimensions.",
99 default=1,
100 )
101 minMeanSignal = pexConfig.DictField(
102 keytype=str,
103 itemtype=float,
104 doc="Minimum values (inclusive) of mean signal (in ADU) per amp to use."
105 " The same cut is applied to all amps if this parameter [`dict`] is passed as "
106 " {'ALL_AMPS': value}",
107 default={'ALL_AMPS': 0.0},
108 deprecated="This config has been moved to cpSolvePtcTask, and will be removed after v26.",
109 )
110 maxMeanSignal = pexConfig.DictField(
111 keytype=str,
112 itemtype=float,
113 doc="Maximum values (inclusive) of mean signal (in ADU) per amp to use."
114 " The same cut is applied to all amps if this dictionary is of the form"
115 " {'ALL_AMPS': value}",
116 default={'ALL_AMPS': 1e6},
117 deprecated="This config has been moved to cpSolvePtcTask, and will be removed after v26.",
118 )
119 maskNameList = pexConfig.ListField(
120 dtype=str,
121 doc="Mask list to exclude from statistics calculations.",
122 default=['SUSPECT', 'BAD', 'NO_DATA', 'SAT'],
123 )
124 nSigmaClipPtc = pexConfig.Field(
125 dtype=float,
126 doc="Sigma cut for afwMath.StatisticsControl()",
127 default=5.5,
128 )
129 nIterSigmaClipPtc = pexConfig.Field(
130 dtype=int,
131 doc="Number of sigma-clipping iterations for afwMath.StatisticsControl()",
132 default=3,
133 )
134 minNumberGoodPixelsForCovariance = pexConfig.Field(
135 dtype=int,
136 doc="Minimum number of acceptable good pixels per amp to calculate the covariances (via FFT or"
137 " direclty).",
138 default=10000,
139 )
140 thresholdDiffAfwVarVsCov00 = pexConfig.Field(
141 dtype=float,
142 doc="If the absolute fractional difference between afwMath.VARIANCECLIP and Cov00 "
143 "for a region of a difference image is greater than this threshold (percentage), "
144 "a warning will be issued.",
145 default=1.,
146 )
147 detectorMeasurementRegion = pexConfig.ChoiceField(
148 dtype=str,
149 doc="Region of each exposure in which to perform the calculations (amplifier or full image).",
150 default='AMP',
151 allowed={
152 "AMP": "Amplifier of the detector.",
153 "FULL": "Full image."
154 }
155 )
156 numEdgeSuspect = pexConfig.Field(
157 dtype=int,
158 doc="Number of edge pixels to be flagged as untrustworthy.",
159 default=0,
160 )
161 edgeMaskLevel = pexConfig.ChoiceField(
162 dtype=str,
163 doc="Mask edge pixels in which coordinate frame: DETECTOR or AMP?",
164 default="DETECTOR",
165 allowed={
166 'DETECTOR': 'Mask only the edges of the full detector.',
167 'AMP': 'Mask edges of each amplifier.',
168 },
169 )
170 doGain = pexConfig.Field(
171 dtype=bool,
172 doc="Calculate a gain per input flat pair.",
173 default=True,
174 )
175 gainCorrectionType = pexConfig.ChoiceField(
176 dtype=str,
177 doc="Correction type for the gain.",
178 default='FULL',
179 allowed={
180 'NONE': 'No correction.',
181 'SIMPLE': 'First order correction.',
182 'FULL': 'Second order correction.'
183 }
184 )
185 ksHistNBins = pexConfig.Field(
186 dtype=int,
187 doc="Number of bins for the KS test histogram.",
188 default=100,
189 )
190 ksHistLimitMultiplier = pexConfig.Field(
191 dtype=float,
192 doc="Number of sigma (as predicted from the mean value) to compute KS test histogram.",
193 default=8.0,
194 )
195 ksHistMinDataValues = pexConfig.Field(
196 dtype=int,
197 doc="Minimum number of good data values to compute KS test histogram.",
198 default=100,
199 )
202class PhotonTransferCurveExtractTask(pipeBase.PipelineTask):
203 """Task to measure covariances from flat fields.
205 This task receives as input a list of flat-field images
206 (flats), and sorts these flats in pairs taken at the
207 same time (the task will warn and drop a flat if it is the only one
208 at a given exposure time, and it will discard extra flats if
209 there are more than two per exposure time). This task measures
210 the mean, variance, and covariances from a region (e.g.,
211 an amplifier) of the difference image of the two flats with
212 the same exposure time (alternatively, all input images could have
213 the same exposure time but their flux changed).
215 The variance is calculated via afwMath, and the covariance
216 via the methods in Astier+19 (appendix A). In theory,
217 var = covariance[0,0]. This should be validated, and in the
218 future, we may decide to just keep one (covariance).
219 At this moment, if the two values differ by more than the value
220 of `thresholdDiffAfwVarVsCov00` (default: 1%), a warning will
221 be issued.
223 The measured covariances at a given exposure time (along with
224 other quantities such as the mean) are stored in a PTC dataset
225 object (`~lsst.ip.isr.PhotonTransferCurveDataset`), which gets
226 partially filled at this stage (the remainder of the attributes
227 of the dataset will be filled after running the second task of
228 the PTC-measurement pipeline, `~PhotonTransferCurveSolveTask`).
230 The number of partially-filled
231 `~lsst.ip.isr.PhotonTransferCurveDataset` objects will be less
232 than the number of input exposures because the task combines
233 input flats in pairs. However, it is required at this moment
234 that the number of input dimensions matches
235 bijectively the number of output dimensions. Therefore, a number
236 of "dummy" PTC datasets are inserted in the output list. This
237 output list will then be used as input of the next task in the
238 PTC-measurement pipeline, `PhotonTransferCurveSolveTask`,
239 which will assemble the multiple `PhotonTransferCurveDataset`
240 objects into a single one in order to fit the measured covariances
241 as a function of flux to one of three models
242 (see `PhotonTransferCurveSolveTask` for details).
244 Reference: Astier+19: "The Shape of the Photon Transfer Curve of CCD
245 sensors", arXiv:1905.08677.
246 """
248 ConfigClass = PhotonTransferCurveExtractConfig
249 _DefaultName = 'cpPtcExtract'
251 def runQuantum(self, butlerQC, inputRefs, outputRefs):
252 """Ensure that the input and output dimensions are passed along.
254 Parameters
255 ----------
256 butlerQC : `~lsst.daf.butler.butlerQuantumContext.ButlerQuantumContext`
257 Butler to operate on.
258 inputRefs : `~lsst.pipe.base.connections.InputQuantizedConnection`
259 Input data refs to load.
260 outputRefs : `~lsst.pipe.base.connections.OutputQuantizedConnection`
261 Output data refs to persist.
262 """
263 inputs = butlerQC.get(inputRefs)
264 # Ids of input list of exposure references
265 # (deferLoad=True in the input connections)
266 inputs['inputDims'] = [expRef.datasetRef.dataId['exposure'] for expRef in inputRefs.inputExp]
268 # Dictionary, keyed by expTime (or expFlux or expId), with tuples
269 # containing flat exposures and their IDs.
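# For illustration (hypothetical IDs), after arrangeFlatsByExpTime the
# structure is expected to look like
#     {15.0: [(flatRefA, 2023061400123), (flatRefB, 2023061400124)],
#      30.0: [(flatRefC, 2023061400125), (flatRefD, 2023061400126)]}
# i.e., each key maps to the (deferred reference, exposure ID) tuples that
# run() later unpacks as flat pairs.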
270 matchType = self.config.matchExposuresType
271 if matchType == 'TIME':
272 inputs['inputExp'] = arrangeFlatsByExpTime(inputs['inputExp'], inputs['inputDims'])
273 elif matchType == 'FLUX':
274 inputs['inputExp'] = arrangeFlatsByExpFlux(inputs['inputExp'], inputs['inputDims'],
275 self.config.matchExposuresByFluxKeyword)
276 else:
277 inputs['inputExp'] = arrangeFlatsByExpId(inputs['inputExp'], inputs['inputDims'])
279 outputs = self.run(**inputs)
280 outputs = self._guaranteeOutputs(inputs['inputDims'], outputs, outputRefs)
281 butlerQC.put(outputs, outputRefs)
283 def _guaranteeOutputs(self, inputDims, outputs, outputRefs):
284 """Ensure that all outputRefs have a matching output, and if they do
285 not, fill the output with dummy PTC datasets.
287 Parameters
288 ----------
289 inputDims : `list` [`int`]
290 Input exposure IDs.
291 outputs : `lsst.pipe.base.Struct`
292 Outputs from the ``run`` method. Contains the entry:
294 ``outputCovariances``
295 Output PTC datasets (`list` [`lsst.ip.isr.IsrCalib`])
296 outputRefs : `~lsst.pipe.base.connections.OutputQuantizedConnection`
297 Container with all of the outputs expected to be generated.
299 Returns
300 -------
301 outputs : `lsst.pipe.base.Struct`
302 Dummy dataset padded version of the input ``outputs`` with
303 the same entries.
304 """
305 newCovariances = []
306 for ref in outputRefs.outputCovariances:
307 outputExpId = ref.dataId['exposure']
308 if outputExpId in inputDims:
309 entry = inputDims.index(outputExpId)
310 newCovariances.append(outputs.outputCovariances[entry])
311 else:
312 newPtc = PhotonTransferCurveDataset(['no amp'], 'DUMMY', 1)
313 newPtc.setAmpValuesPartialDataset('no amp')
314 newCovariances.append(newPtc)
315 return pipeBase.Struct(outputCovariances=newCovariances)
317 def run(self, inputExp, inputDims, taskMetadata):
319 """Measure covariances from difference of flat pairs
321 Parameters
322 ----------
323 inputExp : `dict` [`float`, `list`
324 [`~lsst.pipe.base.connections.DeferredDatasetRef`]]
325 Dictionary that groups references to flat-field exposures that
326 have the same exposure time (seconds), or that groups them
327 sequentially by their exposure id.
328 inputDims : `list`
329 List of exposure IDs.
330 taskMetadata : `list` [`lsst.pipe.base.TaskMetadata`]
331 List of exposures metadata from ISR.
333 Returns
334 -------
335 results : `lsst.pipe.base.Struct`
336 The resulting Struct contains:
338 ``outputCovariances``
339 A list containing the per-pair PTC measurements (`list`
340 [`lsst.ip.isr.PhotonTransferCurveDataset`])
341 """
342 # inputExp.values() returns a view, which we turn into a list. We then
343 # access the first exposure-ID tuple to get the detector.
344 # The first get() call reads only the detector component from the exposure reference.
345 detector = list(inputExp.values())[0][0][0].get(component='detector')
346 detNum = detector.getId()
347 amps = detector.getAmplifiers()
348 ampNames = [amp.getName() for amp in amps]
350 # Each amp may have a different min and max ADU signal
351 # specified in the config.
352 maxMeanSignalDict = {ampName: 1e6 for ampName in ampNames}
353 minMeanSignalDict = {ampName: 0.0 for ampName in ampNames}
354 for ampName in ampNames:
355 if 'ALL_AMPS' in self.config.maxMeanSignal:
356 maxMeanSignalDict[ampName] = self.config.maxMeanSignal['ALL_AMPS']
357 elif ampName in self.config.maxMeanSignal:
358 maxMeanSignalDict[ampName] = self.config.maxMeanSignal[ampName]
360 if 'ALL_AMPS' in self.config.minMeanSignal:
361 minMeanSignalDict[ampName] = self.config.minMeanSignal['ALL_AMPS']
362 elif ampName in self.config.minMeanSignal:
363 minMeanSignalDict[ampName] = self.config.minMeanSignal[ampName]
364 # These are the column names for `tupleRows` below.
365 tags = [('mu', '<f8'), ('afwVar', '<f8'), ('i', '<i8'), ('j', '<i8'), ('var', '<f8'),
366 ('cov', '<f8'), ('npix', '<i8'), ('ext', '<i8'), ('expTime', '<f8'), ('ampName', '<U3')]
367 # Create a dummy ptcDataset. Dummy datasets will be
368 # used to ensure that the number of output and input
369 # dimensions match.
370 dummyPtcDataset = PhotonTransferCurveDataset(ampNames, 'DUMMY',
371 self.config.maximumRangeCovariancesAstier)
372 for ampName in ampNames:
373 dummyPtcDataset.setAmpValuesPartialDataset(ampName)
374 # Get read noise. Try from the exposure, then try
375 # taskMetadata. This adds a get() for the exposures.
376 readNoiseLists = {}
377 for pairIndex, expRefs in inputExp.items():
378 # This yields an index (exposure_time, seq_num, or flux)
379 # and a pair of references at that index.
380 for expRef, expId in expRefs:
381 # This yields an exposure ref and an exposureId.
382 exposureMetadata = expRef.get(component="metadata")
383 metadataIndex = inputDims.index(expId)
384 thisTaskMetadata = taskMetadata[metadataIndex]
386 for ampName in ampNames:
387 if ampName not in readNoiseLists:
388 readNoiseLists[ampName] = [self.getReadNoise(exposureMetadata,
389 thisTaskMetadata, ampName)]
390 else:
391 readNoiseLists[ampName].append(self.getReadNoise(exposureMetadata,
392 thisTaskMetadata, ampName))
394 readNoiseDict = {ampName: 0.0 for ampName in ampNames}
395 for ampName in ampNames:
396 # Take median read noise value
397 readNoiseDict[ampName] = np.nanmedian(readNoiseLists[ampName])
399 # Output list with PTC datasets.
400 partialPtcDatasetList = []
401 # The number of output references needs to match that of input
402 # references: initialize outputlist with dummy PTC datasets.
403 for i in range(len(inputDims)):
404 partialPtcDatasetList.append(dummyPtcDataset)
406 if self.config.numEdgeSuspect > 0:
407 isrTask = IsrTask()
408 self.log.info("Masking %d pixels from the edges of all %ss as SUSPECT.",
409 self.config.numEdgeSuspect, self.config.edgeMaskLevel)
411 # Depending on the value of config.matchExposuresType
412 # 'expTime' can stand for exposure time, flux, or ID.
413 for expTime in inputExp:
414 exposures = inputExp[expTime]
415 if len(exposures) == 1:
416 self.log.warning("Only one exposure found at %s %f. Dropping exposure %d.",
417 self.config.matchExposuresType, expTime, exposures[0][1])
418 continue
419 else:
420 # Only use the first two exposures at expTime. Each
421 # element is a tuple (exposure, expId)
422 expRef1, expId1 = exposures[0]
423 expRef2, expId2 = exposures[1]
424 # use get() to obtain `lsst.afw.image.Exposure`
425 exp1, exp2 = expRef1.get(), expRef2.get()
427 if len(exposures) > 2:
428 self.log.warning("Already found 2 exposures at %s %f. Ignoring exposures: %s",
429 self.config.matchExposuresType, expTime,
430 ", ".join(str(i[1]) for i in exposures[2:]))
431 # Mask pixels at the edge of the detector or of each amp
432 if self.config.numEdgeSuspect > 0:
433 isrTask.maskEdges(exp1, numEdgePixels=self.config.numEdgeSuspect,
434 maskPlane="SUSPECT", level=self.config.edgeMaskLevel)
435 isrTask.maskEdges(exp2, numEdgePixels=self.config.numEdgeSuspect,
436 maskPlane="SUSPECT", level=self.config.edgeMaskLevel)
438 nAmpsNan = 0
439 partialPtcDataset = PhotonTransferCurveDataset(ampNames, 'PARTIAL',
440 self.config.maximumRangeCovariancesAstier)
441 for ampNumber, amp in enumerate(detector):
442 ampName = amp.getName()
443 if self.config.detectorMeasurementRegion == 'AMP':
444 region = amp.getBBox()
445 elif self.config.detectorMeasurementRegion == 'FULL':
446 region = None
448 # Get masked image regions, masking planes, statistic control
449 # objects, and clipped means. Calculate once to reuse in
450 # `measureMeanVarCov` and `getGainFromFlatPair`.
451 im1Area, im2Area, imStatsCtrl, mu1, mu2 = self.getImageAreasMasksStats(exp1, exp2,
452 region=region)
454 # We demand that both mu1 and mu2 be finite and greater than 0.
455 if not np.isfinite(mu1) or not np.isfinite(mu2) \
456 or ((np.nan_to_num(mu1) + np.nan_to_num(mu2))/2. <= 0.0):
457 self.log.warning(
458 "Illegal mean value(s) detected for amp %s on exposure pair %d/%d",
459 ampName,
460 expId1,
461 expId2,
462 )
463 partialPtcDataset.setAmpValuesPartialDataset(
464 ampName,
465 inputExpIdPair=(expId1, expId2),
466 rawExpTime=expTime,
467 expIdMask=False,
468 )
469 continue
471 # `measureMeanVarCov` is the function that measures
472 # the variance and covariances from a region of
473 # the difference image of two flats at the same
474 # exposure time. The variable `covAstier` that is
475 # returned is of the form:
476 # [(i, j, var (cov[0,0]), cov, npix) for (i,j) in
477 # {maxLag, maxLag}^2].
478 muDiff, varDiff, covAstier = self.measureMeanVarCov(im1Area, im2Area, imStatsCtrl, mu1, mu2)
479 # Estimate the gain from the flat pair
480 if self.config.doGain:
481 gain = self.getGainFromFlatPair(im1Area, im2Area, imStatsCtrl, mu1, mu2,
482 correctionType=self.config.gainCorrectionType,
483 readNoise=readNoiseDict[ampName])
484 else:
485 gain = np.nan
487 # Correction factor for bias introduced by sigma
488 # clipping.
489 # Function returns 1/sqrt(varFactor), so it needs
490 # to be squared. varDiff is calculated via
491 # afwMath.VARIANCECLIP.
492 varFactor = sigmaClipCorrection(self.config.nSigmaClipPtc)**2
493 varDiff *= varFactor
495 expIdMask = True
496 # Mask data point at this mean signal level if
497 # the signal, variance, or covariance calculations
498 # from `measureMeanVarCov` resulted in NaNs.
499 if np.isnan(muDiff) or np.isnan(varDiff) or (covAstier is None):
500 self.log.warning("NaN mean or var, or None cov in amp %s in exposure pair %d, %d of "
501 "detector %d.", ampName, expId1, expId2, detNum)
502 nAmpsNan += 1
503 expIdMask = False
504 covArray = np.full((1, self.config.maximumRangeCovariancesAstier,
505 self.config.maximumRangeCovariancesAstier), np.nan)
506 covSqrtWeights = np.full_like(covArray, np.nan)
508 # Mask data point if it is outside of the
509 # specified mean signal range.
510 if (muDiff <= minMeanSignalDict[ampName]) or (muDiff >= maxMeanSignalDict[ampName]):
511 expIdMask = False
513 if covAstier is not None:
514 # Turn the tuples with the measured information
515 # into covariance arrays.
516 # covrow: (i, j, var (cov[0,0]), cov, npix)
517 tupleRows = [(muDiff, varDiff) + covRow + (ampNumber, expTime,
518 ampName) for covRow in covAstier]
519 tempStructArray = np.array(tupleRows, dtype=tags)
521 covArray, vcov, _ = self.makeCovArray(tempStructArray,
522 self.config.maximumRangeCovariancesAstier)
524 # The returned covArray should only have 1 entry;
525 # raise if this is not the case.
526 if covArray.shape[0] != 1:
527 raise RuntimeError("Serious programming error in covArray shape.")
529 covSqrtWeights = np.nan_to_num(1./np.sqrt(vcov))
531 # Correct covArray for sigma clipping:
532 # 1) Apply varFactor twice for the whole covariance matrix
533 covArray *= varFactor**2
534 # 2) But, only once for the variance element of the
535 # matrix, covArray[0, 0, 0] (so divide one factor out).
536 # (the first 0 is because this is a 3D array for insertion into
537 # the combined dataset).
538 covArray[0, 0, 0] /= varFactor
540 if expIdMask:
541 # Run the Gaussian histogram only if this is a legal
542 # amplifier.
543 histVar, histChi2Dof, kspValue = self.computeGaussianHistogramParameters(
544 im1Area,
545 im2Area,
546 imStatsCtrl,
547 mu1,
548 mu2,
549 )
550 else:
551 histVar = np.nan
552 histChi2Dof = np.nan
553 kspValue = 0.0
555 partialPtcDataset.setAmpValuesPartialDataset(
556 ampName,
557 inputExpIdPair=(expId1, expId2),
558 rawExpTime=expTime,
559 rawMean=muDiff,
560 rawVar=varDiff,
561 expIdMask=expIdMask,
562 covariance=covArray[0, :, :],
563 covSqrtWeights=covSqrtWeights[0, :, :],
564 gain=gain,
565 noise=readNoiseDict[ampName],
566 histVar=histVar,
567 histChi2Dof=histChi2Dof,
568 kspValue=kspValue,
569 )
571 # Use location of exp1 to save PTC dataset from (exp1, exp2) pair.
572 # Below, np.where(expId1 == np.array(inputDims)) returns a tuple
573 # with a single-element array, so [0][0]
574 # is necessary to extract the required index.
575 datasetIndex = np.where(expId1 == np.array(inputDims))[0][0]
576 # `partialPtcDatasetList` is a list of
577 # `PhotonTransferCurveDataset` objects. Some of them
578 # will be dummy datasets (to match length of input
579 # and output references), and the rest will have
580 # datasets with the mean signal, variance, and
581 # covariance measurements at a given exposure
582 time. The next part of the PTC-measurement
583 # pipeline, `solve`, will take this list as input,
584 # and assemble the measurements in the datasets
585 in an adequate manner for fitting a PTC
586 # model.
587 partialPtcDataset.updateMetadataFromExposures([exp1, exp2])
588 partialPtcDataset.updateMetadata(setDate=True, detector=detector)
589 partialPtcDatasetList[datasetIndex] = partialPtcDataset
591 if nAmpsNan == len(ampNames):
592 msg = f"NaN mean in all amps of exposure pair {expId1}, {expId2} of detector {detNum}."
593 self.log.warning(msg)
595 return pipeBase.Struct(
596 outputCovariances=partialPtcDatasetList,
597 )
599 def makeCovArray(self, inputTuple, maxRangeFromTuple):
600 """Make covariances array from tuple.
602 Parameters
603 ----------
604 inputTuple : `numpy.ndarray`
605 Structured array with rows with at least
606 (mu, afwVar, cov, var, i, j, npix), where:
607 mu : `float`
608 0.5*(mu1 + mu2), where mu1 is the mean value of flat1
609 and mu2 is the mean value of flat2.
610 afwVar : `float`
611 Variance of difference flat, calculated with afw.
612 cov : `float`
613 Covariance value at lag(i, j)
614 var : `float`
615 Variance(covariance value at lag(0, 0))
616 i : `int`
617 Lag in dimension "x".
618 j : `int`
619 Lag in dimension "y".
620 npix : `int`
621 Number of pixels used for covariance calculation.
622 maxRangeFromTuple : `int`
623 Maximum range to select from tuple.
625 Returns
626 -------
627 cov : `numpy.array`
628 Covariance arrays, indexed by mean signal mu.
629 vCov : `numpy.array`
630 Variance of the [co]variance arrays, indexed by mean signal mu.
631 muVals : `numpy.array`
632 List of mean signal values.
633 """
634 if maxRangeFromTuple is not None:
635 cut = (inputTuple['i'] < maxRangeFromTuple) & (inputTuple['j'] < maxRangeFromTuple)
636 cutTuple = inputTuple[cut]
637 else:
638 cutTuple = inputTuple
639 # increasing mu order, so that we can group measurements with the
640 # same mu
641 muTemp = cutTuple['mu']
642 ind = np.argsort(muTemp)
644 cutTuple = cutTuple[ind]
645 # should group measurements on the same image pairs (same average)
646 mu = cutTuple['mu']
647 xx = np.hstack(([mu[0]], mu))
648 delta = xx[1:] - xx[:-1]
649 steps, = np.where(delta > 0)
650 ind = np.zeros_like(mu, dtype=int)
651 ind[steps] = 1
652 ind = np.cumsum(ind) # this acts as an image pair index.
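# Illustrative trace (hypothetical values): for sorted mu = [5, 5, 5, 12, 12],
# xx = [5, 5, 5, 5, 12, 12], delta = [0, 0, 0, 7, 0], steps = [3], and the
# cumulative sum gives ind = [0, 0, 0, 1, 1], so every row measured from the
# same image pair (same mean) gets the same index.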
653 # now fill the 3-d cov array (and variance)
654 muVals = np.array(np.unique(mu))
655 i = cutTuple['i'].astype(int)
656 j = cutTuple['j'].astype(int)
657 c = 0.5*cutTuple['cov']
658 n = cutTuple['npix']
659 v = 0.5*cutTuple['var']
660 # book and fill
661 cov = np.ndarray((len(muVals), np.max(i)+1, np.max(j)+1))
662 var = np.zeros_like(cov)
663 cov[ind, i, j] = c
664 var[ind, i, j] = v**2/n
665 var[:, 0, 0] *= 2 # var(v) = 2*v**2/N
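# The entries of `var` approximate the uncertainty of each covariance
# estimate: Var(Cov[i, j]) ~ Cov[0, 0]**2 / npix for (i, j) != (0, 0),
# and the (0, 0) element carries the extra factor of 2 expected for the
# variance of a sample variance (Gaussian approximation).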
667 return cov, var, muVals
669 def measureMeanVarCov(self, im1Area, im2Area, imStatsCtrl, mu1, mu2):
670 """Calculate the mean of each of two exposures and the variance
671 and covariance of their difference. The variance is calculated
672 via afwMath, and the covariance via the methods in Astier+19
673 (appendix A). In theory, var = covariance[0,0]. This should
674 be validated, and in the future, we may decide to just keep
675 one (covariance).
677 Parameters
678 ----------
679 im1Area : `lsst.afw.image.maskedImage.MaskedImageF`
680 Masked image from exposure 1.
681 im2Area : `lsst.afw.image.maskedImage.MaskedImageF`
682 Masked image from exposure 2.
683 imStatsCtrl : `lsst.afw.math.StatisticsControl`
684 Statistics control object.
685 mu1: `float`
686 Clipped mean of im1Area (ADU).
687 mu2: `float`
688 Clipped mean of im2Area (ADU).
690 Returns
691 -------
692 mu : `float` or `NaN`
693 0.5*(mu1 + mu2), where mu1 and mu2 are the clipped means
694 of the regions in both exposures. If either mu1 or mu2 is
695 NaN, the returned value is NaN.
696 varDiff : `float` or `NaN`
697 Half of the clipped variance of the difference of the
698 regions in the two input exposures. If either mu1 or mu2 is
699 NaN, the returned value is NaN.
700 covDiffAstier : `list` or `NaN`
701 List with tuples of the form (dx, dy, var, cov, npix), where:
702 dx : `int`
703 Lag in x
704 dy : `int`
705 Lag in y
706 var : `float`
707 Variance at (dx, dy).
708 cov : `float`
709 Covariance at (dx, dy).
710 nPix : `int`
711 Number of pixel pairs used to evaluate var and cov.
713 If either mu1 or mu2 is NaN, the returned value is NaN.
714 """
715 if np.isnan(mu1) or np.isnan(mu2):
716 self.log.warning("Mean of amp in image 1 or 2 is NaN: %f, %f.", mu1, mu2)
717 return np.nan, np.nan, None
718 mu = 0.5*(mu1 + mu2)
720 # Take difference of pairs
721 # symmetric formula: diff = (mu2*im1-mu1*im2)/(0.5*(mu1+mu2))
722 temp = im2Area.clone()
723 temp *= mu1
724 diffIm = im1Area.clone()
725 diffIm *= mu2
726 diffIm -= temp
727 diffIm /= mu
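# Scaling each image by the other's mean before subtracting removes, to
# first order, the small flux difference between the two flats, and the
# division by mu keeps the result on the ADU scale of a single exposure.
# The factor of 0.5 applied to the clipped variance below then converts the
# variance of the difference image into a per-exposure variance
# (Var(I1 - I2) = 2*Var(I) for independent exposures).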
729 if self.config.binSize > 1:
730 diffIm = afwMath.binImage(diffIm, self.config.binSize)
732 # Variance calculation via afwMath
733 varDiff = 0.5*(afwMath.makeStatistics(diffIm, afwMath.VARIANCECLIP, imStatsCtrl).getValue())
735 # Covariances calculations
736 # Get the pixels that were not clipped
737 varClip = afwMath.makeStatistics(diffIm, afwMath.VARIANCECLIP, imStatsCtrl).getValue()
738 meanClip = afwMath.makeStatistics(diffIm, afwMath.MEANCLIP, imStatsCtrl).getValue()
739 cut = meanClip + self.config.nSigmaClipPtc*np.sqrt(varClip)
740 unmasked = np.where(np.fabs(diffIm.image.array) <= cut, 1, 0)
742 # Get the pixels in the mask planes of the difference image
743 # that were ignored by the clipping algorithm
744 wDiff = np.where(diffIm.getMask().getArray() == 0, 1, 0)
745 # Combine the two sets of pixels ('1': use; '0': don't use)
746 # into a final weight matrix to be used in the covariance
747 # calculations below.
748 w = unmasked*wDiff
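# w is 1 only for pixels that both survived the sigma clipping and have no
# masked-plane bits set; all other pixels get zero weight in the FFT-based
# covariance sums below.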
750 if np.sum(w) < self.config.minNumberGoodPixelsForCovariance/(self.config.binSize**2):
751 self.log.warning("Number of good points for covariance calculation (%s) is less "
752 "(than threshold %s)", np.sum(w),
753 self.config.minNumberGoodPixelsForCovariance/(self.config.binSize**2))
754 return np.nan, np.nan, None
756 maxRangeCov = self.config.maximumRangeCovariancesAstier
758 # Calculate covariances via FFT.
759 shapeDiff = np.array(diffIm.image.array.shape)
760 # Calculate the sizes of FFT dimensions.
761 s = shapeDiff + maxRangeCov
762 tempSize = np.array(np.log(s)/np.log(2.)).astype(int)
763 fftSize = np.array(2**(tempSize+1)).astype(int)
764 fftShape = (fftSize[0], fftSize[1])
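# Each FFT dimension is padded up to the next power of two above
# shape + maxRangeCov. Illustrative example (hypothetical region size):
# for a 2000 x 512 amp region with maxRangeCov = 8, s = (2008, 520),
# int(log2(s)) = (10, 9), and fftShape = (2048, 1024).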
765 c = CovFastFourierTransform(diffIm.image.array, w, fftShape, maxRangeCov)
766 # np.sum(w) is the same as npix[0][0] returned in covDiffAstier
767 try:
768 covDiffAstier = c.reportCovFastFourierTransform(maxRangeCov)
769 except ValueError:
770 # This is raised if there are not enough pixels.
771 self.log.warning("Not enough pixels covering the requested covariance range in x/y (%d)",
772 self.config.maximumRangeCovariancesAstier)
773 return np.nan, np.nan, None
775 # Compare Cov[0,0] and afwMath.VARIANCECLIP. covDiffAstier[0]
776 # is the Cov[0,0] element, [3] is the variance, and there's a
777 # factor of 0.5 difference with afwMath.VARIANCECLIP.
778 thresholdPercentage = self.config.thresholdDiffAfwVarVsCov00
779 fractionalDiff = 100*np.fabs(1 - varDiff/(covDiffAstier[0][3]*0.5))
780 if fractionalDiff >= thresholdPercentage:
781 self.log.warning("Absolute fractional difference between afwMatch.VARIANCECLIP and Cov[0,0] "
782 "is more than %f%%: %f", thresholdPercentage, fractionalDiff)
784 return mu, varDiff, covDiffAstier
786 def getImageAreasMasksStats(self, exposure1, exposure2, region=None):
787 """Get image areas in a region as well as masks and statistic objects.
789 Parameters
790 ----------
791 exposure1 : `lsst.afw.image.ExposureF`
792 First exposure of flat field pair.
793 exposure2 : `lsst.afw.image.ExposureF`
794 Second exposure of flat field pair.
795 region : `lsst.geom.Box2I`, optional
796 Region of each exposure in which to perform the calculations
797 (e.g., an amplifier).
799 Returns
800 -------
801 im1Area : `lsst.afw.image.MaskedImageF`
802 Masked image from exposure 1.
803 im2Area : `lsst.afw.image.MaskedImageF`
804 Masked image from exposure 2.
805 imStatsCtrl : `lsst.afw.math.StatisticsControl`
806 Statistics control object.
807 mu1 : `float`
808 Clipped mean of im1Area (ADU).
809 mu2 : `float`
810 Clipped mean of im2Area (ADU).
811 """
812 if region is not None:
813 im1Area = exposure1.maskedImage[region]
814 im2Area = exposure2.maskedImage[region]
815 else:
816 im1Area = exposure1.maskedImage
817 im2Area = exposure2.maskedImage
819 # Get mask planes and construct statistics control object from one
820 # of the exposures
821 imMaskVal = exposure1.getMask().getPlaneBitMask(self.config.maskNameList)
822 imStatsCtrl = afwMath.StatisticsControl(self.config.nSigmaClipPtc,
823 self.config.nIterSigmaClipPtc,
824 imMaskVal)
825 imStatsCtrl.setNanSafe(True)
826 imStatsCtrl.setAndMask(imMaskVal)
828 mu1 = afwMath.makeStatistics(im1Area, afwMath.MEANCLIP, imStatsCtrl).getValue()
829 mu2 = afwMath.makeStatistics(im2Area, afwMath.MEANCLIP, imStatsCtrl).getValue()
831 return (im1Area, im2Area, imStatsCtrl, mu1, mu2)
833 def getGainFromFlatPair(self, im1Area, im2Area, imStatsCtrl, mu1, mu2,
834 correctionType='NONE', readNoise=None):
835 """Estimate the gain from a single pair of flats.
837 The basic premise is 1/g = <(I1 - I2)^2/(I1 + I2)> = const,
838 where I1 and I2 correspond to flats 1 and 2, respectively.
839 Corrections for the variable QE and the read-noise are then
840 made following the derivation in Robert Lupton's forthcoming
841 book, which gets
843 1/g = <(I1 - I2)^2/(I1 + I2)> - 1/mu(sigma^2 - 1/2g^2).
845 This is a quadratic equation, whose solutions are given by:
847 g = (mu +/- sqrt(mu^2 - 2*const*mu + 2*sigma^2))/(2*const*mu
848 - 2*sigma^2)
850 where 'mu' is the average signal level and 'sigma' is the
851 amplifier's readnoise. The positive solution will be used.
852 The way the correction is applied depends on the value
853 supplied for correctionType.
855 correctionType is one of ['NONE', 'SIMPLE' or 'FULL']
856 'NONE' : uses the 1/g = <(I1 - I2)^2/(I1 + I2)> formula.
857 'SIMPLE' : uses the gain from the 'NONE' method for the
858 1/2g^2 term.
859 'FULL' : solves the full equation for g, discarding the
860 non-physical solution to the resulting quadratic.
862 Parameters
863 ----------
864 im1Area : `lsst.afw.image.maskedImage.MaskedImageF`
865 Masked image from exposure 1.
866 im2Area : `lsst.afw.image.maskedImage.MaskedImageF`
867 Masked image from exposure 2.
868 imStatsCtrl : `lsst.afw.math.StatisticsControl`
869 Statistics control object.
870 mu1: `float`
871 Clipped mean of im1Area (ADU).
872 mu2: `float`
873 Clipped mean of im2Area (ADU).
874 correctionType : `str`, optional
875 The correction applied, one of ['NONE', 'SIMPLE', 'FULL']
876 readNoise : `float`, optional
877 Amplifier readout noise (ADU).
879 Returns
880 -------
881 gain : `float`
882 Gain, in e/ADU.
884 Raises
885 ------
886 RuntimeError
887 Raise if `correctionType` is not one of 'NONE',
888 'SIMPLE', or 'FULL'.
889 """
890 if correctionType not in ['NONE', 'SIMPLE', 'FULL']:
891 raise RuntimeError("Unknown correction type: %s" % correctionType)
893 if correctionType != 'NONE' and not np.isfinite(readNoise):
894 self.log.warning("'correctionType' in 'getGainFromFlatPair' is %s, "
895 "but 'readNoise' is NaN. Setting 'correctionType' "
896 "to 'NONE', so a gain value will be estimated without "
897 "corrections." % correctionType)
898 correctionType = 'NONE'
900 mu = 0.5*(mu1 + mu2)
902 # ratioIm = (I1 - I2)^2 / (I1 + I2)
903 temp = im2Area.clone()
904 ratioIm = im1Area.clone()
905 ratioIm -= temp
906 ratioIm *= ratioIm
908 # Sum of pairs
909 sumIm = im1Area.clone()
910 sumIm += temp
912 ratioIm /= sumIm
914 const = afwMath.makeStatistics(ratioIm, afwMath.MEAN, imStatsCtrl).getValue()
915 gain = 1. / const
917 if correctionType == 'SIMPLE':
918 gain = 1/(const - (1/mu)*(readNoise**2 - (1/2*gain**2)))
919 elif correctionType == 'FULL':
920 root = np.sqrt(mu**2 - 2*mu*const + 2*readNoise**2)
921 denom = (2*const*mu - 2*readNoise**2)
922 positiveSolution = (root + mu)/denom
923 gain = positiveSolution
925 return gain
927 def getReadNoise(self, exposureMetadata, taskMetadata, ampName):
928 """Gets readout noise for an amp from ISR metadata.
930 If possible, this attempts to get the now-standard headers
931 added to the exposure itself. If not found there, the ISR
932 TaskMetadata is searched. If neither of these has the value,
933 warn and set the read noise to NaN.
935 Parameters
936 ----------
937 exposureMetadata : `lsst.daf.base.PropertySet`
938 Metadata to check for read noise first.
939 taskMetadata : `lsst.pipe.base.TaskMetadata`
940 List of exposures metadata from ISR for this exposure.
941 ampName : `str`
942 Amplifier name.
944 Returns
945 -------
946 readNoise : `float`
947 The read noise for this set of exposure/amplifier.
948 """
949 # Try from the exposure first.
950 expectedKey = f"LSST ISR OVERSCAN RESIDUAL SERIAL STDEV {ampName}"
951 if expectedKey in exposureMetadata:
952 return exposureMetadata[expectedKey]
954 # If not, try getting it from the task metadata.
955 expectedKey = f"RESIDUAL STDEV {ampName}"
956 if "isr" in taskMetadata:
957 if expectedKey in taskMetadata["isr"]:
958 return taskMetadata["isr"][expectedKey]
960 self.log.warning("Median readout noise from ISR metadata for amp %s "
961 "could not be calculated." % ampName)
962 return np.nan
964 def computeGaussianHistogramParameters(self, im1Area, im2Area, imStatsCtrl, mu1, mu2):
965 """Compute KS test for a Gaussian model fit to a histogram of the
966 difference image.
968 Parameters
969 ----------
970 im1Area : `lsst.afw.image.MaskedImageF`
971 Masked image from exposure 1.
972 im2Area : `lsst.afw.image.MaskedImageF`
973 Masked image from exposure 2.
974 imStatsCtrl : `lsst.afw.math.StatisticsControl`
975 Statistics control object.
976 mu1 : `float`
977 Clipped mean of im1Area (ADU).
978 mu2 : `float`
979 Clipped mean of im2Area (ADU).
981 Returns
982 -------
983 varFit : `float`
984 Variance from the Gaussian fit.
985 chi2Dof : `float`
986 Chi-squared per degree of freedom of Gaussian fit.
987 kspValue : `float`
988 The KS test p-value for the Gaussian fit.
990 Notes
991 -----
992 The algorithm here was originally developed by Aaron Roodman.
993 Tests on the full focal plane of LSSTCam during testing has shown
994 that a KS test p-value cut of 0.01 is a good discriminant for
995 well-behaved flat pairs (p>0.01) and poorly behaved non-Gaussian
996 flat pairs (p<0.01).
997 """
998 diffExp = im1Area.clone()
999 diffExp -= im2Area
1001 sel = (((diffExp.mask.array & imStatsCtrl.getAndMask()) == 0)
1002 & np.isfinite(diffExp.mask.array))
1003 diffArr = diffExp.image.array[sel]
1005 numOk = len(diffArr)
1007 if numOk >= self.config.ksHistMinDataValues and np.isfinite(mu1) and np.isfinite(mu2):
1008 # Create a histogram symmetric around zero, with a bin size
1009 # determined from the expected variance given by the average of
1010 # the input signal levels.
1011 lim = self.config.ksHistLimitMultiplier * np.sqrt((mu1 + mu2)/2.)
1012 yVals, binEdges = np.histogram(diffArr, bins=self.config.ksHistNBins, range=[-lim, lim])
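# sqrt((mu1 + mu2)/2.) is the Poisson sigma predicted from the mean signal
# (for a gain near 1 e-/ADU), so the histogram spans roughly
# ksHistLimitMultiplier predicted sigmas on either side of zero.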
1014 # Fit the histogram with a Gaussian model.
1015 model = GaussianModel()
1016 yVals = yVals.astype(np.float64)
1017 xVals = ((binEdges[0: -1] + binEdges[1:])/2.).astype(np.float64)
1018 errVals = np.sqrt(yVals)
1019 errVals[(errVals == 0.0)] = 1.0
1020 pars = model.guess(yVals, x=xVals)
1021 with warnings.catch_warnings():
1022 warnings.simplefilter("ignore")
1023 # The least-squares fitter sometimes spouts (spurious) warnings
1024 # when the model is very bad. Swallow these warnings now and
1025 # let the KS test check the model below.
1026 out = model.fit(
1027 yVals,
1028 pars,
1029 x=xVals,
1030 weights=1./errVals,
1031 calc_covar=True,
1032 method="least_squares",
1033 )
1035 # Calculate chi2.
1036 chiArr = out.residual
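# Three degrees of freedom are removed for the fitted Gaussian parameters
# (amplitude, center, and sigma).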
1037 nDof = len(yVals) - 3
1038 chi2Dof = np.sum(chiArr**2.)/nDof
1039 sigmaFit = out.params["sigma"].value
1041 # Calculate KS test p-value for the fit.
1042 ksResult = scipy.stats.ks_1samp(
1043 diffArr,
1044 scipy.stats.norm.cdf,
1045 (out.params["center"].value, sigmaFit),
1046 )
1048 kspValue = ksResult.pvalue
1049 if kspValue < 1e-15:
1050 kspValue = 0.0
1052 varFit = sigmaFit**2.
1054 else:
1055 varFit = np.nan
1056 chi2Dof = np.nan
1057 kspValue = 0.0
1059 return varFit, chi2Dof, kspValue