Coverage for python/lsst/cp/pipe/ptc/cpExtractPtcTask.py: 12%
258 statements
1# This file is part of cp_pipe.
2#
3# Developed for the LSST Data Management System.
4# This product includes software developed by the LSST Project
5# (https://www.lsst.org).
6# See the COPYRIGHT file at the top-level directory of this distribution
7# for details of code ownership.
8#
9# This program is free software: you can redistribute it and/or modify
10# it under the terms of the GNU General Public License as published by
11# the Free Software Foundation, either version 3 of the License, or
12# (at your option) any later version.
13#
14# This program is distributed in the hope that it will be useful,
15# but WITHOUT ANY WARRANTY; without even the implied warranty of
16# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17# GNU General Public License for more details.
18#
19# You should have received a copy of the GNU General Public License
20# along with this program. If not, see <https://www.gnu.org/licenses/>.
21#
22import numpy as np
24import lsst.afw.math as afwMath
25import lsst.pex.config as pexConfig
26import lsst.pipe.base as pipeBase
27from lsst.cp.pipe.utils import (arrangeFlatsByExpTime, arrangeFlatsByExpId,
28 arrangeFlatsByExpFlux, sigmaClipCorrection,
29 CovFastFourierTransform)
31import lsst.pipe.base.connectionTypes as cT
33from lsst.ip.isr import PhotonTransferCurveDataset
34from lsst.ip.isr import IsrTask
36__all__ = ['PhotonTransferCurveExtractConfig', 'PhotonTransferCurveExtractTask']
39class PhotonTransferCurveExtractConnections(pipeBase.PipelineTaskConnections,
40 dimensions=("instrument", "detector")):
42 inputExp = cT.Input(
43 name="ptcInputExposurePairs",
44 doc="Input post-ISR processed exposure pairs (flats) to"
45 "measure covariances from.",
46 storageClass="Exposure",
47 dimensions=("instrument", "exposure", "detector"),
48 multiple=True,
49 deferLoad=True,
50 )
51 taskMetadata = cT.Input(
52 name="isr_metadata",
53 doc="Input task metadata to extract statistics from.",
54 storageClass="TaskMetadata",
55 dimensions=("instrument", "exposure", "detector"),
56 multiple=True,
57 )
58 outputCovariances = cT.Output(
59 name="ptcCovariances",
60 doc="Extracted flat (co)variances.",
61 storageClass="PhotonTransferCurveDataset",
62 dimensions=("instrument", "exposure", "detector"),
63 isCalibration=True,
64 multiple=True,
65 )
68class PhotonTransferCurveExtractConfig(pipeBase.PipelineTaskConfig,
69 pipelineConnections=PhotonTransferCurveExtractConnections):
70 """Configuration for the measurement of covariances from flats.
71 """
72 matchExposuresType = pexConfig.ChoiceField(
73 dtype=str,
74 doc="Match input exposures by time, flux, or expId",
75 default='TIME',
76 allowed={
77 "TIME": "Match exposures by exposure time.",
78 "FLUX": "Match exposures by target flux. Use header keyword"
79 " in matchExposuresByFluxKeyword to find the flux.",
80 "EXPID": "Match exposures by exposure ID."
81 }
82 )
83 matchExposuresByFluxKeyword = pexConfig.Field(
84 dtype=str,
85 doc="Header keyword for flux if matchExposuresType is FLUX.",
86 default='CCOBFLUX',
87 )
88 maximumRangeCovariancesAstier = pexConfig.Field(
89 dtype=int,
90 doc="Maximum range of covariances as in Astier+19",
91 default=8,
92 )
93 binSize = pexConfig.Field(
94 dtype=int,
95 doc="Bin the image by this factor in both dimensions.",
96 default=1,
97 )
98 minMeanSignal = pexConfig.DictField(
99 keytype=str,
100 itemtype=float,
101 doc="Minimum values (inclusive) of mean signal (in ADU) per amp to use."
102 " The same cut is applied to all amps if this parameter [`dict`] is passed as "
103 " {'ALL_AMPS': value}",
104 default={'ALL_AMPS': 0.0},
105 )
106 maxMeanSignal = pexConfig.DictField(
107 keytype=str,
108 itemtype=float,
109 doc="Maximum values (inclusive) of mean signal (in ADU) below which to consider, per amp."
110 " The same cut is applied to all amps if this dictionary is of the form"
111 " {'ALL_AMPS': value}",
112 default={'ALL_AMPS': 1e6},
113 )
114 maskNameList = pexConfig.ListField(
115 dtype=str,
116 doc="Mask list to exclude from statistics calculations.",
117 default=['SUSPECT', 'BAD', 'NO_DATA', 'SAT'],
118 )
119 nSigmaClipPtc = pexConfig.Field(
120 dtype=float,
121 doc="Sigma cut for afwMath.StatisticsControl()",
122 default=5.5,
123 )
124 nIterSigmaClipPtc = pexConfig.Field(
125 dtype=int,
126 doc="Number of sigma-clipping iterations for afwMath.StatisticsControl()",
127 default=3,
128 )
129 minNumberGoodPixelsForCovariance = pexConfig.Field(
130 dtype=int,
131 doc="Minimum number of acceptable good pixels per amp to calculate the covariances (via FFT or"
132 " direclty).",
133 default=10000,
134 )
135 thresholdDiffAfwVarVsCov00 = pexConfig.Field(
136 dtype=float,
137 doc="If the absolute fractional differece between afwMath.VARIANCECLIP and Cov00 "
138 "for a region of a difference image is greater than this threshold (percentage), "
139 "a warning will be issued.",
140 default=1.,
141 )
142 detectorMeasurementRegion = pexConfig.ChoiceField(
143 dtype=str,
144 doc="Region of each exposure where to perform the calculations (amplifier or full image).",
145 default='AMP',
146 allowed={
147 "AMP": "Amplifier of the detector.",
148 "FULL": "Full image."
149 }
150 )
151 numEdgeSuspect = pexConfig.Field(
152 dtype=int,
153 doc="Number of edge pixels to be flagged as untrustworthy.",
154 default=0,
155 )
156 edgeMaskLevel = pexConfig.ChoiceField(
157 dtype=str,
158 doc="Mask edge pixels in which coordinate frame: DETECTOR or AMP?",
159 default="DETECTOR",
160 allowed={
161 'DETECTOR': 'Mask only the edges of the full detector.',
162 'AMP': 'Mask edges of each amplifier.',
163 },
164 )
165 doGain = pexConfig.Field(
166 dtype=bool,
167 doc="Calculate a gain per input flat pair.",
168 default=True,
169 )
170 gainCorrectionType = pexConfig.ChoiceField(
171 dtype=str,
172 doc="Correction type for the gain.",
173 default='FULL',
174 allowed={
175 'NONE': 'No correction.',
176 'SIMPLE': 'First order correction.',
177 'FULL': 'Second order correction.'
178 }
179 )
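# The fields above can be overridden in a pipeline or task config file.
# A minimal sketch (the values shown are illustrative, not recommendations):
#
#     config.matchExposuresType = "FLUX"
#     config.matchExposuresByFluxKeyword = "CCOBFLUX"
#     config.maximumRangeCovariancesAstier = 8
#     config.minMeanSignal = {"ALL_AMPS": 100.0}
#     config.maxMeanSignal = {"ALL_AMPS": 1.0e5}
#     config.maskNameList = ["SUSPECT", "BAD", "NO_DATA", "SAT"]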
182class PhotonTransferCurveExtractTask(pipeBase.PipelineTask):
183 """Task to measure covariances from flat fields.
185 This task receives as input a list of flat-field images
186 (flats) and sorts them into pairs taken at the same exposure
187 time (the task warns and drops the exposure if only one flat
188 is found at a given exposure time, and it ignores extra flats
189 if there are more than two per exposure time). The task then
190 measures the mean, variance, and covariances from a region (e.g.,
191 an amplifier) of the difference image of the two flats with
192 the same exposure time (alternatively, all input images could have
193 the same exposure time but different fluxes).
195 The variance is calculated via afwMath, and the covariance
196 via the methods in Astier+19 (appendix A). In theory,
197 var = covariance[0,0]. This should be validated, and in the
198 future, we may decide to just keep one (covariance).
199 At this moment, if the two values differ by more than the value
200 of `thresholdDiffAfwVarVsCov00` (default: 1%), a warning will
201 be issued.
203 The measured covariances at a given exposure time (along with
204 other quantities such as the mean) are stored in a PTC dataset
205 object (`~lsst.ip.isr.PhotonTransferCurveDataset`), which gets
206 partially filled at this stage (the remainder of the attributes
207 of the dataset will be filled after running the second task of
208 the PTC-measurement pipeline, `~PhotonTransferCurveSolveTask`).
210 The number of partially-filled
211 `~lsst.ip.isr.PhotonTransferCurveDataset` objects will be less
212 than the number of input exposures because the task combines
213 input flats in pairs. However, it is currently required
214 that the number of output dimensions match the number
215 of input dimensions one-to-one. Therefore, a number
216 of "dummy" PTC datasets are inserted in the output list. This
217 output list will then be used as input of the next task in the
218 PTC-measurement pipeline, `PhotonTransferCurveSolveTask`,
219 which will assemble the multiple `PhotonTransferCurveDataset`
220 objects into a single one in order to fit the measured covariances
221 as a function of flux to one of three models
222 (see `PhotonTransferCurveSolveTask` for details).
224 Reference: Astier+19: "The Shape of the Photon Transfer Curve of CCD
225 sensors", arXiv:1905.08677.
226 """
228 ConfigClass = PhotonTransferCurveExtractConfig
229 _DefaultName = 'cpPtcExtract'
231 def runQuantum(self, butlerQC, inputRefs, outputRefs):
232 """Ensure that the input and output dimensions are passed along.
234 Parameters
235 ----------
236 butlerQC : `~lsst.daf.butler.butlerQuantumContext.ButlerQuantumContext`
237 Butler to operate on.
238 inputRefs : `~lsst.pipe.base.connections.InputQuantizedConnection`
239 Input data refs to load.
240 outputRefs : `~lsst.pipe.base.connections.OutputQuantizedConnection`
241 Output data refs to persist.
242 """
243 inputs = butlerQC.get(inputRefs)
244 # Ids of input list of exposure references
245 # (deferLoad=True in the input connections)
246 inputs['inputDims'] = [expRef.datasetRef.dataId['exposure'] for expRef in inputRefs.inputExp]
248 # Dictionary, keyed by expTime (or expFlux or expId), with tuples
249 # containing flat exposures and their IDs.
250 matchType = self.config.matchExposuresType
251 if matchType == 'TIME':
252 inputs['inputExp'] = arrangeFlatsByExpTime(inputs['inputExp'], inputs['inputDims'])
253 elif matchType == 'FLUX':
254 inputs['inputExp'] = arrangeFlatsByExpFlux(inputs['inputExp'], inputs['inputDims'],
255 self.config.matchExposuresByFluxKeyword)
256 else:
257 inputs['inputExp'] = arrangeFlatsByExpId(inputs['inputExp'], inputs['inputDims'])
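# The arranged structure is a dict whose values are lists of
# (deferred exposure reference, exposure ID) tuples, e.g. (with
# illustrative IDs) for matchExposuresType == 'TIME':
#     {0.5: [(refA, 2023031500001), (refB, 2023031500002)],
#      1.0: [(refC, 2023031500003), (refD, 2023031500004)]}
# 'FLUX' and 'EXPID' produce the same structure, keyed by the header
# flux value or sequentially by exposure ID instead.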
259 outputs = self.run(**inputs)
260 outputs = self._guaranteeOutputs(inputs['inputDims'], outputs, outputRefs)
261 butlerQC.put(outputs, outputRefs)
263 def _guaranteeOutputs(self, inputDims, outputs, outputRefs):
264 """Ensure that all outputRefs have a matching output, and if they do
265 not, fill the output with dummy PTC datasets.
267 Parameters
268 ----------
269 inputDims : `list` [`int`]
270 List of input exposure IDs.
271 outputs : `lsst.pipe.base.Struct`
272 Outputs from the ``run`` method. Contains the entry:
274 ``outputCovariances``
275 Output PTC datasets (`list` [`lsst.ip.isr.IsrCalib`])
276 outputRefs : `~lsst.pipe.base.connections.OutputQuantizedConnection`
277 Container with all of the outputs expected to be generated.
279 Returns
280 -------
281 outputs : `lsst.pipe.base.Struct`
282 The input ``outputs``, padded with dummy PTC datasets so that
283 every output reference has a matching entry.
284 """
285 newCovariances = []
286 for ref in outputRefs.outputCovariances:
287 outputExpId = ref.dataId['exposure']
288 if outputExpId in inputDims:
289 entry = inputDims.index(outputExpId)
290 newCovariances.append(outputs.outputCovariances[entry])
291 else:
292 newPtc = PhotonTransferCurveDataset(['no amp'], 'DUMMY', 1)
293 newPtc.setAmpValues('no amp')
294 newCovariances.append(newPtc)
295 return pipeBase.Struct(outputCovariances=newCovariances)
297 def run(self, inputExp, inputDims, taskMetadata):
299 """Measure covariances from difference of flat pairs
301 Parameters
302 ----------
303 inputExp : `dict` [`float`, `list` [`tuple`
304 [`~lsst.pipe.base.connections.DeferredDatasetRef`, `int`]]]
305 Dictionary that groups (deferred reference, exposure ID) pairs
306 of flat-field exposures that have the same exposure time
307 (seconds), or that groups them sequentially by their exposure ID.
308 inputDims : `list`
309 List of exposure IDs.
310 taskMetadata : `list` [`lsst.pipe.base.TaskMetadata`]
311 List of ISR task metadata, one per input exposure.
313 Returns
314 -------
315 results : `lsst.pipe.base.Struct`
316 The resulting Struct contains:
318 ``outputCovariances``
319 A list containing the per-pair PTC measurements (`list`
320 [`lsst.ip.isr.PhotonTransferCurveDataset`])
321 """
322 # inputExp.values() returns a view, which we turn into a list. We then
323 # access the first exposure-ID tuple to get the detector.
324 # The get(component='detector') call reads only the detector component from that reference.
325 detector = list(inputExp.values())[0][0][0].get(component='detector')
326 detNum = detector.getId()
327 amps = detector.getAmplifiers()
328 ampNames = [amp.getName() for amp in amps]
330 # Each amp may have a different min and max ADU signal
331 # specified in the config.
332 maxMeanSignalDict = {ampName: 1e6 for ampName in ampNames}
333 minMeanSignalDict = {ampName: 0.0 for ampName in ampNames}
334 for ampName in ampNames:
335 if 'ALL_AMPS' in self.config.maxMeanSignal:
336 maxMeanSignalDict[ampName] = self.config.maxMeanSignal['ALL_AMPS']
337 elif ampName in self.config.maxMeanSignal:
338 maxMeanSignalDict[ampName] = self.config.maxMeanSignal[ampName]
340 if 'ALL_AMPS' in self.config.minMeanSignal:
341 minMeanSignalDict[ampName] = self.config.minMeanSignal['ALL_AMPS']
342 elif ampName in self.config.minMeanSignal:
343 minMeanSignalDict[ampName] = self.config.minMeanSignal[ampName]
344 # These are the column names for `tupleRows` below.
345 tags = [('mu', '<f8'), ('afwVar', '<f8'), ('i', '<i8'), ('j', '<i8'), ('var', '<f8'),
346 ('cov', '<f8'), ('npix', '<i8'), ('ext', '<i8'), ('expTime', '<f8'), ('ampName', '<U3')]
347 # Create a dummy ptcDataset. Dummy datasets will be
348 # used to ensure that the number of output and input
349 # dimensions match.
350 dummyPtcDataset = PhotonTransferCurveDataset(ampNames, 'DUMMY',
351 self.config.maximumRangeCovariancesAstier)
352 for ampName in ampNames:
353 dummyPtcDataset.setAmpValues(ampName)
354 # Get read noise. Try from the exposure, then try
355 # taskMetadata. This adds a get() for the exposures.
356 readNoiseLists = {}
357 for pairIndex, expRefs in inputExp.items():
358 # This yields an index (exposure_time, seq_num, or flux)
359 # and a pair of references at that index.
360 for expRef, expId in expRefs:
361 # This yields an exposure ref and an exposureId.
362 exposureMetadata = expRef.get(component="metadata")
363 metadataIndex = inputDims.index(expId)
364 thisTaskMetadata = taskMetadata[metadataIndex]
366 for ampName in ampNames:
367 if ampName not in readNoiseLists:
368 readNoiseLists[ampName] = [self.getReadNoise(exposureMetadata,
369 thisTaskMetadata, ampName)]
370 else:
371 readNoiseLists[ampName].append(self.getReadNoise(exposureMetadata,
372 thisTaskMetadata, ampName))
374 readNoiseDict = {ampName: 0.0 for ampName in ampNames}
375 for ampName in ampNames:
376 # Take median read noise value
377 readNoiseDict[ampName] = np.nanmedian(readNoiseLists[ampName])
379 # Output list with PTC datasets.
380 partialPtcDatasetList = []
381 # The number of output references needs to match that of input
382 # references: initialize the output list with dummy PTC datasets.
383 for i in range(len(inputDims)):
384 partialPtcDatasetList.append(dummyPtcDataset)
386 if self.config.numEdgeSuspect > 0:
387 isrTask = IsrTask()
388 self.log.info("Masking %d pixels from the edges of all %ss as SUSPECT.",
389 self.config.numEdgeSuspect, self.config.edgeMaskLevel)
391 # Depending on the value of config.matchExposuresType
392 # 'expTime' can stand for exposure time, flux, or ID.
393 for expTime in inputExp:
394 exposures = inputExp[expTime]
395 if len(exposures) == 1:
396 self.log.warning("Only one exposure found at %s %f. Dropping exposure %d.",
397 self.config.matchExposuresType, expTime, exposures[0][1])
398 continue
399 else:
400 # Only use the first two exposures at expTime. Each
401 # element is a tuple (exposure, expId)
402 expRef1, expId1 = exposures[0]
403 expRef2, expId2 = exposures[1]
404 # use get() to obtain `lsst.afw.image.Exposure`
405 exp1, exp2 = expRef1.get(), expRef2.get()
407 if len(exposures) > 2:
408 self.log.warning("Already found 2 exposures at %s %f. Ignoring exposures: %s",
409 self.config.matchExposuresType, expTime,
410 ", ".join(str(i[1]) for i in exposures[2:]))
411 # Mask pixels at the edge of the detector or of each amp
412 if self.config.numEdgeSuspect > 0:
413 isrTask.maskEdges(exp1, numEdgePixels=self.config.numEdgeSuspect,
414 maskPlane="SUSPECT", level=self.config.edgeMaskLevel)
415 isrTask.maskEdges(exp2, numEdgePixels=self.config.numEdgeSuspect,
416 maskPlane="SUSPECT", level=self.config.edgeMaskLevel)
418 nAmpsNan = 0
419 partialPtcDataset = PhotonTransferCurveDataset(ampNames, 'PARTIAL',
420 self.config.maximumRangeCovariancesAstier)
421 for ampNumber, amp in enumerate(detector):
422 ampName = amp.getName()
423 if self.config.detectorMeasurementRegion == 'AMP':
424 region = amp.getBBox()
425 elif self.config.detectorMeasurementRegion == 'FULL':
426 region = None
428 # Get masked image regions, masking planes, statistic control
429 # objects, and clipped means. Calculate once to reuse in
430 # `measureMeanVarCov` and `getGainFromFlatPair`.
431 im1Area, im2Area, imStatsCtrl, mu1, mu2 = self.getImageAreasMasksStats(exp1, exp2,
432 region=region)
434 # `measureMeanVarCov` is the function that measures
435 # the variance and covariances from a region of
436 # the difference image of two flats at the same
437 # exposure time. The variable `covAstier` that is
438 # returned is of the form:
439 # [(i, j, var (cov[0,0]), cov, npix) for (i,j) in
440 # {maxLag, maxLag}^2].
441 muDiff, varDiff, covAstier = self.measureMeanVarCov(im1Area, im2Area, imStatsCtrl, mu1, mu2)
442 # Estimate the gain from the flat pair
443 if self.config.doGain:
444 gain = self.getGainFromFlatPair(im1Area, im2Area, imStatsCtrl, mu1, mu2,
445 correctionType=self.config.gainCorrectionType,
446 readNoise=readNoiseDict[ampName])
447 else:
448 gain = np.nan
450 # Correction factor for bias introduced by sigma
451 # clipping.
452 # Function returns 1/sqrt(varFactor), so it needs
453 # to be squared. varDiff is calculated via
454 # afwMath.VARIANCECLIP.
455 varFactor = sigmaClipCorrection(self.config.nSigmaClipPtc)**2
456 varDiff *= varFactor
458 expIdMask = True
459 # Mask data point at this mean signal level if
460 # the signal, variance, or covariance calculations
461 # from `measureMeanVarCov` resulted in NaNs.
462 if np.isnan(muDiff) or np.isnan(varDiff) or (covAstier is None):
463 self.log.warning("NaN mean or var, or None cov in amp %s in exposure pair %d, %d of "
464 "detector %d.", ampName, expId1, expId2, detNum)
465 nAmpsNan += 1
466 expIdMask = False
467 covArray = np.full((1, self.config.maximumRangeCovariancesAstier,
468 self.config.maximumRangeCovariancesAstier), np.nan)
469 covSqrtWeights = np.full_like(covArray, np.nan)
471 # Mask data point if it is outside of the
472 # specified mean signal range.
473 if (muDiff <= minMeanSignalDict[ampName]) or (muDiff >= maxMeanSignalDict[ampName]):
474 expIdMask = False
476 if covAstier is not None:
477 # Turn the tuples with the measured information
478 # into covariance arrays.
479 # covrow: (i, j, var (cov[0,0]), cov, npix)
480 tupleRows = [(muDiff, varDiff) + covRow + (ampNumber, expTime,
481 ampName) for covRow in covAstier]
482 tempStructArray = np.array(tupleRows, dtype=tags)
483 covArray, vcov, _ = self.makeCovArray(tempStructArray,
484 self.config.maximumRangeCovariancesAstier)
485 covSqrtWeights = np.nan_to_num(1./np.sqrt(vcov))
487 # Correct covArray for sigma clipping:
488 # 1) Apply varFactor twice for the whole covariance matrix
489 covArray *= varFactor**2
490 # 2) But, only once for the variance element of the
491 # matrix, covArray[0,0] (so divide one factor out).
492 covArray[0, 0] /= varFactor
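# Net effect of the two lines above: every covariance lag is scaled
# by varFactor**2 except covArray[0, 0] (the variance), which ends up
# scaled by a single factor of varFactor, consistent with varDiff.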
494 partialPtcDataset.setAmpValues(ampName, rawExpTime=[expTime], rawMean=[muDiff],
495 rawVar=[varDiff], inputExpIdPair=[(expId1, expId2)],
496 expIdMask=[expIdMask], covArray=covArray,
497 covSqrtWeights=covSqrtWeights, gain=gain,
498 noise=readNoiseDict[ampName])
499 # Use location of exp1 to save PTC dataset from (exp1, exp2) pair.
500 # Below, np.where(expId1 == np.array(inputDims)) returns a tuple
501 # with a single-element array, so [0][0]
502 # is necessary to extract the required index.
503 datasetIndex = np.where(expId1 == np.array(inputDims))[0][0]
504 # `partialPtcDatasetList` is a list of
505 # `PhotonTransferCurveDataset` objects. Some of them
506 # will be dummy datasets (to match length of input
507 # and output references), and the rest will have
508 # datasets with the mean signal, variance, and
509 # covariance measurements at a given exposure
510 # time. The next part of the PTC-measurement
511 # pipeline, `solve`, will take this list as input
512 # and assemble the measurements in the datasets
513 # in a form suitable for fitting a PTC
514 # model.
515 partialPtcDataset.updateMetadataFromExposures([exp1, exp2])
516 partialPtcDataset.updateMetadata(setDate=True, detector=detector)
517 partialPtcDatasetList[datasetIndex] = partialPtcDataset
519 if nAmpsNan == len(ampNames):
520 msg = f"NaN mean in all amps of exposure pair {expId1}, {expId2} of detector {detNum}."
521 self.log.warning(msg)
523 return pipeBase.Struct(
524 outputCovariances=partialPtcDatasetList,
525 )
527 def makeCovArray(self, inputTuple, maxRangeFromTuple):
528 """Make covariances array from tuple.
530 Parameters
531 ----------
532 inputTuple : `numpy.ndarray`
533 Structured array whose rows contain at least
534 (mu, afwVar, cov, var, i, j, npix), where:
535 mu : `float`
536 0.5*(mu1 + mu2), where mu1 is the mean value of flat1
537 and mu2 is the mean value of flat2.
538 afwVar : `float`
539 Variance of difference flat, calculated with afw.
540 cov : `float`
541 Covariance value at lag (i, j).
542 var : `float`
543 Variance (covariance value at lag (0, 0)).
544 i : `int`
545 Lag in dimension "x".
546 j : `int`
547 Lag in dimension "y".
548 npix : `int`
549 Number of pixels used for covariance calculation.
550 maxRangeFromTuple : `int`
551 Maximum range to select from tuple.
553 Returns
554 -------
555 cov : `numpy.array`
556 Covariance arrays, indexed by mean signal mu.
557 vCov : `numpy.array`
558 Variance of the [co]variance arrays, indexed by mean signal mu.
559 muVals : `numpy.array`
560 List of mean signal values.
561 """
562 if maxRangeFromTuple is not None:
563 cut = (inputTuple['i'] < maxRangeFromTuple) & (inputTuple['j'] < maxRangeFromTuple)
564 cutTuple = inputTuple[cut]
565 else:
566 cutTuple = inputTuple
567 # increasing mu order, so that we can group measurements with the
568 # same mu
569 muTemp = cutTuple['mu']
570 ind = np.argsort(muTemp)
572 cutTuple = cutTuple[ind]
573 # should group measurements on the same image pairs (same average)
574 mu = cutTuple['mu']
575 xx = np.hstack(([mu[0]], mu))
576 delta = xx[1:] - xx[:-1]
577 steps, = np.where(delta > 0)
578 ind = np.zeros_like(mu, dtype=int)
579 ind[steps] = 1
580 ind = np.cumsum(ind) # this acts as an image pair index.
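# For example, a sorted mu of [5., 5., 5., 10., 10., 10.] gives
# delta = [0, 0, 0, 5, 0, 0], steps = [3], and ind = [0, 0, 0, 1, 1, 1],
# so all rows measured on the same flat pair share one index.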
581 # now fill the 3-d cov array (and variance)
582 muVals = np.array(np.unique(mu))
583 i = cutTuple['i'].astype(int)
584 j = cutTuple['j'].astype(int)
585 c = 0.5*cutTuple['cov']
586 n = cutTuple['npix']
587 v = 0.5*cutTuple['var']
588 # book and fill
589 cov = np.ndarray((len(muVals), np.max(i)+1, np.max(j)+1))
590 var = np.zeros_like(cov)
591 cov[ind, i, j] = c
592 var[ind, i, j] = v**2/n
593 var[:, 0, 0] *= 2 # var(v) = 2*v**2/N
595 return cov, var, muVals
597 def measureMeanVarCov(self, im1Area, im2Area, imStatsCtrl, mu1, mu2):
598 """Calculate the mean of each of two exposures and the variance
599 and covariance of their difference. The variance is calculated
600 via afwMath, and the covariance via the methods in Astier+19
601 (appendix A). In theory, var = covariance[0,0]. This should
602 be validated, and in the future, we may decide to just keep
603 one (covariance).
605 Parameters
606 ----------
607 im1Area : `lsst.afw.image.maskedImage.MaskedImageF`
608 Masked image from exposure 1.
609 im2Area : `lsst.afw.image.maskedImage.MaskedImageF`
610 Masked image from exposure 2.
611 imStatsCtrl : `lsst.afw.math.StatisticsControl`
612 Statistics control object.
613 mu1: `float`
614 Clipped mean of im1Area (ADU).
615 mu2: `float`
616 Clipped mean of im2Area (ADU).
618 Returns
619 -------
620 mu : `float` or `NaN`
621 0.5*(mu1 + mu2), where mu1 and mu2 are the clipped means
622 of the regions in both exposures. If either mu1 or mu2 is
623 NaN, the returned value is NaN.
624 varDiff : `float` or `NaN`
625 Half of the clipped variance of the difference of the
626 regions in the two input exposures. If either mu1 or mu2 is
627 NaN, the returned value is NaN.
628 covDiffAstier : `list` or `NaN`
629 List with tuples of the form (dx, dy, var, cov, npix), where:
630 dx : `int`
631 Lag in x
632 dy : `int`
633 Lag in y
634 var : `float`
635 Variance at (dx, dy).
636 cov : `float`
637 Covariance at (dx, dy).
638 nPix : `int`
639 Number of pixel pairs used to evaluate var and cov.
641 If either mu1 or mu2 is NaN, the returned value is NaN.
642 """
643 if np.isnan(mu1) or np.isnan(mu2):
644 self.log.warning("Mean of amp in image 1 or 2 is NaN: %f, %f.", mu1, mu2)
645 return np.nan, np.nan, None
646 mu = 0.5*(mu1 + mu2)
648 # Take difference of pairs
649 # symmetric formula: diff = (mu2*im1-mu1*im2)/(0.5*(mu1+mu2))
650 temp = im2Area.clone()
651 temp *= mu1
652 diffIm = im1Area.clone()
653 diffIm *= mu2
654 diffIm -= temp
655 diffIm /= mu
657 if self.config.binSize > 1:
658 diffIm = afwMath.binImage(diffIm, self.config.binSize)
660 # Variance calculation via afwMath
661 varDiff = 0.5*(afwMath.makeStatistics(diffIm, afwMath.VARIANCECLIP, imStatsCtrl).getValue())
663 # Covariances calculations
664 # Get the pixels that were not clipped
665 varClip = afwMath.makeStatistics(diffIm, afwMath.VARIANCECLIP, imStatsCtrl).getValue()
666 meanClip = afwMath.makeStatistics(diffIm, afwMath.MEANCLIP, imStatsCtrl).getValue()
667 cut = meanClip + self.config.nSigmaClipPtc*np.sqrt(varClip)
668 unmasked = np.where(np.fabs(diffIm.image.array) <= cut, 1, 0)
670 # Get the pixels in the mask planes of the difference image
671 # that were ignored by the clipping algorithm
672 wDiff = np.where(diffIm.getMask().getArray() == 0, 1, 0)
673 # Combine the two sets of pixels ('1': use; '0': don't use)
674 # into a final weight matrix to be used in the covariance
675 # calculations below.
676 w = unmasked*wDiff
678 if np.sum(w) < self.config.minNumberGoodPixelsForCovariance/(self.config.binSize**2):
679 self.log.warning("Number of good points for covariance calculation (%s) is less "
680 "(than threshold %s)", np.sum(w),
681 self.config.minNumberGoodPixelsForCovariance/(self.config.binSize**2))
682 return np.nan, np.nan, None
684 maxRangeCov = self.config.maximumRangeCovariancesAstier
686 # Calculate covariances via FFT.
687 shapeDiff = np.array(diffIm.image.array.shape)
688 # Calculate the sizes of FFT dimensions.
689 s = shapeDiff + maxRangeCov
690 tempSize = np.array(np.log(s)/np.log(2.)).astype(int)
691 fftSize = np.array(2**(tempSize+1)).astype(int)
692 fftShape = (fftSize[0], fftSize[1])
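# For example (with an illustrative amp shape): a 2000 x 512 difference
# image with maxRangeCov = 8 gives s = (2008, 520), tempSize = (10, 9),
# and fftShape = (2048, 1024), i.e. the next power of two above each
# padded dimension.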
693 c = CovFastFourierTransform(diffIm.image.array, w, fftShape, maxRangeCov)
694 # np.sum(w) is the same as npix[0][0] returned in covDiffAstier
695 covDiffAstier = c.reportCovFastFourierTransform(maxRangeCov)
697 # Compare Cov[0,0] and afwMath.VARIANCECLIP: covDiffAstier[0]
698 # is the Cov[0,0] element, [3] is the variance, and there is a
699 # factor of 0.5 difference with afwMath.VARIANCECLIP.
700 thresholdPercentage = self.config.thresholdDiffAfwVarVsCov00
701 fractionalDiff = 100*np.fabs(1 - varDiff/(covDiffAstier[0][3]*0.5))
702 if fractionalDiff >= thresholdPercentage:
703 self.log.warning("Absolute fractional difference between afwMatch.VARIANCECLIP and Cov[0,0] "
704 "is more than %f%%: %f", thresholdPercentage, fractionalDiff)
706 return mu, varDiff, covDiffAstier
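# A minimal, standalone numpy sketch of the statistic computed above,
# for pure shot noise with no masking or clipping (the gain and signal
# values are illustrative):
#
#     import numpy as np
#     rng = np.random.default_rng(0)
#     gainTrue, muEl = 1.5, 20000.0                    # e-/ADU, mean signal in e-
#     im1 = rng.poisson(muEl, 1_000_000)/gainTrue      # flat 1 (ADU)
#     im2 = rng.poisson(muEl, 1_000_000)/gainTrue      # flat 2 (ADU)
#     mu1, mu2 = im1.mean(), im2.mean()
#     mu = 0.5*(mu1 + mu2)
#     diff = (mu2*im1 - mu1*im2)/mu                    # symmetric difference
#     varDiff = 0.5*diff.var()
#     print(mu/varDiff)                                # ~1.5: the PTC slope recovers the gain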
708 def getImageAreasMasksStats(self, exposure1, exposure2, region=None):
709 """Get image areas in a region as well as masks and statistic objects.
711 Parameters
712 ----------
713 exposure1 : `lsst.afw.image.exposure.ExposureF`
714 First exposure of flat field pair.
715 exposure2 : `lsst.afw.image.exposure.ExposureF`
716 Second exposure of flat field pair.
717 region : `lsst.geom.Box2I`, optional
718 Region of each exposure in which to perform the calculations
719 (e.g., an amplifier).
721 Returns
722 -------
723 im1Area : `lsst.afw.image.maskedImage.MaskedImageF`
724 Masked image from exposure 1.
725 im2Area : `lsst.afw.image.maskedImage.MaskedImageF`
726 Masked image from exposure 2.
727 imStatsCtrl : `lsst.afw.math.StatisticsControl`
728 Statistics control object.
729 mu1: `float`
730 Clipped mean of im1Area (ADU).
731 mu2: `float`
732 Clipped mean of im2Area (ADU).
733 """
734 if region is not None:
735 im1Area = exposure1.maskedImage[region]
736 im2Area = exposure2.maskedImage[region]
737 else:
738 im1Area = exposure1.maskedImage
739 im2Area = exposure2.maskedImage
741 # Get mask planes and construct statistics control object from one
742 # of the exposures
743 imMaskVal = exposure1.getMask().getPlaneBitMask(self.config.maskNameList)
744 imStatsCtrl = afwMath.StatisticsControl(self.config.nSigmaClipPtc,
745 self.config.nIterSigmaClipPtc,
746 imMaskVal)
747 imStatsCtrl.setNanSafe(True)
748 imStatsCtrl.setAndMask(imMaskVal)
750 mu1 = afwMath.makeStatistics(im1Area, afwMath.MEANCLIP, imStatsCtrl).getValue()
751 mu2 = afwMath.makeStatistics(im2Area, afwMath.MEANCLIP, imStatsCtrl).getValue()
753 return (im1Area, im2Area, imStatsCtrl, mu1, mu2)
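# A minimal sketch of the clipped statistics used above, applied to a
# plain list of values (the numbers and the outlier are illustrative):
#
#     import lsst.afw.math as afwMath
#     values = [1000.0]*999 + [1.0e6]             # one gross outlier
#     sctrl = afwMath.StatisticsControl(5.5, 3)   # nSigmaClipPtc, nIterSigmaClipPtc
#     meanClip = afwMath.makeStatistics(values, afwMath.MEANCLIP, sctrl).getValue()
#     # meanClip is ~1000: the iterative clip rejects the outlier.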
755 def getGainFromFlatPair(self, im1Area, im2Area, imStatsCtrl, mu1, mu2,
756 correctionType='NONE', readNoise=None):
757 """Estimate the gain from a single pair of flats.
759 The basic premise is 1/g = <(I1 - I2)^2/(I1 + I2)> = 1/const,
760 where I1 and I2 correspond to flats 1 and 2, respectively.
761 Corrections for the variable QE and the read-noise are then
762 made following the derivation in Robert Lupton's forthcoming
763 book, which gets
765 1/g = <(I1 - I2)^2/(I1 + I2)> - 1/mu(sigma^2 - 1/2g^2).
767 This is a quadratic equation, whose solutions are given by:
769 g = mu +/- sqrt(2*sigma^2 - 2*const*mu + mu^2)/(2*const*mu
770 - 2*sigma^2)
772 where 'mu' is the average signal level and 'sigma' is the
773 amplifier's read noise. The positive solution will be used.
774 The way the correction is applied depends on the value
775 supplied for correctionType.
777 correctionType is one of ['NONE', 'SIMPLE' or 'FULL']
778 'NONE' : uses the 1/g = <(I1 - I2)^2/(I1 + I2)> formula.
779 'SIMPLE' : uses the gain from the 'NONE' method for the
780 1/2g^2 term.
781 'FULL' : solves the full equation for g, discarding the
782 non-physical solution to the resulting quadratic.
784 Parameters
785 ----------
786 im1Area : `lsst.afw.image.maskedImage.MaskedImageF`
787 Masked image from exposure 1.
788 im2Area : `lsst.afw.image.maskedImage.MaskedImageF`
789 Masked image from exposure 2.
790 imStatsCtrl : `lsst.afw.math.StatisticsControl`
791 Statistics control object.
792 mu1: `float`
793 Clipped mean of im1Area (ADU).
794 mu2: `float`
795 Clipped mean of im2Area (ADU).
796 correctionType : `str`, optional
797 The correction applied, one of ['NONE', 'SIMPLE', 'FULL']
798 readNoise : `float`, optional
799 Amplifier readout noise (ADU).
801 Returns
802 -------
803 gain : `float`
804 Gain, in e/ADU.
806 Raises
807 ------
808 RuntimeError
809 Raise if `correctionType` is not one of 'NONE',
810 'SIMPLE', or 'FULL'.
811 """
812 if correctionType not in ['NONE', 'SIMPLE', 'FULL']:
813 raise RuntimeError("Unknown correction type: %s" % correctionType)
815 if correctionType != 'NONE' and not np.isfinite(readNoise):
816 self.log.warning("'correctionType' in 'getGainFromFlatPair' is %s, "
817 "but 'readNoise' is NaN. Setting 'correctionType' "
818 "to 'NONE', so a gain value will be estimated without "
819 "corrections." % correctionType)
820 correctionType = 'NONE'
822 mu = 0.5*(mu1 + mu2)
824 # ratioIm = (I1 - I2)^2 / (I1 + I2)
825 temp = im2Area.clone()
826 ratioIm = im1Area.clone()
827 ratioIm -= temp
828 ratioIm *= ratioIm
830 # Sum of pairs
831 sumIm = im1Area.clone()
832 sumIm += temp
834 ratioIm /= sumIm
836 const = afwMath.makeStatistics(ratioIm, afwMath.MEAN, imStatsCtrl).getValue()
837 gain = 1. / const
839 if correctionType == 'SIMPLE':
840 gain = 1/(const - (1/mu)*(readNoise**2 - (1/2*gain**2)))
841 elif correctionType == 'FULL':
842 root = np.sqrt(mu**2 - 2*mu*const + 2*readNoise**2)
843 denom = (2*const*mu - 2*readNoise**2)
844 positiveSolution = (root + mu)/denom
845 gain = positiveSolution
847 return gain
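# A standalone numpy sketch of the uncorrected ('NONE') estimator used
# above, on simulated Poisson flats (the gain and signal values are
# illustrative):
#
#     import numpy as np
#     rng = np.random.default_rng(42)
#     gainTrue, muEl = 1.5, 20000.0
#     i1 = rng.poisson(muEl, 1_000_000)/gainTrue
#     i2 = rng.poisson(muEl, 1_000_000)/gainTrue
#     const = np.mean((i1 - i2)**2/(i1 + i2))
#     print(1.0/const)                            # ~1.5 e-/ADU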
849 def getReadNoise(self, exposureMetadata, taskMetadata, ampName):
850 """Gets readout noise for an amp from ISR metadata.
852 If possible, this attempts to get the now-standard headers
853 added to the exposure itself. If not found there, the ISR
854 TaskMetadata is searched. If neither of these has the value,
855 warn and set the read noise to NaN.
857 Parameters
858 ----------
859 exposureMetadata : `lsst.daf.base.PropertySet`
860 Metadata to check for read noise first.
861 taskMetadata : `lsst.pipe.base.TaskMetadata`
862 ISR task metadata for this exposure.
863 ampName : `str`
864 Amplifier name.
866 Returns
867 -------
868 readNoise : `float`
869 The read noise for this set of exposure/amplifier.
870 """
871 # Try from the exposure first.
872 expectedKey = f"LSST ISR OVERSCAN RESIDUAL SERIAL STDEV {ampName}"
873 if expectedKey in exposureMetadata:
874 return exposureMetadata[expectedKey]
876 # If not, try getting it from the task metadata.
877 expectedKey = f"RESIDUAL STDEV {ampName}"
878 if "isr" in taskMetadata:
879 if expectedKey in taskMetadata["isr"]:
880 return taskMetadata["isr"][expectedKey]
882 self.log.warning("Median readout noise from ISR metadata for amp %s "
883 "could not be calculated." % ampName)
884 return np.nan
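# A small illustrative sketch of the lookup order above (the amp name
# and value are made up):
#
#     from lsst.daf.base import PropertyList
#     md = PropertyList()
#     md.set("LSST ISR OVERSCAN RESIDUAL SERIAL STDEV C00", 5.2)
#     # getReadNoise(md, taskMetadata, "C00") returns 5.2 from the exposure
#     # metadata; otherwise it falls back to
#     # taskMetadata["isr"]["RESIDUAL STDEV C00"], and finally to NaN.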