Coverage for python/lsst/cp/pipe/ptc/cpExtractPtcTask.py: 12% (238 statements)
# This file is part of cp_pipe.
#
# Developed for the LSST Data Management System.
# This product includes software developed by the LSST Project
# (https://www.lsst.org).
# See the COPYRIGHT file at the top-level directory of this distribution
# for details of code ownership.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
import numpy as np

import lsst.afw.math as afwMath
import lsst.pex.config as pexConfig
import lsst.pipe.base as pipeBase
from lsst.cp.pipe.utils import (arrangeFlatsByExpTime, arrangeFlatsByExpId,
                                arrangeFlatsByExpFlux, sigmaClipCorrection,
                                CovFastFourierTransform)

import lsst.pipe.base.connectionTypes as cT

from lsst.ip.isr import PhotonTransferCurveDataset
from lsst.ip.isr import IsrTask

__all__ = ['PhotonTransferCurveExtractConfig', 'PhotonTransferCurveExtractTask']


class PhotonTransferCurveExtractConnections(pipeBase.PipelineTaskConnections,
                                            dimensions=("instrument", "detector")):
    inputExp = cT.Input(
        name="ptcInputExposurePairs",
        doc="Input post-ISR processed exposure pairs (flats) to "
            "measure covariances from.",
        storageClass="Exposure",
        dimensions=("instrument", "exposure", "detector"),
        multiple=True,
        deferLoad=True,
    )
    taskMetadata = cT.Input(
        name="isr_metadata",
        doc="Input task metadata to extract statistics from.",
        storageClass="TaskMetadata",
        dimensions=("instrument", "exposure", "detector"),
        multiple=True,
    )
    outputCovariances = cT.Output(
        name="ptcCovariances",
        doc="Extracted flat (co)variances.",
        storageClass="PhotonTransferCurveDataset",
        dimensions=("instrument", "exposure", "detector"),
        isCalibration=True,
        multiple=True,
    )


class PhotonTransferCurveExtractConfig(pipeBase.PipelineTaskConfig,
                                       pipelineConnections=PhotonTransferCurveExtractConnections):
    """Configuration for the measurement of covariances from flats.
    """
    matchExposuresType = pexConfig.ChoiceField(
        dtype=str,
        doc="Match input exposures by time, flux, or expId",
        default='TIME',
        allowed={
            "TIME": "Match exposures by exposure time.",
            "FLUX": "Match exposures by target flux. Use header keyword"
                    " in matchExposuresByFluxKeyword to find the flux.",
            "EXPID": "Match exposures by exposure ID."
        }
    )
    matchExposuresByFluxKeyword = pexConfig.Field(
        dtype=str,
        doc="Header keyword for flux if matchExposuresType is FLUX.",
        default='CCOBFLUX',
    )
    maximumRangeCovariancesAstier = pexConfig.Field(
        dtype=int,
        doc="Maximum range of covariances as in Astier+19",
        default=8,
    )
    binSize = pexConfig.Field(
        dtype=int,
        doc="Bin the image by this factor in both dimensions.",
        default=1,
    )
    minMeanSignal = pexConfig.DictField(
        keytype=str,
        itemtype=float,
        doc="Minimum values (inclusive) of mean signal (in ADU) per amp to use."
            " The same cut is applied to all amps if this parameter [`dict`] is passed as"
            " {'ALL_AMPS': value}",
        default={'ALL_AMPS': 0.0},
    )
    maxMeanSignal = pexConfig.DictField(
        keytype=str,
        itemtype=float,
        doc="Maximum values (inclusive) of mean signal (in ADU) per amp to use."
            " The same cut is applied to all amps if this dictionary is of the form"
            " {'ALL_AMPS': value}",
        default={'ALL_AMPS': 1e6},
    )
    maskNameList = pexConfig.ListField(
        dtype=str,
        doc="Mask list to exclude from statistics calculations.",
        default=['SUSPECT', 'BAD', 'NO_DATA', 'SAT'],
    )
    nSigmaClipPtc = pexConfig.Field(
        dtype=float,
        doc="Sigma cut for afwMath.StatisticsControl()",
        default=5.5,
    )
    nIterSigmaClipPtc = pexConfig.Field(
        dtype=int,
        doc="Number of sigma-clipping iterations for afwMath.StatisticsControl()",
        default=3,
    )
    minNumberGoodPixelsForCovariance = pexConfig.Field(
        dtype=int,
        doc="Minimum number of acceptable good pixels per amp to calculate the covariances (via FFT or"
            " directly).",
        default=10000,
    )
    thresholdDiffAfwVarVsCov00 = pexConfig.Field(
        dtype=float,
        doc="If the absolute fractional difference between afwMath.VARIANCECLIP and Cov00 "
            "for a region of a difference image is greater than this threshold (percentage), "
            "a warning will be issued.",
        default=1.,
    )
    detectorMeasurementRegion = pexConfig.ChoiceField(
        dtype=str,
        doc="Region of each exposure where to perform the calculations (amplifier or full image).",
        default='AMP',
        allowed={
            "AMP": "Amplifier of the detector.",
            "FULL": "Full image."
        }
    )
    numEdgeSuspect = pexConfig.Field(
        dtype=int,
        doc="Number of edge pixels to be flagged as untrustworthy.",
        default=0,
    )
    edgeMaskLevel = pexConfig.ChoiceField(
        dtype=str,
        doc="Mask edge pixels in which coordinate frame: DETECTOR or AMP?",
        default="DETECTOR",
        allowed={
            'DETECTOR': 'Mask only the edges of the full detector.',
            'AMP': 'Mask edges of each amplifier.',
        },
    )
    doGain = pexConfig.Field(
        dtype=bool,
        doc="Calculate a gain per input flat pair.",
        default=True,
    )
    gainCorrectionType = pexConfig.ChoiceField(
        dtype=str,
        doc="Correction type for the gain.",
        default='FULL',
        allowed={
            'NONE': 'No correction.',
            'SIMPLE': 'First order correction.',
            'FULL': 'Second order correction.'
        }
    )


class PhotonTransferCurveExtractTask(pipeBase.PipelineTask):
    """Task to measure covariances from flat fields.

    This task receives as input a list of flat-field images
    (flats), and sorts these flats into pairs taken at the
    same exposure time (a flat with no partner at a given
    exposure time is dropped with a warning, and extra flats
    are discarded if there are more than two per exposure
    time). This task measures the mean, variance, and
    covariances from a region (e.g., an amplifier) of the
    difference image of the two flats with the same exposure
    time (alternatively, all input images could have the same
    exposure time but their flux changed).

    The variance is calculated via afwMath, and the covariance
    via the methods in Astier+19 (appendix A). In theory,
    var = covariance[0,0]. This should be validated, and in the
    future, we may decide to just keep one (covariance).
    At this moment, if the two values differ by more than the value
    of `thresholdDiffAfwVarVsCov00` (default: 1%), a warning will
    be issued.

    The measured covariances at a given exposure time (along with
    other quantities such as the mean) are stored in a PTC dataset
    object (`~lsst.ip.isr.PhotonTransferCurveDataset`), which gets
    partially filled at this stage (the remainder of the attributes
    of the dataset will be filled after running the second task of
    the PTC-measurement pipeline, `~PhotonTransferCurveSolveTask`).

    The number of partially-filled
    `~lsst.ip.isr.PhotonTransferCurveDataset` objects will be less
    than the number of input exposures because the task combines
    input flats in pairs. However, it is required at this moment
    that the number of input dimensions matches
    bijectively the number of output dimensions. Therefore, a number
    of "dummy" PTC datasets are inserted in the output list. This
    output list will then be used as input of the next task in the
    PTC-measurement pipeline, `PhotonTransferCurveSolveTask`,
    which will assemble the multiple `PhotonTransferCurveDataset`
    objects into a single one in order to fit the measured covariances
    as a function of flux to one of three models
    (see `PhotonTransferCurveSolveTask` for details).

    Reference: Astier+19: "The Shape of the Photon Transfer Curve of CCD
    sensors", arXiv:1905.08677.
    """

    ConfigClass = PhotonTransferCurveExtractConfig
    _DefaultName = 'cpPtcExtract'

    def runQuantum(self, butlerQC, inputRefs, outputRefs):
        """Ensure that the input and output dimensions are passed along.

        Parameters
        ----------
        butlerQC : `~lsst.daf.butler.butlerQuantumContext.ButlerQuantumContext`
            Butler to operate on.
        inputRefs : `~lsst.pipe.base.connections.InputQuantizedConnection`
            Input data refs to load.
        outputRefs : `~lsst.pipe.base.connections.OutputQuantizedConnection`
            Output data refs to persist.
        """
        inputs = butlerQC.get(inputRefs)
        # Ids of input list of exposure references
        # (deferLoad=True in the input connections)
        inputs['inputDims'] = [expRef.datasetRef.dataId['exposure'] for expRef in inputRefs.inputExp]

        # Dictionary, keyed by expTime (or expFlux or expId), with tuples
        # containing flat exposures and their IDs.
        matchType = self.config.matchExposuresType
        if matchType == 'TIME':
            inputs['inputExp'] = arrangeFlatsByExpTime(inputs['inputExp'], inputs['inputDims'])
        elif matchType == 'FLUX':
            inputs['inputExp'] = arrangeFlatsByExpFlux(inputs['inputExp'], inputs['inputDims'],
                                                       self.config.matchExposuresByFluxKeyword)
        else:
            inputs['inputExp'] = arrangeFlatsByExpId(inputs['inputExp'], inputs['inputDims'])

        outputs = self.run(**inputs)
        butlerQC.put(outputs, outputRefs)

    def run(self, inputExp, inputDims, taskMetadata):
        """Measure covariances from difference of flat pairs.

        Parameters
        ----------
        inputExp : `dict` [`float`, `list`
                [`~lsst.pipe.base.connections.DeferredDatasetRef`]]
            Dictionary that groups references to flat-field exposures that
            have the same exposure time (seconds), or that groups them
            sequentially by their exposure id.
        inputDims : `list`
            List of exposure IDs.
        taskMetadata : `list` [`lsst.pipe.base.TaskMetadata`]
            List of exposures metadata from ISR.

        Returns
        -------
        results : `lsst.pipe.base.Struct`
            The resulting Struct contains:

            ``outputCovariances``
                A list containing the per-pair PTC measurements (`list`
                [`lsst.ip.isr.PhotonTransferCurveDataset`])
        """
        # inputExp.values() returns a view, which we turn into a list. We then
        # access the first exposure-ID tuple to get the detector.
        # The first "get()" retrieves the exposure from the exposure reference.
        detector = list(inputExp.values())[0][0][0].get(component='detector')
        detNum = detector.getId()
        amps = detector.getAmplifiers()
        ampNames = [amp.getName() for amp in amps]

        # Each amp may have a different min and max ADU signal
        # specified in the config.
        maxMeanSignalDict = {ampName: 1e6 for ampName in ampNames}
        minMeanSignalDict = {ampName: 0.0 for ampName in ampNames}
        for ampName in ampNames:
            if 'ALL_AMPS' in self.config.maxMeanSignal:
                maxMeanSignalDict[ampName] = self.config.maxMeanSignal['ALL_AMPS']
            elif ampName in self.config.maxMeanSignal:
                maxMeanSignalDict[ampName] = self.config.maxMeanSignal[ampName]

            if 'ALL_AMPS' in self.config.minMeanSignal:
                minMeanSignalDict[ampName] = self.config.minMeanSignal['ALL_AMPS']
            elif ampName in self.config.minMeanSignal:
                minMeanSignalDict[ampName] = self.config.minMeanSignal[ampName]
        # These are the column names for `tupleRows` below.
        tags = [('mu', '<f8'), ('afwVar', '<f8'), ('i', '<i8'), ('j', '<i8'), ('var', '<f8'),
                ('cov', '<f8'), ('npix', '<i8'), ('ext', '<i8'), ('expTime', '<f8'), ('ampName', '<U3')]
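        # The tuples assembled as `tupleRows` further below follow this same
        # column order: (muDiff, varDiff) from `measureMeanVarCov`, the
        # (i, j, var, cov, npix) entries of `covAstier`, and finally
        # (ampNumber, expTime, ampName).
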
        # Create a dummy ptcDataset. Dummy datasets will be
        # used to ensure that the number of output and input
        # dimensions match.
        dummyPtcDataset = PhotonTransferCurveDataset(ampNames, 'DUMMY',
                                                     self.config.maximumRangeCovariancesAstier)

        readNoiseDict = {ampName: 0.0 for ampName in ampNames}
        for ampName in ampNames:
            # Initialize amps of `dummyPtcDataset`.
            dummyPtcDataset.setAmpValues(ampName)
            # Overscan readnoise from post-ISR exposure metadata.
            # It will be used to estimate the gain from a pair of flats.
            readNoiseDict[ampName] = self.getReadNoiseFromMetadata(taskMetadata, ampName)

        # Output list with PTC datasets.
        partialPtcDatasetList = []
        # The number of output references needs to match that of input
        # references: initialize the output list with dummy PTC datasets.
        for i in range(len(inputDims)):
            partialPtcDatasetList.append(dummyPtcDataset)
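        # For example, six input flats forming three pairs will produce
        # three PARTIAL datasets plus three untouched DUMMY placeholders,
        # so the output list always matches the length of `inputDims`.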

        if self.config.numEdgeSuspect > 0:
            isrTask = IsrTask()
            self.log.info("Masking %d pixels from the edges of all exposures as SUSPECT.",
                          self.config.numEdgeSuspect)

        # Depending on the value of config.matchExposuresType
        # 'expTime' can stand for exposure time, flux, or ID.
        for expTime in inputExp:
            exposures = inputExp[expTime]
            if len(exposures) == 1:
                self.log.warning("Only one exposure found at %s %f. Dropping exposure %d.",
                                 self.config.matchExposuresType, expTime, exposures[0][1])
                continue
            else:
                # Only use the first two exposures at expTime. Each
                # element is a tuple (exposure reference, expId).
                expRef1, expId1 = exposures[0]
                expRef2, expId2 = exposures[1]
                # use get() to obtain `lsst.afw.image.Exposure`
                exp1, exp2 = expRef1.get(), expRef2.get()

                if len(exposures) > 2:
                    self.log.warning("Already found 2 exposures at %s %f. Ignoring exposures: %s",
                                     self.config.matchExposuresType, expTime,
                                     ", ".join(str(i[1]) for i in exposures[2:]))
            # Mask pixels at the edge of the detector or of each amp
            if self.config.numEdgeSuspect > 0:
                isrTask.maskEdges(exp1, numEdgePixels=self.config.numEdgeSuspect,
                                  maskPlane="SUSPECT", level=self.config.edgeMaskLevel)
                isrTask.maskEdges(exp2, numEdgePixels=self.config.numEdgeSuspect,
                                  maskPlane="SUSPECT", level=self.config.edgeMaskLevel)

            nAmpsNan = 0
            partialPtcDataset = PhotonTransferCurveDataset(ampNames, 'PARTIAL',
                                                           self.config.maximumRangeCovariancesAstier)
            for ampNumber, amp in enumerate(detector):
                ampName = amp.getName()
                if self.config.detectorMeasurementRegion == 'AMP':
                    region = amp.getBBox()
                elif self.config.detectorMeasurementRegion == 'FULL':
                    region = None

                # Get masked image regions, masking planes, statistic control
                # objects, and clipped means. Calculate once to reuse in
                # `measureMeanVarCov` and `getGainFromFlatPair`.
                im1Area, im2Area, imStatsCtrl, mu1, mu2 = self.getImageAreasMasksStats(exp1, exp2,
                                                                                       region=region)

                # `measureMeanVarCov` is the function that measures
                # the variance and covariances from a region of
                # the difference image of two flats at the same
                # exposure time. The variable `covAstier` that is
                # returned is of the form:
                # [(i, j, var (cov[0,0]), cov, npix) for (i,j) in
                # {maxLag, maxLag}^2].
                muDiff, varDiff, covAstier = self.measureMeanVarCov(im1Area, im2Area, imStatsCtrl, mu1, mu2)
                # Estimate the gain from the flat pair
                if self.config.doGain:
                    gain = self.getGainFromFlatPair(im1Area, im2Area, imStatsCtrl, mu1, mu2,
                                                    correctionType=self.config.gainCorrectionType,
                                                    readNoise=readNoiseDict[ampName])
                else:
                    gain = np.nan

                # Correction factor for bias introduced by sigma
                # clipping.
                # Function returns 1/sqrt(varFactor), so it needs
                # to be squared. varDiff is calculated via
                # afwMath.VARIANCECLIP.
                varFactor = sigmaClipCorrection(self.config.nSigmaClipPtc)**2
                varDiff *= varFactor

                expIdMask = True
                # Mask data point at this mean signal level if
                # the signal, variance, or covariance calculations
                # from `measureMeanVarCov` resulted in NaNs.
                if np.isnan(muDiff) or np.isnan(varDiff) or (covAstier is None):
                    self.log.warning("NaN mean or var, or None cov in amp %s in exposure pair %d, %d of "
                                     "detector %d.", ampName, expId1, expId2, detNum)
                    nAmpsNan += 1
                    expIdMask = False
                    covArray = np.full((1, self.config.maximumRangeCovariancesAstier,
                                        self.config.maximumRangeCovariancesAstier), np.nan)
                    covSqrtWeights = np.full_like(covArray, np.nan)

                # Mask data point if it is outside of the
                # specified mean signal range.
                if (muDiff <= minMeanSignalDict[ampName]) or (muDiff >= maxMeanSignalDict[ampName]):
                    expIdMask = False

                if covAstier is not None:
                    # Turn the tuples with the measured information
                    # into covariance arrays.
                    # covRow: (i, j, var (cov[0,0]), cov, npix)
                    tupleRows = [(muDiff, varDiff) + covRow + (ampNumber, expTime,
                                                               ampName) for covRow in covAstier]
                    tempStructArray = np.array(tupleRows, dtype=tags)
                    covArray, vcov, _ = self.makeCovArray(tempStructArray,
                                                          self.config.maximumRangeCovariancesAstier)
                    covSqrtWeights = np.nan_to_num(1./np.sqrt(vcov))

                    # Correct covArray for sigma clipping:
                    # 1) Apply varFactor twice for the whole covariance matrix
                    covArray *= varFactor**2
                    # 2) But, only once for the variance element of the
                    # matrix, covArray[0, 0] (so divide one factor out).
                    covArray[0, 0] /= varFactor

                partialPtcDataset.setAmpValues(ampName, rawExpTime=[expTime], rawMean=[muDiff],
                                               rawVar=[varDiff], inputExpIdPair=[(expId1, expId2)],
                                               expIdMask=[expIdMask], covArray=covArray,
                                               covSqrtWeights=covSqrtWeights, gain=gain,
                                               noise=readNoiseDict[ampName])
            # Use location of exp1 to save PTC dataset from (exp1, exp2) pair.
            # Below, np.where(expId1 == np.array(inputDims)) returns a tuple
            # with a single-element array, so [0][0]
            # is necessary to extract the required index.
            datasetIndex = np.where(expId1 == np.array(inputDims))[0][0]
            # `partialPtcDatasetList` is a list of
            # `PhotonTransferCurveDataset` objects. Some of them
            # will be dummy datasets (to match the length of the
            # input and output references), and the rest will have
            # datasets with the mean signal, variance, and
            # covariance measurements at a given exposure
            # time. The next part of the PTC-measurement
            # pipeline, `solve`, will take this list as input,
            # and assemble the measurements in the datasets
            # in an adequate manner for fitting a PTC
            # model.
            partialPtcDataset.updateMetadata(setDate=True, detector=detector)
            partialPtcDatasetList[datasetIndex] = partialPtcDataset

            if nAmpsNan == len(ampNames):
                msg = f"NaN mean in all amps of exposure pair {expId1}, {expId2} of detector {detNum}."
                self.log.warning(msg)
        return pipeBase.Struct(
            outputCovariances=partialPtcDatasetList,
        )

    def makeCovArray(self, inputTuple, maxRangeFromTuple):
        """Make covariances array from tuple.

        Parameters
        ----------
        inputTuple : `numpy.ndarray`
            Structured array with rows with at least
            (mu, afwVar, cov, var, i, j, npix), where:
            mu : `float`
                0.5*(mu1 + mu2), where mu1 is the mean value of flat1
                and mu2 is the mean value of flat2.
            afwVar : `float`
                Variance of difference flat, calculated with afw.
            cov : `float`
                Covariance value at lag (i, j).
            var : `float`
                Variance (covariance value at lag (0, 0)).
            i : `int`
                Lag in dimension "x".
            j : `int`
                Lag in dimension "y".
            npix : `int`
                Number of pixels used for covariance calculation.
        maxRangeFromTuple : `int`
            Maximum range to select from tuple.

        Returns
        -------
        cov : `numpy.array`
            Covariance arrays, indexed by mean signal mu.
        vCov : `numpy.array`
            Variance of the [co]variance arrays, indexed by mean signal mu.
        muVals : `numpy.array`
            List of mean signal values.
        """
        if maxRangeFromTuple is not None:
            cut = (inputTuple['i'] < maxRangeFromTuple) & (inputTuple['j'] < maxRangeFromTuple)
            cutTuple = inputTuple[cut]
        else:
            cutTuple = inputTuple
        # increasing mu order, so that we can group measurements with the
        # same mu
        muTemp = cutTuple['mu']
        ind = np.argsort(muTemp)

        cutTuple = cutTuple[ind]
        # should group measurements on the same image pairs (same average)
        mu = cutTuple['mu']
        xx = np.hstack(([mu[0]], mu))
        delta = xx[1:] - xx[:-1]
        steps, = np.where(delta > 0)
        ind = np.zeros_like(mu, dtype=int)
        ind[steps] = 1
        ind = np.cumsum(ind)  # this acts as an image pair index.
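        # For example, if mu were [1., 1., 2., 2.] (two lags measured on two
        # image pairs), then delta > 0 only at the start of the second group,
        # so ind becomes [0, 0, 1, 1]: one index per image pair.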
        # now fill the 3-d cov array (and variance)
        muVals = np.array(np.unique(mu))
        i = cutTuple['i'].astype(int)
        j = cutTuple['j'].astype(int)
        c = 0.5*cutTuple['cov']
        n = cutTuple['npix']
        v = 0.5*cutTuple['var']
        # book and fill
        cov = np.ndarray((len(muVals), np.max(i)+1, np.max(j)+1))
        var = np.zeros_like(cov)
        cov[ind, i, j] = c
        var[ind, i, j] = v**2/n
        var[:, 0, 0] *= 2  # var(v) = 2*v**2/N

        return cov, var, muVals

    def measureMeanVarCov(self, im1Area, im2Area, imStatsCtrl, mu1, mu2):
        """Calculate the mean of each of two exposures and the variance
        and covariance of their difference. The variance is calculated
        via afwMath, and the covariance via the methods in Astier+19
        (appendix A). In theory, var = covariance[0,0]. This should
        be validated, and in the future, we may decide to just keep
        one (covariance).

        Parameters
        ----------
        im1Area : `lsst.afw.image.maskedImage.MaskedImageF`
            Masked image from exposure 1.
        im2Area : `lsst.afw.image.maskedImage.MaskedImageF`
            Masked image from exposure 2.
        imStatsCtrl : `lsst.afw.math.StatisticsControl`
            Statistics control object.
        mu1 : `float`
            Clipped mean of im1Area (ADU).
        mu2 : `float`
            Clipped mean of im2Area (ADU).

        Returns
        -------
        mu : `float` or `NaN`
            0.5*(mu1 + mu2), where mu1 and mu2 are the clipped means
            of the regions in both exposures. If either mu1 or mu2 are
            NaN's, the returned value is NaN.
        varDiff : `float` or `NaN`
            Half of the clipped variance of the difference of the
            regions in the two input exposures. If either mu1 or mu2 are
            NaN's, the returned value is NaN.
        covDiffAstier : `list` or `None`
            List with tuples of the form (dx, dy, var, cov, npix), where:
                dx : `int`
                    Lag in x
                dy : `int`
                    Lag in y
                var : `float`
                    Variance at (dx, dy).
                cov : `float`
                    Covariance at (dx, dy).
                nPix : `int`
                    Number of pixel pairs used to evaluate var and cov.

            If either mu1 or mu2 are NaN's, the returned value is None.
        """
        if np.isnan(mu1) or np.isnan(mu2):
            self.log.warning("Mean of amp in image 1 or 2 is NaN: %f, %f.", mu1, mu2)
            return np.nan, np.nan, None
        mu = 0.5*(mu1 + mu2)

        # Take difference of pairs
        # symmetric formula: diff = (mu2*im1-mu1*im2)/(0.5*(mu1+mu2))
        temp = im2Area.clone()
        temp *= mu1
        diffIm = im1Area.clone()
        diffIm *= mu2
        diffIm -= temp
        diffIm /= mu
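        # Scaling each image by the mean of the other equalizes the two flux
        # levels before subtraction, so the fixed flat-field pattern (and any
        # small difference in flux between the two exposures) cancels to
        # first order and the statistics of diffIm reflect the noise at the
        # common signal level mu.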

        # Variance calculation via afwMath
        varDiff = 0.5*(afwMath.makeStatistics(diffIm, afwMath.VARIANCECLIP, imStatsCtrl).getValue())

        # Covariances calculations
        # Get the pixels that were not clipped
        varClip = afwMath.makeStatistics(diffIm, afwMath.VARIANCECLIP, imStatsCtrl).getValue()
        meanClip = afwMath.makeStatistics(diffIm, afwMath.MEANCLIP, imStatsCtrl).getValue()
        cut = meanClip + self.config.nSigmaClipPtc*np.sqrt(varClip)
        unmasked = np.where(np.fabs(diffIm.image.array) <= cut, 1, 0)

        # Select the pixels of the difference image that are not flagged
        # in any of its mask planes.
        wDiff = np.where(diffIm.getMask().getArray() == 0, 1, 0)
        # Combine the two sets of pixels ('1': use; '0': don't use)
        # into a final weight matrix to be used in the covariance
        # calculations below.
        w = unmasked*wDiff

        if np.sum(w) < self.config.minNumberGoodPixelsForCovariance:
            self.log.warning("Number of good points for covariance calculation (%s) is less "
                             "than threshold %s.", np.sum(w),
                             self.config.minNumberGoodPixelsForCovariance)
            return np.nan, np.nan, None

        maxRangeCov = self.config.maximumRangeCovariancesAstier

        # Calculate covariances via FFT.
        shapeDiff = np.array(diffIm.image.array.shape)
        # Calculate the sizes of FFT dimensions.
        s = shapeDiff + maxRangeCov
        tempSize = np.array(np.log(s)/np.log(2.)).astype(int)
        fftSize = np.array(2**(tempSize+1)).astype(int)
        fftShape = (fftSize[0], fftSize[1])
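        # As an illustration, a hypothetical 2048x512 amp region with
        # maxRangeCov=8 gives s = (2056, 520), tempSize = (11, 9), and
        # fftShape = (4096, 1024): a power of two comfortably larger than
        # the padded image in each dimension.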
        c = CovFastFourierTransform(diffIm.image.array, w, fftShape, maxRangeCov)
        # np.sum(w) is the same as npix[0][0] returned in covDiffAstier
        covDiffAstier = c.reportCovFastFourierTransform(maxRangeCov)

        # Compare Cov[0,0] and afwMath.VARIANCECLIP: covDiffAstier[0]
        # is the Cov[0,0] element, [3] is the variance, and there's a
        # factor of 0.5 difference with afwMath.VARIANCECLIP.
        thresholdPercentage = self.config.thresholdDiffAfwVarVsCov00
        fractionalDiff = 100*np.fabs(1 - varDiff/(covDiffAstier[0][3]*0.5))
        if fractionalDiff >= thresholdPercentage:
            self.log.warning("Absolute fractional difference between afwMath.VARIANCECLIP and Cov[0,0] "
                             "is more than %f%%: %f", thresholdPercentage, fractionalDiff)

        return mu, varDiff, covDiffAstier

    def getImageAreasMasksStats(self, exposure1, exposure2, region=None):
        """Get image areas in a region as well as masks and statistic objects.

        Parameters
        ----------
        exposure1 : `lsst.afw.image.exposure.ExposureF`
            First exposure of flat field pair.
        exposure2 : `lsst.afw.image.exposure.ExposureF`
            Second exposure of flat field pair.
        region : `lsst.geom.Box2I`, optional
            Region of each exposure where to perform the calculations
            (e.g., an amplifier).

        Returns
        -------
        im1Area : `lsst.afw.image.maskedImage.MaskedImageF`
            Masked image from exposure 1.
        im2Area : `lsst.afw.image.maskedImage.MaskedImageF`
            Masked image from exposure 2.
        imStatsCtrl : `lsst.afw.math.StatisticsControl`
            Statistics control object.
        mu1 : `float`
            Clipped mean of im1Area (ADU).
        mu2 : `float`
            Clipped mean of im2Area (ADU).
        """
        if region is not None:
            im1Area = exposure1.maskedImage[region]
            im2Area = exposure2.maskedImage[region]
        else:
            im1Area = exposure1.maskedImage
            im2Area = exposure2.maskedImage

        if self.config.binSize > 1:
            im1Area = afwMath.binImage(im1Area, self.config.binSize)
            im2Area = afwMath.binImage(im2Area, self.config.binSize)

        # Get mask planes and construct statistics control object from one
        # of the exposures
        imMaskVal = exposure1.getMask().getPlaneBitMask(self.config.maskNameList)
        imStatsCtrl = afwMath.StatisticsControl(self.config.nSigmaClipPtc,
                                                self.config.nIterSigmaClipPtc,
                                                imMaskVal)
        imStatsCtrl.setNanSafe(True)
        imStatsCtrl.setAndMask(imMaskVal)

        mu1 = afwMath.makeStatistics(im1Area, afwMath.MEANCLIP, imStatsCtrl).getValue()
        mu2 = afwMath.makeStatistics(im2Area, afwMath.MEANCLIP, imStatsCtrl).getValue()

        return (im1Area, im2Area, imStatsCtrl, mu1, mu2)

    def getGainFromFlatPair(self, im1Area, im2Area, imStatsCtrl, mu1, mu2,
                            correctionType='NONE', readNoise=None):
        """Estimate the gain from a single pair of flats.

        The basic premise is 1/g = <(I1 - I2)^2/(I1 + I2)> = 1/const,
        where I1 and I2 correspond to flats 1 and 2, respectively. (For
        Poisson-dominated flats in ADU, Var(I1 - I2) = 2*mu/g while
        <I1 + I2> = 2*mu, so the ratio estimates 1/g.) Corrections for
        the variable QE and the read-noise are then made following the
        derivation in Robert Lupton's forthcoming book, which gets

        1/g = <(I1 - I2)^2/(I1 + I2)> - (1/mu)*(sigma^2 - 1/(2*g^2)).

        This is a quadratic equation in g, whose solutions are given by:

        g = (mu +/- sqrt(mu^2 - 2*mu*const + 2*sigma^2))/(2*const*mu
            - 2*sigma^2)

        where 'mu' is the average signal level and 'sigma' is the
        amplifier's readnoise. The positive solution will be used.
        The way the correction is applied depends on the value
        supplied for correctionType.

        correctionType is one of ['NONE', 'SIMPLE' or 'FULL']
            'NONE' : uses the 1/g = <(I1 - I2)^2/(I1 + I2)> formula.
            'SIMPLE' : uses the gain from the 'NONE' method for the
                       1/(2*g^2) term.
            'FULL' : solves the full equation for g, discarding the
                     non-physical solution to the resulting quadratic.

        Parameters
        ----------
        im1Area : `lsst.afw.image.maskedImage.MaskedImageF`
            Masked image from exposure 1.
        im2Area : `lsst.afw.image.maskedImage.MaskedImageF`
            Masked image from exposure 2.
        imStatsCtrl : `lsst.afw.math.StatisticsControl`
            Statistics control object.
        mu1 : `float`
            Clipped mean of im1Area (ADU).
        mu2 : `float`
            Clipped mean of im2Area (ADU).
        correctionType : `str`, optional
            The correction applied, one of ['NONE', 'SIMPLE', 'FULL']
        readNoise : `float`, optional
            Amplifier readout noise (ADU).

        Returns
        -------
        gain : `float`
            Gain, in e/ADU.

        Raises
        ------
        RuntimeError
            Raise if `correctionType` is not one of 'NONE',
            'SIMPLE', or 'FULL'.
        """
        if correctionType not in ['NONE', 'SIMPLE', 'FULL']:
            raise RuntimeError("Unknown correction type: %s" % correctionType)

        if correctionType != 'NONE' and readNoise is None:
            self.log.warning("'correctionType' in 'getGainFromFlatPair' is %s, "
                             "but 'readNoise' is 'None'. Setting 'correctionType' "
                             "to 'NONE', so a gain value will be estimated without "
                             "corrections." % correctionType)
            correctionType = 'NONE'

        mu = 0.5*(mu1 + mu2)

        # ratioIm = (I1 - I2)^2 / (I1 + I2)
        temp = im2Area.clone()
        ratioIm = im1Area.clone()
        ratioIm -= temp
        ratioIm *= ratioIm

        # Sum of pairs
        sumIm = im1Area.clone()
        sumIm += temp

        ratioIm /= sumIm

        const = afwMath.makeStatistics(ratioIm, afwMath.MEAN, imStatsCtrl).getValue()
        gain = 1. / const
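        # At this point `gain` is the uncorrected 'NONE' estimate; 'SIMPLE'
        # below reuses it for the 1/(2*g^2) term, while 'FULL' solves the
        # quadratic for g directly and keeps the positive root.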

        if correctionType == 'SIMPLE':
            gain = 1/(const - (1/mu)*(readNoise**2 - (1/(2*gain**2))))
        elif correctionType == 'FULL':
            root = np.sqrt(mu**2 - 2*mu*const + 2*readNoise**2)
            denom = (2*const*mu - 2*readNoise**2)
            positiveSolution = (root + mu)/denom
            gain = positiveSolution

        return gain

    def getReadNoiseFromMetadata(self, taskMetadata, ampName):
        """Gets readout noise for an amp from ISR metadata.

        Parameters
        ----------
        taskMetadata : `list` [`lsst.pipe.base.TaskMetadata`]
            List of exposures metadata from ISR.
        ampName : `str`
            Amplifier name.

        Returns
        -------
        readNoise : `float`
            Median of the overscan readnoise in the
            post-ISR metadata of the input exposures (ADU).
            Returns 'None' if the median could not be calculated.
        """
        # Empirical readout noise [ADU] measured from an
        # overscan-subtracted overscan during ISR.
        expectedKey = f"RESIDUAL STDEV {ampName}"
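        # For example, for a hypothetical amplifier named 'C10', the metadata
        # key searched for would be 'RESIDUAL STDEV C10'.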

        readNoises = []
        for expMetadata in taskMetadata:
            if 'isr' in expMetadata:
                overscanNoise = expMetadata['isr'][expectedKey]
            else:
                continue
            readNoises.append(overscanNoise)

        if len(readNoises):
            readNoise = np.median(np.array(readNoises))
        else:
            self.log.warning("Median readout noise from ISR metadata for amp %s "
                             "could not be calculated." % ampName)
            readNoise = None

        return readNoise