Coverage for python/lsst/cp/pipe/ptc/cpExtractPtcTask.py: 14% (234 statements)
# This file is part of cp_pipe.
#
# Developed for the LSST Data Management System.
# This product includes software developed by the LSST Project
# (https://www.lsst.org).
# See the COPYRIGHT file at the top-level directory of this distribution
# for details of code ownership.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
import numpy as np

import lsst.afw.math as afwMath
import lsst.pex.config as pexConfig
import lsst.pipe.base as pipeBase
from lsst.cp.pipe.utils import (arrangeFlatsByExpTime, arrangeFlatsByExpId,
                                sigmaClipCorrection, CovFastFourierTransform)

import lsst.pipe.base.connectionTypes as cT

from lsst.ip.isr import PhotonTransferCurveDataset
from lsst.ip.isr import IsrTask

__all__ = ['PhotonTransferCurveExtractConfig', 'PhotonTransferCurveExtractTask']


class PhotonTransferCurveExtractConnections(pipeBase.PipelineTaskConnections,
                                            dimensions=("instrument", "detector")):
    inputExp = cT.Input(
        name="ptcInputExposurePairs",
        doc="Input post-ISR processed exposure pairs (flats) to "
            "measure covariances from.",
        storageClass="Exposure",
        dimensions=("instrument", "exposure", "detector"),
        multiple=True,
        deferLoad=True,
    )
    taskMetadata = cT.Input(
        name="isrTask_metadata",
        doc="Input task metadata to extract statistics from.",
        storageClass="TaskMetadata",
        dimensions=("instrument", "exposure", "detector"),
        multiple=True,
    )
    outputCovariances = cT.Output(
        name="ptcCovariances",
        doc="Extracted flat (co)variances.",
        storageClass="PhotonTransferCurveDataset",
        dimensions=("instrument", "exposure", "detector"),
        multiple=True,
    )


class PhotonTransferCurveExtractConfig(pipeBase.PipelineTaskConfig,
                                       pipelineConnections=PhotonTransferCurveExtractConnections):
    """Configuration for the measurement of covariances from flats.
    """
    matchByExposureId = pexConfig.Field(
        dtype=bool,
        doc="Should exposures be matched by ID rather than exposure time?",
        default=False,
    )
    maximumRangeCovariancesAstier = pexConfig.Field(
        dtype=int,
        doc="Maximum range of covariances as in Astier+19",
        default=8,
    )
    binSize = pexConfig.Field(
        dtype=int,
        doc="Bin the image by this factor in both dimensions.",
        default=1,
    )
    minMeanSignal = pexConfig.DictField(
        keytype=str,
        itemtype=float,
        doc="Minimum values (inclusive) of mean signal (in ADU) per amp to use."
            " The same cut is applied to all amps if this parameter [`dict`] is passed as "
            " {'ALL_AMPS': value}",
        default={'ALL_AMPS': 0.0},
    )
    maxMeanSignal = pexConfig.DictField(
        keytype=str,
        itemtype=float,
        doc="Maximum values (inclusive) of mean signal (in ADU) per amp to use."
            " The same cut is applied to all amps if this dictionary is of the form"
            " {'ALL_AMPS': value}",
        default={'ALL_AMPS': 1e6},
    )
    maskNameList = pexConfig.ListField(
        dtype=str,
        doc="Mask list to exclude from statistics calculations.",
        default=['SUSPECT', 'BAD', 'NO_DATA', 'SAT'],
    )
    nSigmaClipPtc = pexConfig.Field(
        dtype=float,
        doc="Sigma cut for afwMath.StatisticsControl()",
        default=5.5,
    )
    nIterSigmaClipPtc = pexConfig.Field(
        dtype=int,
        doc="Number of sigma-clipping iterations for afwMath.StatisticsControl()",
        default=3,
    )
    minNumberGoodPixelsForCovariance = pexConfig.Field(
        dtype=int,
        doc="Minimum number of acceptable good pixels per amp to calculate the covariances (via FFT or"
            " directly).",
        default=10000,
    )
    thresholdDiffAfwVarVsCov00 = pexConfig.Field(
        dtype=float,
        doc="If the absolute fractional difference between afwMath.VARIANCECLIP and Cov00 "
            "for a region of a difference image is greater than this threshold (percentage), "
            "a warning will be issued.",
        default=1.,
    )
    detectorMeasurementRegion = pexConfig.ChoiceField(
        dtype=str,
        doc="Region of each exposure in which to perform the calculations (amplifier or full image).",
        default='AMP',
        allowed={
            "AMP": "Amplifier of the detector.",
            "FULL": "Full image."
        }
    )
    numEdgeSuspect = pexConfig.Field(
        dtype=int,
        doc="Number of edge pixels to be flagged as untrustworthy.",
        default=0,
    )
    edgeMaskLevel = pexConfig.ChoiceField(
        dtype=str,
        doc="Mask edge pixels in which coordinate frame: DETECTOR or AMP?",
        default="DETECTOR",
        allowed={
            'DETECTOR': 'Mask only the edges of the full detector.',
            'AMP': 'Mask edges of each amplifier.',
        },
    )
    doGain = pexConfig.Field(
        dtype=bool,
        doc="Calculate a gain per input flat pair.",
        default=True,
    )
    gainCorrectionType = pexConfig.ChoiceField(
        dtype=str,
        doc="Correction type for the gain.",
        default='FULL',
        allowed={
            'NONE': 'No correction.',
            'SIMPLE': 'First order correction.',
            'FULL': 'Second order correction.'
        }
    )


class PhotonTransferCurveExtractTask(pipeBase.PipelineTask,
                                     pipeBase.CmdLineTask):
    """Task to measure covariances from flat fields.

    This task receives as input a list of flat-field images
    (flats) and sorts these flats into pairs taken at the
    same exposure time (if only one flat is found at a given
    exposure time, the task warns and drops it, and it discards
    extra flats if there are more than two per exposure time).
    This task measures the mean, variance, and covariances from
    a region (e.g., an amplifier) of the difference image of the
    two flats with the same exposure time.

    The variance is calculated via afwMath, and the covariance
    via the methods in Astier+19 (appendix A). In theory,
    var = covariance[0,0]. This should be validated, and in the
    future, we may decide to just keep one (covariance).
    At this moment, if the two values differ by more than the value
    of `thresholdDiffAfwVarVsCov00` (default: 1%), a warning will
    be issued.

    The measured covariances at a given exposure time (along with
    other quantities such as the mean) are stored in a PTC dataset
    object (`~lsst.ip.isr.PhotonTransferCurveDataset`), which gets
    partially filled at this stage (the remainder of the attributes
    of the dataset will be filled after running the second task of
    the PTC-measurement pipeline, `~PhotonTransferCurveSolveTask`).

    The number of partially-filled
    `~lsst.ip.isr.PhotonTransferCurveDataset` objects will be less
    than the number of input exposures because the task combines
    input flats in pairs. However, it is currently required that the
    number of input dimensions matches bijectively the number of
    output dimensions. Therefore, a number of "dummy" PTC datasets
    are inserted in the output list. This output list will then be
    used as input of the next task in the PTC-measurement pipeline,
    `PhotonTransferCurveSolveTask`, which will assemble the multiple
    `PhotonTransferCurveDataset` objects into a single one in order
    to fit the measured covariances as a function of flux to one of
    three models (see `PhotonTransferCurveSolveTask` for details).

    Reference: Astier+19: "The Shape of the Photon Transfer Curve of CCD
    sensors", arXiv:1905.08677.
    """

    ConfigClass = PhotonTransferCurveExtractConfig
    _DefaultName = 'cpPtcExtract'

    def runQuantum(self, butlerQC, inputRefs, outputRefs):
        """Ensure that the input and output dimensions are passed along.

        Parameters
        ----------
        butlerQC : `~lsst.daf.butler.butlerQuantumContext.ButlerQuantumContext`
            Butler to operate on.
        inputRefs : `~lsst.pipe.base.connections.InputQuantizedConnection`
            Input data refs to load.
        outputRefs : `~lsst.pipe.base.connections.OutputQuantizedConnection`
            Output data refs to persist.
        """
        inputs = butlerQC.get(inputRefs)
        # Ids of input list of exposure references
        # (deferLoad=True in the input connections)
        inputs['inputDims'] = [expRef.datasetRef.dataId['exposure'] for expRef in inputRefs.inputExp]

        # Dictionary, keyed by expTime, with tuples containing flat
        # exposures and their IDs.
        if self.config.matchByExposureId:
            inputs['inputExp'] = arrangeFlatsByExpId(inputs['inputExp'], inputs['inputDims'])
        else:
            inputs['inputExp'] = arrangeFlatsByExpTime(inputs['inputExp'], inputs['inputDims'])

        outputs = self.run(**inputs)
        butlerQC.put(outputs, outputRefs)
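
    # Illustrative sketch (inferred from how `run` consumes `inputExp`) of the
    # grouping produced above by `arrangeFlatsByExpTime`: a dictionary keyed by
    # exposure time whose values are lists of (deferred exposure reference,
    # exposure ID) tuples, e.g.
    #
    #     inputExp = {
    #         0.5: [(expRefA, 2022032800001), (expRefB, 2022032800002)],
    #         1.0: [(expRefC, 2022032800003), (expRefD, 2022032800004)],
    #     }
    #
    # When matching by exposure ID, `arrangeFlatsByExpId` instead groups the
    # flats sequentially into pairs. The exposure IDs shown here are made up
    # purely for illustration.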

    def run(self, inputExp, inputDims, taskMetadata):
        """Measure covariances from the difference of flat pairs.

        Parameters
        ----------
        inputExp : `dict` [`float`, `list`
                [`~lsst.pipe.base.connections.DeferredDatasetRef`]]
            Dictionary that groups references to flat-field exposures that
            have the same exposure time (seconds), or that groups them
            sequentially by their exposure id.
        inputDims : `list`
            List of exposure IDs.
        taskMetadata : `list` [`lsst.pipe.base.TaskMetadata`]
            List of exposure metadata from ISR.

        Returns
        -------
        results : `lsst.pipe.base.Struct`
            The resulting Struct contains:

            ``outputCovariances``
                A list containing the per-pair PTC measurements (`list`
                [`lsst.ip.isr.PhotonTransferCurveDataset`]).
        """
        # inputExp.values() returns a view, which we turn into a list. We then
        # access the first exposure-ID tuple to get the detector.
        # The first "get()" retrieves the exposure from the exposure reference.
        detector = list(inputExp.values())[0][0][0].get(component='detector')
        detNum = detector.getId()
        amps = detector.getAmplifiers()
        ampNames = [amp.getName() for amp in amps]

        # Each amp may have a different min and max ADU signal
        # specified in the config.
        maxMeanSignalDict = {ampName: 1e6 for ampName in ampNames}
        minMeanSignalDict = {ampName: 0.0 for ampName in ampNames}
        for ampName in ampNames:
            if 'ALL_AMPS' in self.config.maxMeanSignal:
                maxMeanSignalDict[ampName] = self.config.maxMeanSignal['ALL_AMPS']
            elif ampName in self.config.maxMeanSignal:
                maxMeanSignalDict[ampName] = self.config.maxMeanSignal[ampName]

            if 'ALL_AMPS' in self.config.minMeanSignal:
                minMeanSignalDict[ampName] = self.config.minMeanSignal['ALL_AMPS']
            elif ampName in self.config.minMeanSignal:
                minMeanSignalDict[ampName] = self.config.minMeanSignal[ampName]

        # These are the column names for `tupleRows` below.
        tags = [('mu', '<f8'), ('afwVar', '<f8'), ('i', '<i8'), ('j', '<i8'), ('var', '<f8'),
                ('cov', '<f8'), ('npix', '<i8'), ('ext', '<i8'), ('expTime', '<f8'), ('ampName', '<U3')]
        # Create a dummy ptcDataset. Dummy datasets will be
        # used to ensure that the number of output and input
        # dimensions match.
        dummyPtcDataset = PhotonTransferCurveDataset(ampNames, 'DUMMY',
                                                     self.config.maximumRangeCovariancesAstier)

        readNoiseDict = {ampName: 0.0 for ampName in ampNames}
        for ampName in ampNames:
            # Initialize amps of `dummyPtcDataset`.
            dummyPtcDataset.setAmpValues(ampName)
            # Overscan readnoise from post-ISR exposure metadata.
            # It will be used to estimate the gain from a pair of flats.
            readNoiseDict[ampName] = self.getReadNoiseFromMetadata(taskMetadata, ampName)

        # Output list with PTC datasets.
        partialPtcDatasetList = []
        # The number of output references needs to match that of input
        # references: initialize the output list with dummy PTC datasets.
        for i in range(len(inputDims)):
            partialPtcDatasetList.append(dummyPtcDataset)

        if self.config.numEdgeSuspect > 0:
            isrTask = IsrTask()
            self.log.info("Masking %d pixels from the edges of all exposures as SUSPECT.",
                          self.config.numEdgeSuspect)

        for expTime in inputExp:
            exposures = inputExp[expTime]
            if len(exposures) == 1:
                self.log.warning("Only one exposure found at expTime %f. Dropping exposure %d.",
                                 expTime, exposures[0][1])
                continue
            else:
                # Only use the first two exposures at expTime. Each
                # element is a tuple (exposure, expId)
                expRef1, expId1 = exposures[0]
                expRef2, expId2 = exposures[1]
                # use get() to obtain `lsst.afw.image.Exposure`
                exp1, exp2 = expRef1.get(), expRef2.get()

                if len(exposures) > 2:
                    self.log.warning("Already found 2 exposures at expTime %f. Ignoring exposures: %s",
                                     expTime, ", ".join(str(i[1]) for i in exposures[2:]))
            # Mask pixels at the edge of the detector or of each amp
            if self.config.numEdgeSuspect > 0:
                isrTask.maskEdges(exp1, numEdgePixels=self.config.numEdgeSuspect,
                                  maskPlane="SUSPECT", level=self.config.edgeMaskLevel)
                isrTask.maskEdges(exp2, numEdgePixels=self.config.numEdgeSuspect,
                                  maskPlane="SUSPECT", level=self.config.edgeMaskLevel)

            nAmpsNan = 0
            partialPtcDataset = PhotonTransferCurveDataset(ampNames, 'PARTIAL',
                                                           self.config.maximumRangeCovariancesAstier)
            for ampNumber, amp in enumerate(detector):
                ampName = amp.getName()
                # covAstier: [(i, j, var (cov[0,0]), cov, npix) for
                # (i,j) in {maxLag, maxLag}^2]
                if self.config.detectorMeasurementRegion == 'AMP':
                    region = amp.getBBox()
                elif self.config.detectorMeasurementRegion == 'FULL':
                    region = None

                # Get masked image regions, masking planes, statistic control
                # objects, and clipped means. Calculate once to reuse in
                # `measureMeanVarCov` and `getGainFromFlatPair`.
                im1Area, im2Area, imStatsCtrl, mu1, mu2 = self.getImageAreasMasksStats(exp1, exp2,
                                                                                       region=region)

                # `measureMeanVarCov` is the function that measures
                # the variance and covariances from a region of
                # the difference image of two flats at the same
                # exposure time. The variable `covAstier` that is
                # returned is of the form:
                # [(i, j, var (cov[0,0]), cov, npix) for (i,j) in
                # {maxLag, maxLag}^2].
                muDiff, varDiff, covAstier = self.measureMeanVarCov(im1Area, im2Area, imStatsCtrl, mu1, mu2)

                # Estimate the gain from the flat pair
                if self.config.doGain:
                    gain = self.getGainFromFlatPair(im1Area, im2Area, imStatsCtrl, mu1, mu2,
                                                    correctionType=self.config.gainCorrectionType,
                                                    readNoise=readNoiseDict[ampName])
                else:
                    gain = np.nan

                # Correction factor for bias introduced by sigma
                # clipping.
                # Function returns 1/sqrt(varFactor), so it needs
                # to be squared. varDiff is calculated via
                # afwMath.VARIANCECLIP.
                varFactor = sigmaClipCorrection(self.config.nSigmaClipPtc)**2
                varDiff *= varFactor

                expIdMask = True
                # Mask data point at this mean signal level if
                # the signal, variance, or covariance calculations
                # from `measureMeanVarCov` resulted in NaNs.
                if np.isnan(muDiff) or np.isnan(varDiff) or (covAstier is None):
                    self.log.warning("NaN mean or var, or None cov in amp %s in exposure pair %d, %d of "
                                     "detector %d.", ampName, expId1, expId2, detNum)
                    nAmpsNan += 1
                    expIdMask = False
                    covArray = np.full((1, self.config.maximumRangeCovariancesAstier,
                                        self.config.maximumRangeCovariancesAstier), np.nan)
                    covSqrtWeights = np.full_like(covArray, np.nan)

                # Mask data point if it is outside of the
                # specified mean signal range.
                if (muDiff <= minMeanSignalDict[ampName]) or (muDiff >= maxMeanSignalDict[ampName]):
                    expIdMask = False

                if covAstier is not None:
                    # Turn the tuples with the measured information
                    # into covariance arrays.
                    tupleRows = [(muDiff, varDiff) + covRow + (ampNumber, expTime,
                                                               ampName) for covRow in covAstier]
                    tempStructArray = np.array(tupleRows, dtype=tags)
                    covArray, vcov, _ = self.makeCovArray(tempStructArray,
                                                          self.config.maximumRangeCovariancesAstier)
                    covSqrtWeights = np.nan_to_num(1./np.sqrt(vcov))

                    # Correct covArray for sigma clipping:
                    # 1) Apply varFactor twice for the whole covariance matrix.
                    covArray *= varFactor**2
                    # 2) But, only once for the variance element of the
                    # matrix, covArray[0, 0] (so divide one factor out).
                    covArray[0, 0] /= varFactor

                partialPtcDataset.setAmpValues(ampName, rawExpTime=[expTime], rawMean=[muDiff],
                                               rawVar=[varDiff], inputExpIdPair=[(expId1, expId2)],
                                               expIdMask=[expIdMask], covArray=covArray,
                                               covSqrtWeights=covSqrtWeights, gain=gain,
                                               noise=readNoiseDict[ampName])
            # Use the location of exp1 to save the PTC dataset from the
            # (exp1, exp2) pair. Below, np.where(expId1 == np.array(inputDims))
            # returns a tuple with a single-element array, so [0][0]
            # is necessary to extract the required index.
            datasetIndex = np.where(expId1 == np.array(inputDims))[0][0]
            # `partialPtcDatasetList` is a list of
            # `PhotonTransferCurveDataset` objects. Some of them
            # will be dummy datasets (to match the length of the input
            # and output references), and the rest will contain the
            # mean signal, variance, and covariance measurements at a
            # given exposure time. The next part of the PTC-measurement
            # pipeline, `solve`, will take this list as input and
            # assemble the measurements in the datasets in a form
            # suitable for fitting a PTC model.
            partialPtcDataset.updateMetadata(setDate=True, detector=detector)
            partialPtcDatasetList[datasetIndex] = partialPtcDataset

            if nAmpsNan == len(ampNames):
                msg = f"NaN mean in all amps of exposure pair {expId1}, {expId2} of detector {detNum}."
                self.log.warning(msg)
        return pipeBase.Struct(
            outputCovariances=partialPtcDatasetList,
        )
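
    # Worked example (with made-up exposure IDs) of the dummy-dataset padding
    # done in `run` above: for inputDims = [101, 102, 103, 104] and flat pairs
    # (101, 102) and (103, 104), np.where(expId1 == np.array(inputDims))[0][0]
    # yields datasetIndex 0 and 2, so the output list becomes
    #
    #     [PARTIAL(101, 102), DUMMY, PARTIAL(103, 104), DUMMY]
    #
    # keeping the number of output datasets equal to the number of input
    # exposures, as the connections require.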

    def makeCovArray(self, inputTuple, maxRangeFromTuple):
        """Make covariances array from tuple.

        Parameters
        ----------
        inputTuple : `numpy.ndarray`
            Structured array with rows with at least
            (mu, afwVar, cov, var, i, j, npix), where:
                mu : `float`
                    0.5*(mu1 + mu2), where mu1 is the mean value of flat1
                    and mu2 is the mean value of flat2.
                afwVar : `float`
                    Variance of difference flat, calculated with afw.
                cov : `float`
                    Covariance value at lag (i, j).
                var : `float`
                    Variance (covariance value at lag (0, 0)).
                i : `int`
                    Lag in dimension "x".
                j : `int`
                    Lag in dimension "y".
                npix : `int`
                    Number of pixels used for covariance calculation.
        maxRangeFromTuple : `int`
            Maximum range to select from tuple.

        Returns
        -------
        cov : `numpy.array`
            Covariance arrays, indexed by mean signal mu.
        vCov : `numpy.array`
            Variance arrays, indexed by mean signal mu.
        muVals : `numpy.array`
            List of mean signal values.
        """
        if maxRangeFromTuple is not None:
            cut = (inputTuple['i'] < maxRangeFromTuple) & (inputTuple['j'] < maxRangeFromTuple)
            cutTuple = inputTuple[cut]
        else:
            cutTuple = inputTuple
        # Increasing mu order, so that we can group measurements with the
        # same mu.
        muTemp = cutTuple['mu']
        ind = np.argsort(muTemp)

        cutTuple = cutTuple[ind]
        # This should group measurements on the same image pairs (same average).
        mu = cutTuple['mu']
        xx = np.hstack(([mu[0]], mu))
        delta = xx[1:] - xx[:-1]
        steps, = np.where(delta > 0)
        ind = np.zeros_like(mu, dtype=int)
        ind[steps] = 1
        ind = np.cumsum(ind)  # This acts as an image pair index.
        # Now fill the 3-d cov array (and variance).
        muVals = np.array(np.unique(mu))
        i = cutTuple['i'].astype(int)
        j = cutTuple['j'].astype(int)
        c = 0.5*cutTuple['cov']
        n = cutTuple['npix']
        v = 0.5*cutTuple['var']
        # Book and fill.
        cov = np.ndarray((len(muVals), np.max(i)+1, np.max(j)+1))
        var = np.zeros_like(cov)
        cov[ind, i, j] = c
        var[ind, i, j] = v**2/n
        var[:, 0, 0] *= 2  # var(v) = 2*v**2/N

        return cov, var, muVals
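
    # Worked example of the pair-index construction in `makeCovArray` above,
    # with made-up numbers: if the sorted mean values are
    # mu = [10., 10., 10., 20., 20.], then xx = [10., 10., 10., 10., 20., 20.],
    # delta = [0., 0., 0., 10., 0.], and the only positive step is at position
    # 3, so ind = np.cumsum([0, 0, 0, 1, 0]) = [0, 0, 0, 1, 1]: rows sharing
    # the same mean (i.e., the same flat pair) are assigned the same index
    # along the first axis of the 3-d covariance array.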

    def measureMeanVarCov(self, im1Area, im2Area, imStatsCtrl, mu1, mu2):
        """Calculate the mean of each of two exposures and the variance
        and covariance of their difference. The variance is calculated
        via afwMath, and the covariance via the methods in Astier+19
        (appendix A). In theory, var = covariance[0,0]. This should
        be validated, and in the future, we may decide to just keep
        one (covariance).

        Parameters
        ----------
        im1Area : `lsst.afw.image.maskedImage.MaskedImageF`
            Masked image from exposure 1.
        im2Area : `lsst.afw.image.maskedImage.MaskedImageF`
            Masked image from exposure 2.
        imStatsCtrl : `lsst.afw.math.StatisticsControl`
            Statistics control object.
        mu1 : `float`
            Clipped mean of im1Area (ADU).
        mu2 : `float`
            Clipped mean of im2Area (ADU).

        Returns
        -------
        mu : `float` or `NaN`
            0.5*(mu1 + mu2), where mu1 and mu2 are the clipped means
            of the regions in both exposures. If either mu1 or mu2 is
            NaN, the returned value is NaN.
        varDiff : `float` or `NaN`
            Half of the clipped variance of the difference of the
            regions in the two input exposures. If either mu1 or mu2
            is NaN, the returned value is NaN.
        covDiffAstier : `list` or `NaN`
            List with tuples of the form (dx, dy, var, cov, npix), where:
                dx : `int`
                    Lag in x.
                dy : `int`
                    Lag in y.
                var : `float`
                    Variance at (dx, dy).
                cov : `float`
                    Covariance at (dx, dy).
                nPix : `int`
                    Number of pixel pairs used to evaluate var and cov.

            If either mu1 or mu2 is NaN, the returned value is NaN.
        """
        if np.isnan(mu1) or np.isnan(mu2):
            self.log.warning("Mean of amp in image 1 or 2 is NaN: %f, %f.", mu1, mu2)
            return np.nan, np.nan, None
        mu = 0.5*(mu1 + mu2)

        # Take the difference of the pair, using the symmetric formula:
        # diff = (mu2*im1 - mu1*im2)/(0.5*(mu1 + mu2))
        temp = im2Area.clone()
        temp *= mu1
        diffIm = im1Area.clone()
        diffIm *= mu2
        diffIm -= temp
        diffIm /= mu

        # Variance calculation via afwMath
        varDiff = 0.5*(afwMath.makeStatistics(diffIm, afwMath.VARIANCECLIP, imStatsCtrl).getValue())

        # Covariances calculations
        # Get the pixels that were not clipped
        varClip = afwMath.makeStatistics(diffIm, afwMath.VARIANCECLIP, imStatsCtrl).getValue()
        meanClip = afwMath.makeStatistics(diffIm, afwMath.MEANCLIP, imStatsCtrl).getValue()
        cut = meanClip + self.config.nSigmaClipPtc*np.sqrt(varClip)
        unmasked = np.where(np.fabs(diffIm.image.array) <= cut, 1, 0)

        # Get the pixels in the mask planes of the difference image
        # that were ignored by the clipping algorithm
        wDiff = np.where(diffIm.getMask().getArray() == 0, 1, 0)
        # Combine the two sets of pixels ('1': use; '0': don't use)
        # into a final weight matrix to be used in the covariance
        # calculations below.
        w = unmasked*wDiff

        if np.sum(w) < self.config.minNumberGoodPixelsForCovariance:
            self.log.warning("Number of good points for covariance calculation (%s) is less "
                             "than the threshold (%s).", np.sum(w),
                             self.config.minNumberGoodPixelsForCovariance)
            return np.nan, np.nan, None

        maxRangeCov = self.config.maximumRangeCovariancesAstier

        # Calculate covariances via FFT.
        shapeDiff = np.array(diffIm.image.array.shape)
        # Calculate the sizes of FFT dimensions.
        s = shapeDiff + maxRangeCov
        tempSize = np.array(np.log(s)/np.log(2.)).astype(int)
        fftSize = np.array(2**(tempSize+1)).astype(int)
        fftShape = (fftSize[0], fftSize[1])

        c = CovFastFourierTransform(diffIm.image.array, w, fftShape, maxRangeCov)
        covDiffAstier = c.reportCovFastFourierTransform(maxRangeCov)

        # Compare Cov[0,0] and afwMath.VARIANCECLIP: covDiffAstier[0]
        # is the Cov[0,0] element, [3] is the variance, and there's a
        # factor of 0.5 difference with afwMath.VARIANCECLIP.
        thresholdPercentage = self.config.thresholdDiffAfwVarVsCov00
        fractionalDiff = 100*np.fabs(1 - varDiff/(covDiffAstier[0][3]*0.5))
        if fractionalDiff >= thresholdPercentage:
            self.log.warning("Absolute fractional difference between afwMath.VARIANCECLIP and Cov[0,0] "
                             "is more than %f%%: %f", thresholdPercentage, fractionalDiff)

        return mu, varDiff, covDiffAstier
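
    # Illustrative sketch (with a made-up region shape) of the FFT-size
    # computation in `measureMeanVarCov` above: for a 2000 x 512 amplifier
    # region and maximumRangeCovariancesAstier = 8, s = [2008, 520],
    # tempSize = int(log2(s)) = [10, 9], and fftSize = 2**(tempSize + 1)
    # = [2048, 1024], i.e., the difference image is padded up to the next
    # power of two above (shape + maximum lag) before the FFT-based
    # covariance measurement.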

    def getImageAreasMasksStats(self, exposure1, exposure2, region=None):
        """Get image areas in a region as well as masks and statistic objects.

        Parameters
        ----------
        exposure1 : `lsst.afw.image.exposure.ExposureF`
            First exposure of flat field pair.
        exposure2 : `lsst.afw.image.exposure.ExposureF`
            Second exposure of flat field pair.
        region : `lsst.geom.Box2I`, optional
            Region of each exposure in which to perform the calculations
            (e.g., an amplifier).

        Returns
        -------
        im1Area : `lsst.afw.image.maskedImage.MaskedImageF`
            Masked image from exposure 1.
        im2Area : `lsst.afw.image.maskedImage.MaskedImageF`
            Masked image from exposure 2.
        imStatsCtrl : `lsst.afw.math.StatisticsControl`
            Statistics control object.
        mu1 : `float`
            Clipped mean of im1Area (ADU).
        mu2 : `float`
            Clipped mean of im2Area (ADU).
        """
        if region is not None:
            im1Area = exposure1.maskedImage[region]
            im2Area = exposure2.maskedImage[region]
        else:
            im1Area = exposure1.maskedImage
            im2Area = exposure2.maskedImage

        if self.config.binSize > 1:
            im1Area = afwMath.binImage(im1Area, self.config.binSize)
            im2Area = afwMath.binImage(im2Area, self.config.binSize)

        # Get the mask planes and construct the statistics control object from
        # one of the exposures.
        imMaskVal = exposure1.getMask().getPlaneBitMask(self.config.maskNameList)
        imStatsCtrl = afwMath.StatisticsControl(self.config.nSigmaClipPtc,
                                                self.config.nIterSigmaClipPtc,
                                                imMaskVal)
        imStatsCtrl.setNanSafe(True)
        imStatsCtrl.setAndMask(imMaskVal)

        mu1 = afwMath.makeStatistics(im1Area, afwMath.MEANCLIP, imStatsCtrl).getValue()
        mu2 = afwMath.makeStatistics(im2Area, afwMath.MEANCLIP, imStatsCtrl).getValue()

        return (im1Area, im2Area, imStatsCtrl, mu1, mu2)

    def getGainFromFlatPair(self, im1Area, im2Area, imStatsCtrl, mu1, mu2,
                            correctionType='NONE', readNoise=None):
        """Estimate the gain from a single pair of flats.

        The basic premise is 1/g = <(I1 - I2)^2/(I1 + I2)> = const,
        where I1 and I2 correspond to flats 1 and 2, respectively.
        Corrections for the variable QE and the read-noise are then
        made following the derivation in Robert Lupton's forthcoming
        book, which gives

        1/g = <(I1 - I2)^2/(I1 + I2)> - (1/mu)*(sigma^2 - 1/(2*g^2)).

        This is a quadratic equation in g, whose solutions are given by:

        g = (mu +/- sqrt(mu^2 - 2*const*mu + 2*sigma^2))
            / (2*const*mu - 2*sigma^2),

        where 'mu' is the average signal level and 'sigma' is the
        amplifier's readnoise. The positive solution will be used.
        The way the correction is applied depends on the value
        supplied for correctionType.

        correctionType is one of ['NONE', 'SIMPLE', 'FULL']:
            'NONE' : uses the 1/g = <(I1 - I2)^2/(I1 + I2)> formula.
            'SIMPLE' : uses the gain from the 'NONE' method for the
                       1/(2*g^2) term.
            'FULL' : solves the full equation for g, discarding the
                     non-physical solution to the resulting quadratic.

        Parameters
        ----------
        im1Area : `lsst.afw.image.maskedImage.MaskedImageF`
            Masked image from exposure 1.
        im2Area : `lsst.afw.image.maskedImage.MaskedImageF`
            Masked image from exposure 2.
        imStatsCtrl : `lsst.afw.math.StatisticsControl`
            Statistics control object.
        mu1 : `float`
            Clipped mean of im1Area (ADU).
        mu2 : `float`
            Clipped mean of im2Area (ADU).
        correctionType : `str`, optional
            The correction applied, one of ['NONE', 'SIMPLE', 'FULL'].
        readNoise : `float`, optional
            Amplifier readout noise (ADU).

        Returns
        -------
        gain : `float`
            Gain, in e/ADU.

        Raises
        ------
        RuntimeError
            Raised if `correctionType` is not one of 'NONE',
            'SIMPLE', or 'FULL'.
        """
        if correctionType not in ['NONE', 'SIMPLE', 'FULL']:
            raise RuntimeError("Unknown correction type: %s" % correctionType)

        if correctionType != 'NONE' and readNoise is None:
            self.log.warning("'correctionType' in 'getGainFromFlatPair' is %s, "
                             "but 'readNoise' is 'None'. Setting 'correctionType' "
                             "to 'NONE', so a gain value will be estimated without "
                             "corrections." % correctionType)
            correctionType = 'NONE'

        mu = 0.5*(mu1 + mu2)

        # ratioIm = (I1 - I2)^2 / (I1 + I2)
        temp = im2Area.clone()
        ratioIm = im1Area.clone()
        ratioIm -= temp
        ratioIm *= ratioIm

        # Sum of the pair
        sumIm = im1Area.clone()
        sumIm += temp

        ratioIm /= sumIm

        const = afwMath.makeStatistics(ratioIm, afwMath.MEANCLIP, imStatsCtrl).getValue()
        gain = 1. / const

        if correctionType == 'SIMPLE':
            gain = 1/(const - (1/mu)*(readNoise**2 - 1/(2*gain**2)))
        elif correctionType == 'FULL':
            root = np.sqrt(mu**2 - 2*mu*const + 2*readNoise**2)
            denom = (2*const*mu - 2*readNoise**2)
            positiveSolution = (root + mu)/denom
            gain = positiveSolution

        return gain
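
    # Numerical sketch (made-up values) of the 'FULL' correction in
    # `getGainFromFlatPair` above: for mu = 10000 ADU, const = 0.5 and
    # readNoise = 5 ADU,
    #
    #     root  = sqrt(10000**2 - 2*10000*0.5 + 2*5**2) ~ 9999.5
    #     denom = 2*0.5*10000 - 2*5**2 = 9950
    #     gain  = (root + mu)/denom ~ 2.010 e/ADU,
    #
    # a small upward correction with respect to the uncorrected estimate
    # 1/const = 2.0 e/ADU.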

    def getReadNoiseFromMetadata(self, taskMetadata, ampName):
        """Get the readout noise for an amp from ISR metadata.

        Parameters
        ----------
        taskMetadata : `list` [`lsst.pipe.base.TaskMetadata`]
            List of exposure metadata from ISR.
        ampName : `str`
            Amplifier name.

        Returns
        -------
        readNoise : `float`
            Median of the overscan readnoise in the
            post-ISR metadata of the input exposures (ADU).
            Returns `None` if the median could not be calculated.
        """
        # Empirical readout noise [ADU] measured from an
        # overscan-subtracted overscan during ISR.
        expectedKey = f"RESIDUAL STDEV {ampName}"

        readNoises = []
        for expMetadata in taskMetadata:
            if 'isr' in expMetadata:
                overscanNoise = expMetadata['isr'][expectedKey]
            else:
                continue
            readNoises.append(overscanNoise)

        if len(readNoises):
            readNoise = np.median(np.array(readNoises))
        else:
            self.log.warning("Median readout noise from ISR metadata for amp %s "
                             "could not be calculated." % ampName)
            readNoise = None

        return readNoise
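
    # Illustrative sketch (hypothetical amplifier name) of the lookup performed
    # by `getReadNoiseFromMetadata` above: for ampName = 'C00' the expected key
    # is "RESIDUAL STDEV C00", and the returned value is the median of
    # expMetadata['isr']["RESIDUAL STDEV C00"] over all input exposures whose
    # metadata contain an 'isr' entry.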