Coverage for python / lsst / cp / pipe / ptc / cpPtcExtract.py: 11%
586 statements
« prev ^ index » next coverage.py v7.13.5, created at 2026-05-05 08:44 +0000
« prev ^ index » next coverage.py v7.13.5, created at 2026-05-05 08:44 +0000
1# This file is part of cp_pipe.
2#
3# Developed for the LSST Data Management System.
4# This product includes software developed by the LSST Project
5# (https://www.lsst.org).
6# See the COPYRIGHT file at the top-level directory of this distribution
7# for details of code ownership.
8#
9# This program is free software: you can redistribute it and/or modify
10# it under the terms of the GNU General Public License as published by
11# the Free Software Foundation, either version 3 of the License, or
12# (at your option) any later version.
13#
14# This program is distributed in the hope that it will be useful,
15# but WITHOUT ANY WARRANTY; without even the implied warranty of
16# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17# GNU General Public License for more details.
18#
19# You should have received a copy of the GNU General Public License
20# along with this program. If not, see <https://www.gnu.org/licenses/>.
21#
22from collections import defaultdict
23import logging
24import numpy as np
25from lmfit.models import GaussianModel
26import scipy.stats
27import warnings
29import lsst.afw.math as afwMath
30import lsst.afw.cameraGeom
31import lsst.pex.config as pexConfig
32import lsst.pipe.base as pipeBase
33from lsst.geom import (Box2I, Point2I, Extent2I)
34from lsst.cp.pipe.utils import (arrangeFlatsByExpTime, arrangeFlatsByExpId,
35 arrangeFlatsByExpFlux, sigmaClipCorrection,
36 CovFastFourierTransform, getReadNoise,
37 bin_flat)
38from lsst.cp.pipe.utilsEfd import CpEfdClient
40import lsst.pipe.base.connectionTypes as cT
42from lsst.ip.isr import PhotonTransferCurveDataset
43from lsst.ip.isr import IsrTask
# Public API of this module: the two extract tasks and their configs.
__all__ = [
    'PhotonTransferCurveExtractConfig',
    'PhotonTransferCurveExtractTask',
    'PhotonTransferCurveExtractPairConfig',
    'PhotonTransferCurveExtractPairTask',
]
class PhotonTransferCurveExtractConnectionsBase(pipeBase.PipelineTaskConnections,
                                                dimensions=("instrument", "detector")):
    """Base connections shared by the PTC extract tasks.

    The photodiode input connection is removed at construction time
    when ``config.doExtractPhotodiodeData`` is False.
    """
    inputExp = cT.Input(
        name="ptcInputExposurePairs",
        # Fixed missing space in the implicit string concatenation
        # (previously rendered as "...tomeasure covariances...").
        doc="Input post-ISR processed exposure pairs (flats) to "
            "measure covariances from.",
        storageClass="Exposure",
        dimensions=("instrument", "exposure", "detector"),
        multiple=True,
        deferLoad=True,
    )
    inputPhotodiodeData = cT.Input(
        name="photodiode",
        doc="Photodiode readings data.",
        storageClass="IsrCalib",
        dimensions=("instrument", "exposure"),
        multiple=True,
        deferLoad=True,
    )

    def __init__(self, *, config=None):
        # Chain to PipelineTaskConnections.__init__ so the base class
        # bookkeeping runs; the Pair subclass already relies on this
        # chain via its own super().__init__(config=config) call.
        super().__init__(config=config)
        if not config.doExtractPhotodiodeData:
            # Photodiode data was not requested; drop the connection so
            # the middleware does not try to provide it.
            del self.inputPhotodiodeData
class PhotonTransferCurveExtractConnections(
    PhotonTransferCurveExtractConnectionsBase,
    dimensions=("instrument", "detector"),
):
    """Connections for the multi-pair extract task.

    Emits one (possibly partial/dummy) PTC dataset per input exposure
    (``multiple=True``) so inputs and outputs match up bijectively.
    """
    outputCovariances = cT.Output(
        name="ptcCovariances",
        doc="Extracted flat (co)variances.",
        storageClass="PhotonTransferCurveDataset",
        dimensions=("instrument", "exposure", "detector"),
        isCalibration=True,
        multiple=True,
    )
class PhotonTransferCurveExtractPairConnections(
    PhotonTransferCurveExtractConnectionsBase,
    dimensions=("instrument", "detector", "exposure"),
):
    """Connections for the per-pair extract task.

    Quanta are defined per (instrument, detector, exposure);
    `adjust_all_quanta` merges the two exposures of each flat pair
    into a single quantum at graph-building time.
    """
    outputCovariance = cT.Output(
        name="ptcCovariance",
        doc="Extracted flat (co)variance.",
        storageClass="PhotonTransferCurveDataset",
        dimensions=("instrument", "exposure", "detector"),
        isCalibration=True,
    )
    outputBinnedImages = cT.Output(
        name="cpPtcPairBinned",
        doc="Tabular binned images for the PTC pair.",
        storageClass="ArrowAstropy",
        dimensions=("instrument", "exposure", "detector"),
    )

    def __init__(self, *, config=None):
        super().__init__(config=config)

        # Binned-image output is optional; drop the connection when off.
        if not config.doOutputBinnedImages:
            del self.outputBinnedImages

    def adjust_all_quanta(self, adjuster):
        """Rearrange the per-exposure quanta so that each surviving
        quantum holds the inputs of both flats of a pair.

        Exposures that are not of observation type "flat" are dropped.
        Pairing then follows ``config.matchExposuresType``:

        * ``TIME``: exposures whose exposure times agree within 5e-3 s
          are paired; a third (or later) match is dropped with a
          warning.
        * ``EXPID``: exposures are paired consecutively in
          exposure-ID-sorted order.
        * ``FLUX``: each quantum is handed its neighboring exposures'
          inputs (triplets); the final header-based pair selection is
          deferred to runQuantum (see the inline comments below).

        Parameters
        ----------
        adjuster :
            Planner-provided helper exposing ``iter_data_ids``,
            ``expand_quantum_data_id``, ``get_inputs``, ``add_input``
            and ``remove_quantum``.
        """
        _LOG = logging.getLogger(__name__)

        def _removeExposure(exposure, pop=False, msg=None):
            """Remove an exposure from the quanta.

            Parameters
            ----------
            exposure : `int`
                Exposure key to quantumIdDict.
            pop : `bool`, optional
                Pop the exposure from the dict? Otherwise change
                to an empty dict.
            msg : `str`, optional
                Warning string to log with exposure number.
            """
            if msg is not None:
                _LOG.warning("Exposure %d %s; dropping.", exposure, msg)

            # Remove all quanta associated with this exposure.
            for key, element in quantumIdDict[exposure].items():
                adjuster.remove_quantum(element)
            if pop:
                # Pop it completely from the dict.
                quantumIdDict.pop(exposure)
            else:
                # Set to empty dict.
                quantumIdDict[exposure] = {}

        def _addInputs(sourceExposure, targetExposure, remove=False):
            """Add inputs to a quantum.

            Parameters
            ----------
            sourceExposure : `int`
                Take the quanta from this source exposure.
            targetExposure : `int`
                And add them to the quanta for this target exposure.
            remove : `bool`, optional
                Remove the sourceExposure from the quantum dict?
            """
            for detector in quantumIdDict[sourceExposure].keys():
                inputs = adjuster.get_inputs(quantumIdDict[sourceExposure][detector])
                if detector in quantumIdDict[targetExposure]:
                    adjuster.add_input(
                        quantumIdDict[targetExposure][detector],
                        "inputExp",
                        inputs["inputExp"][0],
                    )
                    if self.config.doExtractPhotodiodeData:
                        adjuster.add_input(
                            quantumIdDict[targetExposure][detector],
                            "inputPhotodiodeData",
                            inputs["inputPhotodiodeData"][0],
                        )
                else:
                    _LOG.warning("Exposure %d does not include detector %d", targetExposure, detector)
                if remove:
                    adjuster.remove_quantum(quantumIdDict[sourceExposure][detector])

        # Build a dict keyed by exposure.
        # Each entry is a dict of {detector: quantumId}
        # And everything will be sorted by exposure and detector.
        quantumIdDict = defaultdict(dict)
        for quantumId in sorted(adjuster.iter_data_ids(), key=lambda d: (d["exposure"], d["detector"])):
            exposure = quantumId["exposure"]
            quantumIdDict[exposure][quantumId["detector"]] = quantumId

        # Ensure we are only using "flat" image types.
        for exposure in list(quantumIdDict.keys()):
            # We only need to check the first detector in each exposure list.
            firstDet = next(iter(quantumIdDict[exposure].keys()))
            obsType = adjuster.expand_quantum_data_id(
                quantumIdDict[exposure][firstDet],
            ).exposure.observation_type
            if obsType != "flat":
                _removeExposure(exposure, pop=True, msg="is not of 'flat' type")

        if self.config.matchExposuresType == "TIME":
            # Extract the exposure times.
            exposureTimes = []
            for exposure in quantumIdDict.keys():
                firstDet = list(quantumIdDict[exposure].keys())[0]
                exposureTimes.append(
                    adjuster.expand_quantum_data_id(quantumIdDict[exposure][firstDet]).exposure.exposure_time,
                )

            previousExposure = -1
            previousExposureTime = -1.0
            nInPair = 1
            for i, exposure in enumerate(quantumIdDict.keys()):
                if np.isclose(exposureTimes[i], previousExposureTime, atol=5e-3):
                    # This is a match.
                    if nInPair == 2:
                        # We already have a pair!
                        _removeExposure(exposure, msg="already has two exptime matches")
                    else:
                        # Add the inputs from the current exposure quantum
                        # to the previous one, and remove the current one
                        # from the set to run.
                        _addInputs(exposure, previousExposure, remove=True)
                        nInPair += 1
                else:
                    nInPair = 1
                previousExposureTime = exposureTimes[i]
                previousExposure = exposure

        elif self.config.matchExposuresType == "EXPID":
            # Pair by ExpId sequence.
            exposures = list(quantumIdDict.keys())
            for i in range(len(exposures)):
                if (i % 2) == 1:
                    # This is the second of a pair.
                    _addInputs(exposures[i], exposures[i - 1], remove=True)

        elif self.config.matchExposuresType == "FLUX":
            # When matching by flux keyword, we do not yet have access
            # to the header information to uniquely pair the exposures.
            # We do know that exposure pairs will be back-to-back, so
            # we set up triplets and do the final sorting in runQuantum.

            # For each triplet, we take the index of the sorted exposures,
            # and set it up with the previous and next exposure.
            # In runQuantum we will then look at the headers and match.
            # If all 3 have the same keyword, we are confused and we warn
            # and raise NoWorkFound.
            # If all 3 have different keywords, we do not have a pair and
            # we warn and raise NoWorkFound.
            # If there is a matched pair, if the first in the pair has the
            # same exposure number as the quantum id, it is "primary" and
            # we continue.
            # If there is a matched pair, if the second in the pair has the
            # same exposure number as the quantum id, it is "secondary" and
            # we raise NoWorkFound.

            # For example, the first column is the quantum id and the next
            # is the triplet (or double at the boundaries).
            # 0: 0, 1 - this has a match (0 == 1), 0 == 0 do work
            # 1: 0, 1, 2 - this has a match (0 == 1), 0 != 1 -> no work
            # 2: 1, 2, 3 - this has a match (2 == 3), 2 == 2 -> do work
            # 3: 2, 3, 4 - this has a match (2 == 3), 2 != 3 -> no work
            # 4: 3, 4, 5 - this has a match (4 == 5), 4 == 4 -> do work
            # 5: 4, 5 - this has a match (4 == 5), 4 != 5 -> no work

            exposures = list(quantumIdDict.keys())
            for i in range(len(exposures)):
                if (i - 1) >= 0:
                    _addInputs(exposures[i - 1], exposures[i], remove=False)
                if (i + 1) < len(exposures):
                    _addInputs(exposures[i + 1], exposures[i], remove=False)
class PhotonTransferCurveExtractConfigBase(
    pipeBase.PipelineTaskConfig,
    pipelineConnections=PhotonTransferCurveExtractConnectionsBase
):
    """Configuration for the measurement of covariances from flats.
    """
    matchExposuresType = pexConfig.ChoiceField(
        dtype=str,
        doc="Match input exposures by time, flux, or expId",
        default='TIME',
        allowed={
            "TIME": "Match exposures by reported exposure time. Entries "
                    "that are within 5e-3 seconds.",
            "FLUX": "Match exposures by target flux. Use header keyword"
                    " in matchExposuresByFluxKeyword to find the flux.",
            "EXPID": "Match exposures by exposure ID."
        }
    )
    matchExposuresByFluxKeyword = pexConfig.Field(
        dtype=str,
        doc="Header keyword for flux if matchExposuresType is FLUX.",
        default='CCOBFLUX',
    )
    maximumRangeCovariancesAstier = pexConfig.Field(
        dtype=int,
        doc="Maximum range of covariances as in Astier+19",
        default=10,
    )
    binSize = pexConfig.Field(
        dtype=int,
        doc="Bin the image by this factor in both dimensions.",
        default=1,
    )
    maskNameList = pexConfig.ListField(
        dtype=str,
        doc="Mask list to exclude from statistics calculations.",
        default=['SUSPECT', 'BAD', 'NO_DATA', 'SAT'],
    )
    nSigmaClipPtc = pexConfig.Field(
        dtype=float,
        doc="Sigma cut for afwMath.StatisticsControl()",
        default=5.5,
    )
    nIterSigmaClipPtc = pexConfig.Field(
        dtype=int,
        doc="Number of sigma-clipping iterations for afwMath.StatisticsControl()",
        default=3,
    )
    minNumberGoodPixelsForCovariance = pexConfig.Field(
        dtype=int,
        doc="Minimum number of acceptable good pixels per amp to calculate the covariances (via FFT or"
            " directly).",
        default=10000,
    )
    thresholdDiffAfwVarVsCov00 = pexConfig.Field(
        dtype=float,
        doc="If the absolute fractional difference between afwMath.VARIANCECLIP and Cov00 "
            "for a region of a difference image is greater than this threshold (percentage), "
            "a warning will be issued.",
        default=1.,
    )
    detectorMeasurementRegion = pexConfig.ChoiceField(
        dtype=str,
        doc="Region of each exposure where to perform the calculations (amplifier or full image).",
        default='AMP',
        allowed={
            "AMP": "Amplifier of the detector.",
            "FULL": "Full image."
        }
    )
    numEdgeSuspect = pexConfig.Field(
        dtype=int,
        doc="Number of edge pixels to be flagged as untrustworthy.",
        default=0,
    )
    edgeMaskLevel = pexConfig.ChoiceField(
        dtype=str,
        doc="Mask edge pixels in which coordinate frame: DETECTOR or AMP?",
        default="DETECTOR",
        allowed={
            'DETECTOR': 'Mask only the edges of the full detector.',
            'AMP': 'Mask edges of each amplifier.',
        },
    )
    doGain = pexConfig.Field(
        dtype=bool,
        doc="Calculate a gain per input flat pair.",
        default=True,
    )
    gainCorrectionType = pexConfig.ChoiceField(
        dtype=str,
        doc="Correction type for the gain.",
        default='FULL',
        allowed={
            'NONE': 'No correction.',
            'SIMPLE': 'First order correction.',
            'FULL': 'Second order correction.'
        }
    )
    doKsHistMeasurement = pexConfig.Field(
        dtype=bool,
        doc="Do the Ks test of gaussianity? If False, kspValue will be filled "
            "with all 1.0s.",
        default=False,
    )
    ksHistNBins = pexConfig.Field(
        dtype=int,
        doc="Number of bins for the KS test histogram.",
        default=100,
    )
    ksHistLimitMultiplier = pexConfig.Field(
        dtype=float,
        doc="Number of sigma (as predicted from the mean value) to compute KS test histogram.",
        default=8.0,
    )
    ksHistMinDataValues = pexConfig.Field(
        dtype=int,
        doc="Minimum number of good data values to compute KS test histogram.",
        default=100,
    )
    auxiliaryHeaderKeys = pexConfig.ListField(
        dtype=str,
        doc="Auxiliary header keys to store with the PTC dataset.",
        default=[],
    )
    doExtractPhotodiodeData = pexConfig.Field(
        dtype=bool,
        doc="Extract photodiode data?",
        default=False,
    )
    useEfdPhotodiodeData = pexConfig.Field(
        dtype=bool,
        doc="Extract photodiode values from EFD?",
        default=False,
    )
    efdPhotodiodeSeries = pexConfig.Field(
        dtype=str,
        doc="EFD series to use to get photodiode values.",
        default="lsst.sal.Electrometer.logevent_logMessage",
    )
    efdSalIndex = pexConfig.Field(
        dtype=int,
        doc="EFD SAL Index to select electrometer. This is 101 for mainTel, 201 for auxTel.",
        default=201,
    )

    photodiodeIntegrationMethod = pexConfig.ChoiceField(
        dtype=str,
        doc="Integration method for photodiode monitoring data.",
        default="TRIMMED_SUM",
        allowed={
            "DIRECT_SUM": ("Use numpy's trapezoid integrator on all photodiode "
                           "readout entries"),
            "TRIMMED_SUM": ("Use numpy's trapezoid integrator, clipping the "
                            "leading and trailing entries, which are "
                            "nominally at zero baseline level."),
            "CHARGE_SUM": ("Treat the current values as integrated charge "
                           "over the sampling interval and simply sum "
                           "the values, after subtracting a baseline level."),
            # Bug fix: this entry previously used {...}, creating a
            # one-element `set` instead of the plain string that
            # ChoiceField's allowed dict expects.
            "MEAN": ("Take the average of the photodiode measurements and "
                     "multiply by the exposure time."),
        },
    )
    photodiodeCurrentScale = pexConfig.Field(
        dtype=float,
        doc="Scale factor to apply to photodiode current values for the "
            "``CHARGE_SUM``, ``TRIMMED_SUM``, and ``MEAN`` integration methods.",
        default=-1.0,
    )

    doVignetteFunctionRegionSelection = pexConfig.Field(
        dtype=bool,
        doc="Use vignette function to select PTC region?",
        default=False,
    )
    vignetteFunctionPolynomialCoeffs = pexConfig.ListField(
        dtype=float,
        doc="Polynomial terms for radial vignetting function. This is used with "
            "vig = np.polyval(coeffs, radius), where radius is in meters. This must be "
            "set if doVignetteFunctionRegionSelection is True.",
        default=None,
        optional=True,
    )
    vignetteFunctionRegionSelectionMinimumPixels = pexConfig.Field(
        dtype=int,
        doc="Minimum number of pixels to select for vignette function region selection.",
        default=250_000,
    )
    vignetteFunctionRegionSelectionPercent = pexConfig.Field(
        dtype=float,
        doc="Vignetting function variation over the focal plane that is determined to be "
            "satisfactory. If this does not yield vignetteFunctionRegionSelectionMinimumPixels "
            "then this will be increased until vignetteFunctionRegionSelectionMinimumPixels is "
            "reached.",
        default=2.0,
    )

    def validate(self):
        """Validate the configuration.

        Raises
        ------
        ValueError
            Raised if mutually-exclusive photodiode options are both
            set, or if the vignetting polynomial is missing while
            vignette-based region selection is enabled.
        """
        super().validate()
        if self.doExtractPhotodiodeData and self.useEfdPhotodiodeData:
            # These get information from different places, so let's
            # disallow both being true.
            raise ValueError("doExtractPhotodiodeData and useEfdPhotodiodeData are mutually exclusive.")

        if self.doVignetteFunctionRegionSelection and self.vignetteFunctionPolynomialCoeffs is None:
            # Fixed message: the field is named
            # vignetteFunctionPolynomialCoeffs (the message previously
            # referred to a non-existent "...PolynomialTerms").
            raise ValueError(
                "vignetteFunctionPolynomialCoeffs must be set if "
                "doVignetteFunctionRegionSelection is True.",
            )
class PhotonTransferCurveExtractConfig(
    PhotonTransferCurveExtractConfigBase,
    pipelineConnections=PhotonTransferCurveExtractConnections,
):
    # Identical to the base config; only the pipeline connections
    # differ (one covariance output per input exposure).
    pass
class PhotonTransferCurveExtractPairConfig(
    PhotonTransferCurveExtractConfigBase,
    pipelineConnections=PhotonTransferCurveExtractPairConnections,
):
    """Configuration for the per-pair covariance extraction task,
    adding optional binned-image output to the base config.
    """
    doOutputBinnedImages = pexConfig.Field(
        dtype=bool,
        doc="Output binned images in tabular form?",
        default=False,
    )
    outputBinnedImagesBinFactor = pexConfig.Field(
        dtype=int,
        doc="Output binned images bin factor.",
        default=8,
    )
504class PhotonTransferCurveExtractTaskBase(pipeBase.PipelineTask):
505 """Task to measure covariances from flat fields.
507 This task receives as input a list of flat-field images
508 (flats), and sorts these flats in pairs taken at the
509 same time (the task will raise if there is one one flat
510 at a given exposure time, and it will discard extra flats if
511 there are more than two per exposure time). This task measures
512 the mean, variance, and covariances from a region (e.g.,
513 an amplifier) of the difference image of the two flats with
514 the same exposure time (alternatively, all input images could have
515 the same exposure time but their flux changed).
517 The variance is calculated via afwMath, and the covariance
518 via the methods in Astier+19 (appendix A). In theory,
519 var = covariance[0,0]. This should be validated, and in the
520 future, we may decide to just keep one (covariance).
521 At this moment, if the two values differ by more than the value
522 of `thresholdDiffAfwVarVsCov00` (default: 1%), a warning will
523 be issued.
525 The measured covariances at a given exposure time (along with
526 other quantities such as the mean) are stored in a PTC dataset
527 object (`~lsst.ip.isr.PhotonTransferCurveDataset`), which gets
528 partially filled at this stage (the remainder of the attributes
529 of the dataset will be filled after running the second task of
530 the PTC-measurement pipeline, `~PhotonTransferCurveSolveTask`).
532 The number of partially-filled
533 `~lsst.ip.isr.PhotonTransferCurveDataset` objects will be less
534 than the number of input exposures because the task combines
535 input flats in pairs. However, it is required at this moment
536 that the number of input dimensions matches
537 bijectively the number of output dimensions. Therefore, a number
538 of "dummy" PTC datasets are inserted in the output list. This
539 output list will then be used as input of the next task in the
540 PTC-measurement pipeline, `PhotonTransferCurveSolveTask`,
541 which will assemble the multiple `PhotonTransferCurveDataset`
542 objects into a single one in order to fit the measured covariances
543 as a function of flux to one of three models
544 (see `PhotonTransferCurveSolveTask` for details).
546 Reference: Astier+19: "The Shape of the Photon Transfer Curve of CCD
547 sensors", arXiv:1905.08677.
548 """
550 ConfigClass = PhotonTransferCurveExtractConfigBase
551 _DefaultName = 'cpPtcExtractBase'
    def _extractPhotoChargeDict(self, inputPhotodiodeData, inputExp):
        """Extract the photodiode charge.

        Parameters
        ----------
        inputPhotodiodeData : `dict` [`str`,
                `lsst.pipe.base.connections.DeferredDatasetRef`]
            Photodiode readings data (optional).
        inputExp : `dict` [`float`, `list`
                [`~lsst.pipe.base.connections.DeferredDatasetRef`]]
            Dictionary that groups references to flat-field exposures.

        Returns
        -------
        photoChargeDict : `dict` [`int`, `float`]
            Dict of photocharge values, keyed by expId. May be empty.
        """
        # Extract the photodiode data if requested.
        photoChargeDict = {}
        if self.config.doExtractPhotodiodeData:
            # Compute the photodiode integrals once, at the start.
            # NOTE(review): zip over inputExp iterates dict *keys* if a
            # dict is passed; this branch appears to expect parallel
            # sequences of handles -- confirm against the caller.
            for pdHandle, expHandle in zip(inputPhotodiodeData, inputExp):
                expId = pdHandle.dataId['exposure']
                pdCalib = pdHandle.get()
                pdCalib.integrationMethod = self.config.photodiodeIntegrationMethod
                pdCalib.currentScale = self.config.photodiodeCurrentScale
                visitInfo = expHandle.get(component="visitInfo")
                exposureTime = visitInfo.getExposureTime()
                photoChargeDict[expId] = pdCalib.integrate(exposureTime=exposureTime)
        elif self.config.useEfdPhotodiodeData:
            # Query the Engineering Facility Database for electrometer
            # readings spanning (with margin) all input exposures.
            client = CpEfdClient()
            obsDates = {}
            obsMin = None
            obsMax = None
            for _, expList in inputExp.items():
                for ref, refId in expList:
                    ts = ref.dataId.exposure.timespan
                    dt = ts.end - ts.begin
                    obsDates[refId] = {
                        'mid': ts.begin + dt / 2.0,
                        'dt': dt,
                        'begin': ts.begin,
                        'end': ts.end,
                        'exptime': ref.dataId.exposure.exposure_time,
                    }
                    # Pad the query window by one exposure duration on
                    # each side.
                    if not obsMin or ts.begin < obsMin:
                        obsMin = ts.begin - dt
                    if not obsMax or ts.end > obsMax:
                        obsMax = ts.end + dt

            pdData = client.getEfdElectrometerData(dataSeries=self.config.efdPhotodiodeSeries,
                                                   dateMin=obsMin.isot,
                                                   dateMax=obsMax.isot)
            for expId, expDate in obsDates.items():
                # This returns matchDate, intensity, and optional
                # endDate. The matchDate is the most recent date
                # prior to that being searched.
                results = client.parseElectrometerStatus(pdData,
                                                         expDate['end'],
                                                         index=self.config.efdSalIndex)
                # The monitor diode charge is negative and a time average,
                # so we need to flip the sign and multiply by the exposure
                # time.
                photoChargeDict[expId] = -1.0 * results[1] * expDate['exptime']

            # The data can be large, so:
            del pdData
            del client

        return photoChargeDict
    def _extractOneFlatPair(
        self,
        expIds,
        exps,
        photoChargeDict,
        expTime,
    ):
        """Extract a single flat pair.

        Parameters
        ----------
        expIds : `tuple` [`int`]
            Tuple of two exposure IDs for the flat pair.
        exps : `tuple` [`lsst.afw.image.Exposure`]
            Tuple of two exposures for the flat pair.
        photoChargeDict : `dict` [`int`, `float`]
            Dict of photocharge values, keyed by expId. May be empty.
        expTime : `float`
            Exposure time.

        Returns
        -------
        partialPtcDataset : `lsst.ip.isr.PhotonTransferCurveDataset`
            PTC dataset for this flat pair.
        nAmpsNan : `int`
            Number of amps that have no measurements.
        """
        expId1, expId2 = expIds
        exp1, exp2 = exps

        # Determine the MJD of each exposure; an invalid date becomes
        # NaN (with a warning) rather than an error.
        date1 = exp1.visitInfo.getDate()
        if date1 is not None and date1.isValid():
            expMjd1 = date1.toAstropy().mjd
        else:
            self.log.warning("No valid date found for exposure %d", expId1)
            expMjd1 = np.nan
        date2 = exp2.visitInfo.getDate()
        if date2 is not None and date2.isValid():
            expMjd2 = date2.toAstropy().mjd
        else:
            self.log.warning("No valid date found for exposure %d", expId2)
            expMjd2 = np.nan
        inputExpPairMjdStart = min([expMjd1, expMjd2])

        self.log.info("Extracting PTC data from flat pair %d, %d", expId1, expId2)

        # These are the column names for `tupleRows` below.
        tags = [('mu', '<f8'), ('afwVar', '<f8'), ('i', '<i8'), ('j', '<i8'), ('var', '<f8'),
                ('cov', '<f8'), ('npix', '<i8'), ('ext', '<i8'), ('expTime', '<f8'), ('ampName', '<U3')]

        detector = exp1.getDetector()
        detNum = detector.getId()
        amps = detector.getAmplifiers()
        ampNames = [amp.getName() for amp in amps]

        # Mask pixels at the edge of the detector or of each amp
        if self.config.numEdgeSuspect > 0:
            self.isrTask.maskEdges(exp1, numEdgePixels=self.config.numEdgeSuspect,
                                   maskPlane="SUSPECT", level=self.config.edgeMaskLevel)
            self.isrTask.maskEdges(exp2, numEdgePixels=self.config.numEdgeSuspect,
                                   maskPlane="SUSPECT", level=self.config.edgeMaskLevel)

        # Mask "vignetted" subregions.
        if self.config.doVignetteFunctionRegionSelection:
            self._maskVignetteFunctionRegion([exp1, exp2])

        # Extract any metadata keys from the headers.
        # Only the first exposure's metadata is consulted.
        auxDict = {}
        metadata = exp1.getMetadata()
        for key in self.config.auxiliaryHeaderKeys:
            if key not in metadata:
                self.log.warning(
                    "Requested auxiliary keyword %s not found in exposure metadata for %d",
                    key,
                    expId1,
                )
                value = np.nan
            else:
                value = metadata[key]

            auxDict[key] = value

        # Pull key visitInfo data into the auxDict.
        visitInfo = exp1.info.getVisitInfo()
        auxDict["observationType"] = visitInfo.getObservationType()
        auxDict["observationReason"] = visitInfo.getObservationReason()
        auxDict["scienceProgram"] = visitInfo.getScienceProgram()

        nAmpsNan = 0
        partialPtcDataset = PhotonTransferCurveDataset(
            ampNames, 'PARTIAL',
            covMatrixSide=self.config.maximumRangeCovariancesAstier)

        # These should be identical, but we can use them both.
        maskValue1 = exp1.mask.getPlaneBitMask(self.config.maskNameList)
        maskValue2 = exp2.mask.getPlaneBitMask(self.config.maskNameList)

        # Extract photocharge for exposures.
        # The pair value is the mean of whichever of the two exposures
        # have entries in photoChargeDict; the delta needs both.
        photoCharge = np.nan
        photoChargeDelta = np.nan
        if self.config.doExtractPhotodiodeData or self.config.useEfdPhotodiodeData:
            nExps = 0
            photoCharge = 0.0
            for expId in [expId1, expId2]:
                if expId in photoChargeDict:
                    photoCharge += photoChargeDict[expId]
                    nExps += 1

            if nExps > 0:
                photoCharge /= nExps
            else:
                photoCharge = np.nan

            if nExps == 2:
                photoChargeDelta = photoChargeDict[expId2] - photoChargeDict[expId1]

        # Get the following statistics for each amp
        for ampNumber, amp in enumerate(detector):
            ampName = amp.getName()
            if self.config.detectorMeasurementRegion == 'AMP':
                region = amp.getBBox()
            elif self.config.detectorMeasurementRegion == 'FULL':
                region = None

            # Get masked image regions, masking planes, statistic control
            # objects, and clipped means. Calculate once to reuse in
            # `measureMeanVarCov` and `getGainFromFlatPair`.
            im1Area, im2Area, imStatsCtrl, mu1, mu2 = self.getImageAreasMasksStats(exp1, exp2,
                                                                                  region=region)

            nPixelCovariance1 = ((im1Area.mask.array & maskValue1) == 0).sum()
            nPixelCovariance2 = ((im2Area.mask.array & maskValue2) == 0).sum()

            if nPixelCovariance1 != nPixelCovariance2:
                raise RuntimeError("Inconsistent selections in pair of exposures.")

            readNoise1 = getReadNoise(exp1, ampName)
            readNoise2 = getReadNoise(exp2, ampName)
            if not np.isfinite(readNoise1) and not np.isfinite(readNoise2):
                # We allow the mean read noise to be nan if both read noise
                # values are nan, so suppress this warning (e.g. bad amps)
                meanReadNoise = np.nan
            else:
                meanReadNoise = np.nanmean([readNoise1, readNoise2])

            overscanMedianLevel1 = exp1.metadata.get(f'LSST ISR OVERSCAN SERIAL MEDIAN {ampName}', np.nan)
            overscanMedianLevel2 = exp2.metadata.get(f'LSST ISR OVERSCAN SERIAL MEDIAN {ampName}', np.nan)
            if not np.isfinite(overscanMedianLevel1) and \
               not np.isfinite(overscanMedianLevel2):
                # We allow the mean overscan median level to be nan if both
                # statistics are nan, so suppress this warning (e.g. bad amps)
                overscanMedianLevel = np.nan
            else:
                overscanMedianLevel = np.nanmean([overscanMedianLevel1, overscanMedianLevel2])

            # We demand that both mu1 and mu2 be finite and greater than 0.
            # NOTE(review): the parenthesization below computes
            # mu1 + mu2/2, not the pair mean (mu1 + mu2)/2, and neither
            # form actually enforces "both greater than 0" -- confirm
            # intent and consider (mu1 <= 0 or mu2 <= 0) instead.
            if not np.isfinite(mu1) or not np.isfinite(mu2) \
                    or ((np.nan_to_num(mu1) + np.nan_to_num(mu2)/2.) <= 0.0):
                self.log.warning(
                    "Illegal mean value(s) detected for amp %s on exposure pair %d/%d",
                    ampName,
                    expId1,
                    expId2,
                )
                partialPtcDataset.setAmpValuesPartialDataset(
                    ampName,
                    inputExpIdPair=(expId1, expId2),
                    rawExpTime=expTime,
                    expIdMask=False,
                    nPixelCovariance=nPixelCovariance1,
                    photoCharge=photoCharge,
                    photoChargeDelta=photoChargeDelta,
                )
                continue

            # `measureMeanVarCov` is the function that measures
            # the variance and covariances from a region of
            # the difference image of two flats at the same
            # exposure time. The variable `covAstier` that is
            # returned is of the form:
            # [(i, j, var (cov[0,0]), cov, npix) for (i,j) in
            # {maxLag, maxLag}^2].
            muDiff, varDiff, covAstier, rowMeanVariance = self.measureMeanVarCov(im1Area,
                                                                                 im2Area,
                                                                                 imStatsCtrl,
                                                                                 mu1,
                                                                                 mu2)
            # Estimate the gain from the flat pair
            if self.config.doGain:
                gain = self.getGainFromFlatPair(im1Area, im2Area, imStatsCtrl, mu1, mu2,
                                                correctionType=self.config.gainCorrectionType,
                                                readNoise=meanReadNoise)
            else:
                gain = np.nan

            # Correction factor for bias introduced by sigma
            # clipping.
            # Function returns 1/sqrt(varFactor), so it needs
            # to be squared. varDiff is calculated via
            # afwMath.VARIANCECLIP.
            varFactor = sigmaClipCorrection(self.config.nSigmaClipPtc)**2
            varDiff *= varFactor

            expIdMask = True
            # Mask data point at this mean signal level if
            # the signal, variance, or covariance calculations
            # from `measureMeanVarCov` resulted in NaNs.
            # NOTE(review): "rowMeanVariance is np.nan" is an identity
            # test; it only fires if the np.nan singleton itself was
            # assigned -- confirm that is guaranteed upstream,
            # otherwise np.isnan() would be the robust check.
            if (np.isnan(muDiff) or np.isnan(varDiff) or (covAstier is None)
                    or (rowMeanVariance is np.nan)):
                self.log.warning("NaN mean, var or rowmeanVariance, or None cov in amp %s "
                                 "in exposure pair %d, %d of "
                                 "detector %d.", ampName, expId1, expId2, detNum)
                nAmpsNan += 1
                expIdMask = False
                covArray = np.full((1, self.config.maximumRangeCovariancesAstier,
                                    self.config.maximumRangeCovariancesAstier), np.nan)
                covSqrtWeights = np.full_like(covArray, np.nan)

            if covAstier is not None:
                # Turn the tuples with the measured information
                # into covariance arrays.
                # covrow: (i, j, var (cov[0,0]), cov, npix)
                tupleRows = [(muDiff, varDiff) + covRow + (ampNumber, expTime,
                                                           ampName) for covRow in covAstier]
                tempStructArray = np.array(tupleRows, dtype=tags)

                covArray, vcov, _ = self.makeCovArray(tempStructArray,
                                                      self.config.maximumRangeCovariancesAstier)

                # The returned covArray should only have 1 entry;
                # raise if this is not the case.
                if covArray.shape[0] != 1:
                    raise RuntimeError("Serious programming error in covArray shape.")

                covSqrtWeights = np.nan_to_num(1./np.sqrt(vcov))

                # Correct covArray for sigma clipping:
                # 1) Apply varFactor twice for the whole covariance matrix
                covArray *= varFactor**2
                # 2) But, only once for the variance element of the
                # matrix, covArray[0, 0, 0] (so divide one factor out).
                # (the first 0 is because this is a 3D array for insertion into
                # the combined dataset).
                covArray[0, 0, 0] /= varFactor

            if expIdMask and self.config.doKsHistMeasurement:
                # Run the Gaussian histogram only if this is a legal
                # amplifier and configured to do so.
                histVar, histChi2Dof, kspValue = self.computeGaussianHistogramParameters(
                    im1Area,
                    im2Area,
                    imStatsCtrl,
                    mu1,
                    mu2,
                )
            else:
                histVar = np.nan
                histChi2Dof = np.nan
                if self.config.doKsHistMeasurement:
                    kspValue = 0.0
                else:
                    # When this is turned off, we will always allow it to
                    # pass downstream.
                    kspValue = 1.0

            # Get amp offsets if available.
            ampOffsetKey = f"LSST ISR AMPOFFSET PEDESTAL {ampName}"
            ampOffset1 = exp1.metadata.get(ampOffsetKey, np.nan)
            ampOffset2 = exp2.metadata.get(ampOffsetKey, np.nan)
            ampOffset = (ampOffset1 + ampOffset2) / 2.0

            partialPtcDataset.setAmpValuesPartialDataset(
                ampName,
                inputExpIdPair=(expId1, expId2),
                inputExpPairMjdStart=inputExpPairMjdStart,
                rawExpTime=expTime,
                rawMean=muDiff,
                rawVar=varDiff,
                rawDelta=mu2 - mu1,
                photoCharge=photoCharge,
                photoChargeDelta=photoChargeDelta,
                ampOffset=ampOffset,
                expIdMask=expIdMask,
                nPixelCovariance=nPixelCovariance1,
                covariance=covArray[0, :, :],
                covSqrtWeights=covSqrtWeights[0, :, :],
                gain=gain,
                noise=meanReadNoise,
                overscanMedianLevel=overscanMedianLevel,
                histVar=histVar,
                histChi2Dof=histChi2Dof,
                kspValue=kspValue,
                rowMeanVariance=rowMeanVariance,
            )

        partialPtcDataset.setAuxValuesPartialDataset(auxDict)

        return partialPtcDataset, nAmpsNan
def _maskVignetteFunctionRegion(self, exposures, maskPlane="SUSPECT"):
    """Mask regions with strong vignetting.

    A radial vignetting polynomial (``vignetteFunctionPolynomialCoeffs``)
    is evaluated over the focal-plane radius of every detector pixel;
    per amplifier, only the least-vignetted pixels (within
    ``vignetteFunctionRegionSelectionPercent`` of the amp maximum, grown
    until at least ``vignetteFunctionRegionSelectionMinimumPixels`` are
    selected) are left unmasked, and everything else is OR'd into
    ``maskPlane``.

    Parameters
    ----------
    exposures : `list` [`lsst.afw.image.Exposure`]
        Exposures to mask (all assumed to share the detector of the
        first entry).
    maskPlane : `str`, optional
        Mask plane to set.
    """
    detector = exposures[0].getDetector()
    coeffs = np.asarray(self.config.vignetteFunctionPolynomialCoeffs)

    # Pixel -> focal-plane mapping for this detector.
    transform = detector.getTransform(
        fromSys=lsst.afw.cameraGeom.PIXELS,
        toSys=lsst.afw.cameraGeom.FOCAL_PLANE,
    )
    nx = detector.getBBox().getWidth()
    ny = detector.getBBox().getHeight()

    # All (x, y) pixel coordinates, x-major, as a 2 x (nx*ny) array
    # for a single vectorized transform call.
    x = np.repeat(np.arange(nx, dtype=np.float64), ny)
    y = np.tile(np.arange(ny, dtype=np.float64), nx)
    xy = np.vstack((x, y))
    x2, y2 = np.vsplit(transform.getMapping().applyForward(xy), 2)
    # Convert from mm to meters for the coefficients.
    fpRad = np.sqrt((x2.ravel()/1000.)**2. + (y2.ravel()/1000.)**2.)

    # Vignetting function image; reshape is (nx, ny) then transposed
    # because the flattened coordinates above are x-major while the
    # image array is indexed [y, x].
    vigImage = lsst.afw.image.ImageF(detector.getBBox())
    vigImage.array[:, :] = np.polyval(coeffs, fpRad).reshape(nx, ny).T

    bitmask = exposures[0].mask.getPlaneBitMask(maskPlane)

    for amp in detector:
        vigSubImage = vigImage[amp.getBBox()].array.copy()

        # Grow the selection threshold one percent at a time until
        # enough of the least-vignetted pixels are selected.
        # NOTE(review): if vignetteFunctionRegionSelectionPercent
        # starts at >= 100.0 the loop body never runs and `high`
        # below is unbound — confirm config range forbids this.
        nSelected = 0
        subMax = vigSubImage.max()
        sel = self.config.vignetteFunctionRegionSelectionPercent
        while (nSelected < self.config.vignetteFunctionRegionSelectionMinimumPixels) and sel < 100.0:
            threshold = subMax * (1.0 - sel / 100.0)
            high = (vigSubImage > threshold)
            nSelected = high.sum()

            # Raise by a percent at a time.
            sel += 1.0

        # sel is incremented once even on immediate success, so only
        # warn if more than one iteration was needed.
        if sel > (self.config.vignetteFunctionRegionSelectionPercent + 1.0):
            self.log.warning(
                "Amplifier %s_%s required %.1f%% threshold to reach %d pixel threshold.",
                detector.getName(),
                amp.getName(),
                sel - 1.0,
                self.config.vignetteFunctionRegionSelectionMinimumPixels,
            )

        # Mask every amp pixel outside the selected (least-vignetted)
        # region in all input exposures.
        for exposure in exposures:
            exposure[amp.getBBox()].mask.array[~high] |= bitmask
def makeCovArray(self, inputTuple, maxRangeFromTuple):
    """Make covariances array from tuple.

    Parameters
    ----------
    inputTuple : `numpy.ndarray`
        Structured array with rows with at least
        (mu, afwVar, cov, var, i, j, npix), where:

        mu : `float`
            0.5*(mu1 + mu2), where mu1 is the mean value of flat1
            and mu2 is the mean value of flat2.
        afwVar : `float`
            Variance of difference flat, calculated with afw.
        cov : `float`
            Covariance value at lag (i, j).
        var : `float`
            Variance (covariance value at lag (0, 0)).
        i : `int`
            Lag in dimension "x".
        j : `int`
            Lag in dimension "y".
        npix : `int`
            Number of pixels used for covariance calculation.
    maxRangeFromTuple : `int` or `None`
        Maximum range to select from tuple; `None` keeps all lags.

    Returns
    -------
    cov : `numpy.array`
        Covariance arrays, indexed by mean signal mu.
    vCov : `numpy.array`
        Variance of the [co]variance arrays, indexed by mean signal mu.
    muVals : `numpy.array`
        List of mean signal values, sorted ascending.
    """
    # Optionally restrict the lags to [0, maxRangeFromTuple) in both
    # dimensions.
    if maxRangeFromTuple is not None:
        cut = (inputTuple['i'] < maxRangeFromTuple) & (inputTuple['j'] < maxRangeFromTuple)
        cutTuple = inputTuple[cut]
    else:
        cutTuple = inputTuple

    # Sort by increasing mu so that measurements with the same mu
    # (i.e. from the same image pair) are contiguous.
    cutTuple = cutTuple[np.argsort(cutTuple['mu'])]
    mu = cutTuple['mu']
    # np.unique returns a sorted array; these index the first axis of
    # the output arrays.
    muVals = np.unique(mu)
    # Group index per row: rows sharing a mean signal belong to the
    # same image pair.  (Equivalent to the previous cumsum-of-steps
    # construction, since muVals is sorted and unique.)
    ind = np.searchsorted(muVals, mu)

    i = cutTuple['i'].astype(int)
    j = cutTuple['j'].astype(int)
    # Factor 0.5: measurements come from a difference image, whose
    # variance is twice that of a single flat.
    c = 0.5*cutTuple['cov']
    n = cutTuple['npix']
    v = 0.5*cutTuple['var']

    # Book and fill.  Use zeros (not np.ndarray) so any (pair, i, j)
    # combination absent from the input reads as 0 instead of
    # uninitialized memory.
    cov = np.zeros((len(muVals), np.max(i) + 1, np.max(j) + 1))
    var = np.zeros_like(cov)
    cov[ind, i, j] = c
    var[ind, i, j] = v**2/n
    var[:, 0, 0] *= 2  # var(v) = 2*v**2/N

    return cov, var, muVals
def measureMeanVarCov(self, im1Area, im2Area, imStatsCtrl, mu1, mu2):
    """Calculate the mean of each of two exposures and the variance
    and covariance of their difference.

    The variance is calculated via afwMath, and the covariance via
    the methods in Astier+19 (appendix A). In theory, var =
    covariance[0,0]. This should be validated, and in the future,
    we may decide to just keep one (covariance).

    Parameters
    ----------
    im1Area : `lsst.afw.image.maskedImage.MaskedImageF`
        Masked image from exposure 1.
    im2Area : `lsst.afw.image.maskedImage.MaskedImageF`
        Masked image from exposure 2.
    imStatsCtrl : `lsst.afw.math.StatisticsControl`
        Statistics control object.
    mu1 : `float`
        Clipped mean of im1Area (ADU).
    mu2 : `float`
        Clipped mean of im2Area (ADU).

    Returns
    -------
    mu : `float`
        0.5*(mu1 + mu2), where mu1 and mu2 are the clipped means
        of the regions in both exposures. If either mu1 or mu2 are
        NaN's, the returned value is NaN.
    varDiff : `float`
        Half of the clipped variance of the difference of the
        regions in the two input exposures. If either mu1 or mu2 are
        NaN's, the returned value is NaN.
    covDiffAstier : `list` or `NaN`
        List with tuples of the form (dx, dy, var, cov, npix), where:

        dx : `int`
            Lag in x.
        dy : `int`
            Lag in y.
        var : `float`
            Variance at (dx, dy).
        cov : `float`
            Covariance at (dx, dy).
        nPix : `int`
            Number of pixel pairs used to evaluate var and cov.
    rowMeanVariance : `float`
        Variance of the means of each row in the difference image.
        Taken from `github.com/lsst-camera-dh/eo_pipe`.

        If either mu1 or mu2 are NaN's, the returned value is NaN.
    """
    if np.isnan(mu1) or np.isnan(mu2):
        self.log.warning("Mean of amp in image 1 or 2 is NaN: %f, %f.", mu1, mu2)
        return np.nan, np.nan, None, np.nan
    mu = 0.5*(mu1 + mu2)

    # Take difference of pairs.
    # Symmetric formula: diff = (mu2*im1 - mu1*im2)/(0.5*(mu1 + mu2)),
    # which cancels an overall illumination difference between the
    # two flats.
    temp = im2Area.clone()
    temp *= mu1
    diffIm = im1Area.clone()
    diffIm *= mu2
    diffIm -= temp
    diffIm /= mu

    if self.config.binSize > 1:
        diffIm = afwMath.binImage(diffIm, self.config.binSize)

    # Calculate the variance (in ADU^2) of the means of rows for diffIm.
    # Taken from eo_pipe.
    # NOTE(review): range(minY, maxY) omits the last row of an
    # inclusive afw Box2I; confirm this matches the eo_pipe original.
    region = diffIm.getBBox()
    rowMeans = []
    for row in range(region.minY, region.maxY):
        regionRow = Box2I(Point2I(region.minX, row),
                          Extent2I(region.width, 1))
        rowMeans.append(afwMath.makeStatistics(diffIm[regionRow],
                                               afwMath.MEAN,
                                               imStatsCtrl).getValue())
    rowMeanVariance = afwMath.makeStatistics(
        np.array(rowMeans), afwMath.VARIANCECLIP,
        imStatsCtrl).getValue()

    # Clipped statistics of the difference image.  Computed once and
    # reused below, both for the returned variance and for the
    # clipping cut (previously VARIANCECLIP was computed twice).
    varClip = afwMath.makeStatistics(diffIm, afwMath.VARIANCECLIP, imStatsCtrl).getValue()
    meanClip = afwMath.makeStatistics(diffIm, afwMath.MEANCLIP, imStatsCtrl).getValue()

    # Variance calculation via afwMath.
    varDiff = 0.5*varClip

    # Covariances calculations.
    # Get the pixels that were not clipped.
    cut = meanClip + self.config.nSigmaClipPtc*np.sqrt(varClip)
    unmasked = np.where(np.fabs(diffIm.image.array) <= cut, 1, 0)

    # Get the pixels in the mask planes of the difference image
    # that were ignored by the clipping algorithm.
    # (& binds tighter than ==, so this is (mask & andMask) == 0.)
    wDiff = np.where(diffIm.mask.array & imStatsCtrl.getAndMask() == 0, 1, 0)
    # Combine the two sets of pixels ('1': use; '0': don't use)
    # into a final weight matrix to be used in the covariance
    # calculations below.
    w = unmasked*wDiff

    if np.sum(w) < self.config.minNumberGoodPixelsForCovariance/(self.config.binSize**2):
        self.log.warning("Number of good points for covariance calculation (%s) is less "
                         "(than threshold %s)", np.sum(w),
                         self.config.minNumberGoodPixelsForCovariance/(self.config.binSize**2))
        return np.nan, np.nan, None, np.nan

    maxRangeCov = self.config.maximumRangeCovariancesAstier

    # Calculate covariances via FFT.
    shapeDiff = np.array(diffIm.image.array.shape)
    # Calculate the sizes of FFT dimensions: the next power of two
    # strictly greater than (image size + maximum lag) in each axis.
    s = shapeDiff + maxRangeCov
    tempSize = np.array(np.log(s)/np.log(2.)).astype(int)
    fftSize = np.array(2**(tempSize+1)).astype(int)
    fftShape = (fftSize[0], fftSize[1])
    c = CovFastFourierTransform(diffIm.image.array, w, fftShape, maxRangeCov)
    # np.sum(w) is the same as npix[0][0] returned in covDiffAstier.
    try:
        covDiffAstier = c.reportCovFastFourierTransform(maxRangeCov)
    except ValueError:
        # This is raised if there are not enough pixels.
        self.log.warning("Not enough pixels covering the requested covariance range in x/y (%d)",
                         self.config.maximumRangeCovariancesAstier)
        return np.nan, np.nan, None, np.nan

    # Compare Cov[0,0] and afwMath.VARIANCECLIP. covDiffAstier[0]
    # is the Cov[0,0] element, [3] is the variance, and there's a
    # factor of 0.5 difference with afwMath.VARIANCECLIP.
    thresholdPercentage = self.config.thresholdDiffAfwVarVsCov00
    fractionalDiff = 100*np.fabs(1 - varDiff/(covDiffAstier[0][3]*0.5))
    if fractionalDiff >= thresholdPercentage:
        self.log.warning("Absolute fractional difference between afwMath.VARIANCECLIP and Cov[0,0] "
                         "is more than %f%%: %f", thresholdPercentage, fractionalDiff)

    return mu, varDiff, covDiffAstier, rowMeanVariance
def getImageAreasMasksStats(self, exposure1, exposure2, region=None):
    """Get image areas in a region as well as masks and statistic objects.

    Parameters
    ----------
    exposure1 : `lsst.afw.image.ExposureF`
        First exposure of flat field pair.
    exposure2 : `lsst.afw.image.ExposureF`
        Second exposure of flat field pair.
    region : `lsst.geom.Box2I`, optional
        Region of each exposure where to perform the calculations
        (e.g, an amplifier).

    Returns
    -------
    im1Area : `lsst.afw.image.MaskedImageF`
        Masked image from exposure 1.
    im2Area : `lsst.afw.image.MaskedImageF`
        Masked image from exposure 2.
    imStatsCtrl : `lsst.afw.math.StatisticsControl`
        Statistics control object.
    mu1 : `float`
        Clipped mean of im1Area (ADU).
    mu2 : `float`
        Clipped mean of im2Area (ADU).
    """
    # With no region, operate on the full masked images.
    if region is None:
        im1Area = exposure1.maskedImage
        im2Area = exposure2.maskedImage
    else:
        im1Area = exposure1.maskedImage[region]
        im2Area = exposure2.maskedImage[region]

    # Build the sigma-clipping statistics control from the mask
    # planes of the first exposure.
    badBits = exposure1.getMask().getPlaneBitMask(self.config.maskNameList)
    statsCtrl = afwMath.StatisticsControl(self.config.nSigmaClipPtc,
                                          self.config.nIterSigmaClipPtc,
                                          badBits)
    statsCtrl.setNanSafe(True)
    statsCtrl.setAndMask(badBits)

    # Clipped means of each area.
    mu1 = afwMath.makeStatistics(im1Area, afwMath.MEANCLIP, statsCtrl).getValue()
    mu2 = afwMath.makeStatistics(im2Area, afwMath.MEANCLIP, statsCtrl).getValue()

    return (im1Area, im2Area, statsCtrl, mu1, mu2)
def getGainFromFlatPair(self, im1Area, im2Area, imStatsCtrl, mu1, mu2,
                        correctionType='NONE', readNoise=None):
    """Estimate the gain from a single pair of flats.

    The basic premise is 1/g = <(I1 - I2)^2/(I1 + I2)> = 1/const,
    where I1 and I2 correspond to flats 1 and 2, respectively.
    Corrections for the variable QE and the read-noise are then
    made following the derivation in Robert Lupton's forthcoming
    book, which gets

    1/g = <(I1 - I2)^2/(I1 + I2)> - 1/mu(sigma^2 - 1/2g^2).

    This is a quadratic equation, whose solutions are given by:

    g = mu +/- sqrt(2*sigma^2 - 2*const*mu + mu^2)/(2*const*mu
    - 2*sigma^2)

    where 'mu' is the average signal level and 'sigma' is the
    amplifier's readnoise. The positive solution will be used.
    The way the correction is applied depends on the value
    supplied for correctionType.

    correctionType is one of ['NONE', 'SIMPLE' or 'FULL']
    'NONE' : uses the 1/g = <(I1 - I2)^2/(I1 + I2)> formula.
    'SIMPLE' : uses the gain from the 'NONE' method for the
    1/2g^2 term.
    'FULL' : solves the full equation for g, discarding the
    non-physical solution to the resulting quadratic.

    Parameters
    ----------
    im1Area : `lsst.afw.image.maskedImage.MaskedImageF`
        Masked image from exposure 1.
    im2Area : `lsst.afw.image.maskedImage.MaskedImageF`
        Masked image from exposure 2.
    imStatsCtrl : `lsst.afw.math.StatisticsControl`
        Statistics control object.
    mu1 : `float`
        Clipped mean of im1Area (ADU).
    mu2 : `float`
        Clipped mean of im2Area (ADU).
    correctionType : `str`, optional
        The correction applied, one of ['NONE', 'SIMPLE', 'FULL'].
    readNoise : `float`, optional
        Amplifier readout noise (ADU).  Required (finite) for
        'SIMPLE' and 'FULL'; otherwise the correction falls back
        to 'NONE'.

    Returns
    -------
    gain : `float`
        Gain, in e/ADU.

    Raises
    ------
    RuntimeError
        Raised if `correctionType` is not one of 'NONE',
        'SIMPLE', or 'FULL'.
    """
    if correctionType not in ['NONE', 'SIMPLE', 'FULL']:
        raise RuntimeError("Unknown correction type: %s" % correctionType)

    # Corrections need a finite read noise; degrade gracefully to the
    # uncorrected estimate when it is unavailable.
    if correctionType != 'NONE' and not np.isfinite(readNoise):
        self.log.warning("'correctionType' in 'getGainFromFlatPair' is %s, "
                         "but 'readNoise' is NaN. Setting 'correctionType' "
                         "to 'NONE', so a gain value will be estimated without "
                         "corrections." % correctionType)
        correctionType = 'NONE'

    mu = 0.5*(mu1 + mu2)

    # ratioIm = (I1 - I2)^2 / (I1 + I2)
    temp = im2Area.clone()
    ratioIm = im1Area.clone()
    ratioIm -= temp
    ratioIm *= ratioIm

    # Sum of pairs
    sumIm = im1Area.clone()
    sumIm += temp

    ratioIm /= sumIm

    # const = <(I1 - I2)^2/(I1 + I2)> = 1/g (uncorrected).
    const = afwMath.makeStatistics(ratioIm, afwMath.MEAN, imStatsCtrl).getValue()
    gain = 1. / const

    if correctionType == 'SIMPLE':
        # NOTE(review): `1/2*gain**2` evaluates to gain**2/2, while the
        # docstring formula reads 1/(2g^2) — confirm which is intended.
        gain = 1/(const - (1/mu)*(readNoise**2 - (1/2*gain**2)))
    elif correctionType == 'FULL':
        # Positive root of the quadratic in the docstring.
        root = np.sqrt(mu**2 - 2*mu*const + 2*readNoise**2)
        denom = (2*const*mu - 2*readNoise**2)
        positiveSolution = (root + mu)/denom
        gain = positiveSolution

    return gain
def computeGaussianHistogramParameters(self, im1Area, im2Area, imStatsCtrl, mu1, mu2):
    """Compute KS test for a Gaussian model fit to a histogram of the
    difference image.

    Parameters
    ----------
    im1Area : `lsst.afw.image.MaskedImageF`
        Masked image from exposure 1.
    im2Area : `lsst.afw.image.MaskedImageF`
        Masked image from exposure 2.
    imStatsCtrl : `lsst.afw.math.StatisticsControl`
        Statistics control object.
    mu1 : `float`
        Clipped mean of im1Area (ADU).
    mu2 : `float`
        Clipped mean of im2Area (ADU).

    Returns
    -------
    varFit : `float`
        Variance from the Gaussian fit.
    chi2Dof : `float`
        Chi-squared per degree of freedom of Gaussian fit.
    kspValue : `float`
        The KS test p-value for the Gaussian fit.

    Notes
    -----
    The algorithm here was originally developed by Aaron Roodman.
    Tests on the full focal plane of LSSTCam during testing has shown
    that a KS test p-value cut of 0.01 is a good discriminant for
    well-behaved flat pairs (p>0.01) and poorly behaved non-Gaussian
    flat pairs (p<0.01).
    """
    diffExp = im1Area.clone()
    diffExp -= im2Area

    # Keep pixels that are unmasked AND have a finite difference
    # value.  The finiteness test must be on the image values; the
    # previous code tested np.isfinite on the integer mask plane,
    # which is vacuously true and let NaN pixels into the histogram.
    sel = (((diffExp.mask.array & imStatsCtrl.getAndMask()) == 0)
           & np.isfinite(diffExp.image.array))
    diffArr = diffExp.image.array[sel]

    numOk = len(diffArr)

    if numOk >= self.config.ksHistMinDataValues and np.isfinite(mu1) and np.isfinite(mu2):
        # Create a histogram symmetric around zero, with a bin size
        # determined from the expected variance given by the average of
        # the input signal levels.
        lim = self.config.ksHistLimitMultiplier * np.sqrt((mu1 + mu2)/2.)
        yVals, binEdges = np.histogram(diffArr, bins=self.config.ksHistNBins, range=[-lim, lim])

        # Fit the histogram with a Gaussian model.
        model = GaussianModel()
        yVals = yVals.astype(np.float64)
        xVals = ((binEdges[0: -1] + binEdges[1:])/2.).astype(np.float64)
        # Poisson errors per bin; empty bins get unit weight to avoid
        # division by zero.
        errVals = np.sqrt(yVals)
        errVals[(errVals == 0.0)] = 1.0
        pars = model.guess(yVals, x=xVals)
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            # The least-squares fitter sometimes spouts (spurious) warnings
            # when the model is very bad. Swallow these warnings now and
            # let the KS test check the model below.
            out = model.fit(
                yVals,
                pars,
                x=xVals,
                weights=1./errVals,
                calc_covar=True,
                method="least_squares",
            )

        # Calculate chi2; 3 fitted parameters (amplitude, center, sigma).
        chiArr = out.residual
        nDof = len(yVals) - 3
        chi2Dof = np.sum(chiArr**2.)/nDof
        sigmaFit = out.params["sigma"].value

        # Calculate KS test p-value for the fit against the fitted
        # normal distribution.
        ksResult = scipy.stats.ks_1samp(
            diffArr,
            scipy.stats.norm.cdf,
            (out.params["center"].value, sigmaFit),
        )

        kspValue = ksResult.pvalue
        # Clamp vanishingly small p-values to exactly zero.
        if kspValue < 1e-15:
            kspValue = 0.0

        varFit = sigmaFit**2.

    else:
        # Not enough data (or non-finite means): flag as failed.
        varFit = np.nan
        chi2Dof = np.nan
        kspValue = 0.0

    return varFit, chi2Dof, kspValue
class PhotonTransferCurveExtractTask(PhotonTransferCurveExtractTaskBase):
    """Task to measure covariances from groups of flat-field exposures.

    Input flats are arranged into same-signal groups (by exposure time,
    flux keyword, or exposure ID, per ``matchExposuresType``), and the
    first two exposures of each group are reduced to a partial PTC
    dataset.
    """

    ConfigClass = PhotonTransferCurveExtractConfig
    _DefaultName = 'cpPtcExtract'

    def runQuantum(self, butlerQC, inputRefs, outputRefs):
        """Ensure that the input and output dimensions are passed along.

        Parameters
        ----------
        butlerQC : `~lsst.daf.butler.QuantumContext`
            Butler to operate on.
        inputRefs : `~lsst.pipe.base.InputQuantizedConnection`
            Input data refs to load.
        outputRefs : `~lsst.pipe.base.OutputQuantizedConnection`
            Output data refs to persist.
        """
        inputs = butlerQC.get(inputRefs)
        # Ids of input list of exposure references
        # (deferLoad=True in the input connections).
        inputs['inputDims'] = [expRef.datasetRef.dataId['exposure'] for expRef in inputRefs.inputExp]

        # Dictionary, keyed by expTime (or expFlux or expId), with tuples
        # containing flat exposures and their IDs.
        matchType = self.config.matchExposuresType
        if matchType == 'TIME':
            inputs['inputExp'] = arrangeFlatsByExpTime(inputs['inputExp'], inputs['inputDims'], log=self.log)
        elif matchType == 'FLUX':
            inputs['inputExp'] = arrangeFlatsByExpFlux(
                inputs['inputExp'],
                inputs['inputDims'],
                self.config.matchExposuresByFluxKeyword,
                log=self.log,
            )
        else:
            inputs['inputExp'] = arrangeFlatsByExpId(inputs['inputExp'], inputs['inputDims'])

        outputs = self.run(**inputs)
        # Pad the outputs so every output reference gets a dataset.
        outputs = self._guaranteeOutputs(inputs['inputDims'], outputs, outputRefs)
        butlerQC.put(outputs, outputRefs)

    def _guaranteeOutputs(self, inputDims, outputs, outputRefs):
        """Ensure that all outputRefs have a matching output, and if they do
        not, fill the output with dummy PTC datasets.

        Parameters
        ----------
        inputDims : `list` [`int`]
            Input exposure IDs.
        outputs : `lsst.pipe.base.Struct`
            Outputs from the ``run`` method. Contains the entry:

            ``outputCovariances``
                Output PTC datasets (`list` [`lsst.ip.isr.IsrCalib`])
        outputRefs : `~lsst.pipe.base.OutputQuantizedConnection`
            Container with all of the outputs expected to be generated.

        Returns
        -------
        outputs : `lsst.pipe.base.Struct`
            Dummy dataset padded version of the input ``outputs`` with
            the same entries.
        """
        newCovariances = []
        for ref in outputRefs.outputCovariances:
            outputExpId = ref.dataId['exposure']
            if outputExpId in inputDims:
                entry = inputDims.index(outputExpId)
                newCovariances.append(outputs.outputCovariances[entry])
            else:
                # This output had no matching input: emit a placeholder
                # dataset so the butler put succeeds.
                newPtc = PhotonTransferCurveDataset(['no amp'], 'DUMMY', covMatrixSide=1)
                newPtc.setAmpValuesPartialDataset('no amp')
                newCovariances.append(newPtc)
        return pipeBase.Struct(outputCovariances=newCovariances)

    def run(self, inputExp, inputDims, inputPhotodiodeData=None):
        """Measure covariances from difference of flat pairs.

        Parameters
        ----------
        inputExp : `dict` [`float`, `list`
                   [`~lsst.pipe.base.connections.DeferredDatasetRef`]]
            Dictionary that groups references to flat-field exposures that
            have the same exposure time (seconds), or that groups them
            sequentially by their exposure id.
        inputDims : `list`
            List of exposure IDs.
        inputPhotodiodeData : `dict` [`str`, `lsst.ip.isr.PhotodiodeCalib`]
            Photodiode readings data (optional).

        Returns
        -------
        results : `lsst.pipe.base.Struct`
            The resulting Struct contains:

            ``outputCovariances``
                A list containing the per-pair PTC measurements (`list`
                [`lsst.ip.isr.PhotonTransferCurveDataset`])
        """
        # inputExp.values() returns a view, which we turn into a list. We then
        # access the first exposure-ID tuple to get the detector.
        # The first "get()" retrieves the exposure from the exposure reference.
        detector = list(inputExp.values())[0][0][0].get(component='detector')
        filterName = list(inputExp.values())[0][0][0].get().metadata["FILTER"]
        detNum = detector.getId()
        amps = detector.getAmplifiers()
        ampNames = [amp.getName() for amp in amps]

        # Create a dummy ptcDataset. Dummy datasets will be
        # used to ensure that the number of output and input
        # dimensions match.
        dummyPtcDataset = PhotonTransferCurveDataset(
            ampNames, 'DUMMY',
            covMatrixSide=self.config.maximumRangeCovariancesAstier,
            filterName=filterName,
        )
        for ampName in ampNames:
            dummyPtcDataset.setAmpValuesPartialDataset(ampName)

        photoChargeDict = self._extractPhotoChargeDict(inputPhotodiodeData, inputExp)

        # Output list with PTC datasets.
        partialPtcDatasetList = []
        # The number of output references needs to match that of input
        # references: initialize output list with dummy PTC datasets.
        # (All slots reference the same dummy object; real results
        # replace individual entries below.)
        for i in range(len(inputDims)):
            partialPtcDatasetList.append(dummyPtcDataset)

        if self.config.numEdgeSuspect > 0:
            self.isrTask = IsrTask()
            self.log.info("Masking %d pixels from the edges of all %ss as SUSPECT.",
                          self.config.numEdgeSuspect, self.config.edgeMaskLevel)

        # Depending on the value of config.matchExposuresType
        # 'expTime' can stand for exposure time, flux, or ID.
        for expTime in inputExp:
            exposures = inputExp[expTime]
            if not np.isfinite(expTime):
                self.log.warning("Illegal/missing %s found (%s). Dropping exposure %d",
                                 self.config.matchExposuresType, expTime, exposures[0][1])
                continue
            elif len(exposures) == 1:
                self.log.warning("Only one exposure found at %s %f. Dropping exposure %d.",
                                 self.config.matchExposuresType, expTime, exposures[0][1])
                continue
            else:
                # Only use the first two exposures at expTime. Each
                # element is a tuple (exposure, expId).
                expRef1, expId1 = exposures[0]
                expRef2, expId2 = exposures[1]
                # use get() to obtain `lsst.afw.image.Exposure`
                exp1, exp2 = expRef1.get(), expRef2.get()

                if len(exposures) > 2:
                    self.log.warning("Already found 2 exposures at %s %f. Ignoring exposures: %s",
                                     self.config.matchExposuresType, expTime,
                                     ", ".join(str(i[1]) for i in exposures[2:]))

            partialPtcDataset, nAmpsNan = self._extractOneFlatPair(
                [expId1, expId2],
                [exp1, exp2],
                photoChargeDict,
                expTime,
            )

            # Use location of exp1 to save PTC dataset from (exp1, exp2) pair.
            # Below, np.where(expId1 == np.array(inputDims)) returns a tuple
            # with a single-element array, so [0][0]
            # is necessary to extract the required index.
            datasetIndex = np.where(expId1 == np.array(inputDims))[0][0]
            # `partialPtcDatasetList` is a list of
            # `PhotonTransferCurveDataset` objects. Some of them
            # will be dummy datasets (to match length of input
            # and output references), and the rest will have
            # datasets with the mean signal, variance, and
            # covariance measurements at a given exposure
            # time. The next part of the PTC-measurement
            # pipeline, `solve`, will take this list as input,
            # and assemble the measurements in the datasets
            # in an adequate manner for fitting a PTC
            # model.
            partialPtcDataset.updateMetadataFromExposures([exp1, exp2])
            partialPtcDataset.updateMetadata(setDate=True, detector=detector)
            partialPtcDatasetList[datasetIndex] = partialPtcDataset

            if nAmpsNan == len(ampNames):
                msg = f"NaN mean in all amps of exposure pair {expId1}, {expId2} of detector {detNum}."
                self.log.warning(msg)

        return pipeBase.Struct(
            outputCovariances=partialPtcDatasetList,
        )
1618class PhotonTransferCurveExtractPairTask(PhotonTransferCurveExtractTaskBase):
1619 ConfigClass = PhotonTransferCurveExtractPairConfig
1620 _DefaultName = "cpPtcExtractPair"
def runQuantum(self, butlerQC, inputRefs, outputRefs):
    """Select the two exposures forming this quantum's flat pair and run.

    When matching by flux, up to three candidate exposures may be
    connected to this quantum; the logic below reduces them to the two
    that share the flux-keyword value, raising
    `~lsst.pipe.base.NoWorkFound` when this exposure has no partner or
    is not the primary (first) member of its pair.
    """
    inputs = butlerQC.get(inputRefs)

    quantumExposure = butlerQC.quantum.dataId["exposure"]

    if len(inputRefs.inputExp) == 1:
        self.log.warning("Exposure %d does not have a pair; skipping.", quantumExposure)
        raise pipeBase.NoWorkFound(f"Exposure {quantumExposure} does not have a pair.")

    if self.config.matchExposuresType == "FLUX":
        # Get the flux keyword value for each of the inputs.
        if len(inputs["inputExp"]) > 3:
            raise RuntimeError("Error in matching inputs to PhotonTransferCurveExtractPairTask")

        fluxValues = []
        inputExposures = []
        for inputExpRef in inputs["inputExp"]:
            md = inputExpRef.get(component="metadata")
            # Note that nan will never match anything.
            fluxValue = md.get(self.config.matchExposuresByFluxKeyword, np.nan)
            if np.isnan(fluxValue):
                self.log.warning(
                    "Exposure %d is missing flux keyword %s",
                    inputExpRef.dataId["exposure"],
                    self.config.matchExposuresByFluxKeyword,
                )
            fluxValues.append(fluxValue)
            inputExposures.append(inputExpRef.dataId["exposure"])

        if len(fluxValues) == 2:
            # The simple case that we are at a boundary.
            if fluxValues[0] != fluxValues[1]:
                self.log.warning(
                    "Exposure %d may not have a match via %s",
                    quantumExposure,
                    self.config.matchExposuresByFluxKeyword,
                )
                raise pipeBase.NoWorkFound(
                    f"Exposure {quantumExposure} does not have a match."
                )
            elif quantumExposure != inputs["inputExp"][0].dataId["exposure"]:
                # Not primary; that's okay, we can skip.
                raise pipeBase.NoWorkFound(
                    f"Exposure {quantumExposure} is not the primary in a pair.",
                )
        else:
            # We have a set of three.
            if np.all(np.asarray(fluxValues) == fluxValues[0]):
                # All three fluxes are equal; ambiguous pairing.
                self.log.warning(
                    "More than two exposures have matching %s to exposure %d",
                    self.config.matchExposuresByFluxKeyword,
                    quantumExposure,
                )
                raise pipeBase.NoWorkFound(
                    f"Exposure {quantumExposure} matches too many "
                    f"{self.config.matchExposuresByFluxKeyword}",
                )
            if fluxValues[0] == fluxValues[1] and inputExposures[0] == quantumExposure:
                # Remove the last input.
                inputs["inputExp"].pop(2)
                if self.config.doExtractPhotodiodeData:
                    inputs["inputPhotodiodeData"].pop(2)
            elif fluxValues[1] == fluxValues[2] and inputExposures[1] == quantumExposure:
                # Remove the first input.
                inputs["inputExp"].pop(0)
                if self.config.doExtractPhotodiodeData:
                    inputs["inputPhotodiodeData"].pop(0)
            elif fluxValues[0] != fluxValues[1] and fluxValues[1] != fluxValues[2]:
                # No adjacent exposure shares this flux.
                self.log.warning(
                    "Exposure %d may not have a match via %s",
                    quantumExposure,
                    self.config.matchExposuresByFluxKeyword,
                )
                raise pipeBase.NoWorkFound(
                    f"Exposure {quantumExposure} does not have a match."
                )
            else:
                # Not primary; that's okay, we can skip.
                raise pipeBase.NoWorkFound(
                    f"Exposure {quantumExposure} is not the primary in a pair.",
                )

    inputs["inputDims"] = [expRef.dataId["exposure"] for expRef in inputs["inputExp"]]

    outputs = self.run(**inputs)
    butlerQC.put(outputs, outputRefs)
def run(self, *, inputExp, inputDims, inputPhotodiodeData=None):
    """Measure covariances from a single flat pair.

    Parameters
    ----------
    inputExp : `list` [`lsst.pipe.base.connections.DeferredDatasetRef`]
        List of 2 references to the input flat exposures.
    inputDims : `list` [`int`]
        List of 2 exposure numbers for the input flat exposures.
    inputPhotodiodeData : `list`
        [`lsst.pipe.base.connections.DeferredDatasetRef`], optional
        List of 2 references to input photodiode data.

    Returns
    -------
    results : `lsst.pipe.base.Struct`
        The resulting Struct contains:

        ``outputCovariance``
            The single-pair PTC measurement
            (`lsst.ip.isr.PhotonTransferCurveDataset`).
        ``outputBinnedImages``
            Combined binned-image table for the pair, or `None` if
            ``doOutputBinnedImages`` is not set.
    """
    # (Removed dead `partialPtcDataset = None`: the variable is
    # unconditionally assigned by _extractOneFlatPair below.)
    expId1, expId2 = inputDims
    exp1 = inputExp[0].get()
    exp2 = inputExp[1].get()

    detector = exp1.getDetector()
    detNum = detector.getId()
    amps = detector.getAmplifiers()
    ampNames = [amp.getName() for amp in amps]

    # Set the exposure time to the real exposure time.
    expTime = exp1.info.getVisitInfo().exposureTime

    photoChargeDict = self._extractPhotoChargeDict(inputPhotodiodeData, inputExp)

    if self.config.numEdgeSuspect > 0:
        self.isrTask = IsrTask()
        self.log.info("Masking %d pixels from the edges of all %ss as SUSPECT.",
                      self.config.numEdgeSuspect, self.config.edgeMaskLevel)

    partialPtcDataset, nAmpsNan = self._extractOneFlatPair(
        [expId1, expId2],
        [exp1, exp2],
        photoChargeDict,
        expTime,
    )

    if self.config.doOutputBinnedImages:
        binned = bin_flat(
            partialPtcDataset,
            exp1,
            bin_factor=self.config.outputBinnedImagesBinFactor,
            amp_boundary=self.config.numEdgeSuspect,
            apply_gains=False,
        )
        binned2 = bin_flat(
            partialPtcDataset,
            exp2,
            bin_factor=self.config.outputBinnedImagesBinFactor,
            amp_boundary=self.config.numEdgeSuspect,
            apply_gains=False,
        )
        # Combine the two tables; these will have the same
        # binning and therefore can be simply concatenated this way.
        binned.rename_column("value", "value1")
        binned["value2"] = binned2["value"]
    else:
        binned = None

    partialPtcDataset.updateMetadataFromExposures([exp1, exp2])
    partialPtcDataset.updateMetadata(setDate=True, detector=detector)

    if nAmpsNan == len(ampNames):
        self.log.warning(
            "NaN mean in all amps of exposure pair %d, %d of detector %d.",
            expId1,
            expId2,
            detNum,
        )

    return pipeBase.Struct(
        outputCovariance=partialPtcDataset,
        outputBinnedImages=binned,
    )