Coverage for python / lsst / cp / pipe / cpCombine.py: 21%
351 statements
« prev ^ index » next coverage.py v7.13.5, created at 2026-05-01 08:54 +0000
1# This file is part of cp_pipe.
2#
3# Developed for the LSST Data Management System.
4# This product includes software developed by the LSST Project
5# (https://www.lsst.org).
6# See the COPYRIGHT file at the top-level directory of this distribution
7# for details of code ownership.
8#
9# This program is free software: you can redistribute it and/or modify
10# it under the terms of the GNU General Public License as published by
11# the Free Software Foundation, either version 3 of the License, or
12# (at your option) any later version.
13#
14# This program is distributed in the hope that it will be useful,
15# but WITHOUT ANY WARRANTY; without even the implied warranty of
16# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17# GNU General Public License for more details.
18#
19# You should have received a copy of the GNU General Public License
20# along with this program. If not, see <http://www.gnu.org/licenses/>.
21import numpy as np
22from datetime import datetime, UTC
23from operator import attrgetter
25import astropy.time
26import lsst.geom as geom
27import lsst.pex.config as pexConfig
28import lsst.pipe.base as pipeBase
29import lsst.pipe.base.connectionTypes as cT
30import lsst.afw.math as afwMath
31import lsst.afw.image as afwImage
33import lsst.daf.base
35from lsst.ip.isr.vignette import VignetteTask, maskVignettedRegion
37from astro_metadata_translator import merge_headers
38from astro_metadata_translator.serialize import dates_to_fits
39from lsst.obs.base.utils import strip_provenance_from_fits_header
# Public API of this module: the statistics task, the generic combine
# task, and its by-filter / two-flat specializations.
__all__ = ["CalibStatsConfig", "CalibStatsTask",
           "CalibCombineConfig", "CalibCombineConnections", "CalibCombineTask",
           "CalibCombineByFilterConfig", "CalibCombineByFilterConnections", "CalibCombineByFilterTask",
           "CalibCombineTwoFlatsByFilterConfig", "CalibCombineTwoFlatsByFilterTask"]
# CalibStatsConfig/CalibStatsTask from pipe_base/constructCalibs.py
class CalibStatsConfig(pexConfig.Config):
    """Parameters controlling the measurement of background
    statistics.

    Used by `CalibStatsTask` to build an
    `lsst.afw.math.StatisticsControl` and select the statistic.
    """

    # Name resolved via afwMath.stringToStatisticsProperty at run time.
    stat = pexConfig.Field(
        dtype=str,
        default="MEANCLIP",
        doc="Statistic name to use to estimate background (from `~lsst.afw.math.Property`)",
    )
    clip = pexConfig.Field(
        dtype=float,
        default=3.0,
        doc="Clipping threshold for background",
    )
    nIter = pexConfig.Field(
        dtype=int,
        default=3,
        doc="Clipping iterations for background",
    )
    mask = pexConfig.ListField(
        dtype=str,
        default=["DETECTED", "BAD", "NO_DATA"],
        doc="Mask planes to reject",
    )
class CalibStatsTask(pipeBase.Task):
    """Measure statistics on the background.

    This can be useful for scaling the background, e.g., for flats and
    fringe frames.
    """

    ConfigClass = CalibStatsConfig

    def run(self, exposureOrImage):
        """Measure a particular statistic on an image (of some sort).

        Parameters
        ----------
        exposureOrImage : `lsst.afw.image.Exposure`,
                          `lsst.afw.image.MaskedImage`, or
                          `lsst.afw.image.Image`
            Exposure or image to calculate statistics on.

        Returns
        -------
        results : `float`
            Resulting statistic value.
        """
        stats = afwMath.StatisticsControl(self.config.clip, self.config.nIter,
                                          afwImage.Mask.getPlaneBitMask(self.config.mask))
        # Duck-type the input down to something makeStatistics accepts:
        # Exposure -> MaskedImage -> plain Image.  Only AttributeError
        # (the accessor does not exist on this type) should move us to
        # the next representation; any other failure is a real error
        # and must propagate.
        try:
            image = exposureOrImage.getMaskedImage()
        except AttributeError:
            try:
                image = exposureOrImage.getImage()
            except AttributeError:
                image = exposureOrImage
        statType = afwMath.stringToStatisticsProperty(self.config.stat)
        return afwMath.makeStatistics(image, statType, stats).getValue()
class CalibCombineConnections(pipeBase.PipelineTaskConnections,
                              dimensions=("instrument", "detector")):
    """Connections for `CalibCombineTask`."""

    inputExpHandles = cT.Input(
        name="cpInputs",
        doc="Input pre-processed exposures to combine.",
        storageClass="Exposure",
        dimensions=("instrument", "detector", "exposure"),
        multiple=True,
        deferLoad=True,
    )
    inputScales = cT.Input(
        name="cpScales",
        doc="Input scale factors to use.",
        storageClass="StructuredDataDict",
        dimensions=("instrument", ),
        multiple=False,
    )

    outputData = cT.Output(
        name="cpProposal",
        doc="Output combined proposed calibration to be validated and certified.",
        storageClass="ExposureF",
        dimensions=("instrument", "detector"),
        isCalibration=True,
    )

    def __init__(self, *, config=None):
        super().__init__(config=config)

        # The scales dataset is only consumed by the "InputList"
        # scaling mode; drop the connection otherwise so the butler
        # does not require it to exist.
        if config and config.exposureScaling != "InputList":
            self.inputs.discard("inputScales")
# CalibCombineConfig/CalibCombineTask from pipe_base/constructCalibs.py
class CalibCombineConfig(pipeBase.PipelineTaskConfig,
                         pipelineConnections=CalibCombineConnections):
    """Configuration for combining calib exposures.
    """

    calibrationType = pexConfig.Field(
        dtype=str,
        default="calibration",
        doc="Name of calibration to be generated.",
    )

    # How each input is normalized before stacking; "InputList" reads
    # per-amp/per-exposure scales from the inputScales connection.
    exposureScaling = pexConfig.ChoiceField(
        dtype=str,
        allowed={
            "Unity": "Do not scale inputs. Scale factor is 1.0.",
            "ExposureTime": "Scale inputs by their exposure time.",
            "DarkTime": "Scale inputs by their dark time.",
            "MeanStats": "Scale inputs based on their mean values.",
            "InputList": "Scale inputs based on a list of values.",
        },
        default="Unity",
        doc="Scaling to be applied to each input exposure.",
    )
    scalingLevel = pexConfig.ChoiceField(
        dtype=str,
        allowed={
            "DETECTOR": "Scale by detector.",
            "AMP": "Scale by amplifier.",
        },
        default="DETECTOR",
        doc="Region to scale.",
    )
    maxVisitsToCalcErrorFromInputVariance = pexConfig.Field(
        dtype=int,
        default=5,
        doc="Maximum number of visits to estimate variance from input variance, not per-pixel spread",
    )
    subregionSize = pexConfig.ListField(
        dtype=int,
        doc="Width, height of subregion size.",
        length=2,
        # This is 200 rows for all detectors smaller than 10k in width.
        default=(10000, 200),
    )

    doVignette = pexConfig.Field(
        dtype=bool,
        default=False,
        doc="Copy vignette polygon to output and censor vignetted pixels? "
            "(Used for non-LSST flats).",
    )

    doVignetteMask = pexConfig.Field(
        dtype=bool,
        default=False,
        doc="Compute and attach a validPolygon and mask excluding the "
            "totally vignetted regions? (Used for LSST flats).",
    )
    vignette = pexConfig.ConfigurableField(
        target=VignetteTask,
        doc="Vignetting task.",
    )
    doPartialVignetteMask = pexConfig.Field(
        dtype=bool,
        default=False,
        doc="Compute a PARTLY_VIGNETTED mask plane for partly vignetted "
            "regions? (Used for LSST flats).",
    )
    partialVignette = pexConfig.ConfigurableField(
        target=VignetteTask,
        doc="Vignetting task, configured for partial vignetting.",
    )
    distributionPercentiles = pexConfig.ListField(
        dtype=float,
        default=[0, 5, 16, 50, 84, 95, 100],
        doc="Percentile levels to measure on the final combined calibration.",
    )
    mask = pexConfig.ListField(
        dtype=str,
        default=["SAT", "DETECTED", "INTRP"],
        doc="Mask planes to respect",
    )
    combine = pexConfig.Field(
        dtype=str,
        default="MEANCLIP",
        doc="Statistic name to use for combination (from `~lsst.afw.math.Property`)",
    )
    clip = pexConfig.Field(
        dtype=float,
        default=3.0,
        doc="Clipping threshold for combination",
    )
    nIter = pexConfig.Field(
        dtype=int,
        default=3,
        doc="Clipping iterations for combination",
    )
    noGoodPixelsMask = pexConfig.Field(
        dtype=str,
        default="BAD",
        doc="Mask bit to set when there are no good input pixels. See code comments for details.",
    )
    checkNoData = pexConfig.Field(
        dtype=bool,
        default=True,
        doc="Check that the calibration does not have NO_DATA set?",
    )
    censorMaskPlanes = pexConfig.Field(
        dtype=bool,
        default=True,
        doc="Unset mask planes other than NO_DATA in output calibration?",
    )
    stats = pexConfig.ConfigurableField(
        target=CalibStatsTask,
        doc="Background statistics configuration",
    )

    def validate(self):
        super().validate()

        # Vignette handling only applies to flats, and the two
        # vignette modes are mutually exclusive.
        if self.doVignetteMask and self.calibrationType != "flat":
            raise ValueError("doVignetteMask is only supported with ``flat`` calibrationType.")
        if self.doVignetteMask and self.doVignette:
            raise ValueError("Cannot set both doVignetteMask and doVignette.")
        if self.doPartialVignetteMask and self.calibrationType != "flat":
            raise ValueError("doPartialVignetteMask is only supported with ``flat`` calibrationType.")
class CalibCombineTask(pipeBase.PipelineTask):
    """Task to combine calib exposures."""

    ConfigClass = CalibCombineConfig
    _DefaultName = "cpCombine"

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        if "TwoFlats" not in type(self).__name__:
            # Skip the additional initialization for TwoFlats task.
            self.makeSubtask("stats")
            if self.config.doVignetteMask:
                self.makeSubtask("vignette")
            if self.config.doPartialVignetteMask:
                self.makeSubtask("partialVignette")

    def runQuantum(self, butlerQC, inputRefs, outputRefs):
        inputs = butlerQC.get(inputRefs)

        # Down-select exposures based on InputList scaling if necessary.
        if self.config.exposureScaling == "InputList":
            inputScales = inputs["inputScales"]

            if (detectorId := butlerQC.quantum.dataId["detector"]) not in inputScales["expScale"]:
                raise pipeBase.NoWorkFound(f"No input scaling for detector {detectorId}")

            # Use any amp for this check.
            ampName = list(inputScales["expScale"][detectorId].keys())[0]
            scaledExposures = list(inputScales["expScale"][detectorId][ampName].keys())

            # Keep only input handles (and their refs, in step) that
            # have a scale entry.
            inputExpHandles = [
                handle for handle in inputs["inputExpHandles"]
                if handle.dataId["exposure"] in scaledExposures
            ]
            inputs["inputExpHandles"] = inputExpHandles

            expHandleRefs = [
                ref for ref in inputRefs.inputExpHandles
                if ref.dataId["exposure"] in scaledExposures
            ]
        else:
            expHandleRefs = inputRefs.inputExpHandles

        dimensions = [dict(expHandle.dataId.required) for expHandle in expHandleRefs]
        inputs["inputDims"] = dimensions

        outputs = self.run(**inputs)
        butlerQC.put(outputs, outputRefs)

    def run(self, inputExpHandles, inputScales=None, inputDims=None):
        """Combine calib exposures for a single detector.

        Parameters
        ----------
        inputExpHandles : `list` [`lsst.daf.butler.DeferredDatasetHandle`]
            Input list of exposure handles to combine.
        inputScales : `dict` [`dict` [`dict` [`float`]]], optional
            Dictionary of scales, indexed by detector (`int`),
            amplifier (`int`), and exposure (`int`).  Used for
            'inputExps' scaling.
        inputDims : `list` [`dict`]
            List of dictionaries of input data dimensions/values.
            Each list entry should contain:

            ``"exposure"``
                exposure id value (`int`)
            ``"detector"``
                detector id value (`int`)

        Returns
        -------
        results : `lsst.pipe.base.Struct`
            The results struct containing:

            ``outputData``
                Final combined exposure generated from the inputs
                (`lsst.afw.image.Exposure`).

        Raises
        ------
        RuntimeError
            Raised if no input data is found.  Also raised if
            config.exposureScaling == InputList, and a necessary scale
            was not found.
        """
        width, height = self.getDimensions(inputExpHandles)
        stats = afwMath.StatisticsControl(
            numSigmaClip=self.config.clip,
            numIter=self.config.nIter,
            andMask=afwImage.Mask.getPlaneBitMask(self.config.mask),
        )

        # Normally, this would be set to NO_DATA.  However, we want
        # pixels in the combined calibration that had no good inputs
        # to be treated as a defect (which are interpolated) and not
        # like an empty region (such as vignetted corners, which are
        # not interpolated).  To handle this, we use the config to
        # override the mask plane used (BAD by default), and censor
        # all other mask planes below.
        stats.setNoGoodPixelsMask(afwImage.Mask.getPlaneBitMask(self.config.noGoodPixelsMask))

        numExps = len(inputExpHandles)
        if numExps < 1:
            raise RuntimeError("No valid input data")
        if numExps < self.config.maxVisitsToCalcErrorFromInputVariance:
            stats.setCalcErrorFromInputVariance(True)

        inputDetector = inputExpHandles[0].get(component="detector")

        # Create output exposure for combined data.
        combined = afwImage.MaskedImageF(width, height)
        combinedExp = afwImage.makeExposure(combined)

        # Apply scaling:
        expScales = []
        if inputDims is None:
            inputDims = [dict() for i in inputExpHandles]

        for index, (expHandle, dims) in enumerate(zip(inputExpHandles, inputDims)):
            scale = 1.0
            visitInfo = expHandle.get(component="visitInfo")
            if self.config.exposureScaling == "ExposureTime":
                scale = visitInfo.getExposureTime()
            elif self.config.exposureScaling == "DarkTime":
                scale = visitInfo.getDarkTime()
            elif self.config.exposureScaling == "MeanStats":
                # Note: there may a bug freeing memory here. TBD.
                exp = expHandle.get()
                scale = self.stats.run(exp)
                del exp
            elif self.config.exposureScaling == "InputList":
                visitId = dims.get("exposure", None)
                detectorId = dims.get("detector", None)
                if visitId is None or detectorId is None:
                    raise RuntimeError(f"Could not identify scaling for input {index} ({dims})")
                if detectorId not in inputScales["expScale"]:
                    raise RuntimeError(f"Could not identify a scaling for input {index}"
                                       f" detector {detectorId}")

                if self.config.scalingLevel in ("DETECTOR", "AMP"):
                    # A per-amp list of scales; applyScale realizes it
                    # as an image over the detector bbox.
                    scale = [inputScales["expScale"][detectorId][amp.getName()][visitId]
                             for amp in inputDetector]
                else:
                    raise RuntimeError(f"Unknown scaling level: {self.config.scalingLevel}")
            elif self.config.exposureScaling == "Unity":
                scale = 1.0
            else:
                raise RuntimeError(f"Unknown scaling type: {self.config.exposureScaling}.")

            expScales.append(scale)
            self.log.info("Scaling input %d by %s", index, scale)

        self.combine(combinedExp, inputExpHandles, expScales, stats)

        # The calibration should _never_ have NO_DATA set, as this is
        # handled poorly downstream, and so we raise here after
        # explicitly checking for pixels with that bit set.
        if self.config.checkNoData:
            test = ((combinedExp.mask.array & afwImage.Mask.getPlaneBitMask("NO_DATA")) > 0)
            if (nnodata := test.sum()) > 0:
                raise RuntimeError(f"Combined calibration has {nnodata} NO_DATA pixels!")

        if self.config.censorMaskPlanes:
            # Any mask planes that are not the noGoodPixelsMask plane
            # should be cleared.  This should remove things like the
            # CROSSTALK plane from printing into the calibration.  Run
            # after the checkNoData pass so that we don't censor what
            # we want to raise on.
            mask = combinedExp.mask
            for plane, value in mask.getMaskPlaneDict().items():
                if plane != self.config.noGoodPixelsMask:
                    mask.clearMaskPlane(value)

        self.interpolateNans(combined, maskPlane=self.config.noGoodPixelsMask)

        if self.config.doVignette:
            polygon = inputExpHandles[0].get(component="validPolygon")
            maskVignettedRegion(combined, polygon=polygon, vignetteValue=0.0)

        # Set the detector
        combinedExp.setDetector(inputDetector)

        if self.config.doVignetteMask:
            self.vignette.run(
                exposure=combinedExp,
                doUpdateMask=True,
                doUpdatePolygon=True,
                vignetteValue=0.0,
                log=self.log,
                maskPlane=["NO_DATA", "VIGNETTED"],
            )

        # The PARTLY_VIGNETTED plane is added unconditionally, even
        # when doPartialVignetteMask is off and it is never populated.
        combinedExp.mask.addMaskPlane("PARTLY_VIGNETTED")
        if self.config.doPartialVignetteMask:
            self.partialVignette.run(
                exposure=combinedExp,
                doUpdateMask=True,
                doUpdatePolygon=False,
                vignetteValue=None,
                log=self.log,
                maskPlane="PARTLY_VIGNETTED",
            )

        # Combine headers
        self.combineHeaders(inputExpHandles, combinedExp,
                            calibType=self.config.calibrationType, scales=expScales)

        # Do we need to set a filter?
        filterLabel = inputExpHandles[0].get(component="filter")
        self.setFilter(combinedExp, filterLabel)

        self.setFlatSource(combinedExp)

        # Set QA headers
        self.calibStats(combinedExp, self.config.calibrationType)

        # Return
        return pipeBase.Struct(
            outputData=combinedExp,
        )

    def getDimensions(self, expHandleList):
        """Get dimensions of the inputs.

        Parameters
        ----------
        expHandleList : `list` [`lsst.daf.butler.DeferredDatasetHandle`]
            Exposure handles to check the sizes of.

        Returns
        -------
        width, height : `int`
            Unique set of input dimensions.
        """
        dimList = [expHandle.get(component="bbox").getDimensions() for expHandle in expHandleList]

        return self.getSize(dimList)

    def getSize(self, dimList):
        """Determine a consistent size, given a list of image sizes.

        Parameters
        ----------
        dimList : `list` [`tuple` [`int`, `int`]]
            List of dimensions.

        Raises
        ------
        RuntimeError
            If input dimensions are inconsistent.

        Returns
        -------
        width, height : `int`
            Common dimensions.
        """
        dim = set((w, h) for w, h in dimList)
        if len(dim) != 1:
            raise RuntimeError(f"Inconsistent dimensions: {dim}")
        return dim.pop()

    def applyScale(self, exposure, bbox=None, scale=None):
        """Apply scale to input exposure.

        This implementation applies a flux scaling: the input exposure is
        divided by the provided scale.

        Parameters
        ----------
        exposure : `lsst.afw.image.Exposure`
            Exposure to scale.
        bbox : `lsst.geom.Box2I`
            BBox matching the segment of the exposure passed in.
        scale : `float` or `list` [`float`], optional
            Constant scale to divide the exposure by.
        """
        if scale is not None:
            mi = exposure.getMaskedImage()
            if isinstance(scale, list):
                # Create a realization of the per-amp scales as an
                # image we can take a subset of.  This may be slightly
                # slower than only populating the region we care
                # about, but this avoids needing to do arbitrary
                # numbers of offsets, etc.
                scaleExp = afwImage.MaskedImageF(exposure.getDetector().getBBox())
                for amp, ampScale in zip(exposure.getDetector(), scale):
                    scaleExp.image[amp.getBBox()] = ampScale
                scale = scaleExp[bbox]
            mi /= scale

    @staticmethod
    def _subBBoxIter(bbox, subregionSize):
        """Iterate over subregions of a bbox.

        Parameters
        ----------
        bbox : `lsst.geom.Box2I`
            Bounding box over which to iterate.
        subregionSize: `lsst.geom.Extent2I`
            Size of sub-bboxes.

        Yields
        ------
        subBBox : `lsst.geom.Box2I`
            Next sub-bounding box of size ``subregionSize`` or
            smaller; each ``subBBox`` is contained within ``bbox``, so
            it may be smaller than ``subregionSize`` at the edges of
            ``bbox``, but it will never be empty.
        """
        if bbox.isEmpty():
            raise RuntimeError(f"bbox {bbox} is empty")
        if subregionSize[0] < 1 or subregionSize[1] < 1:
            raise RuntimeError(f"subregionSize {subregionSize} must be nonzero")

        for rowShift in range(0, bbox.getHeight(), subregionSize[1]):
            for colShift in range(0, bbox.getWidth(), subregionSize[0]):
                subBBox = geom.Box2I(bbox.getMin() + geom.Extent2I(colShift, rowShift), subregionSize)
                subBBox.clip(bbox)
                if subBBox.isEmpty():
                    raise RuntimeError(f"Bug: empty bbox! bbox={bbox}, subregionSize={subregionSize}, "
                                       f"colShift={colShift}, rowShift={rowShift}")
                yield subBBox

    def combine(self, target, expHandleList, expScaleList, stats):
        """Combine multiple images.

        Parameters
        ----------
        target : `lsst.afw.image.Exposure`
            Output exposure to construct.
        expHandleList : `list` [`lsst.daf.butler.DeferredDatasetHandle`]
            Input exposure handles to combine.
        expScaleList : `list` [`float`]
            List of scales to apply to each input image.
        stats : `lsst.afw.math.StatisticsControl`
            Control explaining how to combine the input images.
        """
        combineType = afwMath.stringToStatisticsProperty(self.config.combine)

        # Stack subregion-by-subregion so only a slice of each input
        # is in memory at a time.
        subregionSizeArr = self.config.subregionSize
        subregionSize = geom.Extent2I(subregionSizeArr[0], subregionSizeArr[1])
        for subBbox in self._subBBoxIter(target.getBBox(), subregionSize):
            images = []
            for expHandle, expScale in zip(expHandleList, expScaleList):
                inputExp = expHandle.get(parameters={"bbox": subBbox})
                self.applyScale(inputExp, subBbox, expScale)
                images.append(inputExp.getMaskedImage())

            combinedSubregion = afwMath.statisticsStack(images, combineType, stats)
            target.maskedImage.assign(combinedSubregion, subBbox)

    def combineHeaders(self, expHandleList, calib=None, calibType="CALIB", scales=None, metadata=None):
        """Combine input headers to determine the set of common headers,
        supplemented by calibration inputs.  The calibration header is
        set in-place.

        Parameters
        ----------
        expHandleList : `list` [`lsst.daf.butler.DeferredDatasetHandle`]
            Input list of exposure handles to combine.
        calib : `lsst.afw.image.Exposure`, optional
            Output calibration to construct headers for.
        calibType : `str`, optional
            OBSTYPE the output should claim.
        scales : `list` [`float`], optional
            Scale values applied to each input to record.
        metadata : `lsst.daf.base.PropertyList`, optional
            Output metadata to add headers to.

        Returns
        -------
        header : `lsst.daf.base.PropertyList`
            Constructed header.

        Raises
        ------
        RuntimeError
            Raised if neither a calib nor a metadata was supplied.
        """
        # Header
        if calib is not None:
            header = calib.getMetadata()
        elif metadata is not None:
            header = metadata
        else:
            raise RuntimeError("No calibration and no metadata passed to combineHeaders")

        header.set("OBSTYPE", calibType)

        # Keywords we care about
        comments = {"TIMESYS": "Time scale for all dates",
                    "DATE-OBS": "Start date of earliest input observation",
                    "MJD-OBS": "[d] Start MJD of earliest input observation",
                    "DATE-BEG": "Start date of earliest input observation",
                    "MJD-BEG": "[d] Start MJD of earliest input observation",
                    "DATE-END": "End date of oldest input observation",
                    "MJD-END": "[d] End MJD of oldest input observation",
                    "MJD-AVG": "[d] MJD midpoint of all input observations",
                    "DATE-AVG": "Midpoint date of all input observations"}

        # Creation date.  Calibration team standard is for local time to be
        # available.  Also form UTC (not TAI) version for easier comparisons
        # across multiple processing sites.
        now = datetime.now(tz=UTC)
        header.set("CALIB_CREATION_DATETIME", now.strftime("%Y-%m-%dT%T"), comment="UTC of processing")
        local_time = now.astimezone()
        calibDate = local_time.strftime("%Y-%m-%d")
        calibTime = local_time.strftime("%X %Z")
        header.set("CALIB_CREATION_DATE", calibDate, comment="Local time day of creation")
        header.set("CALIB_CREATION_TIME", calibTime, comment="Local time in day of creation")

        # Merge input headers
        inputHeaders = [expHandle.get(component="metadata") for expHandle in expHandleList]
        merged = merge_headers(inputHeaders, mode="drop")

        # Remove any left over provenance headers that weren't dropped
        # (for example if different numbers of input datasets were present).
        strip_provenance_from_fits_header(merged)

        # Add the unchanging headers from all inputs to the given header.
        for k, v in merged.items():
            if k not in header:
                # The merged header should be a PropertyList so will have
                # comment information.
                header.set(k, v, comment=merged.getComment(k))

        # Ideally we would avoid going to butler again to read VisitInfo
        # information when there is already the header available. Unfortunately
        # the FITS reader strips VisitInfo keys from the header so it is no
        # longer possible to create an ObservationInfo with valid exposure
        # time (and DATE-BEG survives only because afw calls it DATE-OBS).

        # Construct list of visits
        visitInfoList = [expHandle.get(component="visitInfo") for expHandle in expHandleList]

        # Create provenance headers.
        for i, visit in enumerate(visitInfoList):
            if visit is None:
                continue
            header.set(f"CPP_INPUT_{i}", visit.id, comment="Input exposure ID")
            header.set(
                f"CPP_INPUT_DATE_{i}",
                str(visit.getDate().toAstropy().to_value("fits")),
                comment=f"TAI date of input {i}",
            )
            header.set(f"CPP_INPUT_EXPT_{i}", visit.getExposureTime(), comment="Input exposure time")
            if scales is not None:
                header.set(f"CPP_INPUT_SCALE_{i}", scales[i], comment="Scaling applied to input")

        # Sort the inputs into date order.
        # NOTE(review): a None entry in visitInfoList would break this
        # sort; presumably upstream guarantees visitInfo is present --
        # confirm against the producers of these inputs.
        visitInfoList = sorted(visitInfoList, key=attrgetter("date"))

        def add_time_offset(dt: lsst.daf.base.DateTime, offset: float) -> astropy.time.Time:
            # Calculate a astropy time with an offset applied.
            at = dt.toAstropy()
            if offset == 0.0:
                return at
            return at + astropy.time.TimeDelta(offset, format="sec")

        # Expand the date range by half an exposure time on each end,
        # so the range covers the full observations.
        earliest = add_time_offset(visitInfoList[0].date, visitInfoList[0].exposureTime / -2.0)
        newest = add_time_offset(visitInfoList[-1].date, visitInfoList[-1].exposureTime / 2.0)

        # Add standard DATE headers covering the range of inputs.
        dateCards = dates_to_fits(earliest, newest)

        for k, v in dateCards.items():
            header.set(k, v, comment=comments.get(k, None))

        # Populate a visitInfo.  Set the exposure time and dark time
        # to 0.0 or 1.0 as appropriate, and copy the instrument name
        # from one of the inputs.
        if calib:
            expTime = 1.0
            if self.config.connections.outputData.lower() == 'bias':
                expTime = 0.0
            inputVisitInfo = visitInfoList[0]
            date_avg = earliest + (newest - earliest) / 2.0
            visitInfo = afwImage.VisitInfo(
                exposureTime=expTime,
                darkTime=expTime,
                date=lsst.daf.base.DateTime(date_avg.isot, lsst.daf.base.DateTime.TAI),
                instrumentLabel=inputVisitInfo.instrumentLabel
            )
            calib.getInfo().setVisitInfo(visitInfo)

        return header

    def interpolateNans(self, exp, maskPlane="BAD"):
        """Interpolate over NANs in the combined image.

        NANs can result from masked areas on the CCD.  We don't want
        them getting into our science images, so we replace them with
        the median of the data.

        Parameters
        ----------
        exp : `lsst.afw.image.Exposure`
            Exp to check for NaNs.
        maskPlane : `str`, optional
            Mask plane to set where we have replaced a NAN.
        """
        array = exp.image.array
        mask = exp.mask.array
        variance = exp.variance.array
        badMaskValue = exp.mask.getPlaneBitMask(maskPlane)

        bad = np.isnan(array) | np.isnan(variance)

        if np.any(bad):
            median = np.median(array[~bad])
            medianVariance = np.median(variance[~bad])
            count = np.sum(bad)

            array[bad] = median
            mask[bad] |= badMaskValue
            variance[bad] = medianVariance
            self.log.warning("Found and fixed %s NAN pixels", count)

    @staticmethod
    def setFilter(exp, filterLabel):
        """Dummy function that will not assign a filter.

        Subclasses that track filters (e.g. the ByFilter variant)
        override this to actually assign ``filterLabel``.

        Parameters
        ----------
        exp : `lsst.afw.image.Exposure`
            Exposure to assign filter to.
        filterLabel : `lsst.afw.image.FilterLabel`
            Filter to assign.
        """
        pass

    def setFlatSource(self, exp):
        """Set the flat source metadata.

        No-op here; overridden by subclasses that record a flat source.

        Parameters
        ----------
        exp : `lsst.afw.image.Exposure`
            Exposure to set the flat source.
        """
        pass

    def calibStats(self, exp, calibrationType):
        """Measure bulk statistics for the calibration.

        Writes per-amplifier percentile levels and bad-pixel counts
        into the exposure metadata as QA headers.

        Parameters
        ----------
        exp : `lsst.afw.image.Exposure`
            Exposure to calculate statistics for.
        calibrationType : `str`
            Type of calibration to record in header.
        """
        metadata = exp.getMetadata()

        noGoodPixelsBit = afwImage.Mask.getPlaneBitMask(self.config.noGoodPixelsMask)

        # percentiles
        for amp in exp.getDetector():
            ampImage = exp[amp.getBBox()]
            percentileValues = np.nanpercentile(ampImage.image.array,
                                                self.config.distributionPercentiles)
            for level, value in zip(self.config.distributionPercentiles, percentileValues):
                key = f"LSST CALIB {calibrationType.upper()} {amp.getName()} DISTRIBUTION {level}-PCT"
                metadata[key] = value

            bad = ((ampImage.mask.array & noGoodPixelsBit) > 0)
            key = f"LSST CALIB {calibrationType.upper()} {amp.getName()} BADPIX-NUM"
            metadata[key] = bad.sum()
            if metadata[key] > 0:
                self.log.warning(f"Found {metadata[key]} pixels with "
                                 f"mask plane {self.config.noGoodPixelsMask} "
                                 f"for amp {amp.getName()}.")
# Create versions of the Connections, Config, and Task that support
# filter constraints.
class CalibCombineByFilterConnections(CalibCombineConnections,
                                      dimensions=("instrument", "detector", "physical_filter")):
    """Connections for `CalibCombineByFilterTask`, adding a
    ``physical_filter`` dimension to the scales and output.
    """

    inputScales = cT.Input(
        name="cpFilterScales",
        doc="Input scale factors to use.",
        storageClass="StructuredDataDict",
        dimensions=("instrument", "physical_filter"),
        multiple=False,
    )

    outputData = cT.Output(
        name="cpFilterProposal",
        doc="Output combined proposed calibration to be validated and certified.",
        storageClass="ExposureF",
        dimensions=("instrument", "detector", "physical_filter"),
        isCalibration=True,
    )

    def __init__(self, *, config=None):
        super().__init__(config=config)

        # As in the base class: only InputList scaling consumes the
        # scales dataset.
        if config and config.exposureScaling != "InputList":
            self.inputs.discard("inputScales")
class CalibCombineByFilterConfig(CalibCombineConfig,
                                 pipelineConnections=CalibCombineByFilterConnections):
    """Configuration for combining calib exposures with filter
    constraints.
    """

    flatSource = pexConfig.ChoiceField(
        dtype=str,
        doc="Type of flat constructed.  This config is only used if "
            "calibrationType==``flat``.",
        default="DOME",
        allowed={
            "DOME": "Flat constructed from the dome screen with 'white' light.",
            "TWILIGHT": "Flat constructed from twilight images.",
            "PSEUDO": "Flat constructed from the instrument model.",
            "SKY": "Flat constructed from night-sky images.",
        }
    )
class CalibCombineByFilterTask(CalibCombineTask):
    """Task to combine calib exposures, preserving filter information."""

    ConfigClass = CalibCombineByFilterConfig
    _DefaultName = "cpFilterCombine"

    @staticmethod
    def setFilter(exp, filterLabel):
        """Assign the given filter to the exposure, if one was provided.

        Parameters
        ----------
        exp : `lsst.afw.image.Exposure`
            Exposure to assign filter to.
        filterLabel : `lsst.afw.image.FilterLabel`
            Filter to assign.
        """
        if not filterLabel:
            return
        exp.setFilter(filterLabel)

    def setFlatSource(self, exp):
        """Record the configured flat source in the exposure metadata.

        Only applies when a flat is being constructed; other
        calibration types are left untouched.

        Parameters
        ----------
        exp : `lsst.afw.image.Exposure`
            Exposure to set the flat source on.
        """
        if self.config.calibrationType == "flat":
            exp.metadata["FLATSRC"] = self.config.flatSource
class CalibCombineTwoFlatsByFilterConnections(
    pipeBase.PipelineTaskConnections,
    dimensions=("instrument", "detector", "physical_filter"),
):
    """Connections for `CalibCombineTwoFlatsByFilterTask`: two input
    flats and one combined output flat.
    """

    inputFlatOneHandle = cT.Input(
        name="flat_blue",
        doc="Input first processed flat to combine.",
        storageClass="Exposure",
        dimensions=("instrument", "detector", "physical_filter"),
        multiple=False,
        deferLoad=True,
        isCalibration=True,
    )
    inputFlatTwoHandle = cT.Input(
        name="flat_red",
        doc="Input second processed flat to combine.",
        storageClass="Exposure",
        dimensions=("instrument", "detector", "physical_filter"),
        multiple=False,
        deferLoad=True,
        isCalibration=True,
    )
    outputData = cT.Output(
        name="flat",
        doc="Output combined flat.",
        storageClass="ExposureF",
        dimensions=("instrument", "detector", "physical_filter"),
        isCalibration=True,
    )
class CalibCombineTwoFlatsByFilterConfig(
    pipeBase.PipelineTaskConfig,
    pipelineConnections=CalibCombineTwoFlatsByFilterConnections,
):
    """Configuration for combining two flats into a weighted average."""

    calibrationType = pexConfig.Field(
        dtype=str,
        default="flat",
        doc="Name of calibration to be generated.",
    )
    weightOne = pexConfig.Field(
        dtype=float,
        default=0.5,
        doc="Weight for first flat (second weight will be 1.0 - weightOne).",
    )
    noGoodPixelsMask = pexConfig.Field(
        dtype=str,
        default="BAD",
        doc="Mask bit to set when there are no good input pixels. See code comments for details.",
    )
    combine = pexConfig.Field(
        dtype=str,
        default="MEAN",
        doc="Statistic name to use for combination (from `~lsst.afw.math.Property`)",
    )
    subregionSize = pexConfig.ListField(
        dtype=int,
        doc="Width, height of subregion size.",
        length=2,
        # This is 200 rows for all detectors smaller than 10k in width.
        default=(10000, 200),
    )
    distributionPercentiles = pexConfig.ListField(
        dtype=float,
        default=[0, 5, 16, 50, 84, 95, 100],
        doc="Percentile levels to measure on the final combined calibration.",
    )

    def validate(self):
        super().validate()

        # The task scales the inputs by 0.5/weightOne and
        # 0.5/(1.0 - weightOne); a weight at (or beyond) 0 or 1 would
        # divide by zero or produce a negative scale, so reject it
        # here rather than failing mid-combination.
        if not 0.0 < self.weightOne < 1.0:
            raise ValueError(f"weightOne must be strictly between 0.0 and 1.0; got {self.weightOne}.")
class CalibCombineTwoFlatsByFilterTask(CalibCombineTask):
    """Task to combine two flats (weighted).

    The redundant no-op ``__init__`` override has been removed; the
    inherited constructor already skips subtask creation for
    ``TwoFlats`` tasks.
    """

    ConfigClass = CalibCombineTwoFlatsByFilterConfig
    _DefaultName = "cpCombineTwoFlatsByFilter"

    def runQuantum(self, butlerQC, inputRefs, outputRefs):
        # This override is required: the parent runQuantum consults
        # config.exposureScaling, which this task's config does not
        # define.  No input down-selection or dimension bookkeeping is
        # needed here, so this is a plain get/run/put quantum.
        inputs = butlerQC.get(inputRefs)

        outputs = self.run(**inputs)
        butlerQC.put(outputs, outputRefs)

    def run(self, *, inputFlatOneHandle, inputFlatTwoHandle):
        """Combine two flats (blue/red) into one flat.

        Parameters
        ----------
        inputFlatOneHandle : `lsst.daf.butler.DeferredDatasetHandle`
            The first flat handle.
        inputFlatTwoHandle : `lsst.daf.butler.DeferredDatasetHandle`
            The second flat handle.

        Returns
        -------
        results : `lsst.pipe.base.Struct`
            The results struct containing:

            ``outputData``
                Final combined exposure generated from the inputs.
                (`lsst.afw.image.ExposureF`).
        """
        inputExpHandles = [inputFlatOneHandle, inputFlatTwoHandle]

        width, height = self.getDimensions(inputExpHandles)

        stats = afwMath.StatisticsControl()
        stats.setCalcErrorFromInputVariance(True)

        # See comment in CalibCombineTask.run().
        stats.setNoGoodPixelsMask(afwImage.Mask.getPlaneBitMask(self.config.noGoodPixelsMask))

        inputDetector = inputExpHandles[0].get(component="detector")

        combined = afwImage.MaskedImageF(width, height)
        combinedExp = afwImage.makeExposure(combined)

        # We need an extra factor of 2 because of the way the scaling works.
        expScales = [0.5/self.config.weightOne, 0.5/(1.0 - self.config.weightOne)]

        self.combine(combinedExp, inputExpHandles, expScales, stats)

        # Set the detector
        combinedExp.setDetector(inputDetector)

        # Set the validPolygon
        # Note that NO_DATA regions (due to vignetting) are propagated
        # in the combine task above.
        polygon = inputExpHandles[0].get(component="validPolygon")
        if polygon is not None:
            combinedExp.info.setValidPolygon(polygon)

        self.combineHeaders(
            inputExpHandles,
            combinedExp,
            calibType=self.config.calibrationType,
            scales=expScales,
        )

        filterLabel = inputExpHandles[0].get(component="filter")
        if filterLabel:
            combinedExp.setFilter(filterLabel)

        self.setFlatSource(combinedExp)

        # Set QA headers
        self.calibStats(combinedExp, self.config.calibrationType)

        # Return
        return pipeBase.Struct(
            outputData=combinedExp,
        )