Coverage for python/lsst/cp/pipe/cpCombine.py: 21%
248 statements
coverage.py v7.4.4, created at 2024-04-04 03:58 -0700
# This file is part of cp_pipe.
#
# Developed for the LSST Data Management System.
# This product includes software developed by the LSST Project
# (https://www.lsst.org).
# See the COPYRIGHT file at the top-level directory of this distribution
# for details of code ownership.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
import time

import lsst.geom as geom
import lsst.pex.config as pexConfig
import lsst.pipe.base as pipeBase
import lsst.pipe.base.connectionTypes as cT
import lsst.afw.math as afwMath
import lsst.afw.image as afwImage

from lsst.ip.isr.vignette import maskVignettedRegion

from astro_metadata_translator import merge_headers, ObservationGroup
from astro_metadata_translator.serialize import dates_to_fits


__all__ = ["CalibStatsConfig", "CalibStatsTask",
           "CalibCombineConfig", "CalibCombineConnections", "CalibCombineTask",
           "CalibCombineByFilterConfig", "CalibCombineByFilterConnections", "CalibCombineByFilterTask"]

# CalibStatsConfig/CalibStatsTask from pipe_base/constructCalibs.py
class CalibStatsConfig(pexConfig.Config):
    """Parameters controlling the measurement of background
    statistics.
    """

    stat = pexConfig.Field(
        dtype=str,
        default="MEANCLIP",
        doc="Statistic name to use to estimate background (from `~lsst.afw.math.Property`)",
    )
    clip = pexConfig.Field(
        dtype=float,
        default=3.0,
        doc="Clipping threshold for background",
    )
    nIter = pexConfig.Field(
        dtype=int,
        default=3,
        doc="Clipping iterations for background",
    )
    mask = pexConfig.ListField(
        dtype=str,
        default=["DETECTED", "BAD", "NO_DATA"],
        doc="Mask planes to reject",
    )

class CalibStatsTask(pipeBase.Task):
    """Measure statistics on the background.

    This can be useful for scaling the background, e.g., for flats and
    fringe frames.
    """

    ConfigClass = CalibStatsConfig

    def run(self, exposureOrImage):
        """Measure a particular statistic on an image (of some sort).

        Parameters
        ----------
        exposureOrImage : `lsst.afw.image.Exposure`,
                          `lsst.afw.image.MaskedImage`, or
                          `lsst.afw.image.Image`
            Exposure or image to calculate statistics on.

        Returns
        -------
        results : `float`
            Resulting statistic value.
        """
        stats = afwMath.StatisticsControl(self.config.clip, self.config.nIter,
                                          afwImage.Mask.getPlaneBitMask(self.config.mask))
        try:
            image = exposureOrImage.getMaskedImage()
        except Exception:
            try:
                image = exposureOrImage.getImage()
            except Exception:
                image = exposureOrImage
        statType = afwMath.stringToStatisticsProperty(self.config.stat)
        return afwMath.makeStatistics(image, statType, stats).getValue()
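
# A minimal usage sketch for CalibStatsTask (editor's illustration, not part
# of the pipeline; assumes the LSST stack is importable and uses made-up
# image values):
#
#     import numpy as np
#     import lsst.afw.image as afwImage
#     from lsst.cp.pipe.cpCombine import CalibStatsTask
#
#     image = afwImage.ImageF(200, 200)
#     image.array[:, :] = np.random.normal(1000.0, 5.0, size=(200, 200))
#     background = CalibStatsTask().run(image)  # clipped mean (MEANCLIP) by default
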
class CalibCombineConnections(pipeBase.PipelineTaskConnections,
                              dimensions=("instrument", "detector")):
    inputExpHandles = cT.Input(
        name="cpInputs",
        doc="Input pre-processed exposures to combine.",
        storageClass="Exposure",
        dimensions=("instrument", "detector", "exposure"),
        multiple=True,
        deferLoad=True,
    )
    inputScales = cT.Input(
        name="cpScales",
        doc="Input scale factors to use.",
        storageClass="StructuredDataDict",
        dimensions=("instrument", ),
        multiple=False,
    )

    outputData = cT.Output(
        name="cpProposal",
        doc="Output combined proposed calibration to be validated and certified.",
        storageClass="ExposureF",
        dimensions=("instrument", "detector"),
        isCalibration=True,
    )

    def __init__(self, *, config=None):
        super().__init__(config=config)

        if config and config.exposureScaling != "InputList":
            self.inputs.discard("inputScales")

# CalibCombineConfig/CalibCombineTask from pipe_base/constructCalibs.py
class CalibCombineConfig(pipeBase.PipelineTaskConfig,
                         pipelineConnections=CalibCombineConnections):
    """Configuration for combining calib exposures.
    """

    calibrationType = pexConfig.Field(
        dtype=str,
        default="calibration",
        doc="Name of calibration to be generated.",
    )

    exposureScaling = pexConfig.ChoiceField(
        dtype=str,
        allowed={
            "Unity": "Do not scale inputs. Scale factor is 1.0.",
            "ExposureTime": "Scale inputs by their exposure time.",
            "DarkTime": "Scale inputs by their dark time.",
            "MeanStats": "Scale inputs based on their mean values.",
            "InputList": "Scale inputs based on a list of values.",
        },
        default="Unity",
        doc="Scaling to be applied to each input exposure.",
    )
    scalingLevel = pexConfig.ChoiceField(
        dtype=str,
        allowed={
            "DETECTOR": "Scale by detector.",
            "AMP": "Scale by amplifier.",
        },
        default="DETECTOR",
        doc="Region to scale.",
    )
    maxVisitsToCalcErrorFromInputVariance = pexConfig.Field(
        dtype=int,
        default=5,
        doc="Maximum number of visits to estimate variance from input variance, not per-pixel spread",
    )
    subregionSize = pexConfig.ListField(
        dtype=int,
        doc="Width, height of subregion size.",
        length=2,
        # This is 200 rows for all detectors smaller than 10k in width.
        default=(10000, 200),
    )

    doVignette = pexConfig.Field(
        dtype=bool,
        default=False,
        doc="Copy vignette polygon to output and censor vignetted pixels?"
    )

    distributionPercentiles = pexConfig.ListField(
        dtype=float,
        default=[0, 5, 16, 50, 84, 95, 100],
        doc="Percentile levels to measure on the final combined calibration.",
    )
    mask = pexConfig.ListField(
        dtype=str,
        default=["SAT", "DETECTED", "INTRP"],
        doc="Mask planes to respect",
    )
    combine = pexConfig.Field(
        dtype=str,
        default="MEANCLIP",
        doc="Statistic name to use for combination (from `~lsst.afw.math.Property`)",
    )
    clip = pexConfig.Field(
        dtype=float,
        default=3.0,
        doc="Clipping threshold for combination",
    )
    nIter = pexConfig.Field(
        dtype=int,
        default=3,
        doc="Clipping iterations for combination",
    )
    noGoodPixelsMask = pexConfig.Field(
        dtype=str,
        default="BAD",
        doc="Mask bit to set when there are no good input pixels.",
    )
    checkNoData = pexConfig.Field(
        dtype=bool,
        default=True,
        doc="Check that the calibration does not have NO_DATA set?",
    )
    stats = pexConfig.ConfigurableField(
        target=CalibStatsTask,
        doc="Background statistics configuration",
    )

class CalibCombineTask(pipeBase.PipelineTask):
    """Task to combine calib exposures."""

    ConfigClass = CalibCombineConfig
    _DefaultName = "cpCombine"

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.makeSubtask("stats")

    def runQuantum(self, butlerQC, inputRefs, outputRefs):
        inputs = butlerQC.get(inputRefs)

        dimensions = [dict(expHandle.dataId.required) for expHandle in inputRefs.inputExpHandles]
        inputs["inputDims"] = dimensions

        outputs = self.run(**inputs)
        butlerQC.put(outputs, outputRefs)

    def run(self, inputExpHandles, inputScales=None, inputDims=None):
        """Combine calib exposures for a single detector.

        Parameters
        ----------
        inputExpHandles : `list` [`lsst.daf.butler.DeferredDatasetHandle`]
            Input list of exposure handles to combine.
        inputScales : `dict` [`dict` [`dict` [`float`]]], optional
            Dictionary of scales, indexed by detector (`int`),
            amplifier (`int`), and exposure (`int`).  Used for
            'inputExps' scaling.
        inputDims : `list` [`dict`]
            List of dictionaries of input data dimensions/values.
            Each list entry should contain:

            ``"exposure"``
                exposure id value (`int`)
            ``"detector"``
                detector id value (`int`)

        Returns
        -------
        results : `lsst.pipe.base.Struct`
            The results struct containing:

            ``outputData``
                Final combined exposure generated from the inputs
                (`lsst.afw.image.Exposure`).

        Raises
        ------
        RuntimeError
            Raised if no input data is found.  Also raised if
            config.exposureScaling == InputList and a necessary scale
            was not found.
        """
        width, height = self.getDimensions(inputExpHandles)
        stats = afwMath.StatisticsControl(
            numSigmaClip=self.config.clip,
            numIter=self.config.nIter,
            andMask=afwImage.Mask.getPlaneBitMask(self.config.mask),
        )
        stats.setNoGoodPixelsMask(afwImage.Mask.getPlaneBitMask(self.config.noGoodPixelsMask))
        numExps = len(inputExpHandles)
        if numExps < 1:
            raise RuntimeError("No valid input data")
        if numExps < self.config.maxVisitsToCalcErrorFromInputVariance:
            stats.setCalcErrorFromInputVariance(True)

        inputDetector = inputExpHandles[0].get(component="detector")

        # Create output exposure for combined data.
        combined = afwImage.MaskedImageF(width, height)
        combinedExp = afwImage.makeExposure(combined)

        # Apply scaling:
        expScales = []
        if inputDims is None:
            inputDims = [dict() for i in inputExpHandles]

        for index, (expHandle, dims) in enumerate(zip(inputExpHandles, inputDims)):
            scale = 1.0
            visitInfo = expHandle.get(component="visitInfo")
            if self.config.exposureScaling == "ExposureTime":
                scale = visitInfo.getExposureTime()
            elif self.config.exposureScaling == "DarkTime":
                scale = visitInfo.getDarkTime()
            elif self.config.exposureScaling == "MeanStats":
                # Note: there may be a bug freeing memory here.  TBD.
                exp = expHandle.get()
                scale = self.stats.run(exp)
                del exp
            elif self.config.exposureScaling == "InputList":
                visitId = dims.get("exposure", None)
                detectorId = dims.get("detector", None)
                if visitId is None or detectorId is None:
                    raise RuntimeError(f"Could not identify scaling for input {index} ({dims})")
                if detectorId not in inputScales["expScale"]:
                    raise RuntimeError(f"Could not identify a scaling for input {index}"
                                       f" detector {detectorId}")

                if self.config.scalingLevel == "DETECTOR":
                    if visitId not in inputScales["expScale"][detectorId]:
                        raise RuntimeError(f"Could not identify a scaling for input {index}"
                                           f" detector {detectorId} visit {visitId}")
                    scale = inputScales["expScale"][detectorId][visitId]
                elif self.config.scalingLevel == "AMP":
                    scale = [inputScales["expScale"][detectorId][amp.getName()][visitId]
                             for amp in inputDetector]
                else:
                    raise RuntimeError(f"Unknown scaling level: {self.config.scalingLevel}")
            elif self.config.exposureScaling == "Unity":
                scale = 1.0
            else:
                raise RuntimeError(f"Unknown scaling type: {self.config.exposureScaling}.")

            expScales.append(scale)
            self.log.info("Scaling input %d by %s", index, scale)

        self.combine(combinedExp, inputExpHandles, expScales, stats)

        # The calibration should _never_ have NO_DATA set.
        if self.config.checkNoData:
            test = ((combinedExp.mask.array & afwImage.Mask.getPlaneBitMask("NO_DATA")) > 0)
            if (nnodata := test.sum()) > 0:
                raise RuntimeError(f"Combined calibration has {nnodata} NO_DATA pixels!")

        self.interpolateNans(combined)

        if self.config.doVignette:
            polygon = inputExpHandles[0].get(component="validPolygon")
            maskVignettedRegion(combined, polygon=polygon, vignetteValue=0.0)

        # Combine headers
        self.combineHeaders(inputExpHandles, combinedExp,
                            calibType=self.config.calibrationType, scales=expScales)

        # Set the detector
        combinedExp.setDetector(inputDetector)

        # Do we need to set a filter?
        filterLabel = inputExpHandles[0].get(component="filter")
        self.setFilter(combinedExp, filterLabel)

        # Set QA headers
        self.calibStats(combinedExp, self.config.calibrationType)

        # Return
        return pipeBase.Struct(
            outputData=combinedExp,
        )
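
    # A hedged configuration sketch (editor's illustration; the "dark" values
    # below are just examples, not the pipeline's actual settings): scale
    # inputs by their dark time and clip-combine them.
    #
    #     config = CalibCombineConfig()
    #     config.calibrationType = "dark"
    #     config.exposureScaling = "DarkTime"
    #     config.combine = "MEANCLIP"
    #     config.connections.outputData = "dark"
    #     task = CalibCombineTask(config=config)
    #     # task.run(inputExpHandles, inputDims=inputDims) would then return the
    #     # combined proposed calibration as results.outputData.
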
    def getDimensions(self, expHandleList):
        """Get dimensions of the inputs.

        Parameters
        ----------
        expHandleList : `list` [`lsst.daf.butler.DeferredDatasetHandle`]
            Exposure handles to check the sizes of.

        Returns
        -------
        width, height : `int`
            Unique set of input dimensions.
        """
        dimList = [expHandle.get(component="bbox").getDimensions() for expHandle in expHandleList]

        return self.getSize(dimList)

    def getSize(self, dimList):
        """Determine a consistent size, given a list of image sizes.

        Parameters
        ----------
        dimList : `list` [`tuple` [`int`, `int`]]
            List of dimensions.

        Raises
        ------
        RuntimeError
            If input dimensions are inconsistent.

        Returns
        -------
        width, height : `int`
            Common dimensions.
        """
        dim = set((w, h) for w, h in dimList)
        if len(dim) != 1:
            raise RuntimeError("Inconsistent dimensions: %s" % dim)
        return dim.pop()

    def applyScale(self, exposure, bbox=None, scale=None):
        """Apply scale to input exposure.

        This implementation applies a flux scaling: the input exposure is
        divided by the provided scale.

        Parameters
        ----------
        exposure : `lsst.afw.image.Exposure`
            Exposure to scale.
        bbox : `lsst.geom.Box2I`
            BBox matching the segment of the exposure passed in.
        scale : `float` or `list` [`float`], optional
            Constant scale to divide the exposure by.
        """
        if scale is not None:
            mi = exposure.getMaskedImage()
            if isinstance(scale, list):
                # Create a realization of the per-amp scales as an
                # image we can take a subset of.  This may be slightly
                # slower than only populating the region we care
                # about, but this avoids needing to do arbitrary
                # numbers of offsets, etc.
                scaleExp = afwImage.MaskedImageF(exposure.getDetector().getBBox())
                for amp, ampScale in zip(exposure.getDetector(), scale):
                    scaleExp.image[amp.getBBox()] = ampScale
                scale = scaleExp[bbox]
            mi /= scale

    @staticmethod
    def _subBBoxIter(bbox, subregionSize):
        """Iterate over subregions of a bbox.

        Parameters
        ----------
        bbox : `lsst.geom.Box2I`
            Bounding box over which to iterate.
        subregionSize : `lsst.geom.Extent2I`
            Size of sub-bboxes.

        Yields
        ------
        subBBox : `lsst.geom.Box2I`
            Next sub-bounding box of size ``subregionSize`` or
            smaller; each ``subBBox`` is contained within ``bbox``, so
            it may be smaller than ``subregionSize`` at the edges of
            ``bbox``, but it will never be empty.
        """
        if bbox.isEmpty():
            raise RuntimeError("bbox %s is empty" % (bbox,))
        if subregionSize[0] < 1 or subregionSize[1] < 1:
            raise RuntimeError("subregionSize %s must be nonzero" % (subregionSize,))

        for rowShift in range(0, bbox.getHeight(), subregionSize[1]):
            for colShift in range(0, bbox.getWidth(), subregionSize[0]):
                subBBox = geom.Box2I(bbox.getMin() + geom.Extent2I(colShift, rowShift), subregionSize)
                subBBox.clip(bbox)
                if subBBox.isEmpty():
                    raise RuntimeError("Bug: empty bbox! bbox=%s, subregionSize=%s, "
                                       "colShift=%s, rowShift=%s" %
                                       (bbox, subregionSize, colShift, rowShift))
                yield subBBox
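
    # A minimal sketch of how _subBBoxIter tiles a detector (editor's
    # illustration with made-up dimensions; assumes lsst.geom is importable):
    #
    #     import lsst.geom as geom
    #     bbox = geom.Box2I(geom.Point2I(0, 0), geom.Extent2I(4096, 4004))
    #     subs = list(CalibCombineTask._subBBoxIter(bbox, geom.Extent2I(10000, 200)))
    #     # 21 strips: twenty of 4096x200, plus a final 4096x4 strip clipped to bbox.
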
    def combine(self, target, expHandleList, expScaleList, stats):
        """Combine multiple images.

        Parameters
        ----------
        target : `lsst.afw.image.Exposure`
            Output exposure to construct.
        expHandleList : `list` [`lsst.daf.butler.DeferredDatasetHandle`]
            Input exposure handles to combine.
        expScaleList : `list` [`float`]
            List of scales to apply to each input image.
        stats : `lsst.afw.math.StatisticsControl`
            Control explaining how to combine the input images.
        """
        combineType = afwMath.stringToStatisticsProperty(self.config.combine)

        subregionSizeArr = self.config.subregionSize
        subregionSize = geom.Extent2I(subregionSizeArr[0], subregionSizeArr[1])
        for subBbox in self._subBBoxIter(target.getBBox(), subregionSize):
            images = []
            for expHandle, expScale in zip(expHandleList, expScaleList):
                inputExp = expHandle.get(parameters={"bbox": subBbox})
                self.applyScale(inputExp, subBbox, expScale)
                images.append(inputExp.getMaskedImage())

            combinedSubregion = afwMath.statisticsStack(images, combineType, stats)
            target.maskedImage.assign(combinedSubregion, subBbox)
    def combineHeaders(self, expHandleList, calib, calibType="CALIB", scales=None):
        """Combine input headers to determine the set of common headers,
        supplemented by calibration inputs.  The calibration header is
        set in-place.

        Parameters
        ----------
        expHandleList : `list` [`lsst.daf.butler.DeferredDatasetHandle`]
            Input list of exposure handles to combine.
        calib : `lsst.afw.image.Exposure`
            Output calibration to construct headers for.
        calibType : `str`, optional
            OBSTYPE the output should claim.
        scales : `list` [`float`], optional
            Scale values applied to each input to record.

        Returns
        -------
        header : `lsst.daf.base.PropertyList`
            Constructed header.
        """
        # Header
        header = calib.getMetadata()
        header.set("OBSTYPE", calibType)

        # Keywords we care about
        comments = {"TIMESYS": "Time scale for all dates",
                    "DATE-OBS": "Start date of earliest input observation",
                    "MJD-OBS": "[d] Start MJD of earliest input observation",
                    "DATE-END": "End date of latest input observation",
                    "MJD-END": "[d] End MJD of latest input observation",
                    "MJD-AVG": "[d] MJD midpoint of all input observations",
                    "DATE-AVG": "Midpoint date of all input observations"}

        # Creation date
        now = time.localtime()
        calibDate = time.strftime("%Y-%m-%d", now)
        calibTime = time.strftime("%X %Z", now)
        header.set("CALIB_CREATION_DATE", calibDate)
        header.set("CALIB_CREATION_TIME", calibTime)

        # Merge input headers
        inputHeaders = [expHandle.get(component="metadata") for expHandle in expHandleList]
        merged = merge_headers(inputHeaders, mode="drop")

        # Scan the first header for items that were dropped due to
        # conflict, and replace them.
        for k, v in merged.items():
            if k not in header:
                md = inputHeaders[0]
                comment = md.getComment(k) if k in md else None
                header.set(k, v, comment=comment)

        # Construct list of visits
        visitInfoList = [expHandle.get(component="visitInfo") for expHandle in expHandleList]
        for i, visit in enumerate(visitInfoList):
            if visit is None:
                continue
            header.set("CPP_INPUT_%d" % (i,), visit.id)
            header.set("CPP_INPUT_DATE_%d" % (i,), str(visit.getDate()))
            header.set("CPP_INPUT_EXPT_%d" % (i,), visit.getExposureTime())
            if scales is not None:
                header.set("CPP_INPUT_SCALE_%d" % (i,), scales[i])

        # Populate a visitInfo.  Set the exposure time and dark time
        # to 0.0 or 1.0 as appropriate, and copy the instrument name
        # from one of the inputs.
        expTime = 1.0
        if self.config.connections.outputData.lower() == 'bias':
            expTime = 0.0
        inputVisitInfo = visitInfoList[0]
        visitInfo = afwImage.VisitInfo(exposureTime=expTime, darkTime=expTime,
                                       instrumentLabel=inputVisitInfo.instrumentLabel)
        calib.getInfo().setVisitInfo(visitInfo)

        # Not yet working: DM-22302
        # Create an observation group so we can add some standard headers
        # independent of the form in the input files.
        # Use a try block in case we are dealing with unexpected data headers.
        try:
            group = ObservationGroup(visitInfoList, pedantic=False)
        except Exception:
            self.log.warning("Exception making an obs group for headers. Continuing.")
            # Fall back to setting a DATE-OBS from the calibDate
            dateCards = {"DATE-OBS": "{}T00:00:00.00".format(calibDate)}
            comments["DATE-OBS"] = "Date of start of day of calibration creation"
        else:
            oldest, newest = group.extremes()
            dateCards = dates_to_fits(oldest.datetime_begin, newest.datetime_end)

        for k, v in dateCards.items():
            header.set(k, v, comment=comments.get(k, None))

        return header
    def interpolateNans(self, exp):
        """Interpolate over NANs in the combined image.

        NANs can result from masked areas on the CCD.  We don't want
        them getting into our science images, so we replace them with
        the median of the image.

        Parameters
        ----------
        exp : `lsst.afw.image.Exposure`
            Exposure to check for NaNs.
        """
        array = exp.getImage().getArray()
        bad = np.isnan(array)
        if np.any(bad):
            median = np.median(array[np.logical_not(bad)])
            count = np.sum(bad)
            array[bad] = median
            self.log.warning("Found and fixed %s NAN pixels", count)
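
    # The NaN replacement above is equivalent to this plain-numpy sketch
    # (editor's illustration with a toy array):
    #
    #     import numpy as np
    #     array = np.array([[1.0, np.nan], [3.0, 5.0]], dtype=np.float32)
    #     bad = np.isnan(array)
    #     array[bad] = np.median(array[~bad])  # the NaN becomes 3.0 here
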
    @staticmethod
    def setFilter(exp, filterLabel):
        """Dummy function that will not assign a filter.

        Parameters
        ----------
        exp : `lsst.afw.image.Exposure`
            Exposure to assign filter to.
        filterLabel : `lsst.afw.image.FilterLabel`
            Filter to assign.
        """
        pass
    def calibStats(self, exp, calibrationType):
        """Measure bulk statistics for the calibration.

        Parameters
        ----------
        exp : `lsst.afw.image.Exposure`
            Exposure to calculate statistics for.
        calibrationType : `str`
            Type of calibration to record in header.
        """
        metadata = exp.getMetadata()

        noGoodPixelsBit = afwImage.Mask.getPlaneBitMask(self.config.noGoodPixelsMask)

        # percentiles
        for amp in exp.getDetector():
            ampImage = exp[amp.getBBox()]
            percentileValues = np.nanpercentile(ampImage.image.array,
                                                self.config.distributionPercentiles)
            for level, value in zip(self.config.distributionPercentiles, percentileValues):
                key = f"LSST CALIB {calibrationType.upper()} {amp.getName()} DISTRIBUTION {level}-PCT"
                metadata[key] = value

            bad = ((ampImage.mask.array & noGoodPixelsBit) > 0)
            key = f"LSST CALIB {calibrationType.upper()} {amp.getName()} BADPIX-NUM"
            metadata[key] = bad.sum()
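
    # For example, with calibrationType="flat", an amplifier named "C00", and
    # percentile level 50.0 (hypothetical names), the keys written above are
    # (editor's illustration):
    #
    #     >>> f"LSST CALIB {'flat'.upper()} C00 DISTRIBUTION {50.0}-PCT"
    #     'LSST CALIB FLAT C00 DISTRIBUTION 50.0-PCT'
    #     >>> f"LSST CALIB {'flat'.upper()} C00 BADPIX-NUM"
    #     'LSST CALIB FLAT C00 BADPIX-NUM'
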
# Create versions of the Connections, Config, and Task that support
# filter constraints.
class CalibCombineByFilterConnections(CalibCombineConnections,
                                      dimensions=("instrument", "detector", "physical_filter")):
    inputScales = cT.Input(
        name="cpFilterScales",
        doc="Input scale factors to use.",
        storageClass="StructuredDataDict",
        dimensions=("instrument", "physical_filter"),
        multiple=False,
    )

    outputData = cT.Output(
        name="cpFilterProposal",
        doc="Output combined proposed calibration to be validated and certified.",
        storageClass="ExposureF",
        dimensions=("instrument", "detector", "physical_filter"),
        isCalibration=True,
    )

    def __init__(self, *, config=None):
        super().__init__(config=config)

        if config and config.exposureScaling != "InputList":
            self.inputs.discard("inputScales")


class CalibCombineByFilterConfig(CalibCombineConfig,
                                 pipelineConnections=CalibCombineByFilterConnections):
    pass


class CalibCombineByFilterTask(CalibCombineTask):
    """Task to combine calib exposures."""

    ConfigClass = CalibCombineByFilterConfig
    _DefaultName = "cpFilterCombine"

    @staticmethod
    def setFilter(exp, filterLabel):
        """Assign the filter to the combined exposure, if one is supplied.

        Parameters
        ----------
        exp : `lsst.afw.image.Exposure`
            Exposure to assign filter to.
        filterLabel : `lsst.afw.image.FilterLabel`
            Filter to assign.
        """
        if filterLabel:
            exp.setFilter(filterLabel)