from astro_metadata_translator import merge_headers, ObservationGroup
from astro_metadata_translator.serialize import dates_to_fits
38 """Parameters controlling the measurement of background statistics.
40 stat = pexConfig.Field(
42 default=int(afwMath.MEANCLIP),
43 doc=
"Statistic to use to estimate background (from lsst.afw.math)",
45 clip = pexConfig.Field(
48 doc=
"Clipping threshold for background",
50 nIter = pexConfig.Field(
53 doc=
"Clipping iterations for background",
55 mask = pexConfig.ListField(
57 default=[
"DETECTED",
"BAD",
"NO_DATA"],
58 doc=
"Mask planes to reject",
63 """Measure statistics on the background
65 This can be useful for scaling the background, e.g., for flats and fringe frames.
67 ConfigClass = CalibStatsConfig
69 def run(self, exposureOrImage):
70 """Measure a particular statistic on an image (of some sort).
74 exposureOrImage : `lsst.afw.image.Exposure`, `lsst.afw.image.MaskedImage`, or `lsst.afw.image.Image`
75 Exposure or image to calculate statistics on.
80 Resulting statistic value.
82 stats = afwMath.StatisticsControl(self.config.clip, self.config.nIter,
83 afwImage.Mask.getPlaneBitMask(self.config.mask))
85 image = exposureOrImage.getMaskedImage()
88 image = exposureOrImage.getImage()
90 image = exposureOrImage
92 return afwMath.makeStatistics(image, self.config.stat, stats).getValue()
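

# Illustrative usage sketch (hypothetical helper, not part of the task API):
# measure the clipped background statistic of a single exposure with
# CalibStatsTask, e.g. to derive a scale factor for flat or fringe frames.
# The configuration values below are placeholders, not project defaults.
def _exampleMeasureBackground(exposure):
    """Return the background statistic of ``exposure`` (example only)."""
    statsConfig = CalibStatsConfig()
    statsConfig.stat = int(afwMath.MEANCLIP)  # clipped mean
    statsConfig.clip = 3.0   # placeholder clipping threshold
    statsConfig.nIter = 3    # placeholder number of clipping iterations
    statsTask = CalibStatsTask(config=statsConfig)
    return statsTask.run(exposure)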


class CalibCombineConnections(pipeBase.PipelineTaskConnections,
                              dimensions=("instrument", "detector")):
    inputExps = cT.Input(
        doc="Input pre-processed exposures to combine.",
        storageClass="Exposure",
        dimensions=("instrument", "detector", "exposure"),
        multiple=True,
    )
    inputScales = cT.Input(
        doc="Input scale factors to use.",
        storageClass="StructuredDataDict",
        dimensions=("instrument", ),
    )

    outputData = cT.Output(
        doc="Output combined proposed calibration.",
        storageClass="ExposureF",
        dimensions=("instrument", "detector"),
    )

    def __init__(self, *, config=None):
        super().__init__(config=config)

        if config and config.exposureScaling != 'InputList':
            self.inputs.discard("inputScales")

        if config and len(config.calibrationDimensions) != 0:
            # Rebuild the output connection with the extra dimensions.
            newDimensions = tuple(config.calibrationDimensions)
            newOutputData = cT.Output(
                name=self.outputData.name,
                doc=self.outputData.doc,
                storageClass=self.outputData.storageClass,
                dimensions=self.allConnections['outputData'].dimensions + newDimensions,
            )
            self.dimensions.update(config.calibrationDimensions)
            self.outputData = newOutputData

            if config.exposureScaling == 'InputList':
                # The scale inputs gain the same extra dimensions.
                newInputScales = cT.PrerequisiteInput(
                    name=self.inputScales.name,
                    doc=self.inputScales.doc,
                    storageClass=self.inputScales.storageClass,
                    dimensions=self.allConnections['inputScales'].dimensions + newDimensions,
                )
                self.dimensions.update(config.calibrationDimensions)
                self.inputScales = newInputScales


class CalibCombineConfig(pipeBase.PipelineTaskConfig,
                         pipelineConnections=CalibCombineConnections):
    """Configuration for combining calib exposures.
    """
    calibrationType = pexConfig.Field(
        dtype=str,
        default="calibration",
        doc="Name of calibration to be generated.",
    )
    calibrationDimensions = pexConfig.ListField(
        dtype=str,
        default=[],
        doc="List of updated dimensions to append to output.",
    )
    exposureScaling = pexConfig.ChoiceField(
        dtype=str,
        allowed={
            "None": "No scaling used.",
            "ExposureTime": "Scale inputs by their exposure time.",
            "DarkTime": "Scale inputs by their dark time.",
            "MeanStats": "Scale inputs based on their mean values.",
            "InputList": "Scale inputs based on a list of values.",
        },
        doc="Scaling to be applied to each input exposure.",
    )
    scalingLevel = pexConfig.ChoiceField(
        dtype=str,
        allowed={
            "DETECTOR": "Scale by detector.",
            "AMP": "Scale by amplifier.",
        },
        doc="Region to scale.",
    )
    maxVisitsToCalcErrorFromInputVariance = pexConfig.Field(
        dtype=int,
        doc="Maximum number of visits to estimate variance from input variance, not per-pixel spread",
    )

    doVignette = pexConfig.Field(
        dtype=bool,
        doc="Copy vignette polygon to output and censor vignetted pixels?",
    )

    mask = pexConfig.ListField(
        dtype=str,
        default=["SAT", "DETECTED", "INTRP"],
        doc="Mask planes to respect",
    )
    combine = pexConfig.Field(
        dtype=int,
        default=int(afwMath.MEANCLIP),
        doc="Statistic to use for combination (from lsst.afw.math)",
    )
    clip = pexConfig.Field(
        dtype=float,
        doc="Clipping threshold for combination",
    )
    nIter = pexConfig.Field(
        dtype=int,
        doc="Clipping iterations for combination",
    )
    stats = pexConfig.ConfigurableField(
        target=CalibStatsTask,
        doc="Background statistics configuration",
    )


class CalibCombineTask(pipeBase.PipelineTask,
                       pipeBase.CmdLineTask):
    """Task to combine calib exposures."""
    ConfigClass = CalibCombineConfig
    _DefaultName = 'cpCombine'

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.makeSubtask("stats")

    def runQuantum(self, butlerQC, inputRefs, outputRefs):
        inputs = butlerQC.get(inputRefs)

        dimensions = [exp.dataId.byName() for exp in inputRefs.inputExps]
        inputs['inputDims'] = dimensions

        outputs = self.run(**inputs)
        butlerQC.put(outputs, outputRefs)

    def run(self, inputExps, inputScales=None, inputDims=None):
        """Combine calib exposures for a single detector.

        Parameters
        ----------
        inputExps : `list` [`lsst.afw.image.Exposure`]
            Input list of exposures to combine.
        inputScales : `dict` [`dict` [`dict` [`float`]]], optional
            Dictionary of scales, indexed by detector (`int`),
            amplifier (`str`), and exposure (`int`).  Used for
            'InputList' scaling.
        inputDims : `list` [`dict`]
            List of dictionaries of input data dimensions/values.
            Each list entry should contain:

            ``"exposure"``
                exposure id value (`int`)
            ``"detector"``
                detector id value (`int`)

        Returns
        -------
        combinedExp : `lsst.afw.image.Exposure`
            Final combined exposure generated from the inputs.

        Raises
        ------
        RuntimeError
            Raised if no input data is found.  Also raised if
            config.exposureScaling == InputList, and a necessary scale
            is not found.
        """
        width, height = self.getDimensions(inputExps)
        stats = afwMath.StatisticsControl(self.config.clip, self.config.nIter,
                                          afwImage.Mask.getPlaneBitMask(self.config.mask))
        numExps = len(inputExps)
        if numExps < 1:
            raise RuntimeError("No valid input data")
        if numExps < self.config.maxVisitsToCalcErrorFromInputVariance:
            stats.setCalcErrorFromInputVariance(True)

        # Output image onto which the scaled inputs are stacked.
        combined = afwImage.MaskedImageF(width, height)
        combinedExp = afwImage.makeExposure(combined)

        # Determine a scale factor for each input.
        expScales = []
        if inputDims is None:
            inputDims = [dict() for i in inputExps]

        for index, (exp, dims) in enumerate(zip(inputExps, inputDims)):
            scale = 1.0
            if exp is None:
                self.log.warn("Input %d is None (%s); unable to scale exp.", index, dims)
                continue

            if self.config.exposureScaling == "ExposureTime":
                scale = exp.getInfo().getVisitInfo().getExposureTime()
            elif self.config.exposureScaling == "DarkTime":
                scale = exp.getInfo().getVisitInfo().getDarkTime()
            elif self.config.exposureScaling == "MeanStats":
                scale = self.stats.run(exp)
            elif self.config.exposureScaling == "InputList":
                visitId = dims.get('exposure', None)
                detectorId = dims.get('detector', None)
                if visitId is None or detectorId is None:
                    raise RuntimeError(f"Could not identify scaling for input {index} ({dims})")
                if detectorId not in inputScales['expScale']:
                    raise RuntimeError(f"Could not identify a scaling for input {index}"
                                       f" detector {detectorId}")

                if self.config.scalingLevel == "DETECTOR":
                    if visitId not in inputScales['expScale'][detectorId]:
                        raise RuntimeError(f"Could not identify a scaling for input {index}"
                                           f" detector {detectorId} visit {visitId}")
                    scale = inputScales['expScale'][detectorId][visitId]
                elif self.config.scalingLevel == 'AMP':
                    scale = [inputScales['expScale'][detectorId][amp.getName()][visitId]
                             for amp in exp.getDetector()]
                else:
                    raise RuntimeError(f"Unknown scaling level: {self.config.scalingLevel}")
            elif self.config.exposureScaling == 'None':
                pass
            else:
                raise RuntimeError(f"Unknown scaling type: {self.config.exposureScaling}.")

            expScales.append(scale)
            self.log.info("Scaling input %d by %s", index, scale)
            self.applyScale(exp, scale)

        self.combine(combined, inputExps, stats)

        self.interpolateNans(combined)

        if self.config.doVignette:
            polygon = inputExps[0].getInfo().getValidPolygon()
            VignetteExposure(combined, polygon=polygon, doUpdateMask=True,
                             doSetValue=True, vignetteValue=0.0)

        self.combineHeaders(inputExps, combinedExp,
                            calibType=self.config.calibrationType, scales=expScales)

        return pipeBase.Struct(
            outputData=combinedExp,
        )
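
    # Example (sketch) of the ``inputScales`` structure consumed by the
    # "InputList" branch of ``run`` above; the identifiers and values are
    # placeholders:
    #
    #     # scalingLevel == "DETECTOR":
    #     inputScales = {"expScale": {detectorId: {exposureId: 1.02}}}
    #     # scalingLevel == "AMP":
    #     inputScales = {"expScale": {detectorId: {ampName: {exposureId: 1.02}}}}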
350 """Get dimensions of the inputs.
354 expList : `list` [`lsst.afw.image.Exposure`]
355 Exps to check the sizes of.
359 width, height : `int`
360 Unique set of input dimensions.
362 dimList = [exp.getDimensions()
for exp
in expList
if exp
is not None]
366 """Determine a consistent size, given a list of image sizes.
370 dimList : iterable of `tuple` (`int`, `int`)
376 If input dimensions are inconsistent.
380 width, height : `int`
383 dim = set((w, h)
for w, h
in dimList)
385 raise RuntimeError(
"Inconsistent dimensions: %s" % dim)
389 """Apply scale to input exposure.
391 This implementation applies a flux scaling: the input exposure is
392 divided by the provided scale.
396 exposure : `lsst.afw.image.Exposure`
398 scale : `float` or `list` [`float`], optional
399 Constant scale to divide the exposure by.
401 if scale
is not None:
402 mi = exposure.getMaskedImage()
403 if isinstance(scale, list):
404 for amp, ampScale
in zip(exposure.getDetector(), scale):
405 ampIm = mi[amp.getBBox()]
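
    # Example (sketch): a per-amplifier scale list is applied in detector
    # amplifier order, so for a two-amp detector
    #
    #     task.applyScale(exposure, scale=[1.01, 0.99])
    #
    # divides each amplifier's subimage by the matching (placeholder) value.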
411 """Combine multiple images.
415 target : `lsst.afw.image.Exposure`
416 Output exposure to construct.
417 expList : `list` [`lsst.afw.image.Exposure`]
418 Input exposures to combine.
419 stats : `lsst.afw.math.StatisticsControl`
420 Control explaining how to combine the input images.
422 images = [img.getMaskedImage()
for img
in expList
if img
is not None]
423 afwMath.statisticsStack(target, images, afwMath.Property(self.config.combine), stats)
426 """Combine input headers to determine the set of common headers,
427 supplemented by calibration inputs.
431 expList : `list` of `lsst.afw.image.Exposure`
432 Input list of exposures to combine.
433 calib : `lsst.afw.image.Exposure`
434 Output calibration to construct headers for.
435 calibType: `str`, optional
436 OBSTYPE the output should claim.
437 scales: `list` of `float`, optional
438 Scale values applied to each input to record.
442 header : `lsst.daf.base.PropertyList`
446 header = calib.getMetadata()
447 header.set(
"OBSTYPE", calibType)
450 comments = {
"TIMESYS":
"Time scale for all dates",
451 "DATE-OBS":
"Start date of earliest input observation",
452 "MJD-OBS":
"[d] Start MJD of earliest input observation",
453 "DATE-END":
"End date of oldest input observation",
454 "MJD-END":
"[d] End MJD of oldest input observation",
455 "MJD-AVG":
"[d] MJD midpoint of all input observations",
456 "DATE-AVG":
"Midpoint date of all input observations"}
459 now = time.localtime()
460 calibDate = time.strftime(
"%Y-%m-%d", now)
461 calibTime = time.strftime(
"%X %Z", now)
462 header.set(
"CALIB_CREATE_DATE", calibDate)
463 header.set(
"CALIB_CREATE_TIME", calibTime)
466 inputHeaders = [exp.getMetadata()
for exp
in expList
if exp
is not None]
467 merged = merge_headers(inputHeaders, mode=
'drop')
468 for k, v
in merged.items():
470 md = expList[0].getMetadata()
471 comment = md.getComment(k)
if k
in md
else None
472 header.set(k, v, comment=comment)
475 visitInfoList = [exp.getInfo().getVisitInfo()
for exp
in expList
if exp
is not None]
476 for i, visit
in enumerate(visitInfoList):
479 header.set(
"CPP_INPUT_%d" % (i,), visit.getExposureId())
480 header.set(
"CPP_INPUT_DATE_%d" % (i,), str(visit.getDate()))
481 header.set(
"CPP_INPUT_EXPT_%d" % (i,), visit.getExposureTime())
482 if scales
is not None:
483 header.set(
"CPP_INPUT_SCALE_%d" % (i,), scales[i])
490 group = ObservationGroup(visitInfoList, pedantic=
False)
492 self.log.warn(
"Exception making an obs group for headers. Continuing.")
494 dateCards = {
"DATE-OBS":
"{}T00:00:00.00".format(calibDate)}
495 comments[
"DATE-OBS"] =
"Date of start of day of calibration midpoint"
497 oldest, newest = group.extremes()
498 dateCards = dates_to_fits(oldest.datetime_begin, newest.datetime_end)
500 for k, v
in dateCards.items():
501 header.set(k, v, comment=comments.get(k,
None))
506 """Interpolate over NANs in the combined image.
508 NANs can result from masked areas on the CCD. We don't want them getting
509 into our science images, so we replace them with the median of the image.
513 exp : `lsst.afw.image.Exposure`
514 Exp to check for NaNs.
516 array = exp.getImage().getArray()
517 bad = np.isnan(array)
519 median = np.median(array[np.logical_not(bad)])
520 count = np.sum(np.logical_not(bad))
523 self.log.warn(
"Found %s NAN pixels", count)


def VignetteExposure(exposure, polygon=None,
                     doUpdateMask=True, maskPlane='BAD',
                     doSetValue=False, vignetteValue=0.0,
                     log=None):
    """Apply vignetted polygon to image pixels.

    Parameters
    ----------
    exposure : `lsst.afw.image.Exposure`
        Exposure to modify.
    doUpdateMask : `bool`, optional
        Update the exposure mask for vignetted area?
    maskPlane : `str`, optional
        Mask plane to assign.
    doSetValue : `bool`, optional
        Set image value for vignetted area?
    vignetteValue : `float`, optional
        Value to assign when ``doSetValue`` is True.
    log : `lsst.log.Log`, optional
        Log to write to.

    Raises
    ------
    RuntimeError
        Raised if no valid polygon exists.
    """
    polygon = polygon if polygon else exposure.getInfo().getValidPolygon()
    if not polygon:
        raise RuntimeError("Could not find valid polygon!")
    log = log if log else Log.getLogger(__name__.partition(".")[2])

    fullyIlluminated = True
    for corner in exposure.getBBox().getCorners():
        if not polygon.contains(Point2D(corner)):
            fullyIlluminated = False

    log.info("Exposure is fully illuminated? %s", fullyIlluminated)

    if not fullyIlluminated:
        # Build a boolean map of pixels that fall outside the polygon.
        mask = exposure.getMask()
        numPixels = mask.getBBox().getArea()

        xx, yy = np.meshgrid(np.arange(0, mask.getWidth(), dtype=int),
                             np.arange(0, mask.getHeight(), dtype=int))

        vignMask = np.array([not polygon.contains(Point2D(x, y)) for x, y in
                             zip(xx.reshape(numPixels), yy.reshape(numPixels))])
        vignMask = vignMask.reshape(mask.getHeight(), mask.getWidth())

        if doUpdateMask:
            bitMask = mask.getPlaneBitMask(maskPlane)
            maskArray = mask.getArray()
            maskArray[vignMask] |= bitMask
        if doSetValue:
            imageArray = exposure.getImage().getArray()
            imageArray[vignMask] = vignetteValue
        log.info("Exposure contains %d vignetted pixels.",
                 np.count_nonzero(vignMask))
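

# Illustrative usage sketch (hypothetical helper, not part of this module's
# API): censor the vignetted region of a combined calibration, taking the
# validity polygon from the first input exposure.
def _exampleApplyVignette(combinedExp, firstInputExp):
    """Mask and zero vignetted pixels of ``combinedExp`` (example only)."""
    polygon = firstInputExp.getInfo().getValidPolygon()
    VignetteExposure(combinedExp, polygon=polygon,
                     doUpdateMask=True, maskPlane='BAD',
                     doSetValue=True, vignetteValue=0.0)
    return combinedExp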