from __future__ import absolute_import, division, print_function

from builtins import zip
from builtins import range

import sys
import math
import time
import argparse
import traceback
import collections

import numpy as np

from lsst.pex.config import Config, ConfigurableField, Field, ListField, ConfigField
from lsst.pipe.base import Task, Struct, TaskRunner, ArgumentParser
import lsst.daf.base as dafBase
import lsst.afw.math as afwMath
import lsst.afw.geom as afwGeom
import lsst.afw.detection as afwDet
import lsst.afw.image as afwImage
import lsst.meas.algorithms as measAlg
from lsst.pipe.tasks.repair import RepairTask
from lsst.ip.isr import IsrTask
from lsst.ctrl.pool.parallel import BatchPoolTask
from lsst.ctrl.pool.pool import Pool, NODE

from .background import SkyMeasurementTask, FocalPlaneBackground, FocalPlaneBackgroundConfig
from .visualizeVisit import makeCameraImage
from .checksum import checksum
from .utils import getDataRef


class CalibStatsConfig(Config):
    """Parameters controlling the measurement of background statistics"""
    stat = Field(doc="Statistic to use to estimate background (from lsst.afw.math)", dtype=int,
                 default=int(afwMath.MEANCLIP))
    clip = Field(doc="Clipping threshold for background",
                 dtype=float, default=3.0)
    nIter = Field(doc="Clipping iterations for background",
                  dtype=int, default=3)
    maxVisitsToCalcErrorFromInputVariance = Field(
        doc="Maximum number of visits to estimate variance from input variance, not per-pixel spread",
        dtype=int, default=2)
    mask = ListField(doc="Mask planes to reject",
                     dtype=str, default=["DETECTED", "BAD", "NO_DATA", ])


class CalibStatsTask(Task):
    """Measure statistics on the background

    This can be useful for scaling the background, e.g., for flats and fringe frames.
    """
    ConfigClass = CalibStatsConfig

    def run(self, exposureOrImage):
        """!Measure a particular statistic on an image (of some sort).

        @param exposureOrImage    Exposure, MaskedImage or Image.
        @return Value of desired statistic
        """
        stats = afwMath.StatisticsControl(self.config.clip, self.config.nIter,
                                          afwImage.Mask.getPlaneBitMask(self.config.mask))
        try:
            image = exposureOrImage.getMaskedImage()
        except Exception:
            try:
                image = exposureOrImage.getImage()
            except Exception:
                image = exposureOrImage

        return afwMath.makeStatistics(image, self.config.stat, stats).getValue()
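
# Illustrative numpy-only analogue of the clipped-mean statistic configured above
# (a sketch, not part of the original task, which delegates to afwMath.makeStatistics):
# iteratively reject pixels more than 'clip' standard deviations from the mean.
def _exampleClippedMean(array, clip=3.0, nIter=3):
    good = np.isfinite(array)
    for _ in range(nIter):
        mean = array[good].mean()
        std = array[good].std()
        if std == 0:
            break
        good &= np.abs(array - mean) < clip*std
    return array[good].mean()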


class CalibCombineConfig(Config):
    """Configuration for combining calib images"""
    rows = Field(doc="Number of rows to read at a time",
                 dtype=int, default=512)
    mask = ListField(doc="Mask planes to respect", dtype=str,
                     default=["SAT", "DETECTED", "INTRP"])
    combine = Field(doc="Statistic to use for combination (from lsst.afw.math)", dtype=int,
                    default=int(afwMath.MEANCLIP))
    clip = Field(doc="Clipping threshold for combination",
                 dtype=float, default=3.0)
    nIter = Field(doc="Clipping iterations for combination",
                  dtype=int, default=3)
    stats = ConfigurableField(target=CalibStatsTask,
                              doc="Background statistics configuration")


class CalibCombineTask(Task):
    """Task to combine calib images"""
    ConfigClass = CalibCombineConfig

    def __init__(self, *args, **kwargs):
        Task.__init__(self, *args, **kwargs)
        self.makeSubtask("stats")

    def run(self, sensorRefList, expScales=None, finalScale=None, inputName="postISRCCD"):
        """!Combine calib images for a single sensor

        @param sensorRefList   List of data references to combine (for a single sensor)
        @param expScales       List of scales to apply for each exposure
        @param finalScale      Desired scale for final combined image
        @param inputName       Data set name for inputs
        @return combined image
        """
        width, height = self.getDimensions(sensorRefList)
        stats = afwMath.StatisticsControl(self.config.clip, self.config.nIter,
                                          afwImage.Mask.getPlaneBitMask(self.config.mask))
        numImages = len(sensorRefList)
        if numImages < 1:
            raise RuntimeError("No valid input data")
        if numImages < self.config.stats.maxVisitsToCalcErrorFromInputVariance:
            stats.setCalcErrorFromInputVariance(True)

        # Combine the inputs in horizontal strips to limit memory use
        combined = afwImage.MaskedImageF(width, height)
        imageList = [None]*numImages
        for start in range(0, height, self.config.rows):
            rows = min(self.config.rows, height - start)
            box = afwGeom.Box2I(afwGeom.Point2I(0, start),
                                afwGeom.Extent2I(width, rows))
            subCombined = combined.Factory(combined, box)

            for i, sensorRef in enumerate(sensorRefList):
                if sensorRef is None:
                    imageList[i] = None
                    continue
                exposure = sensorRef.get(inputName + "_sub", bbox=box)
                if expScales is not None:
                    self.applyScale(exposure, expScales[i])
                imageList[i] = exposure.getMaskedImage()

            self.combine(subCombined, imageList, stats)

        if finalScale is not None:
            background = self.stats.run(combined)
            self.log.info("%s: Measured background of stack is %f; adjusting to %f" %
                          (NODE, background, finalScale))
            combined *= finalScale / background

        return combined

    def getDimensions(self, sensorRefList, inputName="postISRCCD"):
        """Get dimensions of the inputs"""
        dimList = []
        for sensorRef in sensorRefList:
            if sensorRef is None:
                continue
            md = sensorRef.get(inputName + "_md")
            dimList.append(afwImage.bboxFromMetadata(md).getDimensions())
        return getSize(dimList)

    def applyScale(self, exposure, scale=None):
        """Apply scale to input exposure

        This implementation applies a flux scaling: the input exposure is
        divided by the provided scale.
        """
        if scale is not None:
            mi = exposure.getMaskedImage()
            mi /= scale

    def combine(self, target, imageList, stats):
        """!Combine multiple images

        @param target      Target image to receive the combined pixels
        @param imageList   List of input images
        @param stats       Statistics control
        """
        images = [img for img in imageList if img is not None]
        afwMath.statisticsStack(target, images, afwMath.Property(self.config.combine), stats)
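
# Illustrative numpy sketch of what a per-pixel clipped-mean stack does (not part of
# the original task, which uses afwMath.statisticsStack with mask planes): each input
# is a 2-d array, and deviant pixels are masked before the final mean.
def _exampleStackImages(imageList, clip=3.0, nIter=3):
    stack = np.ma.masked_invalid(np.array(imageList, dtype=float))
    for _ in range(nIter):
        mean = stack.mean(axis=0)
        std = stack.std(axis=0)
        deviant = np.ma.filled(np.abs(stack - mean) > clip*std, False)
        stack = np.ma.masked_where(deviant, stack)
    return stack.mean(axis=0).filled(np.nan)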


def getSize(dimList):
    """Determine a consistent size, given a list of image sizes"""
    dim = set((w, h) for w, h in dimList)
    if len(dim) != 1:
        raise RuntimeError("Inconsistent dimensions: %s" % dim)
    return dim.pop()


def dictToTuple(dict_, keys):
    """!Return a tuple of specific values from a dict

    This provides a hashable representation of the dict from certain keywords.
    This can be useful for creating e.g., a tuple of the values in the DataId
    that identify the CCD.

    @param dict_  dict to parse
    @param keys   keys to extract (order is important)
    @return tuple of values
    """
    return tuple(dict_[k] for k in keys)


def getCcdIdListFromExposures(expRefList, level="sensor", ccdKeys=["ccd"]):
    """!Determine a list of CCDs from exposure references

    This essentially inverts the exposure-level references (which
    provides a list of CCDs for each exposure), by providing
    a dataId list for each CCD.  Consider an input list of exposures
    [e1, e2, e3], and each exposure has CCDs c1 and c2.  Then this
    function will return:

        {(c1,): [e1c1, e2c1, e3c1], (c2,): [e1c2, e2c2, e3c2]}

    This is a dict whose keys are tuples of the identifying values of a
    CCD (usually just the CCD number) and the values are lists of dataIds
    for that CCD in each exposure.  A missing dataId is given the value
    None.

    @param expRefList   List of data references for exposures
    @param level        Level for the butler to generate CCDs
    @param ccdKeys      DataId keywords that identify a CCD
    @return dict of data identifier lists for each CCD;
            keys are values of ccdKeys in order
    """
    expIdList = [[ccdRef.dataId for ccdRef in expRef.subItems(level)] for expRef in expRefList]

    # Determine what additional keys make a CCD from an exposure
    if len(ccdKeys) != len(set(ccdKeys)):
        raise RuntimeError("Duplicate keys found in ccdKeys: %s" % ccdKeys)
    ccdNames = set()  # Set of tuples which are values for each of the CCDs in an exposure
    for ccdIdList in expIdList:
        for ccdId in ccdIdList:
            name = dictToTuple(ccdId, ccdKeys)
            ccdNames.add(name)

    # Turn the list of CCDs for each exposure into a list of exposures for each CCD
    ccdLists = {}
    for n, ccdIdList in enumerate(expIdList):
        for ccdId in ccdIdList:
            name = dictToTuple(ccdId, ccdKeys)
            if name not in ccdLists:
                ccdLists[name] = []
            ccdLists[name].append(ccdId)

    for ccd in ccdLists:
        # Sort the list by the dataId values (ordered by key)
        ccdLists[ccd] = sorted(ccdLists[ccd], key=lambda dd: dictToTuple(dd, sorted(dd.keys())))

    return ccdLists
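
# Illustrative sketch of the inversion above, using plain dicts in place of butler
# data references (hypothetical dataIds, not part of the original module): three
# visits, each with CCDs 1 and 2, become one list of dataIds per CCD.
def _exampleInvertExposures():
    exposures = [[{"visit": visit, "ccd": ccd} for ccd in (1, 2)] for visit in (100, 101, 102)]
    ccdLists = {}
    for ccdIdList in exposures:
        for ccdId in ccdIdList:
            ccdLists.setdefault(dictToTuple(ccdId, ["ccd"]), []).append(ccdId)
    return ccdLists  # {(1,): [three dataIds for ccd 1], (2,): [three dataIds for ccd 2]}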


def mapToMatrix(pool, func, ccdIdLists, *args, **kwargs):
    """Generate a matrix of results using pool.map

    The function should have the call signature:
        func(cache, dataId, *args, **kwargs)

    We return a dict mapping 'ccd name' to a list of values for
    each exposure.

    @param pool  Process pool
    @param func  Function to call for each dataId
    @param ccdIdLists  Dict of data identifier lists for each CCD name
    @return matrix of results
    """
    dataIdList = sum(ccdIdLists.values(), [])
    resultList = pool.map(func, dataIdList, *args, **kwargs)
    # Piece everything back together into the same shape as ccdIdLists
    data = dict((ccdName, [None] * len(expList)) for ccdName, expList in ccdIdLists.items())
    indices = dict(sum([[(tuple(dataId.values()) if dataId is not None else None, (ccdName, expNum))
                         for expNum, dataId in enumerate(expList)]
                        for ccdName, expList in ccdIdLists.items()], []))
    for dataId, result in zip(dataIdList, resultList):
        if dataId is None:
            continue
        ccdName, expNum = indices[tuple(dataId.values())]
        data[ccdName][expNum] = result
    return data
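
# Illustrative sketch of mapToMatrix's bookkeeping (not part of the original module),
# using a trivial serial stand-in for the ctrl_pool process pool and hypothetical
# dataIds; it returns {(1,): [10, 11], (2,): [10, None]}.
def _exampleMapToMatrix():
    class SerialPool(object):
        """Minimal object with the pool.map signature used above."""
        def map(self, func, dataIdList, *args, **kwargs):
            return [func(None, dataId, *args, **kwargs) for dataId in dataIdList]

    def getVisit(cache, dataId):
        return dataId["visit"] if dataId is not None else None

    ccdIdLists = {(1,): [{"visit": 10, "ccd": 1}, {"visit": 11, "ccd": 1}],
                  (2,): [{"visit": 10, "ccd": 2}, None]}
    return mapToMatrix(SerialPool(), getVisit, ccdIdLists)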


class CalibIdAction(argparse.Action):
    """Split name=value pairs and put the result in a dict"""

    def __call__(self, parser, namespace, values, option_string):
        output = getattr(namespace, self.dest, {})
        for nameValue in values:
            name, sep, valueStr = nameValue.partition("=")
            if not valueStr:
                parser.error("%s value %s must be in form name=value" %
                             (option_string, nameValue))
            output[name] = valueStr
        setattr(namespace, self.dest, output)
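
# Illustrative usage of CalibIdAction with a plain argparse parser (hypothetical
# values, not part of the original module); CalibArgumentParser below wires the same
# action to its "--calibId" argument.
def _exampleCalibIdParsing():
    parser = argparse.ArgumentParser()
    parser.add_argument("--calibId", nargs="*", action=CalibIdAction, default={},
                        help="identifiers for calib, e.g., --calibId version=1")
    args = parser.parse_args(["--calibId", "version=1", "calibDate=2018-01-01"])
    return args.calibId  # {'version': '1', 'calibDate': '2018-01-01'}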


class CalibArgumentParser(ArgumentParser):
    """ArgumentParser for calibration construction"""

    def __init__(self, calibName, *args, **kwargs):
        """Add a --calibId argument to the standard pipe_base argument parser"""
        ArgumentParser.__init__(self, *args, **kwargs)
        self.calibName = calibName
        self.add_id_argument("--id", datasetType="raw",
                             help="input identifiers, e.g., --id visit=123 ccd=4")
        self.add_argument("--calibId", nargs="*", action=CalibIdAction, default={},
                          help="identifiers for calib, e.g., --calibId version=1",
                          metavar="KEY=VALUE1[^VALUE2[^VALUE3...]")

    def parse_args(self, *args, **kwargs):
        """Parse arguments

        Checks that the "--calibId" provided works.
        """
        namespace = ArgumentParser.parse_args(self, *args, **kwargs)

        keys = namespace.butler.getKeys(self.calibName)
        parsed = {}
        for name, value in namespace.calibId.items():
            if name not in keys:
                self.error(
                    "%s is not a relevant calib identifier key (%s)" % (name, keys))
            parsed[name] = keys[name](value)
        namespace.calibId = parsed

        return namespace


class CalibConfig(Config):
    """Configuration for constructing calibs"""
    clobber = Field(dtype=bool, default=True,
                    doc="Clobber existing processed images?")
    isr = ConfigurableField(target=IsrTask, doc="ISR configuration")
    dateObs = Field(dtype=str, default="dateObs",
                    doc="Key for observation date in exposure registry")
    dateCalib = Field(dtype=str, default="calibDate",
                      doc="Key for calib date in calib registry")
    filter = Field(dtype=str, default="filter",
                   doc="Key for filter name in exposure/calib registries")
    combination = ConfigurableField(
        target=CalibCombineTask, doc="Calib combination configuration")
    ccdKeys = ListField(dtype=str, default=["ccd"],
                        doc="DataId keywords specifying a CCD")
    visitKeys = ListField(dtype=str, default=["visit"],
                          doc="DataId keywords specifying a visit")
    calibKeys = ListField(dtype=str, default=[],
                          doc="DataId keywords specifying a calibration")
    doCameraImage = Field(dtype=bool, default=True, doc="Create camera overview image?")
    binning = Field(dtype=int, default=64, doc="Binning to apply for camera image")

    def setDefaults(self):
        self.isr.doWrite = False


class CalibTaskRunner(TaskRunner):
    """Get parsed values into the CalibTask.run"""
    @staticmethod
    def getTargetList(parsedCmd, **kwargs):
        return [dict(expRefList=parsedCmd.id.refList, butler=parsedCmd.butler, calibId=parsedCmd.calibId)]

    def __call__(self, args):
        """Call the Task with the kwargs from getTargetList"""
        task = self.TaskClass(config=self.config, log=self.log)
        exitStatus = 0  # exit status for the shell
        if self.doRaise:
            result = task.runDataRef(**args)
        else:
            try:
                result = task.runDataRef(**args)
            except Exception as e:
                exitStatus = 1
                task.log.fatal("Failed: %s" % e)
                traceback.print_exc(file=sys.stderr)

        if self.doReturnResults:
            return Struct(
                exitStatus=exitStatus,
                args=args,
                metadata=task.metadata,
                result=result,
            )
        else:
            return Struct(
                exitStatus=exitStatus,
            )


class CalibTask(BatchPoolTask):
    """!Base class for constructing calibs.

    This should be subclassed for each of the required calib types.
    The subclass should be sure to define the following class variables:
    * _DefaultName: default name of the task, used by CmdLineTask
    * calibName: name of the calibration data set in the butler
    The subclass may optionally set:
    * filterName: filter name to give the resultant calib
    """
    ConfigClass = CalibConfig
    RunnerClass = CalibTaskRunner
    filterName = None
    calibName = None

    def __init__(self, *args, **kwargs):
        BatchPoolTask.__init__(self, *args, **kwargs)
        self.makeSubtask("isr")
        self.makeSubtask("combination")

    @classmethod
    def batchWallTime(cls, time, parsedCmd, numCores):
        """Return walltime request for batch job"""
        numCcds = len(parsedCmd.butler.get("camera"))
        numExps = len(cls.RunnerClass.getTargetList(
            parsedCmd)[0]['expRefList'])
        numCycles = int(numCcds/float(numCores) + 0.5)
        return time*numExps*numCycles

    @classmethod
    def _makeArgumentParser(cls, *args, **kwargs):
        kwargs.pop("doBatch", False)
        return CalibArgumentParser(calibName=cls.calibName, *args, **kwargs)

    def runDataRef(self, expRefList, butler, calibId):
        """!Construct a calib from a list of exposure references

        This is the entry point, called by the TaskRunner.__call__

        Only the master node executes this method.

        @param expRefList  List of data references at the exposure level
        @param butler      Data butler
        @param calibId     Identifier dict for calib
        """
        if len(expRefList) < 1:
            raise RuntimeError("No valid input data")

        for expRef in expRefList:
            self.addMissingKeys(expRef.dataId, butler, self.config.ccdKeys, 'raw')

        outputId = self.getOutputId(expRefList, calibId)
        ccdIdLists = getCcdIdListFromExposures(
            expRefList, level="sensor", ccdKeys=self.config.ccdKeys)
        self.checkCcdIdLists(ccdIdLists)

        # Ensure we can generate filenames for each output
        outputIdItemList = list(outputId.items())
        for ccdName in ccdIdLists:
            dataId = dict([(k, ccdName[i]) for i, k in enumerate(self.config.ccdKeys)])
            dataId.update(outputIdItemList)
            self.addMissingKeys(dataId, butler)
            dataId.update(outputIdItemList)  # must be after the call to addMissingKeys

            try:
                butler.get(self.calibName + "_filename", dataId)
            except Exception as e:
                raise RuntimeError(
                    "Unable to determine output filename \"%s_filename\" from %s: %s" %
                    (self.calibName, dataId, e))

        processPool = Pool("process")
        processPool.storeSet(butler=butler)

        # Scatter: process CCDs independently
        data = self.scatterProcess(processPool, ccdIdLists)

        # Gather: determine scalings
        scales = self.scale(ccdIdLists, data)

        combinePool = Pool("combine")
        combinePool.storeSet(butler=butler)

        # Scatter: combine
        calibs = self.scatterCombine(combinePool, outputId, ccdIdLists, scales)

        if self.config.doCameraImage:
            camera = butler.get("camera")
            # Convert indexing of calibs from 'ccdName' to detector ID
            calibs = {butler.get("postISRCCD_detector",
                                 dict(zip(self.config.ccdKeys, ccdName))).getId(): calibs[ccdName]
                      for ccdName in ccdIdLists}

            try:
                cameraImage = self.makeCameraImage(camera, outputId, calibs)
                butler.put(cameraImage, self.calibName + "_camera", dataId)
            except Exception as exc:
                self.log.warn("Unable to create camera image: %s" % (exc,))

        return Struct(
            outputId=outputId,
            ccdIdLists=ccdIdLists,
            scales=scales,
            calibs=calibs,
            processPool=processPool,
            combinePool=combinePool,
        )

    def getOutputId(self, expRefList, calibId):
        """!Generate the data identifier for the output calib

        The mean date and the common filter are included, using keywords
        from the configuration.  The CCD-specific part is not included
        in the data identifier.

        @param expRefList  List of data references at exposure level
        @param calibId     Data identifier elements for the calib provided by the user
        @return data identifier
        """
        midTime = 0
        filterName = None
        for expRef in expRefList:
            butler = expRef.getButler()
            dataId = expRef.dataId

            midTime += self.getMjd(butler, dataId)
            thisFilter = self.getFilter(butler, dataId) if self.filterName is None else self.filterName
            if filterName is None:
                filterName = thisFilter
            elif filterName != thisFilter:
                raise RuntimeError("Filter mismatch for %s: %s vs %s" % (
                    dataId, thisFilter, filterName))

        midTime /= len(expRefList)
        date = str(dafBase.DateTime(
            midTime, dafBase.DateTime.MJD).toPython().date())

        outputId = {self.config.filter: filterName,
                    self.config.dateCalib: date}
        outputId.update(calibId)
        return outputId
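
    # Illustrative sketch (hypothetical helper, not part of the original task): the
    # date computation above, but with plain datetime instead of lsst.daf.base.DateTime
    # and made-up MJD values.
    @staticmethod
    def _exampleMeanMjdToDate(mjdList=(59000.1, 59001.9, 59003.0)):
        import datetime
        midTime = sum(mjdList)/len(mjdList)
        # MJD zero-point is 1858-11-17 00:00 UTC
        return str((datetime.datetime(1858, 11, 17) + datetime.timedelta(days=midTime)).date())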

    def getMjd(self, butler, dataId, timescale=dafBase.DateTime.UTC):
        """Determine the Modified Julian Date (MJD; in TAI) from a data identifier"""
        if self.config.dateObs in dataId:
            dateObs = dataId[self.config.dateObs]
        else:
            dateObs = butler.queryMetadata('raw', [self.config.dateObs], dataId)[0]
        if "T" not in dateObs:
            dateObs = dateObs + "T12:00:00.0Z"
        elif not dateObs.endswith("Z"):
            dateObs += "Z"

        return dafBase.DateTime(dateObs, timescale).get(dafBase.DateTime.MJD)

    def getFilter(self, butler, dataId):
        """Determine the filter from a data identifier"""
        filt = butler.queryMetadata('raw', [self.config.filter], dataId)[0]
        return filt

    def addMissingKeys(self, dataId, butler, missingKeys=None, calibName=None):
        """Add missing keys to the dataId, looking them up in the raw registry"""
        if calibName is None:
            calibName = self.calibName
        if missingKeys is None:
            missingKeys = set(butler.getKeys(calibName).keys()) - set(dataId.keys())

        for k in missingKeys:
            try:
                v = butler.queryMetadata('raw', [k], dataId)
            except Exception as e:
                continue  # Lookup failed

            if len(v) == 0:
                continue  # Failed to find a value

            if len(v) == 1:
                dataId[k] = v[0]
            else:
                raise RuntimeError("No unique lookup for %s: %s" % (k, v))

    def updateMetadata(self, calibImage, exposureTime, darkTime=None, **kwargs):
        """!Update the metadata from the VisitInfo

        @param calibImage    The image whose metadata is to be set
        @param exposureTime  The exposure time for the image
        @param darkTime      The time since the last read (default: exposureTime)
        """
        if darkTime is None:
            darkTime = exposureTime

        visitInfo = afwImage.makeVisitInfo(exposureTime=exposureTime, darkTime=darkTime, **kwargs)
        md = calibImage.getMetadata()

        afwImage.setVisitInfoMetadata(md, visitInfo)

    def scatterProcess(self, pool, ccdIdLists):
        """!Scatter the processing among the nodes

        We scatter each CCD independently (exposures aren't grouped together),
        to make full use of all available processors. This necessitates piecing
        everything back together in the same format as ccdIdLists afterwards.

        Only the master node executes this method.

        @param pool  Process pool
        @param ccdIdLists  Dict of data identifier lists for each CCD name
        @return Dict of lists of returned data for each CCD name
        """
        self.log.info("Scatter processing")
        return mapToMatrix(pool, self.process, ccdIdLists)

    def process(self, cache, ccdId, outputName="postISRCCD", **kwargs):
        """!Process a CCD, specified by a data identifier

        After processing, optionally returns a result (produced by
        the 'processResult' method) calculated from the processed
        exposure.  These results will be gathered by the master node,
        and is a means for coordinated scaling of all CCDs for flats,
        etc.

        Only slave nodes execute this method.

        @param cache  Process pool cache
        @param ccdId  Data identifier for CCD
        @param outputName  Output dataset name for butler
        @return result from 'processResult'
        """
        if ccdId is None:
            self.log.warn("Null identifier received on %s" % NODE)
            return None
        sensorRef = getDataRef(cache.butler, ccdId)
        if self.config.clobber or not sensorRef.datasetExists(outputName):
            self.log.info("Processing %s on %s" % (ccdId, NODE))
            try:
                exposure = self.processSingle(sensorRef, **kwargs)
            except Exception as e:
                self.log.warn("Unable to process %s: %s" % (ccdId, e))
                raise

            self.processWrite(sensorRef, exposure)
        else:
            self.log.info(
                "Using previously persisted processed exposure for %s" % (sensorRef.dataId,))
            exposure = sensorRef.get(outputName)
        return self.processResult(exposure)

    def processSingle(self, dataRef):
        """Process a single CCD, specified by a data reference

        Generally, this simply means doing ISR.

        Only slave nodes execute this method.
        """
        return self.isr.runDataRef(dataRef).exposure

    def processWrite(self, dataRef, exposure, outputName="postISRCCD"):
        """!Write the processed CCD

        We need to write these out because we can't hold them all in
        memory at once.

        Only slave nodes execute this method.

        @param dataRef     Data reference
        @param exposure    CCD exposure to write
        @param outputName  Output dataset name for butler.
        """
        dataRef.put(exposure, outputName)

    def processResult(self, exposure):
        """Extract processing results from a processed exposure

        This method generates what is gathered by the master node.
        This can be a background measurement or similar for scaling
        flat-fields.  It must be picklable!

        Only slave nodes execute this method.
        """
        return None

    def scale(self, ccdIdLists, data):
        """!Determine scaling across CCDs and exposures

        This is necessary mainly for flats, so as to determine a
        consistent scaling across the entire focal plane.  This
        implementation is simply a placeholder.

        Only the master node executes this method.

        @param ccdIdLists  Dict of data identifier lists for each CCD tuple
        @param data        Dict of lists of returned data for each CCD tuple
        @return dict of Struct(ccdScale: scaling for CCD,
                               expScales: scaling for each exposure
                               )
        """
        self.log.info("Scale on %s" % NODE)
        return dict((name, Struct(ccdScale=None, expScales=[None] * len(ccdIdLists[name])))
                    for name in ccdIdLists)

    def scatterCombine(self, pool, outputId, ccdIdLists, scales):
        """!Scatter the combination of exposures across multiple nodes

        In this case, we can only scatter across as many nodes as
        there are CCDs.

        Only the master node executes this method.

        @param pool  Process pool
        @param outputId  Output identifier (exposure part only)
        @param ccdIdLists  Dict of data identifier lists for each CCD name
        @param scales  Dict of structs with scales, for each CCD name
        @return dict of binned images
        """
        self.log.info("Scatter combination")
        data = [Struct(ccdName=ccdName, ccdIdList=ccdIdLists[ccdName], scales=scales[ccdName]) for
                ccdName in ccdIdLists]
        images = pool.map(self.combine, data, outputId)
        return dict(zip(ccdIdLists.keys(), images))

    def getFullyQualifiedOutputId(self, ccdName, butler, outputId):
        """Get fully-qualified output data identifier

        We may need to look up keys that aren't in the output dataId.

        @param ccdName  Name tuple for CCD
        @param butler   Data butler
        @param outputId  Data identifier for combined image (exposure part only)
        @return fully-qualified output dataId
        """
        fullOutputId = {k: ccdName[i] for i, k in enumerate(self.config.ccdKeys)}
        fullOutputId.update(outputId)
        self.addMissingKeys(fullOutputId, butler)
        fullOutputId.update(outputId)  # must be after the call to addMissingKeys
        return fullOutputId

    def combine(self, cache, struct, outputId):
        """!Combine multiple exposures of a particular CCD and write the output

        Only the slave nodes execute this method.

        @param cache  Process pool cache
        @param struct  Parameters for the combination, which has the following components:
            * ccdName     Name tuple for CCD
            * ccdIdList   List of data identifiers for combination
            * scales      Scales to apply (expScales are scalings for each exposure,
                          ccdScale is final scale for combined image)
        @param outputId    Data identifier for combined image (exposure part only)
        @return binned calib image
        """
        outputId = self.getFullyQualifiedOutputId(struct.ccdName, cache.butler, outputId)
        dataRefList = [getDataRef(cache.butler, dataId) if dataId is not None else None for
                       dataId in struct.ccdIdList]

        self.log.info("Combining %s on %s" % (outputId, NODE))
        calib = self.combination.run(dataRefList, expScales=struct.scales.expScales,
                                     finalScale=struct.scales.ccdScale)

        if not hasattr(calib, "getMetadata"):
            if hasattr(calib, "getVariance"):
                calib = afwImage.makeExposure(calib)
            else:
                calib = afwImage.DecoratedImageF(calib.getImage())

        self.recordCalibInputs(cache.butler, calib,
                               struct.ccdIdList, outputId)

        self.interpolateNans(calib)

        self.write(cache.butler, calib, outputId)

        return afwMath.binImage(calib.getImage(), self.config.binning)

    def recordCalibInputs(self, butler, calib, dataIdList, outputId):
        """!Record metadata including the inputs and creation details

        This metadata will go into the FITS header.

        @param butler      Data butler
        @param calib       Combined calib exposure.
        @param dataIdList  List of data identifiers for calibration inputs
        @param outputId    Data identifier for output
        """
        header = calib.getMetadata()
        header.add("OBSTYPE", self.calibName)

        # Creation date and time
        now = time.localtime()
        header.add("CALIB_CREATION_DATE", time.strftime("%Y-%m-%d", now))
        header.add("CALIB_CREATION_TIME", time.strftime("%X %Z", now))

        # Date of the data that went into the calib
        header.add("DATE-OBS", "%sT00:00:00.00" % outputId[self.config.dateCalib])

        # Inputs
        visits = [str(dictToTuple(dataId, self.config.visitKeys)) for dataId in dataIdList if
                  dataId is not None]
        for i, v in enumerate(sorted(set(visits))):
            header.add("CALIB_INPUT_%d" % (i,), v)

        header.add("CALIB_ID", " ".join("%s=%s" % (key, value)
                                        for key, value in outputId.items()))
        checksum(calib, header)

    def interpolateNans(self, image):
        """Interpolate over NANs in the combined image

        NANs can result from masked areas on the CCD. We don't want them getting
        into our science images, so we replace them with the median of the image.
        """
        if hasattr(image, "getMaskedImage"):  # Exposure
            self.interpolateNans(image.getMaskedImage().getVariance())
            image = image.getMaskedImage().getImage()
        if hasattr(image, "getImage"):  # DecoratedImage or MaskedImage
            image = image.getImage()
        array = image.getArray()
        bad = np.isnan(array)
        array[bad] = np.median(array[np.logical_not(bad)])
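
    # Illustrative numpy-only demonstration of the NaN replacement above (hypothetical
    # helper, not part of the original task): bad pixels take the median of good pixels.
    @staticmethod
    def _exampleFillNans():
        array = np.array([[1.0, np.nan], [3.0, 5.0]])
        bad = np.isnan(array)
        array[bad] = np.median(array[np.logical_not(bad)])
        return array  # the NaN becomes 3.0, the median of [1, 3, 5]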

    def write(self, butler, exposure, dataId):
        """!Write the final combined calib

        Only the slave nodes execute this method

        @param butler    Data butler
        @param exposure  CCD exposure to write
        @param dataId    Data identifier for output
        """
        self.log.info("Writing %s on %s" % (dataId, NODE))
        butler.put(exposure, self.calibName, dataId)

    def makeCameraImage(self, camera, dataId, calibs):
        """!Create and write an image of the entire camera

        This is useful for judging the quality or getting an overview of
        the features of the calib.

        @param camera  Camera object
        @param dataId  Data identifier for output
        @param calibs  Dict mapping CCD detector ID to calib image
        """
        return makeCameraImage(camera, calibs, self.config.binning)

    def checkCcdIdLists(self, ccdIdLists):
        """Check that the list of CCD dataIds is consistent

        @param ccdIdLists  Dict of data identifier lists for each CCD name
        @return Number of exposures, number of CCDs
        """
        visitIdLists = collections.defaultdict(list)
        for ccdName in ccdIdLists:
            for dataId in ccdIdLists[ccdName]:
                visitName = dictToTuple(dataId, self.config.visitKeys)
                visitIdLists[visitName].append(dataId)

        numExps = set(len(expList) for expList in ccdIdLists.values())
        numCcds = set(len(ccdList) for ccdList in visitIdLists.values())

        if len(numExps) != 1 or len(numCcds) != 1:
            self.log.warn("Number of visits for each CCD: %s",
                          {ccdName: len(ccdIdLists[ccdName]) for ccdName in ccdIdLists})
            self.log.warn("Number of CCDs for each visit: %s",
                          {vv: len(visitIdLists[vv]) for vv in visitIdLists})
            raise RuntimeError("Inconsistent number of exposures/CCDs")

        return numExps.pop(), numCcds.pop()


class BiasConfig(CalibConfig):
    """Configuration for bias construction.

    No changes required compared to the base class, but
    subclassed for distinction.
    """
    pass


class BiasTask(CalibTask):
    """Bias construction"""
    ConfigClass = BiasConfig
    _DefaultName = "bias"
    calibName = "bias"
    filterName = "NONE"

    @classmethod
    def applyOverrides(cls, config):
        """Overrides to apply for bias construction"""
        config.isr.doBias = False
        config.isr.doDark = False
        config.isr.doFlat = False
        config.isr.doFringe = False


class DarkConfig(CalibConfig):
    """Configuration for dark construction"""
    doRepair = Field(dtype=bool, default=True, doc="Repair artifacts?")
    psfFwhm = Field(dtype=float, default=3.0, doc="Repair PSF FWHM (pixels)")
    psfSize = Field(dtype=int, default=21, doc="Repair PSF size (pixels)")
    crGrow = Field(dtype=int, default=2, doc="Grow radius for CR (pixels)")
    repair = ConfigurableField(
        target=RepairTask, doc="Task to repair artifacts")

    def setDefaults(self):
        CalibConfig.setDefaults(self)
        self.combination.mask.append("CR")


class DarkTask(CalibTask):
    """Dark construction

    The only major difference from the base class is a cosmic-ray
    identification stage, and dividing each image by the dark time
    to generate images of the dark rate.
    """
    ConfigClass = DarkConfig
    _DefaultName = "dark"
    calibName = "dark"
    filterName = "NONE"

    def __init__(self, *args, **kwargs):
        CalibTask.__init__(self, *args, **kwargs)
        self.makeSubtask("repair")

    @classmethod
    def applyOverrides(cls, config):
        """Overrides to apply for dark construction"""
        config.isr.doDark = False
        config.isr.doFlat = False
        config.isr.doFringe = False

    def processSingle(self, sensorRef):
        """Process a single CCD

        Besides the regular ISR, also masks cosmic-rays and divides each
        processed image by the dark time to generate images of the dark rate.
        The dark time is provided by the 'getDarkTime' method.
        """
        exposure = CalibTask.processSingle(self, sensorRef)

        if self.config.doRepair:
            psf = measAlg.DoubleGaussianPsf(self.config.psfSize, self.config.psfSize,
                                            self.config.psfFwhm/(2*math.sqrt(2*math.log(2))))
            exposure.setPsf(psf)
            self.repair.run(exposure, keepCRs=False)
            if self.config.crGrow > 0:
                mask = exposure.getMaskedImage().getMask().clone()
                mask &= mask.getPlaneBitMask("CR")
                fpSet = afwDet.FootprintSet(
                    mask, afwDet.Threshold(0.5))
                fpSet = afwDet.FootprintSet(fpSet, self.config.crGrow, True)
                fpSet.setMask(exposure.getMaskedImage().getMask(), "CR")

        mi = exposure.getMaskedImage()
        mi /= self.getDarkTime(exposure)
        return exposure

    def getDarkTime(self, exposure):
        """Retrieve the dark time for an exposure"""
        darkTime = exposure.getInfo().getVisitInfo().getDarkTime()
        if not np.isfinite(darkTime):
            raise RuntimeError("Non-finite darkTime")
        return darkTime


class FlatConfig(CalibConfig):
    """Configuration for flat construction"""
    iterations = Field(dtype=int, default=10,
                       doc="Number of iterations for scale determination")
    stats = ConfigurableField(target=CalibStatsTask,
                              doc="Background statistics configuration")


class FlatTask(CalibTask):
    """Flat construction

    The principal change from the base class involves gathering the background
    values from each image and using them to determine the scalings for the final
    combination.
    """
    ConfigClass = FlatConfig
    _DefaultName = "flat"
    calibName = "flat"

    @classmethod
    def applyOverrides(cls, config):
        """Overrides for flat construction"""
        config.isr.doFlat = False
        config.isr.doFringe = False

    def __init__(self, *args, **kwargs):
        CalibTask.__init__(self, *args, **kwargs)
        self.makeSubtask("stats")

    def processResult(self, exposure):
        return self.stats.run(exposure)

    def scale(self, ccdIdLists, data):
        """Determine the scalings for the final combination

        We have a matrix B_ij = C_i E_j, where C_i is the relative scaling
        of one CCD to all the others in an exposure, and E_j is the scaling
        of the exposure.  We convert everything to logarithms so we can work
        with a linear system.  We determine the C_i and E_j from B_ij by iteration,
        under the additional constraint that the average CCD scale is unity.

        This algorithm comes from Eugene Magnier and Pan-STARRS.
        """
        assert len(ccdIdLists.values()) > 0, "No successful CCDs"
        lengths = set([len(expList) for expList in ccdIdLists.values()])
        assert len(
            lengths) == 1, "Number of successful exposures for each CCD differs"
        assert tuple(lengths)[0] > 0, "No successful exposures"
        # Format background measurements into a matrix
        indices = dict((name, i) for i, name in enumerate(ccdIdLists))
        bgMatrix = np.array([[0.0] * len(expList)
                             for expList in ccdIdLists.values()])
        for name in ccdIdLists:
            i = indices[name]
            bgMatrix[i] = [
                d if d is not None else np.nan for d in data[name]]

        numpyPrint = np.get_printoptions()
        np.set_printoptions(threshold=np.inf)
        self.log.info("Input backgrounds: %s" % bgMatrix)

        # Flat-field scaling
        numCcds = len(ccdIdLists)
        numExps = bgMatrix.shape[1]
        # Work with log(background) so the problem is linear
        bgMatrix = np.log(bgMatrix)
        bgMatrix = np.ma.masked_array(bgMatrix, np.isnan(bgMatrix))
        # Initial guess at log(scale) for each component
        compScales = np.zeros(numCcds)
        expScales = np.array(
            [(bgMatrix[:, i0] - compScales).mean() for i0 in range(numExps)])

        for iterate in range(self.config.iterations):
            compScales = np.array(
                [(bgMatrix[i1, :] - expScales).mean() for i1 in range(numCcds)])
            expScales = np.array(
                [(bgMatrix[:, i2] - compScales).mean() for i2 in range(numExps)])

            avgScale = np.average(np.exp(compScales))
            compScales -= np.log(avgScale)
            self.log.debug("Iteration %d exposure scales: %s",
                           iterate, np.exp(expScales))
            self.log.debug("Iteration %d component scales: %s",
                           iterate, np.exp(compScales))

        expScales = np.array(
            [(bgMatrix[:, i3] - compScales).mean() for i3 in range(numExps)])

        if np.any(np.isnan(expScales)):
            raise RuntimeError("Bad exposure scales: %s --> %s" %
                               (bgMatrix, expScales))

        expScales = np.exp(expScales)
        compScales = np.exp(compScales)

        self.log.info("Exposure scales: %s" % expScales)
        self.log.info("Component relative scaling: %s" % compScales)
        np.set_printoptions(**numpyPrint)

        return dict((ccdName, Struct(ccdScale=compScales[indices[ccdName]], expScales=expScales))
                    for ccdName in ccdIdLists)
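
# Illustrative numpy sketch of the scaling solve above (synthetic data, not part of
# the original module): given B_ij = C_i*E_j, recover the per-CCD scales C_i
# (normalised to unit mean) and per-exposure scales E_j with the same alternating
# log-space means used in FlatTask.scale.
def _exampleSolveScales(iterations=10):
    trueCcd = np.array([0.9, 1.0, 1.1])      # C_i, relative CCD response
    trueExp = np.array([1000.0, 2000.0])     # E_j, exposure background level
    bg = np.log(np.outer(trueCcd, trueExp))  # log(B_ij)
    compScales = np.zeros(len(trueCcd))
    expScales = (bg - compScales[:, np.newaxis]).mean(axis=0)
    for _ in range(iterations):
        compScales = (bg - expScales[np.newaxis, :]).mean(axis=1)
        expScales = (bg - compScales[:, np.newaxis]).mean(axis=0)
        compScales -= np.log(np.average(np.exp(compScales)))
    return np.exp(compScales), np.exp(expScales)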


class FringeConfig(CalibConfig):
    """Configuration for fringe construction"""
    stats = ConfigurableField(target=CalibStatsTask,
                              doc="Background statistics configuration")
    subtractBackground = ConfigurableField(target=measAlg.SubtractBackgroundTask,
                                           doc="Background configuration")
    detection = ConfigurableField(
        target=measAlg.SourceDetectionTask, doc="Detection configuration")
    detectSigma = Field(dtype=float, default=1.0,
                        doc="Detection PSF gaussian sigma")

    def setDefaults(self):
        CalibConfig.setDefaults(self)
        self.detection.reEstimateBackground = False


class FringeTask(CalibTask):
    """Fringe construction task

    The principal change from the base class is that the images are
    background-subtracted and rescaled by the background.

    XXX This is probably not right for a straight-up combination, as we
    are currently doing, since the fringe amplitudes need not scale with
    the continuum.

    XXX Would like to have this do PCA and generate multiple images, but
    that will take a bit of work with the persistence code.
    """
    ConfigClass = FringeConfig
    _DefaultName = "fringe"
    calibName = "fringe"

    @classmethod
    def applyOverrides(cls, config):
        """Overrides for fringe construction"""
        config.isr.doFringe = False

    def __init__(self, *args, **kwargs):
        CalibTask.__init__(self, *args, **kwargs)
        self.makeSubtask("detection")
        self.makeSubtask("stats")
        self.makeSubtask("subtractBackground")

    def processSingle(self, sensorRef):
        """Subtract the background and normalise by the background level"""
        exposure = CalibTask.processSingle(self, sensorRef)
        bgLevel = self.stats.run(exposure)
        self.subtractBackground.run(exposure)
        mi = exposure.getMaskedImage()
        mi /= bgLevel
        footprintSets = self.detection.detectFootprints(
            exposure, sigma=self.config.detectSigma)
        mask = exposure.getMaskedImage().getMask()
        detected = 1 << mask.addMaskPlane("DETECTED")
        for fpSet in (footprintSets.positive, footprintSets.negative):
            if fpSet is not None:
                afwDet.setMaskFromFootprintList(
                    mask, fpSet.getFootprints(), detected)
        return exposure


class SkyConfig(CalibConfig):
    """Configuration for sky frame construction"""
    detection = ConfigurableField(target=measAlg.SourceDetectionTask, doc="Detection configuration")
    detectSigma = Field(dtype=float, default=2.0, doc="Detection PSF gaussian sigma")
    subtractBackground = ConfigurableField(target=measAlg.SubtractBackgroundTask,
                                           doc="Regular-scale background configuration, for object detection")
    largeScaleBackground = ConfigField(dtype=FocalPlaneBackgroundConfig,
                                       doc="Large-scale background configuration")
    sky = ConfigurableField(target=SkyMeasurementTask, doc="Sky measurement")
    maskThresh = Field(dtype=float, default=3.0, doc="k-sigma threshold for masking pixels")
    mask = ListField(dtype=str, default=["BAD", "SAT", "DETECTED", "NO_DATA"],
                     doc="Mask planes to consider as contaminated")


class SkyTask(CalibTask):
    """Task for sky frame construction

    The sky frame is a (relatively) small-scale background
    model, the response of the camera to the sky.

    To construct, we first remove a large-scale background (e.g., caused
    by moonlight) which may vary from image to image. Then we construct a
    model of the sky, which is essentially a binned version of the image
    (important configuration parameters: sky.background.[xy]BinSize).
    It is these models which are coadded to yield the sky frame.
    """
    ConfigClass = SkyConfig
    _DefaultName = "sky"
    calibName = "sky"

    def __init__(self, *args, **kwargs):
        CalibTask.__init__(self, *args, **kwargs)
        self.makeSubtask("detection")
        self.makeSubtask("subtractBackground")
        self.makeSubtask("sky")

    def scatterProcess(self, pool, ccdIdLists):
        """!Scatter the processing among the nodes

        Only the master node executes this method, assigning work to the
        slaves.

        We measure and subtract off a large-scale background model across
        all CCDs, which requires a scatter/gather. Then we process the
        individual CCDs, subtracting the large-scale background model and
        the residual background model measured. These residuals will be
        combined for the sky frame.

        @param pool  Process pool
        @param ccdIdLists  Dict of data identifier lists for each CCD name
        @return Dict of lists of returned data for each CCD name
        """
        self.log.info("Scatter processing")

        numExps = set(len(expList) for expList in ccdIdLists.values()).pop()

        # Measure a background model for each CCD, then merge the models for
        # each visit into a single focal-plane background model
        bgModelList = mapToMatrix(pool, self.measureBackground, ccdIdLists)

        backgrounds = {}
        scales = {}
        for exp in range(numExps):
            bgModels = [bgModelList[ccdName][exp] for ccdName in ccdIdLists]
            visit = set(tuple(ccdIdLists[ccdName][exp][key] for key in sorted(self.config.visitKeys)) for
                        ccdName in ccdIdLists)
            assert len(visit) == 1
            visit = visit.pop()

            bgModel = bgModels[0]
            for bg in bgModels[1:]:
                bgModel.merge(bg)

            self.log.info("Background model min/max for visit %s: %f %f", visit,
                          np.min(bgModel.getStatsImage().getArray()),
                          np.max(bgModel.getStatsImage().getArray()))
            backgrounds[visit] = bgModel
            scales[visit] = np.median(bgModel.getStatsImage().getArray())

        return mapToMatrix(pool, self.process, ccdIdLists, backgrounds=backgrounds, scales=scales)

    def measureBackground(self, cache, dataId):
        """!Measure background model for CCD

        This method is executed by the slaves.

        The background models for all CCDs in an exposure will be
        combined to form a full focal-plane background model.

        @param cache   Process pool cache
        @param dataId  Data identifier
        @return Background model
        """
        dataRef = getDataRef(cache.butler, dataId)
        exposure = self.processSingleBackground(dataRef)

        # Measure a focal-plane background model from this single CCD
        config = self.config.largeScaleBackground
        camera = dataRef.get("camera")
        bgModel = FocalPlaneBackground.fromCamera(config, camera)
        bgModel.addCcd(exposure)
        return bgModel

    def processSingleBackground(self, dataRef):
        """!Process a single CCD for the background

        This method is executed by the slaves.

        Because we're interested in the background, we detect and mask astrophysical
        sources, and pixels above the noise level.

        @param dataRef  Data reference for CCD.
        @return processed exposure
        """
        if not self.config.clobber and dataRef.datasetExists("postISRCCD"):
            return dataRef.get("postISRCCD")
        exposure = CalibTask.processSingle(self, dataRef)

        # Detect and mask astrophysical sources
        bgTemp = self.subtractBackground.run(exposure).background
        footprints = self.detection.detectFootprints(exposure, sigma=self.config.detectSigma)
        image = exposure.getMaskedImage()
        if footprints.background is not None:
            image += footprints.background.getImage()

        # Mask high pixels
        variance = image.getVariance()
        noise = np.sqrt(np.median(variance.getArray()))
        isHigh = image.getImage().getArray() > self.config.maskThresh*noise
        image.getMask().getArray()[isHigh] |= image.getMask().getPlaneBitMask("DETECTED")

        # Restore the background: it's what we want!
        image += bgTemp.getImage()

        # Set masked pixels to the background level so they don't corrupt the background model
        maskVal = image.getMask().getPlaneBitMask(self.config.mask)
        isBad = image.getMask().getArray() & maskVal > 0
        bgLevel = np.median(image.getImage().getArray()[~isBad])
        image.getImage().getArray()[isBad] = bgLevel
        dataRef.put(exposure, "postISRCCD")
        return exposure

    def processSingle(self, dataRef, backgrounds, scales):
        """Process a single CCD, specified by a data reference

        We subtract the appropriate focal plane background model,
        divide by the appropriate scale and measure the background.

        Only slave nodes execute this method.

        @param dataRef  Data reference for single CCD
        @param backgrounds  Background model for each visit
        @param scales  Scales for each visit
        @return Processed exposure
        """
        visit = tuple(dataRef.dataId[key] for key in sorted(self.config.visitKeys))
        exposure = dataRef.get("postISRCCD", immediate=True)
        image = exposure.getMaskedImage()
        detector = exposure.getDetector()
        bbox = image.getBBox()

        bgModel = backgrounds[visit]
        bg = bgModel.toCcdBackground(detector, bbox)
        image -= bg.getImage()
        image /= scales[visit]

        bg = self.subtractBackground.run(exposure).background
        dataRef.put(bg, "icExpBackground")
        return exposure

    def combine(self, cache, struct, outputId):
        """!Combine multiple background models of a particular CCD and write the output

        Only the slave nodes execute this method.

        @param cache  Process pool cache
        @param struct  Parameters for the combination, which has the following components:
            * ccdName     Name tuple for CCD
            * ccdIdList   List of data identifiers for combination
        @param outputId   Data identifier for combined image (exposure part only)
        @return binned calib image
        """
        outputId = self.getFullyQualifiedOutputId(struct.ccdName, cache.butler, outputId)
        dataRefList = [getDataRef(cache.butler, dataId) if dataId is not None else None for
                       dataId in struct.ccdIdList]
        self.log.info("Combining %s on %s" % (outputId, NODE))
        bgList = [dataRef.get("icExpBackground", immediate=True).clone() for dataRef in dataRefList]

        bgExp = self.sky.averageBackgrounds(bgList)

        self.recordCalibInputs(cache.butler, bgExp, struct.ccdIdList, outputId)
        cache.butler.put(bgExp, "sky", outputId)
        return afwMath.binImage(self.sky.exposureToBackground(bgExp).getImage(), self.config.binning)
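
# Illustrative numpy-only version of the "mask high pixels" step in
# SkyTask.processSingleBackground (hypothetical helper, not part of the original
# module): noise is estimated from the median variance and pixels above
# maskThresh*noise are flagged.
def _exampleMaskHighPixels(image, variance, maskThresh=3.0):
    noise = np.sqrt(np.median(variance))
    return image > maskThresh*noise  # boolean array of pixels to flag as DETECTED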