from __future__ import absolute_import, division, print_function

import sys
import math
import time
import argparse
import traceback
import collections

import numpy as np
from builtins import zip
from builtins import range

import lsst.afw.detection as afwDet
import lsst.afw.geom as afwGeom
import lsst.afw.image as afwImage
import lsst.afw.math as afwMath
import lsst.daf.base as dafBase
import lsst.meas.algorithms as measAlg
from lsst.pex.config import Config, ConfigurableField, Field, ListField, ConfigField
from lsst.pipe.base import Task, Struct, TaskRunner, ArgumentParser
from lsst.ip.isr import IsrTask
from lsst.pipe.tasks.repair import RepairTask
from lsst.ctrl.pool.parallel import BatchPoolTask
from lsst.ctrl.pool.pool import Pool, NODE
from .checksum import checksum
from .utils import getDataRef
from .background import SkyMeasurementTask, FocalPlaneBackground, FocalPlaneBackgroundConfig
35 """Parameters controlling the measurement of background statistics""" 36 stat = Field(doc=
"Statistic to use to estimate background (from lsst.afw.math)", dtype=int,
37 default=int(afwMath.MEANCLIP))
38 clip = Field(doc=
"Clipping threshold for background",
39 dtype=float, default=3.0)
40 nIter = Field(doc=
"Clipping iterations for background",
42 maxVisitsToCalcErrorFromInputVariance = Field(
43 doc=
"Maximum number of visits to estimate variance from input variance, not per-pixel spread",
45 mask = ListField(doc=
"Mask planes to reject",
46 dtype=str, default=[
"DETECTED",
"BAD",
"NO_DATA"])
50 """Measure statistics on the background 52 This can be useful for scaling the background, e.g., for flats and fringe frames. 54 ConfigClass = CalibStatsConfig
56 def run(self, exposureOrImage):
57 """!Measure a particular statistic on an image (of some sort). 59 @param exposureOrImage Exposure, MaskedImage or Image. 60 @return Value of desired statistic 62 stats = afwMath.StatisticsControl(self.config.clip, self.config.nIter,
63 afwImage.Mask.getPlaneBitMask(self.config.mask))
65 image = exposureOrImage.getMaskedImage()
68 image = exposureOrImage.getImage()
70 image = exposureOrImage
72 return afwMath.makeStatistics(image, self.config.stat, stats).getValue()
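    # Usage sketch for CalibStatsTask (hypothetical standalone construction; within this module
    # the task is normally created via makeSubtask("stats")):
    #     statsTask = CalibStatsTask(name="stats", config=CalibStatsConfig())
    #     level = statsTask.run(exposure)  # clipped statistic of the exposure's background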
76 """Configuration for combining calib images""" 77 rows = Field(doc=
"Number of rows to read at a time",
78 dtype=int, default=512)
79 mask = ListField(doc=
"Mask planes to respect", dtype=str,
80 default=[
"SAT",
"DETECTED",
"INTRP"])
81 combine = Field(doc=
"Statistic to use for combination (from lsst.afw.math)", dtype=int,
82 default=int(afwMath.MEANCLIP))
83 clip = Field(doc=
"Clipping threshold for combination",
84 dtype=float, default=3.0)
85 nIter = Field(doc=
"Clipping iterations for combination",
87 stats = ConfigurableField(target=CalibStatsTask,
88 doc=
"Background statistics configuration")
92 """Task to combine calib images""" 93 ConfigClass = CalibCombineConfig
96 Task.__init__(self, *args, **kwargs)
97 self.makeSubtask(
"stats")
99 def run(self, sensorRefList, expScales=None, finalScale=None, inputName="postISRCCD"):
100 """!Combine calib images for a single sensor 102 @param sensorRefList List of data references to combine (for a single sensor) 103 @param expScales List of scales to apply for each exposure 104 @param finalScale Desired scale for final combined image 105 @param inputName Data set name for inputs 106 @return combined image 109 stats = afwMath.StatisticsControl(self.config.clip, self.config.nIter,
110 afwImage.Mask.getPlaneBitMask(self.config.mask))
111 numImages = len(sensorRefList)
113 raise RuntimeError(
"No valid input data")
114 if numImages < self.config.stats.maxVisitsToCalcErrorFromInputVariance:
115 stats.setCalcErrorFromInputVariance(
True)
118 combined = afwImage.MaskedImageF(width, height)
119 imageList = [
None]*numImages
120 for start
in range(0, height, self.config.rows):
121 rows = min(self.config.rows, height - start)
122 box = afwGeom.Box2I(afwGeom.Point2I(0, start),
123 afwGeom.Extent2I(width, rows))
124 subCombined = combined.Factory(combined, box)
126 for i, sensorRef
in enumerate(sensorRefList):
127 if sensorRef
is None:
130 exposure = sensorRef.get(inputName +
"_sub", bbox=box)
131 if expScales
is not None:
133 imageList[i] = exposure.getMaskedImage()
135 self.
combine(subCombined, imageList, stats)
137 if finalScale
is not None:
138 background = self.stats.
run(combined)
139 self.log.info(
"%s: Measured background of stack is %f; adjusting to %f" %
140 (NODE, background, finalScale))
141 combined *= finalScale / background
146 """Get dimensions of the inputs""" 148 for sensorRef
in sensorRefList:
149 if sensorRef
is None:
151 md = sensorRef.get(inputName +
"_md")
152 dimList.append(afwImage.bboxFromMetadata(md).
getDimensions())
156 """Apply scale to input exposure 158 This implementation applies a flux scaling: the input exposure is 159 divided by the provided scale. 161 if scale
is not None:
162 mi = exposure.getMaskedImage()
166 """!Combine multiple images 168 @param target Target image to receive the combined pixels 169 @param imageList List of input images 170 @param stats Statistics control 172 images = [img
for img
in imageList
if img
is not None]
173 afwMath.statisticsStack(target, images, afwMath.Property(self.config.combine), stats)
177 """Determine a consistent size, given a list of image sizes""" 178 dim = set((w, h)
for w, h
in dimList)
181 raise RuntimeError(
"Inconsistent dimensions: %s" % dim)
186 """!Return a tuple of specific values from a dict 188 This provides a hashable representation of the dict from certain keywords. 189 This can be useful for creating e.g., a tuple of the values in the DataId 190 that identify the CCD. 192 @param dict_ dict to parse 193 @param keys keys to extract (order is important) 194 @return tuple of values 196 return tuple(dict_[k]
for k
in keys)
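# Usage sketch for dictToTuple (hypothetical dataId): the result is hashable, so it can key a dict.
#     dictToTuple({"visit": 1234, "ccd": 5}, ["ccd"])           # --> (5,)
#     dictToTuple({"visit": 1234, "ccd": 5}, ["ccd", "visit"])  # --> (5, 1234)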
200 """!Determine a list of CCDs from exposure references 202 This essentially inverts the exposure-level references (which 203 provides a list of CCDs for each exposure), by providing 204 a dataId list for each CCD. Consider an input list of exposures 205 [e1, e2, e3], and each exposure has CCDs c1 and c2. Then this 208 {(c1,): [e1c1, e2c1, e3c1], (c2,): [e1c2, e2c2, e3c2]} 210 This is a dict whose keys are tuples of the identifying values of a 211 CCD (usually just the CCD number) and the values are lists of dataIds 212 for that CCD in each exposure. A missing dataId is given the value 215 @param expRefList List of data references for exposures 216 @param level Level for the butler to generate CCDs 217 @param ccdKeys DataId keywords that identify a CCD 218 @return dict of data identifier lists for each CCD; 219 keys are values of ccdKeys in order 221 expIdList = [[ccdRef.dataId
for ccdRef
in expRef.subItems(
222 level)]
for expRef
in expRefList]
225 if len(ccdKeys) != len(set(ccdKeys)):
226 raise RuntimeError(
"Duplicate keys found in ccdKeys: %s" % ccdKeys)
228 for ccdIdList
in expIdList:
229 for ccdId
in ccdIdList:
236 for n, ccdIdList
in enumerate(expIdList):
237 for ccdId
in ccdIdList:
239 if name
not in ccdLists:
241 ccdLists[name].append(ccdId)
245 ccdLists[ccd] = sorted(ccdLists[ccd], key=
lambda dd:
dictToTuple(dd, sorted(dd.keys())))
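# Structure sketch for getCcdIdListFromExposures (hypothetical dataIds: two exposures sharing
# CCDs 0 and 1, ccdKeys=["ccd"]):
#     {(0,): [{"visit": 1, "ccd": 0}, {"visit": 2, "ccd": 0}],
#      (1,): [{"visit": 1, "ccd": 1}, {"visit": 2, "ccd": 1}]}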
251 """Generate a matrix of results using pool.map 253 The function should have the call signature: 254 func(cache, dataId, *args, **kwargs) 256 We return a dict mapping 'ccd name' to a list of values for 259 @param pool Process pool 260 @param func Function to call for each dataId 261 @param ccdIdLists Dict of data identifier lists for each CCD name 262 @return matrix of results 264 dataIdList = sum(ccdIdLists.values(), [])
265 resultList = pool.map(func, dataIdList, *args, **kwargs)
267 data = dict((ccdName, [
None] * len(expList))
for ccdName, expList
in ccdIdLists.items())
268 indices = dict(sum([[(tuple(dataId.values())
if dataId
is not None else None, (ccdName, expNum))
269 for expNum, dataId
in enumerate(expList)]
270 for ccdName, expList
in ccdIdLists.items()], []))
271 for dataId, result
in zip(dataIdList, resultList):
274 ccdName, expNum = indices[tuple(dataId.values())]
275 data[ccdName][expNum] = result
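# Result-shape sketch for mapToMatrix (hypothetical identifiers): with
# ccdIdLists = {(0,): [idA, idB], (1,): [idC, idD]}, the return value is
# {(0,): [func(cache, idA), func(cache, idB)], (1,): [func(cache, idC), func(cache, idD)]},
# i.e. one entry per exposure for each CCD name, in the same order as ccdIdLists.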
280 """Split name=value pairs and put the result in a dict""" 282 def __call__(self, parser, namespace, values, option_string):
283 output = getattr(namespace, self.dest, {})
284 for nameValue
in values:
285 name, sep, valueStr = nameValue.partition(
"=")
287 parser.error(
"%s value %s must be in form name=value" %
288 (option_string, nameValue))
289 output[name] = valueStr
290 setattr(namespace, self.dest, output)
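# Parsing sketch for CalibIdAction (hypothetical keys): `--calibId version=1 object=FLAT` yields
# namespace.calibId == {"version": "1", "object": "FLAT"}; values stay as strings here and are
# coerced to the butler's key types later, in CalibArgumentParser.parse_args.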
294 """ArgumentParser for calibration construction""" 297 """Add a --calibId argument to the standard pipe_base argument parser""" 298 ArgumentParser.__init__(self, *args, **kwargs)
300 self.add_id_argument(
"--id", datasetType=
"raw",
301 help=
"input identifiers, e.g., --id visit=123 ccd=4")
302 self.add_argument(
"--calibId", nargs=
"*", action=CalibIdAction, default={},
303 help=
"identifiers for calib, e.g., --calibId version=1",
304 metavar=
"KEY=VALUE1[^VALUE2[^VALUE3...]")
    def parse_args(self, *args, **kwargs):
        """Parse arguments

        Checks that the "--calibId" provided works.
        """
        namespace = ArgumentParser.parse_args(self, *args, **kwargs)

        keys = namespace.butler.getKeys(self.calibName)
        parsed = {}
        for name, value in namespace.calibId.items():
            if name not in keys:
                self.error("%s is not a relevant calib identifier key (%s)" % (name, keys))
            parsed[name] = keys[name](value)
        namespace.calibId = parsed

        return namespace
326 """Configuration for constructing calibs""" 327 clobber = Field(dtype=bool, default=
True,
328 doc=
"Clobber existing processed images?")
329 isr = ConfigurableField(target=IsrTask, doc=
"ISR configuration")
330 dateObs = Field(dtype=str, default=
"dateObs",
331 doc=
"Key for observation date in exposure registry")
332 dateCalib = Field(dtype=str, default=
"calibDate",
333 doc=
"Key for calib date in calib registry")
334 filter = Field(dtype=str, default=
"filter",
335 doc=
"Key for filter name in exposure/calib registries")
336 combination = ConfigurableField(
337 target=CalibCombineTask, doc=
"Calib combination configuration")
338 ccdKeys = ListField(dtype=str, default=[
"ccd"],
339 doc=
"DataId keywords specifying a CCD")
340 visitKeys = ListField(dtype=str, default=[
"visit"],
341 doc=
"DataId keywords specifying a visit")
342 calibKeys = ListField(dtype=str, default=[],
343 doc=
"DataId keywords specifying a calibration")
344 doCameraImage = Field(dtype=bool, default=
True, doc=
"Create camera overview image?")
345 binning = Field(dtype=int, default=64, doc=
"Binning to apply for camera image")
    def setDefaults(self):
        self.isr.doWrite = False


class CalibTaskRunner(TaskRunner):
    """Get parsed values into the CalibTask.run"""

    @staticmethod
    def getTargetList(parsedCmd, **kwargs):
        return [dict(expRefList=parsedCmd.id.refList, butler=parsedCmd.butler, calibId=parsedCmd.calibId)]
358 """Call the Task with the kwargs from getTargetList""" 359 task = self.TaskClass(config=self.config, log=self.log)
362 result = task.runDataRef(**args)
365 result = task.runDataRef(**args)
366 except Exception
as e:
371 task.log.fatal(
"Failed: %s" % e)
372 traceback.print_exc(file=sys.stderr)
374 if self.doReturnResults:
376 exitStatus=exitStatus,
378 metadata=task.metadata,
383 exitStatus=exitStatus,
388 """!Base class for constructing calibs. 390 This should be subclassed for each of the required calib types. 391 The subclass should be sure to define the following class variables: 392 * _DefaultName: default name of the task, used by CmdLineTask 393 * calibName: name of the calibration data set in the butler 394 The subclass may optionally set: 395 * filterName: filter name to give the resultant calib 397 ConfigClass = CalibConfig
398 RunnerClass = CalibTaskRunner
405 BatchPoolTask.__init__(self, *args, **kwargs)
406 self.makeSubtask(
"isr")
407 self.makeSubtask(
"combination")
411 numCcds = len(parsedCmd.butler.get(
"camera"))
413 parsedCmd)[0][
'expRefList'])
414 numCycles = int(numCcds/float(numCores) + 0.5)
415 return time*numExps*numCycles
    @classmethod
    def _makeArgumentParser(cls, *args, **kwargs):
        kwargs.pop("doBatch", False)
        return CalibArgumentParser(calibName=cls.calibName, name=cls._DefaultName, *args, **kwargs)
423 """!Construct a calib from a list of exposure references 425 This is the entry point, called by the TaskRunner.__call__ 427 Only the master node executes this method. 429 @param expRefList List of data references at the exposure level 430 @param butler Data butler 431 @param calibId Identifier dict for calib 433 if len(expRefList) < 1:
434 raise RuntimeError(
"No valid input data")
436 for expRef
in expRefList:
437 self.
addMissingKeys(expRef.dataId, butler, self.config.ccdKeys,
'raw')
441 expRefList, level=
"sensor", ccdKeys=self.config.ccdKeys)
445 outputIdItemList = list(outputId.items())
446 for ccdName
in ccdIdLists:
447 dataId = dict([(k, ccdName[i])
for i, k
in enumerate(self.config.ccdKeys)])
448 dataId.update(outputIdItemList)
450 dataId.update(outputIdItemList)
453 butler.get(self.
calibName +
"_filename", dataId)
454 except Exception
as e:
456 "Unable to determine output filename \"%s_filename\" from %s: %s" %
459 processPool =
Pool(
"process")
460 processPool.storeSet(butler=butler)
466 scales = self.
scale(ccdIdLists, data)
468 combinePool =
Pool(
"combine")
469 combinePool.storeSet(butler=butler)
472 calibs = self.
scatterCombine(combinePool, outputId, ccdIdLists, scales)
474 if self.config.doCameraImage:
475 camera = butler.get(
"camera")
477 calibs = {butler.get(
"postISRCCD_detector",
478 dict(zip(self.config.ccdKeys, ccdName))).getId(): calibs[ccdName]
479 for ccdName
in ccdIdLists}
483 butler.put(cameraImage, self.
calibName +
"_camera", dataId)
484 except Exception
as exc:
485 self.log.warn(
"Unable to create camera image: %s" % (exc,))
489 ccdIdLists=ccdIdLists,
492 processPool=processPool,
493 combinePool=combinePool,
497 """!Generate the data identifier for the output calib 499 The mean date and the common filter are included, using keywords 500 from the configuration. The CCD-specific part is not included 501 in the data identifier. 503 @param expRefList List of data references at exposure level 504 @param calibId Data identifier elements for the calib provided by the user 505 @return data identifier 509 for expRef
in expRefList:
510 butler = expRef.getButler()
511 dataId = expRef.dataId
513 midTime += self.
getMjd(butler, dataId)
516 if filterName
is None:
517 filterName = thisFilter
518 elif filterName != thisFilter:
519 raise RuntimeError(
"Filter mismatch for %s: %s vs %s" % (
520 dataId, thisFilter, filterName))
522 midTime /= len(expRefList)
523 date = str(dafBase.DateTime(
524 midTime, dafBase.DateTime.MJD).toPython().date())
526 outputId = {self.config.filter: filterName,
527 self.config.dateCalib: date}
528 outputId.update(calibId)
    def getMjd(self, butler, dataId, timescale=dafBase.DateTime.UTC):
        """Determine the Modified Julian Date (MJD; in TAI) from a data identifier"""
        if self.config.dateObs in dataId:
            dateObs = dataId[self.config.dateObs]
        else:
            dateObs = butler.queryMetadata('raw', [self.config.dateObs], dataId)[0]
        if "T" not in dateObs:
            dateObs = dateObs + "T12:00:00.0Z"
        elif not dateObs.endswith("Z"):
            dateObs += "Z"

        return dafBase.DateTime(dateObs, timescale).get(dafBase.DateTime.MJD)
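    # Normalisation sketch for getMjd (hypothetical registry value): a bare date of "2013-06-17"
    # becomes "2013-06-17T12:00:00.0Z" before being converted to an MJD via dafBase.DateTime.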
545 """Determine the filter from a data identifier""" 546 filt = butler.queryMetadata(
'raw', [self.config.filter], dataId)[0]
    def addMissingKeys(self, dataId, butler, missingKeys=None, calibName=None):
        if calibName is None:
            calibName = self.calibName

        if missingKeys is None:
            missingKeys = set(butler.getKeys(calibName).keys()) - set(dataId.keys())

        for k in missingKeys:
            try:
                v = butler.queryMetadata('raw', [k], dataId)
            except Exception:
                continue

            if len(v) == 0:  # failed to look up value
                continue

            if len(v) == 1:
                dataId[k] = v[0]
            else:
                raise RuntimeError("No unique lookup for %s: %s" % (k, v))
571 """!Update the metadata from the VisitInfo 573 @param calibImage The image whose metadata is to be set 574 @param exposureTime The exposure time for the image 575 @param darkTime The time since the last read (default: exposureTime) 579 darkTime = exposureTime
581 visitInfo = afwImage.makeVisitInfo(exposureTime=exposureTime, darkTime=darkTime, **kwargs)
582 md = calibImage.getMetadata()
584 afwImage.setVisitInfoMetadata(md, visitInfo)
587 """!Scatter the processing among the nodes 589 We scatter each CCD independently (exposures aren't grouped together), 590 to make full use of all available processors. This necessitates piecing 591 everything back together in the same format as ccdIdLists afterwards. 593 Only the master node executes this method. 595 @param pool Process pool 596 @param ccdIdLists Dict of data identifier lists for each CCD name 597 @return Dict of lists of returned data for each CCD name 599 self.log.info(
"Scatter processing")
    def process(self, cache, ccdId, outputName="postISRCCD", **kwargs):
        """!Process a CCD, specified by a data identifier

        After processing, optionally returns a result (produced by
        the 'processResult' method) calculated from the processed
        exposure.  These results will be gathered by the master node,
        and is a means for coordinated scaling of all CCDs for flats,
        etc.

        Only slave nodes execute this method.

        @param cache       Process pool cache
        @param ccdId       Data identifier for CCD
        @param outputName  Output dataset name for butler
        @return result from 'processResult'
        """
        if ccdId is None:
            self.log.warn("Null identifier received on %s" % NODE)
            return None
        sensorRef = getDataRef(cache.butler, ccdId)
        if self.config.clobber or not sensorRef.datasetExists(outputName):
            self.log.info("Processing %s on %s" % (ccdId, NODE))
            try:
                exposure = self.processSingle(sensorRef, **kwargs)
            except Exception as e:
                self.log.warn("Unable to process %s: %s" % (ccdId, e))
                raise
            self.processWrite(sensorRef, exposure)
        else:
            self.log.info("Using previously persisted processed exposure for %s" % (sensorRef.dataId,))
            exposure = sensorRef.get(outputName)
        return self.processResult(exposure)
638 """Process a single CCD, specified by a data reference 640 Generally, this simply means doing ISR. 642 Only slave nodes execute this method. 647 """!Write the processed CCD 649 We need to write these out because we can't hold them all in 652 Only slave nodes execute this method. 654 @param dataRef Data reference 655 @param exposure CCD exposure to write 656 @param outputName Output dataset name for butler. 658 dataRef.put(exposure, outputName)
661 """Extract processing results from a processed exposure 663 This method generates what is gathered by the master node. 664 This can be a background measurement or similar for scaling 665 flat-fields. It must be picklable! 667 Only slave nodes execute this method. 672 """!Determine scaling across CCDs and exposures 674 This is necessary mainly for flats, so as to determine a 675 consistent scaling across the entire focal plane. This 676 implementation is simply a placeholder. 678 Only the master node executes this method. 680 @param ccdIdLists Dict of data identifier lists for each CCD tuple 681 @param data Dict of lists of returned data for each CCD tuple 682 @return dict of Struct(ccdScale: scaling for CCD, 683 expScales: scaling for each exposure 686 self.log.info(
"Scale on %s" % NODE)
687 return dict((name, Struct(ccdScale=
None, expScales=[
None] * len(ccdIdLists[name])))
688 for name
in ccdIdLists)
691 """!Scatter the combination of exposures across multiple nodes 693 In this case, we can only scatter across as many nodes as 696 Only the master node executes this method. 698 @param pool Process pool 699 @param outputId Output identifier (exposure part only) 700 @param ccdIdLists Dict of data identifier lists for each CCD name 701 @param scales Dict of structs with scales, for each CCD name 702 @param dict of binned images 704 self.log.info(
"Scatter combination")
705 data = [Struct(ccdName=ccdName, ccdIdList=ccdIdLists[ccdName], scales=scales[ccdName])
for 706 ccdName
in ccdIdLists]
707 images = pool.map(self.
combine, data, outputId)
708 return dict(zip(ccdIdLists.keys(), images))
711 """Get fully-qualified output data identifier 713 We may need to look up keys that aren't in the output dataId. 715 @param ccdName Name tuple for CCD 716 @param butler Data butler 717 @param outputId Data identifier for combined image (exposure part only) 718 @return fully-qualified output dataId 720 fullOutputId = {k: ccdName[i]
for i, k
in enumerate(self.config.ccdKeys)}
721 fullOutputId.update(outputId)
723 fullOutputId.update(outputId)
727 """!Combine multiple exposures of a particular CCD and write the output 729 Only the slave nodes execute this method. 731 @param cache Process pool cache 732 @param struct Parameters for the combination, which has the following components: 733 * ccdName Name tuple for CCD 734 * ccdIdList List of data identifiers for combination 735 * scales Scales to apply (expScales are scalings for each exposure, 736 ccdScale is final scale for combined image) 737 @param outputId Data identifier for combined image (exposure part only) 738 @return binned calib image 741 dataRefList = [
getDataRef(cache.butler, dataId)
if dataId
is not None else None for 742 dataId
in struct.ccdIdList]
743 self.log.info(
"Combining %s on %s" % (outputId, NODE))
744 calib = self.combination.run(dataRefList, expScales=struct.scales.expScales,
745 finalScale=struct.scales.ccdScale)
747 if not hasattr(calib,
"getMetadata"):
748 if hasattr(calib,
"getVariance"):
749 calib = afwImage.makeExposure(calib)
751 calib = afwImage.DecoratedImageF(calib.getImage())
756 struct.ccdIdList, outputId)
760 self.
write(cache.butler, calib, outputId)
762 return afwMath.binImage(calib.getImage(), self.config.binning)
765 """!Record metadata including the inputs and creation details 767 This metadata will go into the FITS header. 769 @param butler Data butler 770 @param calib Combined calib exposure. 771 @param dataIdList List of data identifiers for calibration inputs 772 @param outputId Data identifier for output 774 header = calib.getMetadata()
778 now = time.localtime()
779 header.add(
"CALIB_CREATION_DATE", time.strftime(
"%Y-%m-%d", now))
780 header.add(
"CALIB_CREATION_TIME", time.strftime(
"%X %Z", now))
782 header.add(
"DATE-OBS",
"%sT00:00:00.00" % outputId[self.config.dateCalib])
785 visits = [str(
dictToTuple(dataId, self.config.visitKeys))
for dataId
in dataIdList
if 787 for i, v
in enumerate(sorted(set(visits))):
788 header.add(
"CALIB_INPUT_%d" % (i,), v)
790 header.add(
"CALIB_ID",
" ".join(
"%s=%s" % (key, value)
791 for key, value
in outputId.items()))
795 """Interpolate over NANs in the combined image 797 NANs can result from masked areas on the CCD. We don't want them getting 798 into our science images, so we replace them with the median of the image. 800 if hasattr(image,
"getMaskedImage"):
802 image = image.getMaskedImage().getImage()
803 if hasattr(image,
"getImage"):
804 image = image.getImage()
805 array = image.getArray()
806 bad = np.isnan(array)
807 array[bad] = np.median(array[np.logical_not(bad)])
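    # Behaviour sketch for interpolateNans (pure numpy, hypothetical values): pixels of
    # [1.0, nan, 3.0] become [1.0, 2.0, 3.0], since the median of the finite pixels is 2.0.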
    def write(self, butler, exposure, dataId):
        """!Write the final combined calib

        Only the slave nodes execute this method

        @param butler    Data butler
        @param exposure  CCD exposure to write
        @param dataId    Data identifier for output
        """
        self.log.info("Writing %s on %s" % (dataId, NODE))
        butler.put(exposure, self.calibName, dataId)
822 """!Create and write an image of the entire camera 824 This is useful for judging the quality or getting an overview of 825 the features of the calib. 827 @param camera Camera object 828 @param dataId Data identifier for output 829 @param calibs Dict mapping CCD detector ID to calib image 834 """Check that the list of CCD dataIds is consistent 836 @param ccdIdLists Dict of data identifier lists for each CCD name 837 @return Number of exposures, number of CCDs 839 visitIdLists = collections.defaultdict(list)
840 for ccdName
in ccdIdLists:
841 for dataId
in ccdIdLists[ccdName]:
842 visitName =
dictToTuple(dataId, self.config.visitKeys)
843 visitIdLists[visitName].append(dataId)
845 numExps = set(len(expList)
for expList
in ccdIdLists.values())
846 numCcds = set(len(ccdList)
for ccdList
in visitIdLists.values())
848 if len(numExps) != 1
or len(numCcds) != 1:
851 self.log.warn(
"Number of visits for each CCD: %s",
852 {ccdName: len(ccdIdLists[ccdName])
for ccdName
in ccdIdLists})
853 self.log.warn(
"Number of CCDs for each visit: %s",
854 {vv: len(visitIdLists[vv])
for vv
in visitIdLists})
855 raise RuntimeError(
"Inconsistent number of exposures/CCDs")
857 return numExps.pop(), numCcds.pop()
861 """Configuration for bias construction. 863 No changes required compared to the base class, but 864 subclassed for distinction. 869 class BiasTask(CalibTask):
870 """Bias construction""" 871 ConfigClass = BiasConfig
872 _DefaultName =
"bias" 879 """Overrides to apply for bias construction""" 880 config.isr.doBias =
False 881 config.isr.doDark =
False 882 config.isr.doFlat =
False 883 config.isr.doFringe =
False 887 """Configuration for dark construction""" 888 doRepair = Field(dtype=bool, default=
True, doc=
"Repair artifacts?")
889 psfFwhm = Field(dtype=float, default=3.0, doc=
"Repair PSF FWHM (pixels)")
890 psfSize = Field(dtype=int, default=21, doc=
"Repair PSF size (pixels)")
891 crGrow = Field(dtype=int, default=2, doc=
"Grow radius for CR (pixels)")
892 repair = ConfigurableField(
893 target=RepairTask, doc=
"Task to repair artifacts")
    def setDefaults(self):
        CalibConfig.setDefaults(self)
class DarkTask(CalibTask):
    """Dark construction

    The only major difference from the base class is a cosmic-ray
    identification stage, and dividing each image by the dark time
    to generate images of the dark rate.
    """
    ConfigClass = DarkConfig
    _DefaultName = "dark"
    calibName = "dark"

    def __init__(self, *args, **kwargs):
        CalibTask.__init__(self, *args, **kwargs)
        self.makeSubtask("repair")

    @classmethod
    def applyOverrides(cls, config):
        """Overrides to apply for dark construction"""
        config.isr.doDark = False
        config.isr.doFlat = False
        config.isr.doFringe = False
False 924 """Process a single CCD 926 Besides the regular ISR, also masks cosmic-rays and divides each 927 processed image by the dark time to generate images of the dark rate. 928 The dark time is provided by the 'getDarkTime' method. 930 exposure = CalibTask.processSingle(self, sensorRef)
932 if self.config.doRepair:
933 psf = measAlg.DoubleGaussianPsf(self.config.psfSize, self.config.psfSize,
934 self.config.psfFwhm/(2*math.sqrt(2*math.log(2))))
936 self.repair.run(exposure, keepCRs=
False)
937 if self.config.crGrow > 0:
938 mask = exposure.getMaskedImage().getMask().clone()
939 mask &= mask.getPlaneBitMask(
"CR")
940 fpSet = afwDet.FootprintSet(
941 mask, afwDet.Threshold(0.5))
942 fpSet = afwDet.FootprintSet(fpSet, self.config.crGrow,
True)
943 fpSet.setMask(exposure.getMaskedImage().getMask(),
"CR")
945 mi = exposure.getMaskedImage()
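    # Note on the repair PSF used above: DoubleGaussianPsf takes a Gaussian sigma, so the configured
    # FWHM is converted via sigma = FWHM / (2*sqrt(2*ln 2)) ~= FWHM / 2.3548; the default psfFwhm of
    # 3.0 pixels therefore corresponds to sigma ~= 1.27 pixels.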
950 """Retrieve the dark time for an exposure""" 951 darkTime = exposure.getInfo().getVisitInfo().
getDarkTime()
952 if not np.isfinite(darkTime):
953 raise RuntimeError(
"Non-finite darkTime")
958 """Configuration for flat construction""" 959 iterations = Field(dtype=int, default=10,
960 doc=
"Number of iterations for scale determination")
961 stats = ConfigurableField(target=CalibStatsTask,
962 doc=
"Background statistics configuration")
class FlatTask(CalibTask):
    """Flat construction

    The principal change from the base class involves gathering the background
    values from each image and using them to determine the scalings for the final
    combination.
    """
    ConfigClass = FlatConfig
    _DefaultName = "flat"
    calibName = "flat"

    @classmethod
    def applyOverrides(cls, config):
        """Overrides for flat construction"""
        config.isr.doFlat = False
        config.isr.doFringe = False

    def __init__(self, *args, **kwargs):
        CalibTask.__init__(self, *args, **kwargs)
        self.makeSubtask("stats")
    def processResult(self, exposure):
        return self.stats.run(exposure)
990 """Determine the scalings for the final combination 992 We have a matrix B_ij = C_i E_j, where C_i is the relative scaling 993 of one CCD to all the others in an exposure, and E_j is the scaling 994 of the exposure. We convert everything to logarithms so we can work 995 with a linear system. We determine the C_i and E_j from B_ij by iteration, 996 under the additional constraint that the average CCD scale is unity. 998 This algorithm comes from Eugene Magnier and Pan-STARRS. 1000 assert len(ccdIdLists.values()) > 0,
"No successful CCDs" 1001 lengths = set([len(expList)
for expList
in ccdIdLists.values()])
1002 assert len(lengths) == 1,
"Number of successful exposures for each CCD differs" 1003 assert tuple(lengths)[0] > 0,
"No successful exposures" 1005 indices = dict((name, i)
for i, name
in enumerate(ccdIdLists))
1006 bgMatrix = np.array([[0.0] * len(expList)
for expList
in ccdIdLists.values()])
1007 for name
in ccdIdLists:
1009 bgMatrix[i] = [d
if d
is not None else np.nan
for d
in data[name]]
1011 numpyPrint = np.get_printoptions()
1012 np.set_printoptions(threshold=np.inf)
1013 self.log.info(
"Input backgrounds: %s" % bgMatrix)
1016 numCcds = len(ccdIdLists)
1017 numExps = bgMatrix.shape[1]
1019 bgMatrix = np.log(bgMatrix)
1020 bgMatrix = np.ma.masked_array(bgMatrix, ~np.isfinite(bgMatrix))
1022 compScales = np.zeros(numCcds)
1023 expScales = np.array([(bgMatrix[:, i0] - compScales).mean()
for i0
in range(numExps)])
1025 for iterate
in range(self.config.iterations):
1026 compScales = np.array([(bgMatrix[i1, :] - expScales).mean()
for i1
in range(numCcds)])
1027 bad = np.isnan(compScales)
1030 compScales[bad] = compScales[~bad].mean()
1031 expScales = np.array([(bgMatrix[:, i2] - compScales).mean()
for i2
in range(numExps)])
1033 avgScale = np.average(np.exp(compScales))
1034 compScales -= np.log(avgScale)
1035 self.log.debug(
"Iteration %d exposure scales: %s", iterate, np.exp(expScales))
1036 self.log.debug(
"Iteration %d component scales: %s", iterate, np.exp(compScales))
1038 expScales = np.array([(bgMatrix[:, i3] - compScales).mean()
for i3
in range(numExps)])
1040 if np.any(np.isnan(expScales)):
1041 raise RuntimeError(
"Bad exposure scales: %s --> %s" % (bgMatrix, expScales))
1043 expScales = np.exp(expScales)
1044 compScales = np.exp(compScales)
1046 self.log.info(
"Exposure scales: %s" % expScales)
1047 self.log.info(
"Component relative scaling: %s" % compScales)
1048 np.set_printoptions(**numpyPrint)
1050 return dict((ccdName, Struct(ccdScale=compScales[indices[ccdName]], expScales=expScales))
1051 for ccdName
in ccdIdLists)
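    # Worked sketch of the iteration above (hypothetical backgrounds): for two CCDs and two exposures
    # with B = [[10, 20], [20, 40]] (so B_ij = C_i*E_j exactly), the fit converges to component scales
    # C ~= [2/3, 4/3] (the mean CCD scale is forced to unity) and exposure scales E ~= [15, 30],
    # which reproduce B: (2/3)*15 = 10, (2/3)*30 = 20, (4/3)*15 = 20, (4/3)*30 = 40.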
1055 """Configuration for fringe construction""" 1056 stats = ConfigurableField(target=CalibStatsTask,
1057 doc=
"Background statistics configuration")
1058 subtractBackground = ConfigurableField(target=measAlg.SubtractBackgroundTask,
1059 doc=
"Background configuration")
1060 detection = ConfigurableField(
1061 target=measAlg.SourceDetectionTask, doc=
"Detection configuration")
1062 detectSigma = Field(dtype=float, default=1.0,
1063 doc=
"Detection PSF gaussian sigma")
1066 CalibConfig.setDefaults(self)
1067 self.
detection.reEstimateBackground =
False 1071 """Fringe construction task 1073 The principal change from the base class is that the images are 1074 background-subtracted and rescaled by the background. 1076 XXX This is probably not right for a straight-up combination, as we 1077 are currently doing, since the fringe amplitudes need not scale with 1080 XXX Would like to have this do PCA and generate multiple images, but 1081 that will take a bit of work with the persistence code. 1083 ConfigClass = FringeConfig
1084 _DefaultName =
"fringe" 1085 calibName =
"fringe" 1089 """Overrides for fringe construction""" 1090 config.isr.doFringe =
False 1093 CalibTask.__init__(self, *args, **kwargs)
1094 self.makeSubtask(
"detection")
1095 self.makeSubtask(
"stats")
1096 self.makeSubtask(
"subtractBackground")
1099 """Subtract the background and normalise by the background level""" 1100 exposure = CalibTask.processSingle(self, sensorRef)
1101 bgLevel = self.stats.run(exposure)
1102 self.subtractBackground.run(exposure)
1103 mi = exposure.getMaskedImage()
1105 footprintSets = self.detection.detectFootprints(
1106 exposure, sigma=self.config.detectSigma)
1107 mask = exposure.getMaskedImage().getMask()
1108 detected = 1 << mask.addMaskPlane(
"DETECTED")
1109 for fpSet
in (footprintSets.positive, footprintSets.negative):
1110 if fpSet
is not None:
1111 afwDet.setMaskFromFootprintList(
1112 mask, fpSet.getFootprints(), detected)
1117 """Configuration for sky frame construction""" 1118 detection = ConfigurableField(target=measAlg.SourceDetectionTask, doc=
"Detection configuration")
1119 detectSigma = Field(dtype=float, default=2.0, doc=
"Detection PSF gaussian sigma")
1120 subtractBackground = ConfigurableField(target=measAlg.SubtractBackgroundTask,
1121 doc=
"Regular-scale background configuration, for object detection")
1122 largeScaleBackground = ConfigField(dtype=FocalPlaneBackgroundConfig,
1123 doc=
"Large-scale background configuration")
1124 sky = ConfigurableField(target=SkyMeasurementTask, doc=
"Sky measurement")
1125 maskThresh = Field(dtype=float, default=3.0, doc=
"k-sigma threshold for masking pixels")
1126 mask = ListField(dtype=str, default=[
"BAD",
"SAT",
"DETECTED",
"NO_DATA"],
1127 doc=
"Mask planes to consider as contaminated")
1131 """Task for sky frame construction 1133 The sky frame is a (relatively) small-scale background 1134 model, the response of the camera to the sky. 1136 To construct, we first remove a large-scale background (e.g., caused 1137 by moonlight) which may vary from image to image. Then we construct a 1138 model of the sky, which is essentially a binned version of the image 1139 (important configuration parameters: sky.background.[xy]BinSize). 1140 It is these models which are coadded to yield the sky frame. 1142 ConfigClass = SkyConfig
1143 _DefaultName =
"sky" 1147 CalibTask.__init__(self, *args, **kwargs)
1148 self.makeSubtask(
"detection")
1149 self.makeSubtask(
"subtractBackground")
1150 self.makeSubtask(
"sky")
1153 """!Scatter the processing among the nodes 1155 Only the master node executes this method, assigning work to the 1158 We measure and subtract off a large-scale background model across 1159 all CCDs, which requires a scatter/gather. Then we process the 1160 individual CCDs, subtracting the large-scale background model and 1161 the residual background model measured. These residuals will be 1162 combined for the sky frame. 1164 @param pool Process pool 1165 @param ccdIdLists Dict of data identifier lists for each CCD name 1166 @return Dict of lists of returned data for each CCD name 1168 self.log.info(
"Scatter processing")
1170 numExps = set(len(expList)
for expList
in ccdIdLists.values()).pop()
1178 for exp
in range(numExps):
1179 bgModels = [bgModelList[ccdName][exp]
for ccdName
in ccdIdLists]
1180 visit = set(tuple(ccdIdLists[ccdName][exp][key]
for key
in sorted(self.config.visitKeys))
for 1181 ccdName
in ccdIdLists)
1182 assert len(visit) == 1
1184 bgModel = bgModels[0]
1185 for bg
in bgModels[1:]:
1187 self.log.info(
"Background model min/max for visit %s: %f %f", visit,
1188 np.min(bgModel.getStatsImage().getArray()),
1189 np.max(bgModel.getStatsImage().getArray()))
1190 backgrounds[visit] = bgModel
1191 scales[visit] = np.median(bgModel.getStatsImage().getArray())
1193 return mapToMatrix(pool, self.
process, ccdIdLists, backgrounds=backgrounds, scales=scales)
1196 """!Measure background model for CCD 1198 This method is executed by the slaves. 1200 The background models for all CCDs in an exposure will be 1201 combined to form a full focal-plane background model. 1203 @param cache Process pool cache 1204 @param dataId Data identifier 1205 @return Bcakground model 1212 config = self.config.largeScaleBackground
1213 camera = dataRef.get(
"camera")
1214 bgModel = FocalPlaneBackground.fromCamera(config, camera)
1215 bgModel.addCcd(exposure)
1219 """!Process a single CCD for the background 1221 This method is executed by the slaves. 1223 Because we're interested in the background, we detect and mask astrophysical 1224 sources, and pixels above the noise level. 1226 @param dataRef Data reference for CCD. 1227 @return processed exposure 1229 if not self.config.clobber
and dataRef.datasetExists(
"postISRCCD"):
1230 return dataRef.get(
"postISRCCD")
1231 exposure = CalibTask.processSingle(self, dataRef)
1234 bgTemp = self.subtractBackground.run(exposure).background
1235 footprints = self.detection.detectFootprints(exposure, sigma=self.config.detectSigma)
1236 image = exposure.getMaskedImage()
1237 if footprints.background
is not None:
1238 image += footprints.background.getImage()
1241 variance = image.getVariance()
1242 noise = np.sqrt(np.median(variance.getArray()))
1243 isHigh = image.getImage().getArray() > self.config.maskThresh*noise
1244 image.getMask().getArray()[isHigh] |= image.getMask().getPlaneBitMask(
"DETECTED")
1247 image += bgTemp.getImage()
1250 maskVal = image.getMask().getPlaneBitMask(self.config.mask)
1251 isBad = image.getMask().getArray() & maskVal > 0
1252 bgLevel = np.median(image.getImage().getArray()[~isBad])
1253 image.getImage().getArray()[isBad] = bgLevel
1254 dataRef.put(exposure,
"postISRCCD")
1258 """Process a single CCD, specified by a data reference 1260 We subtract the appropriate focal plane background model, 1261 divide by the appropriate scale and measure the background. 1263 Only slave nodes execute this method. 1265 @param dataRef Data reference for single CCD 1266 @param backgrounds Background model for each visit 1267 @param scales Scales for each visit 1268 @return Processed exposure 1270 visit = tuple(dataRef.dataId[key]
for key
in sorted(self.config.visitKeys))
1271 exposure = dataRef.get(
"postISRCCD", immediate=
True)
1272 image = exposure.getMaskedImage()
1273 detector = exposure.getDetector()
1274 bbox = image.getBBox()
1276 bgModel = backgrounds[visit]
1277 bg = bgModel.toCcdBackground(detector, bbox)
1278 image -= bg.getImage()
1279 image /= scales[visit]
1282 dataRef.put(bg,
"icExpBackground")
1286 """!Combine multiple background models of a particular CCD and write the output 1288 Only the slave nodes execute this method. 1290 @param cache Process pool cache 1291 @param struct Parameters for the combination, which has the following components: 1292 * ccdName Name tuple for CCD 1293 * ccdIdList List of data identifiers for combination 1294 @param outputId Data identifier for combined image (exposure part only) 1295 @return binned calib image 1298 dataRefList = [
getDataRef(cache.butler, dataId)
if dataId
is not None else None for 1299 dataId
in struct.ccdIdList]
1300 self.log.info(
"Combining %s on %s" % (outputId, NODE))
1301 bgList = [dataRef.get(
"icExpBackground", immediate=
True).clone()
for dataRef
in dataRefList]
1303 bgExp = self.sky.averageBackgrounds(bgList)
1306 cache.butler.put(bgExp,
"sky", outputId)
1307 return afwMath.binImage(self.sky.exposureToBackground(bgExp).getImage(), self.config.binning)