lsst.pipe.drivers  17.0.1-2-gd73ec07+12
constructCalibs.py
1 from __future__ import absolute_import, division, print_function
2 
3 import sys
4 import math
5 import time
6 import argparse
7 import traceback
8 import collections
9 
10 import numpy as np
11 from builtins import zip
12 from builtins import range
13 
14 from astro_metadata_translator import merge_headers, ObservationGroup
15 from astro_metadata_translator.serialize import dates_to_fits
16 
17 from lsst.pex.config import Config, ConfigurableField, Field, ListField, ConfigField
18 from lsst.pipe.base import Task, Struct, TaskRunner, ArgumentParser
19 import lsst.daf.base as dafBase
20 import lsst.afw.math as afwMath
21 import lsst.afw.geom as afwGeom
22 import lsst.afw.detection as afwDet
23 import lsst.afw.image as afwImage
24 import lsst.meas.algorithms as measAlg
25 from lsst.pipe.tasks.repair import RepairTask
26 from lsst.ip.isr import IsrTask
27 
28 from lsst.ctrl.pool.parallel import BatchPoolTask
29 from lsst.ctrl.pool.pool import Pool, NODE
30 from lsst.pipe.drivers.background import SkyMeasurementTask, FocalPlaneBackground, FocalPlaneBackgroundConfig
31 from lsst.pipe.drivers.visualizeVisit import makeCameraImage
32 
33 from .checksum import checksum
34 from .utils import getDataRef
35 
36 
37 class CalibStatsConfig(Config):
38  """Parameters controlling the measurement of background statistics"""
39  stat = Field(doc="Statistic to use to estimate background (from lsst.afw.math)", dtype=int,
40  default=int(afwMath.MEANCLIP))
41  clip = Field(doc="Clipping threshold for background",
42  dtype=float, default=3.0)
43  nIter = Field(doc="Clipping iterations for background",
44  dtype=int, default=3)
45  maxVisitsToCalcErrorFromInputVariance = Field(
46  doc="Maximum number of visits to estimate variance from input variance, not per-pixel spread",
47  dtype=int, default=2)
48  mask = ListField(doc="Mask planes to reject",
49  dtype=str, default=["DETECTED", "BAD", "NO_DATA"])
50 
51 
52 class CalibStatsTask(Task):
53  """Measure statistics on the background
54 
55  This can be useful for scaling the background, e.g., for flats and fringe frames.
56  """
57  ConfigClass = CalibStatsConfig
58 
59  def run(self, exposureOrImage):
60  """!Measure a particular statistic on an image (of some sort).
61 
62  @param exposureOrImage Exposure, MaskedImage or Image.
63  @return Value of desired statistic
64  """
65  stats = afwMath.StatisticsControl(self.config.clip, self.config.nIter,
66  afwImage.Mask.getPlaneBitMask(self.config.mask))
67  try:
68  image = exposureOrImage.getMaskedImage()
69  except Exception:
70  try:
71  image = exposureOrImage.getImage()
72  except Exception:
73  image = exposureOrImage
74 
75  return afwMath.makeStatistics(image, self.config.stat, stats).getValue()
76 
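A minimal usage sketch (assuming a working LSST stack environment in which this module imports as lsst.pipe.drivers.constructCalibs; the task name passed here is arbitrary since CalibStatsTask defines no _DefaultName):

    import numpy as np
    import lsst.afw.image as afwImage
    from lsst.pipe.drivers.constructCalibs import CalibStatsTask

    image = afwImage.ImageF(64, 64)
    image.getArray()[:] = np.random.normal(1000.0, 10.0, (64, 64))

    stats = CalibStatsTask(name="stats")   # standalone task, so a name is required
    print(stats.run(image))                # clipped mean, close to 1000.0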
77 
78 class CalibCombineConfig(Config):
79  """Configuration for combining calib images"""
80  rows = Field(doc="Number of rows to read at a time",
81  dtype=int, default=512)
82  mask = ListField(doc="Mask planes to respect", dtype=str,
83  default=["SAT", "DETECTED", "INTRP"])
84  combine = Field(doc="Statistic to use for combination (from lsst.afw.math)", dtype=int,
85  default=int(afwMath.MEANCLIP))
86  clip = Field(doc="Clipping threshold for combination",
87  dtype=float, default=3.0)
88  nIter = Field(doc="Clipping iterations for combination",
89  dtype=int, default=3)
90  stats = ConfigurableField(target=CalibStatsTask,
91  doc="Background statistics configuration")
92 
93 
94 class CalibCombineTask(Task):
95  """Task to combine calib images"""
96  ConfigClass = CalibCombineConfig
97 
98  def __init__(self, *args, **kwargs):
99  Task.__init__(self, *args, **kwargs)
100  self.makeSubtask("stats")
101 
102  def run(self, sensorRefList, expScales=None, finalScale=None, inputName="postISRCCD"):
103  """!Combine calib images for a single sensor
104 
105  @param sensorRefList List of data references to combine (for a single sensor)
106  @param expScales List of scales to apply for each exposure
107  @param finalScale Desired scale for final combined image
108  @param inputName Data set name for inputs
109  @return combined image
110  """
111  width, height = self.getDimensions(sensorRefList)
112  stats = afwMath.StatisticsControl(self.config.clip, self.config.nIter,
113  afwImage.Mask.getPlaneBitMask(self.config.mask))
114  numImages = len(sensorRefList)
115  if numImages < 1:
116  raise RuntimeError("No valid input data")
117  if numImages < self.config.stats.maxVisitsToCalcErrorFromInputVariance:
118  stats.setCalcErrorFromInputVariance(True)
119 
120  # Combine images
121  combined = afwImage.MaskedImageF(width, height)
122  imageList = [None]*numImages
123  for start in range(0, height, self.config.rows):
124  rows = min(self.config.rows, height - start)
125  box = afwGeom.Box2I(afwGeom.Point2I(0, start),
126  afwGeom.Extent2I(width, rows))
127  subCombined = combined.Factory(combined, box)
128 
129  for i, sensorRef in enumerate(sensorRefList):
130  if sensorRef is None:
131  imageList[i] = None
132  continue
133  exposure = sensorRef.get(inputName + "_sub", bbox=box)
134  if expScales is not None:
135  self.applyScale(exposure, expScales[i])
136  imageList[i] = exposure.getMaskedImage()
137 
138  self.combine(subCombined, imageList, stats)
139 
140  if finalScale is not None:
141  background = self.stats.run(combined)
142  self.log.info("%s: Measured background of stack is %f; adjusting to %f" %
143  (NODE, background, finalScale))
144  combined *= finalScale / background
145 
146  return combined
147 
148  def getDimensions(self, sensorRefList, inputName="postISRCCD"):
149  """Get dimensions of the inputs"""
150  dimList = []
151  for sensorRef in sensorRefList:
152  if sensorRef is None:
153  continue
154  md = sensorRef.get(inputName + "_md")
155  dimList.append(afwImage.bboxFromMetadata(md).getDimensions())
156  return getSize(dimList)
157 
158  def applyScale(self, exposure, scale=None):
159  """Apply scale to input exposure
160 
161  This implementation applies a flux scaling: the input exposure is
162  divided by the provided scale.
163  """
164  if scale is not None:
165  mi = exposure.getMaskedImage()
166  mi /= scale
167 
168  def combine(self, target, imageList, stats):
169  """!Combine multiple images
170 
171  @param target Target image to receive the combined pixels
172  @param imageList List of input images
173  @param stats Statistics control
174  """
175  images = [img for img in imageList if img is not None]
176  afwMath.statisticsStack(target, images, afwMath.Property(self.config.combine), stats)
177 
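A hedged sketch of the underlying stack call used above, on three small constant images (assumes an LSST stack environment); the target ends up holding the per-pixel clipped mean, which is what combine() computes block by block:

    import lsst.afw.image as afwImage
    import lsst.afw.math as afwMath

    images = []
    for value in (1.0, 2.0, 3.0):
        mi = afwImage.MaskedImageF(16, 16)
        mi.getImage().getArray()[:] = value
        mi.getVariance().getArray()[:] = 1.0
        images.append(mi)

    target = afwImage.MaskedImageF(16, 16)
    ctrl = afwMath.StatisticsControl(3.0, 3)   # clip threshold, iterations
    afwMath.statisticsStack(target, images, afwMath.Property(int(afwMath.MEANCLIP)), ctrl)
    print(target.getImage().getArray()[0, 0])  # 2.0, the clipped mean of 1, 2, 3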
178 
179 def getSize(dimList):
180  """Determine a consistent size, given a list of image sizes"""
181  dim = set((w, h) for w, h in dimList)
182  dim.discard(None)
183  if len(dim) != 1:
184  raise RuntimeError("Inconsistent dimensions: %s" % dim)
185  return dim.pop()
186 
187 
188 def dictToTuple(dict_, keys):
189  """!Return a tuple of specific values from a dict
190 
191  This provides a hashable representation of the dict from certain keywords.
192  This can be useful for creating e.g., a tuple of the values in the DataId
193  that identify the CCD.
194 
195  @param dict_ dict to parse
196  @param keys keys to extract (order is important)
197  @return tuple of values
198  """
199  return tuple(dict_[k] for k in keys)
200 
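For example, with a hypothetical dataId:

    dataId = {"visit": 123, "ccd": 4, "filter": "HSC-R"}
    dictToTuple(dataId, ["ccd"])            # (4,)
    dictToTuple(dataId, ["visit", "ccd"])   # (123, 4)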
201 
202 def getCcdIdListFromExposures(expRefList, level="sensor", ccdKeys=["ccd"]):
203  """!Determine a list of CCDs from exposure references
204 
205  This essentially inverts the exposure-level references (which
206  provides a list of CCDs for each exposure), by providing
207  a dataId list for each CCD. Consider an input list of exposures
208  [e1, e2, e3], and each exposure has CCDs c1 and c2. Then this
209  function returns:
210 
211  {(c1,): [e1c1, e2c1, e3c1], (c2,): [e1c2, e2c2, e3c2]}
212 
213  This is a dict whose keys are tuples of the identifying values of a
214  CCD (usually just the CCD number) and the values are lists of dataIds
215  for that CCD in each exposure. A missing dataId is given the value
216  None.
217 
218  @param expRefList List of data references for exposures
219  @param level Level for the butler to generate CCDs
220  @param ccdKeys DataId keywords that identify a CCD
221  @return dict of data identifier lists for each CCD;
222  keys are values of ccdKeys in order
223  """
224  expIdList = [[ccdRef.dataId for ccdRef in expRef.subItems(
225  level)] for expRef in expRefList]
226 
227  # Determine what additional keys make a CCD from an exposure
228  if len(ccdKeys) != len(set(ccdKeys)):
229  raise RuntimeError("Duplicate keys found in ccdKeys: %s" % ccdKeys)
230  ccdNames = set() # Set of tuples which are values for each of the CCDs in an exposure
231  for ccdIdList in expIdList:
232  for ccdId in ccdIdList:
233  name = dictToTuple(ccdId, ccdKeys)
234  ccdNames.add(name)
235 
236  # Turn the list of CCDs for each exposure into a list of exposures for
237  # each CCD
238  ccdLists = {}
239  for n, ccdIdList in enumerate(expIdList):
240  for ccdId in ccdIdList:
241  name = dictToTuple(ccdId, ccdKeys)
242  if name not in ccdLists:
243  ccdLists[name] = []
244  ccdLists[name].append(ccdId)
245 
246  for ccd in ccdLists:
247  # Sort the list by the dataId values (ordered by key)
248  ccdLists[ccd] = sorted(ccdLists[ccd], key=lambda dd: dictToTuple(dd, sorted(dd.keys())))
249 
250  return ccdLists
251 
252 
253 def mapToMatrix(pool, func, ccdIdLists, *args, **kwargs):
254  """Generate a matrix of results using pool.map
255 
256  The function should have the call signature:
257  func(cache, dataId, *args, **kwargs)
258 
259  We return a dict mapping 'ccd name' to a list of values for
260  each exposure.
261 
262  @param pool Process pool
263  @param func Function to call for each dataId
264  @param ccdIdLists Dict of data identifier lists for each CCD name
265  @return matrix of results
266  """
267  dataIdList = sum(ccdIdLists.values(), [])
268  resultList = pool.map(func, dataIdList, *args, **kwargs)
269  # Piece everything back together
270  data = dict((ccdName, [None] * len(expList)) for ccdName, expList in ccdIdLists.items())
271  indices = dict(sum([[(tuple(dataId.values()) if dataId is not None else None, (ccdName, expNum))
272  for expNum, dataId in enumerate(expList)]
273  for ccdName, expList in ccdIdLists.items()], []))
274  for dataId, result in zip(dataIdList, resultList):
275  if dataId is None:
276  continue
277  ccdName, expNum = indices[tuple(dataId.values())]
278  data[ccdName][expNum] = result
279  return data
280 
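A hedged, self-contained sketch of the reshaping this performs, using a hypothetical SerialPool stand-in for the ctrl_pool process pool (it only needs a map method with the calling convention used above and a cache attribute); the dataIds are also hypothetical:

    class SerialPool:
        """Hypothetical stand-in: maps serially in the current process."""
        cache = None

        def map(self, func, dataList, *args, **kwargs):
            return [func(self.cache, data, *args, **kwargs) for data in dataList]

    ccdIdLists = {(4,): [{"visit": 100, "ccd": 4}, {"visit": 101, "ccd": 4}],
                  (5,): [{"visit": 100, "ccd": 5}, {"visit": 101, "ccd": 5}]}
    matrix = mapToMatrix(SerialPool(), lambda cache, dataId: dataId["visit"], ccdIdLists)
    # matrix == {(4,): [100, 101], (5,): [100, 101]}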
281 
282 class CalibIdAction(argparse.Action):
283  """Split name=value pairs and put the result in a dict"""
284 
285  def __call__(self, parser, namespace, values, option_string):
286  output = getattr(namespace, self.dest, {})
287  for nameValue in values:
288  name, sep, valueStr = nameValue.partition("=")
289  if not valueStr:
290  parser.error("%s value %s must be in form name=value" %
291  (option_string, nameValue))
292  output[name] = valueStr
293  setattr(namespace, self.dest, output)
294 
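A self-contained sketch of the parsing behaviour with plain argparse (no butler needed); note that values stay as strings here, and are converted to the registry types later in CalibArgumentParser.parse_args:

    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument("--calibId", nargs="*", action=CalibIdAction, default={},
                        metavar="KEY=VALUE")
    ns = parser.parse_args(["--calibId", "version=1", "calibDate=2019-01-01"])
    # ns.calibId == {"version": "1", "calibDate": "2019-01-01"}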
295 
296 class CalibArgumentParser(ArgumentParser):
297  """ArgumentParser for calibration construction"""
298 
299  def __init__(self, calibName, *args, **kwargs):
300  """Add a --calibId argument to the standard pipe_base argument parser"""
301  ArgumentParser.__init__(self, *args, **kwargs)
302  self.calibName = calibName
303  self.add_id_argument("--id", datasetType="raw",
304  help="input identifiers, e.g., --id visit=123 ccd=4")
305  self.add_argument("--calibId", nargs="*", action=CalibIdAction, default={},
306  help="identifiers for calib, e.g., --calibId version=1",
307  metavar="KEY=VALUE1[^VALUE2[^VALUE3...]")
308 
309  def parse_args(self, *args, **kwargs):
310  """Parse arguments
311 
312  Checks that the "--calibId" provided works.
313  """
314  namespace = ArgumentParser.parse_args(self, *args, **kwargs)
315 
316  keys = namespace.butler.getKeys(self.calibName)
317  parsed = {}
318  for name, value in namespace.calibId.items():
319  if name not in keys:
320  self.error(
321  "%s is not a relevant calib identifier key (%s)" % (name, keys))
322  parsed[name] = keys[name](value)
323  namespace.calibId = parsed
324 
325  return namespace
326 
327 
328 class CalibConfig(Config):
329  """Configuration for constructing calibs"""
330  clobber = Field(dtype=bool, default=True,
331  doc="Clobber existing processed images?")
332  isr = ConfigurableField(target=IsrTask, doc="ISR configuration")
333  dateObs = Field(dtype=str, default="dateObs",
334  doc="Key for observation date in exposure registry")
335  dateCalib = Field(dtype=str, default="calibDate",
336  doc="Key for calib date in calib registry")
337  filter = Field(dtype=str, default="filter",
338  doc="Key for filter name in exposure/calib registries")
339  combination = ConfigurableField(
340  target=CalibCombineTask, doc="Calib combination configuration")
341  ccdKeys = ListField(dtype=str, default=["ccd"],
342  doc="DataId keywords specifying a CCD")
343  visitKeys = ListField(dtype=str, default=["visit"],
344  doc="DataId keywords specifying a visit")
345  calibKeys = ListField(dtype=str, default=[],
346  doc="DataId keywords specifying a calibration")
347  doCameraImage = Field(dtype=bool, default=True, doc="Create camera overview image?")
348  binning = Field(dtype=int, default=64, doc="Binning to apply for camera image")
349 
350  def setDefaults(self):
351  self.isr.doWrite = False
352 
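A hedged sketch of a config override file for one of the construct*.py drivers (e.g. supplied via the standard --configfile option, or set field-by-field with --config); the field names follow the config tree defined above:

    config.doCameraImage = False       # skip the binned full-camera image
    config.binning = 32
    config.combination.rows = 256      # smaller read blocks, less memory per process
    config.combination.clip = 5.0
    config.combination.stats.nIter = 5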
353 
354 class CalibTaskRunner(TaskRunner):
355  """Get parsed values into the CalibTask.run"""
356  @staticmethod
357  def getTargetList(parsedCmd, **kwargs):
358  return [dict(expRefList=parsedCmd.id.refList, butler=parsedCmd.butler, calibId=parsedCmd.calibId)]
359 
360  def __call__(self, args):
361  """Call the Task with the kwargs from getTargetList"""
362  task = self.TaskClass(config=self.config, log=self.log)
363  exitStatus = 0 # exit status for the shell
364  if self.doRaise:
365  result = task.runDataRef(**args)
366  else:
367  try:
368  result = task.runDataRef(**args)
369  except Exception as e:
370  # n.b. The shell exit value is the number of dataRefs returning
371  # non-zero, so the actual value used here is lost
372  exitStatus = 1
373 
374  task.log.fatal("Failed: %s" % e)
375  traceback.print_exc(file=sys.stderr)
376 
377  if self.doReturnResults:
378  return Struct(
379  exitStatus=exitStatus,
380  args=args,
381  metadata=task.metadata,
382  result=result,
383  )
384  else:
385  return Struct(
386  exitStatus=exitStatus,
387  )
388 
389 
391  """!Base class for constructing calibs.
392 
393  This should be subclassed for each of the required calib types.
394  The subclass should be sure to define the following class variables:
395  * _DefaultName: default name of the task, used by CmdLineTask
396  * calibName: name of the calibration data set in the butler
397  The subclass may optionally set:
398  * filterName: filter name to give the resultant calib
399  """
400  ConfigClass = CalibConfig
401  RunnerClass = CalibTaskRunner
402  filterName = None
403  calibName = None
404  exposureTime = 1.0 # sets this exposureTime in the output
405 
406  def __init__(self, *args, **kwargs):
407  """Constructor"""
408  BatchPoolTask.__init__(self, *args, **kwargs)
409  self.makeSubtask("isr")
410  self.makeSubtask("combination")
411 
412  @classmethod
413  def batchWallTime(cls, time, parsedCmd, numCores):
414  numCcds = len(parsedCmd.butler.get("camera"))
415  numExps = len(cls.RunnerClass.getTargetList(
416  parsedCmd)[0]['expRefList'])
417  numCycles = int(numCcds/float(numCores) + 0.5)
418  return time*numExps*numCycles
419 
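A worked example of this estimate with hypothetical numbers (600 s per CCD-visit, 30 input exposures, a 112-CCD camera, 16 cores per node):

    timePerCcd, numExps, numCcds, numCores = 600.0, 30, 112, 16
    numCycles = int(numCcds / float(numCores) + 0.5)   # 7
    print(timePerCcd * numExps * numCycles)            # 126000.0 s, i.e. 35 hours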
420  @classmethod
421  def _makeArgumentParser(cls, *args, **kwargs):
422  kwargs.pop("doBatch", False)
423  return CalibArgumentParser(calibName=cls.calibName, name=cls._DefaultName, *args, **kwargs)
424 
425  def runDataRef(self, expRefList, butler, calibId):
426  """!Construct a calib from a list of exposure references
427 
428  This is the entry point, called by the TaskRunner.__call__
429 
430  Only the master node executes this method.
431 
432  @param expRefList List of data references at the exposure level
433  @param butler Data butler
434  @param calibId Identifier dict for calib
435  """
436  if len(expRefList) < 1:
437  raise RuntimeError("No valid input data")
438 
439  for expRef in expRefList:
440  self.addMissingKeys(expRef.dataId, butler, self.config.ccdKeys, 'raw')
441 
442  outputId = self.getOutputId(expRefList, calibId)
443  ccdIdLists = getCcdIdListFromExposures(
444  expRefList, level="sensor", ccdKeys=self.config.ccdKeys)
445  self.checkCcdIdLists(ccdIdLists)
446 
447  # Ensure we can generate filenames for each output
448  outputIdItemList = list(outputId.items())
449  for ccdName in ccdIdLists:
450  dataId = dict([(k, ccdName[i]) for i, k in enumerate(self.config.ccdKeys)])
451  dataId.update(outputIdItemList)
452  self.addMissingKeys(dataId, butler)
453  dataId.update(outputIdItemList)
454 
455  try:
456  butler.get(self.calibName + "_filename", dataId)
457  except Exception as e:
458  raise RuntimeError(
459  "Unable to determine output filename \"%s_filename\" from %s: %s" %
460  (self.calibName, dataId, e))
461 
462  processPool = Pool("process")
463  processPool.storeSet(butler=butler)
464 
465  # Scatter: process CCDs independently
466  data = self.scatterProcess(processPool, ccdIdLists)
467 
468  # Gather: determine scalings
469  scales = self.scale(ccdIdLists, data)
470 
471  combinePool = Pool("combine")
472  combinePool.storeSet(butler=butler)
473 
474  # Scatter: combine
475  calibs = self.scatterCombine(combinePool, outputId, ccdIdLists, scales)
476 
477  if self.config.doCameraImage:
478  camera = butler.get("camera")
479  # Convert indexing of calibs from "ccdName" to detector ID (as used by makeImageFromCamera)
480  calibs = {butler.get("postISRCCD_detector",
481  dict(zip(self.config.ccdKeys, ccdName))).getId(): calibs[ccdName]
482  for ccdName in ccdIdLists}
483 
484  try:
485  cameraImage = self.makeCameraImage(camera, outputId, calibs)
486  butler.put(cameraImage, self.calibName + "_camera", dataId)
487  except Exception as exc:
488  self.log.warn("Unable to create camera image: %s" % (exc,))
489 
490  return Struct(
491  outputId=outputId,
492  ccdIdLists=ccdIdLists,
493  scales=scales,
494  calibs=calibs,
495  processPool=processPool,
496  combinePool=combinePool,
497  )
498 
499  def getOutputId(self, expRefList, calibId):
500  """!Generate the data identifier for the output calib
501 
502  The mean date and the common filter are included, using keywords
503  from the configuration. The CCD-specific part is not included
504  in the data identifier.
505 
506  @param expRefList List of data references at exposure level
507  @param calibId Data identifier elements for the calib provided by the user
508  @return data identifier
509  """
510  midTime = 0
511  filterName = None
512  for expRef in expRefList:
513  butler = expRef.getButler()
514  dataId = expRef.dataId
515 
516  midTime += self.getMjd(butler, dataId)
517  thisFilter = self.getFilter(
518  butler, dataId) if self.filterName is None else self.filterName
519  if filterName is None:
520  filterName = thisFilter
521  elif filterName != thisFilter:
522  raise RuntimeError("Filter mismatch for %s: %s vs %s" % (
523  dataId, thisFilter, filterName))
524 
525  midTime /= len(expRefList)
526  date = str(dafBase.DateTime(
527  midTime, dafBase.DateTime.MJD).toPython().date())
528 
529  outputId = {self.config.filter: filterName,
530  self.config.dateCalib: date}
531  outputId.update(calibId)
532  return outputId
533 
534  def getMjd(self, butler, dataId, timescale=dafBase.DateTime.UTC):
535  """Determine the Modified Julian Date (MJD; in TAI) from a data identifier"""
536  if self.config.dateObs in dataId:
537  dateObs = dataId[self.config.dateObs]
538  else:
539  dateObs = butler.queryMetadata('raw', [self.config.dateObs], dataId)[0]
540  if "T" not in dateObs:
541  dateObs = dateObs + "T12:00:00.0Z"
542  elif not dateObs.endswith("Z"):
543  dateObs += "Z"
544 
545  return dafBase.DateTime(dateObs, timescale).get(dafBase.DateTime.MJD)
546 
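For example, a registry date of "2019-03-01" is first normalised to "2019-03-01T12:00:00.0Z" and then converted; a minimal sketch, assuming an LSST stack environment:

    import lsst.daf.base as dafBase

    mjd = dafBase.DateTime("2019-03-01T12:00:00.0Z",
                           dafBase.DateTime.UTC).get(dafBase.DateTime.MJD)
    # mjd is about 58543.5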
547  def getFilter(self, butler, dataId):
548  """Determine the filter from a data identifier"""
549  filt = butler.queryMetadata('raw', [self.config.filter], dataId)[0]
550  return filt
551 
552  def addMissingKeys(self, dataId, butler, missingKeys=None, calibName=None):
553  if calibName is None:
554  calibName = self.calibName
555 
556  if missingKeys is None:
557  missingKeys = set(butler.getKeys(calibName).keys()) - set(dataId.keys())
558 
559  for k in missingKeys:
560  try:
561  v = butler.queryMetadata('raw', [k], dataId) # n.b. --id refers to 'raw'
562  except Exception:
563  continue
564 
565  if len(v) == 0: # failed to lookup value
566  continue
567 
568  if len(v) == 1:
569  dataId[k] = v[0]
570  else:
571  raise RuntimeError("No unique lookup for %s: %s" % (k, v))
572 
573  def updateMetadata(self, calibImage, exposureTime, darkTime=None, **kwargs):
574  """!Update the metadata from the VisitInfo
575 
576  @param calibImage The image whose metadata is to be set
577  @param exposureTime The exposure time for the image
578  @param darkTime The time since the last read (default: exposureTime)
579  """
580 
581  if darkTime is None:
582  darkTime = exposureTime # avoid warning messages when using calibration products
583 
584  visitInfo = afwImage.makeVisitInfo(exposureTime=exposureTime, darkTime=darkTime, **kwargs)
585  md = calibImage.getMetadata()
586 
587  afwImage.setVisitInfoMetadata(md, visitInfo)
588 
589  def scatterProcess(self, pool, ccdIdLists):
590  """!Scatter the processing among the nodes
591 
592  We scatter each CCD independently (exposures aren't grouped together),
593  to make full use of all available processors. This necessitates piecing
594  everything back together in the same format as ccdIdLists afterwards.
595 
596  Only the master node executes this method.
597 
598  @param pool Process pool
599  @param ccdIdLists Dict of data identifier lists for each CCD name
600  @return Dict of lists of returned data for each CCD name
601  """
602  self.log.info("Scatter processing")
603  return mapToMatrix(pool, self.process, ccdIdLists)
604 
605  def process(self, cache, ccdId, outputName="postISRCCD", **kwargs):
606  """!Process a CCD, specified by a data identifier
607 
608  After processing, optionally returns a result (produced by
609  the 'processResult' method) calculated from the processed
610  exposure. These results will be gathered by the master node,
611  and are a means for coordinated scaling of all CCDs for flats,
612  etc.
613 
614  Only slave nodes execute this method.
615 
616  @param cache Process pool cache
617  @param ccdId Data identifier for CCD
618  @param outputName Output dataset name for butler
619  @return result from 'processResult'
620  """
621  if ccdId is None:
622  self.log.warn("Null identifier received on %s" % NODE)
623  return None
624  sensorRef = getDataRef(cache.butler, ccdId)
625  if self.config.clobber or not sensorRef.datasetExists(outputName):
626  self.log.info("Processing %s on %s" % (ccdId, NODE))
627  try:
628  exposure = self.processSingle(sensorRef, **kwargs)
629  except Exception as e:
630  self.log.warn("Unable to process %s: %s" % (ccdId, e))
631  raise
632  return None
633  self.processWrite(sensorRef, exposure)
634  else:
635  self.log.info(
636  "Using previously persisted processed exposure for %s" % (sensorRef.dataId,))
637  exposure = sensorRef.get(outputName)
638  return self.processResult(exposure)
639 
640  def processSingle(self, dataRef):
641  """Process a single CCD, specified by a data reference
642 
643  Generally, this simply means doing ISR.
644 
645  Only slave nodes execute this method.
646  """
647  return self.isr.runDataRef(dataRef).exposure
648 
649  def processWrite(self, dataRef, exposure, outputName="postISRCCD"):
650  """!Write the processed CCD
651 
652  We need to write these out because we can't hold them all in
653  memory at once.
654 
655  Only slave nodes execute this method.
656 
657  @param dataRef Data reference
658  @param exposure CCD exposure to write
659  @param outputName Output dataset name for butler.
660  """
661  dataRef.put(exposure, outputName)
662 
663  def processResult(self, exposure):
664  """Extract processing results from a processed exposure
665 
666  This method generates what is gathered by the master node.
667  This can be a background measurement or similar for scaling
668  flat-fields. It must be picklable!
669 
670  Only slave nodes execute this method.
671  """
672  return None
673 
674  def scale(self, ccdIdLists, data):
675  """!Determine scaling across CCDs and exposures
676 
677  This is necessary mainly for flats, so as to determine a
678  consistent scaling across the entire focal plane. This
679  implementation is simply a placeholder.
680 
681  Only the master node executes this method.
682 
683  @param ccdIdLists Dict of data identifier lists for each CCD tuple
684  @param data Dict of lists of returned data for each CCD tuple
685  @return dict of Struct(ccdScale: scaling for CCD,
686  expScales: scaling for each exposure
687  ) for each CCD tuple
688  """
689  self.log.info("Scale on %s" % NODE)
690  return dict((name, Struct(ccdScale=None, expScales=[None] * len(ccdIdLists[name])))
691  for name in ccdIdLists)
692 
693  def scatterCombine(self, pool, outputId, ccdIdLists, scales):
694  """!Scatter the combination of exposures across multiple nodes
695 
696  In this case, we can only scatter across as many nodes as
697  there are CCDs.
698 
699  Only the master node executes this method.
700 
701  @param pool Process pool
702  @param outputId Output identifier (exposure part only)
703  @param ccdIdLists Dict of data identifier lists for each CCD name
704  @param scales Dict of structs with scales, for each CCD name
705  @return dict of binned images
706  """
707  self.log.info("Scatter combination")
708  data = [Struct(ccdName=ccdName, ccdIdList=ccdIdLists[ccdName], scales=scales[ccdName]) for
709  ccdName in ccdIdLists]
710  images = pool.map(self.combine, data, outputId)
711  return dict(zip(ccdIdLists.keys(), images))
712 
713  def getFullyQualifiedOutputId(self, ccdName, butler, outputId):
714  """Get fully-qualified output data identifier
715 
716  We may need to look up keys that aren't in the output dataId.
717 
718  @param ccdName Name tuple for CCD
719  @param butler Data butler
720  @param outputId Data identifier for combined image (exposure part only)
721  @return fully-qualified output dataId
722  """
723  fullOutputId = {k: ccdName[i] for i, k in enumerate(self.config.ccdKeys)}
724  fullOutputId.update(outputId)
725  self.addMissingKeys(fullOutputId, butler)
726  fullOutputId.update(outputId) # must be after the call to queryMetadata in 'addMissingKeys'
727  return fullOutputId
728 
729  def combine(self, cache, struct, outputId):
730  """!Combine multiple exposures of a particular CCD and write the output
731 
732  Only the slave nodes execute this method.
733 
734  @param cache Process pool cache
735  @param struct Parameters for the combination, which has the following components:
736  * ccdName Name tuple for CCD
737  * ccdIdList List of data identifiers for combination
738  * scales Scales to apply (expScales are scalings for each exposure,
739  ccdScale is final scale for combined image)
740  @param outputId Data identifier for combined image (exposure part only)
741  @return binned calib image
742  """
743  outputId = self.getFullyQualifiedOutputId(struct.ccdName, cache.butler, outputId)
744  dataRefList = [getDataRef(cache.butler, dataId) if dataId is not None else None for
745  dataId in struct.ccdIdList]
746  self.log.info("Combining %s on %s" % (outputId, NODE))
747  calib = self.combination.run(dataRefList, expScales=struct.scales.expScales,
748  finalScale=struct.scales.ccdScale)
749 
750  if not hasattr(calib, "getMetadata"):
751  if hasattr(calib, "getVariance"):
752  calib = afwImage.makeExposure(calib)
753  else:
754  calib = afwImage.DecoratedImageF(calib.getImage()) # n.b. hardwires "F" for the output type
755 
756  self.calculateOutputHeaderFromRaws(cache.butler, calib, struct.ccdIdList, outputId)
757 
758  self.updateMetadata(calib, self.exposureTime)
759 
760  self.recordCalibInputs(cache.butler, calib,
761  struct.ccdIdList, outputId)
762 
763  self.interpolateNans(calib)
764 
765  self.write(cache.butler, calib, outputId)
766 
767  return afwMath.binImage(calib.getImage(), self.config.binning)
768 
769  def calculateOutputHeaderFromRaws(self, butler, calib, dataIdList, outputId):
770  """!Calculate the output header from the raw headers.
771 
772  This metadata will go into the output FITS header. It will include all
773  headers that are identical in all inputs.
774 
775  @param butler Data butler
776  @param calib Combined calib exposure.
777  @param dataIdList List of data identifiers for calibration inputs
778  @param outputId Data identifier for output
779  """
780  header = calib.getMetadata()
781 
782  rawmd = [butler.get("raw_md", dataId) for dataId in dataIdList if
783  dataId is not None]
784 
785  merged = merge_headers(rawmd, mode="drop")
786 
787  # Place merged set into the PropertyList if a value is not
788  # present already
789  # Comments are not present in the merged version so copy them across
790  for k, v in merged.items():
791  if k not in header:
792  comment = rawmd[0].getComment(k) if k in rawmd[0] else None
793  header.set(k, v, comment=comment)
794 
795  # Create an observation group so we can add some standard headers
796  # independent of the form in the input files.
797  # Use try block in case we are dealing with unexpected data headers
798  try:
799  group = ObservationGroup(rawmd, pedantic=False)
800  except Exception:
801  group = None
802 
803  comments = {"TIMESYS": "Time scale for all dates",
804  "DATE-OBS": "Start date of earliest input observation",
805  "MJD-OBS": "[d] Start MJD of earliest input observation",
806  "DATE-END": "End date of oldest input observation",
807  "MJD-END": "[d] End MJD of oldest input observation",
808  "MJD-AVG": "[d] MJD midpoint of all input observations",
809  "DATE-AVG": "Midpoint date of all input observations"}
810 
811  if group is not None:
812  oldest, newest = group.extremes()
813  dateCards = dates_to_fits(oldest.datetime_begin, newest.datetime_end)
814  else:
815  # Fall back to setting a DATE-OBS from the calibDate
816  dateCards = {"DATE-OBS": "{}T00:00:00.00".format(outputId[self.config.dateCalib])}
817  comments["DATE-OBS"] = "Date of start of day of calibration midpoint"
818 
819  for k, v in dateCards.items():
820  header.set(k, v, comment=comments.get(k, None))
821 
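A small illustration of the merge behaviour relied on here, using plain dicts in place of the raw PropertyLists (astro_metadata_translator's merge_headers with mode="drop" keeps only cards that are identical in every input):

    from astro_metadata_translator import merge_headers

    md1 = {"TELESCOP": "Subaru", "EXPTIME": 30.0, "OBJECT": "DOME FLAT"}
    md2 = {"TELESCOP": "Subaru", "EXPTIME": 45.0, "OBJECT": "DOME FLAT"}
    merged = merge_headers([md1, md2], mode="drop")
    # merged == {"TELESCOP": "Subaru", "OBJECT": "DOME FLAT"}; EXPTIME differs and is dropped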
822  def recordCalibInputs(self, butler, calib, dataIdList, outputId):
823  """!Record metadata including the inputs and creation details
824 
825  This metadata will go into the FITS header.
826 
827  @param butler Data butler
828  @param calib Combined calib exposure.
829  @param dataIdList List of data identifiers for calibration inputs
830  @param outputId Data identifier for output
831  """
832  header = calib.getMetadata()
833  header.add("OBSTYPE", self.calibName) # Used by ingestCalibs.py
834 
835  # date, time, host, and root
836  now = time.localtime()
837  header.add("CALIB_CREATION_DATE", time.strftime("%Y-%m-%d", now))
838  header.add("CALIB_CREATION_TIME", time.strftime("%X %Z", now))
839 
840  # Inputs
841  visits = [str(dictToTuple(dataId, self.config.visitKeys)) for dataId in dataIdList if
842  dataId is not None]
843  for i, v in enumerate(sorted(set(visits))):
844  header.add("CALIB_INPUT_%d" % (i,), v)
845 
846  header.add("CALIB_ID", " ".join("%s=%s" % (key, value)
847  for key, value in outputId.items()))
848  checksum(calib, header)
849 
850  def interpolateNans(self, image):
851  """Interpolate over NANs in the combined image
852 
853  NANs can result from masked areas on the CCD. We don't want them getting
854  into our science images, so we replace them with the median of the image.
855  """
856  if hasattr(image, "getMaskedImage"): # Deal with Exposure vs Image
857  self.interpolateNans(image.getMaskedImage().getVariance())
858  image = image.getMaskedImage().getImage()
859  if hasattr(image, "getImage"): # Deal with DecoratedImage or MaskedImage vs Image
860  image = image.getImage()
861  array = image.getArray()
862  bad = np.isnan(array)
863  array[bad] = np.median(array[np.logical_not(bad)])
864 
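The pixel-level operation, illustrated on a plain numpy array:

    import numpy as np

    array = np.array([1.0, np.nan, 3.0, np.nan, 5.0])
    bad = np.isnan(array)
    array[bad] = np.median(array[np.logical_not(bad)])
    # array is now [1., 3., 3., 3., 5.]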
865  def write(self, butler, exposure, dataId):
866  """!Write the final combined calib
867 
868  Only the slave nodes execute this method
869 
870  @param butler Data butler
871  @param exposure CCD exposure to write
872  @param dataId Data identifier for output
873  """
874  self.log.info("Writing %s on %s" % (dataId, NODE))
875  butler.put(exposure, self.calibName, dataId)
876 
877  def makeCameraImage(self, camera, dataId, calibs):
878  """!Create and write an image of the entire camera
879 
880  This is useful for judging the quality or getting an overview of
881  the features of the calib.
882 
883  @param camera Camera object
884  @param dataId Data identifier for output
885  @param calibs Dict mapping CCD detector ID to calib image
886  """
887  return makeCameraImage(camera, calibs, self.config.binning)
888 
889  def checkCcdIdLists(self, ccdIdLists):
890  """Check that the list of CCD dataIds is consistent
891 
892  @param ccdIdLists Dict of data identifier lists for each CCD name
893  @return Number of exposures, number of CCDs
894  """
895  visitIdLists = collections.defaultdict(list)
896  for ccdName in ccdIdLists:
897  for dataId in ccdIdLists[ccdName]:
898  visitName = dictToTuple(dataId, self.config.visitKeys)
899  visitIdLists[visitName].append(dataId)
900 
901  numExps = set(len(expList) for expList in ccdIdLists.values())
902  numCcds = set(len(ccdList) for ccdList in visitIdLists.values())
903 
904  if len(numExps) != 1 or len(numCcds) != 1:
905  # Presumably a visit somewhere doesn't have the full complement available.
906  # Dump the information so the user can figure it out.
907  self.log.warn("Number of visits for each CCD: %s",
908  {ccdName: len(ccdIdLists[ccdName]) for ccdName in ccdIdLists})
909  self.log.warn("Number of CCDs for each visit: %s",
910  {vv: len(visitIdLists[vv]) for vv in visitIdLists})
911  raise RuntimeError("Inconsistent number of exposures/CCDs")
912 
913  return numExps.pop(), numCcds.pop()
914 
915 
917  """Configuration for bias construction.
918 
919  No changes required compared to the base class, but
920  subclassed for distinction.
921  """
922  pass
923 
924 
925 class BiasTask(CalibTask):
926  """Bias construction"""
927  ConfigClass = BiasConfig
928  _DefaultName = "bias"
929  calibName = "bias"
930  filterName = "NONE" # Sets this filter name in the output
931  exposureTime = 0.0 # sets this exposureTime in the output
932 
933  @classmethod
934  def applyOverrides(cls, config):
935  """Overrides to apply for bias construction"""
936  config.isr.doBias = False
937  config.isr.doDark = False
938  config.isr.doFlat = False
939  config.isr.doFringe = False
940 
941 
943  """Configuration for dark construction"""
944  doRepair = Field(dtype=bool, default=True, doc="Repair artifacts?")
945  psfFwhm = Field(dtype=float, default=3.0, doc="Repair PSF FWHM (pixels)")
946  psfSize = Field(dtype=int, default=21, doc="Repair PSF size (pixels)")
947  crGrow = Field(dtype=int, default=2, doc="Grow radius for CR (pixels)")
948  repair = ConfigurableField(
949  target=RepairTask, doc="Task to repair artifacts")
950 
951  def setDefaults(self):
952  CalibConfig.setDefaults(self)
953  self.combination.mask.append("CR")
954 
955 
957  """Dark construction
958 
959  The only major difference from the base class is a cosmic-ray
960  identification stage, and dividing each image by the dark time
961  to generate images of the dark rate.
962  """
963  ConfigClass = DarkConfig
964  _DefaultName = "dark"
965  calibName = "dark"
966  filterName = "NONE" # Sets this filter name in the output
967 
968  def __init__(self, *args, **kwargs):
969  CalibTask.__init__(self, *args, **kwargs)
970  self.makeSubtask("repair")
971 
972  @classmethod
973  def applyOverrides(cls, config):
974  """Overrides to apply for dark construction"""
975  config.isr.doDark = False
976  config.isr.doFlat = False
977  config.isr.doFringe = False
978 
979  def processSingle(self, sensorRef):
980  """Process a single CCD
981 
982  Besides the regular ISR, also masks cosmic-rays and divides each
983  processed image by the dark time to generate images of the dark rate.
984  The dark time is provided by the 'getDarkTime' method.
985  """
986  exposure = CalibTask.processSingle(self, sensorRef)
987 
988  if self.config.doRepair:
989  psf = measAlg.DoubleGaussianPsf(self.config.psfSize, self.config.psfSize,
990  self.config.psfFwhm/(2*math.sqrt(2*math.log(2))))
991  exposure.setPsf(psf)
992  self.repair.run(exposure, keepCRs=False)
993  if self.config.crGrow > 0:
994  mask = exposure.getMaskedImage().getMask().clone()
995  mask &= mask.getPlaneBitMask("CR")
996  fpSet = afwDet.FootprintSet(
997  mask, afwDet.Threshold(0.5))
998  fpSet = afwDet.FootprintSet(fpSet, self.config.crGrow, True)
999  fpSet.setMask(exposure.getMaskedImage().getMask(), "CR")
1000 
1001  mi = exposure.getMaskedImage()
1002  mi /= self.getDarkTime(exposure)
1003  return exposure
1004 
1005  def getDarkTime(self, exposure):
1006  """Retrieve the dark time for an exposure"""
1007  darkTime = exposure.getInfo().getVisitInfo().getDarkTime()
1008  if not np.isfinite(darkTime):
1009  raise RuntimeError("Non-finite darkTime")
1010  return darkTime
1011 
1012 
1014  """Configuration for flat construction"""
1015  iterations = Field(dtype=int, default=10,
1016  doc="Number of iterations for scale determination")
1017  stats = ConfigurableField(target=CalibStatsTask,
1018  doc="Background statistics configuration")
1019 
1020 
1022  """Flat construction
1023 
1024  The principal change from the base class involves gathering the background
1025  values from each image and using them to determine the scalings for the final
1026  combination.
1027  """
1028  ConfigClass = FlatConfig
1029  _DefaultName = "flat"
1030  calibName = "flat"
1031 
1032  @classmethod
1033  def applyOverrides(cls, config):
1034  """Overrides for flat construction"""
1035  config.isr.doFlat = False
1036  config.isr.doFringe = False
1037 
1038  def __init__(self, *args, **kwargs):
1039  CalibTask.__init__(self, *args, **kwargs)
1040  self.makeSubtask("stats")
1041 
1042  def processResult(self, exposure):
1043  return self.stats.run(exposure)
1044 
1045  def scale(self, ccdIdLists, data):
1046  """Determine the scalings for the final combination
1047 
1048  We have a matrix B_ij = C_i E_j, where C_i is the relative scaling
1049  of one CCD to all the others in an exposure, and E_j is the scaling
1050  of the exposure. We convert everything to logarithms so we can work
1051  with a linear system. We determine the C_i and E_j from B_ij by iteration,
1052  under the additional constraint that the average CCD scale is unity.
1053 
1054  This algorithm comes from Eugene Magnier and Pan-STARRS.
1055  """
1056  assert len(ccdIdLists.values()) > 0, "No successful CCDs"
1057  lengths = set([len(expList) for expList in ccdIdLists.values()])
1058  assert len(lengths) == 1, "Number of successful exposures for each CCD differs"
1059  assert tuple(lengths)[0] > 0, "No successful exposures"
1060  # Format background measurements into a matrix
1061  indices = dict((name, i) for i, name in enumerate(ccdIdLists))
1062  bgMatrix = np.array([[0.0] * len(expList) for expList in ccdIdLists.values()])
1063  for name in ccdIdLists:
1064  i = indices[name]
1065  bgMatrix[i] = [d if d is not None else np.nan for d in data[name]]
1066 
1067  numpyPrint = np.get_printoptions()
1068  np.set_printoptions(threshold=np.inf)
1069  self.log.info("Input backgrounds: %s" % bgMatrix)
1070 
1071  # Flat-field scaling
1072  numCcds = len(ccdIdLists)
1073  numExps = bgMatrix.shape[1]
1074  # log(Background) for each exposure/component
1075  bgMatrix = np.log(bgMatrix)
1076  bgMatrix = np.ma.masked_array(bgMatrix, ~np.isfinite(bgMatrix))
1077  # Initial guess at log(scale) for each component
1078  compScales = np.zeros(numCcds)
1079  expScales = np.array([(bgMatrix[:, i0] - compScales).mean() for i0 in range(numExps)])
1080 
1081  for iterate in range(self.config.iterations):
1082  compScales = np.array([(bgMatrix[i1, :] - expScales).mean() for i1 in range(numCcds)])
1083  bad = np.isnan(compScales)
1084  if np.any(bad):
1085  # Bad CCDs: just set them to the mean scale
1086  compScales[bad] = compScales[~bad].mean()
1087  expScales = np.array([(bgMatrix[:, i2] - compScales).mean() for i2 in range(numExps)])
1088 
1089  avgScale = np.average(np.exp(compScales))
1090  compScales -= np.log(avgScale)
1091  self.log.debug("Iteration %d exposure scales: %s", iterate, np.exp(expScales))
1092  self.log.debug("Iteration %d component scales: %s", iterate, np.exp(compScales))
1093 
1094  expScales = np.array([(bgMatrix[:, i3] - compScales).mean() for i3 in range(numExps)])
1095 
1096  if np.any(np.isnan(expScales)):
1097  raise RuntimeError("Bad exposure scales: %s --> %s" % (bgMatrix, expScales))
1098 
1099  expScales = np.exp(expScales)
1100  compScales = np.exp(compScales)
1101 
1102  self.log.info("Exposure scales: %s" % expScales)
1103  self.log.info("Component relative scaling: %s" % compScales)
1104  np.set_printoptions(**numpyPrint)
1105 
1106  return dict((ccdName, Struct(ccdScale=compScales[indices[ccdName]], expScales=expScales))
1107  for ccdName in ccdIdLists)
1108 
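A standalone numpy sketch of the same fixed-point iteration (no LSST dependencies), recovering hypothetical per-CCD scales C_i and per-exposure scales E_j from B_ij = C_i * E_j, up to the unit-mean-CCD constraint:

    import numpy as np

    ccdTruth = np.array([0.9, 1.0, 1.1])               # C_i, average 1.0
    expTruth = np.array([100.0, 150.0, 200.0, 250.0])  # E_j
    bgMatrix = np.log(np.outer(ccdTruth, expTruth))    # log(B_ij)

    compScales = np.zeros(len(ccdTruth))
    for _ in range(10):
        expScales = (bgMatrix - compScales[:, np.newaxis]).mean(axis=0)
        compScales = (bgMatrix - expScales[np.newaxis, :]).mean(axis=1)
        compScales -= np.log(np.exp(compScales).mean())  # average CCD scale of unity
    print(np.exp(compScales))   # ~[0.9, 1.0, 1.1]
    print(np.exp(expScales))    # ~[100, 150, 200, 250]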
1109 
1111  """Configuration for fringe construction"""
1112  stats = ConfigurableField(target=CalibStatsTask,
1113  doc="Background statistics configuration")
1114  subtractBackground = ConfigurableField(target=measAlg.SubtractBackgroundTask,
1115  doc="Background configuration")
1116  detection = ConfigurableField(
1117  target=measAlg.SourceDetectionTask, doc="Detection configuration")
1118  detectSigma = Field(dtype=float, default=1.0,
1119  doc="Detection PSF gaussian sigma")
1120 
1121  def setDefaults(self):
1122  CalibConfig.setDefaults(self)
1123  self.detection.reEstimateBackground = False
1124 
1125 
1127  """Fringe construction task
1128 
1129  The principal change from the base class is that the images are
1130  background-subtracted and rescaled by the background.
1131 
1132  XXX This is probably not right for a straight-up combination, as we
1133  are currently doing, since the fringe amplitudes need not scale with
1134  the continuum.
1135 
1136  XXX Would like to have this do PCA and generate multiple images, but
1137  that will take a bit of work with the persistence code.
1138  """
1139  ConfigClass = FringeConfig
1140  _DefaultName = "fringe"
1141  calibName = "fringe"
1142 
1143  @classmethod
1144  def applyOverrides(cls, config):
1145  """Overrides for fringe construction"""
1146  config.isr.doFringe = False
1147 
1148  def __init__(self, *args, **kwargs):
1149  CalibTask.__init__(self, *args, **kwargs)
1150  self.makeSubtask("detection")
1151  self.makeSubtask("stats")
1152  self.makeSubtask("subtractBackground")
1153 
1154  def processSingle(self, sensorRef):
1155  """Subtract the background and normalise by the background level"""
1156  exposure = CalibTask.processSingle(self, sensorRef)
1157  bgLevel = self.stats.run(exposure)
1158  self.subtractBackground.run(exposure)
1159  mi = exposure.getMaskedImage()
1160  mi /= bgLevel
1161  footprintSets = self.detection.detectFootprints(
1162  exposure, sigma=self.config.detectSigma)
1163  mask = exposure.getMaskedImage().getMask()
1164  detected = 1 << mask.addMaskPlane("DETECTED")
1165  for fpSet in (footprintSets.positive, footprintSets.negative):
1166  if fpSet is not None:
1167  afwDet.setMaskFromFootprintList(
1168  mask, fpSet.getFootprints(), detected)
1169  return exposure
1170 
1171 
1173  """Configuration for sky frame construction"""
1174  detection = ConfigurableField(target=measAlg.SourceDetectionTask, doc="Detection configuration")
1175  detectSigma = Field(dtype=float, default=2.0, doc="Detection PSF gaussian sigma")
1176  subtractBackground = ConfigurableField(target=measAlg.SubtractBackgroundTask,
1177  doc="Regular-scale background configuration, for object detection")
1178  largeScaleBackground = ConfigField(dtype=FocalPlaneBackgroundConfig,
1179  doc="Large-scale background configuration")
1180  sky = ConfigurableField(target=SkyMeasurementTask, doc="Sky measurement")
1181  maskThresh = Field(dtype=float, default=3.0, doc="k-sigma threshold for masking pixels")
1182  mask = ListField(dtype=str, default=["BAD", "SAT", "DETECTED", "NO_DATA"],
1183  doc="Mask planes to consider as contaminated")
1184 
1185 
1187  """Task for sky frame construction
1188 
1189  The sky frame is a (relatively) small-scale background
1190  model, the response of the camera to the sky.
1191 
1192  To construct, we first remove a large-scale background (e.g., caused
1193  by moonlight) which may vary from image to image. Then we construct a
1194  model of the sky, which is essentially a binned version of the image
1195  (important configuration parameters: sky.background.[xy]BinSize).
1196  It is these models which are coadded to yield the sky frame.
1197  """
1198  ConfigClass = SkyConfig
1199  _DefaultName = "sky"
1200  calibName = "sky"
1201 
1202  def __init__(self, *args, **kwargs):
1203  CalibTask.__init__(self, *args, **kwargs)
1204  self.makeSubtask("detection")
1205  self.makeSubtask("subtractBackground")
1206  self.makeSubtask("sky")
1207 
1208  def scatterProcess(self, pool, ccdIdLists):
1209  """!Scatter the processing among the nodes
1210 
1211  Only the master node executes this method, assigning work to the
1212  slaves.
1213 
1214  We measure and subtract off a large-scale background model across
1215  all CCDs, which requires a scatter/gather. Then we process the
1216  individual CCDs, subtracting the large-scale background model and
1217  the residual background model measured. These residuals will be
1218  combined for the sky frame.
1219 
1220  @param pool Process pool
1221  @param ccdIdLists Dict of data identifier lists for each CCD name
1222  @return Dict of lists of returned data for each CCD name
1223  """
1224  self.log.info("Scatter processing")
1225 
1226  numExps = set(len(expList) for expList in ccdIdLists.values()).pop()
1227 
1228  # First subtract off general gradients to make all the exposures look similar.
1229  # We want to preserve the common small-scale structure, which we will coadd.
1230  bgModelList = mapToMatrix(pool, self.measureBackground, ccdIdLists)
1231 
1232  backgrounds = {}
1233  scales = {}
1234  for exp in range(numExps):
1235  bgModels = [bgModelList[ccdName][exp] for ccdName in ccdIdLists]
1236  visit = set(tuple(ccdIdLists[ccdName][exp][key] for key in sorted(self.config.visitKeys)) for
1237  ccdName in ccdIdLists)
1238  assert len(visit) == 1
1239  visit = visit.pop()
1240  bgModel = bgModels[0]
1241  for bg in bgModels[1:]:
1242  bgModel.merge(bg)
1243  self.log.info("Background model min/max for visit %s: %f %f", visit,
1244  np.min(bgModel.getStatsImage().getArray()),
1245  np.max(bgModel.getStatsImage().getArray()))
1246  backgrounds[visit] = bgModel
1247  scales[visit] = np.median(bgModel.getStatsImage().getArray())
1248 
1249  return mapToMatrix(pool, self.process, ccdIdLists, backgrounds=backgrounds, scales=scales)
1250 
1251  def measureBackground(self, cache, dataId):
1252  """!Measure background model for CCD
1253 
1254  This method is executed by the slaves.
1255 
1256  The background models for all CCDs in an exposure will be
1257  combined to form a full focal-plane background model.
1258 
1259  @param cache Process pool cache
1260  @param dataId Data identifier
1261  @return Background model
1262  """
1263  dataRef = getDataRef(cache.butler, dataId)
1264  exposure = self.processSingleBackground(dataRef)
1265 
1266  # NAOJ prototype smoothed and then combined the entire image, but it shouldn't be any different
1267  # to bin and combine the binned images, except that there are fewer pixels to worry about.
1268  config = self.config.largeScaleBackground
1269  camera = dataRef.get("camera")
1270  bgModel = FocalPlaneBackground.fromCamera(config, camera)
1271  bgModel.addCcd(exposure)
1272  return bgModel
1273 
1274  def processSingleBackground(self, dataRef):
1275  """!Process a single CCD for the background
1276 
1277  This method is executed by the slaves.
1278 
1279  Because we're interested in the background, we detect and mask astrophysical
1280  sources, and pixels above the noise level.
1281 
1282  @param dataRef Data reference for CCD.
1283  @return processed exposure
1284  """
1285  if not self.config.clobber and dataRef.datasetExists("postISRCCD"):
1286  return dataRef.get("postISRCCD")
1287  exposure = CalibTask.processSingle(self, dataRef)
1288 
1289  # Detect sources. Requires us to remove the background; we'll restore it later.
1290  bgTemp = self.subtractBackground.run(exposure).background
1291  footprints = self.detection.detectFootprints(exposure, sigma=self.config.detectSigma)
1292  image = exposure.getMaskedImage()
1293  if footprints.background is not None:
1294  image += footprints.background.getImage()
1295 
1296  # Mask high pixels
1297  variance = image.getVariance()
1298  noise = np.sqrt(np.median(variance.getArray()))
1299  isHigh = image.getImage().getArray() > self.config.maskThresh*noise
1300  image.getMask().getArray()[isHigh] |= image.getMask().getPlaneBitMask("DETECTED")
1301 
1302  # Restore the background: it's what we want!
1303  image += bgTemp.getImage()
1304 
1305  # Set detected/bad pixels to background to ensure they don't corrupt the background
1306  maskVal = image.getMask().getPlaneBitMask(self.config.mask)
1307  isBad = image.getMask().getArray() & maskVal > 0
1308  bgLevel = np.median(image.getImage().getArray()[~isBad])
1309  image.getImage().getArray()[isBad] = bgLevel
1310  dataRef.put(exposure, "postISRCCD")
1311  return exposure
1312 
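A plain-numpy illustration of the "mask pixels above the noise" step, with hypothetical arrays standing in for the afw image, mask, and variance planes:

    import numpy as np

    rng = np.random.RandomState(1)
    imageArray = rng.normal(0.0, 5.0, (100, 100))
    varianceArray = np.full_like(imageArray, 25.0)
    maskArray = np.zeros(imageArray.shape, dtype=np.int32)
    DETECTED = 0x20                             # hypothetical bit value for the DETECTED plane

    noise = np.sqrt(np.median(varianceArray))   # 5.0
    isHigh = imageArray > 3.0 * noise           # maskThresh = 3.0
    maskArray[isHigh] |= DETECTED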
1313  def processSingle(self, dataRef, backgrounds, scales):
1314  """Process a single CCD, specified by a data reference
1315 
1316  We subtract the appropriate focal plane background model,
1317  divide by the appropriate scale and measure the background.
1318 
1319  Only slave nodes execute this method.
1320 
1321  @param dataRef Data reference for single CCD
1322  @param backgrounds Background model for each visit
1323  @param scales Scales for each visit
1324  @return Processed exposure
1325  """
1326  visit = tuple(dataRef.dataId[key] for key in sorted(self.config.visitKeys))
1327  exposure = dataRef.get("postISRCCD", immediate=True)
1328  image = exposure.getMaskedImage()
1329  detector = exposure.getDetector()
1330  bbox = image.getBBox()
1331 
1332  bgModel = backgrounds[visit]
1333  bg = bgModel.toCcdBackground(detector, bbox)
1334  image -= bg.getImage()
1335  image /= scales[visit]
1336 
1337  bg = self.sky.measureBackground(exposure.getMaskedImage())
1338  dataRef.put(bg, "icExpBackground")
1339  return exposure
1340 
1341  def combine(self, cache, struct, outputId):
1342  """!Combine multiple background models of a particular CCD and write the output
1343 
1344  Only the slave nodes execute this method.
1345 
1346  @param cache Process pool cache
1347  @param struct Parameters for the combination, which has the following components:
1348  * ccdName Name tuple for CCD
1349  * ccdIdList List of data identifiers for combination
1350  @param outputId Data identifier for combined image (exposure part only)
1351  @return binned calib image
1352  """
1353  outputId = self.getFullyQualifiedOutputId(struct.ccdName, cache.butler, outputId)
1354  dataRefList = [getDataRef(cache.butler, dataId) if dataId is not None else None for
1355  dataId in struct.ccdIdList]
1356  self.log.info("Combining %s on %s" % (outputId, NODE))
1357  bgList = [dataRef.get("icExpBackground", immediate=True).clone() for dataRef in dataRefList]
1358 
1359  bgExp = self.sky.averageBackgrounds(bgList)
1360 
1361  self.recordCalibInputs(cache.butler, bgExp, struct.ccdIdList, outputId)
1362  cache.butler.put(bgExp, "sky", outputId)
1363  return afwMath.binImage(self.sky.exposureToBackground(bgExp).getImage(), self.config.binning)