lsst.pipe.tasks  16.0-42-g47a0cbbb+1
multiBandUtils.py
import lsst.afw.image as afwImage
import lsst.afw.table as afwTable

from lsst.coadd.utils import ExistingCoaddDataIdContainer
from lsst.pipe.base import TaskRunner, ArgumentParser
from lsst.pex.config import Config, RangeField


class MergeSourcesRunner(TaskRunner):
    """Task runner for `MergeDetectionTask` and `MergeMeasurementTask`

    Required because the run method requires a list of
    dataRefs rather than a single dataRef.
    """
    def makeTask(self, parsedCmd=None, args=None):
        """Provide a butler to the Task constructor.

        Parameters
        ----------
        parsedCmd:
            The parsed command
        args: tuple
            Tuple of a list of data references and kwargs (unused)

        Raises
        ------
        RuntimeError
            Thrown if both `parsedCmd` & `args` are `None`
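
        Notes
        -----
        A hedged sketch of the two call paths (the names here are
        illustrative, not taken from a real pipeline run)::

            runner.makeTask(parsedCmd=parsedCmd)   # butler taken from the parsed command
            runner.makeTask(args=([dataRef], {}))  # butler from the first data reference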
        """
        if parsedCmd is not None:
            butler = parsedCmd.butler
        elif args is not None:
            dataRefList, kwargs = args
            butler = dataRefList[0].getButler()
        else:
            raise RuntimeError("Neither parsedCmd nor args specified")
        return self.TaskClass(config=self.config, log=self.log, butler=butler)

    @staticmethod
    def buildRefDict(parsedCmd):
        """Build a hierarchical dictionary of patch references

        Parameters
        ----------
        parsedCmd:
            The parsed command

        Returns
        -------
        refDict: dict
            A reference dictionary of the form {tract: {patch: {filter: dataRef}}}

        Raises
        ------
        RuntimeError
            Thrown when multiple references are provided for the same
            combination of tract, patch and filter
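
        Examples
        --------
        Illustrative shape of the result; the tract, patch and filter
        values are hypothetical::

            refDict = MergeSourcesRunner.buildRefDict(parsedCmd)
            ref = refDict[9813]["5,5"]["HSC-I"]  # dataRef for that combination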
        """
        refDict = {}  # Will index this as refDict[tract][patch][filter] = ref
        for ref in parsedCmd.id.refList:
            tract = ref.dataId["tract"]
            patch = ref.dataId["patch"]
            filter = ref.dataId["filter"]
            if tract not in refDict:
                refDict[tract] = {}
            if patch not in refDict[tract]:
                refDict[tract][patch] = {}
            if filter in refDict[tract][patch]:
                raise RuntimeError("Multiple versions of %s" % (ref.dataId,))
            refDict[tract][patch][filter] = ref
        return refDict

    @staticmethod
    def getTargetList(parsedCmd, **kwargs):
        """Provide a list of patch references for each patch, tract, filter combination.

        Parameters
        ----------
        parsedCmd:
            The parsed command
        kwargs:
            Keyword arguments passed to the task

        Returns
        -------
        targetList: list
            List of tuples, where each tuple is a (list of dataRefs, kwargs) pair.
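
        Examples
        --------
        A hedged sketch of how the nested dict from `buildRefDict` is
        flattened (the data IDs are hypothetical)::

            # refDict == {9813: {"5,5": {"HSC-G": refG, "HSC-I": refI}}}
            # getTargetList(parsedCmd) -> [([refG, refI], kwargs)]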
        """
        refDict = MergeSourcesRunner.buildRefDict(parsedCmd)
        return [(list(p.values()), kwargs) for t in refDict.values() for p in t.values()]


def _makeGetSchemaCatalogs(datasetSuffix):
    """Construct a getSchemaCatalogs instance method

    These are identical for most of the classes here, so we'll consolidate
    the code.

    datasetSuffix: Suffix of dataset name, e.g., "src" for "deepCoadd_src"
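
    A hedged sketch of intended use in a task class body (the suffix here
    is illustrative)::

        getSchemaCatalogs = _makeGetSchemaCatalogs("mergeDet")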
    """

    def getSchemaCatalogs(self):
        """Return a dict of empty catalogs for each catalog dataset produced by this task."""
        src = afwTable.SourceCatalog(self.schema)
        if hasattr(self, "algMetadata"):
            src.getTable().setMetadata(self.algMetadata)
        return {self.config.coaddName + "Coadd_" + datasetSuffix: src}
    return getSchemaCatalogs


def makeMergeArgumentParser(name, dataset):
    """!
    @brief Create a suitable ArgumentParser.

    We will use the ArgumentParser to provide a list of data
    references for patches; the RunnerClass will sort them into lists
    of data references for the same patch.
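
    A hedged usage sketch (the task name and dataset suffix are illustrative)::

        parser = makeMergeArgumentParser("mergeCoaddDetections", "det")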
    """
    parser = ArgumentParser(name)
    parser.add_id_argument("--id", "deepCoadd_" + dataset,
                           ContainerClass=ExistingCoaddDataIdContainer,
                           help="data ID, e.g. --id tract=12345 patch=1,2 filter=g^r^i")
    return parser


def getInputSchema(task, butler=None, schema=None):
    """!
    @brief Obtain the input schema either directly or from a butler reference.

    @param[in] butler butler reference to obtain the input schema from
    @param[in] schema the input schema
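
    A hedged illustration of the dataset name composition: with
    task.config.coaddName == "deep" and task.inputDataset == "det", the
    schema would be read from the "deepCoadd_det_schema" dataset.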
    """
    if schema is None:
        assert butler is not None, "Neither butler nor schema specified"
        schema = butler.get(task.config.coaddName + "Coadd_" + task.inputDataset + "_schema",
                            immediate=True).schema
    return schema


def getShortFilterName(name):
    """Given a longer, camera-specific filter name (e.g. "HSC-I") return its shorthand name ("i").
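
    A hedged doctest-style sketch (requires the matching filter to have been
    declared to lsst.afw.image, e.g. by an obs package)::

        getShortFilterName("HSC-I")  # -> "i"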
    """
    # I'm not sure if this is the way this is supposed to be implemented, but it seems to work,
    # and it's the only way I could get it to work.
    return afwImage.Filter(name).getFilterProperty().getName()


def readCatalog(task, patchRef):
    """!
    @brief Read input catalog.

    We read the input dataset provided by the 'inputDataset'
    class variable.

    @param[in] patchRef data reference for patch
    @return tuple consisting of the filter name and the catalog
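
    A hedged illustration: with coaddName == "deep" and inputDataset == "det",
    this fetches the "deepCoadd_det" dataset for the given patch.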
    """
    filterName = patchRef.dataId["filter"]
    catalog = patchRef.get(task.config.coaddName + "Coadd_" + task.inputDataset, immediate=True)
    task.log.info("Read %d sources for filter %s: %s" % (len(catalog), filterName, patchRef.dataId))
    return filterName, catalog


class CullPeaksConfig(Config):
    """!
    @anchor CullPeaksConfig_

    @brief Configuration for culling garbage peaks after merging footprints.

    Peaks may also be culled after detection or during deblending; this configuration object
    only deals with culling after merging Footprints.

    These cuts are based on three quantities:
    - nBands: the number of bands in which the peak was detected
    - peakRank: the position of the peak within its family, sorted from brightest to faintest.
    - peakRankNormalized: the peak rank divided by the total number of peaks in the family.

    The formula that identifies peaks to cull is:

        nBands < nBandsSufficient
        AND (peakRank >= rankSufficient)
        AND (peakRank >= rankConsidered OR peakRankNormalized >= rankNormalizedConsidered)

    To disable peak culling, simply set nBandsSufficient=1.
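
    A worked example with the default values (nBandsSufficient=2, rankSufficient=20,
    rankConsidered=30, rankNormalizedConsidered=0.7): a peak detected in one band at
    peakRank 25 in a family of 30 peaks has peakRankNormalized = 25/30 ~ 0.83, so all
    three clauses hold (1 < 2, 25 >= 20, 0.83 >= 0.7) and the peak is culled.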
    """

    nBandsSufficient = RangeField(dtype=int, default=2, min=1,
                                  doc="Always keep peaks detected in this many bands")
    rankSufficient = RangeField(dtype=int, default=20, min=1,
                                doc="Always keep this many peaks in each family")
    rankConsidered = RangeField(dtype=int, default=30, min=1,
                                doc=("Keep peaks with less than this rank that also match the "
                                     "rankNormalizedConsidered condition."))
    rankNormalizedConsidered = RangeField(dtype=float, default=0.7, min=0.0,
                                          doc=("Keep peaks with less than this normalized rank that"
                                               " also match the rankConsidered condition."))


def _makeMakeIdFactory(datasetName):
    """Construct a makeIdFactory instance method

    These are identical for all the classes here, so this consolidates
    the code.

    datasetName: Dataset name without the coadd name prefix, e.g., "CoaddId" for "deepCoaddId"
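
    A hedged sketch of intended use in a task class body::

        makeIdFactory = _makeMakeIdFactory("CoaddId")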
    """

    def makeIdFactory(self, dataRef):
        """Return an IdFactory for setting the detection identifiers

        The actual parameters used in the IdFactory are provided by
        the butler (through the provided data reference).
        """
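        # Reserve the low (64 - expBits) bits of each 64-bit ID for per-source
        # counters; the exposure/coadd ID occupies the remaining high bits.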
        expBits = dataRef.get(self.config.coaddName + datasetName + "_bits")
        expId = int(dataRef.get(self.config.coaddName + datasetName))
        return afwTable.IdFactory.makeSource(expId, 64 - expBits)
    return makeIdFactory