lsst.pipe.tasks  21.0.0-38-g070523fc+12fa6302a7
multiBandUtils.py
Go to the documentation of this file.
1 # This file is part of pipe_tasks.
2 #
3 # Developed for the LSST Data Management System.
4 # This product includes software developed by the LSST Project
5 # (https://www.lsst.org).
6 # See the COPYRIGHT file at the top-level directory of this distribution
7 # for details of code ownership.
8 #
9 # This program is free software: you can redistribute it and/or modify
10 # it under the terms of the GNU General Public License as published by
11 # the Free Software Foundation, either version 3 of the License, or
12 # (at your option) any later version.
13 #
14 # This program is distributed in the hope that it will be useful,
15 # but WITHOUT ANY WARRANTY; without even the implied warranty of
16 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 # GNU General Public License for more details.
18 #
19 # You should have received a copy of the GNU General Public License
20 # along with this program. If not, see <https://www.gnu.org/licenses/>.
21 import lsst.afw.table as afwTable
22 
23 from lsst.coadd.utils import ExistingCoaddDataIdContainer
24 from lsst.pipe.base import TaskRunner, ArgumentParser
25 from lsst.pex.config import Config, RangeField
26 
27 
class MergeSourcesRunner(TaskRunner):
    """Task runner for `MergeDetectionTask` `MergeMeasurementTask`

    Required because the run method requires a list of
    dataRefs rather than a single dataRef.
    """
    def makeTask(self, parsedCmd=None, args=None):
        """Provide a butler to the Task constructor.

        Parameters
        ----------
        parsedCmd:
            The parsed command
        args: tuple
            Tuple of a list of data references and kwargs (un-used)

        Returns
        -------
        Instance of ``self.TaskClass`` constructed with the butler.

        Raises
        ------
        RuntimeError
            Thrown if both `parsedCmd` & `args` are `None`
        """
        if parsedCmd is not None:
            butler = parsedCmd.butler
        elif args is not None:
            # All dataRefs share a butler, so any one of them will do.
            dataRefList, kwargs = args
            butler = dataRefList[0].getButler()
        else:
            raise RuntimeError("Neither parsedCmd or args specified")
        return self.TaskClass(config=self.config, log=self.log, butler=butler)

    @staticmethod
    def buildRefDict(parsedCmd):
        """Build a hierarchical dictionary of patch references

        Parameters
        ----------
        parsedCmd:
            The parsed command

        Returns
        -------
        refDict: dict
            A reference dictionary of the form {tract: {patch: {filter: dataRef}}}

        Raises
        ------
        RuntimeError
            Thrown when multiple references are provided for the same
            combination of tract, patch and filter
        """
        refDict = {}  # Will index this as refDict[tract][patch][filterName] = ref
        for ref in parsedCmd.id.refList:
            tract = ref.dataId["tract"]
            patch = ref.dataId["patch"]
            # Named to avoid shadowing the `filter` builtin.
            filterName = ref.dataId["filter"]
            if tract not in refDict:
                refDict[tract] = {}
            if patch not in refDict[tract]:
                refDict[tract][patch] = {}
            if filterName in refDict[tract][patch]:
                raise RuntimeError("Multiple versions of %s" % (ref.dataId,))
            refDict[tract][patch][filterName] = ref
        return refDict

    @staticmethod
    def getTargetList(parsedCmd, **kwargs):
        """Provide a list of patch references for each patch, tract, filter combo.

        Parameters
        ----------
        parsedCmd:
            The parsed command
        kwargs:
            Keyword arguments passed to the task

        Returns
        -------
        targetList: list
            List of tuples, where each tuple is a (dataRefList, kwargs) pair;
            dataRefList holds one dataRef per filter for a single tract/patch.
        """
        refDict = MergeSourcesRunner.buildRefDict(parsedCmd)
        return [(list(p.values()), kwargs) for t in refDict.values() for p in t.values()]
110 
111 
112 def _makeGetSchemaCatalogs(datasetSuffix):
113  """Construct a getSchemaCatalogs instance method
114 
115  These are identical for most of the classes here, so we'll consolidate
116  the code.
117 
118  datasetSuffix: Suffix of dataset name, e.g., "src" for "deepCoadd_src"
119  """
120 
121  def getSchemaCatalogs(self):
122  """Return a dict of empty catalogs for each catalog dataset produced by this task."""
123  src = afwTable.SourceCatalog(self.schema)
124  if hasattr(self, "algMetadata"):
125  src.getTable().setMetadata(self.algMetadata)
126  return {self.config.coaddName + "Coadd_" + datasetSuffix: src}
127  return getSchemaCatalogs
128 
129 
def makeMergeArgumentParser(name, dataset):
    """Create a suitable ArgumentParser.

    We will use the ArgumentParser to provide a list of data
    references for patches; the RunnerClass will sort them into lists
    of data references for the same patch.

    Parameters
    ----------
    name : str
        Name of the task, passed to the ArgumentParser.
    dataset : str
        Coadd dataset suffix, e.g., "det" for "deepCoadd_det".
    """
    parser = ArgumentParser(name)
    datasetType = "deepCoadd_" + dataset
    parser.add_id_argument("--id", datasetType,
                           ContainerClass=ExistingCoaddDataIdContainer,
                           help="data ID, e.g. --id tract=12345 patch=1,2 filter=g^r^i")
    return parser
143 
144 
def getInputSchema(task, butler=None, schema=None):
    """Obtain the input schema either directly or from a butler reference.

    Parameters
    ----------
    task :
        Task whose ``config.coaddName`` and ``inputDataset`` name the schema
        dataset fetched when ``schema`` is not supplied.
    butler :
        Butler reference to obtain the input schema from.
    schema :
        The input schema; returned unchanged when provided.

    Returns
    -------
    The input schema.

    Raises
    ------
    RuntimeError
        If both ``butler`` and ``schema`` are `None`.
    """
    if schema is None:
        # An `assert` would be stripped under `python -O`; raise explicitly,
        # matching the error style used in MergeSourcesRunner.makeTask.
        if butler is None:
            raise RuntimeError("Neither butler nor schema specified")
        schema = butler.get(task.config.coaddName + "Coadd_" + task.inputDataset + "_schema",
                            immediate=True).schema
    return schema
157 
158 
def readCatalog(task, patchRef):
    """Read input catalog.

    We read the input dataset provided by the 'inputDataset'
    class variable.

    Parameters
    ----------
    patchRef :
        Data reference for patch.

    Returns
    -------
    Tuple consisting of the band name and the catalog.
    """
    band = patchRef.get(task.config.coaddName + "Coadd_filterLabel", immediate=True).bandLabel
    catalog = patchRef.get(task.config.coaddName + "Coadd_" + task.inputDataset, immediate=True)
    # Pass lazy %-style arguments instead of pre-formatting with `%` so the
    # message is only rendered when the log record is actually emitted.
    task.log.info("Read %d sources for band %s: %s", len(catalog), band, patchRef.dataId)
    return band, catalog
173 
174 
class CullPeaksConfig(Config):
    """!
    @anchor CullPeaksConfig_

    @brief Configuration for culling garbage peaks after merging footprints.

    Peaks may also be culled after detection or during deblending; this configuration object
    only deals with culling after merging Footprints.

    These cuts are based on three quantities:
    - nBands: the number of bands in which the peak was detected
    - peakRank: the position of the peak within its family, sorted from brightest to faintest.
    - peakRankNormalized: the peak rank divided by the total number of peaks in the family.

    The formula that identifies peaks to cull is:

    nBands < nBandsSufficient
    AND (peakRank >= rankSufficient)
    AND (peakRank >= rankConsidered OR peakRankNormalized >= rankNormalizedConsidered)

    (NOTE(review): the culling itself is implemented by the consuming merge
    task, not here — confirm the exact combination of conditions there.)

    To disable peak culling, simply set nBandsSufficient=1.
    """

    # Peaks seen in at least this many bands are never culled.
    nBandsSufficient = RangeField(dtype=int, default=2, min=1,
                                  doc="Always keep peaks detected in this many bands")
    # The brightest `rankSufficient` peaks of each family are never culled.
    rankSufficient = RangeField(dtype=int, default=20, min=1,
                                doc="Always keep this many peaks in each family")
    rankConsidered = RangeField(dtype=int, default=30, min=1,
                                doc=("Keep peaks with less than this rank that also match the "
                                     "rankNormalizedConsidered condition."))
    rankNormalizedConsidered = RangeField(dtype=float, default=0.7, min=0.0,
                                          doc=("Keep peaks with less than this normalized rank that"
                                               " also match the rankConsidered condition."))
208 
209 
210 def _makeMakeIdFactory(datasetName):
211  """Construct a makeIdFactory instance method
212 
213  These are identical for all the classes here, so this consolidates
214  the code.
215 
216  datasetName: Dataset name without the coadd name prefix, e.g., "CoaddId" for "deepCoaddId"
217  """
218 
219  def makeIdFactory(self, dataRef):
220  """Return an IdFactory for setting the detection identifiers
221 
222  The actual parameters used in the IdFactory are provided by
223  the butler (through the provided data reference.
224  """
225  expBits = dataRef.get(self.config.coaddName + datasetName + "_bits")
226  expId = int(dataRef.get(self.config.coaddName + datasetName))
227  return afwTable.IdFactory.makeSource(expId, 64 - expBits)
228  return makeIdFactory
def makeTask(self, parsedCmd=None, args=None)
def makeMergeArgumentParser(name, dataset)
Create a suitable ArgumentParser.
def getInputSchema(task, butler=None, schema=None)
Obtain the input schema either directly or from a butler reference.
def readCatalog(task, patchRef)
Read input catalog.