26 from lsst.obs.base
import ExposureIdInfo
30 """Task runner for `MergeDetectionTask` `MergeMeasurementTask`
32 Required because the run method requires a list of
33 dataRefs rather than a single dataRef.
36 """Provide a butler to the Task constructor.
43 Tuple of a list of data references and kwargs (un-used)
48 Thrown if both `parsedCmd` & `args` are `None`
50 if parsedCmd
is not None:
51 butler = parsedCmd.butler
52 elif args
is not None:
53 dataRefList, kwargs = args
54 butler = dataRefList[0].getButler()
56 raise RuntimeError(
"Neither parsedCmd or args specified")
57 return self.TaskClass(config=self.config, log=self.log, butler=butler)
61 """Build a hierarchical dictionary of patch references
71 A reference dictionary of the form {patch: {tract: {filter: dataRef}}}
76 Thrown when multiple references are provided for the same
77 combination of tract, patch and filter
80 for ref
in parsedCmd.id.refList:
81 tract = ref.dataId[
"tract"]
82 patch = ref.dataId[
"patch"]
83 filter = ref.dataId[
"filter"]
84 if tract
not in refDict:
86 if patch
not in refDict[tract]:
87 refDict[tract][patch] = {}
88 if filter
in refDict[tract][patch]:
89 raise RuntimeError(
"Multiple versions of %s" % (ref.dataId,))
90 refDict[tract][patch][filter] = ref
95 """Provide a list of patch references for each patch, tract, filter combo.
102 Keyword arguments passed to the task
107 List of tuples, where each tuple is a (dataRef, kwargs) pair.
109 refDict = MergeSourcesRunner.buildRefDict(parsedCmd)
110 return [(list(p.values()), kwargs)
for t
in refDict.values()
for p
in t.values()]
113 def _makeGetSchemaCatalogs(datasetSuffix):
114 """Construct a getSchemaCatalogs instance method
116 These are identical for most of the classes here, so we'll consolidate
119 datasetSuffix: Suffix of dataset name, e.g., "src" for "deepCoadd_src"
122 def getSchemaCatalogs(self):
123 """Return a dict of empty catalogs for each catalog dataset produced by this task."""
124 src = afwTable.SourceCatalog(self.schema)
125 if hasattr(self,
"algMetadata"):
126 src.getTable().setMetadata(self.algMetadata)
127 return {self.config.coaddName +
"Coadd_" + datasetSuffix: src}
128 return getSchemaCatalogs
def makeMergeArgumentParser(name, dataset):
    """Create a suitable ArgumentParser.

    We will use the ArgumentParser to get a list of data
    references for patches; the RunnerClass will sort them into lists
    of data references for the same patch.

    Parameters
    ----------
    name : str
        Name of the task, passed to the ArgumentParser.
    dataset : str
        Dataset name without the "deepCoadd_" prefix.

    Returns
    -------
    parser : ArgumentParser
        The configured argument parser.
    """
    parser = ArgumentParser(name)
    parser.add_id_argument("--id", "deepCoadd_" + dataset,
                           ContainerClass=ExistingCoaddDataIdContainer,
                           help="data ID, e.g. --id tract=12345 patch=1,2 filter=g^r^i")
    return parser
def getInputSchema(task, butler=None, schema=None):
    """Obtain the input schema either directly or from a butler reference.

    Parameters
    ----------
    task
        Task whose ``config.coaddName`` and ``inputDataset`` name the
        schema dataset to fetch.
    butler : optional
        Butler reference to obtain the input schema from; required when
        `schema` is not provided.
    schema : optional
        The input schema; returned unchanged when provided.

    Returns
    -------
    schema
        The input schema.
    """
    if schema is None:
        # NOTE: assert is stripped under -O; kept to preserve the original
        # AssertionError contract for callers.
        assert butler is not None, "Neither butler nor schema specified"
        schema = butler.get(task.config.coaddName + "Coadd_" + task.inputDataset + "_schema",
                            immediate=True).schema
    return schema
def readCatalog(task, patchRef):
    """Read input catalog.

    We read the input dataset provided by the 'inputDataset'
    attribute of the given task.

    Parameters
    ----------
    task
        Task whose ``config.coaddName`` and ``inputDataset`` name the
        catalog dataset to read.
    patchRef
        Data reference for patch.

    Returns
    -------
    tuple
        Tuple consisting of the band name and the catalog.
    """
    band = patchRef.get(task.config.coaddName + "Coadd_filterLabel", immediate=True).bandLabel
    catalog = patchRef.get(task.config.coaddName + "Coadd_" + task.inputDataset, immediate=True)
    task.log.info("Read %d sources for band %s: %s" % (len(catalog), band, patchRef.dataId))
    return band, catalog
# NOTE(review): base class reconstructed as the pex_config Config used by
# RangeField-based configs in this package — confirm against the original file.
class CullPeaksConfig(Config):
    """Configuration for culling garbage peaks after merging footprints.

    Peaks may also be culled after detection or during deblending; this configuration object
    only deals with culling after merging Footprints.

    These cuts are based on three quantities:
    - nBands: the number of bands in which the peak was detected
    - peakRank: the position of the peak within its family, sorted from brightest to faintest.
    - peakRankNormalized: the peak rank divided by the total number of peaks in the family.

    The formula that identifies peaks to cull is:

        nBands < nBandsSufficient
        AND (rank >= rankSufficient)
        AND (rank >= rankConsider OR rank >= rankNormalizedConsider)

    To disable peak culling, simply set nBandsSufficient=1.
    """

    nBandsSufficient = RangeField(dtype=int, default=2, min=1,
                                  doc="Always keep peaks detected in this many bands")
    rankSufficient = RangeField(dtype=int, default=20, min=1,
                                doc="Always keep this many peaks in each family")
    rankConsidered = RangeField(dtype=int, default=30, min=1,
                                doc=("Keep peaks with less than this rank that also match the "
                                     "rankNormalizedConsidered condition."))
    rankNormalizedConsidered = RangeField(dtype=float, default=0.7, min=0.0,
                                          doc=("Keep peaks with less than this normalized rank that"
                                               " also match the rankConsidered condition."))
211 def _makeMakeIdFactory(datasetName):
212 """Construct a makeIdFactory instance method
214 These are identical for all the classes here, so this consolidates
217 datasetName: Dataset name without the coadd name prefix, e.g., "CoaddId" for "deepCoaddId"
220 def makeIdFactory(self, dataRef):
221 """Return an IdFactory for setting the detection identifiers
223 The actual parameters used in the IdFactory are provided by
224 the butler (through the provided data reference.
226 info = ExposureIdInfo(
227 int(dataRef.get(self.config.coaddName + datasetName)),
228 dataRef.get(self.config.coaddName + datasetName +
"_bits")
230 return info.makeSourceIdFactory()
def makeTask(self, parsedCmd=None, args=None)
def getTargetList(parsedCmd, **kwargs)
def buildRefDict(parsedCmd)
def makeMergeArgumentParser(name, dataset)
Create a suitable ArgumentParser.
def getInputSchema(task, butler=None, schema=None)
Obtain the input schema either directly or from a butler reference.
def readCatalog(task, patchRef)
Read input catalog.