29 """Task runner for `MergeDetectionTask` `MergeMeasurementTask`
31 Required because the run method requires a list of
32 dataRefs rather than a single dataRef.
35 """Provide a butler to the Task constructor.
42 Tuple of a list of data references and kwargs (un-used)
47 Thrown if both `parsedCmd` & `args` are `None`
        if parsedCmd is not None:
            butler = parsedCmd.butler
        elif args is not None:
            dataRefList, kwargs = args
            butler = dataRefList[0].getButler()
        else:
            raise RuntimeError("Neither parsedCmd nor args specified")
        return self.TaskClass(config=self.config, log=self.log, butler=butler)
60 """Build a hierarchical dictionary of patch references
70 A reference dictionary of the form {patch: {tract: {filter: dataRef}}}
75 Thrown when multiple references are provided for the same
76 combination of tract, patch and filter
        refDict = {}  # indexed as refDict[tract][patch][filter] = ref
        for ref in parsedCmd.id.refList:
            tract = ref.dataId["tract"]
            patch = ref.dataId["patch"]
            filter = ref.dataId["filter"]
            if tract not in refDict:
                refDict[tract] = {}
            if patch not in refDict[tract]:
                refDict[tract][patch] = {}
            if filter in refDict[tract][patch]:
                raise RuntimeError("Multiple versions of %s" % (ref.dataId,))
            refDict[tract][patch][filter] = ref
        return refDict
94 """Provide a list of patch references for each patch, tract, filter combo.
101 Keyword arguments passed to the task
106 List of tuples, where each tuple is a (dataRef, kwargs) pair.
        refDict = MergeSourcesRunner.buildRefDict(parsedCmd)
        return [(list(p.values()), kwargs)
                for t in refDict.values()
                for p in t.values()]
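
    # Illustrative only (not executed by the pipeline): given a refDict shaped like
    # {tract: {patch: {filter: dataRef}}}, the comprehension above yields one target
    # per tract/patch, carrying that patch's per-filter references. With strings
    # standing in for real dataRefs:
    #
    #     refDict = {9813: {"1,1": {"g": "ref-g", "r": "ref-r"},
    #                       "2,2": {"i": "ref-i"}}}
    #     [(list(p.values()), {}) for t in refDict.values() for p in t.values()]
    #     # -> [(['ref-g', 'ref-r'], {}), (['ref-i'], {})]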


def _makeGetSchemaCatalogs(datasetSuffix):
    """Construct a getSchemaCatalogs instance method.

    These are identical for most of the classes here, so we'll consolidate
    the code.

    Parameters
    ----------
    datasetSuffix:
        Suffix of dataset name, e.g., "src" for "deepCoadd_src"
    """
    def getSchemaCatalogs(self):
        """Return a dict of empty catalogs for each catalog dataset produced by this task."""
        src = afwTable.SourceCatalog(self.schema)
        if hasattr(self, "algMetadata"):
            src.getTable().setMetadata(self.algMetadata)
        return {self.config.coaddName + "Coadd_" + datasetSuffix: src}
    return getSchemaCatalogs
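

# Typical use (illustrative; the class name and suffix below are examples, not a
# fixed API): a task binds the generated method as a class attribute, so that
# `self.config.coaddName` and `self.schema` resolve on the task instance:
#
#     class ExampleMergeTask(CmdLineTask):
#         getSchemaCatalogs = _makeGetSchemaCatalogs("det")  # -> "<coaddName>Coadd_det"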


def makeMergeArgumentParser(name, dataset):
    """!
    @brief Create a suitable ArgumentParser.

    We will use the ArgumentParser to provide a list of data
    references for patches; the RunnerClass will sort them into lists
    of data references for the same patch.
    """
    parser = ArgumentParser(name)
    parser.add_id_argument("--id", "deepCoadd_" + dataset,
                           ContainerClass=ExistingCoaddDataIdContainer,
                           help="data ID, e.g. --id tract=12345 patch=1,2 filter=g^r^i")
    return parser


def getInputSchema(task, butler=None, schema=None):
    """!
    @brief Obtain the input schema either directly or from a butler reference.

    @param[in]  butler   butler reference to obtain the input schema from
    @param[in]  schema   the input schema
    """
    if schema is None:
        assert butler is not None, "Neither butler nor schema specified"
        schema = butler.get(task.config.coaddName + "Coadd_" + task.inputDataset + "_schema",
                            immediate=True).schema
    return schema
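

# Either call pattern works (illustrative; `task` is any task defining
# config.coaddName and inputDataset, and `knownSchema` is a hypothetical schema):
#
#     schema = getInputSchema(task, schema=knownSchema)  # use the given schema directly
#     schema = getInputSchema(task, butler=butler)       # read "<coaddName>Coadd_<inputDataset>_schema"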


def readCatalog(task, patchRef):
    """!
    @brief Read input catalog.

    We read the input dataset provided by the 'inputDataset'
    class variable.

    @param[in]  patchRef   data reference for patch
    @return tuple consisting of the band name and the catalog
    """
    band = patchRef.get(task.config.coaddName + "Coadd_filterLabel", immediate=True).bandLabel
    catalog = patchRef.get(task.config.coaddName + "Coadd_" + task.inputDataset, immediate=True)
    task.log.info("Read %d sources for band %s: %s" % (len(catalog), band, patchRef.dataId))
    return band, catalog


class CullPeaksConfig(Config):
    """!
    @anchor CullPeaksConfig_

    @brief Configuration for culling garbage peaks after merging footprints.

    Peaks may also be culled after detection or during deblending; this configuration object
    only deals with culling after merging Footprints.

    These cuts are based on three quantities:
     - nBands: the number of bands in which the peak was detected
     - peakRank: the position of the peak within its family, sorted from brightest to faintest.
     - peakRankNormalized: the peak rank divided by the total number of peaks in the family.

    The formula that identifies peaks to cull is:

      nBands < nBandsSufficient
      AND (peakRank >= rankSufficient)
      AND (peakRank >= rankConsidered OR peakRankNormalized >= rankNormalizedConsidered)

    To disable peak culling, simply set nBandsSufficient=1.
    """
    nBandsSufficient = RangeField(dtype=int, default=2, min=1,
                                  doc="Always keep peaks detected in this many bands")
    rankSufficient = RangeField(dtype=int, default=20, min=1,
                                doc="Always keep this many peaks in each family")
    rankConsidered = RangeField(dtype=int, default=30, min=1,
                                doc=("Keep peaks with less than this rank that also match the "
                                     "rankNormalizedConsidered condition."))
    rankNormalizedConsidered = RangeField(dtype=float, default=0.7, min=0.0,
                                          doc=("Keep peaks with less than this normalized rank that"
                                               " also match the rankConsidered condition."))


def _makeMakeIdFactory(datasetName):
    """Construct a makeIdFactory instance method.

    These are identical for all the classes here, so this consolidates
    the code.

    Parameters
    ----------
    datasetName:
        Dataset name without the coadd name prefix, e.g., "CoaddId" for "deepCoaddId"
    """

    def makeIdFactory(self, dataRef):
        """Return an IdFactory for setting the detection identifiers.

        The actual parameters used in the IdFactory are provided by
        the butler (through the provided data reference).
        """
        expBits = dataRef.get(self.config.coaddName + datasetName + "_bits")
        expId = int(dataRef.get(self.config.coaddName + datasetName))
        return afwTable.IdFactory.makeSource(expId, 64 - expBits)
    return makeIdFactory
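
    # How the resulting IdFactory packs identifiers (illustrative arithmetic, not
    # pipeline code): makeSource(expId, 64 - expBits) reserves the low 64 - expBits
    # bits for a per-source counter and places expId in the high bits, i.e.
    #
    #     sourceId = (expId << (64 - expBits)) | counter
    #
    # so e.g. expBits = 34 leaves 30 bits, or up to 2**30 sources per coadd.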