10 """Task runner for `MergeDetectionTask` `MergeMeasurementTask` 12 Required because the run method requires a list of 13 dataRefs rather than a single dataRef. 16 """Provide a butler to the Task constructor. 23 Tuple of a list of data references and kwargs (un-used) 28 Thrown if both `parsedCmd` & `args` are `None` 30 if parsedCmd
is not None:
31 butler = parsedCmd.butler
32 elif args
is not None:
33 dataRefList, kwargs = args
34 butler = dataRefList[0].getButler()
36 raise RuntimeError(
"Neither parsedCmd or args specified")
37 return self.TaskClass(config=self.config, log=self.log, butler=butler)
41 """Build a hierarchical dictionary of patch references 51 A reference dictionary of the form {patch: {tract: {filter: dataRef}}} 56 Thrown when multiple references are provided for the same 57 combination of tract, patch and filter 60 for ref
in parsedCmd.id.refList:
61 tract = ref.dataId[
"tract"]
62 patch = ref.dataId[
"patch"]
63 filter = ref.dataId[
"filter"]
64 if tract
not in refDict:
66 if patch
not in refDict[tract]:
67 refDict[tract][patch] = {}
68 if filter
in refDict[tract][patch]:
69 raise RuntimeError(
"Multiple versions of %s" % (ref.dataId,))
70 refDict[tract][patch][filter] = ref
75 """Provide a list of patch references for each patch, tract, filter combo. 82 Keyword arguments passed to the task 87 List of tuples, where each tuple is a (dataRef, kwargs) pair. 89 refDict = MergeSourcesRunner.buildRefDict(parsedCmd)
90 return [(list(p.values()), kwargs)
for t
in refDict.values()
for p
in t.values()]
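

# Sketch of the grouping (hypothetical data IDs, not part of the pipeline):
# three refs spanning two patches would be sorted by buildRefDict/getTargetList
# into one target per patch, e.g.:
#
#     refDict == {9813: {"1,1": {"g": refA, "r": refB}, "2,2": {"g": refC}}}
#     getTargetList(parsedCmd)  # -> [([refA, refB], {}), ([refC], {})]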


def _makeGetSchemaCatalogs(datasetSuffix):
    """Construct a getSchemaCatalogs instance method.

    These are identical for most of the classes here, so we'll consolidate
    the code.

    datasetSuffix: Suffix of dataset name, e.g., "src" for "deepCoadd_src"
    """
    def getSchemaCatalogs(self):
        """Return a dict of empty catalogs for each catalog dataset produced by this task."""
        src = afwTable.SourceCatalog(self.schema)
        if hasattr(self, "algMetadata"):
            src.getTable().setMetadata(self.algMetadata)
        return {self.config.coaddName + "Coadd_" + datasetSuffix: src}
    return getSchemaCatalogs
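

# Typical usage (hypothetical task class and dataset suffix): a task assigns the
# factory's result as an instance method, advertising e.g. a "deepCoadd_mergeDet"
# schema catalog:
#
#     class ExampleMergeTask(CmdLineTask):
#         getSchemaCatalogs = _makeGetSchemaCatalogs("mergeDet")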


def makeMergeArgumentParser(name, dataset):
    """!
    @brief Create a suitable ArgumentParser.

    We will use the ArgumentParser to provide a list of data
    references for patches; the RunnerClass will sort them into lists
    of data references for the same patch.
    """
    parser = ArgumentParser(name)
    parser.add_id_argument("--id", "deepCoadd_" + dataset,
                           ContainerClass=ExistingCoaddDataIdContainer,
                           help="data ID, e.g. --id tract=12345 patch=1,2 filter=g^r^i")
    return parser


def getInputSchema(task, butler=None, schema=None):
    """!
    @brief Obtain the input schema either directly or from a butler reference.

    @param[in]  butler   butler reference to obtain the input schema from
    @param[in]  schema   the input schema
    """
    if schema is None:
        assert butler is not None, "Neither butler nor schema specified"
        schema = butler.get(task.config.coaddName + "Coadd_" + task.inputDataset + "_schema",
                            immediate=True).schema
    return schema
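

# Either argument suffices (hypothetical calls, for a task with coaddName="deep"
# and inputDataset="det"):
#
#     schema = getInputSchema(task, butler=butler)  # reads "deepCoadd_det_schema"
#     schema = getInputSchema(task, schema=schema)  # returns the given schema as-is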
141 """Given a longer, camera-specific filter name (e.g. "HSC-I") return its shorthand name ("i"). 145 return afwImage.Filter(name).getFilterProperty().getName()


def readCatalog(task, patchRef):
    """!
    @brief Read input catalog.

    We read the input dataset provided by the 'inputDataset'
    class variable.

    @param[in]  patchRef   data reference for patch
    @return tuple consisting of the filter name and the catalog
    """
    filterName = patchRef.dataId["filter"]
    catalog = patchRef.get(task.config.coaddName + "Coadd_" + task.inputDataset, immediate=True)
    task.log.info("Read %d sources for filter %s: %s" % (len(catalog), filterName, patchRef.dataId))
    return filterName, catalog
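

# Hypothetical call: for a task with coaddName="deep" and inputDataset="det",
# this reads the "deepCoadd_det" catalog for the patch and logs the source count:
#
#     filterName, catalog = readCatalog(task, patchRef)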


class CullPeaksConfig(Config):
    """!
    @anchor CullPeaksConfig_

    @brief Configuration for culling garbage peaks after merging footprints.

    Peaks may also be culled after detection or during deblending; this configuration object
    only deals with culling after merging Footprints.

    These cuts are based on three quantities:
     - nBands: the number of bands in which the peak was detected
     - peakRank: the position of the peak within its family, sorted from brightest to faintest.
     - peakRankNormalized: the peak rank divided by the total number of peaks in the family.

    The formula that identifies peaks to cull is:

      nBands < nBandsSufficient
        AND (peakRank >= rankSufficient)
        AND (peakRank >= rankConsidered OR peakRankNormalized >= rankNormalizedConsidered)

    To disable peak culling, simply set nBandsSufficient=1.
    """

    nBandsSufficient = RangeField(dtype=int, default=2, min=1,
                                  doc="Always keep peaks detected in this many bands")
    rankSufficient = RangeField(dtype=int, default=20, min=1,
                                doc="Always keep this many peaks in each family")
    rankConsidered = RangeField(dtype=int, default=30, min=1,
                                doc=("Keep peaks with less than this rank that also match the "
                                     "rankNormalizedConsidered condition."))
    rankNormalizedConsidered = RangeField(dtype=float, default=0.7, min=0.0,
                                          doc=("Keep peaks with less than this normalized rank that"
                                               " also match the rankConsidered condition."))


def _makeMakeIdFactory(datasetName):
    """Construct a makeIdFactory instance method.

    These are identical for all the classes here, so this consolidates
    the code.

    datasetName: Dataset name without the coadd name prefix, e.g., "CoaddId" for "deepCoaddId"
    """
    def makeIdFactory(self, dataRef):
        """Return an IdFactory for setting the detection identifiers.

        The actual parameters used in the IdFactory are provided by
        the butler (through the provided data reference).
        """
        expBits = dataRef.get(self.config.coaddName + datasetName + "_bits")
        expId = int(dataRef.get(self.config.coaddName + datasetName))
        return afwTable.IdFactory.makeSource(expId, 64 - expBits)
    return makeIdFactory
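

# Illustrative sketch only (assumed packing scheme): the IdFactory above reserves
# the low 64 - expBits bits for per-source counters and stores the coadd ID in
# the high bits, roughly:
def _examplePackSourceId(expId, expBits, sourceNum):
    """Pack a coadd ID and source counter into a single 64-bit identifier."""
    reservedBits = 64 - expBits  # bits left for source numbers
    return (expId << reservedBits) | sourceNum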