Coverage for python/lsst/pipe/tasks/multiBandUtils.py : 52%

import lsst.afw.image as afwImage
import lsst.afw.table as afwTable
import lsst.pex.exceptions as pexExceptions

from lsst.coadd.utils import ExistingCoaddDataIdContainer
from lsst.pipe.base import TaskRunner, ArgumentParser
from lsst.pex.config import Config, RangeField


class MergeSourcesRunner(TaskRunner):
    """Task runner for `MergeDetectionTask` and `MergeMeasurementTask`.

    Required because the run method requires a list of
    dataRefs rather than a single dataRef.
    """
    def makeTask(self, parsedCmd=None, args=None):
        """Provide a butler to the Task constructor.

        Parameters
        ----------
        parsedCmd:
            The parsed command
        args: tuple
            Tuple of a list of data references and kwargs (unused)

        Raises
        ------
        RuntimeError
            Thrown if both `parsedCmd` and `args` are `None`
        """
        if parsedCmd is not None:
            butler = parsedCmd.butler
        elif args is not None:
            dataRefList, kwargs = args
            butler = dataRefList[0].getButler()
        else:
            raise RuntimeError("Neither parsedCmd nor args specified")
        return self.TaskClass(config=self.config, log=self.log, butler=butler)

    @staticmethod
    def buildRefDict(parsedCmd):
        """Build a hierarchical dictionary of patch references.

        Parameters
        ----------
        parsedCmd:
            The parsed command

        Returns
        -------
        refDict: dict
            A reference dictionary of the form {tract: {patch: {filter: dataRef}}}

        Raises
        ------
        RuntimeError
            Thrown when multiple references are provided for the same
            combination of tract, patch and filter
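
        Notes
        -----
        An illustrative sketch of the returned structure; the tract, patch and
        filter values (and the dataRef placeholders) are hypothetical::

            {9813: {"5,5": {"HSC-G": dataRefG, "HSC-R": dataRefR}}}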
59 """
60 refDict = {} # Will index this as refDict[tract][patch][filter] = ref
61 for ref in parsedCmd.id.refList:
62 tract = ref.dataId["tract"]
63 patch = ref.dataId["patch"]
64 filter = ref.dataId["filter"]
65 if tract not in refDict:
66 refDict[tract] = {}
67 if patch not in refDict[tract]:
68 refDict[tract][patch] = {}
69 if filter in refDict[tract][patch]:
70 raise RuntimeError("Multiple versions of %s" % (ref.dataId,))
71 refDict[tract][patch][filter] = ref
72 return refDict

    @staticmethod
    def getTargetList(parsedCmd, **kwargs):
        """Provide a list of patch references for each patch, tract, filter combination.

        Parameters
        ----------
        parsedCmd:
            The parsed command
        kwargs:
            Keyword arguments passed to the task

        Returns
        -------
        targetList: list
            List of tuples, where each tuple is a (dataRefList, kwargs) pair;
            dataRefList holds the per-filter references for one tract and patch.
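
        Notes
        -----
        An illustrative sketch with hypothetical values: for a refDict of the
        form {9813: {"5,5": {"HSC-G": dataRefG, "HSC-R": dataRefR}}}, this
        returns [([dataRefG, dataRefR], kwargs)], i.e. one entry per
        tract/patch with the per-filter references grouped into a single list.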
89 """
90 refDict = MergeSourcesRunner.buildRefDict(parsedCmd)
91 return [(list(p.values()), kwargs) for t in refDict.values() for p in t.values()]


def _makeGetSchemaCatalogs(datasetSuffix):
    """Construct a getSchemaCatalogs instance method.

    These are identical for most of the classes here, so we'll consolidate
    the code.

    datasetSuffix: Suffix of dataset name, e.g., "src" for "deepCoadd_src"
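
    Intended usage is to assign the returned function as a class attribute of a
    task; an illustrative sketch (the suffix here is hypothetical):

        getSchemaCatalogs = _makeGetSchemaCatalogs("mergeDet")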
101 """
103 def getSchemaCatalogs(self):
104 """Return a dict of empty catalogs for each catalog dataset produced by this task."""
105 src = afwTable.SourceCatalog(self.schema)
106 if hasattr(self, "algMetadata"):
107 src.getTable().setMetadata(self.algMetadata)
108 return {self.config.coaddName + "Coadd_" + datasetSuffix: src}
109 return getSchemaCatalogs


def makeMergeArgumentParser(name, dataset):
    """!
    @brief Create a suitable ArgumentParser.

    We will use the ArgumentParser to provide a list of data
    references for patches; the RunnerClass will sort them into lists
    of data references for the same patch.
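
    An illustrative call (the task and dataset names here are hypothetical):

        parser = makeMergeArgumentParser("mergeCoaddDetections", "det")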
119 """
120 parser = ArgumentParser(name)
121 parser.add_id_argument("--id", "deepCoadd_" + dataset,
122 ContainerClass=ExistingCoaddDataIdContainer,
123 help="data ID, e.g. --id tract=12345 patch=1,2 filter=g^r^i")
124 return parser


def getInputSchema(task, butler=None, schema=None):
    """!
    @brief Obtain the input schema either directly or from a butler reference.

    @param[in] task the task whose input schema is wanted
    @param[in] butler butler reference to obtain the input schema from
    @param[in] schema the input schema
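
    With, e.g., coaddName="deep" and inputDataset="mergeDet" (illustrative
    values), the schema is read from the "deepCoadd_mergeDet_schema" dataset.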
133 """
134 if schema is None: 134 ↛ 138line 134 didn't jump to line 138, because the condition on line 134 was never false
135 assert butler is not None, "Neither butler nor schema specified"
136 schema = butler.get(task.config.coaddName + "Coadd_" + task.inputDataset + "_schema",
137 immediate=True).schema
138 return schema


def getShortFilterName(name):
    """Given a longer, camera-specific filter name (e.g. "HSC-I") return its shorthand name ("i").
    """
    # I'm not sure if this is the way this is supposed to be implemented, but it seems to work,
    # and it's the only way I could get it to work.
    try:
        return afwImage.Filter(name).getFilterProperty().getName()
    except pexExceptions.NotFoundError:
        # No mapping could be found, try proceeding with the given name
        return name


def readCatalog(task, patchRef):
    """!
    @brief Read input catalog.

    We read the input dataset provided by the 'inputDataset'
    class variable.

    @param[in] task the task whose coaddName and inputDataset are used
    @param[in] patchRef data reference for patch
    @return tuple consisting of the filter name and the catalog
    """
    filterName = patchRef.dataId["filter"]
    catalog = patchRef.get(task.config.coaddName + "Coadd_" + task.inputDataset, immediate=True)
    task.log.info("Read %d sources for filter %s: %s" % (len(catalog), filterName, patchRef.dataId))
    return filterName, catalog


class CullPeaksConfig(Config):
    """!
    @anchor CullPeaksConfig_

    @brief Configuration for culling garbage peaks after merging footprints.

    Peaks may also be culled after detection or during deblending; this configuration object
    only deals with culling after merging Footprints.

    These cuts are based on three quantities:
     - nBands: the number of bands in which the peak was detected
     - peakRank: the position of the peak within its family, sorted from brightest to faintest.
     - peakRankNormalized: the peak rank divided by the total number of peaks in the family.

    The formula that identifies peaks to cull is:

      nBands < nBandsSufficient
      AND (rank >= rankSufficient)
      AND (rank >= rankConsidered OR rankNormalized >= rankNormalizedConsidered)

    To disable peak culling, simply set nBandsSufficient=1.
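
    As an illustrative example with the default values (nBandsSufficient=2,
    rankSufficient=20, rankConsidered=30, rankNormalizedConsidered=0.7): a peak
    detected in a single band with rank 25 in a family of 30 peaks has a
    normalized rank of roughly 0.83 and is culled, while the same peak in a
    family of 100 peaks (normalized rank 0.25, rank below rankConsidered) is
    kept.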
190 """
192 nBandsSufficient = RangeField(dtype=int, default=2, min=1,
193 doc="Always keep peaks detected in this many bands")
194 rankSufficient = RangeField(dtype=int, default=20, min=1,
195 doc="Always keep this many peaks in each family")
196 rankConsidered = RangeField(dtype=int, default=30, min=1,
197 doc=("Keep peaks with less than this rank that also match the "
198 "rankNormalizedConsidered condition."))
199 rankNormalizedConsidered = RangeField(dtype=float, default=0.7, min=0.0,
200 doc=("Keep peaks with less than this normalized rank that"
201 " also match the rankConsidered condition."))


def _makeMakeIdFactory(datasetName):
    """Construct a makeIdFactory instance method.

    These are identical for all the classes here, so this consolidates
    the code.

    datasetName: Dataset name without the coadd name prefix, e.g., "CoaddId" for "deepCoaddId"
    """

    def makeIdFactory(self, dataRef):
        """Return an IdFactory for setting the detection identifiers.

        The actual parameters used in the IdFactory are provided by
        the butler (through the provided data reference).
        """
        expBits = dataRef.get(self.config.coaddName + datasetName + "_bits")
        expId = int(dataRef.get(self.config.coaddName + datasetName))
        return afwTable.IdFactory.makeSource(expId, 64 - expBits)
    return makeIdFactory