# This file is part of pipe_tasks.
#
# Developed for the LSST Data Management System.
# This product includes software developed by the LSST Project
# (https://www.lsst.org).
# See the COPYRIGHT file at the top-level directory of this distribution
# for details of code ownership.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import lsst.afw.table as afwTable

from lsst.coadd.utils import ExistingCoaddDataIdContainer
from lsst.pipe.base import TaskRunner, ArgumentParser
from lsst.pex.config import Config, RangeField


class MergeSourcesRunner(TaskRunner):
    """Task runner for `MergeDetectionTask` and `MergeMeasurementTask`.

    Required because the run method requires a list of
    dataRefs rather than a single dataRef.
    """
    def makeTask(self, parsedCmd=None, args=None):
        """Provide a butler to the Task constructor.

        Parameters
        ----------
        parsedCmd:
            The parsed command
        args: tuple
            Tuple of a list of data references and kwargs (unused)

        Raises
        ------
        RuntimeError
            Raised if both `parsedCmd` and `args` are `None`.
        """
        if parsedCmd is not None:
            butler = parsedCmd.butler
        elif args is not None:
            dataRefList, kwargs = args
            butler = dataRefList[0].getButler()
        else:
            raise RuntimeError("Neither parsedCmd nor args specified")
        return self.TaskClass(config=self.config, log=self.log, butler=butler)

    @staticmethod
    def buildRefDict(parsedCmd):
        """Build a hierarchical dictionary of patch references.

        Parameters
        ----------
        parsedCmd:
            The parsed command

        Returns
        -------
        refDict: dict
            A reference dictionary of the form {tract: {patch: {filter: dataRef}}}

        Raises
        ------
        RuntimeError
            Raised when multiple references are provided for the same
            combination of tract, patch and filter.
        """
        refDict = {}  # Will index this as refDict[tract][patch][filter] = ref
        for ref in parsedCmd.id.refList:
            tract = ref.dataId["tract"]
            patch = ref.dataId["patch"]
            filter = ref.dataId["filter"]
            if tract not in refDict:
                refDict[tract] = {}
            if patch not in refDict[tract]:
                refDict[tract][patch] = {}
            if filter in refDict[tract][patch]:
                raise RuntimeError("Multiple versions of %s" % (ref.dataId,))
            refDict[tract][patch][filter] = ref
        return refDict

    @staticmethod
    def getTargetList(parsedCmd, **kwargs):
        """Provide a list of patch references grouped by tract and patch.

        The data references for all filters of a given tract/patch
        combination are grouped into a single target.

        Parameters
        ----------
        parsedCmd:
            The parsed command
        kwargs:
            Keyword arguments passed to the task

        Returns
        -------
        targetList: list
            List of tuples, where each tuple is a (dataRefList, kwargs) pair.
        """
        refDict = MergeSourcesRunner.buildRefDict(parsedCmd)
        return [(list(p.values()), kwargs) for t in refDict.values() for p in t.values()]
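
# A minimal sketch (hypothetical dataIds) of how the runner groups references:
# for one tract (9813) and one patch ("5,5") observed in bands g and r, with
# `gRef` and `rRef` standing in for butler data references,
#
#     refDict = {9813: {"5,5": {"g": gRef, "r": rRef}}}
#
# and getTargetList flattens this to one (dataRefList, kwargs) entry per
# tract/patch combination:
#
#     targetList = [([gRef, rRef], {})]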


def _makeGetSchemaCatalogs(datasetSuffix):
    """Construct a getSchemaCatalogs instance method.

    These are identical for most of the classes here, so we'll consolidate
    the code.

    Parameters
    ----------
    datasetSuffix: str
        Suffix of dataset name, e.g., "src" for "deepCoadd_src"
    """

    def getSchemaCatalogs(self):
        """Return a dict of empty catalogs for each catalog dataset produced by this task."""
        src = afwTable.SourceCatalog(self.schema)
        if hasattr(self, "algMetadata"):
            src.getTable().setMetadata(self.algMetadata)
        return {self.config.coaddName + "Coadd_" + datasetSuffix: src}
    return getSchemaCatalogs
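
# A usage sketch (hypothetical task class and dataset suffix): assigning the
# returned closure at class scope makes it an ordinary instance method, so
# with config.coaddName == "deep" the dict below is keyed by "deepCoadd_det".
#
#     class ExampleMergeTask(CmdLineTask):
#         getSchemaCatalogs = _makeGetSchemaCatalogs("det")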


def makeMergeArgumentParser(name, dataset):
    """!
    @brief Create a suitable ArgumentParser.

    We will use the ArgumentParser to get a list of data
    references for patches; the RunnerClass will sort them into lists
    of data references for the same patch.
    """
    parser = ArgumentParser(name)
    parser.add_id_argument("--id", "deepCoadd_" + dataset,
                           ContainerClass=ExistingCoaddDataIdContainer,
                           help="data ID, e.g. --id tract=12345 patch=1,2 filter=g^r^i")
    return parser
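
# A usage sketch: the resulting parser accepts multi-band data IDs on the
# command line, matching the help text above (hypothetical repository path):
#
#     mergeCoaddDetections.py /path/to/repo --id tract=12345 patch=1,2 filter=g^r^i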


def getInputSchema(task, butler=None, schema=None):
    """!
    @brief Obtain the input schema either directly or from a butler reference.

    @param[in] butler butler reference to obtain the input schema from
    @param[in] schema the input schema
    """
    if schema is None:
        assert butler is not None, "Neither butler nor schema specified"
        schema = butler.get(task.config.coaddName + "Coadd_" + task.inputDataset + "_schema",
                            immediate=True).schema
    return schema
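
# A minimal sketch of the two call modes, assuming a task with
# config.coaddName == "deep" and inputDataset == "det" (hypothetical values):
#
#     schema = getInputSchema(task, schema=knownSchema)  # returned unchanged
#     schema = getInputSchema(task, butler=butler)       # reads "deepCoadd_det_schema"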


def readCatalog(task, patchRef):
    """!
    @brief Read input catalog.

    We read the input dataset provided by the 'inputDataset'
    class variable.

    @param[in] patchRef data reference for patch
    @return tuple consisting of the band name and the catalog
    """
    band = patchRef.get(task.config.coaddName + "Coadd_filterLabel", immediate=True).bandLabel
    catalog = patchRef.get(task.config.coaddName + "Coadd_" + task.inputDataset, immediate=True)
    task.log.info("Read %d sources for band %s: %s" % (len(catalog), band, patchRef.dataId))
    return band, catalog
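
# A usage sketch, assuming `patchRef` points at a patch with g-band data:
#
#     band, catalog = readCatalog(task, patchRef)  # e.g. ("g", SourceCatalog)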


class CullPeaksConfig(Config):
    """!
    @anchor CullPeaksConfig_

    @brief Configuration for culling garbage peaks after merging footprints.

    Peaks may also be culled after detection or during deblending; this configuration object
    only deals with culling after merging Footprints.

    These cuts are based on three quantities:
    - nBands: the number of bands in which the peak was detected
    - peakRank: the position of the peak within its family, sorted from brightest to faintest.
    - peakRankNormalized: the peak rank divided by the total number of peaks in the family.

    The formula that identifies peaks to cull (see the sketch following this class) is:

      nBands < nBandsSufficient
      AND (rank >= rankSufficient)
      AND (rank >= rankConsidered OR rankNormalized >= rankNormalizedConsidered)

    To disable peak culling, simply set nBandsSufficient=1.
    """

    nBandsSufficient = RangeField(dtype=int, default=2, min=1,
                                  doc="Always keep peaks detected in this many bands")
    rankSufficient = RangeField(dtype=int, default=20, min=1,
                                doc="Always keep this many peaks in each family")
    rankConsidered = RangeField(dtype=int, default=30, min=1,
                                doc=("Keep peaks with less than this rank that also match the "
                                     "rankNormalizedConsidered condition."))
    rankNormalizedConsidered = RangeField(dtype=float, default=0.7, min=0.0,
                                          doc=("Keep peaks with less than this normalized rank that"
                                               " also match the rankConsidered condition."))


def _makeMakeIdFactory(datasetName):
    """Construct a makeIdFactory instance method.

    These are identical for all the classes here, so this consolidates
    the code.

    Parameters
    ----------
    datasetName: str
        Dataset name without the coadd name prefix, e.g., "CoaddId" for "deepCoaddId"
    """

    def makeIdFactory(self, dataRef):
        """Return an IdFactory for setting the detection identifiers.

        The actual parameters used in the IdFactory are provided by
        the butler (through the provided data reference).
        """
        expBits = dataRef.get(self.config.coaddName + datasetName + "_bits")
        expId = int(dataRef.get(self.config.coaddName + datasetName))
        return afwTable.IdFactory.makeSource(expId, 64 - expBits)
    return makeIdFactory
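
# A usage sketch (hypothetical task class and bit width): with
# "deepCoaddId_bits" == 34, the returned factory reserves the low
# 64 - 34 = 30 bits for a running per-source counter, so source IDs are
# (expId << 30) + counter.
#
#     class ExampleDetectionTask(CmdLineTask):
#         makeIdFactory = _makeMakeIdFactory("CoaddId")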