import lsst.afw.image as afwImage
import lsst.afw.table as afwTable
import lsst.pex.exceptions as pexExceptions

from lsst.coadd.utils import ExistingCoaddDataIdContainer
from lsst.pipe.base import TaskRunner, ArgumentParser
from lsst.pex.config import Config, RangeField



class MergeSourcesRunner(TaskRunner):
    """Task runner for `MergeDetectionTask` and `MergeMeasurementTask`.

    Required because the run method requires a list of
    dataRefs rather than a single dataRef.
    """

    def makeTask(self, parsedCmd=None, args=None):
        """Provide a butler to the Task constructor.

        Parameters
        ----------
        parsedCmd:
            The parsed command
        args: tuple
            Tuple of a list of data references and kwargs (unused)

        Raises
        ------
        RuntimeError
            Raised if both `parsedCmd` and `args` are `None`
        """
        if parsedCmd is not None:
            butler = parsedCmd.butler
        elif args is not None:
            dataRefList, kwargs = args
            butler = dataRefList[0].getButler()
        else:
            raise RuntimeError("Neither parsedCmd nor args specified")
        return self.TaskClass(config=self.config, log=self.log, butler=butler)
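
    # A minimal sketch (variable names hypothetical) of the two ways this can
    # be invoked, per the branches above: with a parsed command, or with the
    # (dataRefList, kwargs) tuple the runner passes around:
    #
    #     task = runner.makeTask(parsedCmd=parsedCmd)     # butler from the command line
    #     task = runner.makeTask(args=(dataRefList, {}))  # butler from the first dataRef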

    @staticmethod
    def buildRefDict(parsedCmd):
        """Build a hierarchical dictionary of patch references.

        Parameters
        ----------
        parsedCmd:
            The parsed command

        Returns
        -------
        refDict: dict
            A reference dictionary of the form {tract: {patch: {filter: dataRef}}}

        Raises
        ------
        RuntimeError
            Raised when multiple references are provided for the same
            combination of tract, patch and filter
        """
        refDict = {}  # Will index this as refDict[tract][patch][filter] = ref
        for ref in parsedCmd.id.refList:
            tract = ref.dataId["tract"]
            patch = ref.dataId["patch"]
            filter = ref.dataId["filter"]
            if tract not in refDict:
                refDict[tract] = {}
            if patch not in refDict[tract]:
                refDict[tract][patch] = {}
            if filter in refDict[tract][patch]:
                raise RuntimeError("Multiple versions of %s" % (ref.dataId,))
            refDict[tract][patch][filter] = ref
        return refDict
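
    # A sketch of the structure buildRefDict returns, for hypothetical data
    # references covering one tract, two patches and two filters:
    #
    #     {9813: {"5,5": {"HSC-G": ref1, "HSC-I": ref2},
    #             "5,6": {"HSC-G": ref3}}}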

    @staticmethod
    def getTargetList(parsedCmd, **kwargs):
        """Provide a list of patch references for each tract and patch.

        Each entry in the returned list groups the data references for all
        filters of a single patch.

        Parameters
        ----------
        parsedCmd:
            The parsed command
        kwargs:
            Keyword arguments passed to the task

        Returns
        -------
        targetList: list
            List of tuples, where each tuple is a (dataRefList, kwargs) pair.
        """
        refDict = MergeSourcesRunner.buildRefDict(parsedCmd)
        return [(list(p.values()), kwargs) for t in refDict.values() for p in t.values()]
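
# Continuing the (hypothetical) buildRefDict sketch above, getTargetList
# yields one target per patch, pairing each per-filter reference list with
# the keyword arguments:
#
#     [([ref1, ref2], kwargs), ([ref3], kwargs)]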


def _makeGetSchemaCatalogs(datasetSuffix):
    """Construct a getSchemaCatalogs instance method.

    These are identical for most of the classes here, so we'll consolidate
    the code.

    datasetSuffix: Suffix of dataset name, e.g., "src" for "deepCoadd_src"
    """

    def getSchemaCatalogs(self):
        """Return a dict of empty catalogs for each catalog dataset produced by this task."""
        src = afwTable.SourceCatalog(self.schema)
        if hasattr(self, "algMetadata"):
            src.getTable().setMetadata(self.algMetadata)
        return {self.config.coaddName + "Coadd_" + datasetSuffix: src}
    return getSchemaCatalogs
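
# A minimal sketch (task class name hypothetical) of how this factory is
# intended to be used: assign the generated function in a class body so it
# becomes that task's getSchemaCatalogs method:
#
#     class SomeMergeTask(CmdLineTask):
#         getSchemaCatalogs = _makeGetSchemaCatalogs("det")  # "deepCoadd_det"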


def makeMergeArgumentParser(name, dataset):
    """!
    @brief Create a suitable ArgumentParser.

    We will use the ArgumentParser to provide a list of data
    references for patches; the RunnerClass will sort them into lists
    of data references for the same patch.
    """
    parser = ArgumentParser(name)
    parser.add_id_argument("--id", "deepCoadd_" + dataset,
                           ContainerClass=ExistingCoaddDataIdContainer,
                           help="data ID, e.g. --id tract=12345 patch=1,2 filter=g^r^i")
    return parser
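
# For example (task name and dataset suffix hypothetical), a merge task
# could build a parser keyed on the "deepCoadd_det" dataset:
#
#     parser = makeMergeArgumentParser("mergeCoaddDetections", "det")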


def getInputSchema(task, butler=None, schema=None):
    """!
    @brief Obtain the input schema either directly or from a butler reference.

    @param[in] butler butler reference to obtain the input schema from
    @param[in] schema the input schema
    """
    if schema is None:
        assert butler is not None, "Neither butler nor schema specified"
        schema = butler.get(task.config.coaddName + "Coadd_" + task.inputDataset + "_schema",
                            immediate=True).schema
    return schema
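
# A sketch (variable names hypothetical) of the two call patterns: an
# explicit schema is returned unchanged, otherwise the
# "<coaddName>Coadd_<inputDataset>_schema" dataset is read from the butler:
#
#     schema = getInputSchema(task, schema=knownSchema)  # returned as-is
#     schema = getInputSchema(task, butler=butler)       # fetched via the butler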


# TODO: DM-27170 should remove this function.
def getShortFilterName(name):
    """Given a longer, camera-specific filter name (e.g. "HSC-I") return its shorthand name ("i").
    """
    try:
        return afwImage.Filter(name).getCanonicalName()
    except pexExceptions.NotFoundError:
        # No mapping could be found, try proceeding with given name
        return name
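
# For example, the camera-specific name "HSC-I" maps to its canonical short
# name "i", while a name the filter registry does not know (say "MY-FILTER")
# is returned unchanged:
#
#     getShortFilterName("HSC-I")      # -> "i"
#     getShortFilterName("MY-FILTER")  # -> "MY-FILTER"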


def readCatalog(task, patchRef):
    """!
    @brief Read input catalog.

    We read the input dataset provided by the 'inputDataset'
    class variable.

    @param[in] patchRef data reference for patch
    @return tuple consisting of the filter name and the catalog
    """
    filterName = patchRef.dataId["filter"]
    catalog = patchRef.get(task.config.coaddName + "Coadd_" + task.inputDataset, immediate=True)
    task.log.info("Read %d sources for filter %s: %s" % (len(catalog), filterName, patchRef.dataId))
    return filterName, catalog
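
# A sketch (task and patchRefList hypothetical) of reading the per-filter
# input catalogs for a merge step into a {filterName: catalog} dict:
#
#     catalogs = dict(readCatalog(task, patchRef) for patchRef in patchRefList)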


class CullPeaksConfig(Config):
    """!
    @anchor CullPeaksConfig_

    @brief Configuration for culling garbage peaks after merging footprints.

    Peaks may also be culled after detection or during deblending; this configuration object
    only deals with culling after merging Footprints.

    These cuts are based on three quantities:
    - nBands: the number of bands in which the peak was detected
    - peakRank: the position of the peak within its family, sorted from brightest to faintest.
    - peakRankNormalized: the peak rank divided by the total number of peaks in the family.

    The formula that identifies peaks to cull is:

        nBands < nBandsSufficient
        AND (peakRank >= rankSufficient)
        AND (peakRank >= rankConsidered OR peakRankNormalized >= rankNormalizedConsidered)

    To disable peak culling, simply set nBandsSufficient=1.
    """

    nBandsSufficient = RangeField(dtype=int, default=2, min=1,
                                  doc="Always keep peaks detected in this many bands")
    rankSufficient = RangeField(dtype=int, default=20, min=1,
                                doc="Always keep this many peaks in each family")
    rankConsidered = RangeField(dtype=int, default=30, min=1,
                                doc=("Keep peaks with less than this rank that also match the "
                                     "rankNormalizedConsidered condition."))
    rankNormalizedConsidered = RangeField(dtype=float, default=0.7, min=0.0,
                                          doc=("Keep peaks with less than this normalized rank that"
                                               " also match the rankConsidered condition."))


def _makeMakeIdFactory(datasetName):
    """Construct a makeIdFactory instance method.

    These are identical for all the classes here, so this consolidates
    the code.

    datasetName: Dataset name without the coadd name prefix, e.g., "CoaddId" for "deepCoaddId"
    """

    def makeIdFactory(self, dataRef):
        """Return an IdFactory for setting the detection identifiers.

        The actual parameters used in the IdFactory are provided by
        the butler (through the provided data reference).
        """
        expBits = dataRef.get(self.config.coaddName + datasetName + "_bits")
        expId = int(dataRef.get(self.config.coaddName + datasetName))
        return afwTable.IdFactory.makeSource(expId, 64 - expBits)
    return makeIdFactory
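
# Like _makeGetSchemaCatalogs, this factory is meant to be used in a class
# body (class name hypothetical), making the generated function that task's
# makeIdFactory method:
#
#     class SomeCoaddTask(CmdLineTask):
#         makeIdFactory = _makeMakeIdFactory("CoaddId")  # reads "deepCoaddId"/"deepCoaddId_bits"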