Coverage for python/lsst/ap/pipe/metrics.py: 50%

76 statements  

coverage.py v7.5.1, created at 2024-05-10 03:49 -0700

# This file is part of ap_pipe.
#
# Developed for the LSST Data Management System.
# This product includes software developed by the LSST Project
# (http://www.lsst.org).
# See the COPYRIGHT file at the top-level directory of this distribution
# for details of code ownership.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#

"""Metrics for ap_pipe tasks.
"""

__all__ = [
    "ApFakesCompletenessMetricTask", "ApFakesCompletenessMetricConfig",
    "ApFakesCountMetricTask", "ApFakesCountMetricConfig",
    "PipelineTimingMetricTask", "PipelineTimingMetricConfig",
]

import astropy.units as u
from datetime import datetime
import numpy as np

import lsst.pex.config as pexConfig
from lsst.pipe.base import Struct, NoWorkFound
import lsst.pipe.base.connectionTypes as connTypes
from lsst.pipe.tasks.insertFakes import InsertFakesConfig
from lsst.verify import Measurement, Datum
from lsst.verify.tasks import AbstractMetadataMetricTask, MetricTask, MetricComputationError


class ApFakesCompletenessMetricConnections(
        MetricTask.ConfigClass.ConnectionsClass,
        dimensions={"instrument", "visit", "detector", "band"},
        defaultTemplates={"coaddName": "deep",
                          "fakesType": "fakes_",
                          "package": "ap_pipe",
                          "metric": "apFakesCompleteness"}):
    """ApFakesCompleteness connections.
    """
    matchedFakes = connTypes.Input(
        doc="Fakes matched to their detections in the difference image.",
        name="{fakesType}{coaddName}Diff_matchDiaSrc",
        storageClass="DataFrame",
        dimensions=("instrument", "visit", "detector"),
    )


# Inherits from InsertFakesConfig to preserve column names in the fakes
# catalog.
class ApFakesCompletenessMetricConfig(
        MetricTask.ConfigClass,
        InsertFakesConfig,
        pipelineConnections=ApFakesCompletenessMetricConnections):
    """ApFakesCompleteness config.
    """
    magMin = pexConfig.RangeField(
        doc="Minimum magnitude of the range used to compute completeness.",
        dtype=float,
        default=20,
        min=1,
        max=40,
    )
    magMax = pexConfig.RangeField(
        doc="Maximum magnitude of the range used to compute completeness.",
        dtype=int,
        default=30,
        min=1,
        max=40,
    )
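

# A minimal configuration sketch (the values below are illustrative, not the
# defaults defined above): restrict the completeness measurement to fakes
# between 21st and 25th magnitude.
#
#     config = ApFakesCompletenessMetricConfig()
#     config.magMin = 21
#     config.magMax = 25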


class ApFakesCompletenessMetricTask(MetricTask):
    """Metric task for summarizing the completeness of fakes inserted into the
    AP pipeline.
    """
    _DefaultName = "apFakesCompleteness"
    ConfigClass = ApFakesCompletenessMetricConfig

    def runQuantum(self, butlerQC, inputRefs, outputRefs):
        """Do Butler I/O to provide in-memory objects for run.

        This specialization of runQuantum passes the band ID to `run`.
        """
        inputs = butlerQC.get(inputRefs)
        inputs["band"] = butlerQC.quantum.dataId["band"]
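        # The band is forwarded to run(), which uses it to resolve the
        # per-band fake-magnitude column (self.config.mag_col).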

        outputs = self.run(**inputs)
        if outputs.measurement is not None:
            butlerQC.put(outputs, outputRefs)
        else:
            self.log.debug("Skipping measurement of %r on %s "
                           "as not applicable.", self, inputRefs)

    def run(self, matchedFakes, band):
        """Compute the completeness of recovered fakes within a magnitude
        range.

        Parameters
        ----------
        matchedFakes : `lsst.afw.table.SourceCatalog`
            Catalog of fakes that were inserted into the ccdExposure, matched
            to their detected counterparts.
        band : `str`
            Name of the band whose magnitudes are to be analyzed.

        Returns
        -------
        result : `lsst.pipe.base.Struct`
            A `~lsst.pipe.base.Struct` containing the following component:

            ``measurement``
                the ratio of recovered to inserted fakes
                (`lsst.verify.Measurement` or `None`)
        """
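        # Completeness is the fraction of inserted fakes within the
        # configured magnitude range that were matched to a DiaSource
        # (matched rows have diaSourceId > 0).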

        magnitudes = np.fabs(matchedFakes[self.config.mag_col % band])
        magCutFakes = matchedFakes[np.logical_and(magnitudes > self.config.magMin,
                                                  magnitudes < self.config.magMax)]
        if len(magCutFakes) == 0:
            raise MetricComputationError(
                "No matched fakes catalog sources found; completeness is "
                "ill-defined.")
        else:
            meas = Measurement(
                self.config.metricName,
                ((magCutFakes["diaSourceId"] > 0).sum() / len(magCutFakes))
                * u.dimensionless_unscaled)
        return Struct(measurement=meas)


class ApFakesCountMetricConnections(
        ApFakesCompletenessMetricConnections,
        dimensions={"instrument", "visit", "detector", "band"},
        defaultTemplates={"coaddName": "deep",
                          "fakesType": "fakes_",
                          "package": "ap_pipe",
                          "metric": "apFakesCount"}):
    pass


class ApFakesCountMetricConfig(
        ApFakesCompletenessMetricConfig,
        pipelineConnections=ApFakesCountMetricConnections):
    """ApFakesCount config.
    """
    pass


class ApFakesCountMetricTask(ApFakesCompletenessMetricTask):
    """Metric task for summarizing the number of fakes inserted into the
    AP pipeline.
    """
    _DefaultName = "apFakesCount"
    ConfigClass = ApFakesCountMetricConfig

    def run(self, matchedFakes, band):
        """Compute the number of fakes inserted within a magnitude
        range.

        Parameters
        ----------
        matchedFakes : `lsst.afw.table.SourceCatalog`
            Catalog of fakes that were inserted into the ccdExposure, matched
            to their detected counterparts.
        band : `str`
            Single-character name of the observed band for this quantum.

        Returns
        -------
        result : `lsst.pipe.base.Struct`
            A `~lsst.pipe.base.Struct` containing the following component:

            ``measurement``
                the count of inserted fakes (`lsst.verify.Measurement` or
                `None`)
        """
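        # Unlike the completeness metric, this counts every fake in the
        # magnitude range, whether or not it was matched to a DiaSource.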

        magnitudes = np.fabs(matchedFakes[self.config.mag_col % band])
        magCutFakes = matchedFakes[np.logical_and(magnitudes > self.config.magMin,
                                                  magnitudes < self.config.magMax)]
        meas = Measurement(self.config.metricName,
                           len(magCutFakes) * u.count)
        return Struct(measurement=meas)


class PipelineTimingMetricConnections(
        MetricTask.ConfigClass.ConnectionsClass,
        dimensions={"instrument", "visit", "detector"},
        defaultTemplates={"labelStart": "",
                          "labelEnd": "",
                          "package": "ap_pipe",
                          "metric": "ApPipelineTime"}):
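    # Note that the two metadata inputs have different dimensions: the
    # starting task is expected to run per exposure, while the final task
    # runs per visit.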

    metadataStart = connTypes.Input(
        name="{labelStart}_metadata",
        doc="The starting task's metadata.",
        storageClass="TaskMetadata",
        dimensions={"instrument", "exposure", "detector"},
        multiple=False,
    )
    metadataEnd = connTypes.Input(
        name="{labelEnd}_metadata",
        doc="The final task's metadata.",
        storageClass="TaskMetadata",
        dimensions={"instrument", "visit", "detector"},
        multiple=False,
    )


class PipelineTimingMetricConfig(MetricTask.ConfigClass,
                                 pipelineConnections=PipelineTimingMetricConnections):
    # Don't include the dimensions hack that MetadataMetricConfig has; unlike
    # TimingMetricTask, this task is not designed to be run on multiple
    # pipelines.
    targetStart = pexConfig.Field(
        dtype=str,
        doc="The method to take as the starting point of the starting task, "
            "optionally prefixed by one or more task names in the format of "
            "`lsst.pipe.base.Task.getFullMetadata()`.")
    targetEnd = pexConfig.Field(
        dtype=str,
        doc="The method to take as the stopping point of the final task, "
            "optionally prefixed by one or more task names in the format of "
            "`lsst.pipe.base.Task.getFullMetadata()`.")
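

# A minimal, illustrative configuration sketch. The task labels ("isr",
# "diaPipe") and the assumption that the timed method on each task is
# runQuantum are examples only, not taken from this file:
#
#     config = PipelineTimingMetricConfig()
#     config.connections.labelStart = "isr"      # first task in the pipeline
#     config.connections.labelEnd = "diaPipe"    # last task in the pipeline
#     config.targetStart = "isr.runQuantum"
#     config.targetEnd = "diaPipe.runQuantum"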


class PipelineTimingMetricTask(AbstractMetadataMetricTask):
    """A Task that computes a wall-clock time for an entire pipeline, using
    metadata produced by the `lsst.utils.timer.timeMethod` decorator.

    Parameters
    ----------
    args
    kwargs
        Constructor parameters are the same as for
        `lsst.verify.tasks.MetricTask`.
    """

    _DefaultName = "pipelineTimingMetric"
    ConfigClass = PipelineTimingMetricConfig

    @classmethod
    def getInputMetadataKeys(cls, config):
        """Get search strings for the metadata.

        Parameters
        ----------
        config : ``cls.ConfigClass``
            Configuration for this task.

        Returns
        -------
        keys : `dict`
            A dictionary of keys, optionally prefixed by one or more tasks in
            the format of `lsst.pipe.base.Task.getFullMetadata()`.

            ``"StartTimestamp"``
                The key for an ISO 8601-compliant text string giving the time
                at which the target pipeline started (`str`).
            ``"EndTimestamp"``
                The key for an ISO 8601-compliant text string giving the time
                at which the target pipeline ended (`str`).
        """
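        # `timeMethod` records metadata entries of the form "<method>StartUtc"
        # and "<method>EndUtc", so the search strings are built from the
        # configured method names.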

        return {"StartTimestamp": config.targetStart + "StartUtc",
                "EndTimestamp": config.targetEnd + "EndUtc",
                }

    def run(self, metadataStart, metadataEnd):
        """Compute the pipeline wall-clock time from science task metadata.

        Parameters
        ----------
        metadataStart : `lsst.pipe.base.TaskMetadata`
            A metadata object for the first quantum run by the pipeline.
        metadataEnd : `lsst.pipe.base.TaskMetadata`
            A metadata object for the last quantum run by the pipeline.

        Returns
        -------
        result : `lsst.pipe.base.Struct`
            A `~lsst.pipe.base.Struct` containing the following component:

            - ``measurement``: the value of the metric
              (`lsst.verify.Measurement` or `None`)

        Raises
        ------
        lsst.verify.tasks.MetricComputationError
            Raised if the strings returned by `getInputMetadataKeys` match
            more than one key in either metadata object.
        lsst.pipe.base.NoWorkFound
            Raised if the metric is ill-defined. Typically this means that at
            least one pipeline step was not run.
        """
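        # extractMetadata maps each search string to its metadata value, or
        # to None if the string does not match any key.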

        metadataKeys = self.getInputMetadataKeys(self.config)
        timingsStart = self.extractMetadata(metadataStart, metadataKeys)
        timingsEnd = self.extractMetadata(metadataEnd, metadataKeys)

        if timingsStart["StartTimestamp"] is None:
            raise NoWorkFound(f"Nothing to do: no timing information for {self.config.targetStart} found.")
        if timingsEnd["EndTimestamp"] is None:
            raise NoWorkFound(f"Nothing to do: no timing information for {self.config.targetEnd} found.")

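        # Both timestamps are ISO 8601 strings; the metric is simply their
        # difference in seconds.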

        try:
            startTime = datetime.fromisoformat(timingsStart["StartTimestamp"])
            endTime = datetime.fromisoformat(timingsEnd["EndTimestamp"])
        except (TypeError, ValueError) as e:
            raise MetricComputationError("Invalid metadata") from e
        else:
            totalTime = (endTime - startTime).total_seconds()
            meas = Measurement(self.config.metricName, totalTime * u.second)
            meas.notes["estimator"] = "utils.timer.timeMethod"
            meas.extras["start"] = Datum(timingsStart["StartTimestamp"])
            meas.extras["end"] = Datum(timingsEnd["EndTimestamp"])
            return Struct(measurement=meas)