# tests/test_metricsController.py

# This file is part of verify.
#
# Developed for the LSST Data Management System.
# This product includes software developed by the LSST Project
# (https://www.lsst.org).
# See the COPYRIGHT file at the top-level directory of this distribution
# for details of code ownership.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <https://www.gnu.org/licenses/>.

import unittest.mock

import astropy.units as u
from astropy.tests.helper import assert_quantity_allclose

import lsst.utils.tests
from lsst.pex.config import Config, Field, FieldValidationError
from lsst.pipe.base import Task, Struct, connectionTypes
from lsst.verify import Job, Name, Measurement
from lsst.verify.tasks import MetricTask, MetricConfig, MetricConnections, \
    MetricComputationError
from lsst.verify.gen2tasks import \
    MetricsControllerTask, register, registerMultiple


def _metricName():
    return "misc_tasks.FancyMetric"


def _extraMetricName1():
    return "misc_tasks.SuperfluousMetric"


def _extraMetricName2():
    return "misc_tasks.RedundantMetric"


class DemoConnections(
        MetricConnections,
        defaultTemplates={"package": "misc_tasks", "metric": _metricName()},
        dimensions={}):
    inputData = connectionTypes.Input(
        name="metadata",
        storageClass="TaskMetadata",
    )


class DemoMetricConfig(MetricConfig,
                       pipelineConnections=DemoConnections):
    multiplier = Field(
        dtype=float,
        default=1.0,
        doc="Arbitrary factor for measurement")


@register("demoMetric")
class _DemoMetricTask(MetricTask):
    """A minimal `lsst.verify.tasks.MetricTask`.
    """

    ConfigClass = DemoMetricConfig
    _DefaultName = "test"

    def run(self, inputData):
        nData = len(inputData)
        return Struct(measurement=Measurement(
            self.config.metricName,
            self.config.multiplier * nData * u.second))

    @classmethod
    def getInputDatasetTypes(cls, _config):
        return {'inputData': "metadata"}

    @classmethod
    def areInputDatasetsScalar(cls, _config):
        return {'inputData': False}
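
# A sketch of what _DemoMetricTask.run produces, with hypothetical numbers:
# given multiplier=2.0 and an inputData sequence of three metadata objects,
# the returned Struct carries a Measurement of 2.0 * 3 * u.second == 6.0 s,
# recorded under self.config.metricName.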


@registerMultiple("repeatedMetric")
class _RepeatedMetricTask(MetricTask):
    """A minimal `lsst.verify.tasks.MetricTask`, registered so that
    multiple configured copies can run.
    """

    ConfigClass = DemoMetricConfig
    _DefaultName = "test"

    def run(self, inputData):
        nData = len(inputData)
        return Struct(measurement=Measurement(
            self.config.metricName,
            self.config.multiplier * nData * u.second))

    @classmethod
    def getInputDatasetTypes(cls, _config):
        return {'inputData': "metadata"}

    @classmethod
    def areInputDatasetsScalar(cls, _config):
        return {'inputData': False}
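
# The two registrations differ in how they surface in the controller config
# (see setUp below): @register("demoMetric") yields a single DemoMetricConfig
# in config.measurers, while @registerMultiple("repeatedMetric") yields a
# wrapper whose .configs dict holds one DemoMetricConfig per named copy.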


def _makeMockDataref(dataId=None):
    """Return a dataref-like object with a specific data ID.
    """
    return unittest.mock.NonCallableMock(dataId=dataId)


class _TestMetadataAdder(Task):
    """Simplest valid non-identity metadata adder.
    """
    ConfigClass = Config

    def run(self, job, **kwargs):
        job.meta["tested"] = True
        return Struct(job=job)
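
# setUp below retargets MetricsControllerTask's metadataAdder subtask to
# _TestMetadataAdder, so every Job the controller produces should carry
# meta["tested"] == True; the tests use that flag to confirm the adder ran.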


def _butlerQuery(_butler, _datasetType, _level="", dataId=None):
    """Return a number of datarefs corresponding to a (partial) dataId.
    """
    dataref = _makeMockDataref()

    # Simulate a dataset of 3 visits and 2 CCDs.
    nRuns = 1
    if "visit" not in dataId:
        nRuns *= 3
    if "ccd" not in dataId:
        nRuns *= 2
    return [dataref] * nRuns
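
# A sanity sketch of the fake query above; the counts follow from the
# simulated 3-visit x 2-CCD dataset (positional arguments are placeholders):
#
#   len(_butlerQuery(None, "metadata", dataId={}))                  -> 6
#   len(_butlerQuery(None, "metadata", dataId={"visit": 42}))       -> 2
#   len(_butlerQuery(None, "metadata",
#                    dataId={"visit": 42, "ccd": 101}))             -> 1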


@unittest.mock.patch.object(Job, "load_metrics_package", side_effect=Job)
@unittest.mock.patch("lsst.daf.persistence.searchDataRefs", autospec=True,
                     side_effect=_butlerQuery)
@unittest.mock.patch("lsst.verify.Job.write", autospec=True)
class MetricsControllerTestSuite(lsst.utils.tests.TestCase):

    def setUp(self):
        self.config = MetricsControllerTask.ConfigClass()
        self.config.metadataAdder.retarget(_TestMetadataAdder)
        self.config.measurers = ["demoMetric", "repeatedMetric"]

        self.config.measurers["demoMetric"].multiplier = 2.0
        repeated = self.config.measurers["repeatedMetric"]
        repeated.configs["first"] = DemoMetricConfig()
        repeated.configs["first"].connections.metric = _extraMetricName1()
        repeated.configs["second"] = DemoMetricConfig()
        repeated.configs["second"].connections.metric = _extraMetricName2()
        repeated.configs["second"].multiplier = 3.4

        self.task = MetricsControllerTask(self.config)
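
        # Net effect of the configuration above (names and multipliers as
        # set in this method): each dataref is measured by three tasks:
        #   demoMetric            -> misc_tasks.FancyMetric,       x2.0
        #   repeatedMetric first  -> misc_tasks.SuperfluousMetric, x1.0
        #   repeatedMetric second -> misc_tasks.RedundantMetric,   x3.4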

    def _allMetricTaskConfigs(self):
        configs = []
        for name, topConfig in zip(self.config.measurers.names,
                                   self.config.measurers.active):
            if name != "repeatedMetric":
                configs.append(topConfig)
            else:
                configs.extend(topConfig.configs.values())
        return configs

    def _checkMetric(self, mockWriter, datarefs, unitsOfWork):
        """Standardized test battery for running a metric.

        Parameters
        ----------
        mockWriter : `unittest.mock.CallableMock`
            A queryable placeholder for `lsst.verify.Job.write`.
        datarefs : `list` of `lsst.daf.persistence.ButlerDataRef`
            The inputs to `MetricsControllerTask.runDataRefs`.
        unitsOfWork : `list` of `int`
            The number of science pipeline units of work (i.e., CCD-visit
            pairs) that should be combined to make a metric for each
            element of ``datarefs``.
        """
        if len(datarefs) != len(unitsOfWork):
            raise ValueError("Test requires matching datarefs "
                             "and unitsOfWork")

        jobs = self.task.runDataRefs(datarefs).jobs
        self.assertEqual(len(jobs), len(datarefs))
        for job, dataref, nTimings in zip(jobs, datarefs, unitsOfWork):
            taskConfigs = self._allMetricTaskConfigs()
            self.assertEqual(len(job.measurements), len(taskConfigs))
            for metricName, metricConfig in zip(job.measurements,
                                                taskConfigs):
                configuredName = Name(metricConfig.connections.package,
                                      metricConfig.connections.metric)
                self.assertEqual(metricName, configuredName)
                assert_quantity_allclose(
                    job.measurements[metricName].quantity,
                    metricConfig.multiplier * float(nTimings) * u.second)

            self.assertTrue(job.meta["tested"])

        # The exact arguments to Job.write are an implementation detail,
        # so check only the number of calls.
        if not jobs:
            mockWriter.assert_not_called()
        elif len(jobs) == 1:
            mockWriter.assert_called_once()
        else:
            self.assertEqual(mockWriter.call_count, len(jobs))

    def testCcdGrainedMetric(self, mockWriter, _mockButler,
                             _mockMetricsLoader):
        dataId = {"visit": 42, "ccd": 101, "filter": "k"}
        datarefs = [_makeMockDataref(dataId)]
        self._checkMetric(mockWriter, datarefs, unitsOfWork=[1])

    def testVisitGrainedMetric(self, mockWriter, _mockButler,
                               _mockMetricsLoader):
        dataId = {"visit": 42, "filter": "k"}
        datarefs = [_makeMockDataref(dataId)]
        self._checkMetric(mockWriter, datarefs, unitsOfWork=[2])

    def testDatasetGrainedMetric(self, mockWriter, _mockButler,
                                 _mockMetricsLoader):
        dataId = {}
        datarefs = [_makeMockDataref(dataId)]
        self._checkMetric(mockWriter, datarefs, unitsOfWork=[6])

    def testMultipleMetrics(self, mockWriter, _mockButler,
                            _mockMetricsLoader):
        dataIds = [{"visit": 42, "ccd": 101, "filter": "k"},
                   {"visit": 42, "ccd": 102, "filter": "k"}]
        datarefs = [_makeMockDataref(dataId) for dataId in dataIds]
        self._checkMetric(mockWriter, datarefs,
                          unitsOfWork=[1] * len(dataIds))

    def testSkippedMetrics(self, mockWriter, _mockButler, _mockMetricsLoader):
        dataIds = [{"visit": 42, "ccd": 101, "filter": "k"},
                   {"visit": 42, "ccd": 102, "filter": "k"}]
        datarefs = [_makeMockDataref(dataId) for dataId in dataIds]

        # The isfile mock makes the first dataref look already measured.
        # Without skipExisting, both datarefs are processed and written.
        with unittest.mock.patch("os.path.isfile", side_effect=[True, False]):
            jobs = self.task.runDataRefs(datarefs).jobs
            self.assertEqual(len(jobs), 2)
            self.assertEqual(mockWriter.call_count, 2)

        mockWriter.reset_mock()

        # With skipExisting, the "existing" first dataref is skipped.
        with unittest.mock.patch("os.path.isfile", side_effect=[True, False]):
            jobs = self.task.runDataRefs(datarefs, skipExisting=True).jobs
            self.assertEqual(len(jobs), 1)
            mockWriter.assert_called_once()

    def testInvalidMetricSegregation(self, _mockWriter, _mockButler,
                                     _mockMetricsLoader):
        self.config.measurers = ["demoMetric"]
        self.task = MetricsControllerTask(self.config)
        with unittest.mock.patch.object(_DemoMetricTask,
                                        "adaptArgsAndRun") as mockCall:
            # Run _DemoMetricTask twice, with one failure and one result.
            mockCall.side_effect = (MetricComputationError,
                                    unittest.mock.DEFAULT)
            expectedValue = 1.0 * u.second
            mockCall.return_value = Struct(measurement=Measurement(
                _metricName(), expectedValue))

            dataIds = [{"visit": 42, "ccd": 101, "filter": "k"},
                       {"visit": 42, "ccd": 102, "filter": "k"}]
            datarefs = [_makeMockDataref(dataId) for dataId in dataIds]

            jobs = self.task.runDataRefs(datarefs).jobs
            self.assertEqual(len(jobs), len(datarefs))

            # Failed job
            self.assertEqual(len(jobs[0].measurements), 0)

            # Successful job
            self.assertTrue(jobs[1].meta["tested"])
            self.assertEqual(len(jobs[1].measurements), 1)
            assert_quantity_allclose(
                jobs[1].measurements[_metricName()].quantity,
                expectedValue)

    def testNoData(self, mockWriter, _mockButler, _mockMetricsLoader):
        datarefs = []
        self._checkMetric(mockWriter, datarefs, unitsOfWork=[])

    def testBadMetric(self, _mockWriter, _mockButler, _mockMetricsLoader):
        with self.assertRaises(FieldValidationError):
            self.config.measurers = ["totallyAndDefinitelyNotARealMetric"]

    def testCustomMetadata(self, _mockWriter, _mockButler,
                           _mockMetricsLoader):
        dataIds = [{"visit": 42, "ccd": 101, "filter": "k"},
                   {"visit": 42, "ccd": 102, "filter": "k"}]
        datarefs = [_makeMockDataref(dataId) for dataId in dataIds]
        extraMetadata = {"test_protocol": 42}
        jobs = self.task.runDataRefs(datarefs, extraMetadata).jobs

        for job in jobs:
            self.assertTrue(job.meta["tested"])
            self.assertEqual(job.meta["test_protocol"],
                             extraMetadata["test_protocol"])


class MemoryTester(lsst.utils.tests.MemoryTestCase):
    pass


def setup_module(module):
    lsst.utils.tests.init()


if __name__ == "__main__":
    lsst.utils.tests.init()
    unittest.main()
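
# A usage note (assumed local setup): this module can be run directly,
# e.g. `python tests/test_metricsController.py`, or collected by pytest,
# which calls setup_module above before running the suite.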