# This file is part of verify.
#
# Developed for the LSST Data Management System.
# This product includes software developed by the LSST Project
# (https://www.lsst.org).
# See the COPYRIGHT file at the top-level directory of this distribution
# for details of code ownership.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
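"""Tests for the metadata-based metric tasks in lsst.verify.tasks:
TimingMetricTask, CpuTimingMetricTask, and MemoryMetricTask.
"""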

import time
import unittest

import astropy.units as u

import lsst.utils.tests
import lsst.pipe.base.testUtils
from lsst.pex.config import Config
from lsst.pipe.base import Task
from lsst.utils.timer import timeMethod

from lsst.verify import Measurement, Name
from lsst.verify.tasks import MetricComputationError, TimingMetricTask, \
    CpuTimingMetricTask, MemoryMetricTask
from lsst.verify.tasks.testUtils import MetricTaskTestCase, MetadataMetricTestCase


class DummyTask(Task):
    ConfigClass = Config
    _DefaultName = "NotARealTask"
    taskLength = 0.1

    @timeMethod
    def run(self):
        time.sleep(self.taskLength)

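# The @timeMethod decorator on DummyTask.run stores profiling data in the
# task metadata, which the metric tasks under test read back. A hypothetical
# sketch of the entries involved, with key substrings inferred from the
# filters used in the tests below (exact names and values are illustrative):
#
#     NotARealTask:
#         runStartUtc / runEndUtc            wall clock   (TimingMetricTask)
#         runStartCpuTime / runEndCpuTime    CPU seconds  (CpuTimingMetricTask)
#         runStartMaxResidentSetSize / runEndMaxResidentSetSize
#                                            peak memory  (MemoryMetricTask)
#         plus a version entry whose key contains "__version__"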

class TimingMetricTestSuite(MetadataMetricTestCase):
    @classmethod
    def makeTask(cls):
        return TimingMetricTask(config=cls._standardConfig())

    @staticmethod
    def _standardConfig():
        config = TimingMetricTask.ConfigClass()
        config.connections.labelName = DummyTask._DefaultName
        config.target = DummyTask._DefaultName + ".run"
        config.connections.package = "verify"
        config.connections.metric = "DummyTime"
        return config
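
    # Assumed roles of the config fields above, inferred from their use in
    # this file: ``target`` names the "<task>.<method>" whose metadata entries
    # are read, while the ``connections`` fields select the input metadata
    # dataset and build the metric name checked in setUp ("verify.DummyTime").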

    def setUp(self):
        super().setUp()
        self.metric = Name("verify.DummyTime")

        self.scienceTask = DummyTask()
        self.scienceTask.run()

    def testValid(self):
        result = self.task.run(self.scienceTask.getFullMetadata())
        lsst.pipe.base.testUtils.assertValidOutput(self.task, result)
        meas = result.measurement

        self.assertIsInstance(meas, Measurement)
        self.assertEqual(meas.metric_name, self.metric)
        self.assertGreater(meas.quantity, 0.0 * u.second)

    def testRunDifferentMethod(self):
        config = self._standardConfig()
        config.target = DummyTask._DefaultName + ".runDataRef"
        task = TimingMetricTask(config=config)
        try:
            result = task.run(self.scienceTask.getFullMetadata())
        except lsst.pipe.base.NoWorkFound:
            # Raising NoWorkFound is correct behavior when the target
            # method was never run.
            pass
        else:
            # Returning a None measurement is the alternative correct behavior.
            lsst.pipe.base.testUtils.assertValidOutput(task, result)
            meas = result.measurement
            self.assertIsNone(meas)

    def testNonsenseKeys(self):
        metadata = self.scienceTask.getFullMetadata()
        startKeys = [key
                     for key in metadata.paramNames(topLevelOnly=False)
                     if "StartUtc" in key]
        for key in startKeys:
            del metadata[key]

        with self.assertRaises(MetricComputationError):
            self.task.run(metadata)

    def testBadlyTypedKeys(self):
        metadata = self.scienceTask.getFullMetadata()
        endKeys = [key
                   for key in metadata.paramNames(topLevelOnly=False)
                   if "EndUtc" in key]
        for key in endKeys:
            metadata[key] = 42

        with self.assertRaises(MetricComputationError):
            self.task.run(metadata)


class CpuTimingMetricTestSuite(MetadataMetricTestCase):
    @classmethod
    def makeTask(cls):
        return CpuTimingMetricTask(config=cls._standardConfig())

    @staticmethod
    def _standardConfig():
        config = CpuTimingMetricTask.ConfigClass()
        config.connections.labelName = DummyTask._DefaultName
        config.target = DummyTask._DefaultName + ".run"
        config.connections.package = "verify"
        config.connections.metric = "DummyCpuTime"
        return config

    def setUp(self):
        super().setUp()
        self.metric = Name("verify.DummyCpuTime")

        self.scienceTask = DummyTask()
        self.scienceTask.run()

    def testValid(self):
        result = self.task.run(self.scienceTask.getFullMetadata())
        lsst.pipe.base.testUtils.assertValidOutput(self.task, result)
        meas = result.measurement

        self.assertIsInstance(meas, Measurement)
        self.assertEqual(meas.metric_name, self.metric)
        self.assertGreater(meas.quantity, 0.0 * u.second)

        # CPU time should be less than wall-clock time: DummyTask spends most
        # of its runtime in time.sleep, which consumes almost no CPU.
        wallClock = TimingMetricTask(config=TimingMetricTestSuite._standardConfig())
        wallResult = wallClock.run(self.scienceTask.getFullMetadata())
        # Include a 0.1% margin for almost-equal values.
        self.assertLess(meas.quantity, 1.001 * wallResult.measurement.quantity)

    def testRunDifferentMethod(self):
        config = self._standardConfig()
        config.target = DummyTask._DefaultName + ".runDataRef"
        task = CpuTimingMetricTask(config=config)
        try:
            result = task.run(self.scienceTask.getFullMetadata())
        except lsst.pipe.base.NoWorkFound:
            # Raising NoWorkFound is correct behavior when the target
            # method was never run.
            pass
        else:
            # Returning a None measurement is the alternative correct behavior.
            lsst.pipe.base.testUtils.assertValidOutput(task, result)
            meas = result.measurement
            self.assertIsNone(meas)

    def testNonsenseKeys(self):
        metadata = self.scienceTask.getFullMetadata()
        startKeys = [key
                     for key in metadata.paramNames(topLevelOnly=False)
                     if "StartCpuTime" in key]
        for key in startKeys:
            del metadata[key]

        with self.assertRaises(MetricComputationError):
            self.task.run(metadata)

    def testBadlyTypedKeys(self):
        metadata = self.scienceTask.getFullMetadata()
        endKeys = [key
                   for key in metadata.paramNames(topLevelOnly=False)
                   if "EndCpuTime" in key]
        for key in endKeys:
            metadata[key] = str(float(metadata[key]))

        with self.assertRaises(MetricComputationError):
            self.task.run(metadata)


class MemoryMetricTestSuite(MetadataMetricTestCase):
    @classmethod
    def makeTask(cls):
        return MemoryMetricTask(config=cls._standardConfig())

    @staticmethod
    def _standardConfig():
        config = MemoryMetricTask.ConfigClass()
        config.connections.labelName = DummyTask._DefaultName
        config.target = DummyTask._DefaultName + ".run"
        config.connections.package = "verify"
        config.connections.metric = "DummyMemory"
        return config
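
    # MemoryMetricTask is assumed to read the "...MaxResidentSetSize" metadata
    # entries written by @timeMethod and to report the result in bytes; the
    # assertions in testValid and testBadlyTypedKeys below depend on exactly
    # those key names and units.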

    def setUp(self):
        super().setUp()
        self.metric = Name("verify.DummyMemory")

        self.scienceTask = DummyTask()
        self.scienceTask.run()

    def testValid(self):
        result = self.task.run(self.scienceTask.getFullMetadata())
        lsst.pipe.base.testUtils.assertValidOutput(self.task, result)
        meas = result.measurement

        self.assertIsInstance(meas, Measurement)
        self.assertEqual(meas.metric_name, self.metric)
        self.assertGreater(meas.quantity, 0.0 * u.byte)

    def testRunDifferentMethod(self):
        config = self._standardConfig()
        config.target = DummyTask._DefaultName + ".runDataRef"
        task = MemoryMetricTask(config=config)
        try:
            result = task.run(self.scienceTask.getFullMetadata())
        except lsst.pipe.base.NoWorkFound:
            # Raising NoWorkFound is correct behavior when the target
            # method was never run.
            pass
        else:
            # Returning a None measurement is the alternative correct behavior.
            lsst.pipe.base.testUtils.assertValidOutput(task, result)
            meas = result.measurement
            self.assertIsNone(meas)

    def testBadlyTypedKeys(self):
        metadata = self.scienceTask.getFullMetadata()
        endKeys = [key
                   for key in metadata.paramNames(topLevelOnly=False)
                   if "EndMaxResidentSetSize" in key]
        for key in endKeys:
            metadata[key] = str(float(metadata[key]))

        with self.assertRaises(MetricComputationError):
            self.task.run(metadata)

    def testOldMetadata(self):
        """Test compatibility with version 0 metadata.

        This can't actually test differences in unit handling between
        version 0 and version 1, but it at least verifies that the code
        doesn't choke on old-style metadata.
        """
        newMetadata = self.scienceTask.getFullMetadata()
        oldMetadata = newMetadata.model_copy()
        for key in newMetadata.names():
            if "__version__" in key:
                del oldMetadata[key]

        result = self.task.run(oldMetadata)
        lsst.pipe.base.testUtils.assertValidOutput(self.task, result)
        meas = result.measurement

        self.assertIsInstance(meas, Measurement)
        self.assertEqual(meas.metric_name, self.metric)

        # Since the new-style value is always in bytes, the old-style
        # measurement should be less than or equal to the new-style one.
        newResult = self.task.run(newMetadata)
        self.assertGreater(meas.quantity, 0.0 * u.byte)
        self.assertLessEqual(meas.quantity, newResult.measurement.quantity)


# Remove the imported test base classes so that unittest's test discovery
# does not try to run them directly; they are only meaningful as parents of
# the concrete suites above.
del MetricTaskTestCase
del MetadataMetricTestCase


class MemoryTester(lsst.utils.tests.MemoryTestCase):
    pass


def setup_module(module):
    lsst.utils.tests.init()


if __name__ == "__main__":
    lsst.utils.tests.init()
    unittest.main()
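# A typical invocation, assuming pytest is the test runner for this package:
#
#     pytest tests/test_commonMetrics.py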