Coverage for tests/test_commonMetrics.py: 24%

186 statements  

« prev     ^ index     » next       coverage.py v6.5.0, created at 2023-04-01 02:12 -0700

1# This file is part of verify. 

2# 

3# Developed for the LSST Data Management System. 

4# This product includes software developed by the LSST Project 

5# (https://www.lsst.org). 

6# See the COPYRIGHT file at the top-level directory of this distribution 

7# for details of code ownership. 

8# 

9# This program is free software: you can redistribute it and/or modify 

10# it under the terms of the GNU General Public License as published by 

11# the Free Software Foundation, either version 3 of the License, or 

12# (at your option) any later version. 

13# 

14# This program is distributed in the hope that it will be useful, 

15# but WITHOUT ANY WARRANTY; without even the implied warranty of 

16# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 

17# GNU General Public License for more details. 

18# 

19# You should have received a copy of the GNU General Public License 

20# along with this program. If not, see <https://www.gnu.org/licenses/>. 

21 

22import time 

23import unittest 

24 

25import astropy.units as u 

26 

27import lsst.utils.tests 

28import lsst.pipe.base.testUtils 

29from lsst.pex.config import Config 

30from lsst.pipe.base import Task 

31from lsst.utils.timer import timeMethod 

32 

33from lsst.verify import Measurement, Name 

34from lsst.verify.tasks import MetricComputationError, TimingMetricTask, \ 

35 CpuTimingMetricTask, MemoryMetricTask 

36from lsst.verify.tasks.testUtils import MetricTaskTestCase, MetadataMetricTestCase 

37 

38 

class DummyTask(Task):
    """A trivial task that sleeps for a fixed interval when run.

    Serves as the measurement target for the timing and memory metric
    tests below; ``@timeMethod`` records the timing/memory metadata that
    the metric tasks consume.
    """
    ConfigClass = Config
    _DefaultName = "NotARealTask"
    # Nominal wall-clock duration, in seconds, of one ``run`` call.
    taskLength = 0.1

    @timeMethod
    def run(self):
        time.sleep(self.taskLength)

47 

48 

class TimingMetricTestSuite(MetadataMetricTestCase):
    """Tests of ``TimingMetricTask`` against DummyTask's wall-clock metadata."""

    @classmethod
    def makeTask(cls):
        return TimingMetricTask(config=cls._standardConfig())

    @staticmethod
    def _standardConfig():
        # Target DummyTask.run and report to the verify.DummyTime metric.
        cfg = TimingMetricTask.ConfigClass()
        cfg.connections.labelName = DummyTask._DefaultName
        cfg.target = DummyTask._DefaultName + ".run"
        cfg.connections.package = "verify"
        cfg.connections.metric = "DummyTime"
        return cfg

    def setUp(self):
        super().setUp()
        self.metric = Name("verify.DummyTime")

        # Run the dummy task once so its metadata holds real timing entries.
        self.scienceTask = DummyTask()
        self.scienceTask.run()

    def testValid(self):
        result = self.task.run(self.scienceTask.getFullMetadata())
        lsst.pipe.base.testUtils.assertValidOutput(self.task, result)
        measurement = result.measurement

        self.assertIsInstance(measurement, Measurement)
        self.assertEqual(measurement.metric_name, self.metric)
        # Measured time must be positive and of the same order as the sleep.
        self.assertGreater(measurement.quantity, 0.0 * u.second)
        self.assertLess(measurement.quantity,
                        2 * DummyTask.taskLength * u.second)

    def testRunDifferentMethod(self):
        # Point the metric at a method that was never actually run.
        cfg = self._standardConfig()
        cfg.target = DummyTask._DefaultName + ".runDataRef"
        task = TimingMetricTask(config=cfg)
        try:
            result = task.run(self.scienceTask.getFullMetadata())
        except lsst.pipe.base.NoWorkFound:
            pass  # Correct behavior
        else:
            # Alternative correct behavior: valid output, empty measurement.
            lsst.pipe.base.testUtils.assertValidOutput(task, result)
            self.assertIsNone(result.measurement)

    def testNonsenseKeys(self):
        metadata = self.scienceTask.getFullMetadata()
        # Remove every start-time entry, leaving inconsistent metadata.
        doomed = [key
                  for key in metadata.paramNames(topLevelOnly=False)
                  if "StartUtc" in key]
        for key in doomed:
            del metadata[key]

        with self.assertRaises(MetricComputationError):
            self.task.run(metadata)

    def testBadlyTypedKeys(self):
        metadata = self.scienceTask.getFullMetadata()
        # Replace the end-time strings with values of the wrong type.
        corrupted = [key
                     for key in metadata.paramNames(topLevelOnly=False)
                     if "EndUtc" in key]
        for key in corrupted:
            metadata[key] = 42

        with self.assertRaises(MetricComputationError):
            self.task.run(metadata)

116 

117 

class CpuTimingMetricTestSuite(MetadataMetricTestCase):
    """Tests of ``CpuTimingMetricTask`` against DummyTask's CPU-time metadata."""

    @classmethod
    def makeTask(cls):
        return CpuTimingMetricTask(config=cls._standardConfig())

    @staticmethod
    def _standardConfig():
        # Target DummyTask.run and report to the verify.DummyCpuTime metric.
        cfg = CpuTimingMetricTask.ConfigClass()
        cfg.connections.labelName = DummyTask._DefaultName
        cfg.target = DummyTask._DefaultName + ".run"
        cfg.connections.package = "verify"
        cfg.connections.metric = "DummyCpuTime"
        return cfg

    def setUp(self):
        super().setUp()
        self.metric = Name("verify.DummyCpuTime")

        # Run the dummy task once so its metadata holds real timing entries.
        self.scienceTask = DummyTask()
        self.scienceTask.run()

    def testValid(self):
        result = self.task.run(self.scienceTask.getFullMetadata())
        lsst.pipe.base.testUtils.assertValidOutput(self.task, result)
        measurement = result.measurement

        self.assertIsInstance(measurement, Measurement)
        self.assertEqual(measurement.metric_name, self.metric)
        self.assertGreater(measurement.quantity, 0.0 * u.second)
        self.assertLess(measurement.quantity,
                        2 * DummyTask.taskLength * u.second)

        # CPU time should be less than wall-clock time.
        wallClock = TimingMetricTask(
            config=TimingMetricTestSuite._standardConfig())
        wallResult = wallClock.run(self.scienceTask.getFullMetadata())
        # Include 0.1% margin for almost-equal values.
        self.assertLess(measurement.quantity,
                        1.001 * wallResult.measurement.quantity)

    def testRunDifferentMethod(self):
        # Point the metric at a method that was never actually run.
        cfg = self._standardConfig()
        cfg.target = DummyTask._DefaultName + ".runDataRef"
        task = CpuTimingMetricTask(config=cfg)
        try:
            result = task.run(self.scienceTask.getFullMetadata())
        except lsst.pipe.base.NoWorkFound:
            pass  # Correct behavior
        else:
            # Alternative correct behavior: valid output, empty measurement.
            lsst.pipe.base.testUtils.assertValidOutput(task, result)
            self.assertIsNone(result.measurement)

    def testNonsenseKeys(self):
        metadata = self.scienceTask.getFullMetadata()
        # Remove every CPU start-time entry, leaving inconsistent metadata.
        doomed = [key
                  for key in metadata.paramNames(topLevelOnly=False)
                  if "StartCpuTime" in key]
        for key in doomed:
            del metadata[key]

        with self.assertRaises(MetricComputationError):
            self.task.run(metadata)

    def testBadlyTypedKeys(self):
        metadata = self.scienceTask.getFullMetadata()
        # Stringify the CPU end times so they are no longer numeric.
        corrupted = [key
                     for key in metadata.paramNames(topLevelOnly=False)
                     if "EndCpuTime" in key]
        for key in corrupted:
            metadata[key] = str(float(metadata[key]))

        with self.assertRaises(MetricComputationError):
            self.task.run(metadata)

191 

192 

class MemoryMetricTestSuite(MetadataMetricTestCase):
    """Tests of ``MemoryMetricTask`` against DummyTask's resource metadata."""

    @classmethod
    def makeTask(cls):
        return MemoryMetricTask(config=cls._standardConfig())

    @staticmethod
    def _standardConfig():
        # Target DummyTask.run and report to the verify.DummyMemory metric.
        cfg = MemoryMetricTask.ConfigClass()
        cfg.connections.labelName = DummyTask._DefaultName
        cfg.target = DummyTask._DefaultName + ".run"
        cfg.connections.package = "verify"
        cfg.connections.metric = "DummyMemory"
        return cfg

    def setUp(self):
        super().setUp()
        self.metric = Name("verify.DummyMemory")

        # Run the dummy task once so its metadata holds real usage entries.
        self.scienceTask = DummyTask()
        self.scienceTask.run()

    def testValid(self):
        result = self.task.run(self.scienceTask.getFullMetadata())
        lsst.pipe.base.testUtils.assertValidOutput(self.task, result)
        measurement = result.measurement

        self.assertIsInstance(measurement, Measurement)
        self.assertEqual(measurement.metric_name, self.metric)
        self.assertGreater(measurement.quantity, 0.0 * u.byte)

    def testRunDifferentMethod(self):
        # Point the metric at a method that was never actually run.
        cfg = self._standardConfig()
        cfg.target = DummyTask._DefaultName + ".runDataRef"
        task = MemoryMetricTask(config=cfg)
        try:
            result = task.run(self.scienceTask.getFullMetadata())
        except lsst.pipe.base.NoWorkFound:
            pass  # Correct behavior
        else:
            # Alternative correct behavior: valid output, empty measurement.
            lsst.pipe.base.testUtils.assertValidOutput(task, result)
            self.assertIsNone(result.measurement)

    def testBadlyTypedKeys(self):
        metadata = self.scienceTask.getFullMetadata()
        # Stringify the resident-set sizes so they are no longer numeric.
        corrupted = [key
                     for key in metadata.paramNames(topLevelOnly=False)
                     if "EndMaxResidentSetSize" in key]
        for key in corrupted:
            metadata[key] = str(float(metadata[key]))

        with self.assertRaises(MetricComputationError):
            self.task.run(metadata)

    def testOldMetadata(self):
        """Test compatibility with version 0 metadata

        This can't actually test differences in unit handling between version 0
        and version 1, but at least verifies that the code didn't choke on
        old-style metadata.
        """
        newMetadata = self.scienceTask.getFullMetadata()
        oldMetadata = newMetadata.copy()
        # Version 0 metadata lacks the __version__ markers; strip them.
        for key in newMetadata.names(topLevelOnly=False):
            if "__version__" in key:
                del oldMetadata[key]

        result = self.task.run(oldMetadata)
        lsst.pipe.base.testUtils.assertValidOutput(self.task, result)
        measurement = result.measurement

        self.assertIsInstance(measurement, Measurement)
        self.assertEqual(measurement.metric_name, self.metric)

        # Since new style is always bytes, old-style will be less or equal
        newResult = self.task.run(newMetadata)
        self.assertGreater(measurement.quantity, 0.0 * u.byte)
        self.assertLessEqual(measurement.quantity,
                             newResult.measurement.quantity)

273 

274 

# Hack around unittest's hacky test setup system: the imported base classes
# must not be discovered and run as test cases themselves.
del MetricTaskTestCase
del MetadataMetricTestCase


class MemoryTester(lsst.utils.tests.MemoryTestCase):
    """Standard LSST leak check for this test module."""
    pass


def setup_module(module):
    # pytest hook: initialize the LSST test framework for this module.
    lsst.utils.tests.init()


if __name__ == "__main__":
    lsst.utils.tests.init()
    unittest.main()