Coverage for tests/test_commonMetrics.py: 31% (140 statements).
Report generated by coverage.py v6.4.4 on 2022-09-03 03:58 -0700.

# This file is part of verify.
#
# Developed for the LSST Data Management System.
# This product includes software developed by the LSST Project
# (https://www.lsst.org).
# See the COPYRIGHT file at the top-level directory of this distribution
# for details of code ownership.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.

21 

22import time 

23import unittest 

24 

25import astropy.units as u 

26 

27import lsst.utils.tests 

28import lsst.pipe.base.testUtils 

29from lsst.pex.config import Config 

30from lsst.pipe.base import Task 

31from lsst.utils.timer import timeMethod 

32 

33from lsst.verify import Measurement, Name 

34from lsst.verify.tasks import MetricComputationError, TimingMetricTask, \ 

35 MemoryMetricTask 

36from lsst.verify.tasks.testUtils import MetricTaskTestCase, MetadataMetricTestCase 

37 

38 

class DummyTask(Task):
    """Trivial task whose ``run`` merely sleeps, giving the ``timeMethod``
    decorator something deterministic to record in the task metadata.
    """

    ConfigClass = Config
    _DefaultName = "NotARealTask"
    # Duration, in seconds, of a single ``run`` invocation.
    taskLength = 0.1

    @timeMethod
    def run(self):
        """Sleep for ``taskLength`` seconds so timing metadata is recorded."""
        time.sleep(self.taskLength)

47 

48 

class TimingMetricTestSuite(MetadataMetricTestCase):
    """Unit tests for ``TimingMetricTask``, driven by metadata produced by
    a real ``DummyTask.run`` call.
    """

    @classmethod
    def makeTask(cls):
        """Factory required by the ``MetricTaskTestCase`` machinery."""
        return TimingMetricTask(config=cls._standardConfig())

    @staticmethod
    def _standardConfig():
        """Build a config that points the timing metric at ``DummyTask.run``."""
        config = TimingMetricTask.ConfigClass()
        config.target = f"{DummyTask._DefaultName}.run"
        config.connections.labelName = DummyTask._DefaultName
        config.connections.package = "verify"
        config.connections.metric = "DummyTime"
        return config

    def setUp(self):
        super().setUp()
        self.config = self._standardConfig()
        self.metric = Name("verify.DummyTime")

        # Run the instrumented task once so its metadata contains timings.
        self.scienceTask = DummyTask()
        self.scienceTask.run()

    def testValid(self):
        """A normal run must yield a plausible, positive timing measurement."""
        result = self.task.run(self.scienceTask.getFullMetadata())
        lsst.pipe.base.testUtils.assertValidOutput(self.task, result)
        measurement = result.measurement

        self.assertIsInstance(measurement, Measurement)
        self.assertEqual(measurement.metric_name, self.metric)
        self.assertGreater(measurement.quantity, 0.0 * u.second)
        self.assertLess(measurement.quantity,
                        2 * DummyTask.taskLength * u.second)

    def testMissingData(self):
        """Absent metadata must produce a ``None`` measurement, not an error."""
        result = self.task.run(None)
        lsst.pipe.base.testUtils.assertValidOutput(self.task, result)
        self.assertIsNone(result.measurement)

    def testRunDifferentMethod(self):
        """Targeting a method that never ran must produce ``None``."""
        self.config.target = f"{DummyTask._DefaultName}.runDataRef"
        task = TimingMetricTask(config=self.config)
        result = task.run(self.scienceTask.getFullMetadata())
        lsst.pipe.base.testUtils.assertValidOutput(task, result)
        self.assertIsNone(result.measurement)

    def testNonsenseKeys(self):
        """Metadata with end- but no start-timestamps must raise."""
        metadata = self.scienceTask.getFullMetadata()
        # Copy the key list before mutating the metadata underneath it.
        for key in list(metadata.paramNames(topLevelOnly=False)):
            if "StartCpuTime" in key:
                del metadata[key]

        task = TimingMetricTask(config=self.config)
        with self.assertRaises(MetricComputationError):
            task.run(metadata)

    def testBadlyTypedKeys(self):
        """Timestamps stored as strings must raise rather than be coerced."""
        metadata = self.scienceTask.getFullMetadata()
        for key in list(metadata.paramNames(topLevelOnly=False)):
            if "EndCpuTime" in key:
                metadata[key] = str(float(metadata[key]))

        task = TimingMetricTask(config=self.config)
        with self.assertRaises(MetricComputationError):
            task.run(metadata)

118 

119 

class MemoryMetricTestSuite(MetadataMetricTestCase):
    """Unit tests for ``MemoryMetricTask``, driven by metadata produced by
    a real ``DummyTask.run`` call.
    """

    @classmethod
    def makeTask(cls):
        """Factory required by the ``MetricTaskTestCase`` machinery."""
        return MemoryMetricTask(config=cls._standardConfig())

    @staticmethod
    def _standardConfig():
        """Build a config that points the memory metric at ``DummyTask.run``."""
        config = MemoryMetricTask.ConfigClass()
        config.target = f"{DummyTask._DefaultName}.run"
        config.connections.labelName = DummyTask._DefaultName
        config.connections.package = "verify"
        config.connections.metric = "DummyMemory"
        return config

    def setUp(self):
        super().setUp()
        self.config = self._standardConfig()
        self.metric = Name("verify.DummyMemory")

        # Run the instrumented task once so its metadata contains memory use.
        self.scienceTask = DummyTask()
        self.scienceTask.run()

    def testValid(self):
        """A normal run must yield a positive memory measurement."""
        result = self.task.run(self.scienceTask.getFullMetadata())
        lsst.pipe.base.testUtils.assertValidOutput(self.task, result)
        measurement = result.measurement

        self.assertIsInstance(measurement, Measurement)
        self.assertEqual(measurement.metric_name, self.metric)
        self.assertGreater(measurement.quantity, 0.0 * u.byte)

    def testMissingData(self):
        """Absent metadata must produce a ``None`` measurement, not an error."""
        result = self.task.run(None)
        lsst.pipe.base.testUtils.assertValidOutput(self.task, result)
        self.assertIsNone(result.measurement)

    def testRunDifferentMethod(self):
        """Targeting a method that never ran must produce ``None``."""
        self.config.target = f"{DummyTask._DefaultName}.runDataRef"
        task = MemoryMetricTask(config=self.config)
        result = task.run(self.scienceTask.getFullMetadata())
        lsst.pipe.base.testUtils.assertValidOutput(task, result)
        self.assertIsNone(result.measurement)

    def testBadlyTypedKeys(self):
        """Memory values stored as strings must raise rather than be coerced."""
        metadata = self.scienceTask.getFullMetadata()
        # Copy the key list before mutating the metadata underneath it.
        for key in list(metadata.paramNames(topLevelOnly=False)):
            if "EndMaxResidentSetSize" in key:
                metadata[key] = str(float(metadata[key]))

        task = MemoryMetricTask(config=self.config)
        with self.assertRaises(MetricComputationError):
            task.run(metadata)

    def testOldMetadata(self):
        """Test compatibility with version 0 metadata.

        This can't actually test differences in unit handling between
        version 0 and version 1, but at least verifies that the code
        doesn't choke on old-style metadata.
        """
        currentMetadata = self.scienceTask.getFullMetadata()
        legacyMetadata = currentMetadata.copy()
        # Strip the version markers to simulate pre-versioning metadata.
        for key in currentMetadata.names(topLevelOnly=False):
            if "__version__" in key:
                del legacyMetadata[key]

        result = self.task.run(legacyMetadata)
        lsst.pipe.base.testUtils.assertValidOutput(self.task, result)
        measurement = result.measurement

        self.assertIsInstance(measurement, Measurement)
        self.assertEqual(measurement.metric_name, self.metric)

        # New-style metadata is always in bytes, so the old-style value
        # can be at most equal to the new-style one.
        newResult = self.task.run(currentMetadata)
        self.assertGreater(measurement.quantity, 0.0 * u.byte)
        self.assertLessEqual(measurement.quantity,
                             newResult.measurement.quantity)

201 

202 

# Hack around unittest's hacky test setup system: remove the imported base
# classes from this module's namespace so the test runner does not try to
# collect and execute them directly.
del MetricTaskTestCase, MetadataMetricTestCase

206 

207 

class MemoryTester(lsst.utils.tests.MemoryTestCase):
    """Inherit the standard leak checks from ``lsst.utils.tests``."""

210 

211 

def setup_module(module):
    """Pytest module-level hook: initialize the LSST test framework."""
    lsst.utils.tests.init()

214 

215 

# Defect fixed: a coverage.py annotation ("216 ↛ 217line 216 didn't jump…")
# was fused into the guard line, breaking the syntax; reconstruct the guard.
if __name__ == "__main__":
    lsst.utils.tests.init()
    unittest.main()