Coverage for tests/test_commonMetrics.py: 31% (160 statements)
coverage.py v6.4.1, created at 2022-07-11 06:50 +0000
# This file is part of verify.
#
# Developed for the LSST Data Management System.
# This product includes software developed by the LSST Project
# (https://www.lsst.org).
# See the COPYRIGHT file at the top-level directory of this distribution
# for details of code ownership.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.

import time
import unittest
import warnings

import astropy.units as u

import lsst.utils.tests
from lsst.pex.config import Config
import lsst.pipe.base.testUtils  # provides assertValidOutput, used below
from lsst.pipe.base import Task, timeMethod
from lsst.verify import Measurement, Name
from lsst.verify.gen2tasks.testUtils import MetricTaskTestCase
from lsst.verify.tasks import MetricComputationError, TimingMetricTask, \
    MemoryMetricTask
from lsst.verify.tasks.testUtils import MetadataMetricTestCase
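

# DummyTask is a trivial Task whose @timeMethod-decorated run() sleeps briefly
# so that timing and memory entries appear in its metadata; the metric tasks
# under test read those entries back out.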
class DummyTask(Task):
    ConfigClass = Config
    _DefaultName = "NotARealTask"
    taskLength = 0.1

    @timeMethod
    def run(self):
        time.sleep(self.taskLength)
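

# Exercises TimingMetricTask against the metadata written by DummyTask.run().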
class TimingMetricTestSuite(MetadataMetricTestCase):
    @classmethod
    def makeTask(cls):
        return TimingMetricTask(config=cls._standardConfig())

    @staticmethod
    def _standardConfig():
        config = TimingMetricTask.ConfigClass()
        config.connections.labelName = DummyTask._DefaultName
        config.target = DummyTask._DefaultName + ".run"
        config.connections.package = "verify"
        config.connections.metric = "DummyTime"
        return config

    def setUp(self):
        super().setUp()
        self.config = TimingMetricTestSuite._standardConfig()
        self.metric = Name("verify.DummyTime")

        self.scienceTask = DummyTask()
        self.scienceTask.run()

    def testValid(self):
        result = self.task.run(self.scienceTask.getFullMetadata())
        lsst.pipe.base.testUtils.assertValidOutput(self.task, result)
        meas = result.measurement

        self.assertIsInstance(meas, Measurement)
        self.assertEqual(meas.metric_name, self.metric)
        self.assertGreater(meas.quantity, 0.0 * u.second)
        self.assertLess(meas.quantity, 2 * DummyTask.taskLength * u.second)

    def testNoMetric(self):
        self.config.connections.package = "foo.bar"
        self.config.connections.metric = "FooBarTime"
        task = TimingMetricTask(config=self.config)
        with self.assertRaises(TypeError):
            task.run(self.scienceTask.getFullMetadata())

    def testMissingData(self):
        result = self.task.run(None)
        lsst.pipe.base.testUtils.assertValidOutput(self.task, result)
        meas = result.measurement
        self.assertIsNone(meas)

    def testRunDifferentMethod(self):
        self.config.target = DummyTask._DefaultName + ".runDataRef"
        task = TimingMetricTask(config=self.config)
        result = task.run(self.scienceTask.getFullMetadata())
        lsst.pipe.base.testUtils.assertValidOutput(task, result)
        meas = result.measurement
        self.assertIsNone(meas)
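
    # The next two tests corrupt the timing metadata (missing or mistyped
    # keys) and expect the failure to surface as a MetricComputationError.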
    def testNonsenseKeys(self):
        metadata = self.scienceTask.getFullMetadata()
        startKeys = [key
                     for key in metadata.paramNames(topLevelOnly=False)
                     if "StartCpuTime" in key]
        for key in startKeys:
            metadata.remove(key)

        task = TimingMetricTask(config=self.config)
        with self.assertRaises(MetricComputationError):
            task.run(metadata)

    def testBadlyTypedKeys(self):
        metadata = self.scienceTask.getFullMetadata()
        endKeys = [key
                   for key in metadata.paramNames(topLevelOnly=False)
                   if "EndCpuTime" in key]
        for key in endKeys:
            metadata.set(key, str(metadata.getAsDouble(key)))

        task = TimingMetricTask(config=self.config)
        with self.assertRaises(MetricComputationError):
            task.run(metadata)
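
    # validate() should migrate the deprecated top-level `metric` field into
    # the connections fields, emitting exactly one FutureWarning.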
    def testDeprecated(self):
        with warnings.catch_warnings(record=True):
            self.config.metric = "verify.DummyTime"
        self.config.connections.package = ""
        self.config.connections.metric = ""
        with warnings.catch_warnings(record=True) as emitted:
            self.config.validate()
        self.assertEqual(len(emitted), 1)
        self.assertEqual(emitted[0].category, FutureWarning)
        self.assertEqual(self.config.connections.package, "verify")
        self.assertEqual(self.config.connections.metric, "DummyTime")
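

# Analogous checks for MemoryMetricTask, which reads the peak resident set
# size recorded in the task metadata.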
class MemoryMetricTestSuite(MetadataMetricTestCase):
    @classmethod
    def makeTask(cls):
        return MemoryMetricTask(config=cls._standardConfig())

    @staticmethod
    def _standardConfig():
        config = MemoryMetricTask.ConfigClass()
        config.connections.labelName = DummyTask._DefaultName
        config.target = DummyTask._DefaultName + ".run"
        config.connections.package = "verify"
        config.connections.metric = "DummyMemory"
        return config

    def setUp(self):
        super().setUp()
        self.config = self._standardConfig()
        self.metric = Name("verify.DummyMemory")

        self.scienceTask = DummyTask()
        self.scienceTask.run()

    def testValid(self):
        result = self.task.run(self.scienceTask.getFullMetadata())
        lsst.pipe.base.testUtils.assertValidOutput(self.task, result)
        meas = result.measurement

        self.assertIsInstance(meas, Measurement)
        self.assertEqual(meas.metric_name, self.metric)
        self.assertGreater(meas.quantity, 0.0 * u.byte)

    def testNoMetric(self):
        self.config.connections.package = "foo.bar"
        self.config.connections.metric = "FooBarMemory"
        task = MemoryMetricTask(config=self.config)
        with self.assertRaises(TypeError):
            task.run(self.scienceTask.getFullMetadata())

    def testMissingData(self):
        result = self.task.run(None)
        lsst.pipe.base.testUtils.assertValidOutput(self.task, result)
        meas = result.measurement
        self.assertIsNone(meas)

    def testRunDifferentMethod(self):
        self.config.target = DummyTask._DefaultName + ".runDataRef"
        task = MemoryMetricTask(config=self.config)
        result = task.run(self.scienceTask.getFullMetadata())
        lsst.pipe.base.testUtils.assertValidOutput(task, result)
        meas = result.measurement
        self.assertIsNone(meas)

    def testBadlyTypedKeys(self):
        metadata = self.scienceTask.getFullMetadata()
        endKeys = [key
                   for key in metadata.paramNames(topLevelOnly=False)
                   if "EndMaxResidentSetSize" in key]
        for key in endKeys:
            metadata.set(key, str(metadata.getAsDouble(key)))

        task = MemoryMetricTask(config=self.config)
        with self.assertRaises(MetricComputationError):
            task.run(metadata)

    def testDeprecated(self):
        with warnings.catch_warnings(record=True):
            self.config.metric = "verify.DummyMemory"
        self.config.connections.package = ""
        self.config.connections.metric = ""
        with warnings.catch_warnings(record=True) as emitted:
            self.config.validate()
        self.assertEqual(len(emitted), 1)
        self.assertEqual(emitted[0].category, FutureWarning)
        self.assertEqual(self.config.connections.package, "verify")
        self.assertEqual(self.config.connections.metric, "DummyMemory")


# Hack around unittest's hacky test setup system: delete the imported abstract
# base classes so test discovery does not try to run them directly.
del MetricTaskTestCase
del MetadataMetricTestCase
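

# Standard LSST test-module boilerplate: MemoryTestCase checks for resources
# (such as open file descriptors) leaked by the tests above.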
class MemoryTester(lsst.utils.tests.MemoryTestCase):
    pass
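

# Pytest calls setup_module() once before running this module's tests;
# lsst.utils.tests.init() initializes the LSST test framework.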
def setup_module(module):
    lsst.utils.tests.init()


if __name__ == "__main__":
    lsst.utils.tests.init()
    unittest.main()