Coverage for tests/test_metrics.py: 29% of 184 statements
coverage.py v7.4.4, created at 2024-03-19 03:09 -0700

# This file is part of ap_pipe.
#
# Developed for the LSST Data Management System.
# This product includes software developed by the LSST Project
# (http://www.lsst.org).
# See the COPYRIGHT file at the top-level directory of this distribution
# for details of code ownership.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#

import astropy.units as u
import numpy as np
import time
import unittest

import lsst.pex.config
import lsst.pipe.base
from lsst.pipe.base import testUtils
import lsst.skymap as skyMap
from lsst.utils.timer import timeMethod
import lsst.utils.tests
from lsst.verify import Name
import lsst.verify.tasks
from lsst.verify.tasks.testUtils import MetricTaskTestCase

from lsst.ap.pipe.createApFakes import CreateRandomApFakesTask, CreateRandomApFakesConfig
from lsst.ap.pipe.metrics import (ApFakesCompletenessMetricTask, ApFakesCompletenessMetricConfig,
                                  ApFakesCountMetricTask, ApFakesCountMetricConfig,
                                  PipelineTimingMetricTask)


class TestApCompletenessTask(MetricTaskTestCase):

    @classmethod
    def makeTask(cls, magMin=20, magMax=30):
        """Make the task and allow for modification of the config min and max.

        Parameters
        ----------
        magMin : `float`
            Minimum magnitude.
        magMax : `float`
            Maximum magnitude.
        """
        config = ApFakesCompletenessMetricConfig()
        config.magMin = magMin
        config.magMax = magMax

        return ApFakesCompletenessMetricTask(config=config)

    def setUp(self):
        super().setUp()

        simpleMapConfig = skyMap.discreteSkyMap.DiscreteSkyMapConfig()
        simpleMapConfig.raList = [45]
        simpleMapConfig.decList = [45]
        simpleMapConfig.radiusList = [0.1]

        self.simpleMap = skyMap.DiscreteSkyMap(simpleMapConfig)
        self.tractId = 0
        bCircle = self.simpleMap.generateTract(self.tractId).getInnerSkyPolygon().getBoundingCircle()
        self.targetSources = 1000
        # Convert the bounding circle's area from steradians to square
        # degrees to express the fake density per square degree.
        self.sourceDensity = (self.targetSources
                              / (bCircle.getArea() * (180 / np.pi) ** 2))

        fakesConfig = CreateRandomApFakesConfig()
        fakesConfig.fraction = 0.0
        fakesConfig.fakeDensity = self.sourceDensity
        fakesTask = CreateRandomApFakesTask(config=fakesConfig)
        fakeCat = fakesTask.run(self.tractId, self.simpleMap).fakeCat

        self.band = 'g'
        self.magCut = 25
        magMask = (fakeCat[fakesConfig.mag_col % self.band] < self.magCut)
        self.expectedAllMatched = magMask.sum()
        ids = np.where(magMask, np.arange(1, len(fakeCat) + 1, dtype=int), 0)
        # Add columns to mimic the matched fakes result without running the
        # full pipeline.
        self.fakeCat = fakeCat.assign(diaObjectId=ids,
                                      filterName=["g"] * len(fakeCat),
                                      diaSourceId=ids)

    def testValid(self):
        """Test the run method.
        """
        result = self.task.run(self.fakeCat, self.band)
        testUtils.assertValidOutput(self.task, result)

        meas = result.measurement
        self.assertEqual(meas.metric_name, Name(metric="ap_pipe.apFakesCompleteness"))
        # Workaround for this test failing on macOS: compare approximately.
        self.assertAlmostEqual(
            meas.quantity.value,
            ((self.expectedAllMatched / self.targetSources) * u.dimensionless_unscaled).value,
            places=2)

    def testValidEmpty(self):
        """Test the run method with a valid but zero result.
        """
        # Measure completeness over 25 < g < 30, where no fakes were flagged
        # as matched in setUp.
        metricComplete = self.makeTask(self.magCut, self.magCut + 5)
        result = metricComplete.run(self.fakeCat, self.band)
        testUtils.assertValidOutput(metricComplete, result)

        meas = result.measurement
        self.assertEqual(meas.metric_name, Name(metric="ap_pipe.apFakesCompleteness"))
        self.assertEqual(meas.quantity, 0 * u.dimensionless_unscaled)


class TestApCountTask(MetricTaskTestCase):

    @classmethod
    def makeTask(cls, magMin=20, magMax=25):
        """Make the task and allow for modification of the config min and max.

        Parameters
        ----------
        magMin : `float`
            Minimum magnitude.
        magMax : `float`
            Maximum magnitude.
        """
        config = ApFakesCountMetricConfig()
        config.magMin = magMin
        config.magMax = magMax

        return ApFakesCountMetricTask(config=config)

    def setUp(self):
        super().setUp()

        simpleMapConfig = skyMap.discreteSkyMap.DiscreteSkyMapConfig()
        simpleMapConfig.raList = [45]
        simpleMapConfig.decList = [45]
        simpleMapConfig.radiusList = [0.1]

        self.simpleMap = skyMap.DiscreteSkyMap(simpleMapConfig)
        self.tractId = 0
        bCircle = self.simpleMap.generateTract(self.tractId).getInnerSkyPolygon().getBoundingCircle()
        self.targetSources = 1000
        self.sourceDensity = (self.targetSources
                              / (bCircle.getArea() * (180 / np.pi) ** 2))

        fakesConfig = CreateRandomApFakesConfig()
        fakesConfig.fraction = 0.0
        fakesConfig.fakeDensity = self.sourceDensity
        fakesTask = CreateRandomApFakesTask(config=fakesConfig)
        fakeCat = fakesTask.run(self.tractId, self.simpleMap).fakeCat

        self.band = 'g'
        self.magCut = 25
        magMask = (fakeCat[fakesConfig.mag_col % self.band] < self.magCut)
        self.expectedAllMatched = magMask.sum()
        ids = np.where(magMask, np.arange(1, len(fakeCat) + 1, dtype=int), 0)
        # Add columns to mimic the matched fakes result without running the
        # full pipeline.
        self.fakeCat = fakeCat.assign(diaObjectId=ids,
                                      filterName=["g"] * len(fakeCat),
                                      diaSourceId=ids)

    def testValid(self):
        """Test the run method.
        """
        result = self.task.run(self.fakeCat, self.band)
        testUtils.assertValidOutput(self.task, result)

        meas = result.measurement
        self.assertEqual(meas.metric_name, Name(metric="ap_pipe.apFakesCount"))
        # Workaround for this test failing on macOS: compare approximately.
        self.assertAlmostEqual(
            meas.quantity.value,
            (self.expectedAllMatched * u.count).value,
            places=2)

    def testValidEmpty(self):
        """Test the run method with a valid but zero result.
        """
        # Make a mag cut that will have no sources: 30 < g < 35.
        metricComplete = self.makeTask(self.magCut + 5, self.magCut + 10)
        result = metricComplete.run(self.fakeCat, self.band)
        testUtils.assertValidOutput(metricComplete, result)

        meas = result.measurement
        self.assertEqual(meas.metric_name, Name(metric="ap_pipe.apFakesCount"))
        self.assertEqual(meas.quantity, 0 * u.count)
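

# Minimal task used only by the timing tests below: its run() sleeps briefly,
# and the @timeMethod decorator records start/end timing metadata.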
class DummyTask(lsst.pipe.base.Task):
    ConfigClass = lsst.pex.config.Config
    _DefaultName = "NotARealTask"
    taskLength = 0.1

    @timeMethod
    def run(self):
        time.sleep(self.taskLength)


# Can't test against MetadataMetricTestCase, because this class is not a MetadataMetricTask
class TestPipelineTimingMetricTask(MetricTaskTestCase):

    @staticmethod
    def _makeConfig(nameStart=DummyTask._DefaultName, nameEnd=DummyTask._DefaultName):
        config = PipelineTimingMetricTask.ConfigClass()
        config.connections.labelStart = nameStart
        config.connections.labelEnd = nameEnd
        config.targetStart = nameStart + ".run"
        config.targetEnd = nameEnd + ".run"
        config.connections.package = "ap_pipe"
        config.connections.metric = "DummyTime"
        return config

    @classmethod
    def makeTask(cls):
        return PipelineTimingMetricTask(config=cls._makeConfig(nameStart="first", nameEnd="last"))

    def setUp(self):
        super().setUp()
        self.metric = Name("ap_pipe.DummyTime")

        self.startTask = DummyTask(name="first")
        self.startTask.run()
        self.endTask = DummyTask(name="last")
        self.endTask.run()

    def testSingleTask(self):
        task = PipelineTimingMetricTask(config=self._makeConfig(nameStart="first", nameEnd="first"))

        altConfig = lsst.verify.tasks.TimingMetricConfig()
        altConfig.connections.labelName = "first"
        altConfig.target = "first.run"
        altConfig.connections.package = "verify"
        altConfig.connections.metric = "DummyTime"
        altTask = lsst.verify.tasks.TimingMetricTask(config=altConfig)

        result = task.run(self.startTask.getFullMetadata(), self.startTask.getFullMetadata())
        oracle = altTask.run(self.startTask.getFullMetadata())

        self.assertEqual(result.measurement.metric_name, self.metric)
        self.assertAlmostEqual(result.measurement.quantity.to_value(u.s),
                               oracle.measurement.quantity.to_value(u.s))

    def testTwoTasks(self):
        firstTask = PipelineTimingMetricTask(config=self._makeConfig(nameStart="first", nameEnd="first"))
        secondTask = PipelineTimingMetricTask(config=self._makeConfig(nameStart="last", nameEnd="last"))

        result = self.task.run(self.startTask.getFullMetadata(), self.endTask.getFullMetadata())
        firstResult = firstTask.run(self.startTask.getFullMetadata(), self.startTask.getFullMetadata())
        secondResult = secondTask.run(self.endTask.getFullMetadata(), self.endTask.getFullMetadata())

        self.assertEqual(result.measurement.metric_name, self.metric)
        self.assertGreater(result.measurement.quantity, 0.0 * u.s)
        self.assertGreaterEqual(result.measurement.quantity,
                                firstResult.measurement.quantity + secondResult.measurement.quantity)

    def testRunDifferentMethodFirst(self):
        config = self._makeConfig(nameStart="first", nameEnd="last")
        config.targetStart = "first.doProcess"
        task = PipelineTimingMetricTask(config=config)
        try:
            result = task.run(self.startTask.getFullMetadata(), self.endTask.getFullMetadata())
        except lsst.pipe.base.NoWorkFound:
            # Correct behavior for MetricTask
            pass
        else:
            # Alternative correct behavior for MetricTask
            testUtils.assertValidOutput(task, result)
            meas = result.measurement
            self.assertIsNone(meas)

    def testRunDifferentMethodLast(self):
        config = self._makeConfig(nameStart="first", nameEnd="last")
        config.targetEnd = "last.doProcess"
        task = PipelineTimingMetricTask(config=config)
        try:
            result = task.run(self.startTask.getFullMetadata(), self.endTask.getFullMetadata())
        except lsst.pipe.base.NoWorkFound:
            # Correct behavior for MetricTask
            pass
        else:
            # Alternative correct behavior for MetricTask
            testUtils.assertValidOutput(task, result)
            meas = result.measurement
            self.assertIsNone(meas)

    def testBadlyTypedKeys(self):
        metadata = self.endTask.getFullMetadata()
        endKeys = [key
                   for key in metadata.paramNames(topLevelOnly=False)
                   if "EndUtc" in key]
        for key in endKeys:
            metadata[key] = 42

        with self.assertRaises(lsst.verify.tasks.MetricComputationError):
            self.task.run(self.startTask.getFullMetadata(), metadata)


# Remove the imported base class so that unittest's test discovery does not
# try to run MetricTaskTestCase itself.
del MetricTaskTestCase
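

# MemoryTestCase checks for resource leaks (e.g., open file descriptors) after
# the other tests have run.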
class MemoryTester(lsst.utils.tests.MemoryTestCase):
    pass


def setup_module(module):
    lsst.utils.tests.init()


if __name__ == "__main__":
    lsst.utils.tests.init()
    unittest.main()