Coverage for tests/test_metricsController.py: 34%

# This file is part of verify.
#
# Developed for the LSST Data Management System.
# This product includes software developed by the LSST Project
# (https://www.lsst.org).
# See the COPYRIGHT file at the top-level directory of this distribution
# for details of code ownership.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import unittest.mock

import astropy.units as u
from astropy.tests.helper import assert_quantity_allclose

import lsst.utils.tests
import lsst.verify
from lsst.pex.config import Config, Field, FieldValidationError
from lsst.pipe.base import Struct, Task
from lsst.verify import Measurement, Name
from lsst.verify.tasks import MetricComputationError  # import path assumed
from lsst.verify.gen2tasks import \
    MetricTask, MetricsControllerTask, register, registerMultiple
return "misc_tasks.SuperfluousMetric"
return "misc_tasks.RedundantMetric"
class DemoMetricConfig(MetricTask.ConfigClass):  # base class assumed
    metric = Field(
        dtype=str, default=_metricName(), doc="Metric to target")
    multiplier = Field(
        dtype=float, default=1.0, doc="Arbitrary factor for measurement")
"""A minimal `lsst.verify.gen2tasks.MetricTask`. """
nData = len(inputs) return Struct(measurement=Measurement( self.getOutputMetricName(self.config), self.config.multiplier * nData * u.second))
def getInputDatasetTypes(cls, _config): return {'inputs': "metadata"}
def getOutputMetricName(cls, config): return Name(config.metric)
"""A minimal `lsst.verify.gen2tasks.MetricTask`. """
nData = len(inputs) return Struct(measurement=Measurement( self.getOutputMetricName(self.config), self.config.multiplier * nData * u.second))
def getInputDatasetTypes(cls, _config): return {'inputs': "metadata"}
def getOutputMetricName(cls, config): return Name(config.metric)
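# Illustrative sketch (not in the original file): with the classes as
# reconstructed above, each demo task measures multiplier * len(inputs)
# seconds, which is the arithmetic the unitsOfWork assertions rely on:
#
#     >>> config = DemoMetricConfig()
#     >>> config.multiplier = 2.0
#     >>> task = _DemoMetricTask(config=config)
#     >>> task.run(["md1", "md2", "md3"]).measurement.quantity
#     <Quantity 6. s>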
"""A dataref-like object with a specific data ID. """ return unittest.mock.NonCallableMock(dataId=dataId)
"""Simplest valid non-identity metadata adder. """
job.meta["tested"] = True return Struct(job=job)
"""Return a number of datarefs corresponding to a (partial) dataId. """ dataref = _makeMockDataref()
# Simulate a dataset of 3 visits and 2 CCDs nRuns = 1 if "visit" not in dataId: nRuns *= 3 if "ccd" not in dataId: nRuns *= 2 return [dataref] * nRuns
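# Worked examples of the fan-out above (illustrative; the positional
# arguments of _butlerQuery are assumptions):
#
#     >>> len(_butlerQuery(None, "metadata", "", dataId={"visit": 1, "ccd": 1}))
#     1
#     >>> len(_butlerQuery(None, "metadata", "", dataId={"visit": 1}))
#     2
#     >>> len(_butlerQuery(None, "metadata", "", dataId={}))
#     6
#
# These counts are exactly the unitsOfWork values (1, 2, 6) asserted by the
# tests below.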
@unittest.mock.patch(
    ...,  # the patch target string is truncated in this excerpt
    side_effect=_butlerQuery)
class MetricsControllerTestSuite(lsst.utils.tests.TestCase):  # class statement assumed
    def setUp(self):
        self.config = MetricsControllerTask.ConfigClass()
        self.config.metadataAdder.retarget(_TestMetadataAdder)
        self.config.measurers = ["demoMetric", "repeatedMetric"]

        self.config.measurers["demoMetric"].multiplier = 2.0
        repeated = self.config.measurers["repeatedMetric"]
        repeated.configs["first"] = DemoMetricConfig()
        repeated.configs["first"].metric = _extraMetricName1()
        repeated.configs["second"] = DemoMetricConfig()
        repeated.configs["second"].metric = _extraMetricName2()
        repeated.configs["second"].multiplier = 3.4

        self.task = MetricsControllerTask(self.config)
    def _allMetricTaskConfigs(self):
        configs = []
        for name, topConfig in zip(self.config.measurers.names,
                                   self.config.measurers.active):
            if name != "repeatedMetric":
                configs.append(topConfig)
            else:
                configs.extend(topConfig.configs.values())
        return configs
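    # Worked example (illustrative): under the setUp() configuration above,
    # measurers.names is ["demoMetric", "repeatedMetric"], and the repeated
    # entry contributes its two sub-configs, so
    #
    #     >>> len(self._allMetricTaskConfigs())
    #     3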
"""Standardized test battery for running a metric.
Parameters ---------- mockWriter : `unittest.mock.CallableMock` A queriable placeholder for `lsst.verify.Job.write`. datarefs : `list` of `lsst.daf.persistence.ButlerDataRef` The inputs to `MetricsControllerTask.runDataRefs`. unitsOfWork : `list` of `int` The number of science pipeline units of work (i.e., CCD-visit pairs) that should be combined to make a metric for each element of ``datarefs``. """ if len(datarefs) != len(unitsOfWork): raise ValueError("Test requires matching datarefs " "and unitsOfWork")
jobs = self.task.runDataRefs(datarefs).jobs self.assertEqual(len(jobs), len(datarefs)) for job, dataref, nTimings in zip(jobs, datarefs, unitsOfWork): taskConfigs = self._allMetricTaskConfigs() self.assertEqual(len(job.measurements), len(taskConfigs)) for metricName, metricConfig in zip(job.measurements, taskConfigs): self.assertEqual(metricName, Name(metricConfig.metric)) assert_quantity_allclose( job.measurements[metricConfig.metric].quantity, metricConfig.multiplier * float(nTimings) * u.second)
self.assertTrue(job.meta["tested"])
# Exact arguments to Job.write are implementation detail, don't test if not jobs: mockWriter.assert_not_called() elif len(jobs) == 1: mockWriter.assert_called_once() else: mockWriter.assert_called()
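    # Expected values (illustrative): for a dataref spanning n units of work,
    # the setUp() configuration yields three measurements per job:
    #
    #     demoMetric               2.0 * n s
    #     repeatedMetric "first"   1.0 * n s  (default multiplier)
    #     repeatedMetric "second"  3.4 * n s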
    # The original test-method names and any leading mock parameters are not
    # preserved in this excerpt; the names below are placeholders.
    def testSingleDataref(self, mockWriter, _mockMetricsLoader):
        dataId = {"visit": 42, "ccd": 101, "filter": "k"}
        datarefs = [_makeMockDataref(dataId)]
        self._checkMetric(mockWriter, datarefs, unitsOfWork=[1])

    def testSingleVisit(self, mockWriter, _mockMetricsLoader):
        dataId = {"visit": 42, "filter": "k"}
        datarefs = [_makeMockDataref(dataId)]
        self._checkMetric(mockWriter, datarefs, unitsOfWork=[2])

    def testFullDataset(self, mockWriter, _mockMetricsLoader):
        dataId = {}
        datarefs = [_makeMockDataref(dataId)]
        self._checkMetric(mockWriter, datarefs, unitsOfWork=[6])

    def testMultipleDatarefs(self, mockWriter, _mockMetricsLoader):
        dataIds = [{"visit": 42, "ccd": 101, "filter": "k"},
                   {"visit": 42, "ccd": 102, "filter": "k"}]
        datarefs = [_makeMockDataref(dataId) for dataId in dataIds]
        self._checkMetric(mockWriter, datarefs,
                          unitsOfWork=[1] * len(dataIds))
    def testFailedMetricTask(self, mockWriter, _mockMetricsLoader):
        # Placeholder name/signature, as above.
        self.config.measurers = ["demoMetric"]
        self.task = MetricsControllerTask(self.config)
        with unittest.mock.patch.object(_DemoMetricTask,
                                        "adaptArgsAndRun") as mockCall:
            # Run _DemoMetricTask twice, with one failure and one result
            mockCall.side_effect = (MetricComputationError,
                                    unittest.mock.DEFAULT)
            expectedValue = 1.0 * u.second
            mockCall.return_value = Struct(
                measurement=lsst.verify.Measurement(_metricName(),
                                                    expectedValue))

            dataIds = [{"visit": 42, "ccd": 101, "filter": "k"},
                       {"visit": 42, "ccd": 102, "filter": "k"}]
            datarefs = [_makeMockDataref(dataId) for dataId in dataIds]

            jobs = self.task.runDataRefs(datarefs).jobs
            self.assertEqual(len(jobs), len(datarefs))

            # Failed job
            self.assertEqual(len(jobs[0].measurements), 0)

            # Successful job
            self.assertTrue(jobs[1].meta["tested"])
            self.assertEqual(len(jobs[1].measurements), 1)
            assert_quantity_allclose(
                jobs[1].measurements[_metricName()].quantity,
                expectedValue)
    def testNoDatarefs(self, mockWriter, _mockMetricsLoader):
        # Placeholder name/signature, as above.
        datarefs = []
        self._checkMetric(mockWriter, datarefs, unitsOfWork=[])
    def testInvalidMeasurer(self, _mockWriter, _mockMetricsLoader):
        # Placeholder name/signature, as above.
        with self.assertRaises(FieldValidationError):
            self.config.measurers = ["totallyAndDefinitelyNotARealMetric"]
dataIds = [{"visit": 42, "ccd": 101, "filter": "k"}, {"visit": 42, "ccd": 102, "filter": "k"}] datarefs = [_makeMockDataref(dataId) for dataId in dataIds] extraMetadata = {"test_protocol": 42} jobs = self.task.runDataRefs(datarefs, extraMetadata).jobs
for job in jobs: self.assertTrue(job.meta["tested"]) self.assertEqual(job.meta["test_protocol"], extraMetadata["test_protocol"])
def setup_module(module):  # standard LSST test-module hook; header assumed
    lsst.utils.tests.init()


if __name__ == "__main__":
    lsst.utils.tests.init()
    unittest.main()