Coverage for tests/test_pipelineTask.py : 21%

Hot-keys on this page
r m x p toggle line displays
j k next/prev highlighted chunk
0 (zero) top of page
1 (one) first highlighted chunk
# This file is part of pipe_base. # # Developed for the LSST Data Management System. # This product includes software developed by the LSST Project # (http://www.lsst.org). # See the COPYRIGHT file at the top-level directory of this distribution # for details of code ownership. # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
"""Mock version of butler, only usable for this test """ self.datasets = {} self.registry = SimpleNamespace(dimensions=DimensionUniverse.fromConfig())
def key(dataId): """Make a dict key out of dataId. """ key = (dataId["camera"], dataId["visit"]) return tuple(key)
if isinstance(datasetRefOrType, DatasetRef): dataId = datasetRefOrType.dataId dsTypeName = datasetRefOrType.datasetType.name else: dsTypeName = datasetRefOrType key = self.key(dataId) dsdata = self.datasets.get(dsTypeName) if dsdata: return dsdata.get(key) return None
key = self.key(dataId) dsdata = self.datasets.setdefault(dsTypeName, {}) dsdata[key] = inMemoryDataset
dimensions=["instrument", "visit"], storageClass="Catalog", doc="Input dataset type for this task") dimensions=["instrument", "visit"], storageClass="Catalog", doc="Output dataset type for this task")
# set dimensions of a quantum, this task uses per-visit quanta and it # expects dataset dimensions to be the same self.quantum.dimensions = ["instrument", "visit"] self.quantum.sql = None
# example task which overrides run() method
# NOTE(review): the `class AddTask(...)` and `def run(...)` header lines were
# dropped by the extraction — presumably this is `AddTask.run`; confirm
# signature against the original file.
# Records the configured addend in task metadata and returns a Struct whose
# `output` is every input value incremented by config.addend.
self.metadata.add("add", self.config.addend) output = [val + self.config.addend for val in input] return pipeBase.Struct(output=output)
# example task which overrides adaptArgsAndRun() method
# NOTE(review): class/def headers dropped by the extraction — presumably
# `AddTask2.adaptArgsAndRun`; confirm signature against the original file.
# Same arithmetic as the run()-based task, but unpacks the input list from
# the `inputData` mapping itself (under the "input" key) before adding the
# configured addend to each value.
self.metadata.add("add", self.config.addend) input = inputData["input"] output = [val + self.config.addend for val in input] return pipeBase.Struct(output=output)
"""A test case for DatasetTypeDescriptor """
"""Test DatasetTypeDescriptor init """ name = "testDataset" dimensionNames = frozenset(["label"]) storageClassName = "Catalog" universe = DimensionUniverse.fromConfig() descriptor = pipeBase.DatasetTypeDescriptor(name=name, dimensionNames=dimensionNames, storageClassName=storageClassName, scalar=False, manualLoad=False) datasetType = descriptor.makeDatasetType(universe) self.assertEqual(datasetType.name, name) self.assertEqual(datasetType.dimensions.names, dimensionNames) self.assertEqual(datasetType.storageClass.name, storageClassName) self.assertFalse(descriptor.scalar)
descriptor = pipeBase.DatasetTypeDescriptor(name=name, dimensionNames=dimensionNames, storageClassName=storageClassName, scalar=True, manualLoad=False) datasetType = descriptor.makeDatasetType(universe) self.assertEqual(datasetType.name, name) self.assertEqual(datasetType.dimensions.names, dimensionNames) self.assertEqual(datasetType.storageClass.name, storageClassName) self.assertTrue(descriptor.scalar)
"""Test DatasetTypeDescriptor.fromConfig() """ universe = DimensionUniverse.fromConfig() config = AddConfig() descriptor = pipeBase.DatasetTypeDescriptor.fromConfig(config.input) datasetType = descriptor.makeDatasetType(universe) self.assertIsInstance(descriptor, pipeBase.DatasetTypeDescriptor) self.assertEqual(datasetType.name, "add_input") self.assertFalse(descriptor.scalar)
descriptor = pipeBase.DatasetTypeDescriptor.fromConfig(config.output) datasetType = descriptor.makeDatasetType(universe) self.assertIsInstance(descriptor, pipeBase.DatasetTypeDescriptor) self.assertEqual(datasetType.name, "add_output") self.assertFalse(descriptor.scalar)
"""A test case for PipelineTask """
return DatasetRef(datasetType=dstype, dataId=dict(camera="X", visit=visitId, physical_filter='a', abstract_filter='b'))
"""Create set of Quanta """ universe = DimensionUniverse.fromConfig() run = Run(collection=1, environment=None, pipeline=None)
descriptor = pipeBase.DatasetTypeDescriptor.fromConfig(config.input) dstype0 = descriptor.makeDatasetType(universe) descriptor = pipeBase.DatasetTypeDescriptor.fromConfig(config.output) dstype1 = descriptor.makeDatasetType(universe)
quanta = [] for visit in range(100): quantum = Quantum(run=run) quantum.addPredictedInput(self._makeDSRefVisit(dstype0, visit)) quantum.addOutput(self._makeDSRefVisit(dstype1, visit)) quanta.append(quantum)
return quanta
"""Test for AddTask.runQuantum() implementation. """ butler = ButlerMock() task = AddTask(config=AddConfig())
# make all quanta quanta = self._makeQuanta(task.config)
# add input data to butler descriptor = pipeBase.DatasetTypeDescriptor.fromConfig(task.config.input) dstype0 = descriptor.makeDatasetType(butler.registry.dimensions) for i, quantum in enumerate(quanta): ref = quantum.predictedInputs[dstype0.name][0] butler.put(100 + i, dstype0.name, ref.dataId)
# run task on each quanta for quantum in quanta: task.runQuantum(quantum, butler)
# look at the output produced by the task outputName = task.config.output.name dsdata = butler.datasets[outputName] self.assertEqual(len(dsdata), len(quanta)) for i, quantum in enumerate(quanta): ref = quantum.outputs[outputName][0] self.assertEqual(dsdata[butler.key(ref.dataId)], 100 + i + 3)
"""Test for two-task chain. """ butler = ButlerMock() task1 = AddTask(config=AddConfig()) config2 = AddConfig() config2.addend = 200 config2.input.name = task1.config.output.name config2.output.name = "add_output_2" task2 = AddTask2(config=config2)
# make all quanta quanta1 = self._makeQuanta(task1.config) quanta2 = self._makeQuanta(task2.config)
# add input data to butler descriptor = pipeBase.DatasetTypeDescriptor.fromConfig(task1.config.input) dstype0 = descriptor.makeDatasetType(butler.registry.dimensions) for i, quantum in enumerate(quanta1): ref = quantum.predictedInputs[dstype0.name][0] butler.put(100 + i, dstype0.name, ref.dataId)
# run task on each quanta for quantum in quanta1: task1.runQuantum(quantum, butler) for quantum in quanta2: task2.runQuantum(quantum, butler)
# look at the output produced by the task outputName = task1.config.output.name dsdata = butler.datasets[outputName] self.assertEqual(len(dsdata), len(quanta1)) for i, quantum in enumerate(quanta1): ref = quantum.outputs[outputName][0] self.assertEqual(dsdata[butler.key(ref.dataId)], 100 + i + 3)
outputName = task2.config.output.name dsdata = butler.datasets[outputName] self.assertEqual(len(dsdata), len(quanta2)) for i, quantum in enumerate(quanta2): ref = quantum.outputs[outputName][0] self.assertEqual(dsdata[butler.key(ref.dataId)], 100 + i + 3 + 200)
# NOTE(review): enclosing definitions dropped by the extraction — the first
# call presumably sits in a `setup_module` function (standard lsst.utils.tests
# boilerplate); confirm against the original file.
lsst.utils.tests.init()
# Presumably the body of the `if __name__ == "__main__":` guard; the trailing
# "|" is extraction junk from the coverage page, not code.
lsst.utils.tests.init() unittest.main() |