# Public API of this test-utility module.
__all__ = [
    "makeQuantum",
    "runTestQuantum",
    "assertValidOutput",
]
import collections.abc
import unittest.mock

from lsst.daf.butler import DataCoordinate, DatasetRef, Quantum, StorageClassFactory
from lsst.pipe.base import ButlerQuantumContext
def makeQuantum(task, butler, dataIds):
    """Create a Quantum for a particular data ID(s).

    Parameters
    ----------
    task : `lsst.pipe.base.PipelineTask`
        The task whose processing the quantum represents.
    butler : `lsst.daf.butler.Butler`
        The collection the quantum refers to.
    dataIds : `collections.abc.Mapping` [`str`]
        A mapping keyed by input/output names. Values must be data IDs for
        single connections and sequences of data IDs for multiple connections.

    Returns
    -------
    quantum : `lsst.daf.butler.Quantum`
        A quantum for ``task``, when called with ``dataIds``.

    Raises
    ------
    ValueError
        Raised if ``dataIds`` is missing a connection name or provides the
        wrong multiplicity for a connection.
    """
    quantum = Quantum(taskClass=type(task))
    connections = task.config.ConnectionsClass(config=task.config)

    try:
        # Register one predicted input per data ID for every input connection.
        for name in connections.inputs:
            connection = connections.__getattribute__(name)
            _checkDataIdMultiplicity(name, dataIds[name], connection.multiple)
            ids = _normalizeDataIds(dataIds[name])
            for dataId in ids:
                quantum.addPredictedInput(_refFromConnection(butler, connection, dataId))
        # Likewise one output per data ID for every output connection.
        for name in connections.outputs:
            connection = connections.__getattribute__(name)
            _checkDataIdMultiplicity(name, dataIds[name], connection.multiple)
            ids = _normalizeDataIds(dataIds[name])
            for dataId in ids:
                quantum.addOutput(_refFromConnection(butler, connection, dataId))
        return quantum
    except KeyError as e:
        # A missing key means dataIds does not cover all connections.
        raise ValueError("Mismatch in input data.") from e
72 def _checkDataIdMultiplicity(name, dataIds, multiple):
73 """Test whether data IDs are scalars for scalar connections and sequences 74 for multiple connections. 79 The name of the connection being tested. 80 dataIds : any data ID type or `~collections.abc.Sequence` [data ID] 81 The data ID(s) provided for the connection. 83 The ``multiple`` field of the connection. 88 Raised if ``dataIds`` and ``multiple`` do not match. 91 if not isinstance(dataIds, collections.abc.Sequence):
92 raise ValueError(f
"Expected multiple data IDs for {name}, got {dataIds}.")
95 if not isinstance(dataIds, collections.abc.Mapping):
96 raise ValueError(f
"Expected single data ID for {name}, got {dataIds}.")
99 def _normalizeDataIds(dataIds):
100 """Represent both single and multiple data IDs as a list. 104 dataIds : any data ID type or `~collections.abc.Sequence` thereof 105 The data ID(s) provided for a particular input or output connection. 109 normalizedIds : `~collections.abc.Sequence` [data ID] 110 A sequence equal to ``dataIds`` if it was already a sequence, or 111 ``[dataIds]`` if it was a single ID. 113 if isinstance(dataIds, collections.abc.Sequence):
def _refFromConnection(butler, connection, dataId, **kwargs):
    """Create a DatasetRef for a connection in a collection.

    Parameters
    ----------
    butler : `lsst.daf.butler.Butler`
        The collection to point to.
    connection : `lsst.pipe.base.connectionTypes.DimensionedConnection`
        The connection defining the dataset type to point to.
    dataId
        The data ID for the dataset to point to.
    **kwargs
        Additional keyword arguments used to augment or construct
        a `~lsst.daf.butler.DataCoordinate`.

    Returns
    -------
    ref : `lsst.daf.butler.DatasetRef`
        A reference to a dataset compatible with ``connection``, with ID
        ``dataId``, in the collection pointed to by ``butler``.

    Raises
    ------
    ValueError
        Raised if the connection's dataset type is not registered, or if
        ``dataId`` is not compatible with it.
    """
    universe = butler.registry.dimensions
    dataId = DataCoordinate.standardize(dataId, **kwargs, universe=universe)
    datasetType = connection.makeDatasetType(universe)

    try:
        # Raises KeyError if the dataset type has not been registered.
        butler.registry.getDatasetType(datasetType.name)
    except KeyError as e:
        raise ValueError(f"Invalid dataset type {connection.name}.") from e
    try:
        ref = DatasetRef(datasetType=datasetType, dataId=dataId)
        return ref
    except KeyError as e:
        raise ValueError(
            f"Dataset type ({connection.name}) and ID {dataId.byName()} not compatible.") from e
def runTestQuantum(task, butler, quantum, mockRun=True):
    """Run a PipelineTask on a Quantum.

    Parameters
    ----------
    task : `lsst.pipe.base.PipelineTask`
        The task to run on the quantum.
    butler : `lsst.daf.butler.Butler`
        The collection to run on.
    quantum : `lsst.daf.butler.Quantum`
        The quantum to run.
    mockRun : `bool`
        Whether or not to replace ``task``'s ``run`` method. The default of
        `True` is recommended unless ``run`` needs to do real work (e.g.,
        because the test needs real output datasets).

    Returns
    -------
    run : `unittest.mock.Mock` or `None`
        If ``mockRun`` is set, the mock that replaced ``run``. This object can
        be queried for the arguments ``runQuantum`` passed to ``run``.
    """
    # NOTE(review): reconstructed — runQuantum below requires a quantum
    # context built from (butler, quantum); confirm constructor signature
    # against the installed lsst.pipe.base version.
    butlerQc = ButlerQuantumContext(butler, quantum)
    connections = task.config.ConnectionsClass(config=task.config)
    inputRefs, outputRefs = connections.buildDatasetRefs(quantum)
    if mockRun:
        # Patch out both run() and the butler put() so no real datasets are
        # written; the mock records the arguments runQuantum passed to run().
        with unittest.mock.patch.object(task, "run") as mock, \
                unittest.mock.patch("lsst.pipe.base.ButlerQuantumContext.put"):
            task.runQuantum(butlerQc, inputRefs, outputRefs)
            return mock
    else:
        task.runQuantum(butlerQc, inputRefs, outputRefs)
        return None
def assertValidOutput(task, result):
    """Test that the output of a call to ``run`` conforms to its own
    connections.

    Parameters
    ----------
    task : `lsst.pipe.base.PipelineTask`
        The task whose connections need validation. This is a fully-configured
        task object to support features such as optional outputs.
    result : `lsst.pipe.base.Struct`
        A result object produced by calling ``task.run``.

    Raises
    ------
    AssertionError
        Raised if ``result`` does not match what's expected from ``task's``
        connections.
    """
    connections = task.config.ConnectionsClass(config=task.config)
    recoveredOutputs = result.getDict()

    for name in connections.outputs:
        connection = connections.__getattribute__(name)
        # Every declared output must be present in the result struct.
        try:
            output = recoveredOutputs[name]
        except KeyError:
            raise AssertionError(f"No such output: {name}")
        if connection.multiple:
            # Multiple-valued connections must produce sequences.
            if not isinstance(output, collections.abc.Sequence):
                raise AssertionError(f"Expected {name} to be a sequence, got {output} instead.")
        else:
            # A sequence is only acceptable for a scalar connection when the
            # storage class itself is a sequence type; short-circuit so the
            # StorageClassFactory is consulted only when needed.
            if isinstance(output, collections.abc.Sequence) \
                    and not issubclass(
                        StorageClassFactory().getStorageClass(connection.storageClass).pytype,
                        collections.abc.Sequence):
                raise AssertionError(f"Expected {name} to be a single value, got {output} instead.")