22 """Module defining connection types to be used within a
23 `PipelineTaskConnections` class.
# Public API of this module: the connection types usable inside a
# `PipelineTaskConnections` declaration.
__all__ = [
    "InitInput",
    "InitOutput",
    "Input",
    "PrerequisiteInput",
    "Output",
    "BaseConnection",
]
31 from typing
import Callable, Iterable, Optional
33 from lsst.daf.butler
import (
44 @dataclasses.dataclass(frozen=
True)
46 """Base class used for declaring PipelineTask connections
51 The name used to identify the dataset type
53 The storage class used when (un)/persisting the dataset type
55 Indicates if this connection should expect to contain multiple objects
56 of the given dataset type
61 multiple: bool =
False
66 This is a method used to turn a connection into a descriptor.
67 When a connection is added to a connection class, it is a class level
68 variable. This method makes accessing this connection, on the
69 instance of the connection class owning this connection, return a
70 result specialized for that instance. In the case of connections
71 this specifically means names specified in a config instance will
72 be visible instead of the default names for the connection.
80 if not hasattr(inst,
'_connectionCache'):
81 object.__setattr__(inst,
'_connectionCache', {})
84 if idSelf
in inst._connectionCache:
85 return inst._connectionCache[idSelf]
88 for field
in dataclasses.fields(self):
89 params[field.name] = getattr(self, field.name)
91 params[
'name'] = inst._nameOverrides[self.varName]
94 return inst._connectionCache.setdefault(idSelf, self.__class__(**params))
97 parentStorageClass: Optional[StorageClass] =
None):
98 """Construct a true `DatasetType` instance with normalized dimensions.
102 universe : `lsst.daf.butler.DimensionUniverse`
103 Set of all known dimensions to be used to normalize the dimension
104 names specified in config.
105 parentStorageClass : `lsst.daf.butler.StorageClass`, optional
106 Parent storage class for component datasets; `None` otherwise.
110 datasetType : `DatasetType`
111 The `DatasetType` defined by this connection.
113 return DatasetType(self.name,
116 parentStorageClass=parentStorageClass)
119 @dataclasses.dataclass(frozen=
True)
121 """Class used for declaring PipelineTask connections that includes
127 The name used to identify the dataset type
129 The storage class used when (un)/persisting the dataset type
131 Indicates if this connection should expect to contain multiple objects
132 of the given dataset type
133 dimensions : iterable of `str`
134 The `lsst.daf.butler.Butler` `lsst.daf.butler.Registry` dimensions used
135 to identify the dataset type identified by the specified name
136 isCalibration: `bool`, optional
137 `True` if this dataset type may be included in CALIBRATION-type
138 collections to associate it with a validity range, `False` (default)
141 dimensions: typing.Iterable[str] = ()
142 isCalibration: bool =
False
145 if isinstance(self.dimensions, str):
146 raise TypeError(
"Dimensions must be iterable of dimensions, got str,"
147 "possibly omitted trailing comma")
148 if not isinstance(self.dimensions, typing.Iterable):
149 raise TypeError(
"Dimensions must be iterable of dimensions")
152 parentStorageClass: Optional[StorageClass] =
None):
153 """Construct a true `DatasetType` instance with normalized dimensions.
157 universe : `lsst.daf.butler.DimensionUniverse`
158 Set of all known dimensions to be used to normalize the dimension
159 names specified in config.
160 parentStorageClass : `lsst.daf.butler.StorageClass`, optional
161 Parent storage class for component datasets; `None` otherwise.
165 datasetType : `DatasetType`
166 The `DatasetType` defined by this connection.
168 return DatasetType(self.name,
169 universe.extract(self.dimensions),
170 self.storageClass, isCalibration=self.isCalibration,
171 parentStorageClass=parentStorageClass)
174 @dataclasses.dataclass(frozen=
True)
176 """Class used for declaring PipelineTask input connections
181 The default name used to identify the dataset type
183 The storage class used when (un)/persisting the dataset type
185 Indicates if this connection should expect to contain multiple objects
186 of the given dataset type
187 dimensions : iterable of `str`
188 The `lsst.daf.butler.Butler` `lsst.daf.butler.Registry` dimensions used
189 to identify the dataset type identified by the specified name
191 Indicates that this dataset type will be loaded as a
192 `lsst.daf.butler.DeferredDatasetHandle`. PipelineTasks can use this
193 object to load the object at a later time.
195 deferLoad: bool =
False
198 @dataclasses.dataclass(frozen=
True)
203 @dataclasses.dataclass(frozen=
True)
205 """Class used for declaring PipelineTask prerequisite connections
210 The default name used to identify the dataset type
212 The storage class used when (un)/persisting the dataset type
214 Indicates if this connection should expect to contain multiple objects
215 of the given dataset type
216 dimensions : iterable of `str`
217 The `lsst.daf.butler.Butler` `lsst.daf.butler.Registry` dimensions used
218 to identify the dataset type identified by the specified name
220 Indicates that this dataset type will be loaded as a
221 `lsst.daf.butler.DeferredDatasetHandle`. PipelineTasks can use this
222 object to load the object at a later time.
223 lookupFunction: `typing.Callable`, optional
224 An optional callable function that will look up PrerequisiteInputs
225 using the DatasetType, registry, quantum dataId, and input collections
226 passed to it. If no function is specified, the default temporal spatial
229 lookupFunction: Optional[Callable[[DatasetType, Registry, DataCoordinate, CollectionSearch],
230 Iterable[DatasetRef]]] =
None
233 @dataclasses.dataclass(frozen=
True)
238 @dataclasses.dataclass(frozen=
True)
243 @dataclasses.dataclass(frozen=
True)