22 """Module defining connection types to be used within a
23 `PipelineTaskConnections` class.
# Names exported by ``from ... import *``: the connection classes that may
# be used when declaring `PipelineTaskConnections` class attributes.
__all__ = ["InitInput", "InitOutput", "Input", "PrerequisiteInput", "Output", "BaseConnection"]
31 from typing
import Callable, Iterable, Optional
33 from lsst.daf.butler
import (
44 @dataclasses.dataclass(frozen=
True)
46 """Base class used for declaring PipelineTask connections
51 The name used to identify the dataset type
53 The storage class used when (un)/persisting the dataset type
55 Indicates if this connection should expect to contain multiple objects
56 of the given dataset type
61 multiple: bool =
False
66 This is a method used to turn a connection into a descriptor.
67 When a connection is added to a connection class, it is a class level
68 variable. This method makes accessing this connection, on the
69 instance of the connection class owning this connection, return a
70 result specialized for that instance. In the case of connections
71 this specifically means names specified in a config instance will
72 be visible instead of the default names for the connection.
80 if not hasattr(inst,
'_connectionCache'):
81 object.__setattr__(inst,
'_connectionCache', {})
84 if idSelf
in inst._connectionCache:
85 return inst._connectionCache[idSelf]
88 for field
in dataclasses.fields(self):
89 params[field.name] = getattr(self, field.name)
91 params[
'name'] = inst._nameOverrides[self.varName]
94 return inst._connectionCache.setdefault(idSelf, self.__class__(**params))
97 parentStorageClass: Optional[StorageClass] =
None):
98 """Construct a true `DatasetType` instance with normalized dimensions.
101 universe : `lsst.daf.butler.DimensionUniverse`
102 Set of all known dimensions to be used to normalize the dimension
103 names specified in config.
104 parentStorageClass : `lsst.daf.butler.StorageClass`, optional
105 Parent storage class for component datasets; `None` otherwise.
109 datasetType : `DatasetType`
110 The `DatasetType` defined by this connection.
112 return DatasetType(self.name,
115 parentStorageClass=parentStorageClass)
118 @dataclasses.dataclass(frozen=
True)
120 """Class used for declaring PipelineTask connections that includes
126 The name used to identify the dataset type
128 The storage class used when (un)/persisting the dataset type
130 Indicates if this connection should expect to contain multiple objects
131 of the given dataset type
132 dimensions : iterable of `str`
133 The `lsst.daf.butler.Butler` `lsst.daf.butler.Registry` dimensions used
134 to identify the dataset type identified by the specified name
135 isCalibration: `bool`, optional
136 `True` if this dataset type may be included in CALIBRATION-type
137 collections to associate it with a validity range, `False` (default)
140 dimensions: typing.Iterable[str] = ()
141 isCalibration: bool =
False
144 if isinstance(self.dimensions, str):
145 raise TypeError(
"Dimensions must be iterable of dimensions, got str,"
146 "possibly omitted trailing comma")
147 if not isinstance(self.dimensions, typing.Iterable):
148 raise TypeError(
"Dimensions must be iterable of dimensions")
151 parentStorageClass: Optional[StorageClass] =
None):
152 """Construct a true `DatasetType` instance with normalized dimensions.
155 universe : `lsst.daf.butler.DimensionUniverse`
156 Set of all known dimensions to be used to normalize the dimension
157 names specified in config.
158 parentStorageClass : `lsst.daf.butler.StorageClass`, optional
159 Parent storage class for component datasets; `None` otherwise.
163 datasetType : `DatasetType`
164 The `DatasetType` defined by this connection.
166 return DatasetType(self.name,
167 universe.extract(self.dimensions),
168 self.storageClass, isCalibration=self.isCalibration,
169 parentStorageClass=parentStorageClass)
172 @dataclasses.dataclass(frozen=
True)
174 """Class used for declaring PipelineTask input connections
179 The default name used to identify the dataset type
181 The storage class used when (un)/persisting the dataset type
183 Indicates if this connection should expect to contain multiple objects
184 of the given dataset type
185 dimensions : iterable of `str`
186 The `lsst.daf.butler.Butler` `lsst.daf.butler.Registry` dimensions used
187 to identify the dataset type identified by the specified name
189 Indicates that this dataset type will be loaded as a
190 `lsst.daf.butler.DeferredDatasetHandle`. PipelineTasks can use this
191 object to load the object at a later time.
193 deferLoad: bool =
False
196 @dataclasses.dataclass(frozen=
True)
201 @dataclasses.dataclass(frozen=
True)
203 """Class used for declaring PipelineTask prerequisite connections
208 The default name used to identify the dataset type
210 The storage class used when (un)/persisting the dataset type
212 Indicates if this connection should expect to contain multiple objects
213 of the given dataset type
214 dimensions : iterable of `str`
215 The `lsst.daf.butler.Butler` `lsst.daf.butler.Registry` dimensions used
216 to identify the dataset type identified by the specified name
218 Indicates that this dataset type will be loaded as a
219 `lsst.daf.butler.DeferredDatasetHandle`. PipelineTasks can use this
220 object to load the object at a later time.
221 lookupFunction: `typing.Callable`, optional
222 An optional callable function that will look up PrerequisiteInputs
223 using the DatasetType, registry, quantum dataId, and input collections
224 passed to it. If no function is specified, the default temporal spatial
227 lookupFunction: Optional[Callable[[DatasetType, Registry, DataCoordinate, CollectionSearch],
228 Iterable[DatasetRef]]] =
None
231 @dataclasses.dataclass(frozen=
True)
236 @dataclasses.dataclass(frozen=
True)
241 @dataclasses.dataclass(frozen=
True)