22 """Module defining connection types to be used within a 23 `PipelineTaskConnections` class. 26 __all__ = [
"InitInput",
"InitOutput",
"Input",
"PrerequisiteInput",
27 "Output",
"BaseConnection"]
32 from lsst.daf.butler
import DatasetType, DimensionUniverse
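# Illustrative sketch (not part of this module) of how these connection types
# are typically declared on a `PipelineTaskConnections` subclass; the subclass
# name, dataset type names, and dimensions below are assumptions, not fixed
# by this module:
#
#     from lsst.pipe.base import PipelineTaskConnections
#
#     class ExampleConnections(PipelineTaskConnections,
#                              dimensions=("instrument", "visit", "detector")):
#         calexp = Input(name="calexp",
#                        storageClass="ExposureF",
#                        dimensions=("instrument", "visit", "detector"))
#         catalog = Output(name="exampleCatalog",
#                          storageClass="SourceCatalog",
#                          dimensions=("instrument", "visit", "detector"))
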
@dataclasses.dataclass(frozen=True)
class BaseConnection:
    """Base class used for declaring PipelineTask connections

    Parameters
    ----------
    name : `str`
        The name used to identify the dataset type
    storageClass : `str`
        The storage class used when (un)persisting the dataset type
    multiple : `bool`
        Indicates if this connection should expect to contain multiple
        objects of the given dataset type
    """
    name: str
    storageClass: str
    multiple: bool = False

    def __get__(self, inst, klass):
        """This is a method used to turn a connection into a descriptor.
        When a connection is added to a connection class, it is a class level
        variable. This method makes accessing this connection, on the
        instance of the connection class owning this connection, return a
        result specialized for that instance. In the case of connections
        this specifically means names specified in a config instance will
        be visible instead of the default names for the connection.
        """
        # Class-level access (inst is None) returns the connection itself
        if inst is None:
            return self
        # Lazily create a per-instance cache of specialized connections
        if not hasattr(inst, '_connectionCache'):
            object.__setattr__(inst, '_connectionCache', {})
        # Reuse a previously specialized copy when one exists
        idSelf = id(self)
        if idSelf in inst._connectionCache:
            return inst._connectionCache[idSelf]
        # Copy this connection's fields, substituting the dataset type name
        # override recorded for the attribute (varName) this connection
        # occupies on its owning connections class
        params = {}
        for field in dataclasses.fields(self):
            params[field.name] = getattr(self, field.name)
        params['name'] = inst._nameOverrides[self.varName]
        # Cache and return the specialized connection
        return inst._connectionCache.setdefault(idSelf, self.__class__(**params))
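# Illustrative sketch (assumed names and config) of the behaviour implemented
# by `BaseConnection.__get__` above: reading a connection through an instance
# of its owning connections class returns a copy whose `name` is taken from
# that instance's `_nameOverrides`, typically populated from configuration:
#
#     connections = ExampleConnections(config=config)  # hypothetical instance
#     connections.calexp.name  # the config-overridden name, not the default
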
@dataclasses.dataclass(frozen=True)
class DimensionedConnection(BaseConnection):
    """Class used for declaring PipelineTask connections that include
    dimensions

    Parameters
    ----------
    name : `str`
        The name used to identify the dataset type
    storageClass : `str`
        The storage class used when (un)persisting the dataset type
    multiple : `bool`
        Indicates if this connection should expect to contain multiple
        objects of the given dataset type
    dimensions : iterable of `str`
        The `lsst.daf.butler.Registry` dimensions used to identify the
        dataset type named by this connection
    """
    dimensions: typing.Iterable[str] = ()

    def makeDatasetType(self, universe: DimensionUniverse):
        """Construct a true `DatasetType` instance with normalized dimensions.

        Parameters
        ----------
        universe : `lsst.daf.butler.DimensionUniverse`
            Set of all known dimensions to be used to normalize the dimension
            names specified in config.

        Returns
        -------
        datasetType : `DatasetType`
            The `DatasetType` defined by this connection.
        """
        return DatasetType(self.name,
                           universe.extract(self.dimensions),
                           self.storageClass)
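# Illustrative sketch (assumed butler object) of turning a dimensioned
# connection into a registry-ready dataset type with `makeDatasetType`;
# `butler.registry.dimensions` is the `DimensionUniverse` of all known
# dimensions:
#
#     connection = Output(name="exampleCatalog",
#                         storageClass="SourceCatalog",
#                         dimensions=("instrument", "visit", "detector"))
#     datasetType = connection.makeDatasetType(butler.registry.dimensions)
#     butler.registry.registerDatasetType(datasetType)
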
@dataclasses.dataclass(frozen=True)
class BaseInput(DimensionedConnection):
    """Class used for declaring PipelineTask input connections

    Parameters
    ----------
    name : `str`
        The name used to identify the dataset type
    storageClass : `str`
        The storage class used when (un)persisting the dataset type
    multiple : `bool`
        Indicates if this connection should expect to contain multiple
        objects of the given dataset type
    dimensions : iterable of `str`
        The `lsst.daf.butler.Registry` dimensions used to identify the
        dataset type named by this connection
    deferLoad : `bool`
        Indicates that this dataset type will be loaded as a
        `lsst.daf.butler.DeferredDatasetHandle`. PipelineTasks can use this
        handle to load the object at a later time.
    """
    deferLoad: bool = False


@dataclasses.dataclass(frozen=True)
class Input(BaseInput):
    pass


@dataclasses.dataclass(frozen=True)
class PrerequisiteInput(BaseInput):
    pass


@dataclasses.dataclass(frozen=True)
class Output(DimensionedConnection):
    pass


@dataclasses.dataclass(frozen=True)
class InitInput(BaseConnection):
    pass


@dataclasses.dataclass(frozen=True)
class InitOutput(BaseConnection):
    pass
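# Illustrative sketch (assumed task code) of what `deferLoad=True` implies for
# an input connection: the task receives a
# `lsst.daf.butler.DeferredDatasetHandle` rather than the loaded object, and
# calls `get()` on it only when (and if) the data are actually needed:
#
#     exposures = Input(name="calexp",
#                       storageClass="ExposureF",
#                       dimensions=("instrument", "visit", "detector"),
#                       multiple=True,
#                       deferLoad=True)
#
#     # inside PipelineTask.run(): handle.get() performs the deferred read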