lsst.pipe.base 18.1.0-12-g3dc8cbe — source listing of connectionTypes.py
# This file is part of pipe_base.
#
# Developed for the LSST Data Management System.
# This product includes software developed by the LSST Project
# (http://www.lsst.org).
# See the COPYRIGHT file at the top-level directory of this distribution
# for details of code ownership.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Module defining connection types to be used within a
`PipelineTaskConnections` class.
"""

__all__ = ["InitInput", "InitOutput", "Input", "PrerequisiteInput",
           "Output", "BaseConnection"]

import dataclasses
import typing

from lsst.daf.butler import DatasetType, DimensionUniverse
33 
34 
@dataclasses.dataclass(frozen=True)
class BaseConnection:
    """Base class used for declaring `PipelineTask` connections.

    Parameters
    ----------
    name : `str`
        The name used to identify the dataset type.
    storageClass : `str`
        The storage class used when (un)/persisting the dataset type.
    doc : `str`
        Documentation string describing this connection.
    multiple : `bool`
        Indicates if this connection should expect to contain multiple objects
        of the given dataset type.
    """
    name: str
    storageClass: str
    doc: str = ""
    multiple: bool = False

    def __get__(self, inst, klass):
        """Descriptor method.

        This is a method used to turn a connection into a descriptor.
        When a connection is added to a connection class, it is a class level
        variable. This method makes accessing this connection, on the
        instance of the connection class owning this connection, return a
        result specialized for that instance. In the case of connections
        this specifically means names specified in a config instance will
        be visible instead of the default names for the connection.
        """
        # Accessed via the class rather than an instance: return the
        # connection itself, unspecialized.
        if inst is None:
            return self
        # Lazily create a per-instance cache of specialized connections.
        # object.__setattr__ is required because connection-class instances
        # may themselves restrict attribute assignment.
        if not hasattr(inst, '_connectionCache'):
            object.__setattr__(inst, '_connectionCache', {})
        # Reuse a previously specialized copy for this connection, if any.
        idSelf = id(self)
        if idSelf in inst._connectionCache:
            return inst._connectionCache[idSelf]
        # Collect the parameters that define this connection, then apply the
        # name override recorded on the connection-class instance.  varName
        # is the attribute name this connection was assigned to, set by the
        # owning connections class (not a dataclass field).
        params = {field.name: getattr(self, field.name)
                  for field in dataclasses.fields(self)}
        params['name'] = inst._nameOverrides[self.varName]
        # Cache and return a new instance specialized for `inst`.
        return inst._connectionCache.setdefault(idSelf, self.__class__(**params))
86 
87 
@dataclasses.dataclass(frozen=True)
class DimensionedConnection(BaseConnection):
    """Class used for declaring PipelineTask connections that includes
    dimensions.

    Parameters
    ----------
    name : `str`
        The name used to identify the dataset type.
    storageClass : `str`
        The storage class used when (un)/persisting the dataset type.
    multiple : `bool`
        Indicates if this connection should expect to contain multiple objects
        of the given dataset type.
    dimensions : iterable of `str`
        The `lsst.daf.butler.Butler` `lsst.daf.butler.Registry` dimensions used
        to identify the dataset type identified by the specified name.
    """
    dimensions: typing.Iterable[str] = ()

    def makeDatasetType(self, universe: DimensionUniverse):
        """Construct a true `DatasetType` instance with normalized dimensions.

        Parameters
        ----------
        universe : `lsst.daf.butler.DimensionUniverse`
            Set of all known dimensions to be used to normalize the dimension
            names specified in config.

        Returns
        -------
        datasetType : `DatasetType`
            The `DatasetType` defined by this connection.
        """
        return DatasetType(self.name,
                           universe.extract(self.dimensions),
                           self.storageClass)
123 
124 
@dataclasses.dataclass(frozen=True)
class BaseInput(DimensionedConnection):
    """Class used for declaring PipelineTask input connections.

    Parameters
    ----------
    name : `str`
        The name used to identify the dataset type.
    storageClass : `str`
        The storage class used when (un)/persisting the dataset type.
    multiple : `bool`
        Indicates if this connection should expect to contain multiple objects
        of the given dataset type.
    dimensions : iterable of `str`
        The `lsst.daf.butler.Butler` `lsst.daf.butler.Registry` dimensions used
        to identify the dataset type identified by the specified name.
    deferLoad : `bool`
        Indicates that this dataset type will be loaded as a
        `lsst.daf.butler.DeferredDatasetHandle`. PipelineTasks can use this
        object to load the object at a later time.
    """
    deferLoad: bool = False
147 
148 
@dataclasses.dataclass(frozen=True)
class Input(BaseInput):
    # Regular runtime input: behaves exactly as BaseInput; the distinct type
    # lets the connections machinery categorize it as a per-quantum input.
    pass
152 
153 
@dataclasses.dataclass(frozen=True)
class PrerequisiteInput(BaseInput):
    # Input that must already exist before the pipeline runs; shares all
    # BaseInput fields, distinguished only by its type.
    pass
157 
158 
@dataclasses.dataclass(frozen=True)
class Output(DimensionedConnection):
    # Per-quantum output dataset; carries dimensions but no deferLoad option.
    pass
162 
163 
@dataclasses.dataclass(frozen=True)
class InitInput(BaseConnection):
    # Dataset consumed at task construction time; dimensionless, so it
    # derives directly from BaseConnection.
    pass
167 
168 
@dataclasses.dataclass(frozen=True)
class InitOutput(BaseConnection):
    # Dataset produced at task construction time; dimensionless, so it
    # derives directly from BaseConnection.
    pass