Coverage for python/lsst/daf/butler/core/quantum.py: 36%


# This file is part of daf_butler.
#
# Developed for the LSST Data Management System.
# This product includes software developed by the LSST Project
# (http://www.lsst.org).
# See the COPYRIGHT file at the top-level directory of this distribution
# for details of code ownership.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

from __future__ import annotations

__all__ = ("Quantum",)

from typing import (
    Any,
    Iterable,
    List,
    Mapping,
    Optional,
    Tuple,
    Type,
    Union,
)

from lsst.utils import doImport

from .datasets import DatasetRef, DatasetType
from .dimensions import DataCoordinate
from .named import NamedKeyDict, NamedKeyMapping


class Quantum:
    """Class representing a discrete unit of work.

    A Quantum may depend on one or more datasets and produce one or more
    datasets.

    Most Quanta will be executions of a particular ``PipelineTask``'s
    ``runQuantum`` method, but they can also be used to represent discrete
    units of work performed manually by human operators or other software
    agents.

    Parameters
    ----------
    taskName : `str`, optional
        Fully-qualified name of the Task class that executed or will execute
        this Quantum.  If not provided, ``taskClass`` must be.
    taskClass : `type`, optional
        The Task class that executed or will execute this Quantum.  If not
        provided, ``taskName`` must be.  Overrides ``taskName`` if both are
        provided.
    dataId : `DataId`, optional
        The dimension values that identify this `Quantum`.
    initInputs : collection of `DatasetRef`, optional
        Datasets that are needed to construct an instance of the Task.  May
        be a flat iterable of `DatasetRef` instances or a mapping from
        `DatasetType` to `DatasetRef`.
    inputs : `~collections.abc.Mapping`, optional
        Inputs identified prior to execution, organized as a mapping from
        `DatasetType` to a list of `DatasetRef`.
    outputs : `~collections.abc.Mapping`, optional
        Outputs from executing this quantum of work, organized as a mapping
        from `DatasetType` to a list of `DatasetRef`.
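
    Examples
    --------
    A minimal, illustrative construction; the task name below is
    hypothetical and is not imported unless the ``taskClass`` property is
    accessed:

    >>> quantum = Quantum(taskName="some.module.SomeTask")
    >>> quantum.taskName
    'some.module.SomeTask'
    >>> print(quantum)
    Quantum(taskName=some.module.SomeTask, dataId=None)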

    """

    __slots__ = ("_taskName", "_taskClass", "_dataId", "_initInputs", "_inputs", "_outputs", "_hash")

    def __init__(self, *, taskName: Optional[str] = None,
                 taskClass: Optional[Type] = None,
                 dataId: Optional[DataCoordinate] = None,
                 initInputs: Optional[Union[Mapping[DatasetType, DatasetRef], Iterable[DatasetRef]]] = None,
                 inputs: Optional[Mapping[DatasetType, List[DatasetRef]]] = None,
                 outputs: Optional[Mapping[DatasetType, List[DatasetRef]]] = None,
                 ):
        if taskClass is not None:
            taskName = f"{taskClass.__module__}.{taskClass.__name__}"
        self._taskName = taskName
        self._taskClass = taskClass
        self._dataId = dataId
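        # Normalize ``initInputs``: a flat iterable of refs is converted to a
        # mapping keyed by dataset type, and missing containers default to
        # empty mappings.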

        if initInputs is None:
            initInputs = {}
        elif not isinstance(initInputs, Mapping):
            initInputs = {ref.datasetType: ref for ref in initInputs}
        if inputs is None:
            inputs = {}
        if outputs is None:
            outputs = {}
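        # Store everything as frozen (immutable) NamedKeyDicts so the
        # containers can be looked up by `DatasetType` or by dataset type name.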

        self._initInputs = NamedKeyDict[DatasetType, DatasetRef](initInputs).freeze()
        self._inputs = NamedKeyDict[DatasetType, List[DatasetRef]](inputs).freeze()
        self._outputs = NamedKeyDict[DatasetType, List[DatasetRef]](outputs).freeze()

    @property
    def taskClass(self) -> Optional[Type]:
        """Task class associated with this `Quantum` (`type`)."""
        if self._taskClass is None:
            self._taskClass = doImport(self._taskName)
        return self._taskClass

    @property
    def taskName(self) -> Optional[str]:
        """Return the fully-qualified name of the associated task (`str`)."""
        return self._taskName

    @property
    def dataId(self) -> Optional[DataCoordinate]:
        """Return dimension values of the unit of processing (`DataId`)."""
        return self._dataId

    @property
    def initInputs(self) -> NamedKeyMapping[DatasetType, DatasetRef]:
        """Return mapping of datasets used to construct the Task.

        Has `DatasetType` instances as keys (names can also be used for
        lookups) and `DatasetRef` instances as values.
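
        For example (illustrative only; ``"calexp"`` is a hypothetical
        dataset type name), ``quantum.initInputs["calexp"]`` and
        ``quantum.initInputs[calexpDatasetType]`` return the same
        `DatasetRef`.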

        """
        return self._initInputs

    @property
    def inputs(self) -> NamedKeyMapping[DatasetType, List[DatasetRef]]:
        """Return mapping of input datasets that were expected to be used.

        Has `DatasetType` instances as keys (names can also be used for
        lookups) and a list of `DatasetRef` instances as values.

        Notes
        -----
        We cannot use `set` instead of `list` for the nested container because
        `DatasetRef` instances cannot be compared reliably when some have
        integer IDs and others do not.
        """
        return self._inputs

    @property
    def outputs(self) -> NamedKeyMapping[DatasetType, List[DatasetRef]]:
        """Return mapping of output datasets (to be) generated by this quantum.

        Has the same form as `inputs`.

        Notes
        -----
        We cannot use `set` instead of `list` for the nested container because
        `DatasetRef` instances cannot be compared reliably when some have
        integer IDs and others do not.
        """
        return self._outputs

    def __eq__(self, other: object) -> bool:
        if not isinstance(other, Quantum):
            return False
        for item in ("taskClass", "dataId", "initInputs", "inputs", "outputs"):
            if getattr(self, item) != getattr(other, item):
                return False
        return True

    def __hash__(self) -> int:
        return hash((self.taskClass, self.dataId))

    def __reduce__(self) -> Union[str, Tuple[Any, ...]]:
        return (self._reduceFactory,
                (self.taskName, self.taskClass, self.dataId, dict(self.initInputs.items()),
                 dict(self.inputs), dict(self.outputs)))
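    # Note: because ``__reduce__`` above captures ``self.taskClass``, pickling
    # a Quantum imports the task class if it was constructed from ``taskName``
    # alone, so that name must be importable.  A minimal round-trip sketch
    # (``my_pkg.MyTask`` is a hypothetical, importable task class):
    #
    #     import pickle
    #     q = Quantum(taskName="my_pkg.MyTask")
    #     q2 = pickle.loads(pickle.dumps(q))
    #     assert q2.taskName == q.taskName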

    def __str__(self) -> str:
        return f"{self.__class__.__name__}(taskName={self.taskName}, dataId={self.dataId})"

    @staticmethod
    def _reduceFactory(taskName: Optional[str],
                       taskClass: Optional[Type],
                       dataId: Optional[DataCoordinate],
                       initInputs: Optional[Union[Mapping[DatasetType, DatasetRef], Iterable[DatasetRef]]],
                       inputs: Optional[Mapping[DatasetType, List[DatasetRef]]],
                       outputs: Optional[Mapping[DatasetType, List[DatasetRef]]]
                       ) -> Quantum:
        return Quantum(taskName=taskName, taskClass=taskClass, dataId=dataId, initInputs=initInputs,
                       inputs=inputs, outputs=outputs)