Coverage for python/lsst/ctrl/mpexec/mock_task.py: 20%

87 statements  

coverage.py v6.5.0, created at 2023-02-07 02:42 -0800

# This file is part of ctrl_mpexec.
#
# Developed for the LSST Data Management System.
# This product includes software developed by the LSST Project
# (http://www.lsst.org).
# See the COPYRIGHT file at the top-level directory of this distribution
# for details of code ownership.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

import logging
from typing import Any, List, Optional, Union

from lsst.daf.butler import Butler, DatasetRef, Quantum
from lsst.pex.config import Field
from lsst.pipe.base import (
    ButlerQuantumContext,
    DeferredDatasetRef,
    InputQuantizedConnection,
    OutputQuantizedConnection,
    PipelineTask,
    PipelineTaskConfig,
    PipelineTaskConnections,
)
from lsst.utils import doImportType
from lsst.utils.introspection import get_full_type_name

from .dataid_match import DataIdMatch

_LOG = logging.getLogger(__name__)


class MockButlerQuantumContext(ButlerQuantumContext):
    """Implementation of ButlerQuantumContext to use with a mock task.

    Parameters
    ----------
    butler : `~lsst.daf.butler.Butler`
        Data butler instance.
    quantum : `~lsst.daf.butler.Quantum`
        Execution quantum.

    Notes
    -----
    This implementation overrides the `get` method to try to retrieve the
    dataset from a mock dataset type if it exists; `get` always returns a
    dictionary. The `put` method stores the data with a mock dataset type,
    but also registers a `DatasetRef` with the registry using the original
    dataset type.
    """

    def __init__(self, butler: Butler, quantum: Quantum):
        super().__init__(butler=butler, limited=butler, quantum=quantum)
        self.butler = butler
        self.registry = butler.registry

    @classmethod
    def mockDatasetTypeName(cls, datasetTypeName: str) -> str:
        """Make a mock dataset type name from the actual dataset type name."""
        return "_mock_" + datasetTypeName

    def _get(self, ref: Optional[Union[DeferredDatasetRef, DatasetRef]]) -> Any:
        # docstring is inherited from the base class
        if ref is None:
            return None
        if isinstance(ref, DeferredDatasetRef):
            ref = ref.datasetRef
        datasetType = ref.datasetType

        typeName, component = datasetType.nameAndComponent()
        if component is not None:
            mockDatasetTypeName = self.mockDatasetTypeName(typeName)
        else:
            mockDatasetTypeName = self.mockDatasetTypeName(datasetType.name)

        try:
            mockDatasetType = self.butler.registry.getDatasetType(mockDatasetTypeName)
            ref = DatasetRef(mockDatasetType, ref.dataId)
            data = self.butler.get(ref)
        except KeyError:
            data = super()._get(ref)
            # If the input is actual non-mock data then we want to replace
            # it with provenance data, which will be stored as part of the
            # output dataset.
            data = {
                "ref": {
                    "dataId": {key.name: ref.dataId[key] for key in ref.dataId.keys()},
                    "datasetType": ref.datasetType.name,
                },
                "type": get_full_type_name(type(data)),
            }
        if component is not None:
            data.update(component=component)
        return data

    def _put(self, value: Any, ref: DatasetRef) -> None:
        # docstring is inherited from the base class

        mockDatasetType = self.registry.getDatasetType(self.mockDatasetTypeName(ref.datasetType.name))
        mockRef = DatasetRef(mockDatasetType, ref.dataId)
        value.setdefault("ref", {}).update(datasetType=mockDatasetType.name)
        self.butler.put(value, mockRef)

        # Also "store" the non-mock ref, making sure it is not resolved.
        self.registry._importDatasets([ref.unresolved()])

    def _checkMembership(self, ref: Union[List[DatasetRef], DatasetRef], inout: set) -> None:
        # docstring is inherited from the base class
        return
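
# --- Editorial illustration (not part of ctrl_mpexec) -----------------------
# A minimal sketch of how this mock context might be used during mock
# execution, assuming a configured `butler`, a `quantum`, and a single input
# `ref` are already available; "calexp" is a hypothetical dataset type name.
def _example_mock_context_usage(butler: Butler, quantum: Quantum, ref: DatasetRef) -> dict:
    butlerQC = MockButlerQuantumContext(butler, quantum)
    # Mock dataset type names are derived by prefixing the real name.
    assert MockButlerQuantumContext.mockDatasetTypeName("calexp") == "_mock_calexp"
    # `get` always returns a dictionary: either previously stored mock data,
    # or a provenance summary describing a real, non-mock input.
    return butlerQC.get(ref)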


class MockPipelineTaskConfig(PipelineTaskConfig, pipelineConnections=PipelineTaskConnections):
    failCondition: Field[str] = Field(
        dtype=str,
        default="",
        doc=(
            "Condition on the DataId to raise an exception. A string expression that includes "
            "attributes of the quantum DataId, using the daf_butler user-expression syntax "
            "(e.g. 'visit = 123')."
        ),
    )

    failException: Field[str] = Field(
        dtype=str,
        default="builtins.ValueError",
        doc=(
            "Class name of the exception to raise when the fail condition is triggered. Can be "
            "'lsst.pipe.base.NoWorkFound' to specify a non-failure exception."
        ),
    )

    def dataIdMatch(self) -> Optional[DataIdMatch]:
        if not self.failCondition:
            return None
        return DataIdMatch(self.failCondition)
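
# Editorial illustration (not part of ctrl_mpexec): a sketch of configuring a
# mock task to simulate a failure on one quantum.  The expression and the
# exception class below are examples only; any daf_butler user expression and
# any importable exception class should work.
def _example_fail_config() -> MockPipelineTaskConfig:
    config = MockPipelineTaskConfig()
    config.failCondition = "visit = 123"
    config.failException = "lsst.pipe.base.NoWorkFound"
    return config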


class MockPipelineTask(PipelineTask):
    """Implementation of PipelineTask used for running a mock pipeline.

    Notes
    -----
    This class overrides `runQuantum` to read all input datasetRefs and to
    store a simple dictionary as output data. The output dictionary contains
    some provenance data about the inputs, the task that produced it, and the
    corresponding quantum. This class depends on `MockButlerQuantumContext`,
    which knows how to store the output dictionary data with special dataset
    types.
    """

    ConfigClass = MockPipelineTaskConfig

    def __init__(self, *, config: Optional[MockPipelineTaskConfig] = None, **kwargs: Any):
        super().__init__(config=config, **kwargs)

        self.failException: Optional[type] = None
        self.dataIdMatch: Optional[DataIdMatch] = None
        if config is not None:
            self.dataIdMatch = config.dataIdMatch()
            if self.dataIdMatch:
                self.failException = doImportType(config.failException)

    def runQuantum(
        self,
        butlerQC: ButlerQuantumContext,
        inputRefs: InputQuantizedConnection,
        outputRefs: OutputQuantizedConnection,
    ) -> None:
        # docstring is inherited from the base class
        quantum = butlerQC.quantum

        _LOG.info("Mocking execution of task '%s' on quantum %s", self.getName(), quantum.dataId)

        assert quantum.dataId is not None, "Quantum DataId cannot be None"

        # Possibly raise an exception.
        if self.dataIdMatch is not None and self.dataIdMatch.match(quantum.dataId):
            _LOG.info("Simulating failure of task '%s' on quantum %s", self.getName(), quantum.dataId)
            message = f"Simulated failure: task={self.getName()} dataId={quantum.dataId}"
            assert self.failException is not None, "Exception type must be defined"
            raise self.failException(message)

        # Read all inputs.
        inputs = butlerQC.get(inputRefs)

        _LOG.info("Read input data for task '%s' on quantum %s", self.getName(), quantum.dataId)

        # To avoid very deep provenance we trim inputs to a single level.
        for name, data in inputs.items():
            if isinstance(data, dict):
                data = [data]
            if isinstance(data, list):
                for item in data:
                    qdata = item.get("quantum", {})
                    qdata.pop("inputs", None)

        # Store mock outputs.
        for name, refs in outputRefs:
            if not isinstance(refs, list):
                refs = [refs]
            for ref in refs:
                data = {
                    "ref": {
                        "dataId": {key.name: ref.dataId[key] for key in ref.dataId.keys()},
                        "datasetType": ref.datasetType.name,
                    },
                    "quantum": {
                        "task": self.getName(),
                        "dataId": {key.name: quantum.dataId[key] for key in quantum.dataId.keys()},
                        "inputs": inputs,
                    },
                    "outputName": name,
                }
                butlerQC.put(data, ref)

        _LOG.info("Finished mocking task '%s' on quantum %s", self.getName(), quantum.dataId)
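
# Editorial illustration (not part of ctrl_mpexec): the approximate shape of
# the dictionary stored for each output ref by runQuantum above.  All values
# here are invented; MockButlerQuantumContext._put later rewrites the
# "datasetType" entry to the "_mock_" name before writing.
_EXAMPLE_MOCK_OUTPUT = {
    "ref": {"dataId": {"instrument": "TestCam", "visit": 123}, "datasetType": "calexp"},
    "quantum": {
        "task": "mockTask",
        "dataId": {"instrument": "TestCam", "visit": 123},
        "inputs": {},  # nested provenance, trimmed to a single level
    },
    "outputName": "calexp",
}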