Coverage for tests/test_quantum.py: 15% (78 statements)
coverage.py v7.5.0, report created at 2024-05-03 02:48 -0700

# This file is part of daf_butler.
#
# Developed for the LSST Data Management System.
# This product includes software developed by the LSST Project
# (http://www.lsst.org).
# See the COPYRIGHT file at the top-level directory of this distribution
# for details of code ownership.
#
# This software is dual licensed under the GNU General Public License and also
# under a 3-clause BSD license. Recipients may choose which of these licenses
# to use; please see the files gpl-3.0.txt and/or bsd_license.txt,
# respectively. If you choose the GPL option then the following text applies
# (but note that there is still no warranty even if you opt for BSD instead):
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

27 

import json
import unittest
from collections.abc import Iterable

from lsst.daf.butler import (
    DataCoordinate,
    DatasetRef,
    DatasetType,
    DimensionRecordsAccumulator,
    DimensionUniverse,
    NamedKeyDict,
    Quantum,
    SerializedQuantum,
    StorageClass,
)
from lsst.sphgeom import Circle

# NOTE(review): because this string appears after the imports it is a plain
# expression statement, not the module docstring (``__doc__`` stays unset).
"""Tests for Quantum.
"""

47 

48 

class MockTask:
    """Mock task for testing.

    Exists only so tests can derive a realistic ``module.qualname`` task
    name; it carries no behavior of its own.
    """

53 

54 

class QuantumTestCase(unittest.TestCase):
    """Test for Quantum."""

    def _buildFullQuantum(self, taskName, addRecords=False) -> tuple[Quantum, Iterable[DatasetType]]:
        """Construct a fully populated Quantum for the tests.

        Parameters
        ----------
        taskName : `str`
            Fully qualified task name to store on the `Quantum`.
        addRecords : `bool`, optional
            If `True`, expand the two data IDs with dimension records
            before they are attached to the dataset refs.

        Returns
        -------
        quantum : `Quantum`
            Quantum with one init-input, two inputs, and two outputs.
        datasetTypes : `list` [ `DatasetType` ]
            The init, input, and output dataset types, in that order.
        """
        universe = DimensionUniverse()
        datasetTypeNameInit = "test_ds_init"
        datasetTypeNameInput = "test_ds_input"
        datasetTypeNameOutput = "test_ds_output"

        storageClass = StorageClass("testref_StructuredData")

        # Build one dimension record per dimension involved in the
        # instrument/visit data IDs used below.
        instrument = universe["instrument"]
        instrumentRecord = instrument.RecordClass(name="test")

        band = universe["band"]
        bandRecord = band.RecordClass(name="r")

        physical_filter = universe["physical_filter"]
        physical_filter_record = physical_filter.RecordClass(name="r", instrument="test", band="r")

        day_obs = universe["day_obs"]
        day_obs_record = day_obs.RecordClass(instrument="test", id=20250101)

        visit_system = universe["visit_system"]
        visit_system_record = visit_system.RecordClass(id=9, instrument="test", name="test_visit_system")

        visit = universe["visit"]
        region = Circle()
        # create a synthetic value to mock as a visit hash
        visit_record_42 = visit.RecordClass(
            id=42,
            instrument="test",
            name="test_visit",
            physical_filter="r",
            region=region,
            day_obs=20250101,
        )
        visit_record_43 = visit.RecordClass(
            id=43,
            instrument="test",
            name="test_visit",
            physical_filter="r",
            region=region,
            day_obs=20250101,
        )

        # Per-visit record mappings; identical except for the visit record.
        records42 = {
            instrument: instrumentRecord,
            band: bandRecord,
            physical_filter: physical_filter_record,
            visit_system: visit_system_record,
            visit: visit_record_42,
            day_obs: day_obs_record,
        }

        records43 = {
            instrument: instrumentRecord,
            band: bandRecord,
            physical_filter: physical_filter_record,
            visit_system: visit_system_record,
            visit: visit_record_43,
            day_obs: day_obs_record,
        }

        dataId42 = DataCoordinate.standardize(
            dict(instrument="test", visit=42),
            universe=universe,  # type: ignore
        )
        dataId43 = DataCoordinate.standardize(
            dict(instrument="test", visit=43),
            universe=universe,  # type: ignore
        )

        if addRecords:
            # Attach the dimension records to the data IDs so they are
            # carried through serialization.
            dataId42 = dataId42.expanded(records42)  # type: ignore
            dataId43 = dataId43.expanded(records43)  # type: ignore

        # All three dataset types share the same dimensions and storage class.
        datasetTypeInit = DatasetType(
            datasetTypeNameInit, universe.conform(("instrument", "visit")), storageClass
        )
        datasetTypeInput = DatasetType(
            datasetTypeNameInput, universe.conform(("instrument", "visit")), storageClass
        )
        datasetTypeOutput = DatasetType(
            datasetTypeNameOutput, universe.conform(("instrument", "visit")), storageClass
        )
        predictedInputs = {
            datasetTypeInput: [
                DatasetRef(datasetTypeInput, dataId42, run="input"),
                DatasetRef(datasetTypeInput, dataId43, run="input"),
            ]
        }
        # The two outputs deliberately use different run names.
        outputs = {
            datasetTypeOutput: [
                DatasetRef(datasetTypeOutput, dataId42, run="some_run"),
                DatasetRef(datasetTypeOutput, dataId43, run="other_run"),
            ]
        }
        initInputs = {datasetTypeInit: DatasetRef(datasetTypeInit, dataId42, run="input_run")}

        return Quantum(taskName=taskName, inputs=predictedInputs, outputs=outputs, initInputs=initInputs), [
            datasetTypeInit,
            datasetTypeInput,
            datasetTypeOutput,
        ]

    def testConstructor(self):
        """Test of constructor."""
        # Quantum specific arguments
        taskName = "some.task.object"  # can't use a real PipelineTask due to inverted package dependency

        # A Quantum built with only a task name has empty containers and
        # no data ID.
        quantum = Quantum(taskName=taskName)
        self.assertEqual(quantum.taskName, taskName)
        self.assertEqual(quantum.initInputs, {})
        self.assertEqual(quantum.inputs, NamedKeyDict())
        self.assertEqual(quantum.outputs, {})
        self.assertIsNone(quantum.dataId)

        # A fully built Quantum exposes the two inputs and two outputs
        # created by the helper.
        quantum, (_, datasetTypeInput, datasetTypeOutput) = self._buildFullQuantum(taskName)
        self.assertEqual(len(quantum.inputs[datasetTypeInput]), 2)
        self.assertEqual(len(quantum.outputs[datasetTypeOutput]), 2)

    def testSerialization(self):
        """Test round-tripping a Quantum through its serialized forms."""
        taskName = f"{MockTask.__module__}.{MockTask.__qualname__}"
        # from simple w/o records
        quantum, _ = self._buildFullQuantum(taskName)
        serialized = quantum.to_simple()
        self.assertEqual(quantum, quantum.from_simple(serialized, DimensionUniverse()))

        # from simple w/ records
        quantum, _ = self._buildFullQuantum(taskName, addRecords=True)
        serialized = quantum.to_simple()
        self.assertEqual(quantum, quantum.from_simple(serialized, DimensionUniverse()))

        # verify direct works
        jsonVersion = json.loads(serialized.model_dump_json())
        fromDirect = SerializedQuantum.direct(**jsonVersion)
        self.assertEqual(fromDirect, serialized)

        # verify direct with records works
        quantum, _ = self._buildFullQuantum(taskName, addRecords=True)
        serialized = quantum.to_simple()
        jsonVersion = json.loads(serialized.model_dump_json())
        fromDirect = SerializedQuantum.direct(**jsonVersion)
        self.assertEqual(fromDirect, serialized)

        # verify the simple accumulator works
        accumulator = DimensionRecordsAccumulator()
        quantum, _ = self._buildFullQuantum(taskName, addRecords=True)
        serialized = quantum.to_simple(accumulator)
        # verify the accumulator was populated
        recordMapping = accumulator.makeSerializedDimensionRecordMapping()
        self.assertGreater(len(recordMapping), 0)
        # verify the dimension records were not written out
        self.assertEqual(serialized.dimensionRecords, None)
        # Re-attach the accumulated records so the round trip can succeed.
        serialized.dimensionRecords = accumulator.makeSerializedDimensionRecordMapping()  # type: ignore

        self.assertEqual(quantum, quantum.from_simple(serialized, universe=DimensionUniverse()))

214 

# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()