Coverage for python/lsst/cp/verify/mergeResults.py : 20%

# This file is part of cp_verify.
#
# Developed for the LSST Data Management System.
# This product includes software developed by the LSST Project
# (http://www.lsst.org).
# See the COPYRIGHT file at the top-level directory of this distribution
# for details of code ownership.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
import lsst.pipe.base as pipeBase
import lsst.pipe.base.connectionTypes as cT
import lsst.pex.config as pexConfig


__all__ = ['CpVerifyExpMergeConfig', 'CpVerifyExpMergeTask',
           'CpVerifyRunMergeConfig', 'CpVerifyRunMergeTask']


class CpVerifyExpMergeConnections(pipeBase.PipelineTaskConnections,
                                  dimensions={"instrument", "exposure"},
                                  defaultTemplates={}):
    inputStats = cT.Input(
        name="detectorStats",
        doc="Input statistics to merge.",
        storageClass="StructuredDataDict",
        dimensions=["instrument", "exposure", "detector"],
        multiple=True,
    )
    camera = cT.PrerequisiteInput(
        name="camera",
        storageClass="Camera",
        doc="Input camera.",
        dimensions=["instrument", ],
        isCalibration=True,
    )

    outputStats = cT.Output(
        name="exposureStats",
        doc="Output statistics.",
        storageClass="StructuredDataDict",
        dimensions=["instrument", "exposure"],
    )


class CpVerifyExpMergeConfig(pipeBase.PipelineTaskConfig,
                             pipelineConnections=CpVerifyExpMergeConnections):
    """Configuration parameters for exposure stats merging.
    """
    exposureStatKeywords = pexConfig.DictField(
        keytype=str,
        itemtype=str,
        doc="Dictionary of statistics to run on the set of detector values. The key should be the test "
            "name to record in the output, and the value should be the `lsst.afw.math` statistic name string.",
        default={},
    )
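
    # Illustrative sketch (hypothetical values, not cp_verify defaults): a
    # pipeline configuration might request clipped statistics over the merged
    # per-detector values, for example::
    #
    #     config.exposureStatKeywords = {"MEAN": "MEANCLIP",
    #                                    "SIGMA": "STDEVCLIP"}
    #
    # where each value is an `lsst.afw.math` statistic name string.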


class CpVerifyExpMergeTask(pipeBase.PipelineTask, pipeBase.CmdLineTask):
    """Merge statistics from detectors together.
    """
    ConfigClass = CpVerifyExpMergeConfig
    _DefaultName = 'cpVerifyExpMerge'

    def runQuantum(self, butlerQC, inputRefs, outputRefs):
        inputs = butlerQC.get(inputRefs)

        dimensions = [exp.dataId.byName() for exp in inputRefs.inputStats]
        inputs['inputDims'] = dimensions

        outputs = self.run(**inputs)
        butlerQC.put(outputs, outputRefs)

    def run(self, inputStats, camera, inputDims):
        """Merge statistics.

        Parameters
        ----------
        inputStats : `list` [`dict`]
            Measured statistics for a detector (from
            CpVerifyStatsTask).
        camera : `lsst.afw.cameraGeom.Camera`
            The camera geometry for this exposure.
        inputDims : `list` [`dict`]
            List of dictionaries of input data dimensions/values.
            Each list entry should contain:

            ``"exposure"``
                exposure id value (`int`)
            ``"detector"``
                detector id value (`int`)

        Returns
        -------
        outputStats : `dict`
            Merged full exposure statistics.

        See Also
        --------
        lsst.cp.verify.CpVerifyStatsTask

        Notes
        -----
        The outputStats should have a yaml representation of the form:

        DET:
          DetName1:
            FAILURES:
              - TEST_NAME
            STAT: value
            STAT2: value2
          DetName2:
        VERIFY:
          TEST: boolean
          TEST2: boolean
        SUCCESS: boolean
        """
        outputStats = {}
        success = True

        for detStats, dimensions in zip(inputStats, inputDims):
            detId = dimensions['detector']
            detName = camera[detId].getName()
            calcStats = {}

            if detStats['SUCCESS'] is True:
                calcStats['SUCCESS'] = True
            else:
                calcStats['SUCCESS'] = False
                calcStats['FAILURES'] = list()
                success = False
                # See if the detector failed
                if 'DET' in detStats['VERIFY']:
                    for testName, testResult in detStats['VERIFY']['DET'].items():
                        if testResult is False:
                            calcStats['FAILURES'].append(testName)
                # See if the catalog failed
                if 'CATALOG' in detStats['VERIFY']:
                    for testName, testResult in detStats['VERIFY']['CATALOG'].items():
                        if testResult is False:
                            calcStats['FAILURES'].append(testName)
                # See if an amplifier failed
                for ampName, ampStats in detStats['VERIFY']['AMP'].items():
                    ampSuccess = ampStats.pop('SUCCESS')
                    if not ampSuccess:
                        for testName, testResult in ampStats.items():
                            if testResult is False:
                                calcStats['FAILURES'].append(ampName + " " + testName)

            outputStats[detName] = calcStats

        exposureSuccess = True
        if len(self.config.exposureStatKeywords):
            outputStats['VERIFY'], exposureSuccess = self.verify(outputStats)

        outputStats['SUCCESS'] = success & exposureSuccess

        return pipeBase.Struct(
            outputStats=outputStats,
        )

    def verify(self, statisticsDictionary):
        """Verify if the measured statistics meet the verification criteria.

        Parameters
        ----------
        statisticsDictionary : `dict` [`str`, `dict` [`str`, scalar]]
            Dictionary of measured statistics.  The inner dictionary
            should have keys that are statistic names (`str`) with
            values that are some sort of scalar (`int` or `float` are
            the most likely types).

        Returns
        -------
        outputStatistics : `dict` [`str`, `dict` [`str`, `bool`]]
            A dictionary indexed by the detector name, containing
            dictionaries of the verification criteria.
        success : `bool`
            A boolean indicating if all tests have passed.

        Raises
        ------
        NotImplementedError
            This method must be implemented by the calibration-type
            subclass.
        """
        raise NotImplementedError("Subclasses must implement verification criteria.")
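

# Minimal sketch (hypothetical subclass, not part of cp_verify) of how a
# calibration-type subclass might satisfy the ``verify`` contract above:
# it reports one boolean per detector plus an overall success flag, using
# only the merged statistics produced by ``run``.  The class and test names
# are assumed for illustration.
class _ExampleExpMergeTask(CpVerifyExpMergeTask):
    """Illustrative only; not used by any cp_verify pipeline."""

    _DefaultName = 'cpVerifyExampleExpMerge'

    def verify(self, statisticsDictionary):
        verifyStats = {}
        success = True
        for detName, detStats in statisticsDictionary.items():
            # A detector passes this example test if it recorded no failures.
            passed = len(detStats.get('FAILURES', [])) == 0
            verifyStats[detName] = {'NO_FAILURES': passed}
            success = success and passed
        return verifyStats, success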


class CpVerifyRunMergeConnections(pipeBase.PipelineTaskConnections,
                                  dimensions={"instrument"},
                                  defaultTemplates={}):
    inputStats = cT.Input(
        name="exposureStats",
        doc="Input statistics to merge.",
        storageClass="StructuredDataDict",
        dimensions=["instrument", "exposure"],
        multiple=True,
    )

    outputStats = cT.Output(
        name="runStats",
        doc="Output statistics.",
        storageClass="StructuredDataDict",
        dimensions=["instrument"],
    )


class CpVerifyRunMergeConfig(pipeBase.PipelineTaskConfig,
                             pipelineConnections=CpVerifyRunMergeConnections):
    """Configuration parameters for run stats merging.
    """
    runStatKeywords = pexConfig.DictField(
        keytype=str,
        itemtype=str,
        doc="Dictionary of statistics to run on the set of exposure values. The key should be the test "
            "name to record in the output, and the value should be the `lsst.afw.math` statistic name string.",
        default={},
    )
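
    # Illustrative sketch (hypothetical values, not cp_verify defaults): a
    # pipeline configuration might aggregate the per-exposure values with,
    # for example::
    #
    #     config.runStatKeywords = {"MEAN": "MEAN",
    #                               "SIGMA": "STDEV"}
    #
    # where each value is an `lsst.afw.math` statistic name string.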


class CpVerifyRunMergeTask(pipeBase.PipelineTask, pipeBase.CmdLineTask):
    """Merge statistics from exposures together.
    """
    ConfigClass = CpVerifyRunMergeConfig
    _DefaultName = 'cpVerifyRunMerge'

    def runQuantum(self, butlerQC, inputRefs, outputRefs):
        inputs = butlerQC.get(inputRefs)

        dimensions = [exp.dataId.byName() for exp in inputRefs.inputStats]
        inputs['inputDims'] = dimensions

        outputs = self.run(**inputs)
        butlerQC.put(outputs, outputRefs)

    def run(self, inputStats, inputDims):
        """Merge statistics.

        Parameters
        ----------
        inputStats : `list` [`dict`]
            Measured statistics for an exposure.
        inputDims : `list` [`dict`]
            List of dictionaries of input data dimensions/values.
            Each list entry should contain:

            ``"exposure"``
                exposure id value (`int`)

        Returns
        -------
        outputStats : `dict`
            Merged statistics for the full run.

        Notes
        -----
        The outputStats should have a yaml representation as follows.

        VERIFY:
          ExposureId1:
            VERIFY_MEAN: boolean
            VERIFY_SIGMA: boolean
          ExposureId2:
            [...]
          MEAN_UNIMODAL: boolean
          SIGMA_UNIMODAL: boolean
        """
        outputStats = {}
        success = True
        for expStats, dimensions in zip(inputStats, inputDims):
            expId = dimensions['exposure']
            calcStats = {}

            expSuccess = expStats.pop('SUCCESS')
            if expSuccess:
                calcStats['SUCCESS'] = True
            else:
                calcStats['FAILURES'] = list()
                success = False
                for detName, detStats in expStats.items():
                    detSuccess = detStats.pop('SUCCESS')
                    if not detSuccess:
                        for testName in expStats[detName]['FAILURES']:
                            calcStats['FAILURES'].append(detName + " " + testName)

            outputStats[expId] = calcStats

        runSuccess = True
        if len(self.config.runStatKeywords):
            outputStats['VERIFY'], runSuccess = self.verify(outputStats)

        outputStats['SUCCESS'] = success & runSuccess

        return pipeBase.Struct(
            outputStats=outputStats,
        )

    def verify(self, statisticsDictionary):
        """Verify if the measured statistics meet the verification criteria.

        Parameters
        ----------
        statisticsDictionary : `dict` [`str`, `dict`]
            Dictionary of measured statistics.  The inner dictionary
            should have keys that are statistic names (`str`) with
            values that are some sort of scalar (`int` or `float` are
            the most likely types).

        Returns
        -------
        outputStatistics : `dict` [`str`, `dict` [`str`, `bool`]]
            A dictionary indexed by the exposure id, containing
            dictionaries of the verification criteria.
        success : `bool`
            A boolean indicating if all tests have passed.

        Raises
        ------
        NotImplementedError
            This method must be implemented by the calibration-type
            subclass.
        """
        raise NotImplementedError("Subclasses must implement verification criteria.")
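

# Minimal sketch (hypothetical helper, not part of cp_verify) showing how the
# `lsst.afw.math` statistic name strings referenced by ``exposureStatKeywords``
# and ``runStatKeywords`` could be evaluated inside a subclass ``verify``
# implementation.  The helper name and its use here are assumptions.
def _applyStatKeywords(statKeywords, values):
    """Evaluate each named `lsst.afw.math` statistic over a list of floats."""
    import lsst.afw.math as afwMath

    results = {}
    for testName, statName in statKeywords.items():
        # Convert, e.g., "MEANCLIP" into the afw.math statistics property,
        # then compute that statistic over the supplied values.
        statProp = afwMath.stringToStatisticsProperty(statName)
        results[testName] = afwMath.makeStatistics(values, statProp).getValue()
    return results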