Coverage report for python/lsst/cp/verify/verifyDark.py: 11% of 67 statements covered (coverage.py v7.13.5, created at 2026-04-24 08:55 +0000).
1# This file is part of cp_verify.
2#
3# Developed for the LSST Data Management System.
4# This product includes software developed by the LSST Project
5# (http://www.lsst.org).
6# See the COPYRIGHT file at the top-level directory of this distribution
7# for details of code ownership.
8#
9# This program is free software: you can redistribute it and/or modify
10# it under the terms of the GNU General Public License as published by
11# the Free Software Foundation, either version 3 of the License, or
12# (at your option) any later version.
13#
14# This program is distributed in the hope that it will be useful,
15# but WITHOUT ANY WARRANTY; without even the implied warranty of
16# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17# GNU General Public License for more details.
18#
19# You should have received a copy of the GNU General Public License
20# along with this program. If not, see <http://www.gnu.org/licenses/>.
21import numpy as np
23from lsst.ip.isr.isrFunctions import getExposureReadNoises, getExposureGains
24from .verifyStats import CpVerifyStatsConfig, CpVerifyStatsTask, CpVerifyStatsConnections
26__all__ = ['CpVerifyDarkConfig', 'CpVerifyDarkTask']
class CpVerifyDarkConfig(CpVerifyStatsConfig,
                         pipelineConnections=CpVerifyStatsConnections):
    """Configuration for dark verification statistics.

    Inherits from base CpVerifyStatsConfig; only the defaults differ.
    """

    def setDefaults(self):
        super().setDefaults()
        self.stageName = 'DARK'
        # Per-amplifier statistics measured on the image itself.
        self.imageStatKeywords = dict(MEAN='MEAN',  # noqa F841
                                      NOISE='STDEVCLIP')
        # Statistics measured on the cosmic-ray rejected image.
        self.crImageStatKeywords = dict(CR_NOISE='STDEV')  # noqa F841
        # Exposure-metadata entries to harvest, keyed by the metadata
        # entry name, valued by the name used in the output statistics.
        self.metadataStatKeywords = {
            'LSST ISR OVERSCAN RESIDUAL SERIAL STDEV': 'READ_NOISE_ADU',
        }  # noqa F841
class CpVerifyDarkTask(CpVerifyStatsTask):
    """Dark verification sub-class, implementing the verify method.
    """
    ConfigClass = CpVerifyDarkConfig
    _DefaultName = 'cpVerifyDark'

    def verify(self, exposure, statisticsDict):
        """Verify that the measured statistics meet the verification criteria.

        Parameters
        ----------
        exposure : `lsst.afw.image.Exposure`
            The exposure the statistics are from.
        statisticsDict : `dict` [`str`, `dict` [`str`, scalar]],
            Dictionary of measured statistics.  The inner dictionary
            should have keys that are statistic names (`str`) with
            values that are some sort of scalar (`int` or `float` are
            the mostly likely types).

        Returns
        -------
        outputStatistics : `dict` [`str`, `dict` [`str`, `bool`]]
            A dictionary indexed by the amplifier name, containing
            dictionaries of the verification criteria.
        success : `bool`
            A boolean indicating if all tests have passed.
        """
        ampStats = statisticsDict['AMP']
        metadataStats = statisticsDict['METADATA']

        # The PTC gain and read noise (e-) that were used when the
        # variance plane was constructed.
        gains = getExposureGains(exposure)
        nominalReadNoises = getExposureReadNoises(exposure)

        ampVerify = {}
        allPassed = True
        for ampName, measured in ampStats.items():
            checks = {}

            # DMTN-101 Test 5.2: the mean must be consistent with 0.0
            # at the level of the noise measured on the image (e-).
            checks['MEAN'] = bool(np.abs(measured['MEAN']) < measured['NOISE'])

            # DMTN-101 Test 5.3: Clipped mean matches nominal PTC
            # readNoise.  This test should use the nominal detector
            # read noise.  The "READ_NOISE_ADU" metadata entry holds
            # the measured dispersion in the overscan-corrected
            # overscan region, an estimate of the read noise (in ADU).
            # Using that value directly would make some fraction of
            # runs fail whenever the scatter in read-noise values is
            # comparable to the test threshold, since the measured
            # residual may sample the low-end tail of the
            # distribution.  It is also likely to be smaller than the
            # noise on the bulk of the image, because the overscan fit
            # is optimal for the overscan region but not necessarily
            # for the image region.  Read-noise consistency is checked
            # separately below.
            nominalNoise = nominalReadNoises[ampName]
            checks['NOISE'] = bool(
                (measured['NOISE'] - nominalNoise) / nominalNoise <= 0.05)

            # DMTN-101 Test 5.4: CR rejection matches clipped mean.
            checks['CR_NOISE'] = bool(
                np.abs(measured['NOISE'] - measured['CR_NOISE'])
                / measured['CR_NOISE'] <= 0.05)

            # Confirm this hasn't triggered a raise condition.
            if 'FORCE_FAILURE' in measured:
                checks['PROCESSING'] = False

            checks['SUCCESS'] = bool(np.all(list(checks.values())))
            if not checks['SUCCESS']:
                allPassed = False

            # With the verification status settled, cross-check how
            # well the read noise measured from the overscan residual
            # matches the nominal value used above in Test 5.3.  A
            # consistent, significant disagreement suggests the
            # assumptions in that test are wrong and the nominal read
            # noise may need recalculation.  The metadata value is in
            # ADU (the serial overscan is measured prior to gain
            # normalization), so convert to electrons first.  Skip the
            # check when the converted value is zero/absent.
            overscanReadNoise = gains[ampName] * metadataStats['READ_NOISE_ADU'][ampName]
            if overscanReadNoise:
                exceeds = (overscanReadNoise - nominalNoise) / nominalNoise > 0.05
                checks['READ_NOISE_CONSISTENT'] = bool(
                    not (exceeds or not np.isfinite(overscanReadNoise)))

            ampVerify[ampName] = checks

        return {'AMP': ampVerify}, bool(allPassed)

    def repackStats(self, statisticsDict, dimensions):
        # docstring inherited
        matrixRowList = None
        stage = self.config.stageName

        # MJD is only available when ISR statistics were gathered.
        mjd = statisticsDict["ISR"]["MJD"] if self.config.useIsrStatistics else np.nan

        # AMP results: one output row per amplifier, seeded with the
        # identifying dimensions.
        rows = {}
        for ampName, stats in statisticsDict["AMP"].items():
            row = {
                "instrument": dimensions["instrument"],
                "exposure": dimensions["exposure"],
                "detector": dimensions["detector"],
                "mjd": mjd,
                "amplifier": ampName,
            }
            for key, value in stats.items():
                row[f"{stage}_{key}"] = value
            rows[ampName] = row

        # VERIFY results
        for ampName, stats in statisticsDict["VERIFY"]["AMP"].items():
            for key, value in stats.items():
                rows[ampName][f"{stage}_VERIFY_{key}"] = value

        # METADATA results
        for ampName, value in statisticsDict["METADATA"]["READ_NOISE_ADU"].items():
            rows[ampName][f"{stage}_READ_NOISE_ADU"] = value

        # ISR results
        if self.config.useIsrStatistics and "ISR" in statisticsDict:
            for ampName, stats in statisticsDict["ISR"]["CALIBDIST"].items():
                for level in self.config.expectedDistributionLevels:
                    key = f"LSST CALIB {stage.upper()} {ampName} DISTRIBUTION {level}-PCT"
                    rows[ampName][f"{stage}_DARK_DIST_{level}_PCT"] = stats[key]

        # Pack the per-amplifier rows into the final list.
        return list(rows.values()), matrixRowList