Coverage for python / lsst / cp / verify / verifyBias.py: 10%
104 statements
« prev ^ index » next coverage.py v7.13.5, created at 2026-05-07 08:54 +0000
1# This file is part of cp_verify.
2#
3# Developed for the LSST Data Management System.
4# This product includes software developed by the LSST Project
5# (http://www.lsst.org).
6# See the COPYRIGHT file at the top-level directory of this distribution
7# for details of code ownership.
8#
9# This program is free software: you can redistribute it and/or modify
10# it under the terms of the GNU General Public License as published by
11# the Free Software Foundation, either version 3 of the License, or
12# (at your option) any later version.
13#
14# This program is distributed in the hope that it will be useful,
15# but WITHOUT ANY WARRANTY; without even the implied warranty of
16# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17# GNU General Public License for more details.
18#
19# You should have received a copy of the GNU General Public License
20# along with this program. If not, see <http://www.gnu.org/licenses/>.
21import numpy as np
23import lsst.afw.math as afwMath
25from lsst.geom import Point2I, Extent2I, Box2I
26from lsst.pex.config import Field
27from lsst.ip.isr.isrFunctions import getExposureReadNoises, getExposureGains
28from .verifyStats import CpVerifyStatsConfig, CpVerifyStatsTask, CpVerifyStatsConnections
30__all__ = ['CpVerifyBiasConfig', 'CpVerifyBiasTask']
class CpVerifyBiasConfig(CpVerifyStatsConfig,
                         pipelineConnections=CpVerifyStatsConnections):
    """Configuration for bias verification statistics.

    Inherits from base CpVerifyStatsConfig.
    """
    # Edge length (pixels) of the square region sampled at each
    # amplifier's corner by ``imageStatistics``.
    ampCornerBoxSize = Field(
        dtype=int,
        doc="Size of box to use for measure corner signal.",
        default=200,
    )

    def setDefaults(self):
        super().setDefaults()
        self.stageName = 'BIAS'
        # Statistics measured on the corrected image, and the output
        # names they are stored under.
        self.imageStatKeywords = {
            'MEAN': 'MEAN',  # noqa F841
            'NOISE': 'STDEVCLIP',
        }
        # Statistics measured after cosmic-ray rejection.
        self.crImageStatKeywords = {'CR_NOISE': 'STDEV'}  # noqa F841
        # Exposure metadata entries to harvest, keyed by the ISR
        # header keyword, valued by the output statistic name.
        self.metadataStatKeywords = {
            'LSST ISR OVERSCAN RESIDUAL SERIAL STDEV': 'READ_NOISE_ADU',
        }  # noqa F841
class CpVerifyBiasTask(CpVerifyStatsTask):
    """Bias verification sub-class, implementing the verify method.
    """
    ConfigClass = CpVerifyBiasConfig
    _DefaultName = 'cpVerifyBias'

    def imageStatistics(self, exposure, uncorrectedExposure, statControl):
        # Docstring inherited
        outputStatistics = super().imageStatistics(exposure, uncorrectedExposure, statControl)

        boxSize = self.config.ampCornerBoxSize
        statisticToRun = afwMath.stringToStatisticsProperty("MEAN")

        # Measure the mean signal in a boxSize x boxSize box in one
        # corner of each amplifier.  The raw flip flags select the
        # max-x/max-y corner when set (presumably placing the box at
        # the amp's readout corner — confirm against camera geometry).
        for amp in exposure.getDetector():
            ampName = amp.getName()

            bbox = amp.getBBox()
            xmin = bbox.getMaxX() - boxSize if amp.getRawFlipX() else bbox.getMinX()
            ymin = bbox.getMaxY() - boxSize if amp.getRawFlipY() else bbox.getMinY()
            llc = Point2I(xmin, ymin)
            extent = Extent2I(boxSize, boxSize)
            cornerBox = Box2I(llc, extent)
            cornerExp = exposure[cornerBox]

            stats = afwMath.makeStatistics(
                cornerExp.getMaskedImage(), statisticToRun, statControl
            )
            outputStatistics[ampName]['AMP_CORNER'] = stats.getValue()

        return outputStatistics

    def verify(self, exposure, statisticsDict):
        """Verify that the measured statistics meet the verification criteria.

        Parameters
        ----------
        exposure : `lsst.afw.image.Exposure`
            The exposure the statistics are from.
        statisticsDict : `dict` [`str`, `dict` [`str`, scalar]],
            Dictionary of measured statistics.  The inner dictionary
            should have keys that are statistic names (`str`) with
            values that are some sort of scalar (`int` or `float` are
            the mostly likely types).

        Returns
        -------
        outputStatistics : `dict` [`str`, `dict` [`str`, `bool`]]
            A dictionary indexed by the amplifier name, containing
            dictionaries of the verification criteria.
        success : `bool`
            A boolean indicating if all tests have passed.
        """
        ampStats = statisticsDict['AMP']
        metadataStats = statisticsDict['METADATA']

        verifyStats = {}
        success = True
        # These are the PTC gain and RN (e-) used in constructing the
        # variance plane:
        gainDict = getExposureGains(exposure)
        readNoiseDict = getExposureReadNoises(exposure)
        for ampName, stats in ampStats.items():
            verify = {}

            # DMTN-101 Test 4.2: Mean is 0.0 within the noise measured
            # on the image (e-):
            verify['MEAN'] = bool(np.abs(stats['MEAN']) < stats['NOISE'])

            # DMTN-101 Test 4.3: Clipped mean matches nominal PTC
            # readNoise.  This test should use the nominal detector
            # read noise.  The f"READ_NOISE_ADU {ampName}" metadata
            # entry contains the measured dispersion in the
            # overscan-corrected overscan region, which should provide
            # an estimate of the read noise (in ADU).  However,
            # directly using this value will cause some fraction of
            # verification runs to fail if the scatter in read noise
            # values is comparable to the test threshold, as the
            # overscan residual measured may be sampling from the low
            # end tail of the distribution.  This measurement is also
            # likely to be smaller than that measured on the bulk of
            # the image as the overscan correction should be an
            # optimal fit to the overscan region, but not necessarily
            # for the image region.  We check read noise consistency
            # below.
            readNoise = readNoiseDict[ampName]
            # Signed test: only an excess above nominal (by > 5%)
            # fails; a measurement below nominal always passes.
            verify['NOISE'] = bool((stats['NOISE'] - readNoise)/readNoise <= 0.05)

            # DMTN-101 Test 4.4: CR rejection matches clipped mean.
            verify['CR_NOISE'] = bool(np.abs(stats['NOISE'] - stats['CR_NOISE'])/stats['CR_NOISE'] <= 0.05)

            # Confirm this hasn't triggered a raise condition.
            if 'FORCE_FAILURE' in stats:
                verify['PROCESSING'] = False

            verify['SUCCESS'] = bool(np.all(list(verify.values())))
            if verify['SUCCESS'] is False:
                success = False

            # After determining the verification status for this
            # exposure, we can also check to see how well the read
            # noise measured from the overscan residual matches the
            # nominal value used above in Test 4.3.  If these disagree
            # consistently and significantly, then the assumptions
            # used in that test may be incorrect, and the nominal read
            # noise may need recalculation.  Only perform this check
            # if the metadataStats contain the required entry.  This
            # is in ADU (the serial overscan is measured prior to gain
            # normalization), so we need to convert to electrons here.
            gain = gainDict[ampName]
            overscanReadNoise = gain * metadataStats['READ_NOISE_ADU'][ampName]
            if overscanReadNoise:
                # Note: this is informational only and deliberately
                # does not feed into verify['SUCCESS'] above.
                if ((overscanReadNoise - readNoise)/readNoise > 0.05) or not np.isfinite(overscanReadNoise):
                    verify['READ_NOISE_CONSISTENT'] = False
                else:
                    verify['READ_NOISE_CONSISTENT'] = True

            verifyStats[ampName] = verify

        return {'AMP': verifyStats}, bool(success)

    def repackStats(self, statisticsDict, dimensions):
        # docstring inherited
        rows = {}
        rowList = []
        matrixRowList = None

        if self.config.useIsrStatistics:
            mjd = statisticsDict["ISR"]["MJD"]
        else:
            mjd = np.nan

        # Dimension columns shared by every per-amplifier row.
        rowBase = {
            "instrument": dimensions["instrument"],
            "exposure": dimensions["exposure"],
            "detector": dimensions["detector"],
            "mjd": mjd,
        }

        # AMP results:
        for ampName, stats in statisticsDict["AMP"].items():
            rows[ampName] = {}
            rows[ampName].update(rowBase)
            rows[ampName]["amplifier"] = ampName
            for key, value in stats.items():
                rows[ampName][f"{self.config.stageName}_{key}"] = value

        # VERIFY results
        for ampName, stats in statisticsDict["VERIFY"]["AMP"].items():
            for key, value in stats.items():
                rows[ampName][f"{self.config.stageName}_VERIFY_{key}"] = value

        # METADATA results
        if 'READ_NOISE_ADU' in statisticsDict["METADATA"]:
            for ampName, value in statisticsDict["METADATA"]["READ_NOISE_ADU"].items():
                rows[ampName][f"{self.config.stageName}_READ_NOISE_ADU"] = value

        # ISR results
        if self.config.useIsrStatistics and "ISR" in statisticsDict:
            if "AMPCORR" in statisticsDict["ISR"]:
                matrixRowList = statisticsDict["ISR"]["AMPCORR"]

            for ampName, stats in statisticsDict["ISR"]["BIASSHIFT"].items():
                rows[ampName][f"{self.config.stageName}_BIAS_SHIFT_COUNT"] = len(stats['BIAS_SHIFTS'])
                rows[ampName][f"{self.config.stageName}_BIAS_SHIFT_NOISE"] = stats['LOCAL_NOISE']

            for ampName, stats in statisticsDict["ISR"]["CALIBDIST"].items():
                for level in self.config.expectedDistributionLevels:
                    key = f"LSST CALIB {self.config.stageName.upper()} {ampName} DISTRIBUTION {level}-PCT"
                    rows[ampName][f"{self.config.stageName}_BIAS_DIST_{level}_PCT"] = stats[key]

            if "PROJECTION" in statisticsDict["ISR"]:
                # We need all rows of the serial and parallel profiles
                # to be the same length for serialization.  Therefore,
                # we pad to the longest length.
                projStats = statisticsDict["ISR"]["PROJECTION"]
                maxLen = 0
                for sourceKey, key in {"AMP_HPROJECTION": f"{self.config.stageName}_SERIAL_PROF",
                                       "AMP_VPROJECTION": f"{self.config.stageName}_PARALLEL_PROF"}.items():
                    for ampName in projStats[sourceKey].keys():
                        rows[ampName][key] = np.array(projStats[sourceKey][ampName])
                        if (myLen := len(rows[ampName][key])) > maxLen:
                            maxLen = myLen

                    # NOTE(review): assumes every row already has this
                    # profile key (i.e. projStats covers all amps);
                    # a missing amp would raise KeyError here.
                    for ampName in rows.keys():
                        if (myLen := len(rows[ampName][key])) < maxLen:
                            rows[ampName][key] = np.pad(
                                rows[ampName][key],
                                (0, maxLen - myLen),
                                constant_values=np.nan)

        # pack final list
        rowList.extend(rows.values())

        return rowList, matrixRowList