Coverage for python/lsst/cp/verify/verifyBias.py: 10% (102 statements)
# This file is part of cp_verify.
#
# Developed for the LSST Data Management System.
# This product includes software developed by the LSST Project
# (http://www.lsst.org).
# See the COPYRIGHT file at the top-level directory of this distribution
# for details of code ownership.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import numpy as np

import lsst.afw.math as afwMath

from lsst.geom import Point2I, Extent2I, Box2I
from lsst.pex.config import Field
from .verifyStats import CpVerifyStatsConfig, CpVerifyStatsTask, CpVerifyStatsConnections

__all__ = ['CpVerifyBiasConfig', 'CpVerifyBiasTask']


class CpVerifyBiasConfig(CpVerifyStatsConfig,
                         pipelineConnections=CpVerifyStatsConnections):
    """Inherits from base CpVerifyStatsConfig.
    """

    ampCornerBoxSize = Field(
        dtype=int,
        doc="Size of the box to use for measuring the corner signal.",
        default=200,
    )

    def setDefaults(self):
        super().setDefaults()
        self.stageName = 'BIAS'
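        # Statistics to measure from the corrected image, the
        # CR-rejected image, and the task metadata, keyed by the
        # output statistic name.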
        self.imageStatKeywords = {'MEAN': 'MEAN',  # noqa F841
                                  'NOISE': 'STDEVCLIP', }
        self.crImageStatKeywords = {'CR_NOISE': 'STDEV', }  # noqa F841
        self.metadataStatKeywords = {'RESIDUAL STDEV': 'AMP', }  # noqa F841


class CpVerifyBiasTask(CpVerifyStatsTask):
    """Bias verification sub-class, implementing the verify method.
    """

    ConfigClass = CpVerifyBiasConfig
    _DefaultName = 'cpVerifyBias'

    def imageStatistics(self, exposure, uncorrectedExposure, statControl):
        # Docstring inherited
        outputStatistics = super().imageStatistics(exposure, uncorrectedExposure, statControl)

        boxSize = self.config.ampCornerBoxSize
        statisticToRun = afwMath.stringToStatisticsProperty("MEAN")
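
        # Measure the mean signal in a boxSize x boxSize region at one
        # corner of each amplifier; the box is anchored at the high end
        # of an axis when the amplifier is flipped along that axis in
        # raw coordinates, and at the low end otherwise.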
        for ampIdx, amp in enumerate(exposure.getDetector()):
            ampName = amp.getName()

            bbox = amp.getBBox()
            xmin = bbox.getMaxX() - boxSize if amp.getRawFlipX() else bbox.getMinX()
            ymin = bbox.getMaxY() - boxSize if amp.getRawFlipY() else bbox.getMinY()
            llc = Point2I(xmin, ymin)
            extent = Extent2I(boxSize, boxSize)
            cornerBox = Box2I(llc, extent)
            cornerExp = exposure[cornerBox]

            stats = afwMath.makeStatistics(
                cornerExp.getMaskedImage(), statisticToRun, statControl
            )
            outputStatistics[ampName]['AMP_CORNER'] = stats.getValue()

        return outputStatistics

    def verify(self, exposure, statisticsDict):
        """Verify that the measured statistics meet the verification criteria.

        Parameters
        ----------
        exposure : `lsst.afw.image.Exposure`
            The exposure the statistics are from.
        statisticsDict : `dict` [`str`, `dict` [`str`, scalar]]
            Dictionary of measured statistics. The inner dictionary
            should have keys that are statistic names (`str`) with
            values that are some sort of scalar (`int` or `float` are
            the most likely types).

        Returns
        -------
        outputStatistics : `dict` [`str`, `dict` [`str`, `bool`]]
            A dictionary indexed by the amplifier name, containing
            dictionaries of the verification criteria.
        success : `bool`
            A boolean indicating whether all tests have passed.
        """
        detector = exposure.getDetector()
        ampStats = statisticsDict['AMP']
        metadataStats = statisticsDict['METADATA']

        verifyStats = {}
        success = True
        for ampName, stats in ampStats.items():
            verify = {}

            # DMTN-101 Test 4.2: Mean is 0.0 within noise.
            verify['MEAN'] = bool(np.abs(stats['MEAN']) < stats['NOISE'])

            # DMTN-101 Test 4.3: Clipped mean matches readNoise. This
            # test should use the nominal detector read noise. The
            # f"RESIDUAL STDEV {ampName}" metadata entry contains the
            # measured dispersion in the overscan-corrected overscan
            # region, which should provide an estimate of the read
            # noise. However, directly using this value will cause
            # some fraction of verification runs to fail if the
            # scatter in read noise values is comparable to the test
            # threshold, as the measured overscan residual may be
            # sampling the low-end tail of the distribution. This
            # measurement is also likely to be smaller than that
            # measured on the bulk of the image, as the overscan
            # correction should be an optimal fit to the overscan
            # region, but not necessarily to the image region.
            readNoise = detector[ampName].getReadNoise()
            verify['NOISE'] = bool((stats['NOISE'] - readNoise)/readNoise <= 0.05)

            # DMTN-101 Test 4.4: CR rejection matches clipped mean.
            verify['CR_NOISE'] = bool(np.abs(stats['NOISE'] - stats['CR_NOISE'])/stats['CR_NOISE'] <= 0.05)

            # Confirm this hasn't triggered a raise condition.
            if 'FORCE_FAILURE' in stats:
                verify['PROCESSING'] = False
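
            # An amplifier passes only if every individual check above
            # passed.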
            verify['SUCCESS'] = bool(np.all(list(verify.values())))
            if not verify['SUCCESS']:
                success = False

            # After determining the verification status for this
            # exposure, we can also check how well the read noise
            # measured from the overscan residual matches the nominal
            # value used above in Test 4.3. If these disagree
            # consistently and significantly, then the assumptions
            # used in that test may be incorrect, and the nominal read
            # noise may need recalculation. Only perform this check
            # if the metadataStats contain the required entry.
            if 'RESIDUAL STDEV' in metadataStats and ampName in metadataStats['RESIDUAL STDEV']:
                verify['READ_NOISE_CONSISTENT'] = True
                overscanReadNoise = metadataStats['RESIDUAL STDEV'][ampName]
                if overscanReadNoise:
                    if (overscanReadNoise - readNoise)/readNoise > 0.05:
                        verify['READ_NOISE_CONSISTENT'] = False

            verifyStats[ampName] = verify

        return {'AMP': verifyStats}, bool(success)

    def repackStats(self, statisticsDict, dimensions):
        # Docstring inherited
        rows = {}
        rowList = []
        matrixRowList = None

        if self.config.useIsrStatistics:
            mjd = statisticsDict["ISR"]["MJD"]
        else:
            mjd = np.nan
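
        # Values common to every per-amplifier row.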
        rowBase = {
            "instrument": dimensions["instrument"],
            "exposure": dimensions["exposure"],
            "detector": dimensions["detector"],
            "mjd": mjd,
        }

        # AMP results:
        for ampName, stats in statisticsDict["AMP"].items():
            rows[ampName] = {}
            rows[ampName].update(rowBase)

            rows[ampName]["amplifier"] = ampName
            for key, value in stats.items():
                rows[ampName][f"{self.config.stageName}_{key}"] = value

        # VERIFY results
        for ampName, stats in statisticsDict["VERIFY"]["AMP"].items():
            for key, value in stats.items():
                rows[ampName][f"{self.config.stageName}_VERIFY_{key}"] = value

        # METADATA results
        if 'RESIDUAL STDEV' in statisticsDict["METADATA"]:
            for ampName, value in statisticsDict["METADATA"]["RESIDUAL STDEV"].items():
                rows[ampName][f"{self.config.stageName}_READ_NOISE"] = value

        # ISR results
        if self.config.useIsrStatistics and "ISR" in statisticsDict:
            if "AMPCORR" in statisticsDict["ISR"]:
                matrixRowList = statisticsDict["ISR"]["AMPCORR"]
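
            # Bias-shift detections from the ISR statistics: record the
            # number of flagged shifts and the local noise estimate for
            # each amplifier.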
            for ampName, stats in statisticsDict["ISR"]["BIASSHIFT"].items():
                rows[ampName][f"{self.config.stageName}_BIAS_SHIFT_COUNT"] = len(stats['BIAS_SHIFTS'])
                rows[ampName][f"{self.config.stageName}_BIAS_SHIFT_NOISE"] = stats['LOCAL_NOISE']
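
            # Percentiles of the calibration distribution at the
            # configured levels.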
            for ampName, stats in statisticsDict["ISR"]["CALIBDIST"].items():
                for level in self.config.expectedDistributionLevels:
                    key = f"LSST CALIB {self.config.stageName.upper()} {ampName} DISTRIBUTION {level}-PCT"
                    rows[ampName][f"{self.config.stageName}_BIAS_DIST_{level}_PCT"] = stats[key]
214 if "PROJECTION" in statisticsDict["ISR"]:
215 # We need all rows of biasParallelProfile and
216 # biasParallelProfile to be the same length for
217 # serialization. Therefore, we pad to the longest
218 # length.
219 projStats = statisticsDict["ISR"]["PROJECTION"]
220 maxLen = 0
221 for sourceKey, key in {"AMP_HPROJECTION": f"{self.config.stageName}_SERIAL_PROF",
222 "AMP_VPROJECTION": f"{self.config.stageName}_PARALLEL_PROF"}.items():
223 for ampName in projStats[sourceKey].keys():
224 rows[ampName][key] = np.array(projStats[sourceKey][ampName])
225 if (myLen := len(rows[ampName][key])) > maxLen:
226 maxLen = myLen
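
                    # Pad shorter profiles to the maximum length with NaNs.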
                    for ampName in rows.keys():
                        if (myLen := len(rows[ampName][key])) < maxLen:
                            rows[ampName][key] = np.pad(
                                rows[ampName][key],
                                (0, maxLen - myLen),
                                constant_values=np.nan)

        # Pack the final list.
        for ampName, stats in rows.items():
            rowList.append(stats)

        return rowList, matrixRowList