Coverage for python/lsst/cp/verify/verifyBias.py: 18%

37 statements  

coverage.py v7.2.5, created at 2023-05-18 03:25 -0700

# This file is part of cp_verify.
#
# Developed for the LSST Data Management System.
# This product includes software developed by the LSST Project
# (http://www.lsst.org).
# See the COPYRIGHT file at the top-level directory of this distribution
# for details of code ownership.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
import numpy as np

from .verifyStats import CpVerifyStatsConfig, CpVerifyStatsTask, CpVerifyStatsConnections


__all__ = ['CpVerifyBiasConfig', 'CpVerifyBiasTask']


class CpVerifyBiasConfig(CpVerifyStatsConfig,
                         pipelineConnections=CpVerifyStatsConnections):
    """Inherits from base CpVerifyStatsConfig.
    """

    def setDefaults(self):
        super().setDefaults()
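        # The keys defined below ('MEAN', 'NOISE', 'CR_NOISE', and
        # 'RESIDUAL STDEV') are the statistic names consumed by
        # CpVerifyBiasTask.verify.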

        self.imageStatKeywords = {'MEAN': 'MEAN',  # noqa F841
                                  'NOISE': 'STDEVCLIP', }
        self.crImageStatKeywords = {'CR_NOISE': 'STDEV', }  # noqa F841
        self.metadataStatKeywords = {'RESIDUAL STDEV': 'AMP', }  # noqa F841


class CpVerifyBiasTask(CpVerifyStatsTask):
    """Bias verification sub-class, implementing the verify method.
    """
    ConfigClass = CpVerifyBiasConfig
    _DefaultName = 'cpVerifyBias'

    def verify(self, exposure, statisticsDict):
        """Verify that the measured statistics meet the verification criteria.

        Parameters
        ----------
        exposure : `lsst.afw.image.Exposure`
            The exposure the statistics are from.
        statisticsDict : `dict` [`str`, `dict` [`str`, scalar]]
            Dictionary of measured statistics.  The inner dictionary
            should have keys that are statistic names (`str`) with
            values that are some sort of scalar (`int` or `float` are
            the most likely types).

        Returns
        -------
        outputStatistics : `dict` [`str`, `dict` [`str`, `bool`]]
            A dictionary indexed by the amplifier name, containing
            dictionaries of the verification criteria.
        success : `bool`
            A boolean indicating if all tests have passed.
        """
        detector = exposure.getDetector()
        ampStats = statisticsDict['AMP']
        metadataStats = statisticsDict['METADATA']
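        # ampStats holds the per-amplifier statistics measured on the image;
        # metadataStats holds the per-amplifier 'RESIDUAL STDEV' overscan
        # residuals gathered from the metadata (used in the consistency
        # check below).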

        verifyStats = {}
        success = True
        for ampName, stats in ampStats.items():
            verify = {}

            # DMTN-101 Test 4.2: Mean is 0.0 within noise.
            verify['MEAN'] = bool(np.abs(stats['MEAN']) < stats['NOISE'])

            # DMTN-101 Test 4.3: Clipped mean matches readNoise.  This
            # test should use the nominal detector read noise.  The
            # f"RESIDUAL STDEV {ampName}" metadata entry contains the
            # measured dispersion in the overscan-corrected overscan
            # region, which should provide an estimate of the read
            # noise.  However, directly using this value will cause
            # some fraction of verification runs to fail if the
            # scatter in read noise values is comparable to the test
            # threshold, as the overscan residual measured may be
            # sampling from the low-end tail of the distribution.
            # This measurement is also likely to be smaller than that
            # measured on the bulk of the image, as the overscan
            # correction should be an optimal fit to the overscan
            # region, but not necessarily for the image region.
            readNoise = detector[ampName].getReadNoise()
            verify['NOISE'] = bool((stats['NOISE'] - readNoise)/readNoise <= 0.05)
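            # Note that this check is one-sided: the clipped noise may exceed
            # the nominal read noise by at most 5%; smaller values pass.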

            # DMTN-101 Test 4.4: CR rejection matches clipped mean.
            verify['CR_NOISE'] = bool(np.abs(stats['NOISE'] - stats['CR_NOISE'])/stats['CR_NOISE'] <= 0.05)
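            # The clipped noise and the noise measured after cosmic-ray
            # rejection must agree to within 5% of the CR-rejected value,
            # in either direction.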

            # Confirm this hasn't triggered a raise condition.
            if 'FORCE_FAILURE' in stats:
                verify['PROCESSING'] = False

            verify['SUCCESS'] = bool(np.all(list(verify.values())))
            if not verify['SUCCESS']:
                success = False

            # After determining the verification status for this
            # exposure, we can also check how well the read noise
            # measured from the overscan residual matches the nominal
            # value used above in Test 4.3.  If these disagree
            # consistently and significantly, then the assumptions
            # used in that test may be incorrect, and the nominal read
            # noise may need recalculation.  Only perform this check
            # if metadataStats contains the required entry.
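            # Only an overscan residual more than 5% above the nominal read
            # noise is flagged as inconsistent below.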

            if 'RESIDUAL STDEV' in metadataStats and ampName in metadataStats['RESIDUAL STDEV']:
                verify['READ_NOISE_CONSISTENT'] = True
                overscanReadNoise = metadataStats['RESIDUAL STDEV'][ampName]
                if overscanReadNoise:
                    if (overscanReadNoise - readNoise)/readNoise > 0.05:
                        verify['READ_NOISE_CONSISTENT'] = False

            verifyStats[ampName] = verify

        return {'AMP': verifyStats}, bool(success)
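
# Illustrative sketch only, not part of the module: one way ``verify`` might
# be exercised, assuming a hypothetical amplifier named 'C00', made-up
# statistic values, and an ``exposure`` whose detector reports a nominal read
# noise of 5.0 for that amplifier.  In normal use the inputs are assembled by
# the CpVerifyStatsTask machinery.
#
#     task = CpVerifyBiasTask()
#     statisticsDict = {
#         'AMP': {'C00': {'MEAN': 0.02, 'NOISE': 5.1, 'CR_NOISE': 5.0}},
#         'METADATA': {'RESIDUAL STDEV': {'C00': 5.2}},
#     }
#     results, success = task.verify(exposure, statisticsDict)
#     # results == {'AMP': {'C00': {'MEAN': True, 'NOISE': True,
#     #                             'CR_NOISE': True, 'SUCCESS': True,
#     #                             'READ_NOISE_CONSISTENT': True}}}
#     # success == True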