Coverage for python/lsst/verify/tasks/metricTask.py: 51%

31 statements  


# This file is part of verify.
#
# Developed for the LSST Data Management System.
# This product includes software developed by the LSST Project
# (https://www.lsst.org).
# See the COPYRIGHT file at the top-level directory of this distribution
# for details of code ownership.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.

__all__ = ["MetricComputationError", "MetricTask", "MetricConfig",
           "MetricConnections"]

import abc

import lsst.pipe.base as pipeBase
from lsst.pipe.base import connectionTypes

from lsst.verify import Name

class MetricComputationError(RuntimeError):
    """This class represents unresolvable errors in computing a metric.

    `lsst.verify.tasks.MetricTask` raises ``MetricComputationError``
    instead of other data- or processing-related exceptions to let code that
    calls a mix of data processing and metric tasks distinguish between
    the two. Therefore, most ``MetricComputationError`` instances should be
    chained to another exception representing the underlying problem.
    """
    pass
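
# Illustrative sketch (not part of the original module): how a concrete
# metric implementation might chain ``MetricComputationError`` to the
# underlying exception, as recommended above. The helper name and the
# ``psf_fwhm`` column are hypothetical, for illustration only.
def _example_chained_error(catalog):
    """Compute a toy statistic, chaining failures to the root cause."""
    try:
        return catalog["psf_fwhm"].mean()
    except (KeyError, TypeError) as e:
        # Chaining lets callers separate metric failures from ordinary
        # data-processing exceptions.
        raise MetricComputationError("Could not read PSF data.") from e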

class MetricConnections(pipeBase.PipelineTaskConnections,
                        defaultTemplates={"package": None, "metric": None},
                        dimensions={"instrument", "visit", "detector"},
                        ):
    """An abstract connections class defining a metric output.

    This class assumes detector-level metrics, which is the most common case.
    Subclasses can redeclare ``measurement`` and ``dimensions`` to override
    this assumption.

    Notes
    -----
    ``MetricConnections`` defines the following dataset templates:

    ``package``
        Name of the metric's namespace. By
        :ref:`verify_metrics <verify-metrics-package>` convention, this is
        the name of the package the metric is most closely associated with.
    ``metric``
        Name of the metric, excluding any namespace.
    """
    measurement = connectionTypes.Output(
        name="metricvalue_{package}_{metric}",
        doc="The metric value computed by this task.",
        storageClass="MetricValue",
        dimensions={"instrument", "visit", "detector"},
    )
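
# Illustrative sketch (not part of the original module): a hypothetical
# connections subclass that redeclares ``measurement`` and ``dimensions``
# for a visit-level rather than detector-level metric, as the docstring
# above allows.
class _ExampleVisitMetricConnections(MetricConnections,
                                     dimensions={"instrument", "visit"},
                                     ):
    measurement = connectionTypes.Output(
        name="metricvalue_{package}_{metric}",
        doc="A visit-level metric value.",
        storageClass="MetricValue",
        dimensions={"instrument", "visit"},
    )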

class MetricConfig(pipeBase.PipelineTaskConfig,
                   pipelineConnections=MetricConnections):

    def validate(self):
        super().validate()

        if "." in self.connections.package:
            raise ValueError(f"package name {self.connections.package} must "
                             "not contain periods")
        if "." in self.connections.metric:
            raise ValueError(f"metric name {self.connections.metric} must "
                             "not contain periods; use connections.package "
                             "instead")

    @property
    def metricName(self):
        """The metric calculated by a `MetricTask` with this config
        (`lsst.verify.Name`, read-only).
        """
        return Name(package=self.connections.package,
                    metric=self.connections.metric)
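
# Illustrative sketch (not part of the original module): how ``metricName``
# assembles the connection templates into a fully qualified metric name.
# The "ip_diffim" package and "numSciSources" metric are example values only.
def _example_metric_name():
    config = MetricConfig()
    config.connections.package = "ip_diffim"
    config.connections.metric = "numSciSources"
    # Equivalent to lsst.verify.Name("ip_diffim.numSciSources")
    return config.metricName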

class MetricTask(pipeBase.PipelineTask, metaclass=abc.ABCMeta):
    """A base class for tasks that compute one metric from input datasets.

    Parameters
    ----------
    *args
    **kwargs
        Constructor parameters are the same as for
        `lsst.pipe.base.PipelineTask`.

    Notes
    -----
    In general, both the ``MetricTask``'s metric and its input data are
    configurable. Metrics may be associated with a data ID at any level of
    granularity, including repository-wide.

    Like `lsst.pipe.base.PipelineTask`, this class should be customized by
    overriding `run` and by providing a `lsst.pipe.base.connectionTypes.Input`
    for each parameter of `run`. For requirements that are specific to
    ``MetricTask``, see `run`.
    """

    ConfigClass = MetricConfig

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    @abc.abstractmethod
    def run(self, **kwargs):
        """Run the MetricTask on in-memory data.

        Parameters
        ----------
        **kwargs
            Keyword arguments matching the inputs given in the class config;
            see `lsst.pipe.base.PipelineTask.run` for more details.

        Returns
        -------
        struct : `lsst.pipe.base.Struct`
            A `~lsst.pipe.base.Struct` containing at least the
            following component:

            - ``measurement``: the value of the metric
              (`lsst.verify.Measurement` or `None`). This method is not
              responsible for adding mandatory metadata (e.g., the data ID);
              this is handled by the caller. `None` may be used to indicate
              that a metric is undefined or irrelevant instead of raising
              `~lsst.pipe.base.NoWorkFound`.

        Raises
        ------
        lsst.verify.tasks.MetricComputationError
            Raised if an algorithmic or system error prevents calculation
            of the metric. Examples include corrupted input data or
            unavoidable exceptions raised by analysis code. The
            `~lsst.verify.tasks.MetricComputationError` should be chained to a
            more specific exception describing the root cause.

            Not having enough data for a metric to be applicable is not an
            error, and should raise ``NoWorkFound`` (see below) instead of
            this exception.
        lsst.pipe.base.NoWorkFound
            Raised if the metric is ill-defined or otherwise inapplicable to
            the data. Typically this means that the pipeline step or option
            being measured was not run.
        """

    def runQuantum(self, butlerQC, inputRefs, outputRefs):
        """Do Butler I/O to provide in-memory objects for run.

        This specialization of runQuantum performs error-handling specific to
        MetricTasks.
        """
        # Synchronize changes to this method with ApdbMetricTask
        inputs = butlerQC.get(inputRefs)
        outputs = self.run(**inputs)
        if outputs.measurement is not None:
            butlerQC.put(outputs, outputRefs)
        else:
            self.log.debug("Skipping measurement of %r on %s "
                           "as not applicable.", self, inputRefs)