Coverage for python/lsst/analysis/tools/tasks/propertyMapTractAnalysis.py: 41% (51 statements)
coverage.py v7.5.0, created at 2024-04-24 04:10 -0700

# This file is part of analysis_tools.
#
# Developed for the LSST Data Management System.
# This product includes software developed by the LSST Project
# (https://www.lsst.org).
# See the COPYRIGHT file at the top-level directory of this distribution
# for details of code ownership.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
from __future__ import annotations

__all__ = [
    "PropertyMapTractAnalysisConfig",
    "PropertyMapTractAnalysisTask",
]

from typing import Any, Mapping, Union

from lsst.daf.butler import DataCoordinate
from lsst.pex.config import Config, ConfigDictField, Field, ListField
from lsst.pipe.base import InputQuantizedConnection, OutputQuantizedConnection, QuantumContext
from lsst.pipe.base import connectionTypes as ct
from lsst.skymap import BaseSkyMap

from ..interfaces import AnalysisBaseConfig, AnalysisBaseConnections, AnalysisPipelineTask


class PropertyMapConfig(Config):
    coaddName = Field(
        dtype=str,
        doc="Coadd name: typically one of deep or goodSeeing.",
        default="deep",
    )
    operations = ListField(
        dtype=str,
        doc="List of operations whose corresponding maps should be retrieved.",
        default=["min", "max", "mean", "weighted_mean", "sum"],
    )
    nBinsHist = Field(
        dtype=int,
        doc="Number of bins to use for the histogram.",
        default=100,
    )


class PropertyMapTractAnalysisConnections(
    AnalysisBaseConnections,
    dimensions=("skymap", "tract", "band"),
    defaultTemplates={"outputName": "propertyMapTract"},
):
    healSparsePropertyMapsConfig = ct.Input(
        doc="Configuration parameters for HealSparseInputMapTask in pipe_tasks.",
        name="healSparsePropertyMaps_config",
        storageClass="Config",
        dimensions=(),
    )

    skymap = ct.Input(
        doc="The skymap that covers the tract that the data is from.",
        name=BaseSkyMap.SKYMAP_DATASET_TYPE_NAME,
        storageClass="SkyMap",
        dimensions=("skymap",),
    )

    def __init__(self, *, config=None):
        super().__init__(config=config)

        operationNameLookup = {
            "min": "Minimum",
            "max": "Maximum",
            "mean": "Mean",
            "weighted_mean": "Weighted mean",
            "sum": "Sum",
        }

        # Making connections for the maps that are configured to run.
        for propertyName in config.properties:
            coaddName = config.properties[propertyName].coaddName
            for operationName in config.properties[propertyName].operations:
                operationLongName = operationNameLookup[operationName]
                name = f"{coaddName}Coadd_{propertyName}_map_{operationName}"
                setattr(
                    self,
                    f"{coaddName}Coadd_{propertyName}_{operationName}",
                    ct.Input(
                        doc=f"{operationLongName}-value map of {{propertyLongName}}",
                        name=name,
                        storageClass="HealSparseMap",
                        dimensions=("tract", "skymap", "band"),
                        multiple=False,
                        deferLoad=True,
                    ),
                )
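        # For example (illustrative): a config entry properties["exposure_time"]
        # with coaddName="deep" and operations=["sum"] produces a connection
        # attribute named "deepCoadd_exposure_time_sum" whose dataset type is
        # "deepCoadd_exposure_time_map_sum".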


class PropertyMapTractAnalysisConfig(
    AnalysisBaseConfig, pipelineConnections=PropertyMapTractAnalysisConnections
):
    zoomFactors = ListField(
        dtype=float,
        doc="Two-element list of zoom factors to use when plotting the maps.",
        default=[2, 8],
    )

    properties = ConfigDictField(
        doc="A configurable dictionary describing the property maps to be plotted, and the coadd name and "
        "operations for each map. The available properties include 'exposure_time', 'psf_size', 'psf_e1', "
        "'psf_e2', 'psf_maglim', 'sky_noise', 'sky_background', 'dcr_dra', 'dcr_ddec', 'dcr_e1', 'dcr_e2', "
        "and 'epoch'.",
        keytype=str,
        itemtype=PropertyMapConfig,
        default={},
    )
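    # Illustrative sketch (not part of the original module): a pipeline config
    # might enable a couple of maps roughly as follows, assuming the usual
    # pex_config ConfigDictField assignment idiom:
    #
    #     config.properties["exposure_time"] = PropertyMapConfig
    #     config.properties["exposure_time"].operations = ["sum"]
    #     config.properties["psf_size"] = PropertyMapConfig
    #     config.properties["psf_size"].operations = ["weighted_mean"]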


class PropertyMapTractAnalysisTask(AnalysisPipelineTask):
    ConfigClass = PropertyMapTractAnalysisConfig
    _DefaultName = "propertyMapTractAnalysisTask"

    def parsePlotInfo(
        self, inputs: Mapping[str, Any], dataId: DataCoordinate | None, connectionNames: list[str]
    ) -> Mapping[str, Union[Mapping[str, str], str, int]]:
        """Parse the inputs and dataId to get the information needed to add to
        the figure.

        Parameters
        ----------
        inputs : `dict`
            The inputs to the task.
        dataId : `~lsst.daf.butler.DataCoordinate`
            The dataId that the task is being run on.
        connectionNames : `list` [`str`]
            Names of the input connections to use for determining table names.

        Returns
        -------
        plotInfo : `dict`
            A dictionary containing the information needed to add to the
            figure.

        Notes
        -----
        This method is customized because these analyses are not one-to-one
        with dataset types: multiple connections/dataset types are analyzed at
        once, so the table names differ between connections.
        """

        # Initialize the plot info dictionary.
        plotInfo = {
            # To be filled in later.
            "tableNames": {},
            # They all share the same run, so just grab the first one.
            "run": inputs[connectionNames[0]].ref.run,
        }

        # For each connection, separately store the table name.
        for connectionName in connectionNames:
            tableName = inputs[connectionName].ref.datasetType.name
            plotInfo["tableNames"][connectionName] = tableName

        # Add the dataId information; the band, skymap, and tract are the
        # same for all connections.
        self._populatePlotInfoWithDataId(plotInfo, dataId)

        return plotInfo
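
    # For illustration, the returned dictionary has the form
    #     {
    #         "tableNames": {<connectionName>: <datasetTypeName>, ...},
    #         "run": <run collection of the first input>,
    #         ...  # plus the dataId keys added by _populatePlotInfoWithDataId
    #     }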

    def runQuantum(
        self,
        butlerQC: QuantumContext,
        inputRefs: InputQuantizedConnection,
        outputRefs: OutputQuantizedConnection,
    ) -> None:
        # Docstring inherited.

        inputs = butlerQC.get(inputRefs)
        skymap = inputs["skymap"]
        dataId = butlerQC.quantum.dataId
        tractInfo = skymap[dataId["tract"]]

        mapsDict = {}
        for key, value in inputs.items():
            if key in ["skymap", "healSparsePropertyMapsConfig"]:
                continue
            mapsDict[key] = value
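        # Note: the map connections are declared with deferLoad=True, so the
        # values collected here are deferred-load dataset handles; the actual
        # HealSparse maps are only read when needed downstream.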

        plotInfo = self.parsePlotInfo(inputs, dataId, list(mapsDict.keys()))
        outputs = self.run(
            data={"maps": mapsDict}, tractInfo=tractInfo, plotConfig=self.config, plotInfo=plotInfo
        )
        butlerQC.put(outputs, outputRefs)
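
# Illustrative note: with connection dimensions ("skymap", "tract", "band"),
# one quantum of this task runs per tract and band, gathering all configured
# property maps for that tract into a single analysis/plotting call.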