Coverage for python/lsst/analysis/tools/tasks/reconstructor.py: 16%

41 statements  

« prev     ^ index     » next       coverage.py v7.2.7, created at 2023-06-07 03:18 -0700

1# This file is part of analysis_tools. 

2# 

3# Developed for the LSST Data Management System. 

4# This product includes software developed by the LSST Project 

5# (https://www.lsst.org). 

6# See the COPYRIGHT file at the top-level directory of this distribution 

7# for details of code ownership. 

8# 

9# This program is free software: you can redistribute it and/or modify 

10# it under the terms of the GNU General Public License as published by 

11# the Free Software Foundation, either version 3 of the License, or 

12# (at your option) any later version. 

13# 

14# This program is distributed in the hope that it will be useful, 

15# but WITHOUT ANY WARRANTY; without even the implied warranty of 

16# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 

17# GNU General Public License for more details. 

18# 

19# You should have received a copy of the GNU General Public License 

20# along with this program. If not, see <https://www.gnu.org/licenses/>. 

21from __future__ import annotations 

22 

23__all__ = ("reconstructAnalysisTools", "getPlotDatasetTypeNames") 

24 

25from typing import TYPE_CHECKING, Any, Callable, Iterable 

26 

27from lsst.pipe.base.connections import PipelineTaskConnections, iterConnections 

28from lsst.pipe.base.connectionTypes import BaseConnection 

29 

30from ..interfaces import AnalysisBaseConfig 

31 

32if TYPE_CHECKING:

33 from lsst.daf.butler import Butler, DataId 

34 

35 

def reconstructAnalysisTools(
    butler: Butler,
    collection: str,
    label: str,
    dataId: DataId,
    callback: Callable[[dict[str, Any], DataId], dict[str, Any]] | None = None,
) -> tuple[AnalysisBaseConfig, dict[str, Any]]:
    """Reconstruct the analysis tools used to produce metrics and plots in a
    task and all input data required.

    Parameters
    ----------
    butler : `~lsst.daf.butler.Butler`
        The butler where the data is stored.
    collection : `str`
        Collection within the butler associated with desired data.
    label : `str`
        The label from the `~lsst.pipe.base.Pipeline` associated with the task
        whose tools are to be reconstructed.
    dataId : `~lsst.daf.butler.DataId`
        Identifier for which data to retrieve.
    callback : `~typing.Callable`, optional
        An optional function which can transform the data after it has been
        loaded from the butler. The function must take a dict of strings to
        data products, and the DataId. The function must return a dict of
        string to data products. The returned dict is what will be returned
        by `reconstructAnalysisTools`. Defaults to `None` (no transform).

    Returns
    -------
    config : `AnalysisBaseConfig`
        The configuration of the task run to produce metrics and plots. This
        config contains all the `AnalysisTools` as configured when the task
        produced the data.
    data : `dict` of `str` to `Any`
        The data that went into producing metrics and plots.
    """
    # The persisted task config records exactly how the tools were configured.
    configDSType = f"{label}_config"
    config = butler.get(configDSType, collections=(collection,))

    # Rebuild the task's connections so we know which datasets it consumed.
    connections: PipelineTaskConnections = config.connections.ConnectionsClass(config=config)
    inputs: dict[str, Any] = {}

    for name in connections.inputs:
        connection: BaseConnection = getattr(connections, name)
        dsName = connection.name
        if connection.multiple:
            # Multiple connection: query the butler for every dataset of
            # this type matching the dataId and load each one.
            refs = set(
                butler.registry.queryDatasets(
                    dsName, dataId=dataId, findFirst=True, collections=(collection,)
                )
            )
            inputs[name] = [butler.get(ref, collections=(collection,)) for ref in refs]
        else:
            inputs[name] = butler.get(dsName, dataId=dataId, collections=(collection,))

    # Optionally let the caller post-process the loaded inputs.
    if callback is not None:
        inputs = callback(inputs, dataId)

    return (config, inputs)

100 

101 

def getPlotDatasetTypeNames(
    butler: Butler,
    collections: str | Iterable[str],
    label: str | None = None,
) -> Iterable[str]:
    """Get the dataset type names for plots (anything with
    StorageClass="Plot") from butler collections.

    Parameters
    ----------
    butler : `~lsst.daf.butler.Butler`
        The butler where the data is stored.
    collections : `str` or `list` [`str`]
        Collections within the butler to query for datasets containing plots.
    label : `str`, optional
        The label from the `~lsst.pipe.base.Pipeline` associated with the task
        whose plots are to be queried. If no label is given, all requested
        collections will be queried.

    Returns
    -------
    plotNames : `list` [`str`]
        Plot dataset type names.
    """
    if label is None:
        # No label supplied: load every persisted task config in the
        # collections and keep only those produced by analysis tasks.
        refs = butler.registry.queryDatasets("*_config", collections=collections)
        loaded = (butler.get(ref) for ref in refs)
        configs = [cfg for cfg in loaded if isinstance(cfg, AnalysisBaseConfig)]
    else:
        configs = [butler.get(f"{label}_config", collections=collections)]

    plotNames: list[str] = []
    for cfg in configs:
        # Rebuild the task's connections and collect every output whose
        # storage class marks it as a plot.
        conns: PipelineTaskConnections = cfg.connections.ConnectionsClass(config=cfg)
        plotNames.extend(
            conn.name
            for conn in iterConnections(conns, "outputs")
            if conn.storageClass == "Plot"
        )
    return plotNames