# LSST Data Management System
# Copyright 2017 AURA/LSST.
#
# This product includes software developed by the
# LSST Project (http://www.lsst.org/).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the LSST License Statement and
# the GNU General Public License along with this program.  If not,
# see <https://www.lsstcorp.org/LegalNotices/>.

from astropy.table import Column, Table
import json
import numpy as np

from lsst.verify import Job, SpecificationSet, MetricSet
from .validate import get_specs_metrics


def run(validation_drp_report_filenames, output_file,
        srd_level=None,
        release_specs_package=None, release_level=None,
        metrics_package='verify_metrics'):
    """
    Parameters
    ----------
    validation_drp_report_filenames : list or str
        Filepaths for JSON files.
    output_file : str
        Filepath of output RST file.
    srd_level : str, optional
        SRD level to quote.  One of ['design', 'minimum', 'stretch'].
    release_specs_package : str, optional
        Name of package to use in constructing the release level specs.
    release_level : str, optional
        A specification level in 'release_specs_package',
        e.g., 'FY17' or 'ORR'.
    metrics_package : str, optional
        Name of the metrics package to load metric and specification
        definitions from.

    Products
    --------
    Writes table of performance metrics to an RST file.
    """
    input_objects = ingest_data(validation_drp_report_filenames, metrics_package)
    input_table = objects_to_table(input_objects, level=srd_level)
    if input_table is None:
        msg = "Table from Job is None. Returning without writing table"
        print(msg)
        return
    if release_specs_package is not None and release_level is not None:
        tmp_specs = SpecificationSet.load_metrics_package(release_specs_package,
                                                          subset='validate_drp')
        release_specs = tmp_specs.subset(spec_tags=['release'])
        add_release_spec(input_table, release_specs, release_level, srd_level)

    write_report(input_table, output_file)
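
# Example (hypothetical filenames; a minimal sketch of invoking the entry
# point above, assuming per-filter JSON outputs from validate_drp and the
# default 'verify_metrics' package):
#
#     run(['Cfht_output_r.json', 'Cfht_output_i.json'],
#         output_file='performance_report.rst',
#         srd_level='design')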


def ingest_data(filenames, metrics_package):
    """Load JSON files into a dict of lsst.verify measurement Jobs.

    Parameters
    ----------
    filenames : list of str
        Filenames of JSON files to load.
    metrics_package : str
        Name of the metrics package providing the metric and specification
        definitions to attach to each Job.

    Returns
    -------
    jobs : dict of `str` : `lsst.verify.Job`
        The Job representation of each JSON file, keyed by filter name.
    """
    jobs = {}
    # Read in JSON output from metrics run
    for filename in filenames:
        with open(filename) as fh:
            data = json.load(fh)
        job = Job.deserialize(**data)
        filter_name = job.meta['filter_name']
        metrics = MetricSet.load_metrics_package(metrics_package)
        job.metrics.update(metrics)
        specs = SpecificationSet.load_metrics_package(metrics_package)
        job.specs.update(specs)
        jobs[filter_name] = job

    return jobs
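
# Example (hypothetical filename; a sketch assuming the JSON file records a
# single r-band Job with 'filter_name' in its metadata):
#
#     jobs = ingest_data(['Cfht_output_r.json'], 'verify_metrics')
#     jobs['r'].measurements   # measurements for the r-band Job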


# Identify key data from JSON
def objects_to_table(input_objects, level='design'):
    """Convert lsst.verify.Job objects to an astropy.table.Table.

    Parameters
    ----------
    input_objects : dict of `str` : `lsst.verify.Job`
        Job objects keyed by filter name, as returned by `ingest_data`.
    level : str
        The requirement level to compare to for each metric.
        This is required because some metrics have dependencies;
        e.g., AD1 depends on AF1, so a level must be specified
        to select even a single AD1 metric.

    Returns
    -------
    report : astropy.table.Table
        Table with the columns needed for the final report.
    """
    rows = []
    for filter_name, job in input_objects.items():
        specs, metrics = get_specs_metrics(job)
        for key, m in job.measurements.items():
            parts = key.metric.split("_")
            metric = parts[0]  # For compound metrics
            if len(parts) > 1:
                if level not in ".".join(parts):
                    continue
            spec_set = specs[metric]
            spec = None
            for spec_key in spec_set:
                if level in job.specs[spec_key].tags:
                    spec = job.specs[spec_key]
            # If the spec is not tagged, inspect the name.
            if spec is None:
                for spec_key in spec_set:
                    if level in spec_key.spec:
                        spec = job.specs[spec_key]
            if spec is not None:
                if np.isnan(m.quantity):
                    # '--' is reserved in reST for headers, so use '**' instead.
                    meas_quantity_value = "**"
                else:
                    meas_quantity_value = m.quantity.value
                this_row = [metric, filter_name, meas_quantity_value, m.quantity.unit,
                            spec.operator_str, spec.threshold.value, job.meta['instrument']]
                rows.append(this_row)

    if len(rows) == 0:
        msg_format = "No rows loaded from Job at level: '{:s}'"
        print(msg_format.format(level))
        return None

    srd_requirement_col_name = 'SRD Requirement: %s' % level
    col_names = ('Metric', 'Filter', 'Value', 'Unit',
                 'Operator', srd_requirement_col_name, 'Instrument')
    output = Table(rows=rows, names=col_names)
    output.add_column(Column([''] * len(output), dtype=str, name='Comments'))
    return output
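
# Example (a sketch continuing from the ingest_data example above; the
# requirement column follows the 'SRD Requirement: <level>' naming pattern
# built in this function):
#
#     table = objects_to_table(jobs, level='design')
#     table[['Metric', 'Filter', 'Value', 'SRD Requirement: design']]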


# Calculate numbers in table
def add_release_spec(data, release_specs, release_specs_level, srd_level):
    """Add a column of additional metric thresholds.

    Intended use is for specifying a set of release metrics that converge
    over time to the SRD metrics.

    If release_specs_level is not present in release_specs,
    the existing columns are unchanged and the added release-target
    column contains no values (NaN).
    """
    release_targets = []
    for row in data:
        specs = release_specs.subset(required_meta={'filter_name': row['Filter'],
                                                    'instrument': row['Instrument']},
                                     spec_tags=['chromatic'])
        specs.update(release_specs.subset(required_meta={'instrument': row['Instrument']},
                                          spec_tags=['achromatic']))
        value = None
        for spec in specs:
            parts = spec.metric.split('_')  # For compound specs
            if len(parts) == 2 and parts[1] != srd_level:
                continue
            if parts[0] == row['Metric'] and release_specs_level in spec.spec:
                value = specs[spec].threshold.value
        release_targets.append(value)

    release_targets_col = Column(
        release_targets,
        dtype=float,
        name='Release Target: %s' % release_specs_level)
    data.add_column(release_targets_col)
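
# Example (a sketch mirroring the call made in run() above; 'verify_metrics'
# stands in for a release specification package and 'FY17' is a hypothetical
# release level):
#
#     release_specs = SpecificationSet.load_metrics_package(
#         'verify_metrics', subset='validate_drp').subset(spec_tags=['release'])
#     add_release_spec(table, release_specs, 'FY17', 'design')
#     table['Release Target: FY17']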


def float_or_dash(f, format_string='{:.3g}'):
    """Return a string of the formatted float, or '**' if not a number.

    Intended use is to provide formatting output for columns
    where a None or non-float value indicates a missing measurement.
    """
    # This try/except handles both None and non-numeric strings.
    try:
        f = float(f)
        return format_string.format(f)
    except Exception:
        # Dashes are reserved in reST, so return '**' instead.
        return '**'
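
# Example (behavior of the formatter above):
#
#     float_or_dash(0.123456)   # -> '0.123'
#     float_or_dash(None)       # -> '**'
#     float_or_dash('bad')      # -> '**'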


def blank_none(s):
    """Return a blank for None or 'None', else return the string of the input.

    Intended use is to provide formatting output for columns where an empty
    or None value is reasonable and expected and should be displayed
    as a blank ''.
    """
    if s is None:
        return ''
    if s == 'None':
        return ''

    return str(s)
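
# Example (behavior of the formatter above):
#
#     blank_none(None)     # -> ''
#     blank_none('None')   # -> ''
#     blank_none(1.5)      # -> '1.5'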


def find_col_name(prefix, colnames):
    """Return the first entry in 'colnames' that starts with 'prefix'.

    Returns None if no entry matches.
    """
    for c in colnames:
        if c.startswith(prefix):
            return c
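
# Example (illustrating the helper above with an arbitrary column list):
#
#     find_col_name('SRD Requirement',
#                   ['Metric', 'SRD Requirement: design', 'Value'])
#     # -> 'SRD Requirement: design'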


# Output table
def write_report(data, filename='test.rst', format='ascii.rst'):
    """Write performance report to an RST file.

    Parameters
    ----------
    data : astropy.table.Table
        Table of performance metrics to write.
    filename : str, optional
        Filepath of output RST file.
    format : str, optional
        astropy.table format for the output table.

    Creates
    -------
    filename
        Output file with the RST version of the data Table.
    """
    # Find the 'Release Target: XYZ' column name
    release_target_col_name = find_col_name('Release Target', data.colnames)
    # Find the 'SRD Requirement: XYZ' column name
    srd_requirement_col_name = find_col_name('SRD Requirement', data.colnames)

    col_names = ['Metric', 'Filter', 'Unit', 'Operator',
                 srd_requirement_col_name,
                 release_target_col_name,
                 'Value', 'Comments']
    use_col_names = [c for c in col_names if c in data.colnames]
    # Provide default formats
    for spec_col in (release_target_col_name, srd_requirement_col_name):
        if spec_col in data:
            data[spec_col].info.format = '.2f'
    data['Value'].info.format = float_or_dash
    data['Unit'].info.format = blank_none
    # Astropy 1.2.1 (the current miniconda stack install) doesn't support
    # overwrite=True for Tables.  Astropy 2.0 deprecates relying on automatic
    # overwriting of ASCII files and warns if overwrite is not specified, but
    # it does still write the file.  So for now leave overwrite=True out of
    # the call below, although someday it will likely be required.
    data[use_col_names].write(filename=filename, format=format,
                              include_names=use_col_names)
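
# Example (a minimal sketch with a hand-built table of illustrative values
# only; real input comes from objects_to_table() above and may also carry a
# 'Release Target: ...' column):
#
#     t = Table(rows=[['PA1', 'r', 12.3, 'mmag', '<=', 20.0, 'CFHT', '']],
#               names=('Metric', 'Filter', 'Value', 'Unit', 'Operator',
#                      'SRD Requirement: design', 'Instrument', 'Comments'))
#     write_report(t, filename='performance_report.rst')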