
# LSST Data Management System
# Copyright 2008-2019 AURA/LSST.
#
# This product includes software developed by the
# LSST Project (http://www.lsst.org/).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the LSST License Statement and
# the GNU General Public License along with this program.  If not,
# see <https://www.lsstcorp.org/LegalNotices/>.
"""Main driver functions for metric measurements, plotting, specification
grading, and persistence.
"""

__all__ = ['plot_metrics', 'print_metrics', 'print_pass_fail_summary',
           'run', 'runOneFilter']

import json
import os
from textwrap import TextWrapper

import numpy as np
import astropy.units as u
import astropy.visualization

from lsst.verify import Name
from lsst.verify import Job, MetricSet, SpecificationSet

from lsst.daf.persistence import Butler

from .util import repoNameToPrefix
from .matchreduce import build_matched_dataset
from .photerrmodel import build_photometric_error_model
from .astromerrmodel import build_astrometric_error_model
from .calcnonsrd import measure_model_phot_rep
from .calcsrd import (measurePA1, measurePA2, measurePF1, measureAMx,
                      measureAFx, measureADx, measureTEx)
from .plot import (plotAMx, plotPA1, plotTEx, plotPhotometryErrorModel,
                   plotAstrometryErrorModel)


class Bcolors:
    """ANSI terminal escape sequences for colored and styled console output."""

    HEADER = '\033[95m'
    OKBLUE = '\033[94m'
    OKGREEN = '\033[92m'
    WARNING = '\033[93m'
    FAIL = '\033[91m'
    ENDC = '\033[0m'
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'


def load_json_output(filepath, metrics_package='verify_metrics'):
    """Read JSON from a file into a job object.

    Currently performs a trivial deserialization with no checking
    that the result is a valid `lsst.verify.Job` object.

    Parameters
    ----------
    filepath : `str`
        Source file name for JSON output.
    metrics_package : `str`, optional
        Name of the metrics package whose metric and specification
        definitions are merged into the job.

    Returns
    -------
    job : `lsst.verify.Job`
        The deserialized job.
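
    Examples
    --------
    A minimal sketch, assuming a JSON file cached by a previous run
    (the file name here is hypothetical):

    >>> job = load_json_output('CFHT_output_r.json')  # doctest: +SKIP
    >>> job.meta['filter_name']  # doctest: +SKIP
    'r'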

    """
    with open(filepath, 'r') as infile:
        json_data = json.load(infile)

    job = Job.deserialize(**json_data)
    metrics = MetricSet.load_metrics_package(metrics_package)
    job.metrics.update(metrics)
    specs = SpecificationSet.load_metrics_package(metrics_package)
    job.specs.update(specs)
    return job


def get_filter_name_from_job(job):
    """Get the filter name from an `lsst.verify.Job` object.

    Assumes there is only one filter name and that it is the one
    recorded in the job metadata.

    Parameters
    ----------
    job : `lsst.verify.Job`
        The job to read the filter name from.

    Returns
    -------
    filter_name : `str`
    """
    return job.meta['filter_name']


def run(repo_or_json, metrics=None,
        outputPrefix=None, makePrint=True, makePlot=True,
        level='design', metrics_package='verify_metrics', **kwargs):
    """Main entrypoint from ``validateDrp.py``.

    Parameters
    ----------
    repo_or_json : `str`
        The repository. This is generally the directory on disk
        that contains the repository and mapper.
        This can also be the filepath of a JSON file that contains
        the cached output from a previous run.
    metrics : `dict` or `collections.OrderedDict`, optional
        Dictionary of metrics to measure; passed through to `runOneRepo`.
    outputPrefix : `str`, optional
        Prefix for output file names. If None, it is derived from the
        repository or JSON file name.
    makePrint : `bool`, optional
        Print calculated quantities (to stdout).
    makePlot : `bool`, optional
        Create plots for metrics. Saved to current working directory.
    level : `str`, optional
        Specification level to assess, e.g., 'design', 'minimum', 'stretch'.
    metrics_package : `str`, optional
        Name of the metrics package to use.
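
    Examples
    --------
    A minimal sketch; the repository path and data IDs are hypothetical:

    >>> dataIds = [{'visit': 849375, 'ccd': 12, 'filter': 'r'}]
    >>> run('CFHT/output', dataIds=dataIds)  # doctest: +SKIP

    Cached JSON output from a previous run can be re-graded without
    re-measuring:

    >>> run('CFHT_output_r.json', makePlot=False)  # doctest: +SKIP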

    """
    base_name, ext = os.path.splitext(repo_or_json)
    if ext == '.json':
        load_json = True
    else:
        load_json = False

    # Derive the output prefix from the repo or JSON base name if not given.
    if outputPrefix is None:
        outputPrefix = repoNameToPrefix(base_name)

    if load_json:
        if not os.path.isfile(repo_or_json):
            print("Could not find JSON file %s" % (repo_or_json))
            return

        json_path = repo_or_json
        job = load_json_output(json_path, metrics_package)
        filterName = get_filter_name_from_job(job)
        jobs = {filterName: job}
    else:
        if not os.path.isdir(repo_or_json):
            print("Could not find repo %s" % (repo_or_json))
            return

        repo_path = repo_or_json
        jobs = runOneRepo(repo_path, metrics=metrics, outputPrefix=outputPrefix,
                          metrics_package=metrics_package, **kwargs)

    for filterName, job in jobs.items():
        if makePrint:
            print_metrics(job)
        if makePlot:
            if outputPrefix is None or outputPrefix == '':
                thisOutputPrefix = "%s" % filterName
            else:
                thisOutputPrefix = "%s_%s" % (outputPrefix, filterName)
            plot_metrics(job, filterName, outputPrefix=thisOutputPrefix)

    print_pass_fail_summary(jobs, default_level=level)


def runOneRepo(repo, dataIds=None, metrics=None, outputPrefix='', verbose=False,
               instrument=None, dataset_repo_url=None,
               metrics_package='verify_metrics', **kwargs):
    r"""Calculate statistics for all filters in a repo.

    Runs multiple filters, if necessary, through repeated calls to
    `runOneFilter`. Assesses results against SRD specs at the specified
    `level`.

    Parameters
    ----------
    repo : `str`
        The repository. This is generally the directory on disk
        that contains the repository and mapper.
    dataIds : `list` of `dict`
        List of butler data IDs of image catalogs to compare to reference.
        The calexp pixel image is needed for the photometric calibration.
        Tract IDs must be included if "doApplyExternalPhotoCalib" or
        "doApplyExternalSkyWcs" is True.
    metrics : `dict` or `collections.OrderedDict`
        Dictionary of `lsst.verify.Metric` instances. Typically this is
        data from ``validate_drp``\ 's ``metrics.yaml`` and loaded with
        `lsst.verify.MetricSet.load_metrics_package`.
    outputPrefix : `str`, optional
        Specify the beginning filename for output files.
        The name of each filter will be appended to outputPrefix.
    level : `str`, optional
        The level of the specification to check: "design", "minimum",
        "stretch".
    verbose : `bool`
        Provide detailed output.
    instrument : `str`
        Name of the instrument. If None, it will be extracted from the
        Butler mapper.
    dataset_repo_url : `str`
        Location of the dataset used. If None, it will be set to the path
        of the repo.
    metrics_package : `str`
        Name of the metrics package to be used in the Jobs created.

    Notes
    -----
    Names of plot files and the JSON file are generated based on the
    repository name, unless overridden by specifying `outputPrefix`.
    E.g., analyzing a repository ``CFHT/output``
    will result in filenames that start with ``CFHT_output_``.
    The filter name is added to this prefix. If the filter name has spaces,
    there will be annoyance and sadness as those spaces will appear in the
    filenames.
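
    Examples
    --------
    A minimal sketch; the repository path and data IDs are hypothetical:

    >>> dataIds = [{'visit': 849375, 'ccd': 12, 'filter': 'r'},
    ...            {'visit': 850587, 'ccd': 12, 'filter': 'r'}]
    >>> jobs = runOneRepo('CFHT/output', dataIds=dataIds)  # doctest: +SKIP
    >>> sorted(jobs.keys())  # doctest: +SKIP
    ['r']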

    """
    def extract_instrument_from_repo(repo):
        """Extract the camera name from a Butler repo's mapper class.

        'lsst.obs.lsstSim.lsstSimMapper.LsstSimMapper' -> 'LSSTSIM'
        'lsst.obs.cfht.megacamMapper.MegacamMapper' -> 'CFHT'
        'lsst.obs.decam.decamMapper.DecamMapper' -> 'DECAM'
        'lsst.obs.hsc.hscMapper.HscMapper' -> 'HSC'
        """
        mapper_class = Butler.getMapperClass(repo)
        instrument = mapper_class.getCameraName()
        return instrument.upper()

    if instrument is None:
        instrument = extract_instrument_from_repo(repo)
    if dataset_repo_url is None:
        dataset_repo_url = repo

    allFilters = set([d['filter'] for d in dataIds])

    jobs = {}
    for filterName in allFilters:
        # Do this here so that each outputPrefix has a different name for
        # each filter.
        if outputPrefix is None or outputPrefix == '':
            thisOutputPrefix = "%s" % filterName
        else:
            thisOutputPrefix = "%s_%s" % (outputPrefix, filterName)
        theseVisitDataIds = [v for v in dataIds if v['filter'] == filterName]
        job = runOneFilter(repo, theseVisitDataIds, metrics,
                           outputPrefix=thisOutputPrefix,
                           verbose=verbose, filterName=filterName,
                           instrument=instrument,
                           dataset_repo_url=dataset_repo_url,
                           metrics_package=metrics_package, **kwargs)
        jobs[filterName] = job

    return jobs


def runOneFilter(repo, visitDataIds, metrics, brightSnr=100,
                 makeJson=True, filterName=None, outputPrefix='',
                 doApplyExternalPhotoCalib=False, externalPhotoCalibName=None,
                 doApplyExternalSkyWcs=False, externalSkyWcsName=None,
                 skipTEx=False, verbose=False,
                 metrics_package='verify_metrics',
                 instrument='Unknown', dataset_repo_url='./',
                 skipNonSrd=False, **kwargs):
    r"""Main executable for the case where there is just one filter.

    Plot files and JSON files are generated in the local directory
    prefixed with the repository name (where '_' replaces path separators),
    unless overridden by specifying `outputPrefix`.
    E.g., analyzing a repository ``CFHT/output``
    will result in filenames that start with ``CFHT_output_``.

    Parameters
    ----------
    repo : `str` or `Butler`
        A Butler or a repository URL that can be used to construct one.
    visitDataIds : `list` of `dict`
        List of butler data IDs of image catalogs to compare to reference.
        The ``calexp`` pixel image is needed for the photometric calibration
        unless ``doApplyExternalPhotoCalib`` is True, in which case the
        appropriate ``photoCalib`` dataset is used. Note that these
        have data IDs that include the tract number.
    metrics : `dict` or `collections.OrderedDict`
        Dictionary of `lsst.verify.Metric` instances. Typically this is
        data from ``validate_drp``\ 's ``metrics.yaml`` and loaded with
        `lsst.verify.MetricSet.load_metrics_package`.
    brightSnr : `float`, optional
        Minimum SNR for a star to be considered bright.
    makeJson : `bool`, optional
        Create a JSON output file for metrics. Saved to the current working
        directory.
    outputPrefix : `str`, optional
        Specify the beginning filename for output files.
    filterName : `str`, optional
        Name of the filter (bandpass).
    doApplyExternalPhotoCalib : `bool`, optional
        Apply an external photoCalib to calibrate fluxes.
    externalPhotoCalibName : `str`, optional
        Type of external `PhotoCalib` to apply. Currently supported are
        jointcal, fgcm, and fgcm_tract. Must be set if
        ``doApplyExternalPhotoCalib`` is True.
    doApplyExternalSkyWcs : `bool`, optional
        Apply an external WCS to calibrate positions.
    externalSkyWcsName : `str`, optional
        Type of external WCS to apply. Currently supported is jointcal.
        Must be set if ``doApplyExternalSkyWcs`` is True.
    skipTEx : `bool`, optional
        Skip TEx calculations (useful for older catalogs that don't have
        PsfShape measurements).
    verbose : `bool`, optional
        Output additional information on the analysis steps.
    skipNonSrd : `bool`, optional
        Skip any metrics not defined in the LSST SRD.

    Raises
    ------
    RuntimeError
        Raised if ``doApplyExternalPhotoCalib`` is True and
        ``externalPhotoCalibName`` is None, or if ``doApplyExternalSkyWcs``
        is True and ``externalSkyWcsName`` is None.
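
    Examples
    --------
    A minimal sketch; the repository path and data IDs are hypothetical.
    The ``metrics`` argument is superseded by the job's own metric set,
    so ``None`` is passed here:

    >>> visitDataIds = [{'visit': 849375, 'ccd': 12, 'filter': 'r'},
    ...                 {'visit': 850587, 'ccd': 12, 'filter': 'r'}]
    >>> job = runOneFilter('CFHT/output', visitDataIds, None,
    ...                    filterName='r',
    ...                    outputPrefix='CFHT_output_r')  # doctest: +SKIP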

    """
    if doApplyExternalPhotoCalib and externalPhotoCalibName is None:
        raise RuntimeError("Must set externalPhotoCalibName if doApplyExternalPhotoCalib is True.")
    if doApplyExternalSkyWcs and externalSkyWcsName is None:
        raise RuntimeError("Must set externalSkyWcsName if doApplyExternalSkyWcs is True.")

    job = Job.load_metrics_package(meta={'instrument': instrument,
                                         'filter_name': filterName,
                                         'dataset_repo_url': dataset_repo_url},
                                   subset='validate_drp',
                                   package_name_or_path=metrics_package)

    matchedDataset = build_matched_dataset(repo, visitDataIds,
                                           doApplyExternalPhotoCalib=doApplyExternalPhotoCalib,
                                           externalPhotoCalibName=externalPhotoCalibName,
                                           doApplyExternalSkyWcs=doApplyExternalSkyWcs,
                                           externalSkyWcsName=externalSkyWcsName,
                                           skipTEx=skipTEx, skipNonSrd=skipNonSrd)

    photomModel = build_photometric_error_model(matchedDataset)
    astromModel = build_astrometric_error_model(matchedDataset)

    linkedBlobs = [matchedDataset, photomModel, astromModel]

    metrics = job.metrics
    specs = job.specs

    def add_measurement(measurement):
        for blob in linkedBlobs:
            measurement.link_blob(blob)
        job.measurements.insert(measurement)

    # AM1, AM2, and AM3 are measured over separations of 5, 20, and
    # 200 arcmin, respectively; the matching ADx and AFx measurements are
    # derived from each AMx measurement.
    for x, D in zip((1, 2, 3), (5., 20., 200.)):
        amxName = 'AM{0:d}'.format(x)
        afxName = 'AF{0:d}'.format(x)
        adxName = 'AD{0:d}'.format(x)

        amx = measureAMx(metrics['validate_drp.'+amxName], matchedDataset, D*u.arcmin)
        add_measurement(amx)

        afx_spec_set = specs.subset(required_meta={'instrument': 'HSC'}, spec_tags=[afxName, ])
        adx_spec_set = specs.subset(required_meta={'instrument': 'HSC'}, spec_tags=[adxName, ])
        for afx_spec_key, adx_spec_key in zip(afx_spec_set, adx_spec_set):
            afx_spec = afx_spec_set[afx_spec_key]
            adx_spec = adx_spec_set[adx_spec_key]
            adx = measureADx(metrics[adx_spec.metric_name], amx, afx_spec)
            add_measurement(adx)
            afx = measureAFx(metrics[afx_spec.metric_name], amx, adx, adx_spec)
            add_measurement(afx)

    pa1 = measurePA1(
        metrics['validate_drp.PA1'], filterName, matchedDataset.safeMatches, matchedDataset.magKey)
    add_measurement(pa1)

    if not skipNonSrd:
        model_phot_reps = measure_model_phot_rep(metrics, filterName, matchedDataset)
        for measurement in model_phot_reps:
            add_measurement(measurement)

    pf1_spec_set = specs.subset(required_meta={'instrument': instrument, 'filter_name': filterName},
                                spec_tags=['PF1', ])
    pa2_spec_set = specs.subset(required_meta={'instrument': instrument, 'filter_name': filterName},
                                spec_tags=['PA2', ])
    # Sort the keys so the PF1 and PA2 specs pair up level by level.
    pf1_spec_keys = sorted(pf1_spec_set.keys())
    pa2_spec_keys = sorted(pa2_spec_set.keys())
    for pf1_spec_key, pa2_spec_key in zip(pf1_spec_keys, pa2_spec_keys):
        pf1_spec = pf1_spec_set[pf1_spec_key]
        pa2_spec = pa2_spec_set[pa2_spec_key]

        pa2 = measurePA2(metrics[pa2_spec.metric_name], pa1, pf1_spec.threshold)
        add_measurement(pa2)

        pf1 = measurePF1(metrics[pf1_spec.metric_name], pa1, pa2_spec)
        add_measurement(pf1)

    if not skipTEx:
        for x, D, bin_range_operator in zip((1, 2), (1.0, 5.0), ("<=", ">=")):
            texName = 'TE{0:d}'.format(x)
            tex = measureTEx(metrics['validate_drp.'+texName], matchedDataset, D*u.arcmin,
                             bin_range_operator)
            add_measurement(tex)

    if makeJson:
        job.write(outputPrefix+'.json')

    return job


def get_metric(level, metric_label, in_specs):
    """Return the metric `Name` of the first spec matching a level and label."""
    for spec in in_specs:
        if level in str(spec) and metric_label in str(spec):
            break
    return Name(package=spec.package, metric=spec.metric)


def plot_metrics(job, filterName, outputPrefix=''):
    """Plot AM1, AM2, AM3, PA1 plus related informational plots.

    Parameters
    ----------
    job : `lsst.verify.Job`
        The job to load data from.
    filterName : `str`
        String identifying the filter.
    outputPrefix : `str`, optional
        Prefix for plot file names.
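
    Examples
    --------
    A minimal sketch; the job would normally come from `runOneFilter` or
    `load_json_output`, and the file name and prefix here are hypothetical:

    >>> job = load_json_output('CFHT_output_r.json')  # doctest: +SKIP
    >>> plot_metrics(job, 'r', outputPrefix='CFHT_output_r')  # doctest: +SKIP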

    """
    astropy.visualization.quantity_support()

    specs = job.specs
    measurements = job.measurements
    spec_name = 'design'
    for x in (1, 2, 3):
        amxName = 'AM{0:d}'.format(x)
        afxName = 'AF{0:d}'.format(x)
        # ADx is included on the AFx plots

        amx = measurements[get_metric(spec_name, amxName, specs)]
        afx = measurements[get_metric(spec_name, afxName, specs)]

        if amx.quantity is not None:
            try:
                plotAMx(job, amx, afx, filterName, amxSpecName=spec_name,
                        outputPrefix=outputPrefix)
            except RuntimeError as e:
                print(e)
                print('\tSkipped plot{}'.format(amxName))

    try:
        pa1 = measurements[get_metric(spec_name, 'PA1', specs)]
        plotPA1(pa1, outputPrefix=outputPrefix)
    except RuntimeError as e:
        print(e)
        print('\tSkipped plotPA1')

    try:
        matchedDataset = pa1.blobs['MatchedMultiVisitDataset']
        photomModel = pa1.blobs['PhotometricErrorModel']
        filterName = pa1.extras['filter_name']
        plotPhotometryErrorModel(matchedDataset, photomModel,
                                 filterName=filterName,
                                 outputPrefix=outputPrefix)
    except KeyError as e:
        print(e)
        print('\tSkipped plotPhotometryErrorModel')

    try:
        am1 = measurements[get_metric(spec_name, 'AM1', specs)]
        matchedDataset = am1.blobs['MatchedMultiVisitDataset']
        astromModel = am1.blobs['AnalyticAstrometryModel']
        plotAstrometryErrorModel(matchedDataset, astromModel,
                                 outputPrefix=outputPrefix)
    except KeyError as e:
        print(e)
        print('\tSkipped plotAstrometryErrorModel')

    for x in (1, 2):
        texName = 'TE{0:d}'.format(x)

        try:
            measurement = measurements[get_metric(spec_name, texName, specs)]
            plotTEx(job, measurement, filterName,
                    texSpecName='design',
                    outputPrefix=outputPrefix)
        except (RuntimeError, KeyError) as e:
            print(e)
            print('\tSkipped plot{}'.format(texName))


def get_specs_metrics(job):
    """Collect the job's specifications and metric names, keyed by base metric.

    Returns
    -------
    specs : `dict`
        Maps each base metric name (e.g., 'PA1') to a list of specification
        names.
    metrics : `dict`
        Maps each base metric name to a list of `lsst.verify.Name` metric
        names.
    """
    # Get specs for this filter
    subset = job.specs.subset(required_meta={'instrument': job.meta['instrument'],
                                             'filter_name': job.meta['filter_name']},
                              spec_tags=['chromatic'])
    # Get specs that don't depend on filter
    subset.update(job.specs.subset(required_meta={'instrument': job.meta['instrument']},
                                   spec_tags=['achromatic']))
    metrics = {}
    specs = {}
    for spec in subset:
        metric_name = spec.metric.split('_')[0]  # Take first part for linked metrics
        if metric_name in metrics:
            metrics[metric_name].append(Name(package=spec.package, metric=spec.metric))
            specs[metric_name].append(spec)
        else:
            metrics[metric_name] = [Name(package=spec.package, metric=spec.metric), ]
            specs[metric_name] = [spec, ]
    return specs, metrics
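# For illustration (hypothetical values), get_specs_metrics returns dicts
# keyed by base metric name, e.g.:
#
#     specs == {'PA1': [<Name 'validate_drp.PA1.design'>, ...], ...}
#     metrics == {'PA1': [<Name 'validate_drp.PA1'>, ...], ...}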



def print_metrics(job, levels=('minimum', 'design', 'stretch')):
    """Print each metric's measurement and grade it against its specs.

    Parameters
    ----------
    job : `lsst.verify.Job`
        The job to report on.
    levels : `tuple` of `str`, optional
        Specification levels to grade against.
    """
    specs, metrics = get_specs_metrics(job)

    print(Bcolors.BOLD + Bcolors.HEADER + "=" * 65 + Bcolors.ENDC)
    print(Bcolors.BOLD + Bcolors.HEADER +
          '{band} band metric measurements'.format(band=job.meta['filter_name']) +
          Bcolors.ENDC)
    print(Bcolors.BOLD + Bcolors.HEADER + "=" * 65 + Bcolors.ENDC)

    wrapper = TextWrapper(width=65)
    for metric_name, metric_set in metrics.items():
        metric = job.metrics[metric_set[0]]  # Pick the first one for the description
        print(Bcolors.HEADER + '{name} - {reference}'.format(
            name=metric.name, reference=metric.reference))
        print(wrapper.fill(Bcolors.ENDC + '{description}'.format(
            description=metric.description).strip()))

        for spec_key, metric_key in zip(specs[metric_name], metrics[metric_name]):
            level = None
            if 'release' in job.specs[spec_key].tags:
                # Skip release specs
                continue
            for lev in levels:
                if lev in str(spec_key):
                    level = lev
            try:
                m = job.measurements[metric_key]
            except KeyError:
                print('\tSkipped {metric_key:12s} with spec {spec}: no such measurement'.format(
                    metric_key=metric_name, spec=level))
                continue

            if np.isnan(m.quantity):
                print('\tSkipped {metric_key:12s} no measurement'.format(
                    metric_key=".".join([metric_name, level])))
                continue

            spec = job.specs[spec_key]
            passed = spec.check(m.quantity)
            if passed:
                prefix = Bcolors.OKBLUE + '\tPassed '
            else:
                prefix = Bcolors.FAIL + '\tFailed '
            infoStr = '{specName:12s} {meas:.4g} {op} {spec:.4g}'.format(
                specName=level,
                meas=m.quantity,
                op=spec.operator_str,
                spec=spec.threshold)
            print(prefix + infoStr + Bcolors.ENDC)



def print_pass_fail_summary(jobs, levels=('minimum', 'design', 'stretch'), default_level='design'):
    """Print a per-filter summary and an overall pass/fail verdict.

    Parameters
    ----------
    jobs : `dict`
        Mapping of filter name to `lsst.verify.Job`.
    levels : `tuple` of `str`, optional
        Specification levels to summarize.
    default_level : `str`, optional
        Level used for the overall pass/fail verdict.
    """
    currentTestCount = 0
    currentFailCount = 0

    for filterName, job in jobs.items():
        specs, metrics = get_specs_metrics(job)
        print('')
        print(Bcolors.BOLD + Bcolors.HEADER + "=" * 65 + Bcolors.ENDC)
        print(Bcolors.BOLD + Bcolors.HEADER + '{0} band summary'.format(filterName) + Bcolors.ENDC)
        print(Bcolors.BOLD + Bcolors.HEADER + "=" * 65 + Bcolors.ENDC)

        for specName in levels:
            measurementCount = 0
            failCount = 0
            for key, m in job.measurements.items():
                if np.isnan(m.quantity):
                    continue
                measurementCount += 1
                metric = key.metric.split("_")[0]  # For compound metrics
                spec_set = specs.get(metric, None)
                if spec_set is None:
                    continue
                spec = None
                for spec_key in spec_set:
                    if specName in spec_key.spec:
                        spec = job.specs[spec_key]
                if spec is None:
                    for spec_key in spec_set:
                        if specName in spec_key.metric:  # For dependent metrics
                            spec = job.specs[spec_key]
                if spec is not None and not spec.check(m.quantity):
                    failCount += 1

            if specName == default_level:
                currentTestCount += measurementCount
                currentFailCount += failCount

            if failCount == 0:
                print('Passed {level:12s} {count:d} measurements'.format(
                    level=specName, count=measurementCount))
            else:
                msg = 'Failed {level:12s} {failCount:d} of {count:d} measurements'.format(
                    level=specName, failCount=failCount, count=measurementCount)
                print(Bcolors.FAIL + msg + Bcolors.ENDC)

        print(Bcolors.BOLD + Bcolors.HEADER + "=" * 65 + Bcolors.ENDC + '\n')

    # Print the summary against the current spec level.
    print(Bcolors.BOLD + Bcolors.HEADER + "=" * 65 + Bcolors.ENDC)
    print(Bcolors.BOLD + Bcolors.HEADER + '{0} level summary'.format(default_level) + Bcolors.ENDC)
    print(Bcolors.BOLD + Bcolors.HEADER + "=" * 65 + Bcolors.ENDC)
    if currentFailCount > 0:
        msg = 'FAILED ({failCount:d}/{count:d} measurements)'.format(
            failCount=currentFailCount, count=currentTestCount)
        print(Bcolors.FAIL + msg + Bcolors.ENDC)
    else:
        print('PASSED ({count:d}/{count:d} measurements)'.format(
            count=currentTestCount))

    print(Bcolors.BOLD + Bcolors.HEADER + "=" * 65 + Bcolors.ENDC)