Coverage for python/lsst/validate/drp/validate.py : 9%

# LSST Data Management System
# Copyright 2008-2019 AURA/LSST.
#
# This product includes software developed by the
# LSST Project (http://www.lsst.org/).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the LSST License Statement and
# the GNU General Public License along with this program. If not,
# see <https://www.lsstcorp.org/LegalNotices/>.
"""Main driver functions for metric measurements, plotting, specification
grading, and persistence.
"""

__all__ = ['plot_metrics', 'print_metrics', 'print_pass_fail_summary',
           'run', 'runOneFilter']

import json
import os
import numpy as np
import astropy.units as u

from textwrap import TextWrapper
import astropy.visualization

from lsst.verify import Name
from lsst.verify import Job, MetricSet, SpecificationSet
from lsst import log
from lsst.daf.persistence import Butler

from .util import repoNameToPrefix
from .matchreduce import build_matched_dataset
from .photerrmodel import build_photometric_error_model
from .astromerrmodel import build_astrometric_error_model
from .calcnonsrd import measure_model_phot_rep
from .calcsrd import (measurePA1, measurePA2, measurePF1, measureAMx,
                      measureAFx, measureADx, measureTEx)
from .plot import (plotAMx, plotPA1, plotTEx, plotPhotometryErrorModel,
                   plotAstrometryErrorModel)


class Bcolors:
    """ANSI escape sequences used to color console output."""
    HEADER = '\033[95m'
    OKBLUE = '\033[94m'
    OKGREEN = '\033[92m'
    WARNING = '\033[93m'
    FAIL = '\033[91m'
    ENDC = '\033[0m'
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'


def load_json_output(filepath, metrics_package='verify_metrics'):
    """Read JSON from a file into a job object.

    Currently just does a trivial de-serialization with no checking that
    the result is a valid `lsst.verify.Job` object.

    Parameters
    ----------
    filepath : `str`
        Source file name for JSON output.
    metrics_package : `str`, optional
        Name of the metrics package whose metrics and specifications are
        attached to the returned job.

    Returns
    -------
    job : `lsst.verify.Job`
        The deserialized job.
    """
    with open(filepath, 'r') as infile:
        json_data = json.load(infile)

    job = Job.deserialize(**json_data)
    metrics = MetricSet.load_metrics_package(metrics_package)
    job.metrics.update(metrics)
    specs = SpecificationSet.load_metrics_package(metrics_package)
    job.specs.update(specs)
    return job
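
# Illustrative usage of `load_json_output` (a sketch only; the JSON file name
# below is hypothetical and not part of this module):
#
#     job = load_json_output('CFHT_output_r.json', metrics_package='verify_metrics')
#     print(job.meta['filter_name'])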


def get_filter_name_from_job(job):
    """Get the filter name from a `lsst.verify.Job` object.

    Assumes there is only one filter name, stored in the job metadata
    under ``filter_name``.

    Parameters
    ----------
    job : `lsst.verify.Job`

    Returns
    -------
    filter_name : `str`
    """
    return job.meta['filter_name']


def run(repo_or_json, outputPrefix=None, makePrint=True, makePlot=True,
        level='design', metrics_package='verify_metrics', **kwargs):
    """Main entrypoint from ``validateDrp.py``.

    Parameters
    ----------
    repo_or_json : `str`
        The repository. This is generally the directory on disk
        that contains the repository and mapper.
        This can also be the filepath for a JSON file that contains
        the cached output from a previous run.
    outputPrefix : `str`, optional
        Prefix for output file names. If `None`, a prefix is derived
        from the repository name.
    makePrint : `bool`, optional
        Print calculated quantities (to stdout).
    makePlot : `bool`, optional
        Create plots for metrics. Saved to current working directory.
    level : `str`, optional
        Specification level to check: 'design', 'minimum', or 'stretch'.
    metrics_package : `str`, optional
        Name of the metrics package to use.
    """
    base_name, ext = os.path.splitext(repo_or_json)
    if ext == '.json':
        load_json = True
    else:
        load_json = False

    # Derive a default output prefix from the repository name if one was
    # not supplied.
    if outputPrefix is None:
        outputPrefix = repoNameToPrefix(base_name)

    if load_json:
        if not os.path.isfile(repo_or_json):
            print("Could not find JSON file %s" % (repo_or_json))
            return

        json_path = repo_or_json
        job = load_json_output(json_path, metrics_package)
        filterName = get_filter_name_from_job(job)
        jobs = {filterName: job}
    else:
        if not os.path.isdir(repo_or_json):
            print("Could not find repo %s" % (repo_or_json))
            return

        repo_path = repo_or_json
        jobs = runOneRepo(repo_path, outputPrefix=outputPrefix,
                          metrics_package=metrics_package, **kwargs)

    for filterName, job in jobs.items():
        if makePrint:
            print_metrics(job)
        if makePlot:
            if outputPrefix is None or outputPrefix == '':
                thisOutputPrefix = "%s" % filterName
            else:
                thisOutputPrefix = "%s_%s" % (outputPrefix, filterName)
            plot_metrics(job, filterName, outputPrefix=thisOutputPrefix)

    print_pass_fail_summary(jobs, default_level=level)
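
# Example of driving the module through `run` (a sketch; the repository path
# and data ID values are hypothetical, and extra keyword arguments such as
# `dataIds` are forwarded to `runOneRepo`):
#
#     run('CFHT/output', makePlot=False, level='design',
#         dataIds=[{'visit': 849375, 'ccd': 12, 'filter': 'r'}])
#
# Passing a cached JSON file instead of a repository skips the measurement step:
#
#     run('CFHT_output_r.json', makePlot=False)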


def runOneRepo(repo, dataIds=None, outputPrefix='', verbose=False,
               instrument=None, dataset_repo_url=None,
               metrics_package='verify_metrics', **kwargs):
    r"""Calculate statistics for all filters in a repo.

    Runs multiple filters, if necessary, through repeated calls to `runOneFilter`.
    The resulting jobs can then be assessed against SRD specs at a chosen level.

    Parameters
    ----------
    repo : `str`
        The repository. This is generally the directory on disk
        that contains the repository and mapper.
    dataIds : `list` of `dict`
        List of butler data IDs of Image catalogs to compare to reference.
        The calexp pixel image is needed for the photometric calibration.
        Tract IDs must be included if "doApplyExternalPhotoCalib" or
        "doApplyExternalSkyWcs" is True.
    outputPrefix : `str`, optional
        Specify the beginning filename for output files.
        The name of each filter will be appended to outputPrefix.
    verbose : `bool`, optional
        Provide detailed output.
    instrument : `str`, optional
        Name of the instrument. If `None`, it will be extracted from the Butler mapper.
    dataset_repo_url : `str`, optional
        Location of the dataset used. If `None`, it will be set to the path of the repo.
    metrics_package : `str`, optional
        Name of the metrics package to be used in the Jobs created.

    Notes
    -----
    Names of plot files or JSON file are generated based on repository name,
    unless overridden by specifying `outputPrefix`.
    E.g., analyzing a repository ``CFHT/output``
    will result in filenames that start with ``CFHT_output_``.
    The filter name is added to this prefix; if the filter name has spaces,
    those spaces will appear in the filenames.
    """
    def extract_instrument_from_repo(repo):
        """Extract the camera name from a Butler repo's mapper, upper-cased.

        'lsst.obs.lsstSim.lsstSimMapper.LsstSimMapper' -> 'LSSTSIM'
        'lsst.obs.cfht.megacamMapper.MegacamMapper' -> 'CFHT'
        'lsst.obs.decam.decamMapper.DecamMapper' -> 'DECAM'
        'lsst.obs.hsc.hscMapper.HscMapper' -> 'HSC'
        """
        mapper_class = Butler.getMapperClass(repo)
        instrument = mapper_class.getCameraName()
        return instrument.upper()

    if instrument is None:
        instrument = extract_instrument_from_repo(repo)
    if dataset_repo_url is None:
        dataset_repo_url = repo

    allFilters = set([d['filter'] for d in dataIds])

    jobs = {}
    for filterName in allFilters:
        # Do this here so that each outputPrefix will have a different name for each filter.
        if outputPrefix is None or outputPrefix == '':
            thisOutputPrefix = "%s" % filterName
        else:
            thisOutputPrefix = "%s_%s" % (outputPrefix, filterName)
        theseVisitDataIds = [v for v in dataIds if v['filter'] == filterName]
        job = runOneFilter(repo, theseVisitDataIds,
                           outputPrefix=thisOutputPrefix,
                           verbose=verbose, filterName=filterName,
                           instrument=instrument,
                           dataset_repo_url=dataset_repo_url,
                           metrics_package=metrics_package, **kwargs)
        jobs[filterName] = job

    return jobs
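
# Sketch of calling `runOneRepo` directly with per-visit data IDs grouped by
# filter (the repository path and data ID values are hypothetical):
#
#     dataIds = [{'visit': 849375, 'ccd': 12, 'filter': 'r'},
#                {'visit': 850587, 'ccd': 12, 'filter': 'r'}]
#     jobs = runOneRepo('CFHT/output', dataIds=dataIds, outputPrefix='CFHT_output')
#     # `jobs` maps each filter name (here just 'r') to its `lsst.verify.Job`.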


def runOneFilter(repo, visitDataIds, brightSnrMin=None, brightSnrMax=None,
                 makeJson=True, filterName=None, outputPrefix='',
                 doApplyExternalPhotoCalib=False, externalPhotoCalibName=None,
                 doApplyExternalSkyWcs=False, externalSkyWcsName=None,
                 skipTEx=False, verbose=False,
                 metrics_package='verify_metrics',
                 instrument='Unknown', dataset_repo_url='./',
                 skipNonSrd=False, **kwargs):
    r"""Main executable for the case where there is just one filter.

    Plot files and JSON files are generated in the local directory
    prefixed with the repository name (where '_' replaces path separators),
    unless overridden by specifying `outputPrefix`.
    E.g., analyzing a repository ``CFHT/output``
    will result in filenames that start with ``CFHT_output_``.

    Parameters
    ----------
    repo : string or Butler
        A Butler or a repository URL that can be used to construct one.
    visitDataIds : list of dict
        List of `butler` data IDs of Image catalogs to compare to reference.
        The `calexp` pixel image is needed for the photometric calibration
        unless doApplyExternalPhotoCalib is True, in which case the
        appropriate `photoCalib` dataset is used. Note that these
        have data IDs that include the tract number.
    brightSnrMin : float, optional
        Minimum median SNR for a source to be considered bright; passed to
        `lsst.validate.drp.matchreduce.build_matched_dataset`.
    brightSnrMax : float, optional
        Maximum median SNR for a source to be considered bright; passed to
        `lsst.validate.drp.matchreduce.build_matched_dataset`.
    makeJson : bool, optional
        Create JSON output file for metrics. Saved to current working directory.
    outputPrefix : str, optional
        Specify the beginning filename for output files.
    filterName : str, optional
        Name of the filter (bandpass).
    doApplyExternalPhotoCalib : bool, optional
        Apply external photoCalib to calibrate fluxes.
    externalPhotoCalibName : str, optional
        Type of external `PhotoCalib` to apply. Currently supported are jointcal,
        fgcm, and fgcm_tract. Must be set if doApplyExternalPhotoCalib is True.
    doApplyExternalSkyWcs : bool, optional
        Apply external wcs to calibrate positions.
    externalSkyWcsName : str, optional
        Type of external `wcs` to apply. Currently supported is jointcal.
        Must be set if doApplyExternalSkyWcs is True.
    skipTEx : bool, optional
        Skip TEx calculations (useful for older catalogs that don't have
        PsfShape measurements).
    verbose : bool, optional
        Output additional information on the analysis steps.
    metrics_package : str, optional
        Name of the metrics package to be used in the Job created.
    instrument : str, optional
        Name of the instrument, recorded in the Job metadata.
    dataset_repo_url : str, optional
        Location of the dataset used, recorded in the Job metadata.
    skipNonSrd : bool, optional
        Skip any metrics not defined in the LSST SRD.

    Raises
    ------
    RuntimeError
        Raised if doApplyExternalPhotoCalib is True and externalPhotoCalibName
        is None, or if doApplyExternalSkyWcs is True and externalSkyWcsName is
        None.
    """
    if kwargs:
        log.warn(f"Extra kwargs - {kwargs}, will be ignored. Did you add extra things to your config file?")

    if doApplyExternalPhotoCalib and externalPhotoCalibName is None:
        raise RuntimeError("Must set externalPhotoCalibName if doApplyExternalPhotoCalib is True.")
    if doApplyExternalSkyWcs and externalSkyWcsName is None:
        raise RuntimeError("Must set externalSkyWcsName if doApplyExternalSkyWcs is True.")

    # collect just the common key, value pairs to omit the keys that are aggregated over
    job_metadata = dict(set.intersection(*[set(vid.items()) for vid in visitDataIds]))

    # update with metadata passed into the method
    job_metadata.update({'instrument': instrument,
                         'filter_name': filterName,
                         'dataset_repo_url': dataset_repo_url})

    job = Job.load_metrics_package(meta=job_metadata,
                                   subset='validate_drp',
                                   package_name_or_path=metrics_package)

    matchedDataset = build_matched_dataset(repo, visitDataIds,
                                           doApplyExternalPhotoCalib=doApplyExternalPhotoCalib,
                                           externalPhotoCalibName=externalPhotoCalibName,
                                           doApplyExternalSkyWcs=doApplyExternalSkyWcs,
                                           externalSkyWcsName=externalSkyWcsName,
                                           skipTEx=skipTEx, skipNonSrd=skipNonSrd,
                                           brightSnrMin=brightSnrMin, brightSnrMax=brightSnrMax)

    snr = matchedDataset['snr'].quantity
    bright = (matchedDataset['brightSnrMin'].quantity < snr) & (
        snr < matchedDataset['brightSnrMax'].quantity)
    photomModel = build_photometric_error_model(matchedDataset, bright)
    astromModel = build_astrometric_error_model(matchedDataset, bright)

    linkedBlobs = [matchedDataset, photomModel, astromModel]

    metrics = job.metrics
    specs = job.specs

    def add_measurement(measurement):
        for blob in linkedBlobs:
            measurement.link_blob(blob)
        job.measurements.insert(measurement)

    for x, D in zip((1, 2, 3), (5., 20., 200.)):
        amxName = 'AM{0:d}'.format(x)
        afxName = 'AF{0:d}'.format(x)
        adxName = 'AD{0:d}'.format(x)

        amx = measureAMx(metrics['validate_drp.'+amxName], matchedDataset, D*u.arcmin, verbose=verbose)
        add_measurement(amx)

        afx_spec_set = specs.subset(required_meta={'instrument': 'HSC'}, spec_tags=[afxName, ])
        adx_spec_set = specs.subset(required_meta={'instrument': 'HSC'}, spec_tags=[adxName, ])
        for afx_spec_key, adx_spec_key in zip(afx_spec_set, adx_spec_set):
            afx_spec = afx_spec_set[afx_spec_key]
            adx_spec = adx_spec_set[adx_spec_key]
            adx = measureADx(metrics[adx_spec.metric_name], amx, afx_spec)
            add_measurement(adx)
            afx = measureAFx(metrics[afx_spec.metric_name], amx, adx, adx_spec)
            add_measurement(afx)

    pa1 = measurePA1(
        metrics['validate_drp.PA1'], filterName, matchedDataset.matchesBright, matchedDataset.magKey)
    add_measurement(pa1)

    pf1_spec_set = specs.subset(required_meta={'instrument': instrument, 'filter_name': filterName},
                                spec_tags=['PF1', ])
    pa2_spec_set = specs.subset(required_meta={'instrument': instrument, 'filter_name': filterName},
                                spec_tags=['PA2', ])
    # I worry these might not always be in the right order. Sorting...
    pf1_spec_keys = list(pf1_spec_set.keys())
    pa2_spec_keys = list(pa2_spec_set.keys())
    pf1_spec_keys.sort()
    pa2_spec_keys.sort()
    for pf1_spec_key, pa2_spec_key in zip(pf1_spec_keys, pa2_spec_keys):
        pf1_spec = pf1_spec_set[pf1_spec_key]
        pa2_spec = pa2_spec_set[pa2_spec_key]

        pa2 = measurePA2(metrics[pa2_spec.metric_name], pa1, pf1_spec.threshold)
        add_measurement(pa2)

        pf1 = measurePF1(metrics[pf1_spec.metric_name], pa1, pa2_spec)
        add_measurement(pf1)

    if not skipTEx:
        for x, D, bin_range_operator in zip((1, 2), (1.0, 5.0), ("<=", ">=")):
            texName = 'TE{0:d}'.format(x)
            tex = measureTEx(metrics['validate_drp.'+texName], matchedDataset, D*u.arcmin,
                             bin_range_operator, verbose=verbose)
            add_measurement(tex)

    if not skipNonSrd:
        model_phot_reps = measure_model_phot_rep(metrics, filterName, matchedDataset)
        for measurement in model_phot_reps:
            add_measurement(measurement)

    if makeJson:
        job.write(outputPrefix+'.json')

    return job
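
# Sketch of a single-filter run with an external photometric calibration
# (the repository path, the data ID list `visitDataIds`, and the calibration
# name are hypothetical):
#
#     job = runOneFilter('CFHT/output', visitDataIds,
#                        filterName='r', outputPrefix='CFHT_output_r',
#                        doApplyExternalPhotoCalib=True,
#                        externalPhotoCalibName='jointcal')
#     # With makeJson=True (the default) this also writes CFHT_output_r.json.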


def get_metric(level, metric_label, in_specs):
    """Return the metric `~lsst.verify.Name` of the first specification whose
    name contains both ``level`` and ``metric_label``.
    """
    for spec in in_specs:
        if level in str(spec) and metric_label in str(spec):
            break
    return Name(package=spec.package, metric=spec.metric)


def plot_metrics(job, filterName, outputPrefix=''):
    """Plot AM1, AM2, AM3, PA1 plus related informational plots.

    Parameters
    ----------
    job : `lsst.verify.Job`
        The job to load data from.
    filterName : `str`
        String identifying the filter.
    outputPrefix : `str`, optional
        Prefix for the plot file names.
    """
    astropy.visualization.quantity_support()

    specs = job.specs
    measurements = job.measurements
    spec_name = 'design'
    for x in (1, 2, 3):
        amxName = 'AM{0:d}'.format(x)
        afxName = 'AF{0:d}'.format(x)
        # ADx is included on the AFx plots

        amx = measurements[get_metric(spec_name, amxName, specs)]
        afx = measurements[get_metric(spec_name, afxName, specs)]

        if amx.quantity is not None:
            try:
                plotAMx(job, amx, afx, filterName, amxSpecName=spec_name,
                        outputPrefix=outputPrefix)
            except RuntimeError as e:
                print(e)
                print('\tSkipped plot{}'.format(amxName))

    try:
        pa1 = measurements[get_metric(spec_name, 'PA1', specs)]
        plotPA1(pa1, outputPrefix=outputPrefix)
    except RuntimeError as e:
        print(e)
        print('\tSkipped plotPA1')

    try:
        matchedDataset = pa1.blobs['MatchedMultiVisitDataset']
        photomModel = pa1.blobs['PhotometricErrorModel']
        filterName = pa1.extras['filter_name']
        plotPhotometryErrorModel(matchedDataset, photomModel,
                                 filterName=filterName,
                                 outputPrefix=outputPrefix)
    except KeyError as e:
        print(e)
        print('\tSkipped plotPhotometryErrorModel')

    try:
        am1 = measurements[get_metric(spec_name, 'AM1', specs)]
        matchedDataset = am1.blobs['MatchedMultiVisitDataset']
        astromModel = am1.blobs['AnalyticAstrometryModel']
        plotAstrometryErrorModel(matchedDataset, astromModel,
                                 outputPrefix=outputPrefix)
    except KeyError as e:
        print(e)
        print('\tSkipped plotAstrometryErrorModel')

    for x in (1, 2):
        texName = 'TE{0:d}'.format(x)

        try:
            measurement = measurements[get_metric(spec_name, texName, specs)]
            plotTEx(job, measurement, filterName,
                    texSpecName='design',
                    outputPrefix=outputPrefix)
        except (RuntimeError, KeyError) as e:
            print(e)
            print('\tSkipped plot{}'.format(texName))
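
# Sketch of re-plotting from a cached job without recomputing measurements
# (the JSON file name is hypothetical):
#
#     job = load_json_output('CFHT_output_r.json')
#     plot_metrics(job, get_filter_name_from_job(job), outputPrefix='CFHT_output_r')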


def get_specs_metrics(job):
    """Collect the specifications and metrics relevant to a job.

    Returns two dicts keyed by base metric name: ``specs`` maps to lists of
    specification names and ``metrics`` maps to lists of metric
    `~lsst.verify.Name` objects.
    """
    # Get specs for this filter
    subset = job.specs.subset(required_meta={'instrument': job.meta['instrument'],
                                             'filter_name': job.meta['filter_name']},
                              spec_tags=['chromatic'])
    # Get specs that don't depend on filter
    subset.update(job.specs.subset(required_meta={'instrument': job.meta['instrument']},
                                   spec_tags=['achromatic']))
    metrics = {}
    specs = {}
    for spec in subset:
        metric_name = spec.metric.split('_')[0]  # Take first part for linked metrics
        if metric_name in metrics:
            metrics[metric_name].append(Name(package=spec.package, metric=spec.metric))
            specs[metric_name].append(spec)
        else:
            metrics[metric_name] = [Name(package=spec.package, metric=spec.metric), ]
            specs[metric_name] = [spec, ]
    return specs, metrics


def print_metrics(job, levels=('minimum', 'design', 'stretch')):
    """Print each measured metric and its pass/fail status against the
    specifications at the given levels.
    """
    specs, metrics = get_specs_metrics(job)

    print(Bcolors.BOLD + Bcolors.HEADER + "=" * 65 + Bcolors.ENDC)
    print(Bcolors.BOLD + Bcolors.HEADER
          + '{band} band metric measurements'.format(band=job.meta['filter_name'])
          + Bcolors.ENDC)
    print(Bcolors.BOLD + Bcolors.HEADER + "=" * 65 + Bcolors.ENDC)

    wrapper = TextWrapper(width=65)
    for metric_name, metric_set in metrics.items():
        metric = job.metrics[metric_set[0]]  # Pick the first one for the description
        print(Bcolors.HEADER + '{name} - {reference}'.format(
            name=metric.name, reference=metric.reference))
        print(wrapper.fill(Bcolors.ENDC + '{description}'.format(
            description=metric.description).strip()))

        for spec_key, metric_key in zip(specs[metric_name], metrics[metric_name]):
            level = None
            if 'release' in job.specs[spec_key].tags:
                # Skip release specs
                continue
            for lev in levels:
                if lev in str(spec_key):
                    level = lev
            try:
                m = job.measurements[metric_key]
            except KeyError:
                print('\tSkipped {metric_key:12s} with spec {spec}: no such measurement'.format(
                    metric_key=metric_name, spec=level))
                continue

            if np.isnan(m.quantity):
                print('\tSkipped {metric_key:12s} no measurement'.format(
                    metric_key=".".join([metric_name, level])))
                continue

            spec = job.specs[spec_key]
            passed = spec.check(m.quantity)
            if passed:
                prefix = Bcolors.OKBLUE + '\tPassed '
            else:
                prefix = Bcolors.FAIL + '\tFailed '
            infoStr = '{specName:12s} {meas:.4g} {op} {spec:.4g}'.format(
                specName=level,
                meas=m.quantity,
                op=spec.operator_str,
                spec=spec.threshold)
            print(prefix + infoStr + Bcolors.ENDC)


def print_pass_fail_summary(jobs, levels=('minimum', 'design', 'stretch'), default_level='design'):
    """Print a per-filter and overall pass/fail summary of the measurements
    in ``jobs`` against the specifications at the given levels.
    """
    currentTestCount = 0
    currentFailCount = 0
    currentSkippedCount = 0

    for filterName, job in jobs.items():
        specs, metrics = get_specs_metrics(job)
        print('')
        print(Bcolors.BOLD + Bcolors.HEADER + "=" * 65 + Bcolors.ENDC)
        print(Bcolors.BOLD + Bcolors.HEADER + '{0} band summary'.format(filterName) + Bcolors.ENDC)
        print(Bcolors.BOLD + Bcolors.HEADER + "=" * 65 + Bcolors.ENDC)

        for specName in levels:
            measurementCount = 0
            failCount = 0
            skippedCount = 0
            for key, m in job.measurements.items():
                metric = key.metric.split("_")  # For compound metrics
                len_metric = len(metric)
                if len_metric > 1:
                    if metric[1] != specName:
                        continue
                    if len_metric > 2 and filterName not in metric[2]:
                        continue
                spec_set = specs.get(metric[0], None)
                if spec_set is None:
                    continue
                spec = None
                for spec_key in spec_set:
                    if specName in spec_key.spec:
                        spec = job.specs[spec_key]
                if spec is None:
                    for spec_key in spec_set:
                        if specName in spec_key.metric:  # For dependent metrics
                            spec = job.specs[spec_key]
                if spec is not None:
                    measurementCount += 1
                    if np.isnan(m.quantity):
                        skippedCount += 1
                    if not spec.check(m.quantity):
                        failCount += 1

            if specName == default_level:
                currentTestCount += measurementCount
                currentFailCount += failCount
                currentSkippedCount += skippedCount

            if failCount == 0:
                print('Passed {level:12s} {count:d} measurements ({skipped:d} skipped)'.format(
                    level=specName, count=measurementCount, skipped=skippedCount))
            else:
                msg = 'Failed {level:12s} {failCount:d} of {count:d} measurements ({skipped:d} skipped)'.format(
                    level=specName, failCount=failCount, count=measurementCount, skipped=skippedCount)
                print(Bcolors.FAIL + msg + Bcolors.ENDC)

        print(Bcolors.BOLD + Bcolors.HEADER + "=" * 65 + Bcolors.ENDC + '\n')

    # print summary against current spec level
    print(Bcolors.BOLD + Bcolors.HEADER + "=" * 65 + Bcolors.ENDC)
    print(Bcolors.BOLD + Bcolors.HEADER + '{0} level summary'.format(default_level) + Bcolors.ENDC)
    print(Bcolors.BOLD + Bcolors.HEADER + "=" * 65 + Bcolors.ENDC)
    if currentFailCount > 0:
        msg = 'FAILED ({failCount:d}/{count:d} measurements ({skipped:d} skipped))'.format(
            failCount=currentFailCount, count=currentTestCount, skipped=currentSkippedCount)
        print(Bcolors.FAIL + msg + Bcolors.ENDC)
    else:
        print('PASSED ({count:d}/{count:d} measurements ({skipped:d} skipped))'.format(
            count=currentTestCount, skipped=currentSkippedCount))

    print(Bcolors.BOLD + Bcolors.HEADER + "=" * 65 + Bcolors.ENDC)
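
# Sketch of summarizing several cached jobs at once (the JSON file names are
# hypothetical):
#
#     jobs = {get_filter_name_from_job(j): j
#             for j in map(load_json_output, ['CFHT_output_r.json', 'CFHT_output_i.json'])}
#     for job in jobs.values():
#         print_metrics(job)
#     print_pass_fail_summary(jobs, default_level='design')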