# LSST Data Management System
# Copyright 2008-2019 AURA/LSST.
#
# This product includes software developed by the
# LSST Project (http://www.lsst.org/).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the LSST License Statement and
# the GNU General Public License along with this program. If not,
# see <https://www.lsstcorp.org/LegalNotices/>.
"""Main driver functions for metric measurements, plotting, specification
grading, and persistence.
"""

__all__ = ['plot_metrics', 'print_metrics', 'print_pass_fail_summary',
           'run', 'runOneFilter']

import json
import os
import numpy as np
import astropy.units as u

from textwrap import TextWrapper
import astropy.visualization

from lsst.verify import Name
from lsst.verify import Job, MetricSet, SpecificationSet
from lsst import log
from lsst.daf.persistence import Butler

from .util import repoNameToPrefix
from .matchreduce import build_matched_dataset
from .photerrmodel import build_photometric_error_model
from .astromerrmodel import build_astrometric_error_model
from .calcnonsrd import measure_model_phot_rep
from .calcsrd import (measurePA1, measurePA2, measurePF1, measureAMx,
                      measureAFx, measureADx, measureTEx)
from .plot import (plotAMx, plotPA1, plotTEx, plotPhotometryErrorModel,
                   plotAstrometryErrorModel)


class Bcolors:
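    """ANSI terminal escape sequences used to colorize console output."""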
    HEADER = '\033[95m'
    OKBLUE = '\033[94m'
    OKGREEN = '\033[92m'
    WARNING = '\033[93m'
    FAIL = '\033[91m'
    ENDC = '\033[0m'
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'


def load_json_output(filepath, metrics_package='verify_metrics'):
    """Read JSON from a file into a job object.

    Currently does a trivial de-serialization with no checking that the
    result is a valid `lsst.verify.Job` object.

    Parameters
    ----------
    filepath : `str`
        Source file name for JSON output.
    metrics_package : `str`, optional
        Name of the metrics package from which to load metric and
        specification definitions.

    Returns
    -------
    job : `lsst.verify.Job`
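
    Examples
    --------
    A minimal sketch, assuming a JSON file written by a previous
    ``validateDrp.py`` run (the file name here is hypothetical):

    >>> job = load_json_output('CFHT_output_r.json')  # doctest: +SKIP
    >>> job.meta['filter_name']  # doctest: +SKIP
    'r'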
76 """
77 with open(filepath, 'r') as infile:
78 json_data = json.load(infile)
80 job = Job.deserialize(**json_data)
81 metrics = MetricSet.load_metrics_package(metrics_package)
82 job.metrics.update(metrics)
83 specs = SpecificationSet.load_metrics_package(metrics_package)
84 job.specs.update(specs)
85 return job


def get_filter_name_from_job(job):
    """Get the filter name from a `lsst.verify.Job` object.

    Assumes there is only one filter name, stored in the job metadata
    under ``filter_name``.

    Parameters
    ----------
    job : `lsst.verify.Job`

    Returns
    -------
    filter_name : `str`
    """
    return job.meta['filter_name']


def run(repo_or_json, outputPrefix=None, makePrint=True, makePlot=True,
        level='design', metrics_package='verify_metrics', **kwargs):
    """Main entrypoint from ``validateDrp.py``.

    Parameters
    ----------
    repo_or_json : `str`
        The repository. This is generally the directory on disk
        that contains the repository and mapper.
        This can also be the filepath for a JSON file that contains
        the cached output from a previous run.
    outputPrefix : `str`, optional
        Prefix for output file names. If None, a prefix is derived
        from the repository name.
    makePrint : `bool`, optional
        Print calculated quantities (to stdout).
    makePlot : `bool`, optional
        Create plots for metrics. Saved to current working directory.
    level : `str`, optional
        Specification level to summarize against: e.g., 'design',
        'minimum', 'stretch'.
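
    Examples
    --------
    A minimal sketch (the repository path and data ID values are
    hypothetical); ``dataIds`` is forwarded through ``**kwargs`` to
    `runOneRepo`:

    >>> run('CFHT/output', makePlot=False,
    ...     dataIds=[{'visit': 1001, 'ccd': 12, 'filter': 'r'}])  # doctest: +SKIP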
123 """
124 base_name, ext = os.path.splitext(repo_or_json)
125 if ext == '.json':
126 load_json = True
127 else:
128 load_json = False

    # I think I have to interrogate the kwargs to maintain compatibility
    # between Python 2 and Python 3.
    # In Python 3 alone I could have mixed in a keyword-only argument
    # after *args.
    if outputPrefix is None:
        outputPrefix = repoNameToPrefix(base_name)

    if load_json:
        if not os.path.isfile(repo_or_json):
            print("Could not find JSON file %s" % (repo_or_json))
            return

        json_path = repo_or_json
        job = load_json_output(json_path, metrics_package)
        filterName = get_filter_name_from_job(job)
        jobs = {filterName: job}
    else:
        if not os.path.isdir(repo_or_json):
            print("Could not find repo %s" % (repo_or_json))
            return

        repo_path = repo_or_json
        jobs = runOneRepo(repo_path, outputPrefix=outputPrefix,
                          metrics_package=metrics_package, **kwargs)

    for filterName, job in jobs.items():
        if makePrint:
            print_metrics(job)
        if makePlot:
            if outputPrefix is None or outputPrefix == '':
                thisOutputPrefix = "%s" % filterName
            else:
                thisOutputPrefix = "%s_%s" % (outputPrefix, filterName)
            plot_metrics(job, filterName, outputPrefix=thisOutputPrefix)

    print_pass_fail_summary(jobs, default_level=level)


def runOneRepo(repo, dataIds=None, outputPrefix='', verbose=False,
               instrument=None, dataset_repo_url=None,
               metrics_package='verify_metrics', **kwargs):
    r"""Calculate statistics for all filters in a repo.

    Runs multiple filters, if necessary, through repeated calls to
    `runOneFilter`.

    Parameters
    ----------
    repo : `str`
        The repository. This is generally the directory on disk
        that contains the repository and mapper.
    dataIds : `list` of `dict`
        List of butler data IDs of image catalogs to compare to reference.
        The calexp pixel image is needed for the photometric calibration.
        Tract IDs must be included if "doApplyExternalPhotoCalib" or
        "doApplyExternalSkyWcs" is True.
    outputPrefix : `str`, optional
        Specify the beginning filename for output files.
        The name of each filter will be appended to outputPrefix.
    verbose : `bool`
        Provide detailed output.
    instrument : `str`
        Name of the instrument. If None, it will be extracted from the
        Butler mapper.
    dataset_repo_url : `str`
        Location of the dataset used. If None, it will be set to the path
        of the repo.
    metrics_package : `str`
        Name of the metrics package to be used in the Jobs created.

    Notes
    -----
    Names of plot files or JSON file are generated based on repository name,
    unless overridden by specifying `outputPrefix`.
    E.g., analyzing a repository ``CFHT/output``
    will result in filenames that start with ``CFHT_output_``.
    The filter name is added to this prefix. If the filter name has spaces,
    there will be annoyance and sadness as those spaces will appear in the
    filenames.
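
    Examples
    --------
    A minimal sketch of the expected ``dataIds`` structure (the repo path
    and visit/ccd values are hypothetical):

    >>> dataIds = [{'visit': 1001, 'ccd': 12, 'filter': 'r'},
    ...            {'visit': 1002, 'ccd': 12, 'filter': 'r'}]
    >>> jobs = runOneRepo('CFHT/output', dataIds=dataIds)  # doctest: +SKIP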
207 """
    def extract_instrument_from_repo(repo):
        """Extract the camera name, upper-cased, from a Butler repo's mapper.

        'lsst.obs.lsstSim.lsstSimMapper.LsstSimMapper' -> 'LSSTSIM'
        'lsst.obs.cfht.megacamMapper.MegacamMapper' -> 'CFHT'
        'lsst.obs.decam.decamMapper.DecamMapper' -> 'DECAM'
        'lsst.obs.hsc.hscMapper.HscMapper' -> 'HSC'
        """
        mapper_class = Butler.getMapperClass(repo)
        instrument = mapper_class.getCameraName()
        return instrument.upper()

    if instrument is None:
        instrument = extract_instrument_from_repo(repo)
    if dataset_repo_url is None:
        dataset_repo_url = repo

    allFilters = set([d['filter'] for d in dataIds])

    jobs = {}
    for filterName in allFilters:
        # Do this here so that each outputPrefix will have a different name for each filter.
        if outputPrefix is None or outputPrefix == '':
            thisOutputPrefix = "%s" % filterName
        else:
            thisOutputPrefix = "%s_%s" % (outputPrefix, filterName)
        theseVisitDataIds = [v for v in dataIds if v['filter'] == filterName]
        job = runOneFilter(repo, theseVisitDataIds,
                           outputPrefix=thisOutputPrefix,
                           verbose=verbose, filterName=filterName,
                           instrument=instrument,
                           dataset_repo_url=dataset_repo_url,
                           metrics_package=metrics_package, **kwargs)
        jobs[filterName] = job

    return jobs


def runOneFilter(repo, visitDataIds, brightSnrMin=None, brightSnrMax=None,
                 makeJson=True, filterName=None, outputPrefix='',
                 doApplyExternalPhotoCalib=False, externalPhotoCalibName=None,
                 doApplyExternalSkyWcs=False, externalSkyWcsName=None,
                 skipTEx=False, verbose=False,
                 metrics_package='verify_metrics',
                 instrument='Unknown', dataset_repo_url='./',
                 skipNonSrd=False, **kwargs):
    r"""Main executable for the case where there is just one filter.

    Plot files and JSON files are generated in the local directory
    prefixed with the repository name (where '_' replaces the path
    separators), unless overridden by specifying `outputPrefix`.
    E.g., analyzing a repository ``CFHT/output``
    will result in filenames that start with ``CFHT_output_``.

    Parameters
    ----------
    repo : `str` or `Butler`
        A Butler or a repository URL that can be used to construct one.
    visitDataIds : `list` of `dict`
        List of butler data IDs of image catalogs to compare to reference.
        The `calexp` pixel image is needed for the photometric calibration
        unless doApplyExternalPhotoCalib is True, in which case the
        appropriate `photoCalib` dataset is used. Note that these
        have data IDs that include the tract number.
    brightSnrMin : `float`, optional
        Minimum median SNR for a source to be considered bright; passed to
        `lsst.validate.drp.matchreduce.build_matched_dataset`.
    brightSnrMax : `float`, optional
        Maximum median SNR for a source to be considered bright; passed to
        `lsst.validate.drp.matchreduce.build_matched_dataset`.
    makeJson : `bool`, optional
        Create JSON output file for metrics. Saved to current working
        directory.
    outputPrefix : `str`, optional
        Specify the beginning filename for output files.
    filterName : `str`, optional
        Name of the filter (bandpass).
    doApplyExternalPhotoCalib : `bool`, optional
        Apply external photoCalib to calibrate fluxes.
    externalPhotoCalibName : `str`, optional
        Type of external `PhotoCalib` to apply. Currently supported are
        jointcal, fgcm, and fgcm_tract. Must be set if
        doApplyExternalPhotoCalib is True.
    doApplyExternalSkyWcs : `bool`, optional
        Apply external wcs to calibrate positions.
    externalSkyWcsName : `str`, optional
        Type of external `wcs` to apply. Currently supported is jointcal.
        Must be set if "doApplyExternalSkyWcs" is True.
    skipTEx : `bool`, optional
        Skip TEx calculations (useful for older catalogs that don't have
        PsfShape measurements).
    verbose : `bool`, optional
        Output additional information on the analysis steps.
    skipNonSrd : `bool`, optional
        Skip any metrics not defined in the LSST SRD.

    Raises
    ------
    RuntimeError
        Raised if "doApplyExternalPhotoCalib" is True and
        "externalPhotoCalibName" is None, or if "doApplyExternalSkyWcs" is
        True and "externalSkyWcsName" is None.
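
    Examples
    --------
    A minimal sketch (the repo path and data ID values are hypothetical):

    >>> job = runOneFilter('CFHT/output',
    ...                    [{'visit': 1001, 'ccd': 12, 'filter': 'r'}],
    ...                    filterName='r')  # doctest: +SKIP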
308 """
310 if kwargs:
311 log.warn(f"Extra kwargs - {kwargs}, will be ignored. Did you add extra things to your config file?")
313 if doApplyExternalPhotoCalib and externalPhotoCalibName is None:
314 raise RuntimeError("Must set externalPhotoCalibName if doApplyExternalPhotoCalib is True.")
315 if doApplyExternalSkyWcs and externalSkyWcsName is None:
316 raise RuntimeError("Must set externalSkyWcsName if doApplyExternalSkyWcs is True.")
318 job = Job.load_metrics_package(meta={'instrument': instrument,
319 'filter_name': filterName,
320 'dataset_repo_url': dataset_repo_url},
321 subset='validate_drp',
322 package_name_or_path=metrics_package)
324 matchedDataset = build_matched_dataset(repo, visitDataIds,
325 doApplyExternalPhotoCalib=doApplyExternalPhotoCalib,
326 externalPhotoCalibName=externalPhotoCalibName,
327 doApplyExternalSkyWcs=doApplyExternalSkyWcs,
328 externalSkyWcsName=externalSkyWcsName,
329 skipTEx=skipTEx, skipNonSrd=skipNonSrd,
330 brightSnrMin=brightSnrMin, brightSnrMax=brightSnrMax)
332 snr = matchedDataset['snr'].quantity
333 bright = (matchedDataset['brightSnrMin'].quantity < snr) & (
334 snr < matchedDataset['brightSnrMax'].quantity)
335 photomModel = build_photometric_error_model(matchedDataset, bright)
336 astromModel = build_astrometric_error_model(matchedDataset, bright)
338 linkedBlobs = [matchedDataset, photomModel, astromModel]
340 metrics = job.metrics
341 specs = job.specs
343 def add_measurement(measurement):
344 for blob in linkedBlobs:
345 measurement.link_blob(blob)
346 job.measurements.insert(measurement)
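
    # Measure the AMx astrometric repeatability metrics on the three
    # annulus scales used here: AM1 (5 arcmin), AM2 (20 arcmin), and
    # AM3 (200 arcmin), along with the dependent ADx/AFx specifications.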
    for x, D in zip((1, 2, 3), (5., 20., 200.)):
        amxName = 'AM{0:d}'.format(x)
        afxName = 'AF{0:d}'.format(x)
        adxName = 'AD{0:d}'.format(x)

        amx = measureAMx(metrics['validate_drp.'+amxName], matchedDataset, D*u.arcmin, verbose=verbose)
        add_measurement(amx)

        afx_spec_set = specs.subset(required_meta={'instrument': 'HSC'}, spec_tags=[afxName, ])
        adx_spec_set = specs.subset(required_meta={'instrument': 'HSC'}, spec_tags=[adxName, ])
        for afx_spec_key, adx_spec_key in zip(afx_spec_set, adx_spec_set):
            afx_spec = afx_spec_set[afx_spec_key]
            adx_spec = adx_spec_set[adx_spec_key]
            adx = measureADx(metrics[adx_spec.metric_name], amx, afx_spec)
            add_measurement(adx)
            afx = measureAFx(metrics[afx_spec.metric_name], amx, adx, adx_spec)
            add_measurement(afx)

    pa1 = measurePA1(
        metrics['validate_drp.PA1'], filterName, matchedDataset.matchesBright, matchedDataset.magKey)
    add_measurement(pa1)

    pf1_spec_set = specs.subset(required_meta={'instrument': instrument, 'filter_name': filterName},
                                spec_tags=['PF1', ])
    pa2_spec_set = specs.subset(required_meta={'instrument': instrument, 'filter_name': filterName},
                                spec_tags=['PA2', ])
    # I worry these might not always be in the right order. Sorting...
    pf1_spec_keys = sorted(pf1_spec_set.keys())
    pa2_spec_keys = sorted(pa2_spec_set.keys())
    for pf1_spec_key, pa2_spec_key in zip(pf1_spec_keys, pa2_spec_keys):
        pf1_spec = pf1_spec_set[pf1_spec_key]
        pa2_spec = pa2_spec_set[pa2_spec_key]

        pa2 = measurePA2(metrics[pa2_spec.metric_name], pa1, pf1_spec.threshold)
        add_measurement(pa2)

        pf1 = measurePF1(metrics[pf1_spec.metric_name], pa1, pa2_spec)
        add_measurement(pf1)

    if not skipTEx:
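        # Measure the TE1 and TE2 ellipticity-correlation metrics over
        # separations of <= 1 arcmin and >= 5 arcmin, respectively.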
        for x, D, bin_range_operator in zip((1, 2), (1.0, 5.0), ("<=", ">=")):
            texName = 'TE{0:d}'.format(x)
            tex = measureTEx(metrics['validate_drp.'+texName], matchedDataset, D*u.arcmin,
                             bin_range_operator, verbose=verbose)
            add_measurement(tex)

    if not skipNonSrd:
        model_phot_reps = measure_model_phot_rep(metrics, filterName, matchedDataset)
        for measurement in model_phot_reps:
            add_measurement(measurement)

    if makeJson:
        job.write(outputPrefix+'.json')

    return job


def get_metric(level, metric_label, in_specs):
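    """Return the `~lsst.verify.Name` of the first spec in ``in_specs``
    whose name contains both ``level`` and ``metric_label``.

    Note that if nothing matches, the name of the last spec in
    ``in_specs`` is returned.
    """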
    for spec in in_specs:
        if level in str(spec) and metric_label in str(spec):
            break
    return Name(package=spec.package, metric=spec.metric)


def plot_metrics(job, filterName, outputPrefix=''):
    """Plot AM1, AM2, AM3, PA1 plus related informational plots.

    Parameters
    ----------
    job : `lsst.verify.Job`
        The job to load data from.
    filterName : `str`
        String identifying the filter.
    outputPrefix : `str`, optional
        Prefix for the output plot file names.
    """
    astropy.visualization.quantity_support()

    specs = job.specs
    measurements = job.measurements
    spec_name = 'design'
    for x in (1, 2, 3):
        amxName = 'AM{0:d}'.format(x)
        afxName = 'AF{0:d}'.format(x)
        # ADx is included on the AFx plots

        amx = measurements[get_metric(spec_name, amxName, specs)]
        afx = measurements[get_metric(spec_name, afxName, specs)]

        if amx.quantity is not None:
            try:
                plotAMx(job, amx, afx, filterName, amxSpecName=spec_name,
                        outputPrefix=outputPrefix)
            except RuntimeError as e:
                print(e)
                print('\tSkipped plot{}'.format(amxName))

    try:
        pa1 = measurements[get_metric(spec_name, 'PA1', specs)]
        plotPA1(pa1, outputPrefix=outputPrefix)
    except RuntimeError as e:
        print(e)
        print('\tSkipped plotPA1')

    try:
        matchedDataset = pa1.blobs['MatchedMultiVisitDataset']
        photomModel = pa1.blobs['PhotometricErrorModel']
        filterName = pa1.extras['filter_name']
        plotPhotometryErrorModel(matchedDataset, photomModel,
                                 filterName=filterName,
                                 outputPrefix=outputPrefix)
    except KeyError as e:
        print(e)
        print('\tSkipped plotPhotometryErrorModel')

    try:
        am1 = measurements[get_metric(spec_name, 'AM1', specs)]
        matchedDataset = am1.blobs['MatchedMultiVisitDataset']
        astromModel = am1.blobs['AnalyticAstrometryModel']
        plotAstrometryErrorModel(matchedDataset, astromModel,
                                 outputPrefix=outputPrefix)
    except KeyError as e:
        print(e)
        print('\tSkipped plotAstrometryErrorModel')

    for x in (1, 2):
        texName = 'TE{0:d}'.format(x)

        try:
            measurement = measurements[get_metric(spec_name, texName, specs)]
            plotTEx(job, measurement, filterName,
                    texSpecName='design',
                    outputPrefix=outputPrefix)
        except (RuntimeError, KeyError) as e:
            print(e)
            print('\tSkipped plot{}'.format(texName))


def get_specs_metrics(job):
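    """Collect the specifications and metric names relevant to ``job``,
    combining filter-dependent ('chromatic') and filter-independent
    ('achromatic') specs for the job's instrument.

    Returns
    -------
    specs : `dict`
        Lists of spec keys, keyed by base metric name.
    metrics : `dict`
        Lists of `~lsst.verify.Name` objects, keyed by base metric name.
    """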
    # Get specs for this filter
    subset = job.specs.subset(required_meta={'instrument': job.meta['instrument'],
                                             'filter_name': job.meta['filter_name']},
                              spec_tags=['chromatic'])
    # Get specs that don't depend on filter
    subset.update(job.specs.subset(required_meta={'instrument': job.meta['instrument']},
                                   spec_tags=['achromatic']))
    metrics = {}
    specs = {}
    for spec in subset:
        metric_name = spec.metric.split('_')[0]  # Take first part for linked metrics
        if metric_name in metrics:
            metrics[metric_name].append(Name(package=spec.package, metric=spec.metric))
            specs[metric_name].append(spec)
        else:
            metrics[metric_name] = [Name(package=spec.package, metric=spec.metric), ]
            specs[metric_name] = [spec, ]
    return specs, metrics


def print_metrics(job, levels=('minimum', 'design', 'stretch')):
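    """Print each metric measurement in ``job`` to stdout, with a
    colorized pass/fail line for each specification level.
    """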
    specs, metrics = get_specs_metrics(job)

    print(Bcolors.BOLD + Bcolors.HEADER + "=" * 65 + Bcolors.ENDC)
    print(Bcolors.BOLD + Bcolors.HEADER +
          '{band} band metric measurements'.format(band=job.meta['filter_name']) +
          Bcolors.ENDC)
    print(Bcolors.BOLD + Bcolors.HEADER + "=" * 65 + Bcolors.ENDC)

    wrapper = TextWrapper(width=65)
    for metric_name, metric_set in metrics.items():
        metric = job.metrics[metric_set[0]]  # Pick the first one for the description
        print(Bcolors.HEADER + '{name} - {reference}'.format(
            name=metric.name, reference=metric.reference))
        print(wrapper.fill(Bcolors.ENDC + '{description}'.format(
            description=metric.description).strip()))

        for spec_key, metric_key in zip(specs[metric_name], metrics[metric_name]):
            level = None
            if 'release' in job.specs[spec_key].tags:
                # Skip release specs
                continue
            for lev in levels:
                if lev in str(spec_key):
                    level = lev
            try:
                m = job.measurements[metric_key]
            except KeyError:
                print('\tSkipped {metric_key:12s} with spec {spec}: no such measurement'.format(
                    metric_key=metric_name, spec=level))
                continue

            if np.isnan(m.quantity):
                print('\tSkipped {metric_key:12s} no measurement'.format(
                    metric_key=".".join([metric_name, level])))
                continue

            spec = job.specs[spec_key]
            passed = spec.check(m.quantity)
            if passed:
                prefix = Bcolors.OKBLUE + '\tPassed '
            else:
                prefix = Bcolors.FAIL + '\tFailed '
            infoStr = '{specName:12s} {meas:.4g} {op} {spec:.4g}'.format(
                specName=level,
                meas=m.quantity,
                op=spec.operator_str,
                spec=spec.threshold)
            print(prefix + infoStr + Bcolors.ENDC)


def print_pass_fail_summary(jobs, levels=('minimum', 'design', 'stretch'), default_level='design'):
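    """Print a colorized per-filter count of passed, failed, and skipped
    measurements in ``jobs`` at each specification level, followed by an
    overall summary at ``default_level``.
    """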
    currentTestCount = 0
    currentFailCount = 0
    currentSkippedCount = 0

    for filterName, job in jobs.items():
        specs, metrics = get_specs_metrics(job)
        print('')
        print(Bcolors.BOLD + Bcolors.HEADER + "=" * 65 + Bcolors.ENDC)
        print(Bcolors.BOLD + Bcolors.HEADER + '{0} band summary'.format(filterName) + Bcolors.ENDC)
        print(Bcolors.BOLD + Bcolors.HEADER + "=" * 65 + Bcolors.ENDC)

        for specName in levels:
            measurementCount = 0
            failCount = 0
            skippedCount = 0
            for key, m in job.measurements.items():
                metric = key.metric.split("_")  # For compound metrics
                len_metric = len(metric)
                if len_metric > 1:
                    if metric[1] != specName:
                        continue
                    if len_metric > 2 and filterName not in metric[2]:
                        continue
                spec_set = specs.get(metric[0], None)
                if spec_set is None:
                    continue
                spec = None
                for spec_key in spec_set:
                    if specName in spec_key.spec:
                        spec = job.specs[spec_key]
                if spec is None:
                    for spec_key in spec_set:
                        if specName in spec_key.metric:  # For dependent metrics
                            spec = job.specs[spec_key]
                if spec is not None:
                    measurementCount += 1
                    if np.isnan(m.quantity):
                        # A NaN quantity means the measurement was skipped,
                        # not failed.
                        skippedCount += 1
                    elif not spec.check(m.quantity):
                        failCount += 1

            if specName == default_level:
                currentTestCount += measurementCount
                currentFailCount += failCount
                currentSkippedCount += skippedCount

            if failCount == 0:
                print('Passed {level:12s} {count:d} measurements ({skipped:d} skipped)'.format(
                    level=specName, count=measurementCount, skipped=skippedCount))
            else:
                msg = 'Failed {level:12s} {failCount:d} of {count:d} measurements ({skipped:d} skipped)'.format(
                    level=specName, failCount=failCount, count=measurementCount, skipped=skippedCount)
                print(Bcolors.FAIL + msg + Bcolors.ENDC)

        print(Bcolors.BOLD + Bcolors.HEADER + "=" * 65 + Bcolors.ENDC + '\n')

    # Print summary against the current spec level.
    print(Bcolors.BOLD + Bcolors.HEADER + "=" * 65 + Bcolors.ENDC)
    print(Bcolors.BOLD + Bcolors.HEADER + '{0} level summary'.format(default_level) + Bcolors.ENDC)
    print(Bcolors.BOLD + Bcolors.HEADER + "=" * 65 + Bcolors.ENDC)
    if currentFailCount > 0:
        msg = 'FAILED ({failCount:d}/{count:d} measurements ({skipped:d} skipped))'.format(
            failCount=currentFailCount, count=currentTestCount, skipped=currentSkippedCount)
        print(Bcolors.FAIL + msg + Bcolors.ENDC)
    else:
        print('PASSED ({count:d}/{count:d} measurements ({skipped:d} skipped))'.format(
            count=currentTestCount, skipped=currentSkippedCount))

    print(Bcolors.BOLD + Bcolors.HEADER + "=" * 65 + Bcolors.ENDC)