# python/lsst/verify/report.py
# This file is part of verify.
#
# Developed for the LSST Data Management System.
# This product includes software developed by the LSST Project
# (https://www.lsst.org).
# See the COPYRIGHT file at the top-level directory of this distribution
# for details of code ownership.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.

import numpy as np
from astropy.table import Table

from .naming import Name
r"""Report tabulating specification pass/fail status for a set of `lsst.verify.Measurement`\ s.

    Parameters
    ----------
    measurements : `lsst.verify.MeasurementSet`
        Measurements to be tested.
    specs : `lsst.verify.SpecificationSet`
        Specifications to test measurements against. These specifications
        are assumed to be relevant to the measurements. Use
        `lsst.verify.SpecificationSet.subset`, passing in job metadata
        (`lsst.verify.Job.meta`), to ensure this.
    """

    def __init__(self, measurements, specs):
        self._meas_set = measurements
        self._spec_set = specs
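
    # A minimal construction sketch. Here `job` is a hypothetical
    # `lsst.verify.Job` whose measurements and specifications are already
    # loaded, and the `meta` keyword follows the class docstring's advice
    # about `SpecificationSet.subset`:
    #
    #     specs = job.specs.subset(meta=job.meta)
    #     report = Report(job.measurements, specs)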
"""Make an table summarizing specification tests of measurements.

        Returns
        -------
        table : `astropy.table.Table`
            Table with columns:

            - **Status**
            - **Specification**
            - **Measurement**
            - **Test**
            - **Metric Tags**
            - **Spec. Tags**
        """
        # Columns for the table
        statuses = []
        spec_name_items = []
        measurements = []
        tests = []
        metric_tags = []
        spec_tags = []

        spec_names = list(self._spec_set.keys())
        spec_names.sort()

        for spec_name in spec_names:
            # Test whether there is a measurement for this specification;
            # if not, skip it.
            metric_name = Name(package=spec_name.package,
                               metric=spec_name.metric)
            try:
                meas = self._meas_set[metric_name]
            except KeyError:
                # No measurement for this specification; skip it.
                continue

            spec = self._spec_set[spec_name]

            if np.isnan(meas.quantity):
                # Not measured
                # https://emojipedia.org/heavy-minus-sign/
                statuses.append(u'\U00002796')
            elif spec.check(meas.quantity):
                # Passed
                # http://emojipedia.org/white-heavy-check-mark/
                statuses.append(u'\U00002705')
            else:
                # Failed
                # http://emojipedia.org/cross-mark/
                statuses.append(u'\U0000274C')

            spec_name_items.append(str(spec_name))
            measurements.append(meas._repr_latex_())
            tests.append(spec._repr_latex_())

            tags = list(spec.tags)
            tags.sort()
            spec_tags.append(', '.join(tags))

            metric = meas.metric
            if metric is None:
                # No metric is available; fall back to a placeholder.
                metric_tags.append('N/A')
            else:
                tags = list(metric.tags)
                tags.sort()
                metric_tags.append(', '.join(tags))

        table = Table([statuses, spec_name_items, measurements, tests,
                       metric_tags, spec_tags],
                      names=['Status', 'Specification', 'Measurement',
                             'Test', 'Metric Tags', 'Spec. Tags'])
        return table
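
    # A sketch of consuming the summary, assuming a constructed `report`;
    # multi-column selection is standard `astropy.table.Table` behaviour:
    #
    #     table = report.make_table()
    #     print(table['Status', 'Specification'])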
"""HTML representation of the report for Jupyter notebooks.""" table = self.make_table() return table._repr_html_()
"""Display the report in a Jupyter notebook.""" table = self.make_table() return table.show_in_notebook(show_row_index='') |