Coverage for python/lsst/utils/tests.py : 66%

Hot-keys on this page
r m x p toggle line displays
j k next/prev highlighted chunk
0 (zero) top of page
1 (one) first highlighted chunk
# # LSST Data Management System # # Copyright 2008-2017 AURA/LSST. # # This product includes software developed by the # LSST Project (http://www.lsst.org/). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the LSST License Statement and # the GNU General Public License along with this program. If not, # see <https://www.lsstcorp.org/LegalNotices/>. #
"TestCase", "assertFloatsAlmostEqual", "assertFloatsNotEqual", "assertFloatsEqual"]
# File descriptor leak test will be skipped if psutil can not be imported except ImportError: psutil = None
# Initialize the list of open files to an empty set
"""Return a set containing the list of files currently open in this process.
Returns ------- open_files : `set` Set containing the list of open files. """ return set()
"""Initialize the memory tester and file descriptor leak tester.""" global memId0 global open_files memId0 = dafBase.Citizen.getNextMemId() # used by MemoryTestCase # Reset the list of open files
"""Run a test suite and report the test return status to caller or shell.
.. note:: Deprecated in 13_0 Use `unittest.main()` instead, which automatically detects all tests in a test case and does not require a test suite.
Parameters ---------- suite : `unittest.TestSuite` Test suite to run. exit : `bool`, optional If `True`, Python process will exit with the test exit status.
Returns ------- status : `int` If ``exit`` is `False`, will return 0 if the tests passed, or 1 if the tests failed. """
warnings.warn("lsst.utils.tests.run() is deprecated; please use unittest.main() instead", DeprecationWarning, stacklevel=2)
if unittest.TextTestRunner().run(suite).wasSuccessful(): status = 0 else: status = 1
if exit: sys.exit(status) else: return status
"""Sort supplied test suites such that MemoryTestCases are at the end.
`lsst.utils.tests.MemoryTestCase` tests should always run after any other tests in the module.
Parameters ---------- tests : sequence Sequence of test suites.
Returns ------- suite : `unittest.TestSuite` A combined `~unittest.TestSuite` with `~lsst.utils.tests.MemoryTestCase` at the end. """
# Just test the first test method in the suite for MemoryTestCase # Use loop rather than next as it is possible for a test class # to not have any test methods and the Python community prefers # for loops over catching a StopIteration exception. bases = inspect.getmro(method.__class__) break if bases is not None and MemoryTestCase in bases: memtests.append(test_suite) else: suite.addTests(test_suite) else:
# Replace the suiteClass callable in the defaultTestLoader # so that we can reorder the test ordering. This will have # no effect if no memory test cases are found.
"""Check for memory leaks since memId0 was allocated"""
def tearDownClass(cls): """Reset the leak counter when the tests have been completed"""
"""Check for memory leaks in the preceding tests""" gc.collect() global memId0, nleakPrintMax nleak = dafBase.Citizen.census(0, memId0) if nleak != 0: plural = "s" if nleak != 1 else "" print("\n%d Object%s leaked:" % (nleak, plural))
if nleak <= nleakPrintMax: print(dafBase.Citizen.census(memId0)) else: census = dafBase.Citizen.census() print("...") for i in range(nleakPrintMax - 1, -1, -1): print(census[i].repr())
self.fail("Leaked %d block%s" % (nleak, plural))
"""Check if any file descriptors are open since init() called.""" self.skipTest("Unable to test file descriptor leaks. psutil unavailable.") global open_files
# Some files are opened out of the control of the stack. not f.startswith("/proc/") and not f.endswith(".ttf") and not (f.startswith("/var/lib/") and f.endswith("/passwd")) and not f.endswith("astropy.log"))
for f in diff: print("File open: %s" % f) self.fail("Failed to close %d file%s" % (len(diff), "s" if len(diff) != 1 else ""))
"""Test that executables can be run and return good status.
The test methods are dynamically created. Callers must subclass this class in their own test file and invoke the create_executable_tests() class method to register the tests. """
def setUpClass(cls): """Abort testing if automated test creation was enabled and no tests were found."""
raise Exception("No executables discovered.")
"""This test exists to ensure that there is at least one test to be executed. This allows the test runner to trigger the class set up machinery to test whether there are some executables to test."""
"""Check an executable runs and returns good status.
Prints output to standard out. On bad exit status the test fails. If the executable can not be located the test is skipped.
Parameters ---------- executable : `str` Path to an executable. ``root_dir`` is not used if this is an absolute path. root_dir : `str`, optional Directory containing executable. Ignored if `None`. args : `list` or `tuple`, optional Arguments to be provided to the executable. msg : `str`, optional Message to use when the test fails. Can be `None` for default message.
Raises ------ AssertionError The executable did not return 0 exit status. """
# Form the argument list for subprocess
except subprocess.CalledProcessError as e: output = e.output failmsg = "Bad exit status from '{}': {}".format(executable, e.returncode) if msg is None: msg = failmsg self.fail(msg)
def _build_test_method(cls, executable, root_dir): """Build a test method and attach to class.
A test method is created for the supplied executable located in the supplied root directory. This method is attached to the class so that the test runner will discover the test and run it.
Parameters ---------- cls : `object` The class in which to create the tests. executable : `str` Name of executable. Can be absolute path. root_dir : `str` Path to executable. Not used if executable path is absolute. """ executable = os.path.abspath(os.path.join(root_dir, executable))
# Create the test name from the executable path.
# This is the function that will become the test method
# Give it a name and attach it to the class
"""Discover executables to test and create corresponding test methods.
Scans the directory containing the supplied reference file (usually ``__file__`` supplied from the test class) to look for executables. If executables are found a test method is created for each one. That test method will run the executable and check the returned value.
Executable scripts with a ``.py`` extension and shared libraries are ignored by the scanner.
This class method must be called before test discovery.
Parameters ---------- ref_file : `str` Path to a file within the directory to be searched. If the files are in the same location as the test file, then ``__file__`` can be used. executables : `list` or `tuple`, optional Sequence of executables that can override the automated detection. If an executable mentioned here is not found, a skipped test will be created for it, rather than a failed test.
Examples -------- >>> cls.create_executable_tests(__file__) """
# Get the search directory from the reference file
# Look for executables to test by walking the tree # Skip Python files. Shared libraries are executable.
# Store the number of tests found for later assessment. # Do not raise an exception if we have no executables as this would # cause the testing to abort before the test runner could properly # integrate it into the failure report.
# Create the test functions and attach them to the class
"""Return a path suitable for a temporary file and try to delete the file on success
If the with block completes successfully then the file is deleted, if possible; failure results in a printed warning. If a file remains when it should not, a RuntimeError exception is raised. This exception is also raised if a file is not present on context manager exit when one is expected to exist. If the block exits with an exception the file is left on disk so it can be examined. The file name has a random component such that nested context managers can be used with the same file suffix.
Parameters ----------
ext : `str` File name extension, e.g. ``.fits``. expectOutput : `bool`, optional If `True`, a file should be created within the context manager. If `False`, a file should not be present when the context manager exits.
Returns ------- `str` Path for a temporary file. The path is a combination of the caller's file path and the name of the top-level function
Notes ----- ::
# file tests/testFoo.py import unittest import lsst.utils.tests class FooTestCase(unittest.TestCase): def testBasics(self): self.runTest()
def runTest(self): with lsst.utils.tests.getTempFilePath(".fits") as tmpFile: # if tests/.tests exists then # tmpFile = "tests/.tests/testFoo_testBasics.fits" # otherwise tmpFile = "testFoo_testBasics.fits" ... # at the end of this "with" block the path tmpFile will be # deleted, but only if the file exists and the "with" # block terminated normally (rather than with an exception) ... """ # get name of first function in the file # this function called the previous function else:
outDir = "" # There should not be a file there given the randomizer. Warn and remove. # Use stacklevel 3 so that the warning is reported from the end of the with block warnings.warn("Unexpectedly found pre-existing tempfile named %r" % (outPath,), stacklevel=3) try: os.remove(outPath) except OSError: pass
else: # Try to clean up the file regardless except OSError as e: # Use stacklevel 3 so that the warning is reported from the end of the with block warnings.warn("Warning: could not remove file %r: %s" % (outPath, e), stacklevel=3)
"""Subclass of unittest.TestCase that adds some custom assertions for convenience. """
"""A decorator to add a free function to our custom TestCase class, while also making it available as a free function. """
def assertRaisesLsstCpp(testcase, excClass, callableObj, *args, **kwargs): """.. note:: Deprecated in 12_0""" DeprecationWarning, stacklevel=2)
"""Decorator to enter the debugger when there's an uncaught exception
To use, just slap a ``@debugger()`` on your function.
You may provide specific exception classes to catch as arguments to the decorator function, e.g., ``@debugger(RuntimeError, NotImplementedError)``. This defaults to just `AssertionError`, for use on `unittest.TestCase` methods.
Code provided by "Rosh Oxymoron" on StackOverflow: http://stackoverflow.com/questions/4398967/python-unit-testing-automatically-running-the-debugger-when-a-test-fails
Notes ----- Consider using ``pytest --pdb`` instead of this decorator. """ if not exceptions: exceptions = (AssertionError, )
def decorator(f): @functools.wraps(f) def wrapper(*args, **kwargs): try: return f(*args, **kwargs) except exceptions: import sys import pdb pdb.post_mortem(sys.exc_info()[2]) return wrapper return decorator
"""Plot the comparison of two 2-d NumPy arrays.
Parameters ---------- lhs : `numpy.ndarray` LHS values to compare; a 2-d NumPy array rhs : `numpy.ndarray` RHS values to compare; a 2-d NumPy array bad : `numpy.ndarray` A 2-d boolean NumPy array of values to emphasize in the plots diff : `numpy.ndarray` difference array; a 2-d NumPy array, or None to show lhs-rhs plotFileName : `str` Filename to save the plot to. If None, the plot will be displayed in a window.
Notes ----- This method uses `matplotlib` and imports it internally; it should be wrapped in a try/except block within packages that do not depend on `matplotlib` (including `~lsst.utils`). """ from matplotlib import pyplot if diff is None: diff = lhs - rhs pyplot.figure() if bad is not None: # make an rgba image that's red and transparent where not bad badImage = numpy.zeros(bad.shape + (4,), dtype=numpy.uint8) badImage[:, :, 0] = 255 badImage[:, :, 1] = 0 badImage[:, :, 2] = 0 badImage[:, :, 3] = 255*bad vmin1 = numpy.minimum(numpy.min(lhs), numpy.min(rhs)) vmax1 = numpy.maximum(numpy.max(lhs), numpy.max(rhs)) vmin2 = numpy.min(diff) vmax2 = numpy.max(diff) for n, (image, title) in enumerate([(lhs, "lhs"), (rhs, "rhs"), (diff, "diff")]): pyplot.subplot(2, 3, n + 1) im1 = pyplot.imshow(image, cmap=pyplot.cm.gray, interpolation='nearest', origin='lower', vmin=vmin1, vmax=vmax1) if bad is not None: pyplot.imshow(badImage, alpha=0.2, interpolation='nearest', origin='lower') pyplot.axis("off") pyplot.title(title) pyplot.subplot(2, 3, n + 4) im2 = pyplot.imshow(image, cmap=pyplot.cm.gray, interpolation='nearest', origin='lower', vmin=vmin2, vmax=vmax2) if bad is not None: pyplot.imshow(badImage, alpha=0.2, interpolation='nearest', origin='lower') pyplot.axis("off") pyplot.title(title) pyplot.subplots_adjust(left=0.05, bottom=0.05, top=0.92, right=0.75, wspace=0.05, hspace=0.05) cax1 = pyplot.axes([0.8, 0.55, 0.05, 0.4]) pyplot.colorbar(im1, cax=cax1) cax2 = pyplot.axes([0.8, 0.05, 0.05, 0.4]) pyplot.colorbar(im2, cax=cax2) if plotFileName: pyplot.savefig(plotFileName) else: pyplot.show()
atol=sys.float_info.epsilon, relTo=None, printFailures=True, plotOnFailure=False, plotFileName=None, invert=False, msg=None): """Highly-configurable floating point comparisons for scalars and arrays.
The test assertion will fail unless all elements of ``lhs`` and ``rhs`` are equal to within the tolerances specified by ``rtol`` and ``atol``. More precisely, the comparison is:
``abs(lhs - rhs) <= relTo*rtol OR abs(lhs - rhs) <= atol``
If ``rtol`` or ``atol`` is `None`, that term in the comparison is not performed at all.
When not specified, ``relTo`` is the elementwise maximum of the absolute values of ``lhs`` and ``rhs``. If set manually, it should usually be set to either ``lhs`` or ``rhs``, or a scalar value typical of what is expected.
Parameters ---------- testCase : `unittest.TestCase` Instance the test is part of. lhs : scalar or array-like LHS value(s) to compare; may be a scalar or array-like of any dimension. rhs : scalar or array-like RHS value(s) to compare; may be a scalar or array-like of any dimension. rtol : `float`, optional Relative tolerance for comparison; defaults to double-precision epsilon. atol : `float`, optional Absolute tolerance for comparison; defaults to double-precision epsilon. relTo : `float`, optional Value to which comparison with rtol is relative. printFailures : `bool`, optional Upon failure, print all inequal elements as part of the message. plotOnFailure : `bool`, optional Upon failure, plot the originals and their residual with matplotlib. Only 2-d arrays are supported. plotFileName : `str`, optional Filename to save the plot to. If `None`, the plot will be displayed in a window. invert : `bool`, optional If `True`, invert the comparison and fail only if any elements *are* equal. Used to implement `~lsst.utils.tests.assertFloatsNotEqual`, which should generally be used instead for clarity. msg : `str`, optional String to append to the error message when assert fails.
Raises ------ AssertionError The values are not almost equal. """ else: else: else: % (lhs, cmpStr, rhs, absDiff, atol)] % (lhs, cmpStr, rhs, absDiff, relTo, absDiff/relTo, rtol)] else: % (lhs, cmpStr, rhs, absDiff, relTo, absDiff/relTo, rtol, atol)] else: % (bad.sum(), bad.size, failStr, rtol, atol)] if len(lhs.shape) != 2 or len(rhs.shape) != 2: raise ValueError("plotOnFailure is only valid for 2-d arrays") try: plotImageDiff(lhs, rhs, bad, diff=diff, plotFileName=plotFileName) except ImportError: errMsg.append("Failure plot requested but matplotlib could not be imported.") # Make sure everything is an array if any of them are, so we can treat # them the same (diff and absDiff are arrays if either rhs or lhs is), # and we don't get here if neither is. else:
def assertFloatsNotEqual(testCase, lhs, rhs, **kwds): """Fail a test if the given floating point values are equal to within the given tolerances.
See `~lsst.utils.tests.assertFloatsAlmostEqual` (called with ``rtol=atol=0``) for more information.
Parameters ---------- testCase : `unittest.TestCase` Instance the test is part of. lhs : scalar or array-like LHS value(s) to compare; may be a scalar or array-like of any dimension. rhs : scalar or array-like RHS value(s) to compare; may be a scalar or array-like of any dimension.
Raises ------ AssertionError The values are almost equal. """
def assertFloatsEqual(testCase, lhs, rhs, **kwargs): """ Assert that lhs == rhs (both numeric types, whether scalar or array).
See `~lsst.utils.tests.assertFloatsAlmostEqual` (called with ``rtol=atol=0``) for more information.
Parameters ---------- testCase : `unittest.TestCase` Instance the test is part of. lhs : scalar or array-like LHS value(s) to compare; may be a scalar or array-like of any dimension. rhs : scalar or array-like RHS value(s) to compare; may be a scalar or array-like of any dimension.
Raises ------ AssertionError The values are not equal. """
def assertClose(*args, **kwargs): """.. note:: Deprecated in 12_0""" DeprecationWarning, stacklevel=2)
def assertNotClose(*args, **kwargs): """.. note:: Deprecated in 12_0""" DeprecationWarning, stacklevel=2) |