23 """Support code for running unit tests"""
__all__ = ["init", "MemoryTestCase", "ExecutablesTestCase", "getTempFilePath",
           "TestCase", "assertFloatsAlmostEqual", "assertFloatsNotEqual",
           "assertFloatsEqual", "debugger", "classParameters", "methodParameters"]
def _get_open_files():
    """Return a set containing the list of files currently open in this
    process.

    Returns
    -------
    open_files : `set`
        Set containing the list of open files.
    """
    return set(p.path for p in psutil.Process().open_files())
60 """Initialize the memory tester and file descriptor leak tester."""
63 open_files = _get_open_files()
67 """Sort supplied test suites such that MemoryTestCases are at the end.
69 `lsst.utils.tests.MemoryTestCase` tests should always run after any other
75 Sequence of test suites.
79 suite : `unittest.TestSuite`
80 A combined `~unittest.TestSuite` with
81 `~lsst.utils.tests.MemoryTestCase` at the end.
84 suite = unittest.TestSuite()
86 for test_suite
in tests:
93 for method
in test_suite:
94 bases = inspect.getmro(method.__class__)
96 if bases
is not None and MemoryTestCase
in bases:
97 memtests.append(test_suite)
99 suite.addTests(test_suite)
101 if isinstance(test_suite, MemoryTestCase):
102 memtests.append(test_suite)
104 suite.addTest(test_suite)
105 suite.addTests(memtests)
116 unittest.defaultTestLoader.suiteClass = suiteClassWrapper
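# Illustrative sketch (kept as a comment so nothing runs at import time; the
# test module name is hypothetical): once this module has been imported,
# loading tests through the default loader yields suites in which any
# MemoryTestCase-derived tests come last.
#
#     import test_mymodule   # a test module that defines a MemoryTestCase
#     suite = unittest.defaultTestLoader.loadTestsFromModule(test_mymodule)
#     # The MemoryTestCase suite is now the final entry in `suite`.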
120 """Check for resource leaks."""
124 """Reset the leak counter when the tests have been completed"""
128 """Check if any file descriptors are open since init() called."""
131 now_open = _get_open_files()
134 now_open = set(f
for f
in now_open
if not f.endswith(
".car")
135 and not f.startswith(
"/proc/")
136 and not f.endswith(
".ttf")
137 and not (f.startswith(
"/var/lib/")
and f.endswith(
"/passwd"))
138 and not f.endswith(
"astropy.log"))
140 diff = now_open.difference(open_files)
143 print(
"File open: %s" % f)
144 self.fail(
"Failed to close %d file%s" % (len(diff),
"s" if len(diff) != 1
else ""))
148 """Test that executables can be run and return good status.
150 The test methods are dynamically created. Callers
151 must subclass this class in their own test file and invoke
152 the create_executable_tests() class method to register the tests.
154 TESTS_DISCOVERED = -1
158 """Abort testing if automated test creation was enabled and
159 no tests were found."""
162 raise RuntimeError(
"No executables discovered.")
165 """This test exists to ensure that there is at least one test to be
166 executed. This allows the test runner to trigger the class set up
167 machinery to test whether there are some executables to test."""
171 """Check an executable runs and returns good status.
173 Prints output to standard out. On bad exit status the test
174 fails. If the executable can not be located the test is skipped.
179 Path to an executable. ``root_dir`` is not used if this is an
181 root_dir : `str`, optional
182 Directory containing executable. Ignored if `None`.
183 args : `list` or `tuple`, optional
184 Arguments to be provided to the executable.
185 msg : `str`, optional
186 Message to use when the test fails. Can be `None` for default
192 The executable did not return 0 exit status.
195 if root_dir
is not None and not os.path.isabs(executable):
196 executable = os.path.join(root_dir, executable)
199 sp_args = [executable]
200 argstr =
"no arguments"
203 argstr =
'arguments "' +
" ".join(args) +
'"'
205 print(
"Running executable '{}' with {}...".format(executable, argstr))
206 if not os.path.exists(executable):
207 self.skipTest(
"Executable {} is unexpectedly missing".format(executable))
210 output = subprocess.check_output(sp_args)
211 except subprocess.CalledProcessError
as e:
213 failmsg =
"Bad exit status from '{}': {}".format(executable, e.returncode)
214 print(output.decode(
'utf-8'))
    @classmethod
    def _build_test_method(cls, executable, root_dir):
        """Build a test method and attach to class.

        A test method is created for the supplied executable located
        in the supplied root directory. This method is attached to the class
        so that the test runner will discover the test and run it.

        Parameters
        ----------
        cls : `object`
            The class in which to create the tests.
        executable : `str`
            Name of executable. Can be absolute path.
        root_dir : `str`
            Path to executable. Not used if executable path is absolute.
        """
        if not os.path.isabs(executable):
            executable = os.path.abspath(os.path.join(root_dir, executable))

        # Create the test name from the executable path.
        test_name = "test_exe_" + executable.replace("/", "_")

        # This is the function that will become the test method.
        def test_executable_runs(*args):
            self = args[0]
            self.assertExecutable(executable)

        # Give it a name and attach it to the class.
        test_executable_runs.__name__ = test_name
        setattr(cls, test_name, test_executable_runs)
254 """Discover executables to test and create corresponding test methods.
256 Scans the directory containing the supplied reference file
257 (usually ``__file__`` supplied from the test class) to look for
258 executables. If executables are found a test method is created
259 for each one. That test method will run the executable and
260 check the returned value.
262 Executable scripts with a ``.py`` extension and shared libraries
263 are ignored by the scanner.
265 This class method must be called before test discovery.
270 Path to a file within the directory to be searched.
271 If the files are in the same location as the test file, then
272 ``__file__`` can be used.
273 executables : `list` or `tuple`, optional
274 Sequence of executables that can override the automated
275 detection. If an executable mentioned here is not found, a
276 skipped test will be created for it, rather than a failed
281 >>> cls.create_executable_tests(__file__)
285 ref_dir = os.path.abspath(os.path.dirname(ref_file))
287 if executables
is None:
290 for root, dirs, files
in os.walk(ref_dir):
293 if not f.endswith(
".py")
and not f.endswith(
".so"):
294 full_path = os.path.join(root, f)
295 if os.access(full_path, os.X_OK):
296 executables.append(full_path)
305 for e
in executables:
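# Usage sketch (comment only; the file and class names are illustrative): a
# package exposes its command-line tools to the test runner by subclassing
# ExecutablesTestCase and registering the discovered executables before test
# discovery runs.
#
#     # file tests/testExecutables.py
#     import unittest
#     import lsst.utils.tests
#
#     class BinaryTestCase(lsst.utils.tests.ExecutablesTestCase):
#         pass
#
#     # Creates one test_exe_* method per executable found next to this file.
#     BinaryTestCase.create_executable_tests(__file__)
#
#     if __name__ == "__main__":
#         unittest.main()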
@contextlib.contextmanager
def getTempFilePath(ext, expectOutput=True):
    """Return a path suitable for a temporary file and try to delete the
    file on success.

    If the with block completes successfully then the file is deleted,
    if possible; failure results in a printed warning.
    If a file remains when it should not, a RuntimeError exception is
    raised. This exception is also raised if a file is not present on context
    manager exit when one is expected to exist.
    If the block exits with an exception the file is left on disk so it can be
    examined. The file name has a random component such that nested context
    managers can be used with the same file suffix.

    Parameters
    ----------
    ext : `str`
        File name extension, e.g. ``.fits``.
    expectOutput : `bool`, optional
        If `True`, a file should be created within the context manager.
        If `False`, a file should not be present when the context manager
        exits.

    Returns
    -------
    path : `str`
        Path for a temporary file. The path is a combination of the caller's
        file path and the name of the top-level function.

    Examples
    --------
    ::

        # file tests/testFoo.py
        import unittest
        import lsst.utils.tests

        class FooTestCase(unittest.TestCase):
            def testBasics(self):
                with lsst.utils.tests.getTempFilePath(".fits") as tmpFile:
                    # if tests/.tests exists then
                    # tmpFile = "tests/.tests/testFoo_testBasics.fits"
                    # otherwise tmpFile = "testFoo_testBasics.fits"
                    ...
                # at the end of this "with" block the path tmpFile will be
                # deleted, but only if the file exists and the "with"
                # block terminated normally (rather than with an exception)
    """
    stack = inspect.stack()
    # Find the file and top-level function that called us.
    for i in range(2, len(stack)):
        frameInfo = inspect.getframeinfo(stack[i][0])
        if i == 2:
            callerFilePath = frameInfo.filename
            callerFuncName = frameInfo.function
        elif callerFilePath == frameInfo.filename:
            # Still in the caller's file, so move up to this frame's function.
            callerFuncName = frameInfo.function
        else:
            break

    callerDir, callerFileNameWithExt = os.path.split(callerFilePath)
    callerFileName = os.path.splitext(callerFileNameWithExt)[0]
    outDir = os.path.join(callerDir, ".tests")
    if not os.path.isdir(outDir):
        # No .tests directory; write the file to the current directory.
        outDir = ""
    prefix = "%s_%s-" % (callerFileName, callerFuncName)
    outPath = tempfile.mktemp(dir=outDir, suffix=ext, prefix=prefix)
    if os.path.exists(outPath):
        # The random component should make this impossible.
        warnings.warn("Unexpectedly found pre-existing tempfile named %r" % (outPath,),
                      stacklevel=3)

    yield outPath

    fileExists = os.path.exists(outPath)
    if expectOutput:
        if not fileExists:
            raise RuntimeError("Temp file expected named {} but none found".format(outPath))
    else:
        if fileExists:
            raise RuntimeError("Unexpectedly discovered temp file named {}".format(outPath))
    # Try to clean up the file regardless of whether it was expected.
    if fileExists:
        try:
            os.remove(outPath)
        except OSError as e:
            warnings.warn("Warning: could not remove file %r: %s" % (outPath, e), stacklevel=3)
410 """Subclass of unittest.TestCase that adds some custom assertions for
416 """A decorator to add a free function to our custom TestCase class, while also
417 making it available as a free function.
419 setattr(TestCase, func.__name__, func)
424 """Decorator to enter the debugger when there's an uncaught exception
426 To use, just slap a ``@debugger()`` on your function.
428 You may provide specific exception classes to catch as arguments to
429 the decorator function, e.g.,
430 ``@debugger(RuntimeError, NotImplementedError)``.
431 This defaults to just `AssertionError`, for use on `unittest.TestCase`
434 Code provided by "Rosh Oxymoron" on StackOverflow:
435 http://stackoverflow.com/questions/4398967/python-unit-testing-automatically-running-the-debugger-when-a-test-fails
439 Consider using ``pytest --pdb`` instead of this decorator.
442 exceptions = (Exception, )
446 def wrapper(*args, **kwargs):
448 return f(*args, **kwargs)
452 pdb.post_mortem(sys.exc_info()[2])
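# Usage sketch (comment only; the test method and computeValue() are
# illustrative): decorate a test to drop into pdb at the point of failure
# instead of only reporting it.
#
#     class InteractiveDebugTestCase(unittest.TestCase):
#         @debugger(AssertionError)
#         def testSuspect(self):
#             self.assertEqual(computeValue(), 42)   # hypothetical function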
458 """Plot the comparison of two 2-d NumPy arrays.
462 lhs : `numpy.ndarray`
463 LHS values to compare; a 2-d NumPy array
464 rhs : `numpy.ndarray`
465 RHS values to compare; a 2-d NumPy array
466 bad : `numpy.ndarray`
467 A 2-d boolean NumPy array of values to emphasize in the plots
468 diff : `numpy.ndarray`
469 difference array; a 2-d NumPy array, or None to show lhs-rhs
471 Filename to save the plot to. If None, the plot will be displayed in
476 This method uses `matplotlib` and imports it internally; it should be
477 wrapped in a try/except block within packages that do not depend on
478 `matplotlib` (including `~lsst.utils`).
480 from matplotlib
import pyplot
486 badImage = numpy.zeros(bad.shape + (4,), dtype=numpy.uint8)
487 badImage[:, :, 0] = 255
488 badImage[:, :, 1] = 0
489 badImage[:, :, 2] = 0
490 badImage[:, :, 3] = 255*bad
491 vmin1 = numpy.minimum(numpy.min(lhs), numpy.min(rhs))
492 vmax1 = numpy.maximum(numpy.max(lhs), numpy.max(rhs))
493 vmin2 = numpy.min(diff)
494 vmax2 = numpy.max(diff)
495 for n, (image, title)
in enumerate([(lhs,
"lhs"), (rhs,
"rhs"), (diff,
"diff")]):
496 pyplot.subplot(2, 3, n + 1)
497 im1 = pyplot.imshow(image, cmap=pyplot.cm.gray, interpolation=
'nearest', origin=
'lower',
498 vmin=vmin1, vmax=vmax1)
500 pyplot.imshow(badImage, alpha=0.2, interpolation=
'nearest', origin=
'lower')
503 pyplot.subplot(2, 3, n + 4)
504 im2 = pyplot.imshow(image, cmap=pyplot.cm.gray, interpolation=
'nearest', origin=
'lower',
505 vmin=vmin2, vmax=vmax2)
507 pyplot.imshow(badImage, alpha=0.2, interpolation=
'nearest', origin=
'lower')
510 pyplot.subplots_adjust(left=0.05, bottom=0.05, top=0.92, right=0.75, wspace=0.05, hspace=0.05)
511 cax1 = pyplot.axes([0.8, 0.55, 0.05, 0.4])
512 pyplot.colorbar(im1, cax=cax1)
513 cax2 = pyplot.axes([0.8, 0.05, 0.05, 0.4])
514 pyplot.colorbar(im2, cax=cax2)
516 pyplot.savefig(plotFileName)
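# Usage sketch (comment only; the array names are illustrative): as noted in
# the docstring, callers that cannot assume matplotlib is installed should
# guard the call.
#
#     try:
#         plotImageDiff(expected, measured, plotFileName="diff.png")
#     except ImportError:
#         print("matplotlib not available; skipping diagnostic plot")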
@inTestCase
def assertFloatsAlmostEqual(testCase, lhs, rhs, rtol=sys.float_info.epsilon,
                            atol=sys.float_info.epsilon, relTo=None,
                            printFailures=True, plotOnFailure=False,
                            plotFileName=None, invert=False, msg=None,
                            ignoreNaNs=False):
    """Highly-configurable floating point comparisons for scalars and arrays.

    The test assertion will fail if any elements of ``lhs`` and ``rhs`` are
    not equal to within the tolerances specified by ``rtol`` and ``atol``.
    More precisely, the comparison is:

    ``abs(lhs - rhs) <= relTo*rtol OR abs(lhs - rhs) <= atol``

    If ``rtol`` or ``atol`` is `None`, that term in the comparison is not
    performed.

    When not specified, ``relTo`` is the elementwise maximum of the absolute
    values of ``lhs`` and ``rhs``. If set manually, it should usually be set
    to either ``lhs`` or ``rhs``, or a scalar value typical of what is
    expected.

    Parameters
    ----------
    testCase : `unittest.TestCase`
        Instance the test is part of.
    lhs : scalar or array-like
        LHS value(s) to compare; may be a scalar or array-like of any
        dimension.
    rhs : scalar or array-like
        RHS value(s) to compare; may be a scalar or array-like of any
        dimension.
    rtol : `float`, optional
        Relative tolerance for comparison; defaults to double-precision
        epsilon.
    atol : `float`, optional
        Absolute tolerance for comparison; defaults to double-precision
        epsilon.
    relTo : `float`, optional
        Value to which comparison with rtol is relative.
    printFailures : `bool`, optional
        Upon failure, print all inequal elements as part of the message.
    plotOnFailure : `bool`, optional
        Upon failure, plot the originals and their residual with matplotlib.
        Only 2-d arrays are supported.
    plotFileName : `str`, optional
        Filename to save the plot to. If `None`, the plot will be displayed in
        a window.
    invert : `bool`, optional
        If `True`, invert the comparison and fail only if any elements *are*
        equal. Used to implement `~lsst.utils.tests.assertFloatsNotEqual`,
        which should generally be used instead for clarity.
    msg : `str`, optional
        String to append to the error message when assert fails.
    ignoreNaNs : `bool`, optional
        If `True` (`False` is default) mask out any NaNs from operand arrays
        before performing comparisons if they are in the same locations; NaNs
        in different locations trigger test assertion failures, even when
        ``invert=True``. Scalar NaNs are treated like arrays containing only
        NaNs of the same shape as the other operand, and no comparisons are
        performed if both sides are scalar NaNs.

    Raises
    ------
    AssertionError
        The values are not almost equal.
    """
    if ignoreNaNs:
        lhsMask = numpy.isnan(lhs)
        rhsMask = numpy.isnan(rhs)
        if not numpy.all(lhsMask == rhsMask):
            testCase.fail(f"lhs has {lhsMask.sum()} NaN values and rhs has {rhsMask.sum()} NaN values, "
                          f"in different locations.")
        if numpy.all(lhsMask):
            assert numpy.all(rhsMask), "Should be guaranteed by previous if."
            # Both operands are entirely NaN, so there is nothing to compare.
            return
        else:
            assert not numpy.all(rhsMask), "Should be guaranteed by previous two ifs."
        # If either operand is an array, select just its non-NaN values.
        if numpy.any(lhsMask):
            lhs = lhs[numpy.logical_not(lhsMask)]
        if numpy.any(rhsMask):
            rhs = rhs[numpy.logical_not(rhsMask)]
    if not numpy.isfinite(lhs).all():
        testCase.fail("Non-finite values in lhs")
    if not numpy.isfinite(rhs).all():
        testCase.fail("Non-finite values in rhs")
    diff = lhs - rhs
    absDiff = numpy.abs(lhs - rhs)
    if rtol is not None:
        if relTo is None:
            relTo = numpy.maximum(numpy.abs(lhs), numpy.abs(rhs))
        else:
            relTo = numpy.abs(relTo)
        bad = absDiff > rtol*relTo
        if atol is not None:
            bad = numpy.logical_and(bad, absDiff > atol)
    else:
        if atol is None:
            raise ValueError("rtol and atol cannot both be None")
        bad = absDiff > atol

    failed = numpy.any(bad)
    if invert:
        failed = not failed
        bad = numpy.logical_not(bad)
        cmpStr = "=="
        failStr = "are the same"
    else:
        cmpStr = "!="
        failStr = "differ"

    errMsg = []
    if failed:
        if numpy.isscalar(bad):
            if rtol is None:
                errMsg = ["%s %s %s; diff=%s with atol=%s"
                          % (lhs, cmpStr, rhs, absDiff, atol)]
            elif atol is None:
                errMsg = ["%s %s %s; diff=%s/%s=%s with rtol=%s"
                          % (lhs, cmpStr, rhs, absDiff, relTo, absDiff/relTo, rtol)]
            else:
                errMsg = ["%s %s %s; diff=%s/%s=%s with rtol=%s, atol=%s"
                          % (lhs, cmpStr, rhs, absDiff, relTo, absDiff/relTo, rtol, atol)]
        else:
            errMsg = ["%d/%d elements %s with rtol=%s, atol=%s"
                      % (bad.sum(), bad.size, failStr, rtol, atol)]
            if plotOnFailure:
                if len(lhs.shape) != 2 or len(rhs.shape) != 2:
                    raise ValueError("plotOnFailure is only valid for 2-d arrays")
                try:
                    plotImageDiff(lhs, rhs, bad, diff=diff, plotFileName=plotFileName)
                except ImportError:
                    errMsg.append("Failure plot requested but matplotlib could not be imported.")
            if printFailures:
                # Promote scalars to arrays so that all operands can be
                # indexed with the boolean mask in the same way.
                if numpy.isscalar(relTo):
                    relTo = numpy.ones(bad.shape, dtype=float) * relTo
                if numpy.isscalar(lhs):
                    lhs = numpy.ones(bad.shape, dtype=float) * lhs
                if numpy.isscalar(rhs):
                    rhs = numpy.ones(bad.shape, dtype=float) * rhs
                if rtol is None:
                    for a, b, diff in zip(lhs[bad], rhs[bad], absDiff[bad]):
                        errMsg.append("%s %s %s (diff=%s)" % (a, cmpStr, b, diff))
                else:
                    for a, b, diff, rel in zip(lhs[bad], rhs[bad], absDiff[bad], relTo[bad]):
                        errMsg.append("%s %s %s (diff=%s/%s=%s)" % (a, cmpStr, b, diff, rel, diff/rel))

    if msg is not None:
        errMsg.append(msg)
    testCase.assertFalse(failed, msg="\n".join(errMsg))
681 """Fail a test if the given floating point values are equal to within the
684 See `~lsst.utils.tests.assertFloatsAlmostEqual` (called with
685 ``rtol=atol=0``) for more information.
689 testCase : `unittest.TestCase`
690 Instance the test is part of.
691 lhs : scalar or array-like
692 LHS value(s) to compare; may be a scalar or array-like of any
694 rhs : scalar or array-like
695 RHS value(s) to compare; may be a scalar or array-like of any
701 The values are almost equal.
@inTestCase
def assertFloatsEqual(testCase, lhs, rhs, **kwargs):
    """Assert that lhs == rhs (both numeric types, whether scalar or array).

    See `~lsst.utils.tests.assertFloatsAlmostEqual` (called with
    ``rtol=atol=0``) for more information.

    Parameters
    ----------
    testCase : `unittest.TestCase`
        Instance the test is part of.
    lhs : scalar or array-like
        LHS value(s) to compare; may be a scalar or array-like of any
        dimension.
    rhs : scalar or array-like
        RHS value(s) to compare; may be a scalar or array-like of any
        dimension.

    Raises
    ------
    AssertionError
        The values are not equal.
    """
    return assertFloatsAlmostEqual(testCase, lhs, rhs, rtol=0, atol=0, **kwargs)
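# Usage sketch (comment only; the test class is illustrative): because these
# helpers are attached to TestCase via the inTestCase decorator, tests that
# subclass lsst.utils.tests.TestCase can call them as methods rather than as
# free functions.
#
#     class ComparisonTestCase(lsst.utils.tests.TestCase):
#         def testCompare(self):
#             self.assertFloatsEqual(2.0 * 2.0, 4.0)
#             self.assertFloatsNotEqual(1.0, 1.0 + 1e-8)
#             self.assertFloatsAlmostEqual(1.0, 1.0 + 1e-8, rtol=1e-6)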
def _settingsIterator(settings):
    """Return an iterator for the provided test settings

    Parameters
    ----------
    settings : `dict` (`str`: iterable)
        Lists of test parameters. Each should be an iterable of the same
        length. If a string is provided as an iterable, it will be converted
        to a list containing that single string.

    Raises
    ------
    AssertionError
        If the ``settings`` are not of the same length.

    Yields
    ------
    parameters : `dict` (`str`: anything)
        Set of parameters for a single iteration.
    """
    for name, values in settings.items():
        if isinstance(values, str):
            # Probably meant as a single value rather than an iterable of
            # characters.
            settings[name] = [values]
    num = len(next(iter(settings.values())))
    for name, values in settings.items():
        assert len(values) == num, f"Length mismatch for setting {name}: {len(values)} vs {num}"
    for ii in range(num):
        values = [settings[kk][ii] for kk in settings]
        yield dict(zip(settings.keys(), values))
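# Worked example (comment only):
#
#     list(_settingsIterator({"foo": [1, 2], "bar": ["a", "b"]}))
#     # --> [{"foo": 1, "bar": "a"}, {"foo": 2, "bar": "b"}]
#
# A bare string value such as bar="ab" would first be wrapped as ["ab"].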
766 """Class decorator for generating unit tests
768 This decorator generates classes with class variables according to the
769 supplied ``settings``.
773 **settings : `dict` (`str`: iterable)
774 The lists of test parameters to set as class variables in turn. Each
775 should be an iterable of the same length.
781 @classParameters(foo=[1, 2], bar=[3, 4])
782 class MyTestCase(unittest.TestCase):
785 will generate two classes, as if you wrote::
787 class MyTestCase_1_3(unittest.TestCase):
792 class MyTestCase_2_4(unittest.TestCase):
797 Note that the values are embedded in the class name.
800 module = sys.modules[cls.__module__].__dict__
801 for params
in _settingsIterator(settings):
802 name = f
"{cls.__name__}_{'_'.join(str(vv) for vv in params.values())}"
803 bindings = dict(cls.__dict__)
804 bindings.update(params)
805 module[name] = type(name, (cls,), bindings)
810 """Method decorator for unit tests
812 This decorator iterates over the supplied settings, using
813 ``TestCase.subTest`` to communicate the values in the event of a failure.
817 **settings : `dict` (`str`: iterable)
818 The lists of test parameters. Each should be an iterable of the same
825 @methodParameters(foo=[1, 2], bar=[3, 4])
826 def testSomething(self, foo, bar):
831 testSomething(foo=1, bar=3)
832 testSomething(foo=2, bar=4)
835 @functools.wraps(func)
836 def wrapper(self, *args, **kwargs):
837 for params
in _settingsIterator(settings):
838 kwargs.update(params)
839 with self.subTest(**params):
840 func(self, *args, **kwargs)
@contextlib.contextmanager
def temporaryDirectory():
    """Context manager that creates and destroys a temporary directory.

    The difference from `tempfile.TemporaryDirectory` is that this ignores
    errors when deleting a directory, which may happen with some filesystems.
    """
    tmpdir = tempfile.mkdtemp()
    yield tmpdir
    shutil.rmtree(tmpdir, ignore_errors=True)
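# Usage sketch (comment only; the file name is illustrative):
#
#     with temporaryDirectory() as tmpdir:
#         path = os.path.join(tmpdir, "scratch.dat")
#         with open(path, "w") as fh:
#             fh.write("scratch data")
#     # tmpdir is removed here; removal errors are deliberately ignored.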