23 """Support code for running unit tests"""
24 from __future__
import print_function
25 from __future__
import division
26 from builtins
import zip
27 from builtins
import range
29 from contextlib
import contextmanager
47 import lsst.daf.base
as dafBase
def _get_open_files():
    """Return a set containing the list of open files."""
    return set(p.path for p in psutil.Process().open_files())


"""Initialize the memory tester"""
memId0 = dafBase.Citizen.getNextMemId()
open_files = _get_open_files()
def run(suite, exit=True):
    """!Exit with the status code resulting from running the provided test suite"""
    if unittest.TextTestRunner().run(suite).wasSuccessful():
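# Usage sketch (not part of this module): a test file typically ends by
# handing its suite to run(), which exits with the runner's status so the
# build system can detect failures. The suite() helper name below is an
# assumption for the example.
#
#     def suite():
#         return unittest.defaultTestLoader.loadTestsFromName(__name__)
#
#     if __name__ == "__main__":
#         lsst.utils.tests.run(suite(), exit=True)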
93 """!Go through the supplied sequence of test suites and sort them to ensure that
94 MemoryTestCases are at the end of the test list. Returns a combined
97 suite = unittest.TestSuite()
99 for test_suite
in tests:
106 for method
in test_suite:
107 bases = inspect.getmro(method.__class__)
109 if bases
is not None and MemoryTestCase
in bases:
110 memtests.append(test_suite)
112 suite.addTests(test_suite)
114 if isinstance(test_suite, MemoryTestCase):
115 memtests.append(test_suite)
117 suite.addTest(test_suite)
118 suite.addTests(memtests)
129 unittest.defaultTestLoader.suiteClass = suiteClassWrapper
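# Usage sketch for sort_tests() (the suite names are hypothetical): combine
# several suites while keeping any MemoryTestCase-derived suites at the end,
# so the leak check runs after every other test has allocated and freed.
#
#     combined = sort_tests([fooSuite, barSuite, memoryLeakSuite])
#     unittest.TextTestRunner().run(combined)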
133 """!Check for memory leaks since memId0 was allocated"""
140 """!Reset the leak counter when the tests have been completed"""
144 """!Check for memory leaks in the preceding tests"""
147 global memId0, nleakPrintMax
148 nleak = dafBase.Citizen.census(0, memId0)
150 plural =
"s" if nleak != 1
else ""
151 print(
"\n%d Object%s leaked:" % (nleak, plural))
153 if nleak <= nleakPrintMax:
154 print(dafBase.Citizen.census(memId0))
156 census = dafBase.Citizen.census()
158 for i
in range(nleakPrintMax - 1, -1, -1):
159 print(census[i].repr())
161 self.fail(
"Leaked %d block%s" % (nleak, plural))
self.skipTest("Unable to test file descriptor leaks. psutil unavailable.")

now_open = _get_open_files()
now_open = set(f for f in now_open if not f.endswith(".car") and
               not f.endswith(".ttf") and
               f != "/var/lib/sss/mc/passwd" and
               not f.endswith("astropy.log"))

diff = now_open.difference(open_files)
for f in diff:
    print("File open: %s" % f)
self.fail("Failed to close %d file%s" % (len(diff), "s" if len(diff) != 1 else ""))
184 """!Test that executables can be run and return good status.
186 The test methods are dynamically created. Callers
187 must subclass this class in their own test file and invoke
188 the discover_tests() class method to register the tests.
190 TESTS_DISCOVERED = -1
194 """Abort testing if automated test creation was enabled and
195 yet not tests were found."""
197 if cls.TESTS_DISCOVERED == 0:
198 raise Exception(
"No executables discovered.")
201 """This test exists to ensure that there is at least one test to be
202 executed. This allows the test runner to trigger the class set up
203 machinery to test whether there are some executables to test."""
207 """!Check an executable runs and returns good status.
209 @param executable: Path to an executable. root_dir is not used
210 if this is an absolute path.
212 @param root_dir: Directory containing exe. Ignored if None.
214 @param args: List or tuple of arguments to be provided to the
217 @param msg: Message to use when the test fails. Can be None for
220 Prints output to standard out. On bad exit status the test
221 fails. If the executable can not be located the test is skipped.
224 if root_dir
is not None and not os.path.isabs(executable):
225 executable = os.path.join(root_dir, executable)
228 sp_args = [executable]
229 argstr =
"no arguments"
232 argstr =
'arguments "' +
" ".join(args) +
'"'
234 print(
"Running executable '{}' with {}...".format(executable, argstr))
235 if not os.path.exists(executable):
236 self.skipTest(
"Executable {} is unexpectedly missing".format(executable))
239 output = subprocess.check_output(sp_args)
240 except subprocess.CalledProcessError
as e:
242 failmsg =
"Bad exit status from '{}': {}".format(executable, e.returncode)
243 print(output.decode(
'utf-8'))
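# Usage sketch inside an ExecutablesTestCase subclass (the binary name,
# directory and arguments are hypothetical):
#
#     def testMyProgram(self):
#         self.assertExecutable("myProgram",
#                               root_dir=os.path.dirname(__file__),
#                               args=["--help"],
#                               msg="myProgram returned non-zero exit status")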
@classmethod
def _build_test_method(cls, executable, root_dir):
    """!Build a test method and attach to class.

    The method is built for the supplied executable located
    in the supplied root directory.

    cls._build_test_method(executable, root_dir)

    @param cls The class in which to create the tests.
    @param executable Name of executable. Can be absolute path.
    @param root_dir Path to executable. Not used if executable path is absolute.
    """
    if not os.path.isabs(executable):
        executable = os.path.abspath(os.path.join(root_dir, executable))

    test_name = "test_exe_" + executable.replace("/", "_")

    def test_executable_runs(*args):
        self = args[0]
        self.assertExecutable(executable)

    test_executable_runs.__name__ = test_name
    setattr(cls, test_name, test_executable_runs)
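# The setattr technique above in isolation (toy example, not from the
# original source): build a test method at runtime and attach it to a
# TestCase class so the unittest loader discovers it by name.
#
#     def _make_test(value):
#         def test(self):
#             self.assertTrue(value)
#         return test
#
#     class DynamicTests(unittest.TestCase):
#         pass
#
#     setattr(DynamicTests, "test_generated", _make_test(True))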
281 """!Discover executables to test and create corresponding test methods.
283 Scans the directory containing the supplied reference file
284 (usually __file__ supplied from the test class) and look for
285 executables. If executables are found a test method is created
286 for each one. That test method will run the executable and
287 check the returned value.
289 Executable scripts with a .py extension and shared libraries
290 are ignored by the scanner.
292 This class method must be called before test discovery.
294 cls.discover_tests(__file__)
296 The list of executables can be overridden by passing in a
297 sequence of explicit executables that should be tested.
298 If an item in the sequence can not be found the
299 test will be configured to skip rather than fail.
303 ref_dir = os.path.abspath(os.path.dirname(ref_file))
305 if executables
is None:
308 for root, dirs, files
in os.walk(ref_dir):
311 if not f.endswith(
".py")
and not f.endswith(
".so"):
312 full_path = os.path.join(root, f)
313 if os.access(full_path, os.X_OK):
314 executables.append(full_path)
320 cls.TESTS_DISCOVERED = len(executables)
323 for e
in executables:
324 cls._build_test_method(e, ref_dir)
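# Registration sketch following the class docstring above (the subclass name
# is illustrative): subclass ExecutablesTestCase in a test file and register
# the discovered executables before test discovery runs.
#
#     class BinaryTester(lsst.utils.tests.ExecutablesTestCase):
#         pass
#
#     BinaryTester.discover_tests(__file__)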
328 """!Find file which is specified as a path relative to the toplevel directory;
329 we start in $cwd and walk up until we find the file (or throw IOError if it doesn't exist)
331 This is useful for running tests that may be run from _dir_/tests or _dir_"""
333 if os.path.isfile(ifile):
339 dirname, basename = os.path.split(file)
341 ofile = os.path.join(basename, ofile)
345 if os.path.isfile(ofile):
350 raise IOError(
"Can't find %s" % ifile)
355 """!Return a path suitable for a temporary file and try to delete the file on success
357 If the with block completes successfully then the file is deleted, if possible;
358 failure results in a printed warning.
359 If the block exits with an exception the file if left on disk so it can be examined.
361 @param[in] ext file name extension, e.g. ".fits"
362 @return path for a temporary file. The path is a combination of the caller's file path
363 and the name of the top-level function, as per this simple example:
365 # file tests/testFoo.py
367 import lsst.utils.tests
368 class FooTestCase(unittest.TestCase):
369 def testBasics(self):
373 with lsst.utils.tests.getTempFilePath(".fits") as tmpFile:
374 # if tests/.tests exists then tmpFile = "tests/.tests/testFoo_testBasics.fits"
375 # otherwise tmpFile = "testFoo_testBasics.fits"
377 # at the end of this "with" block the path tmpFile will be deleted, but only if
378 # the file exists and the "with" block terminated normally (rather than with an exception)
382 stack = inspect.stack()
384 for i
in range(2, len(stack)):
385 frameInfo = inspect.getframeinfo(stack[i][0])
387 callerFilePath = frameInfo.filename
388 callerFuncName = frameInfo.function
389 elif callerFilePath == frameInfo.filename:
391 callerFuncName = frameInfo.function
395 callerDir, callerFileNameWithExt = os.path.split(callerFilePath)
396 callerFileName = os.path.splitext(callerFileNameWithExt)[0]
397 outDir = os.path.join(callerDir,
".tests")
398 if not os.path.isdir(outDir):
400 outName =
"%s_%s%s" % (callerFileName, callerFuncName, ext)
401 outPath = os.path.join(outDir, outName)
403 if os.path.isfile(outPath):
407 print(
"Warning: could not remove file %r: %s" % (outPath, e))
409 print(
"Warning: could not find file %r" % (outPath,))
413 """!Subclass of unittest.TestCase that adds some custom assertions for
419 """!A decorator to add a free function to our custom TestCase class, while also
420 making it available as a free function.
422 setattr(TestCase, func.__name__, func)
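# Usage sketch for the inTestCase decorator (the assertion below is
# illustrative): the decorated function stays callable as a free function and
# also becomes available as a method on TestCase.
#
#     @inTestCase
#     def assertSpam(testCase, value):
#         testCase.assertEqual(value, "spam")
#
#     # ...later, inside a TestCase method:
#     #     self.assertSpam(someValue)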
warnings.warn("assertRaisesLsstCpp is deprecated; please just use TestCase.assertRaises",
              DeprecationWarning, stacklevel=2)
return testcase.assertRaises(excClass, callableObj, *args, **kwargs)
434 """!Decorator to enter the debugger when there's an uncaught exception
436 To use, just slap a "@debugger()" on your function.
438 You may provide specific exception classes to catch as arguments to
439 the decorator function, e.g., "@debugger(RuntimeError, NotImplementedError)".
440 This defaults to just 'AssertionError', for use on unittest.TestCase methods.
442 Code provided by "Rosh Oxymoron" on StackOverflow:
443 http://stackoverflow.com/questions/4398967/python-unit-testing-automatically-running-the-debugger-when-a-test-fails
446 exceptions = (AssertionError, )
450 def wrapper(*args, **kwargs):
452 return f(*args, **kwargs)
456 pdb.post_mortem(sys.exc_info()[2])
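# Usage sketch for the debugger decorator (per the docstring above; the test
# method and computeThing() are hypothetical): a failing assertion drops the
# test into pdb's post-mortem prompt instead of just reporting a failure.
#
#     class SomeTest(unittest.TestCase):
#         @debugger()
#         def testThing(self):
#             self.assertEqual(computeThing(), 42)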
462 """!Plot the comparison of two 2-d NumPy arrays.
464 NOTE: this method uses matplotlib and imports it internally; it should be
465 wrapped in a try/except block within packages that do not depend on
466 matplotlib (including utils).
468 @param[in] lhs LHS values to compare; a 2-d NumPy array
469 @param[in] rhs RHS values to compare; a 2-d NumPy array
470 @param[in] bad A 2-d boolean NumPy array of values to emphasize in the plots
471 @param[in] diff difference array; a 2-d NumPy array, or None to show lhs-rhs
472 @param[in] plotFileName Filename to save the plot to. If None, the plot will be displayed in a
475 from matplotlib
import pyplot
481 badImage = numpy.zeros(bad.shape + (4,), dtype=numpy.uint8)
482 badImage[:, :, 0] = 255
483 badImage[:, :, 1] = 0
484 badImage[:, :, 2] = 0
485 badImage[:, :, 3] = 255*bad
486 vmin1 = numpy.minimum(numpy.min(lhs), numpy.min(rhs))
487 vmax1 = numpy.maximum(numpy.max(lhs), numpy.max(rhs))
488 vmin2 = numpy.min(diff)
489 vmax2 = numpy.max(diff)
490 for n, (image, title)
in enumerate([(lhs,
"lhs"), (rhs,
"rhs"), (diff,
"diff")]):
491 pyplot.subplot(2, 3, n + 1)
492 im1 = pyplot.imshow(image, cmap=pyplot.cm.gray, interpolation=
'nearest', origin=
'lower',
493 vmin=vmin1, vmax=vmax1)
495 pyplot.imshow(badImage, alpha=0.2, interpolation=
'nearest', origin=
'lower')
498 pyplot.subplot(2, 3, n + 4)
499 im2 = pyplot.imshow(image, cmap=pyplot.cm.gray, interpolation=
'nearest', origin=
'lower',
500 vmin=vmin2, vmax=vmax2)
502 pyplot.imshow(badImage, alpha=0.2, interpolation=
'nearest', origin=
'lower')
505 pyplot.subplots_adjust(left=0.05, bottom=0.05, top=0.92, right=0.75, wspace=0.05, hspace=0.05)
506 cax1 = pyplot.axes([0.8, 0.55, 0.05, 0.4])
507 pyplot.colorbar(im1, cax=cax1)
508 cax2 = pyplot.axes([0.8, 0.05, 0.05, 0.4])
509 pyplot.colorbar(im2, cax=cax2)
511 pyplot.savefig(plotFileName)
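# Usage sketch for plotImageDiff() with synthetic arrays (matplotlib must be
# importable, per the note in the docstring; the threshold and file name are
# arbitrary):
#
#     lhs = numpy.random.rand(10, 10)
#     rhs = lhs + 1e-8*numpy.random.rand(10, 10)
#     bad = numpy.abs(lhs - rhs) > 5e-9
#     plotImageDiff(lhs, rhs, bad, plotFileName="diff.png")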
def assertFloatsAlmostEqual(testCase, lhs, rhs, rtol=sys.float_info.epsilon,
                            atol=sys.float_info.epsilon, relTo=None,
                            printFailures=True, plotOnFailure=False,
                            plotFileName=None, invert=False, msg=None):
    """!Highly-configurable floating point comparisons for scalars and arrays.

    The test assertion will fail if all elements lhs and rhs are not equal to within the tolerances
    specified by rtol and atol. More precisely, the comparison is:

    abs(lhs - rhs) <= relTo*rtol OR abs(lhs - rhs) <= atol

    If rtol or atol is None, that term in the comparison is not performed at all.

    When not specified, relTo is the elementwise maximum of the absolute values of lhs and rhs. If
    set manually, it should usually be set to either lhs or rhs, or a scalar value typical of what

    @param[in] testCase unittest.TestCase instance the test is part of
    @param[in] lhs LHS value(s) to compare; may be a scalar or array-like of any dimension
    @param[in] rhs RHS value(s) to compare; may be a scalar or array-like of any dimension
    @param[in] rtol Relative tolerance for comparison; defaults to double-precision epsilon.
    @param[in] atol Absolute tolerance for comparison; defaults to double-precision epsilon.
    @param[in] relTo Value to which comparison with rtol is relative.
    @param[in] printFailures Upon failure, print all inequal elements as part of the message.
    @param[in] plotOnFailure Upon failure, plot the originals and their residual with matplotlib.
    Only 2-d arrays are supported.
    @param[in] plotFileName Filename to save the plot to. If None, the plot will be displayed in a
    window.
    @param[in] invert If True, invert the comparison and fail only if any elements *are* equal.
    Used to implement assertFloatsNotEqual, which should generally be used instead
    @param[in] msg String to append to the error message when assert fails.
    """
    if not numpy.isfinite(lhs).all():
        testCase.fail("Non-finite values in lhs")
    if not numpy.isfinite(rhs).all():
        testCase.fail("Non-finite values in rhs")

    absDiff = numpy.abs(lhs - rhs)
    if relTo is None:
        relTo = numpy.maximum(numpy.abs(lhs), numpy.abs(rhs))
    else:
        relTo = numpy.abs(relTo)
    if rtol is not None:
        bad = absDiff > rtol*relTo
        if atol is not None:
            bad = numpy.logical_and(bad, absDiff > atol)
    elif atol is None:
        raise ValueError("rtol and atol cannot both be None")

    failed = numpy.any(bad)
    bad = numpy.logical_not(bad)
    failStr = "are the same"

    if numpy.isscalar(bad):
        errMsg = ["%s %s %s; diff=%s with atol=%s"
                  % (lhs, cmpStr, rhs, absDiff, atol)]
        errMsg = ["%s %s %s; diff=%s/%s=%s with rtol=%s"
                  % (lhs, cmpStr, rhs, absDiff, relTo, absDiff/relTo, rtol)]
        errMsg = ["%s %s %s; diff=%s/%s=%s with rtol=%s, atol=%s"
                  % (lhs, cmpStr, rhs, absDiff, relTo, absDiff/relTo, rtol, atol)]
    errMsg = ["%d/%d elements %s with rtol=%s, atol=%s"
              % (bad.sum(), bad.size, failStr, rtol, atol)]
    if len(lhs.shape) != 2 or len(rhs.shape) != 2:
        raise ValueError("plotOnFailure is only valid for 2-d arrays")
    plotImageDiff(lhs, rhs, bad, diff=diff, plotFileName=plotFileName)
    errMsg.append("Failure plot requested but matplotlib could not be imported.")

    if numpy.isscalar(relTo):
        relTo = numpy.ones(bad.shape, dtype=float) * relTo
    if numpy.isscalar(lhs):
        lhs = numpy.ones(bad.shape, dtype=float) * lhs
    if numpy.isscalar(rhs):
        rhs = numpy.ones(bad.shape, dtype=float) * rhs
    for a, b, diff in zip(lhs[bad], rhs[bad], absDiff[bad]):
        errMsg.append("%s %s %s (diff=%s)" % (a, cmpStr, b, diff))
    for a, b, diff, rel in zip(lhs[bad], rhs[bad], absDiff[bad], relTo[bad]):
        errMsg.append("%s %s %s (diff=%s/%s=%s)" % (a, cmpStr, b, diff, rel, diff/rel))

    testCase.assertFalse(failed, msg="\n".join(errMsg))
"""
Fail a test if the given floating point values are equal to within the given tolerances.

See assertClose for more information.
"""

"""
Assert that lhs == rhs (both numeric types, whether scalar or array).

See assertClose (called with rtol=atol=0) for more information.
"""

warnings.warn("assertClose is deprecated; please use TestCase.assertFloatsAlmostEqual",
              DeprecationWarning, stacklevel=2)

warnings.warn("assertNotClose is deprecated; please use TestCase.assertFloatsNotEqual",
              DeprecationWarning, stacklevel=2)