23 """Support code for running unit tests""" 24 from __future__
import print_function
25 from __future__
import division
26 from builtins
import zip
27 from builtins
import range
29 from contextlib
import contextmanager
47 import lsst.daf.base
as dafBase


def _get_open_files():
    """Return a set containing the list of open files."""
    if psutil is None:
        return set()
    return set(p.path for p in psutil.Process().open_files())
69 """Initialize the memory tester""" 73 memId0 = dafBase.Citizen.getNextMemId()
75 open_files = _get_open_files()


def run(suite, exit=True):
    """!Exit with the status code resulting from running the provided test suite"""
    status = 0 if unittest.TextTestRunner().run(suite).wasSuccessful() else 1
    if exit:
        sys.exit(status)
    return status
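
# Illustrative sketch (not part of the original module): a test file can drive
# ``run`` directly; ``MyTestCase`` is a hypothetical test class.
#
#   if __name__ == "__main__":
#       lsst.utils.tests.init()
#       suite = unittest.defaultTestLoader.loadTestsFromTestCase(MyTestCase)
#       lsst.utils.tests.run(suite)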
93 """!Go through the supplied sequence of test suites and sort them to ensure that 94 MemoryTestCases are at the end of the test list. Returns a combined 97 suite = unittest.TestSuite()
99 for test_suite
in tests:
106 for method
in test_suite:
107 bases = inspect.getmro(method.__class__)
109 if bases
is not None and MemoryTestCase
in bases:
110 memtests.append(test_suite)
112 suite.addTests(test_suite)
114 if isinstance(test_suite, MemoryTestCase):
115 memtests.append(test_suite)
117 suite.addTest(test_suite)
118 suite.addTests(memtests)
129 unittest.defaultTestLoader.suiteClass = suiteClassWrapper
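
# Sketch of the resulting ordering (hypothetical classes): when a file defines
# both ordinary tests and a MemoryTestCase subclass, the wrapper above makes the
# memory checks run last.
#
#   loader = unittest.defaultTestLoader
#   combined = sort_tests([loader.loadTestsFromTestCase(SomeTests),
#                          loader.loadTestsFromTestCase(SomeMemoryTests)])
#   unittest.TextTestRunner().run(combined)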
133 """!Check for memory leaks since memId0 was allocated""" 140 """!Reset the leak counter when the tests have been completed""" 144 """!Check for memory leaks in the preceding tests""" 147 global memId0, nleakPrintMax
148 nleak = dafBase.Citizen.census(0, memId0)
150 plural =
"s" if nleak != 1
else "" 151 print(
"\n%d Object%s leaked:" % (nleak, plural))
153 if nleak <= nleakPrintMax:
154 print(dafBase.Citizen.census(memId0))
156 census = dafBase.Citizen.census()
158 for i
in range(nleakPrintMax - 1, -1, -1):
159 print(census[i].repr())
161 self.fail(
"Leaked %d block%s" % (nleak, plural))

    def testFileDescriptorLeaks(self):
        """!Check that no file descriptors have been left open by the preceding tests"""
        if psutil is None:
            self.skipTest("Unable to test file descriptor leaks. psutil unavailable.")
        now_open = _get_open_files()

        # Some files are opened outside of our control (fonts, system caches,
        # logging); ignore them when looking for leaks.
        now_open = set(f for f in now_open if not f.endswith(".car") and
                       not f.endswith(".ttf") and
                       f != "/var/lib/sss/mc/passwd" and
                       not f.endswith("astropy.log"))

        diff = now_open.difference(open_files)
        if diff:
            for f in diff:
                print("File open: %s" % f)
            self.fail("Failed to close %d file%s" % (len(diff),
                                                     "s" if len(diff) != 1 else ""))
184 """!Test that executables can be run and return good status. 186 The test methods are dynamically created. Callers 187 must subclass this class in their own test file and invoke 188 the create_executable_tests() class method to register the tests. 190 TESTS_DISCOVERED = -1
194 """Abort testing if automated test creation was enabled and 195 yet not tests were found.""" 198 raise Exception(
"No executables discovered.")
201 """This test exists to ensure that there is at least one test to be 202 executed. This allows the test runner to trigger the class set up 203 machinery to test whether there are some executables to test.""" 207 """!Check an executable runs and returns good status. 209 @param executable: Path to an executable. root_dir is not used 210 if this is an absolute path. 212 @param root_dir: Directory containing exe. Ignored if None. 214 @param args: List or tuple of arguments to be provided to the 217 @param msg: Message to use when the test fails. Can be None for 220 Prints output to standard out. On bad exit status the test 221 fails. If the executable can not be located the test is skipped. 224 if root_dir
is not None and not os.path.isabs(executable):
225 executable = os.path.join(root_dir, executable)
228 sp_args = [executable]
229 argstr =
"no arguments" 232 argstr =
'arguments "' +
" ".join(args) +
'"' 234 print(
"Running executable '{}' with {}...".format(executable, argstr))
235 if not os.path.exists(executable):
236 self.skipTest(
"Executable {} is unexpectedly missing".format(executable))
239 output = subprocess.check_output(sp_args)
240 except subprocess.CalledProcessError
as e:
242 failmsg =
"Bad exit status from '{}': {}".format(executable, e.returncode)
243 print(output.decode(
'utf-8'))

    @classmethod
    def _build_test_method(cls, executable, root_dir):
        """!Build a test method and attach to class.

        The method is built for the supplied executable located
        in the supplied root directory.

            cls._build_test_method(executable, root_dir)

        @param cls The class in which to create the tests.

        @param executable Name of executable. Can be absolute path.

        @param root_dir Path to executable. Not used if executable path is absolute.
        """
        if not os.path.isabs(executable):
            executable = os.path.abspath(os.path.join(root_dir, executable))

        # Create the test name from the executable path.
        test_name = "test_exe_" + executable.replace("/", "_")

        # A closure is used so that the executable path is bound at definition time.
        def test_executable_runs(*args):
            self = args[0]
            self.assertExecutable(executable)

        # Give the method a sensible name and attach it to the class.
        test_executable_runs.__name__ = test_name
        setattr(cls, test_name, test_executable_runs)
281 """!Discover executables to test and create corresponding test methods. 283 Scans the directory containing the supplied reference file 284 (usually __file__ supplied from the test class) to look for 285 executables. If executables are found a test method is created 286 for each one. That test method will run the executable and 287 check the returned value. 289 Executable scripts with a .py extension and shared libraries 290 are ignored by the scanner. 292 This class method must be called before test discovery. 296 cls.create_executable_tests(__file__) 298 The list of executables can be overridden by passing in a 299 sequence of explicit executables that should be tested. 300 If an item in the sequence can not be found the 301 test will be configured to skip rather than fail. 305 ref_dir = os.path.abspath(os.path.dirname(ref_file))
307 if executables
is None:
310 for root, dirs, files
in os.walk(ref_dir):
313 if not f.endswith(
".py")
and not f.endswith(
".so"):
314 full_path = os.path.join(root, f)
315 if os.access(full_path, os.X_OK):
316 executables.append(full_path)
325 for e
in executables:
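
# Sketch of a test file that registers executable tests (hypothetical module;
# the class name and layout are illustrative, not mandated by this module):
#
#   import unittest
#   import lsst.utils.tests
#
#   class BinaryTester(lsst.utils.tests.ExecutablesTestCase):
#       pass
#
#   BinaryTester.create_executable_tests(__file__)
#
#   if __name__ == "__main__":
#       unittest.main()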
330 """!Find file which is specified as a path relative to the toplevel directory; 331 we start in $cwd and walk up until we find the file (or throw IOError if it doesn't exist) 333 This is useful for running tests that may be run from _dir_/tests or _dir_""" 335 if os.path.isfile(ifile):
341 dirname, basename = os.path.split(file)
343 ofile = os.path.join(basename, ofile)
347 if os.path.isfile(ofile):
352 raise IOError(
"Can't find %s" % ifile)
357 """!Return a path suitable for a temporary file and try to delete the file on success 359 If the with block completes successfully then the file is deleted, if possible; 360 failure results in a printed warning. 361 If the block exits with an exception the file if left on disk so it can be examined. 363 @param[in] ext file name extension, e.g. ".fits" 364 @return path for a temporary file. The path is a combination of the caller's file path 365 and the name of the top-level function, as per this simple example: 367 # file tests/testFoo.py 369 import lsst.utils.tests 370 class FooTestCase(unittest.TestCase): 371 def testBasics(self): 375 with lsst.utils.tests.getTempFilePath(".fits") as tmpFile: 376 # if tests/.tests exists then tmpFile = "tests/.tests/testFoo_testBasics.fits" 377 # otherwise tmpFile = "testFoo_testBasics.fits" 379 # at the end of this "with" block the path tmpFile will be deleted, but only if 380 # the file exists and the "with" block terminated normally (rather than with an exception) 384 stack = inspect.stack()
386 for i
in range(2, len(stack)):
387 frameInfo = inspect.getframeinfo(stack[i][0])
389 callerFilePath = frameInfo.filename
390 callerFuncName = frameInfo.function
391 elif callerFilePath == frameInfo.filename:
393 callerFuncName = frameInfo.function
397 callerDir, callerFileNameWithExt = os.path.split(callerFilePath)
398 callerFileName = os.path.splitext(callerFileNameWithExt)[0]
399 outDir = os.path.join(callerDir,
".tests")
400 if not os.path.isdir(outDir):
402 outName =
"%s_%s%s" % (callerFileName, callerFuncName, ext)
403 outPath = os.path.join(outDir, outName)
405 if os.path.isfile(outPath):
409 print(
"Warning: could not remove file %r: %s" % (outPath, e))
411 print(
"Warning: could not find file %r" % (outPath,))
415 """!Subclass of unittest.TestCase that adds some custom assertions for 421 """!A decorator to add a free function to our custom TestCase class, while also 422 making it available as a free function. 424 setattr(TestCase, func.__name__, func)


@inTestCase
def assertRaisesLsstCpp(testcase, excClass, callableObj, *args, **kwargs):
    warnings.warn("assertRaisesLsstCpp is deprecated; please just use TestCase.assertRaises",
                  DeprecationWarning, stacklevel=2)
    return testcase.assertRaises(excClass, callableObj, *args, **kwargs)
436 """!Decorator to enter the debugger when there's an uncaught exception 438 To use, just slap a "@debugger()" on your function. 440 You may provide specific exception classes to catch as arguments to 441 the decorator function, e.g., "@debugger(RuntimeError, NotImplementedError)". 442 This defaults to just 'AssertionError', for use on unittest.TestCase methods. 444 Code provided by "Rosh Oxymoron" on StackOverflow: 445 http://stackoverflow.com/questions/4398967/python-unit-testing-automatically-running-the-debugger-when-a-test-fails 448 exceptions = (AssertionError, )
452 def wrapper(*args, **kwargs):
454 return f(*args, **kwargs)
458 pdb.post_mortem(sys.exc_info()[2])
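
# Sketch: drop into pdb when an assertion fails in a test method
# (``computeAnswer`` is a hypothetical function):
#
#   class AnswerTest(lsst.utils.tests.TestCase):
#       @debugger()
#       def testAnswer(self):
#           self.assertEqual(computeAnswer(), 42)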
464 """!Plot the comparison of two 2-d NumPy arrays. 466 NOTE: this method uses matplotlib and imports it internally; it should be 467 wrapped in a try/except block within packages that do not depend on 468 matplotlib (including utils). 470 @param[in] lhs LHS values to compare; a 2-d NumPy array 471 @param[in] rhs RHS values to compare; a 2-d NumPy array 472 @param[in] bad A 2-d boolean NumPy array of values to emphasize in the plots 473 @param[in] diff difference array; a 2-d NumPy array, or None to show lhs-rhs 474 @param[in] plotFileName Filename to save the plot to. If None, the plot will be displayed in a 477 from matplotlib
import pyplot
483 badImage = numpy.zeros(bad.shape + (4,), dtype=numpy.uint8)
484 badImage[:, :, 0] = 255
485 badImage[:, :, 1] = 0
486 badImage[:, :, 2] = 0
487 badImage[:, :, 3] = 255*bad
488 vmin1 = numpy.minimum(numpy.min(lhs), numpy.min(rhs))
489 vmax1 = numpy.maximum(numpy.max(lhs), numpy.max(rhs))
490 vmin2 = numpy.min(diff)
491 vmax2 = numpy.max(diff)
492 for n, (image, title)
in enumerate([(lhs,
"lhs"), (rhs,
"rhs"), (diff,
"diff")]):
493 pyplot.subplot(2, 3, n + 1)
494 im1 = pyplot.imshow(image, cmap=pyplot.cm.gray, interpolation=
'nearest', origin=
'lower',
495 vmin=vmin1, vmax=vmax1)
497 pyplot.imshow(badImage, alpha=0.2, interpolation=
'nearest', origin=
'lower')
500 pyplot.subplot(2, 3, n + 4)
501 im2 = pyplot.imshow(image, cmap=pyplot.cm.gray, interpolation=
'nearest', origin=
'lower',
502 vmin=vmin2, vmax=vmax2)
504 pyplot.imshow(badImage, alpha=0.2, interpolation=
'nearest', origin=
'lower')
507 pyplot.subplots_adjust(left=0.05, bottom=0.05, top=0.92, right=0.75, wspace=0.05, hspace=0.05)
508 cax1 = pyplot.axes([0.8, 0.55, 0.05, 0.4])
509 pyplot.colorbar(im1, cax=cax1)
510 cax2 = pyplot.axes([0.8, 0.05, 0.05, 0.4])
511 pyplot.colorbar(im2, cax=cax2)
513 pyplot.savefig(plotFileName)
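
# Sketch of calling the plotting helper directly (synthetic data):
#
#   rng = numpy.random.RandomState(42)
#   a = rng.rand(10, 10)
#   b = a + 1e-3 * rng.rand(10, 10)
#   plotImageDiff(a, b, bad=(numpy.abs(a - b) > 5e-4), plotFileName="diff.png")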


@inTestCase
def assertFloatsAlmostEqual(testCase, lhs, rhs, rtol=sys.float_info.epsilon,
                            atol=sys.float_info.epsilon, relTo=None,
                            printFailures=True, plotOnFailure=False,
                            plotFileName=None, invert=False, msg=None):
    """!Highly-configurable floating point comparisons for scalars and arrays.

    The test assertion will fail if all elements lhs and rhs are not equal to within the tolerances
    specified by rtol and atol. More precisely, the comparison is:

        abs(lhs - rhs) <= relTo*rtol OR abs(lhs - rhs) <= atol

    If rtol or atol is None, that term in the comparison is not performed at all.

    When not specified, relTo is the elementwise maximum of the absolute values of lhs and rhs. If
    set manually, it should usually be set to either lhs or rhs, or a scalar value typical of what
    is expected.

    @param[in] testCase      unittest.TestCase instance the test is part of
    @param[in] lhs           LHS value(s) to compare; may be a scalar or array-like of any dimension
    @param[in] rhs           RHS value(s) to compare; may be a scalar or array-like of any dimension
    @param[in] rtol          Relative tolerance for comparison; defaults to double-precision epsilon.
    @param[in] atol          Absolute tolerance for comparison; defaults to double-precision epsilon.
    @param[in] relTo         Value to which comparison with rtol is relative.
    @param[in] printFailures Upon failure, print all inequal elements as part of the message.
    @param[in] plotOnFailure Upon failure, plot the originals and their residual with matplotlib.
                             Only 2-d arrays are supported.
    @param[in] plotFileName  Filename to save the plot to. If None, the plot will be displayed in a
                             window.
    @param[in] invert        If True, invert the comparison and fail only if any elements *are* equal.
                             Used to implement assertFloatsNotEqual, which should generally be used instead.
    @param[in] msg           String to append to the error message when assert fails.
    """
    if not numpy.isfinite(lhs).all():
        testCase.fail("Non-finite values in lhs")
    if not numpy.isfinite(rhs).all():
        testCase.fail("Non-finite values in rhs")
    diff = lhs - rhs
    absDiff = numpy.abs(lhs - rhs)
    if rtol is not None:
        if relTo is None:
            relTo = numpy.maximum(numpy.abs(lhs), numpy.abs(rhs))
        else:
            relTo = numpy.abs(relTo)
        bad = absDiff > rtol*relTo
        if atol is not None:
            bad = numpy.logical_and(bad, absDiff > atol)
    else:
        if atol is None:
            raise ValueError("rtol and atol cannot both be None")
        bad = absDiff > atol
    failed = numpy.any(bad)
    if invert:
        failed = not failed
        bad = numpy.logical_not(bad)
        cmpStr = "=="
        failStr = "are the same"
    else:
        cmpStr = "!="
        failStr = "differ"
    errMsg = []
    if failed:
        if numpy.isscalar(bad):
            if rtol is None:
                errMsg = ["%s %s %s; diff=%s with atol=%s"
                          % (lhs, cmpStr, rhs, absDiff, atol)]
            elif atol is None:
                errMsg = ["%s %s %s; diff=%s/%s=%s with rtol=%s"
                          % (lhs, cmpStr, rhs, absDiff, relTo, absDiff/relTo, rtol)]
            else:
                errMsg = ["%s %s %s; diff=%s/%s=%s with rtol=%s, atol=%s"
                          % (lhs, cmpStr, rhs, absDiff, relTo, absDiff/relTo, rtol, atol)]
        else:
            errMsg = ["%d/%d elements %s with rtol=%s, atol=%s"
                      % (bad.sum(), bad.size, failStr, rtol, atol)]
            if plotOnFailure:
                if len(lhs.shape) != 2 or len(rhs.shape) != 2:
                    raise ValueError("plotOnFailure is only valid for 2-d arrays")
                try:
                    plotImageDiff(lhs, rhs, bad, diff=diff, plotFileName=plotFileName)
                except ImportError:
                    errMsg.append("Failure plot requested but matplotlib could not be imported.")
            if printFailures:
                # Broadcast any scalar operands to arrays so everything can be indexed with 'bad'.
                if numpy.isscalar(relTo):
                    relTo = numpy.ones(bad.shape, dtype=float) * relTo
                if numpy.isscalar(lhs):
                    lhs = numpy.ones(bad.shape, dtype=float) * lhs
                if numpy.isscalar(rhs):
                    rhs = numpy.ones(bad.shape, dtype=float) * rhs
                if rtol is None:
                    for a, b, diff in zip(lhs[bad], rhs[bad], absDiff[bad]):
                        errMsg.append("%s %s %s (diff=%s)" % (a, cmpStr, b, diff))
                else:
                    for a, b, diff, rel in zip(lhs[bad], rhs[bad], absDiff[bad], relTo[bad]):
                        errMsg.append("%s %s %s (diff=%s/%s=%s)" % (a, cmpStr, b, diff, rel, diff/rel))
    if msg is not None:
        errMsg.append(msg)
    testCase.assertFalse(failed, msg="\n".join(errMsg))


@inTestCase
def assertFloatsNotEqual(testCase, lhs, rhs, **kwds):
    """!
    Fail a test if the given floating point values are equal to within the given tolerances.

    See assertFloatsAlmostEqual (called with invert=True) for more information.
    """
    return assertFloatsAlmostEqual(testCase, lhs, rhs, invert=True, **kwds)


@inTestCase
def assertFloatsEqual(testCase, lhs, rhs, **kwargs):
    """!
    Assert that lhs == rhs (both numeric types, whether scalar or array).

    See assertFloatsAlmostEqual (called with rtol=atol=0) for more information.
    """
    return assertFloatsAlmostEqual(testCase, lhs, rhs, rtol=0, atol=0, **kwargs)


@inTestCase
def assertClose(*args, **kwargs):
    warnings.warn("assertClose is deprecated; please use TestCase.assertFloatsAlmostEqual",
                  DeprecationWarning, stacklevel=2)
    return assertFloatsAlmostEqual(*args, **kwargs)


@inTestCase
def assertNotClose(*args, **kwargs):
    warnings.warn("assertNotClose is deprecated; please use TestCase.assertFloatsNotEqual",
                  DeprecationWarning, stacklevel=2)
    return assertFloatsNotEqual(*args, **kwargs)


# ---------------------------------------------------------------------------
# Summary of the helpers defined in this module:
#
#   run(suite, exit=True)
#       Exit with the status code resulting from running the provided test suite.
#   sort_tests(tests)
#       Go through the supplied sequence of test suites and sort them to ensure
#       that MemoryTestCases are at the end of the test list.
#   suiteClassWrapper(tests)
#       Wrap sort_tests for use as unittest.defaultTestLoader.suiteClass.
#   class MemoryTestCase
#       Check for memory leaks since memId0 was allocated.
#       tearDownClass(cls): reset the leak counter when the tests have been completed.
#       testLeaks(self): check for memory leaks in the preceding tests.
#       testFileDescriptorLeaks(self): check for file descriptor leaks.
#   class ExecutablesTestCase
#       Test that executables can be run and return good status.
#       assertExecutable(self, executable, root_dir=None, args=None, msg=None)
#       create_executable_tests(cls, ref_file, executables=None)
#       _build_test_method(cls, executable, root_dir)
#   findFileFromRoot(ifile)
#       Find a file specified as a path relative to the toplevel directory; start
#       in $cwd and walk up until the file is found.
#   getTempFilePath(ext)
#       Return a path suitable for a temporary file and try to delete the file on success.
#   class TestCase
#       Subclass of unittest.TestCase that adds some custom assertions for convenience.
#   inTestCase(func)
#       Decorator to add a free function to our custom TestCase class, while also
#       making it available as a free function.
#   assertRaisesLsstCpp(testcase, excClass, callableObj, *args, **kwargs)  [deprecated]
#   debugger(*exceptions)
#       Decorator to enter the debugger when there's an uncaught exception.
#   plotImageDiff(lhs, rhs, bad=None, diff=None, plotFileName=None)
#       Plot the comparison of two 2-d NumPy arrays.
#   assertFloatsAlmostEqual(testCase, lhs, rhs, rtol=sys.float_info.epsilon,
#                           atol=sys.float_info.epsilon, relTo=None, printFailures=True,
#                           plotOnFailure=False, plotFileName=None, invert=False, msg=None)
#       Highly-configurable floating point comparisons for scalars and arrays.
#   assertFloatsEqual(testCase, lhs, rhs, **kwargs)
#   assertFloatsNotEqual(testCase, lhs, rhs, **kwds)
#   assertClose / assertNotClose  [deprecated aliases]