23 """Support code for running unit tests""" 24 from __future__
import print_function
25 from __future__
import division
26 from builtins
import zip
27 from builtins
import range
29 from contextlib
import contextmanager
47 import lsst.daf.base
as dafBase
def _get_open_files():
    """Return a set containing the list of open files."""
    # One path per file handle currently held by this process.
    # NOTE(review): assumes psutil was importable at module scope — confirm
    # the module-level guard before calling this without psutil installed.
    return {proc_file.path for proc_file in psutil.Process().open_files()}
def init():
    """Initialize the memory tester.

    Records the current Citizen memory id and the set of currently open
    files as baselines; MemoryTestCase later reports anything allocated or
    opened after this point as a leak.
    """
    # Without the ``global`` declarations these assignments would create
    # useless function-local names; testLeaks reads the module globals.
    global memId0
    memId0 = dafBase.Citizen.getNextMemId()  # used by MemoryTestCase
    global open_files
    open_files = _get_open_files()  # used by MemoryTestCase
def run(suite, exit=True):
    """!Exit with the status code resulting from running the provided test suite.

    @param suite  unittest.TestSuite (or TestCase) to run.
    @param exit   If True, call sys.exit() with the resulting status;
                  if False, return the status instead.
    @return 0 on success, 1 on failure (only when exit is False).
    """
    if unittest.TextTestRunner().run(suite).wasSuccessful():
        status = 0
    else:
        status = 1

    if exit:
        sys.exit(status)
    else:
        return status
def sort_tests(tests):
    """!Go through the supplied sequence of test suites and sort them to ensure that
    MemoryTestCases are at the end of the test list. Returns a combined
    TestSuite.

    @param tests  Iterable of test suites (or a single suite of test cases).
    @return unittest.TestSuite with MemoryTestCase-derived tests last.
    """
    suite = unittest.TestSuite()
    memtests = []
    for test_suite in tests:
        try:
            # Examine the first test in the suite to decide whether the
            # whole suite holds MemoryTestCase tests.
            for method in test_suite:
                bases = inspect.getmro(method.__class__)
                if bases is not None and MemoryTestCase in bases:
                    memtests.append(test_suite)
                else:
                    suite.addTests(test_suite)
                # Only the first test needs to be inspected.
                break
        except TypeError:
            # Not iterable: treat it as a single test case.
            if isinstance(test_suite, MemoryTestCase):
                memtests.append(test_suite)
            else:
                suite.addTest(test_suite)
    # Memory tests must run after everything else so the leak baselines
    # reflect the full test run.
    suite.addTests(memtests)
    return suite
129 unittest.defaultTestLoader.suiteClass = suiteClassWrapper
def testLeaks(self):
    """!Check for memory leaks in the preceding tests.

    Counts Citizen blocks allocated since memId0 (recorded by init());
    prints the leaked blocks (up to nleakPrintMax of them) and fails the
    test if any leaked.
    """
    global memId0, nleakPrintMax
    # Number of Citizen blocks allocated since the baseline.
    nleak = dafBase.Citizen.census(0, memId0)
    if nleak != 0:
        plural = "s" if nleak != 1 else ""
        print("\n%d Object%s leaked:" % (nleak, plural))
        if nleak <= nleakPrintMax:
            # Few enough leaks: print them all.
            print(dafBase.Citizen.census(memId0))
        else:
            # Too many to list in full: print the most recent ones.
            census = dafBase.Citizen.census()
            for i in range(nleakPrintMax - 1, -1, -1):
                print(census[i].repr())
        self.fail("Leaked %d block%s" % (nleak, plural))
def testFileDescriptorLeaks(self):
    """!Check that file descriptors have not been leaked since init() was called.

    Skips if psutil is unavailable; otherwise compares the currently open
    files against the baseline recorded by init() and fails if new files
    remain open (after filtering files opened outside our control).
    """
    # NOTE(review): the guard condition was elided in the extraction;
    # presumed to be a check that psutil imported successfully.
    if psutil is None:
        self.skipTest("Unable to test file descriptor leaks. psutil unavailable.")
    now_open = _get_open_files()

    # Some files are opened out of our control (fonts, system caches,
    # astropy logging); ignore them.
    now_open = set(f for f in now_open if not f.endswith(".car") and
                   not f.startswith("/proc/") and
                   not f.endswith(".ttf") and
                   f != "/var/lib/sss/mc/passwd" and
                   not f.endswith("astropy.log"))

    diff = now_open.difference(open_files)
    if diff:
        for f in diff:
            print("File open: %s" % f)
        self.fail("Failed to close %d file%s" % (len(diff),
                                                 "s" if len(diff) != 1 else ""))
# Number of test executables discovered by create_executable_tests();
# -1 means discovery has not been run yet.
TESTS_DISCOVERED = -1

def setUpClass(cls):
    """Abort testing if automated test creation was enabled and
    yet no tests were found."""
    if cls.TESTS_DISCOVERED == 0:
        raise Exception("No executables discovered.")
202 """This test exists to ensure that there is at least one test to be 203 executed. This allows the test runner to trigger the class set up 204 machinery to test whether there are some executables to test.""" 208 """!Check an executable runs and returns good status. 210 @param executable: Path to an executable. root_dir is not used 211 if this is an absolute path. 213 @param root_dir: Directory containing exe. Ignored if None. 215 @param args: List or tuple of arguments to be provided to the 218 @param msg: Message to use when the test fails. Can be None for 221 Prints output to standard out. On bad exit status the test 222 fails. If the executable can not be located the test is skipped. 225 if root_dir
is not None and not os.path.isabs(executable):
226 executable = os.path.join(root_dir, executable)
229 sp_args = [executable]
230 argstr =
"no arguments" 233 argstr =
'arguments "' +
" ".join(args) +
'"' 235 print(
"Running executable '{}' with {}...".format(executable, argstr))
236 if not os.path.exists(executable):
237 self.skipTest(
"Executable {} is unexpectedly missing".format(executable))
240 output = subprocess.check_output(sp_args)
241 except subprocess.CalledProcessError
as e:
243 failmsg =
"Bad exit status from '{}': {}".format(executable, e.returncode)
244 print(output.decode(
'utf-8'))
251 def _build_test_method(cls, executable, root_dir):
252 """!Build a test method and attach to class. 254 The method is built for the supplied excutable located 255 in the supplied root directory. 257 cls._build_test_method(root_dir, executable) 259 @param cls The class in which to create the tests. 261 @param executable Name of executable. Can be absolute path. 263 @param root_dir Path to executable. Not used if executable path is absolute. 265 if not os.path.isabs(executable):
266 executable = os.path.abspath(os.path.join(root_dir, executable))
269 test_name =
"test_exe_" + executable.replace(
"/",
"_")
272 def test_executable_runs(*args):
274 self.assertExecutable(executable)
277 test_executable_runs.__name__ = test_name
278 setattr(cls, test_name, test_executable_runs)
def create_executable_tests(cls, ref_file, executables=None):
    """!Discover executables to test and create corresponding test methods.

    Scans the directory containing the supplied reference file
    (usually __file__ supplied from the test class) to look for
    executables. If executables are found a test method is created
    for each one. That test method will run the executable and
    check the returned value.

    Executable scripts with a .py extension and shared libraries
    are ignored by the scanner.

    This class method must be called before test discovery:

        cls.create_executable_tests(__file__)

    @param ref_file    File whose directory anchors the scan.
    @param executables Optional explicit sequence of executables to test;
                       overrides scanning. Missing items cause a skip
                       rather than a failure.
    """
    ref_dir = os.path.abspath(os.path.dirname(ref_file))

    if executables is None:
        # Walk the tree looking for executable, non-Python, non-library files.
        executables = []
        for root, dirs, files in os.walk(ref_dir):
            for f in files:
                if not f.endswith(".py") and not f.endswith(".so"):
                    full_path = os.path.join(root, f)
                    if os.access(full_path, os.X_OK):
                        executables.append(full_path)

    # Record the count so setUpClass can abort when none were found.
    cls.TESTS_DISCOVERED = len(executables)

    # Create a test method for each executable.
    for e in executables:
        cls._build_test_method(e, ref_dir)
def findFileFromRoot(ifile):
    """!Find file which is specified as a path relative to the toplevel directory;
    we start in $cwd and walk up until we find the file (or throw IOError if it doesn't exist)

    This is useful for running tests that may be run from _dir_/tests or _dir_
    """
    if os.path.isfile(ifile):
        return ifile

    ofile = None
    # Renamed from ``file`` to avoid shadowing the (Python 2) builtin.
    path = ifile
    while path != "":
        dirname, basename = os.path.split(path)
        if ofile:
            ofile = os.path.join(basename, ofile)
        else:
            ofile = basename

        if os.path.isfile(ofile):
            return ofile

        if dirname == path:
            # Reached the filesystem root (os.path.split("/") == ("/", "")
            # would otherwise loop forever for absolute paths).
            break
        path = dirname

    raise IOError("Can't find %s" % ifile)
@contextmanager
def getTempFilePath(ext):
    """!Return a path suitable for a temporary file and try to delete the file on success

    If the with block completes successfully then the file is deleted, if possible;
    failure results in a printed warning.
    If the block exits with an exception the file is left on disk so it can be examined.

    @param[in] ext  file name extension, e.g. ".fits"
    @return path for a temporary file. The path is a combination of the caller's file path
    and the name of the top-level function, e.g. "tests/.tests/testFoo_testBasics.fits"
    if tests/.tests exists, otherwise "testFoo_testBasics.fits" in the current directory.
    """
    stack = inspect.stack()
    # Skip this frame and the contextlib wrapper, then find the outermost
    # function in the caller's file.
    for i in range(2, len(stack)):
        frameInfo = inspect.getframeinfo(stack[i][0])
        if i == 2:
            callerFilePath = frameInfo.filename
            callerFuncName = frameInfo.function
        elif callerFilePath == frameInfo.filename:
            # Still in the same file: prefer the outermost function name.
            callerFuncName = frameInfo.function
        else:
            break

    callerDir, callerFileNameWithExt = os.path.split(callerFilePath)
    callerFileName = os.path.splitext(callerFileNameWithExt)[0]
    outDir = os.path.join(callerDir, ".tests")
    if not os.path.isdir(outDir):
        # No .tests directory next to the caller: use the current directory.
        outDir = ""
    outName = "%s_%s%s" % (callerFileName, callerFuncName, ext)
    outPath = os.path.join(outDir, outName)
    yield outPath
    # Only reached when the with-block exited normally; an exception
    # propagates past the yield and deliberately leaves the file on disk.
    if os.path.isfile(outPath):
        try:
            os.remove(outPath)
        except OSError as e:
            print("Warning: could not remove file %r: %s" % (outPath, e))
    else:
        print("Warning: could not find file %r" % (outPath,))
416 """!Subclass of unittest.TestCase that adds some custom assertions for 422 """!A decorator to add a free function to our custom TestCase class, while also 423 making it available as a free function. 425 setattr(TestCase, func.__name__, func)
def assertRaisesLsstCpp(testcase, excClass, callableObj, *args, **kwargs):
    """Deprecated alias for TestCase.assertRaises.

    Emits a DeprecationWarning and forwards to testcase.assertRaises.
    """
    warnings.warn("assertRaisesLsstCpp is deprecated; please just use TestCase.assertRaises",
                  DeprecationWarning, stacklevel=2)
    return testcase.assertRaises(excClass, callableObj, *args, **kwargs)
437 """!Decorator to enter the debugger when there's an uncaught exception 439 To use, just slap a "@debugger()" on your function. 441 You may provide specific exception classes to catch as arguments to 442 the decorator function, e.g., "@debugger(RuntimeError, NotImplementedError)". 443 This defaults to just 'AssertionError', for use on unittest.TestCase methods. 445 Code provided by "Rosh Oxymoron" on StackOverflow: 446 http://stackoverflow.com/questions/4398967/python-unit-testing-automatically-running-the-debugger-when-a-test-fails 449 exceptions = (AssertionError, )
453 def wrapper(*args, **kwargs):
455 return f(*args, **kwargs)
459 pdb.post_mortem(sys.exc_info()[2])
def plotImageDiff(lhs, rhs, bad=None, diff=None, plotFileName=None):
    """!Plot the comparison of two 2-d NumPy arrays.

    NOTE: this method uses matplotlib and imports it internally; it should be
    wrapped in a try/except block within packages that do not depend on
    matplotlib (including utils).

    @param[in] lhs          LHS values to compare; a 2-d NumPy array
    @param[in] rhs          RHS values to compare; a 2-d NumPy array
    @param[in] bad          A 2-d boolean NumPy array of values to emphasize in the plots
    @param[in] diff         difference array; a 2-d NumPy array, or None to show lhs-rhs
    @param[in] plotFileName Filename to save the plot to. If None, the plot will be displayed in a window.
    """
    from matplotlib import pyplot
    if diff is None:
        diff = lhs - rhs
    pyplot.figure()
    if bad is not None:
        # Build an RGBA overlay: opaque red where ``bad`` is set,
        # fully transparent elsewhere.
        badImage = numpy.zeros(bad.shape + (4,), dtype=numpy.uint8)
        badImage[:, :, 0] = 255
        badImage[:, :, 1] = 0
        badImage[:, :, 2] = 0
        badImage[:, :, 3] = 255*bad
    # Shared grayscale ranges: row 1 uses the data range, row 2 the diff range.
    vmin1 = numpy.minimum(numpy.min(lhs), numpy.min(rhs))
    vmax1 = numpy.maximum(numpy.max(lhs), numpy.max(rhs))
    vmin2 = numpy.min(diff)
    vmax2 = numpy.max(diff)
    for n, (image, title) in enumerate([(lhs, "lhs"), (rhs, "rhs"), (diff, "diff")]):
        pyplot.subplot(2, 3, n + 1)
        im1 = pyplot.imshow(image, cmap=pyplot.cm.gray, interpolation='nearest', origin='lower',
                            vmin=vmin1, vmax=vmax1)
        if bad is not None:
            pyplot.imshow(badImage, alpha=0.2, interpolation='nearest', origin='lower')
        pyplot.title(title)
        pyplot.subplot(2, 3, n + 4)
        im2 = pyplot.imshow(image, cmap=pyplot.cm.gray, interpolation='nearest', origin='lower',
                            vmin=vmin2, vmax=vmax2)
        if bad is not None:
            pyplot.imshow(badImage, alpha=0.2, interpolation='nearest', origin='lower')
        pyplot.title(title)
    pyplot.subplots_adjust(left=0.05, bottom=0.05, top=0.92, right=0.75, wspace=0.05, hspace=0.05)
    cax1 = pyplot.axes([0.8, 0.55, 0.05, 0.4])
    pyplot.colorbar(im1, cax=cax1)
    cax2 = pyplot.axes([0.8, 0.05, 0.05, 0.4])
    pyplot.colorbar(im2, cax=cax2)
    if plotFileName:
        pyplot.savefig(plotFileName)
    else:
        pyplot.show()
def assertFloatsAlmostEqual(testCase, lhs, rhs, rtol=sys.float_info.epsilon,
                            atol=sys.float_info.epsilon, relTo=None,
                            printFailures=True, plotOnFailure=False,
                            plotFileName=None, invert=False, msg=None):
    """!Highly-configurable floating point comparisons for scalars and arrays.

    The test assertion will fail if all elements lhs and rhs are not equal to within the tolerances
    specified by rtol and atol. More precisely, the comparison is:

        abs(lhs - rhs) <= relTo*rtol OR abs(lhs - rhs) <= atol

    If rtol or atol is None, that term in the comparison is not performed at all.

    When not specified, relTo is the elementwise maximum of the absolute values of lhs and rhs. If
    set manually, it should usually be set to either lhs or rhs, or a scalar value typical of what
    is expected.

    @param[in] testCase      unittest.TestCase instance the test is part of
    @param[in] lhs           LHS value(s) to compare; may be a scalar or array-like of any dimension
    @param[in] rhs           RHS value(s) to compare; may be a scalar or array-like of any dimension
    @param[in] rtol          Relative tolerance for comparison; defaults to double-precision epsilon.
    @param[in] atol          Absolute tolerance for comparison; defaults to double-precision epsilon.
    @param[in] relTo         Value to which comparison with rtol is relative.
    @param[in] printFailures Upon failure, print all inequal elements as part of the message.
    @param[in] plotOnFailure Upon failure, plot the originals and their residual with matplotlib.
                             Only 2-d arrays are supported.
    @param[in] plotFileName  Filename to save the plot to. If None, the plot will be displayed in a window.
    @param[in] invert        If True, invert the comparison and fail only if any elements *are* equal.
                             Used to implement assertFloatsNotEqual, which should generally be used instead.
    @param[in] msg           String to append to the error message when assert fails.
    """
    if not numpy.isfinite(lhs).all():
        testCase.fail("Non-finite values in lhs")
    if not numpy.isfinite(rhs).all():
        testCase.fail("Non-finite values in rhs")

    diff = lhs - rhs
    absDiff = numpy.abs(lhs - rhs)
    if rtol is not None:
        if relTo is None:
            relTo = numpy.maximum(numpy.abs(lhs), numpy.abs(rhs))
        else:
            relTo = numpy.abs(relTo)
        bad = absDiff > rtol*relTo
        if atol is not None:
            # An element fails only if it fails BOTH the relative
            # and the absolute test.
            bad = numpy.logical_and(bad, absDiff > atol)
    else:
        if atol is None:
            raise ValueError("rtol and atol cannot both be None")
        bad = absDiff > atol

    failed = numpy.any(bad)
    if invert:
        # assertFloatsNotEqual: fail when everything *is* equal.
        failed = not failed
        bad = numpy.logical_not(bad)
        cmpStr = "=="
        failStr = "are the same"
    else:
        cmpStr = "!="
        failStr = "differ"

    errMsg = []
    if failed:
        if numpy.isscalar(bad):
            if rtol is None:
                errMsg = ["%s %s %s; diff=%s with atol=%s"
                          % (lhs, cmpStr, rhs, absDiff, atol)]
            elif atol is None:
                errMsg = ["%s %s %s; diff=%s/%s=%s with rtol=%s"
                          % (lhs, cmpStr, rhs, absDiff, relTo, absDiff/relTo, rtol)]
            else:
                errMsg = ["%s %s %s; diff=%s/%s=%s with rtol=%s, atol=%s"
                          % (lhs, cmpStr, rhs, absDiff, relTo, absDiff/relTo, rtol, atol)]
        else:
            errMsg = ["%d/%d elements %s with rtol=%s, atol=%s"
                      % (bad.sum(), bad.size, failStr, rtol, atol)]
            if plotOnFailure:
                if len(lhs.shape) != 2 or len(rhs.shape) != 2:
                    raise ValueError("plotOnFailure is only valid for 2-d arrays")
                try:
                    plotImageDiff(lhs, rhs, bad, diff=diff, plotFileName=plotFileName)
                except ImportError:
                    errMsg.append("Failure plot requested but matplotlib could not be imported.")
            if printFailures:
                # Coerce any scalar operands to arrays so they can all be
                # indexed with the ``bad`` mask below.
                if numpy.isscalar(relTo):
                    relTo = numpy.ones(bad.shape, dtype=float) * relTo
                if numpy.isscalar(lhs):
                    lhs = numpy.ones(bad.shape, dtype=float) * lhs
                if numpy.isscalar(rhs):
                    rhs = numpy.ones(bad.shape, dtype=float) * rhs
                if rtol is None:
                    for a, b, diff in zip(lhs[bad], rhs[bad], absDiff[bad]):
                        errMsg.append("%s %s %s (diff=%s)" % (a, cmpStr, b, diff))
                else:
                    for a, b, diff, rel in zip(lhs[bad], rhs[bad], absDiff[bad], relTo[bad]):
                        errMsg.append("%s %s %s (diff=%s/%s=%s)" % (a, cmpStr, b, diff, rel, diff/rel))

    if msg is not None:
        errMsg.append(msg)
    testCase.assertFalse(failed, msg="\n".join(errMsg))
def assertFloatsNotEqual(testCase, lhs, rhs, **kwds):
    """
    Fail a test if the given floating point values are equal to within the given tolerances.

    See assertClose for more information.
    """
    return assertFloatsAlmostEqual(testCase, lhs, rhs, invert=True, **kwds)

def assertFloatsEqual(testCase, lhs, rhs, **kwargs):
    """
    Assert that lhs == rhs (both numeric types, whether scalar or array).

    See assertClose (called with rtol=atol=0) for more information.
    """
    return assertFloatsAlmostEqual(testCase, lhs, rhs, rtol=0, atol=0, **kwargs)

def assertClose(*args, **kwargs):
    """Deprecated alias for TestCase.assertFloatsAlmostEqual."""
    warnings.warn("assertClose is deprecated; please use TestCase.assertFloatsAlmostEqual",
                  DeprecationWarning, stacklevel=2)
    return assertFloatsAlmostEqual(*args, **kwargs)

def assertNotClose(*args, **kwargs):
    """Deprecated alias for TestCase.assertFloatsNotEqual."""
    warnings.warn("assertNotClose is deprecated; please use TestCase.assertFloatsNotEqual",
                  DeprecationWarning, stacklevel=2)
    return assertFloatsNotEqual(*args, **kwargs)
def suiteClassWrapper(tests)
def assertExecutable(self, executable, root_dir=None, args=None, msg=None)
Check an executable runs and returns good status.
def testLeaks(self)
Check for memory leaks in the preceding tests.
def assertFloatsEqual(testCase, lhs, rhs, **kwargs)
def tearDownClass(cls)
Reset the leak counter when the tests have been completed.
def plotImageDiff(lhs, rhs, bad=None, diff=None, plotFileName=None)
Plot the comparison of two 2-d NumPy arrays.
def inTestCase(func)
A decorator to add a free function to our custom TestCase class, while also making it available as a ...
def assertClose(*args, **kwargs)
def _build_test_method(cls, executable, root_dir)
Build a test method and attach to class.
def assertFloatsAlmostEqual(testCase, lhs, rhs, rtol=sys.float_info.epsilon, atol=sys.float_info.epsilon, relTo=None, printFailures=True, plotOnFailure=False, plotFileName=None, invert=False, msg=None)
Highly-configurable floating point comparisons for scalars and arrays.
def assertFloatsNotEqual(testCase, lhs, rhs, **kwds)
Subclass of unittest.TestCase that adds some custom assertions for convenience.
def run(suite, exit=True)
Exit with the status code resulting from running the provided test suite.
Test that executables can be run and return good status.
def debugger(*exceptions)
Decorator to enter the debugger when there's an uncaught exception.
def findFileFromRoot(ifile)
Find file which is specified as a path relative to the toplevel directory; we start in $cwd and walk ...
def assertNotClose(*args, **kwargs)
def create_executable_tests(cls, ref_file, executables=None)
Discover executables to test and create corresponding test methods.
def testFileDescriptorLeaks(self)
def assertRaisesLsstCpp(testcase, excClass, callableObj, *args, **kwargs)
def getTempFilePath(ext)
Return a path suitable for a temporary file and try to delete the file on success.
def sort_tests(tests)
Go through the supplied sequence of test suites and sort them to ensure that MemoryTestCases are at t...
Check for memory leaks since memId0 was allocated.