23 """Support code for running unit tests""" 24 from __future__
import print_function
25 from __future__
import division
26 from builtins
import zip
27 from builtins
import range
29 from contextlib
import contextmanager
48 import lsst.daf.base
as dafBase
def _get_open_files():
    """Return a set containing the list of open files."""
    if psutil is None:
        return set()
    return set(p.path for p in psutil.Process().open_files())


def init():
    """Initialize the memory tester"""
    global memId0
    global open_files
    memId0 = dafBase.Citizen.getNextMemId()  # used by MemoryTestCase
    open_files = _get_open_files()  # used by MemoryTestCase
def run(suite, exit=True):
    """!Exit with the status code resulting from running the provided test suite"""

    if unittest.TextTestRunner().run(suite).wasSuccessful():
        status = 0
    else:
        status = 1

    if exit:
        sys.exit(status)
    else:
        return status
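# Usage sketch (suite construction is illustrative): run() can be handed an
# explicit TestSuite and either exit with the resulting status or return it.
#
#     suite = unittest.defaultTestLoader.loadTestsFromName(__name__)
#     lsst.utils.tests.run(suite)                       # calls sys.exit()
#     status = lsst.utils.tests.run(suite, exit=False)  # or return the status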
94 """!Go through the supplied sequence of test suites and sort them to ensure that 95 MemoryTestCases are at the end of the test list. Returns a combined 98 suite = unittest.TestSuite()
100 for test_suite
in tests:
107 for method
in test_suite:
108 bases = inspect.getmro(method.__class__)
110 if bases
is not None and MemoryTestCase
in bases:
111 memtests.append(test_suite)
113 suite.addTests(test_suite)
115 if isinstance(test_suite, MemoryTestCase):
116 memtests.append(test_suite)
118 suite.addTest(test_suite)
119 suite.addTests(memtests)
def suiteClassWrapper(tests):
    return unittest.TestSuite(sort_tests(tests))


# Replace the suiteClass callable in the defaultTestLoader so that test
# ordering can be adjusted; this has no effect if no MemoryTestCases are found.
unittest.defaultTestLoader.suiteClass = suiteClassWrapper
134 """!Check for memory leaks since memId0 was allocated""" 141 """!Reset the leak counter when the tests have been completed""" 145 """!Check for memory leaks in the preceding tests""" 148 global memId0, nleakPrintMax
149 nleak = dafBase.Citizen.census(0, memId0)
151 plural =
"s" if nleak != 1
else "" 152 print(
"\n%d Object%s leaked:" % (nleak, plural))
154 if nleak <= nleakPrintMax:
155 print(dafBase.Citizen.census(memId0))
157 census = dafBase.Citizen.census()
159 for i
in range(nleakPrintMax - 1, -1, -1):
160 print(census[i].repr())
162 self.fail(
"Leaked %d block%s" % (nleak, plural))
166 self.skipTest(
"Unable to test file descriptor leaks. psutil unavailable.")
169 now_open = _get_open_files()
172 now_open = set(f
for f
in now_open
if not f.endswith(
".car")
and 173 not f.startswith(
"/proc/")
and 174 not f.endswith(
".ttf")
and 175 f !=
"/var/lib/sss/mc/passwd" and 176 not f.endswith(
"astropy.log"))
178 diff = now_open.difference(open_files)
181 print(
"File open: %s" % f)
182 self.fail(
"Failed to close %d file%s" % (len(diff),
"s" if len(diff) != 1
else ""))
186 """!Test that executables can be run and return good status. 188 The test methods are dynamically created. Callers 189 must subclass this class in their own test file and invoke 190 the create_executable_tests() class method to register the tests. 192 TESTS_DISCOVERED = -1
196 """Abort testing if automated test creation was enabled and 197 yet not tests were found.""" 200 raise Exception(
"No executables discovered.")
203 """This test exists to ensure that there is at least one test to be 204 executed. This allows the test runner to trigger the class set up 205 machinery to test whether there are some executables to test.""" 209 """!Check an executable runs and returns good status. 211 @param executable: Path to an executable. root_dir is not used 212 if this is an absolute path. 214 @param root_dir: Directory containing exe. Ignored if None. 216 @param args: List or tuple of arguments to be provided to the 219 @param msg: Message to use when the test fails. Can be None for 222 Prints output to standard out. On bad exit status the test 223 fails. If the executable can not be located the test is skipped. 226 if root_dir
is not None and not os.path.isabs(executable):
227 executable = os.path.join(root_dir, executable)
230 sp_args = [executable]
231 argstr =
"no arguments" 234 argstr =
'arguments "' +
" ".join(args) +
'"' 236 print(
"Running executable '{}' with {}...".format(executable, argstr))
237 if not os.path.exists(executable):
238 self.skipTest(
"Executable {} is unexpectedly missing".format(executable))
241 output = subprocess.check_output(sp_args)
242 except subprocess.CalledProcessError
as e:
244 failmsg =
"Bad exit status from '{}': {}".format(executable, e.returncode)
245 print(output.decode(
'utf-8'))
    @classmethod
    def _build_test_method(cls, executable, root_dir):
        """!Build a test method and attach to class.

        The method is built for the supplied executable located
        in the supplied root directory.

        cls._build_test_method(executable, root_dir)

        @param cls The class in which to create the tests.

        @param executable Name of executable. Can be absolute path.

        @param root_dir Path to executable. Not used if executable path is absolute.
        """
        if not os.path.isabs(executable):
            executable = os.path.abspath(os.path.join(root_dir, executable))

        # Create the test name from the executable path
        test_name = "test_exe_" + executable.replace("/", "_")

        # This is the function that will become the test method
        def test_executable_runs(*args):
            self = args[0]
            self.assertExecutable(executable)

        # Give it a name and attach it to the class
        test_executable_runs.__name__ = test_name
        setattr(cls, test_name, test_executable_runs)
283 """!Discover executables to test and create corresponding test methods. 285 Scans the directory containing the supplied reference file 286 (usually __file__ supplied from the test class) to look for 287 executables. If executables are found a test method is created 288 for each one. That test method will run the executable and 289 check the returned value. 291 Executable scripts with a .py extension and shared libraries 292 are ignored by the scanner. 294 This class method must be called before test discovery. 298 cls.create_executable_tests(__file__) 300 The list of executables can be overridden by passing in a 301 sequence of explicit executables that should be tested. 302 If an item in the sequence can not be found the 303 test will be configured to skip rather than fail. 307 ref_dir = os.path.abspath(os.path.dirname(ref_file))
309 if executables
is None:
312 for root, dirs, files
in os.walk(ref_dir):
315 if not f.endswith(
".py")
and not f.endswith(
".so"):
316 full_path = os.path.join(root, f)
317 if os.access(full_path, os.X_OK):
318 executables.append(full_path)
327 for e
in executables:
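# Usage sketch (file and class names are illustrative): a test module
# registers the executable tests before test discovery runs, e.g.
#
#     # tests/testExecutables.py
#     import lsst.utils.tests
#
#     class BinaryTester(lsst.utils.tests.ExecutablesTestCase):
#         pass
#
#     BinaryTester.create_executable_tests(__file__)
#
# An explicit list can be supplied instead of scanning the directory:
#
#     BinaryTester.create_executable_tests(__file__, ["bin/runIt.sh"])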
332 """!Find file which is specified as a path relative to the toplevel directory; 333 we start in $cwd and walk up until we find the file (or throw IOError if it doesn't exist) 335 This is useful for running tests that may be run from _dir_/tests or _dir_""" 337 if os.path.isfile(ifile):
343 dirname, basename = os.path.split(file)
345 ofile = os.path.join(basename, ofile)
349 if os.path.isfile(ofile):
354 raise IOError(
"Can't find %s" % ifile)
359 """!Return a path suitable for a temporary file and try to delete the file on success 361 If the with block completes successfully then the file is deleted, if possible; 362 failure results in a printed warning. 363 If a file is remains when it should not, a RuntimeError exception is raised. This 364 exception is also raised if a file is not present on context manager exit when one 365 is expected to exist. 366 If the block exits with an exception the file if left on disk so it can be examined. 367 The file name has a random component such that nested context managers can be used 368 with the same file suffix. 370 @param[in] ext file name extension, e.g. ".fits" 371 @param[in] expectOutput If true, a file should be created within the context manager. 372 If false, a file should not be present when the context manager 374 @return path for a temporary file. The path is a combination of the caller's file path 375 and the name of the top-level function, as per this simple example: 377 # file tests/testFoo.py 379 import lsst.utils.tests 380 class FooTestCase(unittest.TestCase): 381 def testBasics(self): 385 with lsst.utils.tests.getTempFilePath(".fits") as tmpFile: 386 # if tests/.tests exists then tmpFile = "tests/.tests/testFoo_testBasics.fits" 387 # otherwise tmpFile = "testFoo_testBasics.fits" 389 # at the end of this "with" block the path tmpFile will be deleted, but only if 390 # the file exists and the "with" block terminated normally (rather than with an exception) 394 stack = inspect.stack()
396 for i
in range(2, len(stack)):
397 frameInfo = inspect.getframeinfo(stack[i][0])
399 callerFilePath = frameInfo.filename
400 callerFuncName = frameInfo.function
401 elif callerFilePath == frameInfo.filename:
403 callerFuncName = frameInfo.function
407 callerDir, callerFileNameWithExt = os.path.split(callerFilePath)
408 callerFileName = os.path.splitext(callerFileNameWithExt)[0]
409 outDir = os.path.join(callerDir,
".tests")
410 if not os.path.isdir(outDir):
412 prefix =
"%s_%s-" % (callerFileName, callerFuncName)
413 outPath = tempfile.mktemp(dir=outDir, suffix=ext, prefix=prefix)
414 if os.path.exists(outPath):
417 warnings.warn(
"Unexpectedly found pre-existing tempfile named %r" % (outPath,),
426 fileExists = os.path.exists(outPath)
429 raise RuntimeError(
"Temp file expected named {} but none found".format(outPath))
432 raise RuntimeError(
"Unexpectedly discovered temp file named {}".format(outPath))
439 warnings.warn(
"Warning: could not remove file %r: %s" % (outPath, e), stacklevel=3)
443 """!Subclass of unittest.TestCase that adds some custom assertions for 449 """!A decorator to add a free function to our custom TestCase class, while also 450 making it available as a free function. 452 setattr(TestCase, func.__name__, func)
@inTestCase
def assertRaisesLsstCpp(testcase, excClass, callableObj, *args, **kwargs):
    warnings.warn("assertRaisesLsstCpp is deprecated; please just use TestCase.assertRaises",
                  DeprecationWarning, stacklevel=2)
    return testcase.assertRaises(excClass, callableObj, *args, **kwargs)
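# Usage sketch (the assertion name is illustrative): inTestCase() lets a free
# function double as a TestCase method, e.g.
#
#     @inTestCase
#     def assertPositive(testCase, value):
#         testCase.assertGreater(value, 0)
#
#     # later, inside a lsst.utils.tests.TestCase subclass:
#     #     self.assertPositive(3)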
464 """!Decorator to enter the debugger when there's an uncaught exception 466 To use, just slap a "@debugger()" on your function. 468 You may provide specific exception classes to catch as arguments to 469 the decorator function, e.g., "@debugger(RuntimeError, NotImplementedError)". 470 This defaults to just 'AssertionError', for use on unittest.TestCase methods. 472 Code provided by "Rosh Oxymoron" on StackOverflow: 473 http://stackoverflow.com/questions/4398967/python-unit-testing-automatically-running-the-debugger-when-a-test-fails 476 exceptions = (AssertionError, )
480 def wrapper(*args, **kwargs):
482 return f(*args, **kwargs)
486 pdb.post_mortem(sys.exc_info()[2])
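# Usage sketch (computeAnswer is a hypothetical helper): decorate a test
# method so that a failing assertion drops into pdb at the point of failure.
#
#     class ExampleTestCase(lsst.utils.tests.TestCase):
#         @debugger()
#         def testSomething(self):
#             self.assertEqual(computeAnswer(), 42)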
492 """!Plot the comparison of two 2-d NumPy arrays. 494 NOTE: this method uses matplotlib and imports it internally; it should be 495 wrapped in a try/except block within packages that do not depend on 496 matplotlib (including utils). 498 @param[in] lhs LHS values to compare; a 2-d NumPy array 499 @param[in] rhs RHS values to compare; a 2-d NumPy array 500 @param[in] bad A 2-d boolean NumPy array of values to emphasize in the plots 501 @param[in] diff difference array; a 2-d NumPy array, or None to show lhs-rhs 502 @param[in] plotFileName Filename to save the plot to. If None, the plot will be displayed in a 505 from matplotlib
import pyplot
511 badImage = numpy.zeros(bad.shape + (4,), dtype=numpy.uint8)
512 badImage[:, :, 0] = 255
513 badImage[:, :, 1] = 0
514 badImage[:, :, 2] = 0
515 badImage[:, :, 3] = 255*bad
516 vmin1 = numpy.minimum(numpy.min(lhs), numpy.min(rhs))
517 vmax1 = numpy.maximum(numpy.max(lhs), numpy.max(rhs))
518 vmin2 = numpy.min(diff)
519 vmax2 = numpy.max(diff)
520 for n, (image, title)
in enumerate([(lhs,
"lhs"), (rhs,
"rhs"), (diff,
"diff")]):
521 pyplot.subplot(2, 3, n + 1)
522 im1 = pyplot.imshow(image, cmap=pyplot.cm.gray, interpolation=
'nearest', origin=
'lower',
523 vmin=vmin1, vmax=vmax1)
525 pyplot.imshow(badImage, alpha=0.2, interpolation=
'nearest', origin=
'lower')
528 pyplot.subplot(2, 3, n + 4)
529 im2 = pyplot.imshow(image, cmap=pyplot.cm.gray, interpolation=
'nearest', origin=
'lower',
530 vmin=vmin2, vmax=vmax2)
532 pyplot.imshow(badImage, alpha=0.2, interpolation=
'nearest', origin=
'lower')
535 pyplot.subplots_adjust(left=0.05, bottom=0.05, top=0.92, right=0.75, wspace=0.05, hspace=0.05)
536 cax1 = pyplot.axes([0.8, 0.55, 0.05, 0.4])
537 pyplot.colorbar(im1, cax=cax1)
538 cax2 = pyplot.axes([0.8, 0.05, 0.05, 0.4])
539 pyplot.colorbar(im2, cax=cax2)
541 pyplot.savefig(plotFileName)
@inTestCase
def assertFloatsAlmostEqual(testCase, lhs, rhs, rtol=sys.float_info.epsilon,
                            atol=sys.float_info.epsilon, relTo=None,
                            printFailures=True, plotOnFailure=False,
                            plotFileName=None, invert=False, msg=None):
    """!Highly-configurable floating point comparisons for scalars and arrays.

    The test assertion fails unless all elements of lhs and rhs are equal to within the tolerances
    specified by rtol and atol. More precisely, the comparison is:

    abs(lhs - rhs) <= relTo*rtol OR abs(lhs - rhs) <= atol

    If rtol or atol is None, that term in the comparison is not performed at all.

    When not specified, relTo is the elementwise maximum of the absolute values of lhs and rhs. If
    set manually, it should usually be set to either lhs or rhs, or a scalar value typical of what
    is expected.

    @param[in] testCase       unittest.TestCase instance the test is part of
    @param[in] lhs            LHS value(s) to compare; may be a scalar or array-like of any dimension
    @param[in] rhs            RHS value(s) to compare; may be a scalar or array-like of any dimension
    @param[in] rtol           Relative tolerance for comparison; defaults to double-precision epsilon.
    @param[in] atol           Absolute tolerance for comparison; defaults to double-precision epsilon.
    @param[in] relTo          Value to which comparison with rtol is relative.
    @param[in] printFailures  Upon failure, print all inequal elements as part of the message.
    @param[in] plotOnFailure  Upon failure, plot the originals and their residual with matplotlib.
                              Only 2-d arrays are supported.
    @param[in] plotFileName   Filename to save the plot to. If None, the plot will be displayed in a
                              window.
    @param[in] invert         If True, invert the comparison and fail only if any elements *are* equal.
                              Used to implement assertFloatsNotEqual, which should generally be used instead
                              for clarity.
    @param[in] msg            String to append to the error message when assert fails.
    """
    if not numpy.isfinite(lhs).all():
        testCase.fail("Non-finite values in lhs")
    if not numpy.isfinite(rhs).all():
        testCase.fail("Non-finite values in rhs")
    diff = lhs - rhs
    absDiff = numpy.abs(lhs - rhs)
    if rtol is not None:
        if relTo is None:
            relTo = numpy.maximum(numpy.abs(lhs), numpy.abs(rhs))
        else:
            relTo = numpy.abs(relTo)
        bad = absDiff > rtol*relTo
        if atol is not None:
            bad = numpy.logical_and(bad, absDiff > atol)
    else:
        if atol is None:
            raise ValueError("rtol and atol cannot both be None")
        bad = absDiff > atol
    failed = numpy.any(bad)
    if invert:
        failed = not failed
        bad = numpy.logical_not(bad)
        cmpStr = "=="
        failStr = "are the same"
    else:
        cmpStr = "!="
        failStr = "differ"
    errMsg = []
    if failed:
        if numpy.isscalar(bad):
            if rtol is None:
                errMsg = ["%s %s %s; diff=%s with atol=%s"
                          % (lhs, cmpStr, rhs, absDiff, atol)]
            elif atol is None:
                errMsg = ["%s %s %s; diff=%s/%s=%s with rtol=%s"
                          % (lhs, cmpStr, rhs, absDiff, relTo, absDiff/relTo, rtol)]
            else:
                errMsg = ["%s %s %s; diff=%s/%s=%s with rtol=%s, atol=%s"
                          % (lhs, cmpStr, rhs, absDiff, relTo, absDiff/relTo, rtol, atol)]
        else:
            errMsg = ["%d/%d elements %s with rtol=%s, atol=%s"
                      % (bad.sum(), bad.size, failStr, rtol, atol)]
            if plotOnFailure:
                if len(lhs.shape) != 2 or len(rhs.shape) != 2:
                    raise ValueError("plotOnFailure is only valid for 2-d arrays")
                try:
                    plotImageDiff(lhs, rhs, bad, diff=diff, plotFileName=plotFileName)
                except ImportError:
                    errMsg.append("Failure plot requested but matplotlib could not be imported.")
            if printFailures:
                # Make sure everything is an array if any of them are, so we can treat
                # them the same (diff and absDiff are arrays if either rhs or lhs is),
                # and we don't get here if neither is.
                if numpy.isscalar(relTo):
                    relTo = numpy.ones(bad.shape, dtype=float) * relTo
                if numpy.isscalar(lhs):
                    lhs = numpy.ones(bad.shape, dtype=float) * lhs
                if numpy.isscalar(rhs):
                    rhs = numpy.ones(bad.shape, dtype=float) * rhs
                if rtol is None:
                    for a, b, diff in zip(lhs[bad], rhs[bad], absDiff[bad]):
                        errMsg.append("%s %s %s (diff=%s)" % (a, cmpStr, b, diff))
                else:
                    for a, b, diff, rel in zip(lhs[bad], rhs[bad], absDiff[bad], relTo[bad]):
                        errMsg.append("%s %s %s (diff=%s/%s=%s)" % (a, cmpStr, b, diff, rel, diff/rel))

    if msg is not None:
        errMsg.append(msg)
    testCase.assertFalse(failed, msg="\n".join(errMsg))
@inTestCase
def assertFloatsNotEqual(testCase, lhs, rhs, **kwds):
    """!
    Fail a test if the given floating point values are equal to within the given tolerances.

    See assertClose for more information.
    """
    return assertFloatsAlmostEqual(testCase, lhs, rhs, invert=True, **kwds)


@inTestCase
def assertFloatsEqual(testCase, lhs, rhs, **kwargs):
    """!
    Assert that lhs == rhs (both numeric types, whether scalar or array).

    See assertClose (called with rtol=atol=0) for more information.
    """
    return assertFloatsAlmostEqual(testCase, lhs, rhs, rtol=0, atol=0, **kwargs)


@inTestCase
def assertClose(*args, **kwargs):
    warnings.warn("assertClose is deprecated; please use TestCase.assertFloatsAlmostEqual",
                  DeprecationWarning, stacklevel=2)
    return assertFloatsAlmostEqual(*args, **kwargs)


@inTestCase
def assertNotClose(*args, **kwargs):
    warnings.warn("assertNotClose is deprecated; please use TestCase.assertFloatsNotEqual",
                  DeprecationWarning, stacklevel=2)
    return assertFloatsNotEqual(*args, **kwargs)
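# Usage sketch (values are illustrative): inside a lsst.utils.tests.TestCase
# subclass these helpers are available as methods via the inTestCase decorator.
#
#     self.assertFloatsAlmostEqual(numpy.array([1.0, 2.0]),
#                                  numpy.array([1.0, 2.0 + 1e-14]),
#                                  rtol=1e-12)
#     self.assertFloatsEqual(3, 3)
#     self.assertFloatsNotEqual(1.0, 1.5)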