Coverage for python/lsst/utils/tests.py: 30%
Shortcuts on this page
r m x p toggle line displays
j k next/prev highlighted chunk
0 (zero) top of page
1 (one) first highlighted chunk
Shortcuts on this page
r m x p toggle line displays
j k next/prev highlighted chunk
0 (zero) top of page
1 (one) first highlighted chunk
1# This file is part of utils.
2#
3# Developed for the LSST Data Management System.
4# This product includes software developed by the LSST Project
5# (https://www.lsst.org).
6# See the COPYRIGHT file at the top-level directory of this distribution
7# for details of code ownership.
8#
9# Use of this source code is governed by a 3-clause BSD-style
10# license that can be found in the LICENSE file.
12"""Support code for running unit tests"""
# Public API of this module. Includes the product-form decorators and the
# temporaryDirectory context manager, which are public but were previously
# missing from the export list.
__all__ = ["init", "MemoryTestCase", "ExecutablesTestCase", "getTempFilePath",
           "TestCase", "assertFloatsAlmostEqual", "assertFloatsNotEqual", "assertFloatsEqual",
           "debugger", "classParameters", "methodParameters",
           "classParametersProduct", "methodParametersProduct", "temporaryDirectory"]
18import contextlib
19import gc
20import inspect
21import os
22import subprocess
23import sys
24import unittest
25import warnings
26import numpy
27import psutil
28import functools
29import tempfile
30import shutil
31import itertools
33from typing import (
34 Any,
35 Callable,
36 Dict,
37 Iterator,
38 List,
39 Optional,
40 Mapping,
41 Set,
42 Sequence,
43 Type,
44 Union,
45)
# Initialize the list of open files to an empty set.
# This is the baseline used by MemoryTestCase.testFileDescriptorLeaks:
# anything in this set is considered "already open" and is not reported
# as a leak. init() refreshes it from the current process state.
open_files: Set[str] = set()
def _get_open_files() -> Set[str]:
    """Return the paths of all files currently open in this process.

    Returns
    -------
    open_files : `set`
        Set containing the list of open files.
    """
    # psutil reports one record per open file descriptor; only the path
    # is of interest here.
    return {record.path for record in psutil.Process().open_files()}
def init() -> None:
    """Initialize the memory tester and file descriptor leak tester."""
    global open_files
    # Snapshot the descriptors that are open right now; subsequent leak
    # checks only report files opened after this call.
    open_files = _get_open_files()
def sort_tests(tests) -> unittest.TestSuite:
    """Sort supplied test suites such that MemoryTestCases are at the end.

    `lsst.utils.tests.MemoryTestCase` tests should always run after any other
    tests in the module.

    Parameters
    ----------
    tests : sequence
        Sequence of test suites.

    Returns
    -------
    suite : `unittest.TestSuite`
        A combined `~unittest.TestSuite` with
        `~lsst.utils.tests.MemoryTestCase` at the end.
    """
    ordered = unittest.TestSuite()
    deferred = []
    for candidate in tests:
        try:
            # Peek at the first member of the suite to decide whether the
            # whole suite is made of MemoryTestCase tests. A suite may be
            # empty, in which case there is nothing to classify.
            first = None
            for member in candidate:
                first = member
                break
            mro = inspect.getmro(first.__class__) if first is not None else None
            if mro is not None and MemoryTestCase in mro:
                deferred.append(candidate)
            else:
                ordered.addTests(candidate)
        except TypeError:
            # Not iterable: treat it as a single test case.
            if isinstance(candidate, MemoryTestCase):
                deferred.append(candidate)
            else:
                ordered.addTest(candidate)
    # Memory tests go last so that resource checks see the whole run.
    ordered.addTests(deferred)
    return ordered
def suiteClassWrapper(tests):
    """Combine test suites, moving memory tests to the end.

    Thin adapter so that ``sort_tests`` can serve as the ``suiteClass``
    callable of a `unittest.TestLoader`.
    """
    ordered = sort_tests(tests)
    return unittest.TestSuite(ordered)
# Replace the suiteClass callable in the defaultTestLoader
# so that we can reorder the test ordering. This will have
# no effect if no memory test cases are found.
# NOTE: this runs at import time, so merely importing this module
# changes the behavior of the shared default test loader.
unittest.defaultTestLoader.suiteClass = suiteClassWrapper
class MemoryTestCase(unittest.TestCase):
    """Check for resource leaks."""

    @classmethod
    def tearDownClass(cls) -> None:
        """Reset the leak baseline once all tests in the class have run."""
        init()

    def testFileDescriptorLeaks(self) -> None:
        """Fail if any file descriptors were opened since init() was called."""
        gc.collect()
        global open_files
        current = _get_open_files()

        def _ignorable(path: str) -> bool:
            # Files opened outside the control of the test stack: fonts,
            # /proc entries, the passwd database and the astropy log.
            return (path.endswith(".car")
                    or path.startswith("/proc/")
                    or path.endswith(".ttf")
                    or (path.startswith("/var/lib/") and path.endswith("/passwd"))
                    or path.endswith("astropy.log"))

        leaked = {path for path in current if not _ignorable(path)} - open_files
        if leaked:
            for f in leaked:
                print("File open: %s" % f)
            self.fail("Failed to close %d file%s" % (len(leaked), "s" if len(leaked) != 1 else ""))
class ExecutablesTestCase(unittest.TestCase):
    """Test that executables can be run and return good status.

    The test methods are dynamically created. Callers
    must subclass this class in their own test file and invoke
    the create_executable_tests() class method to register the tests.
    """

    # Number of executables discovered by create_executable_tests();
    # -1 means automated discovery was never attempted.
    TESTS_DISCOVERED = -1

    @classmethod
    def setUpClass(cls) -> None:
        """Abort testing if automated test creation was enabled and
        no tests were found."""
        if cls.TESTS_DISCOVERED == 0:
            raise RuntimeError("No executables discovered.")

    def testSanity(self) -> None:
        """This test exists to ensure that there is at least one test to be
        executed. This allows the test runner to trigger the class set up
        machinery to test whether there are some executables to test."""
        pass

    def assertExecutable(self, executable: str, root_dir: Optional[str] = None,
                         args: Optional[Sequence[str]] = None, msg: Optional[str] = None) -> None:
        """Check an executable runs and returns good status.

        Prints output to standard out. On bad exit status the test
        fails. If the executable can not be located the test is skipped.

        Parameters
        ----------
        executable : `str`
            Path to an executable. ``root_dir`` is not used if this is an
            absolute path.
        root_dir : `str`, optional
            Directory containing executable. Ignored if `None`.
        args : `list` or `tuple`, optional
            Arguments to be provided to the executable.
        msg : `str`, optional
            Message to use when the test fails. Can be `None` for default
            message.

        Raises
        ------
        AssertionError
            The executable did not return 0 exit status.
        """
        if root_dir is not None and not os.path.isabs(executable):
            executable = os.path.join(root_dir, executable)

        # Build the command line and a human-readable description of it.
        command = [executable]
        if args is None:
            argstr = "no arguments"
        else:
            command += list(args)
            argstr = 'arguments "' + " ".join(args) + '"'

        print("Running executable '{}' with {}...".format(executable, argstr))
        if not os.path.exists(executable):
            self.skipTest("Executable {} is unexpectedly missing".format(executable))

        failmsg = None
        try:
            output = subprocess.check_output(command)
        except subprocess.CalledProcessError as e:
            # Keep whatever the process printed so it can be shown below.
            output = e.output
            failmsg = "Bad exit status from '{}': {}".format(executable, e.returncode)
        print(output.decode('utf-8'))
        if failmsg:
            self.fail(failmsg if msg is None else msg)

    @classmethod
    def _build_test_method(cls, executable: str, root_dir: str) -> None:
        """Build a test method and attach to class.

        A test method is created for the supplied executable located
        in the supplied root directory. This method is attached to the class
        so that the test runner will discover the test and run it.

        Parameters
        ----------
        cls : `object`
            The class in which to create the tests.
        executable : `str`
            Name of executable. Can be absolute path.
        root_dir : `str`
            Path to executable. Not used if executable path is absolute.
        """
        if not os.path.isabs(executable):
            executable = os.path.abspath(os.path.join(root_dir, executable))

        # Derive a valid method name from the executable path.
        test_name = "test_exe_" + executable.replace("/", "_")

        def _run_executable(*args: Any) -> None:
            # args[0] is the test-case instance supplied by the runner.
            args[0].assertExecutable(executable)

        _run_executable.__name__ = test_name
        setattr(cls, test_name, _run_executable)

    @classmethod
    def create_executable_tests(cls, ref_file: str, executables: Optional[Sequence[str]] = None) -> None:
        """Discover executables to test and create corresponding test methods.

        Scans the directory containing the supplied reference file
        (usually ``__file__`` supplied from the test class) to look for
        executables. If executables are found a test method is created
        for each one. That test method will run the executable and
        check the returned value.

        Executable scripts with a ``.py`` extension and shared libraries
        are ignored by the scanner.

        This class method must be called before test discovery.

        Parameters
        ----------
        ref_file : `str`
            Path to a file within the directory to be searched.
            If the files are in the same location as the test file, then
            ``__file__`` can be used.
        executables : `list` or `tuple`, optional
            Sequence of executables that can override the automated
            detection. If an executable mentioned here is not found, a
            skipped test will be created for it, rather than a failed
            test.

        Examples
        --------
        >>> cls.create_executable_tests(__file__)
        """
        # Search relative to the directory holding the reference file.
        ref_dir = os.path.abspath(os.path.dirname(ref_file))

        if executables is None:
            # Walk the tree collecting anything executable that is not a
            # Python file or shared library.
            found = []
            for dirpath, _, filenames in os.walk(ref_dir):
                for name in filenames:
                    if name.endswith(".py") or name.endswith(".so"):
                        continue
                    candidate = os.path.join(dirpath, name)
                    if os.access(candidate, os.X_OK):
                        found.append(candidate)
            executables = found

        # Store the number of tests found for later assessment.
        # Do not raise an exception if we have no executables as this would
        # cause the testing to abort before the test runner could properly
        # integrate it into the failure report.
        cls.TESTS_DISCOVERED = len(executables)

        # Create the test functions and attach them to the class.
        for exe in executables:
            cls._build_test_method(exe, ref_dir)
@contextlib.contextmanager
def getTempFilePath(ext: str, expectOutput: bool = True) -> Iterator[str]:
    """Return a path suitable for a temporary file and try to delete the
    file on success.

    If the with block completes successfully then the file is deleted,
    if possible; failure results in a printed warning.
    If a file remains when it should not, a RuntimeError exception is
    raised. This exception is also raised if a file is not present on context
    manager exit when one is expected to exist.
    If the block exits with an exception the file is left on disk so it can be
    examined. The file name has a random component such that nested context
    managers can be used with the same file suffix.

    Parameters
    ----------
    ext : `str`
        File name extension, e.g. ``.fits``.
    expectOutput : `bool`, optional
        If `True`, a file should be created within the context manager.
        If `False`, a file should not be present when the context manager
        exits.

    Returns
    -------
    `str`
        Path for a temporary file. The path is a combination of the caller's
        file path and the name of the top-level function.

    Notes
    -----
    ::

        # file tests/testFoo.py
        import unittest
        import lsst.utils.tests
        class FooTestCase(unittest.TestCase):
            def testBasics(self):
                self.runTest()

            def runTest(self):
                with lsst.utils.tests.getTempFilePath(".fits") as tmpFile:
                    # if tests/.tests exists then
                    # tmpFile = "tests/.tests/testFoo_testBasics.fits"
                    # otherwise tmpFile = "testFoo_testBasics.fits"
                    ...
                # at the end of this "with" block the path tmpFile will be
                # deleted, but only if the file exists and the "with"
                # block terminated normally (rather than with an exception)
                ...
    """
    # Walk up the call stack to find the outermost calling function in the
    # caller's file, so the temp file name reflects the test entry point.
    # Frame index 2 is the immediate caller (0 is this generator, 1 is the
    # contextlib machinery) — do not change these indices casually.
    stack = inspect.stack()
    # get name of first function in the file
    for i in range(2, len(stack)):
        frameInfo = inspect.getframeinfo(stack[i][0])
        if i == 2:
            callerFilePath = frameInfo.filename
            callerFuncName = frameInfo.function
        elif callerFilePath == frameInfo.filename:
            # this function called the previous function
            callerFuncName = frameInfo.function
        else:
            break

    callerDir, callerFileNameWithExt = os.path.split(callerFilePath)
    callerFileName = os.path.splitext(callerFileNameWithExt)[0]
    # Prefer a .tests directory beside the caller if it exists; otherwise
    # fall back to the current working directory (empty dir argument).
    outDir = os.path.join(callerDir, ".tests")
    if not os.path.isdir(outDir):
        outDir = ""
    prefix = "%s_%s-" % (callerFileName, callerFuncName)
    # mktemp only generates a name: the caller is expected to create the
    # file itself, and the random component keeps nested uses distinct.
    outPath = tempfile.mktemp(dir=outDir, suffix=ext, prefix=prefix)
    if os.path.exists(outPath):
        # There should not be a file there given the randomizer. Warn and
        # remove.
        # Use stacklevel 3 so that the warning is reported from the end of the
        # with block
        warnings.warn("Unexpectedly found pre-existing tempfile named %r" % (outPath,),
                      stacklevel=3)
        try:
            os.remove(outPath)
        except OSError:
            pass

    yield outPath

    # Note: this code only runs when the with block exits without an
    # exception; on an exception the file is deliberately left behind.
    fileExists = os.path.exists(outPath)
    if expectOutput:
        if not fileExists:
            raise RuntimeError("Temp file expected named {} but none found".format(outPath))
    else:
        if fileExists:
            raise RuntimeError("Unexpectedly discovered temp file named {}".format(outPath))
    # Try to clean up the file regardless
    if fileExists:
        try:
            os.remove(outPath)
        except OSError as e:
            # Use stacklevel 3 so that the warning is reported from the end of
            # the with block.
            warnings.warn("Warning: could not remove file %r: %s" % (outPath, e), stacklevel=3)
class TestCase(unittest.TestCase):
    """A `unittest.TestCase` augmented with extra assertion helpers.

    Free functions in this module decorated with ``inTestCase`` are attached
    to this class as methods for convenience.
    """
def inTestCase(func: Callable) -> Callable:
    """Attach a free function to the custom `TestCase` class.

    The function remains usable as a free function; it is additionally
    installed on `TestCase` under the same name so it can be called as a
    method.

    Parameters
    ----------
    func : callable
        Function to expose as a `TestCase` method.

    Returns
    -------
    func : callable
        The unmodified input function.
    """
    name = func.__name__
    setattr(TestCase, name, func)
    return func
def debugger(*exceptions):
    """Decorator to enter the debugger when there's an uncaught exception.

    To use, just slap a ``@debugger()`` on your function.

    You may provide specific exception classes to catch as arguments to
    the decorator function, e.g.,
    ``@debugger(RuntimeError, NotImplementedError)``.
    When no exceptions are given, any `Exception` triggers the debugger.

    Code provided by "Rosh Oxymoron" on StackOverflow:
    http://stackoverflow.com/questions/4398967/python-unit-testing-automatically-running-the-debugger-when-a-test-fails

    Notes
    -----
    Consider using ``pytest --pdb`` instead of this decorator.
    """
    # Default to catching everything derived from Exception.
    caught = exceptions if exceptions else (Exception, )

    def decorator(f):
        @functools.wraps(f)
        def wrapper(*args, **kwargs):
            try:
                return f(*args, **kwargs)
            except caught:
                # Import lazily so the debugger machinery is only loaded
                # when a matching exception actually occurs.
                import pdb
                import sys
                pdb.post_mortem(sys.exc_info()[2])
        return wrapper
    return decorator
def plotImageDiff(lhs: numpy.ndarray, rhs: numpy.ndarray, bad: Optional[numpy.ndarray] = None,
                  diff: Optional[numpy.ndarray] = None, plotFileName: Optional[str] = None) -> None:
    """Plot the comparison of two 2-d NumPy arrays.

    Parameters
    ----------
    lhs : `numpy.ndarray`
        LHS values to compare; a 2-d NumPy array
    rhs : `numpy.ndarray`
        RHS values to compare; a 2-d NumPy array
    bad : `numpy.ndarray`
        A 2-d boolean NumPy array of values to emphasize in the plots
    diff : `numpy.ndarray`
        difference array; a 2-d NumPy array, or None to show lhs-rhs
    plotFileName : `str`
        Filename to save the plot to. If None, the plot will be displayed in
        a window.

    Notes
    -----
    This method uses `matplotlib` and imports it internally; it should be
    wrapped in a try/except block within packages that do not depend on
    `matplotlib` (including `~lsst.utils`).
    """
    from matplotlib import pyplot
    if diff is None:
        diff = lhs - rhs
    pyplot.figure()
    if bad is not None:
        # make an rgba image that's red and transparent where not bad
        badImage = numpy.zeros(bad.shape + (4,), dtype=numpy.uint8)
        badImage[:, :, 0] = 255
        badImage[:, :, 1] = 0
        badImage[:, :, 2] = 0
        badImage[:, :, 3] = 255*bad
    # Shared color scale for lhs/rhs (row 1) and a separate one for the
    # diff-scaled copies (row 2), so the two rows are each self-consistent.
    vmin1 = numpy.minimum(numpy.min(lhs), numpy.min(rhs))
    vmax1 = numpy.maximum(numpy.max(lhs), numpy.max(rhs))
    vmin2 = numpy.min(diff)
    vmax2 = numpy.max(diff)
    # Layout: a 2x3 grid; top row uses the lhs/rhs scale, bottom row the
    # diff scale. im1/im2 retain the handles from the final iteration,
    # which is what the colorbars below are attached to.
    for n, (image, title) in enumerate([(lhs, "lhs"), (rhs, "rhs"), (diff, "diff")]):
        pyplot.subplot(2, 3, n + 1)
        im1 = pyplot.imshow(image, cmap=pyplot.cm.gray, interpolation='nearest', origin='lower',
                            vmin=vmin1, vmax=vmax1)
        if bad is not None:
            pyplot.imshow(badImage, alpha=0.2, interpolation='nearest', origin='lower')
        pyplot.axis("off")
        pyplot.title(title)
        pyplot.subplot(2, 3, n + 4)
        im2 = pyplot.imshow(image, cmap=pyplot.cm.gray, interpolation='nearest', origin='lower',
                            vmin=vmin2, vmax=vmax2)
        if bad is not None:
            pyplot.imshow(badImage, alpha=0.2, interpolation='nearest', origin='lower')
        pyplot.axis("off")
        pyplot.title(title)
    pyplot.subplots_adjust(left=0.05, bottom=0.05, top=0.92, right=0.75, wspace=0.05, hspace=0.05)
    # Dedicated axes on the right for the two colorbars.
    cax1 = pyplot.axes([0.8, 0.55, 0.05, 0.4])
    pyplot.colorbar(im1, cax=cax1)
    cax2 = pyplot.axes([0.8, 0.05, 0.05, 0.4])
    pyplot.colorbar(im2, cax=cax2)
    if plotFileName:
        pyplot.savefig(plotFileName)
    else:
        pyplot.show()
@inTestCase
def assertFloatsAlmostEqual(testCase: unittest.TestCase, lhs: Union[float, numpy.ndarray],
                            rhs: Union[float, numpy.ndarray],
                            rtol: Optional[float] = sys.float_info.epsilon,
                            atol: Optional[float] = sys.float_info.epsilon, relTo: Optional[float] = None,
                            printFailures: bool = True, plotOnFailure: bool = False,
                            plotFileName: Optional[str] = None, invert: bool = False,
                            msg: Optional[str] = None, ignoreNaNs: bool = False) -> None:
    """Highly-configurable floating point comparisons for scalars and arrays.

    The test assertion will fail if all elements ``lhs`` and ``rhs`` are not
    equal to within the tolerances specified by ``rtol`` and ``atol``.
    More precisely, the comparison is:

    ``abs(lhs - rhs) <= relTo*rtol OR abs(lhs - rhs) <= atol``

    If ``rtol`` or ``atol`` is `None`, that term in the comparison is not
    performed at all.

    When not specified, ``relTo`` is the elementwise maximum of the absolute
    values of ``lhs`` and ``rhs``. If set manually, it should usually be set
    to either ``lhs`` or ``rhs``, or a scalar value typical of what is
    expected.

    Parameters
    ----------
    testCase : `unittest.TestCase`
        Instance the test is part of.
    lhs : scalar or array-like
        LHS value(s) to compare; may be a scalar or array-like of any
        dimension.
    rhs : scalar or array-like
        RHS value(s) to compare; may be a scalar or array-like of any
        dimension.
    rtol : `float`, optional
        Relative tolerance for comparison; defaults to double-precision
        epsilon.
    atol : `float`, optional
        Absolute tolerance for comparison; defaults to double-precision
        epsilon.
    relTo : `float`, optional
        Value to which comparison with rtol is relative.
    printFailures : `bool`, optional
        Upon failure, print all inequal elements as part of the message.
    plotOnFailure : `bool`, optional
        Upon failure, plot the originals and their residual with matplotlib.
        Only 2-d arrays are supported.
    plotFileName : `str`, optional
        Filename to save the plot to. If `None`, the plot will be displayed in
        a window.
    invert : `bool`, optional
        If `True`, invert the comparison and fail only if any elements *are*
        equal. Used to implement `~lsst.utils.tests.assertFloatsNotEqual`,
        which should generally be used instead for clarity.
    msg : `str`, optional
        String to append to the error message when assert fails.
    ignoreNaNs : `bool`, optional
        If `True` (`False` is default) mask out any NaNs from operand arrays
        before performing comparisons if they are in the same locations; NaNs
        in different locations are trigger test assertion failures, even when
        ``invert=True``. Scalar NaNs are treated like arrays containing only
        NaNs of the same shape as the other operand, and no comparisons are
        performed if both sides are scalar NaNs.

    Raises
    ------
    AssertionError
        The values are not almost equal.
    """
    if ignoreNaNs:
        # NaN locations must agree on both sides before they can be
        # masked out; a mismatch is always a failure, even when inverted.
        lhsMask = numpy.isnan(lhs)
        rhsMask = numpy.isnan(rhs)
        if not numpy.all(lhsMask == rhsMask):
            testCase.fail(f"lhs has {lhsMask.sum()} NaN values and rhs has {rhsMask.sum()} NaN values, "
                          f"in different locations.")
        if numpy.all(lhsMask):
            assert numpy.all(rhsMask), "Should be guaranteed by previous if."
            # All operands are fully NaN (either scalar NaNs or arrays of only
            # NaNs).
            return
        assert not numpy.all(rhsMask), "Should be guaranteed by prevoius two ifs."
        # If either operand is an array select just its not-NaN values. Note
        # that these expressions are never True for scalar operands, because if
        # they are NaN then the numpy.all checks above will catch them.
        if numpy.any(lhsMask):
            lhs = lhs[numpy.logical_not(lhsMask)]
        if numpy.any(rhsMask):
            rhs = rhs[numpy.logical_not(rhsMask)]
    if not numpy.isfinite(lhs).all():
        testCase.fail("Non-finite values in lhs")
    if not numpy.isfinite(rhs).all():
        testCase.fail("Non-finite values in rhs")
    diff = lhs - rhs
    absDiff = numpy.abs(lhs - rhs)
    if rtol is not None:
        if relTo is None:
            # Default: relative to the elementwise larger magnitude.
            relTo = numpy.maximum(numpy.abs(lhs), numpy.abs(rhs))
        else:
            relTo = numpy.abs(relTo)
        bad = absDiff > rtol*relTo
        if atol is not None:
            # An element passes if it satisfies EITHER tolerance.
            bad = numpy.logical_and(bad, absDiff > atol)
    else:
        if atol is None:
            raise ValueError("rtol and atol cannot both be None")
        bad = absDiff > atol
    failed = numpy.any(bad)
    if invert:
        failed = not failed
        bad = numpy.logical_not(bad)
        cmpStr = "=="
        failStr = "are the same"
    else:
        cmpStr = "!="
        failStr = "differ"
    errMsg = []
    if failed:
        if numpy.isscalar(bad):
            if rtol is None:
                errMsg = ["%s %s %s; diff=%s with atol=%s"
                          % (lhs, cmpStr, rhs, absDiff, atol)]
            elif atol is None:
                errMsg = ["%s %s %s; diff=%s/%s=%s with rtol=%s"
                          % (lhs, cmpStr, rhs, absDiff, relTo, absDiff/relTo, rtol)]
            else:
                errMsg = ["%s %s %s; diff=%s/%s=%s with rtol=%s, atol=%s"
                          % (lhs, cmpStr, rhs, absDiff, relTo, absDiff/relTo, rtol, atol)]
        else:
            errMsg = ["%d/%d elements %s with rtol=%s, atol=%s"
                      % (bad.sum(), bad.size, failStr, rtol, atol)]
            if plotOnFailure:
                if len(lhs.shape) != 2 or len(rhs.shape) != 2:
                    raise ValueError("plotOnFailure is only valid for 2-d arrays")
                try:
                    plotImageDiff(lhs, rhs, bad, diff=diff, plotFileName=plotFileName)
                except ImportError:
                    errMsg.append("Failure plot requested but matplotlib could not be imported.")
            if printFailures:
                # Make sure everything is an array if any of them are, so we
                # can treat them the same (diff and absDiff are arrays if
                # either rhs or lhs is), and we don't get here if neither is.
                if numpy.isscalar(relTo):
                    relTo = numpy.ones(bad.shape, dtype=float) * relTo
                if numpy.isscalar(lhs):
                    lhs = numpy.ones(bad.shape, dtype=float) * lhs
                if numpy.isscalar(rhs):
                    rhs = numpy.ones(bad.shape, dtype=float) * rhs
                if rtol is None:
                    for a, b, diff in zip(lhs[bad], rhs[bad], absDiff[bad]):
                        errMsg.append("%s %s %s (diff=%s)" % (a, cmpStr, b, diff))
                else:
                    for a, b, diff, rel in zip(lhs[bad], rhs[bad], absDiff[bad], relTo[bad]):
                        errMsg.append("%s %s %s (diff=%s/%s=%s)" % (a, cmpStr, b, diff, rel, diff/rel))

    if msg is not None:
        errMsg.append(msg)
    testCase.assertFalse(failed, msg="\n".join(errMsg))
@inTestCase
def assertFloatsNotEqual(testCase: unittest.TestCase, lhs: Union[float, numpy.ndarray],
                         rhs: Union[float, numpy.ndarray], **kwds: Any) -> None:
    """Fail a test if the given floating point values are equal to within the
    given tolerances.

    This is the inverse of `~lsst.utils.tests.assertFloatsAlmostEqual`; see
    that function for the meaning of the tolerance keyword arguments.

    Parameters
    ----------
    testCase : `unittest.TestCase`
        Instance the test is part of.
    lhs : scalar or array-like
        LHS value(s) to compare; may be a scalar or array-like of any
        dimension.
    rhs : scalar or array-like
        RHS value(s) to compare; may be a scalar or array-like of any
        dimension.

    Raises
    ------
    AssertionError
        The values are almost equal.
    """
    # Delegate with the comparison inverted.
    return assertFloatsAlmostEqual(testCase, lhs, rhs, invert=True, **kwds)
@inTestCase
def assertFloatsEqual(testCase: unittest.TestCase, lhs: Union[float, numpy.ndarray],
                      rhs: Union[float, numpy.ndarray], **kwargs: Any) -> None:
    """Assert that lhs == rhs (both numeric types, whether scalar or array).

    Equivalent to calling `~lsst.utils.tests.assertFloatsAlmostEqual` with
    ``rtol=atol=0``; see that function for further details.

    Parameters
    ----------
    testCase : `unittest.TestCase`
        Instance the test is part of.
    lhs : scalar or array-like
        LHS value(s) to compare; may be a scalar or array-like of any
        dimension.
    rhs : scalar or array-like
        RHS value(s) to compare; may be a scalar or array-like of any
        dimension.

    Raises
    ------
    AssertionError
        The values are not equal.
    """
    # Zero tolerances turn the "almost equal" check into exact equality.
    return assertFloatsAlmostEqual(testCase, lhs, rhs, rtol=0, atol=0, **kwargs)
746def _settingsIterator(settings: Dict[str, Sequence[Any]]) -> Iterator[Dict[str, Any]]:
747 """Return an iterator for the provided test settings
749 Parameters
750 ----------
751 settings : `dict` (`str`: iterable)
752 Lists of test parameters. Each should be an iterable of the same
753 length. If a string is provided as an iterable, it will be converted
754 to a list of a single string.
756 Raises
757 ------
758 AssertionError
759 If the ``settings`` are not of the same length.
761 Yields
762 ------
763 parameters : `dict` (`str`: anything)
764 Set of parameters.
765 """
766 for name, values in settings.items():
767 if isinstance(values, str): 767 ↛ 770line 767 didn't jump to line 770, because the condition on line 767 was never true
768 # Probably meant as a single-element string, rather than an
769 # iterable of chars.
770 settings[name] = [values]
771 num = len(next(iter(settings.values()))) # Number of settings
772 for name, values in settings.items():
773 assert len(values) == num, f"Length mismatch for setting {name}: {len(values)} vs {num}"
774 for ii in range(num):
775 values = [settings[kk][ii] for kk in settings]
776 yield dict(zip(settings, values))
def classParameters(**settings: Sequence[Any]) -> Callable:
    """Class decorator for generating unit tests.

    For each combination in ``settings`` a new subclass is generated, with
    the parameters installed as class variables and the values embedded in
    the class name, and registered in the decorated class's module.

    Parameters
    ----------
    **settings : `dict` (`str`: iterable)
        The lists of test parameters to set as class variables in turn. Each
        should be an iterable of the same length.

    Examples
    --------
    ::

        @classParameters(foo=[1, 2], bar=[3, 4])
        class MyTestCase(unittest.TestCase):
            ...

    will generate two classes, as if you wrote::

        class MyTestCase_1_3(unittest.TestCase):
            foo = 1
            bar = 3
            ...

        class MyTestCase_2_4(unittest.TestCase):
            foo = 2
            bar = 4
            ...
    """
    def decorator(cls: Type) -> None:
        # Register the generated subclasses in the module that defined cls,
        # where the test discovery machinery will find them.
        namespace = sys.modules[cls.__module__].__dict__
        for params in _settingsIterator(settings):
            suffix = "_".join(str(value) for value in params.values())
            name = f"{cls.__name__}_{suffix}"
            attrs = dict(cls.__dict__)
            attrs.update(params)
            namespace[name] = type(name, (cls,), attrs)
    return decorator
def methodParameters(**settings: Sequence[Any]) -> Callable:
    """Method decorator for unit tests.

    The decorated test method is invoked once per parameter combination,
    inside ``TestCase.subTest`` so that the offending values are reported
    on failure.

    Parameters
    ----------
    **settings : `dict` (`str`: iterable)
        The lists of test parameters. Each should be an iterable of the same
        length.

    Examples
    --------
    ::

        @methodParameters(foo=[1, 2], bar=[3, 4])
        def testSomething(self, foo, bar):
            ...

    will run::

        testSomething(foo=1, bar=3)
        testSomething(foo=2, bar=4)
    """
    def decorator(func: Callable) -> Callable:
        @functools.wraps(func)
        def wrapper(self: unittest.TestCase, *args: Any, **kwargs: Any) -> None:
            for params in _settingsIterator(settings):
                # Merge the parameters for this iteration over any
                # caller-supplied keyword arguments.
                call_kwargs = dict(kwargs, **params)
                with self.subTest(**params):
                    func(self, *args, **call_kwargs)
        return wrapper
    return decorator
859def _cartesianProduct(settings: Mapping[str, Sequence[Any]]) -> Mapping[str, Sequence[Any]]:
860 """Return the cartesian product of the settings
862 Parameters
863 ----------
864 settings : `dict` mapping `str` to `iterable`
865 Parameter combinations.
867 Returns
868 -------
869 product : `dict` mapping `str` to `iterable`
870 Parameter combinations covering the cartesian product (all possible
871 combinations) of the input parameters.
873 Example
874 -------
876 cartesianProduct({"foo": [1, 2], "bar": ["black", "white"]})
878 returns:
880 {"foo": [1, 1, 2, 2], "bar": ["black", "white", "black", "white"]}
881 """
882 product: Dict[str, List[Any]] = {kk: [] for kk in settings}
883 for values in itertools.product(*settings.values()):
884 for kk, vv in zip(settings.keys(), values):
885 product[kk].append(vv)
886 return product
def classParametersProduct(**settings: Sequence[Any]) -> Callable:
    """Class decorator for generating unit tests.

    Like `classParameters`, but the generated classes cover the cartesian
    product (all combinations) of the supplied ``settings``.

    Parameters
    ----------
    **settings : `dict` (`str`: iterable)
        The lists of test parameters to set as class variables in turn. Each
        should be an iterable.

    Examples
    --------
    ::

        @classParametersProduct(foo=[1, 2], bar=[3, 4])
        class MyTestCase(unittest.TestCase):
            ...

    will generate four classes, as if you wrote::

        class MyTestCase_1_3(unittest.TestCase):
            foo = 1
            bar = 3
            ...

        class MyTestCase_1_4(unittest.TestCase):
            foo = 1
            bar = 4
            ...

        class MyTestCase_2_3(unittest.TestCase):
            foo = 2
            bar = 3
            ...

        class MyTestCase_2_4(unittest.TestCase):
            foo = 2
            bar = 4
            ...

    Note that the values are embedded in the class name.
    """
    # Expand to all combinations, then reuse the plain decorator.
    return classParameters(**_cartesianProduct(settings))
def methodParametersProduct(**settings: Sequence[Any]) -> Callable:
    """Method decorator for unit tests.

    Like `methodParameters`, but the test is run for the cartesian product
    (all combinations) of the supplied ``settings``, with
    `~unittest.TestCase.subTest` reporting the values on failure.

    Parameters
    ----------
    **settings : `dict` (`str`: iterable)
        The parameter combinations to test. Each should be an iterable.

    Example
    -------

        @methodParametersProduct(foo=[1, 2], bar=["black", "white"])
        def testSomething(self, foo, bar):
            ...

    will run:

        testSomething(foo=1, bar="black")
        testSomething(foo=1, bar="white")
        testSomething(foo=2, bar="black")
        testSomething(foo=2, bar="white")
    """
    # Expand to all combinations, then reuse the plain decorator.
    return methodParameters(**_cartesianProduct(settings))
@contextlib.contextmanager
def temporaryDirectory() -> Iterator[str]:
    """Context manager that creates and destroys a temporary directory.

    The difference from `tempfile.TemporaryDirectory` is that this ignores
    errors when deleting a directory, which may happen with some filesystems.

    Yields
    ------
    `str`
        Path to the newly created temporary directory.
    """
    tmpdir = tempfile.mkdtemp()
    try:
        yield tmpdir
    finally:
        # Clean up even when the with block raises; previously an exception
        # in the body leaked the directory because rmtree was never reached.
        shutil.rmtree(tmpdir, ignore_errors=True)