Coverage for python/lsst/utils/tests.py: 29%
323 statements
« prev ^ index » next coverage.py v7.2.3, created at 2023-04-19 10:38 +0000
1# This file is part of utils.
2#
3# Developed for the LSST Data Management System.
4# This product includes software developed by the LSST Project
5# (https://www.lsst.org).
6# See the COPYRIGHT file at the top-level directory of this distribution
7# for details of code ownership.
8#
9# Use of this source code is governed by a 3-clause BSD-style
10# license that can be found in the LICENSE file.
12"""Support code for running unit tests"""
# Public API of this module. The *Product variants are documented public
# helpers parallel to classParameters/methodParameters and are exported here
# so that star-imports expose the full decorator family.
__all__ = [
    "init",
    "MemoryTestCase",
    "ExecutablesTestCase",
    "getTempFilePath",
    "TestCase",
    "assertFloatsAlmostEqual",
    "assertFloatsNotEqual",
    "assertFloatsEqual",
    "debugger",
    "classParameters",
    "methodParameters",
    "classParametersProduct",
    "methodParametersProduct",
    "temporaryDirectory",
]
29import contextlib
30import functools
31import gc
32import inspect
33import itertools
34import os
35import re
36import shutil
37import subprocess
38import sys
39import tempfile
40import unittest
41import warnings
42from typing import Any, Callable, Dict, Iterator, List, Mapping, Optional, Sequence, Set, Type, Union
44import numpy
45import psutil
# Baseline set of file paths considered "already open". init() refreshes it,
# and MemoryTestCase.testFileDescriptorLeaks() reports anything open beyond
# this baseline. Starts empty until init() is called.
open_files = set()
def _get_open_files() -> Set[str]:
    """Return the set of file paths currently open in this process.

    Returns
    -------
    open_files : `set`
        Set containing the list of open files.
    """
    # Ask psutil for this process's open file handles and keep the paths.
    return {handle.path for handle in psutil.Process().open_files()}
def init() -> None:
    """Initialize the memory tester and file descriptor leak tester."""
    global open_files
    # Reset the list of open files: snapshot what is open right now so that
    # later leak checks only report files opened after this call.
    open_files = _get_open_files()
def sort_tests(tests) -> unittest.TestSuite:
    """Combine test suites, scheduling any MemoryTestCase suites last.

    `lsst.utils.tests.MemoryTestCase` tests should always run after any other
    tests in the module.

    Parameters
    ----------
    tests : sequence
        Sequence of test suites.

    Returns
    -------
    suite : `unittest.TestSuite`
        A combined `~unittest.TestSuite` with
        `~lsst.utils.tests.MemoryTestCase` at the end.
    """
    suite = unittest.TestSuite()
    deferred = []
    for candidate in tests:
        try:
            # Peek at the first test in the suite to decide whether this
            # is a MemoryTestCase suite. A loop (rather than next()) is
            # used because a suite may contain no test methods at all.
            mro = None
            for member in candidate:
                mro = inspect.getmro(type(member))
                break
            if mro is not None and MemoryTestCase in mro:
                deferred.append(candidate)
            else:
                suite.addTests(candidate)
        except TypeError:
            # Not iterable: candidate is a bare test case, not a suite.
            if isinstance(candidate, MemoryTestCase):
                deferred.append(candidate)
            else:
                suite.addTest(candidate)
    # Memory tests always run last.
    suite.addTests(deferred)
    return suite
def suiteClassWrapper(tests):
    """Wrap the supplied suites in a single suite with MemoryTestCase last."""
    ordered = sort_tests(tests)
    return unittest.TestSuite(ordered)
# Replace the suiteClass callable in the defaultTestLoader
# so that we can reorder the test ordering. This will have
# no effect if no memory test cases are found.
# (suiteClassWrapper defers MemoryTestCase suites to the end of the run.)
unittest.defaultTestLoader.suiteClass = suiteClassWrapper
122class MemoryTestCase(unittest.TestCase):
123 """Check for resource leaks."""
125 ignore_regexps: List[str] = []
126 """List of regexps to ignore when checking for open files."""
128 @classmethod
129 def tearDownClass(cls) -> None:
130 """Reset the leak counter when the tests have been completed"""
131 init()
133 def testFileDescriptorLeaks(self) -> None:
134 """Check if any file descriptors are open since init() called.
136 Ignores files with certain known path components and any files
137 that match regexp patterns in class property ``ignore_regexps``.
138 """
139 gc.collect()
140 global open_files
141 now_open = _get_open_files()
143 # Some files are opened out of the control of the stack.
144 now_open = set(
145 f
146 for f in now_open
147 if not f.endswith(".car")
148 and not f.startswith("/proc/")
149 and not f.endswith(".ttf")
150 and not (f.startswith("/var/lib/") and f.endswith("/passwd"))
151 and not f.endswith("astropy.log")
152 and not f.endswith("mime/mime.cache")
153 and not any([re.search(r, f) for r in self.ignore_regexps])
154 )
156 diff = now_open.difference(open_files)
157 if diff:
158 for f in diff:
159 print("File open: %s" % f)
160 self.fail("Failed to close %d file%s" % (len(diff), "s" if len(diff) != 1 else ""))
class ExecutablesTestCase(unittest.TestCase):
    """Test that executables can be run and return good status.

    The test methods are dynamically created. Callers
    must subclass this class in their own test file and invoke
    the create_executable_tests() class method to register the tests.
    """

    # Number of executables found by create_executable_tests();
    # -1 means discovery has not been attempted yet, 0 means discovery ran
    # but found nothing (which makes setUpClass abort the run).
    TESTS_DISCOVERED = -1

    @classmethod
    def setUpClass(cls) -> None:
        """Abort testing if automated test creation was enabled and
        no tests were found.
        """
        if cls.TESTS_DISCOVERED == 0:
            raise RuntimeError("No executables discovered.")

    def testSanity(self) -> None:
        """Ensure that there is at least one test to be
        executed. This allows the test runner to trigger the class set up
        machinery to test whether there are some executables to test.
        """
        pass

    def assertExecutable(
        self,
        executable: str,
        root_dir: Optional[str] = None,
        args: Optional[Sequence[str]] = None,
        msg: Optional[str] = None,
    ) -> None:
        """Check an executable runs and returns good status.

        Prints output to standard out. On bad exit status the test
        fails. If the executable can not be located the test is skipped.

        Parameters
        ----------
        executable : `str`
            Path to an executable. ``root_dir`` is not used if this is an
            absolute path.
        root_dir : `str`, optional
            Directory containing executable. Ignored if `None`.
        args : `list` or `tuple`, optional
            Arguments to be provided to the executable.
        msg : `str`, optional
            Message to use when the test fails. Can be `None` for default
            message.

        Raises
        ------
        AssertionError
            The executable did not return 0 exit status.
        """
        if root_dir is not None and not os.path.isabs(executable):
            executable = os.path.join(root_dir, executable)

        # Form the argument list for subprocess
        sp_args = [executable]
        argstr = "no arguments"
        if args is not None:
            sp_args.extend(args)
            argstr = 'arguments "' + " ".join(args) + '"'

        print("Running executable '{}' with {}...".format(executable, argstr))
        if not os.path.exists(executable):
            # Missing executable is a skip, not a failure: the build may
            # legitimately not have produced it on this platform.
            self.skipTest("Executable {} is unexpectedly missing".format(executable))
        failmsg = None
        try:
            output = subprocess.check_output(sp_args)
        except subprocess.CalledProcessError as e:
            # Keep the captured output so it can still be printed below.
            output = e.output
            failmsg = "Bad exit status from '{}': {}".format(executable, e.returncode)
        print(output.decode("utf-8"))
        if failmsg:
            if msg is None:
                msg = failmsg
            self.fail(msg)

    @classmethod
    def _build_test_method(cls, executable: str, root_dir: str) -> None:
        """Build a test method and attach to class.

        A test method is created for the supplied executable located
        in the supplied root directory. This method is attached to the class
        so that the test runner will discover the test and run it.

        Parameters
        ----------
        cls : `object`
            The class in which to create the tests.
        executable : `str`
            Name of executable. Can be absolute path.
        root_dir : `str`
            Path to executable. Not used if executable path is absolute.
        """
        if not os.path.isabs(executable):
            executable = os.path.abspath(os.path.join(root_dir, executable))

        # Create the test name from the executable path.
        test_name = "test_exe_" + executable.replace("/", "_")

        # This is the function that will become the test method.
        # It takes *args so it can serve as an unbound method; args[0] is
        # the test instance (self) at run time.
        def test_executable_runs(*args: Any) -> None:
            self = args[0]
            self.assertExecutable(executable)

        # Give it a name and attach it to the class
        test_executable_runs.__name__ = test_name
        setattr(cls, test_name, test_executable_runs)

    @classmethod
    def create_executable_tests(cls, ref_file: str, executables: Optional[Sequence[str]] = None) -> None:
        """Discover executables to test and create corresponding test methods.

        Scans the directory containing the supplied reference file
        (usually ``__file__`` supplied from the test class) to look for
        executables. If executables are found a test method is created
        for each one. That test method will run the executable and
        check the returned value.

        Executable scripts with a ``.py`` extension and shared libraries
        are ignored by the scanner.

        This class method must be called before test discovery.

        Parameters
        ----------
        ref_file : `str`
            Path to a file within the directory to be searched.
            If the files are in the same location as the test file, then
            ``__file__`` can be used.
        executables : `list` or `tuple`, optional
            Sequence of executables that can override the automated
            detection. If an executable mentioned here is not found, a
            skipped test will be created for it, rather than a failed
            test.

        Examples
        --------
        >>> cls.create_executable_tests(__file__)
        """
        # Get the search directory from the reference file
        ref_dir = os.path.abspath(os.path.dirname(ref_file))

        if executables is None:
            # Look for executables to test by walking the tree
            executables = []
            for root, dirs, files in os.walk(ref_dir):
                for f in files:
                    # Skip Python files. Shared libraries are executable.
                    if not f.endswith(".py") and not f.endswith(".so"):
                        full_path = os.path.join(root, f)
                        if os.access(full_path, os.X_OK):
                            executables.append(full_path)

        # Store the number of tests found for later assessment.
        # Do not raise an exception if we have no executables as this would
        # cause the testing to abort before the test runner could properly
        # integrate it into the failure report.
        cls.TESTS_DISCOVERED = len(executables)

        # Create the test functions and attach them to the class
        for e in executables:
            cls._build_test_method(e, ref_dir)
@contextlib.contextmanager
def getTempFilePath(ext: str, expectOutput: bool = True) -> Iterator[str]:
    """Return a path suitable for a temporary file and try to delete the
    file on success.

    If the with block completes successfully then the file is deleted,
    if possible; failure results in a printed warning.
    If a file remains when it should not, a RuntimeError exception is
    raised. This exception is also raised if a file is not present on context
    manager exit when one is expected to exist.
    If the block exits with an exception the file is left on disk so it can
    be examined. The file name has a random component such that nested
    context managers can be used with the same file suffix.

    Parameters
    ----------
    ext : `str`
        File name extension, e.g. ``.fits``.
    expectOutput : `bool`, optional
        If `True`, a file should be created within the context manager.
        If `False`, a file should not be present when the context manager
        exits.

    Returns
    -------
    path : `str`
        Path for a temporary file. The path is a combination of the caller's
        file path and the name of the top-level function.

    Examples
    --------
    .. code-block:: python

        # file tests/testFoo.py
        import unittest
        import lsst.utils.tests

        class FooTestCase(unittest.TestCase):
            def testBasics(self):
                self.runTest()

            def runTest(self):
                with lsst.utils.tests.getTempFilePath(".fits") as tmpFile:
                    # if tests/.tests exists then
                    # tmpFile = "tests/.tests/testFoo_testBasics.fits"
                    # otherwise tmpFile = "testFoo_testBasics.fits"
                    ...
                # at the end of this "with" block the path tmpFile will be
                # deleted, but only if the file exists and the "with"
                # block terminated normally (rather than with an exception)
                ...
    """
    stack = inspect.stack()
    # Walk up the stack (past this generator and the contextmanager
    # machinery) to find the outermost function defined in the caller's
    # file; its name and file provide a readable, unique-ish prefix.
    callerFilePath = None
    callerFuncName = None
    for frameRecord in stack[2:]:
        frameInfo = inspect.getframeinfo(frameRecord[0])
        if callerFilePath is None:
            callerFilePath = frameInfo.filename
            callerFuncName = frameInfo.function
        elif frameInfo.filename == callerFilePath:
            # This function called the previous function in the same file.
            callerFuncName = frameInfo.function
        else:
            break

    callerDir, callerFileNameWithExt = os.path.split(callerFilePath)
    callerFileName = os.path.splitext(callerFileNameWithExt)[0]
    outDir = os.path.join(callerDir, ".tests")
    if not os.path.isdir(outDir):
        # No .tests directory: fall back to the current directory.
        outDir = ""
    prefix = "%s_%s-" % (callerFileName, callerFuncName)
    outPath = tempfile.mktemp(dir=outDir, suffix=ext, prefix=prefix)
    if os.path.exists(outPath):
        # There should not be a file there given the randomizer. Warn and
        # remove. Use stacklevel 3 so that the warning is reported from
        # the end of the with block.
        warnings.warn("Unexpectedly found pre-existing tempfile named %r" % (outPath,), stacklevel=3)
        try:
            os.remove(outPath)
        except OSError:
            pass

    yield outPath

    # Only reached when the with block exited without an exception.
    fileExists = os.path.exists(outPath)
    if expectOutput and not fileExists:
        raise RuntimeError("Temp file expected named {} but none found".format(outPath))
    if not expectOutput and fileExists:
        raise RuntimeError("Unexpectedly discovered temp file named {}".format(outPath))
    # Try to clean up the file regardless
    if fileExists:
        try:
            os.remove(outPath)
        except OSError as e:
            # Use stacklevel 3 so that the warning is reported from the end
            # of the with block.
            warnings.warn("Warning: could not remove file %r: %s" % (outPath, e), stacklevel=3)
class TestCase(unittest.TestCase):
    """A `unittest.TestCase` augmented with extra assertion helpers.

    Free functions decorated with `inTestCase` are attached to this class
    as methods for convenience.
    """
def inTestCase(func: Callable) -> Callable:
    """Add a free function to our custom TestCase class, while
    also making it available as a free function.

    Parameters
    ----------
    func : `Callable`
        Function to attach to `TestCase` under its own ``__name__``.

    Returns
    -------
    func : `Callable`
        The unmodified input function, so this can be used as a decorator.
    """
    setattr(TestCase, func.__name__, func)
    return func
def debugger(*exceptions):
    """Enter the debugger when there's an uncaught exception

    To use, just slap a ``@debugger()`` on your function.

    You may provide specific exception classes to catch as arguments to
    the decorator function, e.g.,
    ``@debugger(RuntimeError, NotImplementedError)``.
    When no exception classes are given, any `Exception` triggers the
    debugger.

    Code provided by "Rosh Oxymoron" on StackOverflow:
    http://stackoverflow.com/questions/4398967/python-unit-testing-automatically-running-the-debugger-when-a-test-fails

    Notes
    -----
    Consider using ``pytest --pdb`` instead of this decorator.
    """
    if not exceptions:
        # Default to catching any Exception. (An earlier docstring claimed
        # the default was AssertionError only, which did not match the code.)
        exceptions = (Exception,)

    def decorator(f):
        @functools.wraps(f)
        def wrapper(*args, **kwargs):
            try:
                return f(*args, **kwargs)
            except exceptions:
                # Imported lazily so pdb is only loaded on failure.
                import pdb
                import sys

                pdb.post_mortem(sys.exc_info()[2])

        return wrapper

    return decorator
def plotImageDiff(
    lhs: numpy.ndarray,
    rhs: numpy.ndarray,
    bad: Optional[numpy.ndarray] = None,
    diff: Optional[numpy.ndarray] = None,
    plotFileName: Optional[str] = None,
) -> None:
    """Plot the comparison of two 2-d NumPy arrays.

    Parameters
    ----------
    lhs : `numpy.ndarray`
        LHS values to compare; a 2-d NumPy array
    rhs : `numpy.ndarray`
        RHS values to compare; a 2-d NumPy array
    bad : `numpy.ndarray`
        A 2-d boolean NumPy array of values to emphasize in the plots
    diff : `numpy.ndarray`
        difference array; a 2-d NumPy array, or None to show lhs-rhs
    plotFileName : `str`
        Filename to save the plot to. If None, the plot will be displayed in
        a window.

    Notes
    -----
    This method uses `matplotlib` and imports it internally; it should be
    wrapped in a try/except block within packages that do not depend on
    `matplotlib` (including `~lsst.utils`).
    """
    # Imported lazily so this module does not require matplotlib.
    from matplotlib import pyplot

    if diff is None:
        diff = lhs - rhs
    pyplot.figure()
    if bad is not None:
        # make an rgba image that's red and transparent where not bad
        badImage = numpy.zeros(bad.shape + (4,), dtype=numpy.uint8)
        badImage[:, :, 0] = 255
        badImage[:, :, 1] = 0
        badImage[:, :, 2] = 0
        badImage[:, :, 3] = 255 * bad
    # Shared color scale for lhs/rhs (top row) and for diff (bottom row).
    vmin1 = numpy.minimum(numpy.min(lhs), numpy.min(rhs))
    vmax1 = numpy.maximum(numpy.max(lhs), numpy.max(rhs))
    vmin2 = numpy.min(diff)
    vmax2 = numpy.max(diff)
    # 2x3 grid: top row uses the data scale, bottom row uses the diff scale.
    # NOTE(review): both rows display ``image`` (not ``diff``) — the bottom
    # row is the same images rescaled to the diff range; confirm intended.
    for n, (image, title) in enumerate([(lhs, "lhs"), (rhs, "rhs"), (diff, "diff")]):
        pyplot.subplot(2, 3, n + 1)
        im1 = pyplot.imshow(
            image, cmap=pyplot.cm.gray, interpolation="nearest", origin="lower", vmin=vmin1, vmax=vmax1
        )
        if bad is not None:
            pyplot.imshow(badImage, alpha=0.2, interpolation="nearest", origin="lower")
        pyplot.axis("off")
        pyplot.title(title)
        pyplot.subplot(2, 3, n + 4)
        im2 = pyplot.imshow(
            image, cmap=pyplot.cm.gray, interpolation="nearest", origin="lower", vmin=vmin2, vmax=vmax2
        )
        if bad is not None:
            pyplot.imshow(badImage, alpha=0.2, interpolation="nearest", origin="lower")
        pyplot.axis("off")
        pyplot.title(title)
    # Leave room on the right for the two colorbars.
    pyplot.subplots_adjust(left=0.05, bottom=0.05, top=0.92, right=0.75, wspace=0.05, hspace=0.05)
    cax1 = pyplot.axes([0.8, 0.55, 0.05, 0.4])
    pyplot.colorbar(im1, cax=cax1)
    cax2 = pyplot.axes([0.8, 0.05, 0.05, 0.4])
    pyplot.colorbar(im2, cax=cax2)
    if plotFileName:
        pyplot.savefig(plotFileName)
    else:
        pyplot.show()
556@inTestCase
def assertFloatsAlmostEqual(
    testCase: unittest.TestCase,
    lhs: Union[float, numpy.ndarray],
    rhs: Union[float, numpy.ndarray],
    rtol: Optional[float] = sys.float_info.epsilon,
    atol: Optional[float] = sys.float_info.epsilon,
    relTo: Optional[float] = None,
    printFailures: bool = True,
    plotOnFailure: bool = False,
    plotFileName: Optional[str] = None,
    invert: bool = False,
    msg: Optional[str] = None,
    ignoreNaNs: bool = False,
) -> None:
    """Highly-configurable floating point comparisons for scalars and arrays.

    The test assertion will fail if all elements ``lhs`` and ``rhs`` are not
    equal to within the tolerances specified by ``rtol`` and ``atol``.
    More precisely, the comparison is:

    ``abs(lhs - rhs) <= relTo*rtol OR abs(lhs - rhs) <= atol``

    If ``rtol`` or ``atol`` is `None`, that term in the comparison is not
    performed at all.

    When not specified, ``relTo`` is the elementwise maximum of the absolute
    values of ``lhs`` and ``rhs``. If set manually, it should usually be set
    to either ``lhs`` or ``rhs``, or a scalar value typical of what is
    expected.

    Parameters
    ----------
    testCase : `unittest.TestCase`
        Instance the test is part of.
    lhs : scalar or array-like
        LHS value(s) to compare; may be a scalar or array-like of any
        dimension.
    rhs : scalar or array-like
        RHS value(s) to compare; may be a scalar or array-like of any
        dimension.
    rtol : `float`, optional
        Relative tolerance for comparison; defaults to double-precision
        epsilon.
    atol : `float`, optional
        Absolute tolerance for comparison; defaults to double-precision
        epsilon.
    relTo : `float`, optional
        Value to which comparison with rtol is relative.
    printFailures : `bool`, optional
        Upon failure, print all inequal elements as part of the message.
    plotOnFailure : `bool`, optional
        Upon failure, plot the originals and their residual with matplotlib.
        Only 2-d arrays are supported.
    plotFileName : `str`, optional
        Filename to save the plot to. If `None`, the plot will be displayed in
        a window.
    invert : `bool`, optional
        If `True`, invert the comparison and fail only if any elements *are*
        equal. Used to implement `~lsst.utils.tests.assertFloatsNotEqual`,
        which should generally be used instead for clarity.
    msg : `str`, optional
        String to append to the error message when assert fails.
    ignoreNaNs : `bool`, optional
        If `True` (`False` is default) mask out any NaNs from operand arrays
        before performing comparisons if they are in the same locations; NaNs
        in different locations are trigger test assertion failures, even when
        ``invert=True``. Scalar NaNs are treated like arrays containing only
        NaNs of the same shape as the other operand, and no comparisons are
        performed if both sides are scalar NaNs.

    Raises
    ------
    AssertionError
        The values are not almost equal.
    """
    if ignoreNaNs:
        lhsMask = numpy.isnan(lhs)
        rhsMask = numpy.isnan(rhs)
        if not numpy.all(lhsMask == rhsMask):
            testCase.fail(
                f"lhs has {lhsMask.sum()} NaN values and rhs has {rhsMask.sum()} NaN values, "
                "in different locations."
            )
        if numpy.all(lhsMask):
            assert numpy.all(rhsMask), "Should be guaranteed by previous if."
            # All operands are fully NaN (either scalar NaNs or arrays of only
            # NaNs).
            return
        assert not numpy.all(rhsMask), "Should be guaranteed by previous two ifs."
        # If either operand is an array select just its not-NaN values. Note
        # that these expressions are never True for scalar operands, because if
        # they are NaN then the numpy.all checks above will catch them.
        if numpy.any(lhsMask):
            lhs = lhs[numpy.logical_not(lhsMask)]
        if numpy.any(rhsMask):
            rhs = rhs[numpy.logical_not(rhsMask)]
    if not numpy.isfinite(lhs).all():
        testCase.fail("Non-finite values in lhs")
    if not numpy.isfinite(rhs).all():
        testCase.fail("Non-finite values in rhs")
    diff = lhs - rhs
    # Reuse diff rather than recomputing lhs - rhs.
    absDiff = numpy.abs(diff)
    if rtol is not None:
        if relTo is None:
            relTo = numpy.maximum(numpy.abs(lhs), numpy.abs(rhs))
        else:
            relTo = numpy.abs(relTo)
        bad = absDiff > rtol * relTo
        if atol is not None:
            # Elements are bad only if they fail BOTH tolerance checks.
            bad = numpy.logical_and(bad, absDiff > atol)
    else:
        if atol is None:
            raise ValueError("rtol and atol cannot both be None")
        bad = absDiff > atol
    failed = numpy.any(bad)
    if invert:
        failed = not failed
        bad = numpy.logical_not(bad)
        cmpStr = "=="
        failStr = "are the same"
    else:
        cmpStr = "!="
        failStr = "differ"
    errMsg = []
    if failed:
        if numpy.isscalar(bad):
            if rtol is None:
                errMsg = ["%s %s %s; diff=%s with atol=%s" % (lhs, cmpStr, rhs, absDiff, atol)]
            elif atol is None:
                errMsg = [
                    "%s %s %s; diff=%s/%s=%s with rtol=%s"
                    % (lhs, cmpStr, rhs, absDiff, relTo, absDiff / relTo, rtol)
                ]
            else:
                errMsg = [
                    "%s %s %s; diff=%s/%s=%s with rtol=%s, atol=%s"
                    % (lhs, cmpStr, rhs, absDiff, relTo, absDiff / relTo, rtol, atol)
                ]
        else:
            errMsg = ["%d/%d elements %s with rtol=%s, atol=%s" % (bad.sum(), bad.size, failStr, rtol, atol)]
            if plotOnFailure:
                if len(lhs.shape) != 2 or len(rhs.shape) != 2:
                    raise ValueError("plotOnFailure is only valid for 2-d arrays")
                try:
                    plotImageDiff(lhs, rhs, bad, diff=diff, plotFileName=plotFileName)
                except ImportError:
                    errMsg.append("Failure plot requested but matplotlib could not be imported.")
            if printFailures:
                # Make sure everything is an array if any of them are, so we
                # can treat them the same (diff and absDiff are arrays if
                # either rhs or lhs is), and we don't get here if neither is.
                if numpy.isscalar(relTo):
                    relTo = numpy.ones(bad.shape, dtype=float) * relTo
                if numpy.isscalar(lhs):
                    lhs = numpy.ones(bad.shape, dtype=float) * lhs
                if numpy.isscalar(rhs):
                    rhs = numpy.ones(bad.shape, dtype=float) * rhs
                if rtol is None:
                    for a, b, diff in zip(lhs[bad], rhs[bad], absDiff[bad]):
                        errMsg.append("%s %s %s (diff=%s)" % (a, cmpStr, b, diff))
                else:
                    for a, b, diff, rel in zip(lhs[bad], rhs[bad], absDiff[bad], relTo[bad]):
                        errMsg.append("%s %s %s (diff=%s/%s=%s)" % (a, cmpStr, b, diff, rel, diff / rel))

    if msg is not None:
        errMsg.append(msg)
    testCase.assertFalse(failed, msg="\n".join(errMsg))
@inTestCase
def assertFloatsNotEqual(
    testCase: unittest.TestCase,
    lhs: Union[float, numpy.ndarray],
    rhs: Union[float, numpy.ndarray],
    **kwds: Any,
) -> None:
    """Fail a test when the given floating point values compare as equal
    within the given tolerances.

    This is the inverse of `~lsst.utils.tests.assertFloatsAlmostEqual`; see
    that function for the full set of keyword options.

    Parameters
    ----------
    testCase : `unittest.TestCase`
        Instance the test is part of.
    lhs : scalar or array-like
        LHS value(s) to compare; may be a scalar or array-like of any
        dimension.
    rhs : scalar or array-like
        RHS value(s) to compare; may be a scalar or array-like of any
        dimension.

    Raises
    ------
    AssertionError
        The values are almost equal.
    """
    # Delegate with the comparison inverted.
    return assertFloatsAlmostEqual(testCase, lhs, rhs, invert=True, **kwds)
@inTestCase
def assertFloatsEqual(
    testCase: unittest.TestCase,
    lhs: Union[float, numpy.ndarray],
    rhs: Union[float, numpy.ndarray],
    **kwargs: Any,
) -> None:
    """
    Assert exact equality of lhs and rhs (numeric scalars or arrays).

    Equivalent to `~lsst.utils.tests.assertFloatsAlmostEqual` invoked with
    both tolerances set to zero; see that function for further options.

    Parameters
    ----------
    testCase : `unittest.TestCase`
        Instance the test is part of.
    lhs : scalar or array-like
        LHS value(s) to compare; may be a scalar or array-like of any
        dimension.
    rhs : scalar or array-like
        RHS value(s) to compare; may be a scalar or array-like of any
        dimension.

    Raises
    ------
    AssertionError
        The values are not equal.
    """
    # Zero tolerances reduce "almost equal" to strict equality.
    return assertFloatsAlmostEqual(testCase, lhs, rhs, rtol=0, atol=0, **kwargs)
791def _settingsIterator(settings: Dict[str, Sequence[Any]]) -> Iterator[Dict[str, Any]]:
792 """Return an iterator for the provided test settings
794 Parameters
795 ----------
796 settings : `dict` (`str`: iterable)
797 Lists of test parameters. Each should be an iterable of the same
798 length. If a string is provided as an iterable, it will be converted
799 to a list of a single string.
801 Raises
802 ------
803 AssertionError
804 If the ``settings`` are not of the same length.
806 Yields
807 ------
808 parameters : `dict` (`str`: anything)
809 Set of parameters.
810 """
811 for name, values in settings.items():
812 if isinstance(values, str): 812 ↛ 815line 812 didn't jump to line 815, because the condition on line 812 was never true
813 # Probably meant as a single-element string, rather than an
814 # iterable of chars.
815 settings[name] = [values]
816 num = len(next(iter(settings.values()))) # Number of settings
817 for name, values in settings.items():
818 assert len(values) == num, f"Length mismatch for setting {name}: {len(values)} vs {num}"
819 for ii in range(num):
820 values = [settings[kk][ii] for kk in settings]
821 yield dict(zip(settings, values))
def classParameters(**settings: Sequence[Any]) -> Callable:
    """Class decorator for generating unit tests

    This decorator generates classes with class variables according to the
    supplied ``settings``.

    Parameters
    ----------
    **settings : `dict` (`str`: iterable)
        The lists of test parameters to set as class variables in turn. Each
        should be an iterable of the same length.

    Examples
    --------
    ::

        @classParameters(foo=[1, 2], bar=[3, 4])
        class MyTestCase(unittest.TestCase):
            ...

    will generate two classes, as if you wrote::

        class MyTestCase_1_3(unittest.TestCase):
            foo = 1
            bar = 3
            ...

        class MyTestCase_2_4(unittest.TestCase):
            foo = 2
            bar = 4
            ...

    Note that the values are embedded in the class name.
    """

    def decorator(cls: Type) -> None:
        # Inject the generated classes into the module that defined cls so
        # that test discovery can find them.
        namespace = sys.modules[cls.__module__].__dict__
        for params in _settingsIterator(settings):
            suffix = "_".join(str(value) for value in params.values())
            name = f"{cls.__name__}_{suffix}"
            members = dict(cls.__dict__)
            members.update(params)
            namespace[name] = type(name, (cls,), members)

    return decorator
def methodParameters(**settings: Sequence[Any]) -> Callable:
    """Iterate over supplied settings to create subtests automatically.

    This decorator iterates over the supplied settings, using
    ``TestCase.subTest`` to communicate the values in the event of a failure.

    Parameters
    ----------
    **settings : `dict` (`str`: iterable)
        The lists of test parameters. Each should be an iterable of the same
        length.

    Examples
    --------
    .. code-block:: python

        @methodParameters(foo=[1, 2], bar=[3, 4])
        def testSomething(self, foo, bar):
            ...

    will run:

    .. code-block:: python

        testSomething(foo=1, bar=3)
        testSomething(foo=2, bar=4)
    """

    def decorator(func: Callable) -> Callable:
        @functools.wraps(func)
        def wrapper(self: unittest.TestCase, *args: Any, **kwargs: Any) -> None:
            # Run the wrapped test once per parameter combination, with each
            # combination reported as its own subTest.
            for combo in _settingsIterator(settings):
                kwargs.update(combo)
                with self.subTest(**combo):
                    func(self, *args, **kwargs)

        return wrapper

    return decorator
911def _cartesianProduct(settings: Mapping[str, Sequence[Any]]) -> Mapping[str, Sequence[Any]]:
912 """Return the cartesian product of the settings
914 Parameters
915 ----------
916 settings : `dict` mapping `str` to `iterable`
917 Parameter combinations.
919 Returns
920 -------
921 product : `dict` mapping `str` to `iterable`
922 Parameter combinations covering the cartesian product (all possible
923 combinations) of the input parameters.
925 Examples
926 --------
927 .. code-block:: python
929 cartesianProduct({"foo": [1, 2], "bar": ["black", "white"]})
931 will return:
933 .. code-block:: python
935 {"foo": [1, 1, 2, 2], "bar": ["black", "white", "black", "white"]}
936 """
937 product: Dict[str, List[Any]] = {kk: [] for kk in settings}
938 for values in itertools.product(*settings.values()):
939 for kk, vv in zip(settings.keys(), values):
940 product[kk].append(vv)
941 return product
def classParametersProduct(**settings: Sequence[Any]) -> Callable:
    """Class decorator for generating unit tests

    This decorator generates classes with class variables according to the
    cartesian product of the supplied ``settings``.

    Parameters
    ----------
    **settings : `dict` (`str`: iterable)
        The lists of test parameters to set as class variables in turn. Each
        should be an iterable.

    Examples
    --------
    .. code-block:: python

        @classParametersProduct(foo=[1, 2], bar=[3, 4])
        class MyTestCase(unittest.TestCase):
            ...

    will generate four classes, as if you wrote::

    .. code-block:: python

        class MyTestCase_1_3(unittest.TestCase):
            foo = 1
            bar = 3
            ...

        class MyTestCase_1_4(unittest.TestCase):
            foo = 1
            bar = 4
            ...

        class MyTestCase_2_3(unittest.TestCase):
            foo = 2
            bar = 3
            ...

        class MyTestCase_2_4(unittest.TestCase):
            foo = 2
            bar = 4
            ...

    Note that the values are embedded in the class name.
    """
    # Expand to all combinations, then reuse the plain decorator.
    expanded = _cartesianProduct(settings)
    return classParameters(**expanded)
def methodParametersProduct(**settings: Sequence[Any]) -> Callable:
    """Iterate over cartesian product creating sub tests.

    This decorator iterates over the cartesian product of the supplied
    settings, using `~unittest.TestCase.subTest` to communicate the values in
    the event of a failure.

    Parameters
    ----------
    **settings : `dict` (`str`: iterable)
        The parameter combinations to test. Each should be an iterable.

    Example
    -------

        @methodParametersProduct(foo=[1, 2], bar=["black", "white"])
        def testSomething(self, foo, bar):
            ...

    will run:

        testSomething(foo=1, bar="black")
        testSomething(foo=1, bar="white")
        testSomething(foo=2, bar="black")
        testSomething(foo=2, bar="white")
    """
    # Expand to all combinations, then reuse the plain decorator.
    expanded = _cartesianProduct(settings)
    return methodParameters(**expanded)
@contextlib.contextmanager
def temporaryDirectory() -> Iterator[str]:
    """Context manager that creates and destroys a temporary directory.

    The difference from `tempfile.TemporaryDirectory` is that this ignores
    errors when deleting a directory, which may happen with some filesystems.

    Yields
    ------
    tmpdir : `str`
        Path to the newly created temporary directory.
    """
    tmpdir = tempfile.mkdtemp()
    try:
        yield tmpdir
    finally:
        # Clean up even when the with-body raised (the bare yield version
        # leaked the directory on exception). Errors are ignored because
        # some filesystems can fail to remove busy directories.
        shutil.rmtree(tmpdir, ignore_errors=True)