lsst.utils  22.0.1-3-g0503b2e+6b209d634c
tests.py
Go to the documentation of this file.
1 #
2 # LSST Data Management System
3 #
4 # Copyright 2008-2017 AURA/LSST.
5 #
6 # This product includes software developed by the
7 # LSST Project (http://www.lsst.org/).
8 #
9 # This program is free software: you can redistribute it and/or modify
10 # it under the terms of the GNU General Public License as published by
11 # the Free Software Foundation, either version 3 of the License, or
12 # (at your option) any later version.
13 #
14 # This program is distributed in the hope that it will be useful,
15 # but WITHOUT ANY WARRANTY; without even the implied warranty of
16 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 # GNU General Public License for more details.
18 #
19 # You should have received a copy of the LSST License Statement and
20 # the GNU General Public License along with this program. If not,
21 # see <https://www.lsstcorp.org/LegalNotices/>.
22 #
23 """Support code for running unit tests"""
24 
25 import contextlib
26 import gc
27 import inspect
28 import os
29 import subprocess
30 import sys
31 import unittest
32 import warnings
33 import numpy
34 import psutil
35 import functools
36 import tempfile
37 import shutil
38 
# Public API of this module. Helpers such as inTestCase and
# _settingsIterator are intentionally omitted.
__all__ = ["init", "MemoryTestCase", "ExecutablesTestCase", "getTempFilePath",
           "TestCase", "assertFloatsAlmostEqual", "assertFloatsNotEqual", "assertFloatsEqual",
           "debugger", "classParameters", "methodParameters"]

# Initialize the list of open files to an empty set.
# init() replaces this with a baseline snapshot of the files open in the
# current process, against which MemoryTestCase checks for leaks.
open_files = set()
45 
46 
def _get_open_files():
    """Return a set containing the list of files currently open in this
    process.

    Returns
    -------
    open_files : `set`
        Set containing the list of open files.
    """
    # Ask psutil for this process's open file handles and keep just the paths.
    process = psutil.Process()
    return {handle.path for handle in process.open_files()}
57 
58 
def init():
    """Initialize the memory tester and file descriptor leak tester."""
    global open_files
    # Reset the list of open files: snapshot what is open right now so that
    # later leak checks only report files opened after this call.
    open_files = _get_open_files()
64 
65 
def sort_tests(tests):
    """Sort supplied test suites such that MemoryTestCases are at the end.

    `lsst.utils.tests.MemoryTestCase` tests should always run after any other
    tests in the module.

    Parameters
    ----------
    tests : sequence
        Sequence of test suites.

    Returns
    -------
    suite : `unittest.TestSuite`
        A combined `~unittest.TestSuite` with
        `~lsst.utils.tests.MemoryTestCase` at the end.
    """
    ordered = unittest.TestSuite()
    deferred = []
    for candidate in tests:
        try:
            # Peek at the first test in the suite to see whether it derives
            # from MemoryTestCase. A for-loop with an immediate break is used
            # instead of next() because a suite may contain no test methods
            # and the Python community prefers loops over catching
            # StopIteration.
            mro = None
            for item in candidate:
                mro = inspect.getmro(item.__class__)
                break
            if mro is not None and MemoryTestCase in mro:
                deferred.append(candidate)
            else:
                ordered.addTests(candidate)
        except TypeError:
            # Not iterable: candidate is a bare test case rather than a suite.
            if isinstance(candidate, MemoryTestCase):
                deferred.append(candidate)
            else:
                ordered.addTest(candidate)
    # Memory tests go last so leak checks see the effect of all other tests.
    ordered.addTests(deferred)
    return ordered
107 
108 
def suiteClassWrapper(tests):
    # Adapter installed as unittest.defaultTestLoader.suiteClass below:
    # wraps the sorted tests back into a TestSuite so MemoryTestCase
    # suites run last.
    return unittest.TestSuite(sort_tests(tests))
111 
112 
# Replace the suiteClass callable in the defaultTestLoader
# so that we can reorder the test ordering. This will have
# no effect if no memory test cases are found.
# NOTE: this is a module-import-time side effect on the global default
# test loader.
unittest.defaultTestLoader.suiteClass = suiteClassWrapper
117 
118 
class MemoryTestCase(unittest.TestCase):
    """Check for resource leaks.

    Run this test case after all other tests in a module (``sort_tests``
    arranges this automatically) so that the file-descriptor check sees
    the net effect of the whole module.
    """

    @classmethod
    def tearDownClass(cls):
        """Reset the leak counter when the tests have been completed"""
        init()

    def testFileDescriptorLeaks(self):
        """Check if any file descriptors are open since init() called."""
        # Collect garbage first so objects holding files are finalized.
        gc.collect()
        global open_files
        now_open = _get_open_files()

        # Some files are opened out of the control of the stack.
        now_open = set(f for f in now_open if not f.endswith(".car")
                       and not f.startswith("/proc/")
                       and not f.endswith(".ttf")
                       and not (f.startswith("/var/lib/") and f.endswith("/passwd"))
                       and not f.endswith("astropy.log"))

        # Only files opened since the init() baseline count as leaks.
        diff = now_open.difference(open_files)
        if diff:
            for f in diff:
                print("File open: %s" % f)
            self.fail("Failed to close %d file%s" % (len(diff), "s" if len(diff) != 1 else ""))
145 
146 
class ExecutablesTestCase(unittest.TestCase):
    """Test that executables can be run and return good status.

    The test methods are dynamically created. Callers
    must subclass this class in their own test file and invoke
    the create_executable_tests() class method to register the tests.
    """

    # Number of executables discovered by create_executable_tests();
    # -1 means discovery has not been attempted yet.
    TESTS_DISCOVERED = -1

    @classmethod
    def setUpClass(cls):
        """Abort testing if automated test creation was enabled and
        no tests were found."""
        if cls.TESTS_DISCOVERED == 0:
            raise RuntimeError("No executables discovered.")

    def testSanity(self):
        """This test exists to ensure that there is at least one test to be
        executed. This allows the test runner to trigger the class set up
        machinery to test whether there are some executables to test."""
        pass

    def assertExecutable(self, executable, root_dir=None, args=None, msg=None):
        """Check an executable runs and returns good status.

        Prints output to standard out. On bad exit status the test
        fails. If the executable can not be located the test is skipped.

        Parameters
        ----------
        executable : `str`
            Path to an executable. ``root_dir`` is not used if this is an
            absolute path.
        root_dir : `str`, optional
            Directory containing executable. Ignored if `None`.
        args : `list` or `tuple`, optional
            Arguments to be provided to the executable.
        msg : `str`, optional
            Message to use when the test fails. Can be `None` for default
            message.

        Raises
        ------
        AssertionError
            The executable did not return 0 exit status.
        """
        if root_dir is not None and not os.path.isabs(executable):
            executable = os.path.join(root_dir, executable)

        # Form the argument list for subprocess
        sp_args = [executable]
        argstr = "no arguments"
        if args is not None:
            sp_args.extend(args)
            argstr = 'arguments "' + " ".join(args) + '"'

        print("Running executable '{}' with {}...".format(executable, argstr))
        if not os.path.exists(executable):
            self.skipTest("Executable {} is unexpectedly missing".format(executable))
        failmsg = None
        try:
            output = subprocess.check_output(sp_args)
        except subprocess.CalledProcessError as e:
            output = e.output
            failmsg = "Bad exit status from '{}': {}".format(executable, e.returncode)
        print(output.decode('utf-8'))
        if failmsg:
            if msg is None:
                msg = failmsg
            self.fail(msg)

    @classmethod
    def _build_test_method(cls, executable, root_dir):
        """Build a test method and attach to class.

        A test method is created for the supplied excutable located
        in the supplied root directory. This method is attached to the class
        so that the test runner will discover the test and run it.

        Parameters
        ----------
        cls : `object`
            The class in which to create the tests.
        executable : `str`
            Name of executable. Can be absolute path.
        root_dir : `str`
            Path to executable. Not used if executable path is absolute.
        """
        if not os.path.isabs(executable):
            executable = os.path.abspath(os.path.join(root_dir, executable))

        # Create the test name from the executable path.
        test_name = "test_exe_" + executable.replace("/", "_")

        # This is the function that will become the test method
        def test_executable_runs(*args):
            self = args[0]
            self.assertExecutable(executable)

        # Give it a name and attach it to the class
        test_executable_runs.__name__ = test_name
        setattr(cls, test_name, test_executable_runs)

    @classmethod
    def create_executable_tests(cls, ref_file, executables=None):
        """Discover executables to test and create corresponding test methods.

        Scans the directory containing the supplied reference file
        (usually ``__file__`` supplied from the test class) to look for
        executables. If executables are found a test method is created
        for each one. That test method will run the executable and
        check the returned value.

        Executable scripts with a ``.py`` extension and shared libraries
        are ignored by the scanner.

        This class method must be called before test discovery.

        Parameters
        ----------
        ref_file : `str`
            Path to a file within the directory to be searched.
            If the files are in the same location as the test file, then
            ``__file__`` can be used.
        executables : `list` or `tuple`, optional
            Sequence of executables that can override the automated
            detection. If an executable mentioned here is not found, a
            skipped test will be created for it, rather than a failed
            test.

        Examples
        --------
        >>> cls.create_executable_tests(__file__)
        """
        # Get the search directory from the reference file
        ref_dir = os.path.abspath(os.path.dirname(ref_file))

        if executables is None:
            # Look for executables to test by walking the tree
            executables = []
            for root, dirs, files in os.walk(ref_dir):
                for f in files:
                    # Skip Python files. Shared libraries are executable.
                    if not f.endswith(".py") and not f.endswith(".so"):
                        full_path = os.path.join(root, f)
                        if os.access(full_path, os.X_OK):
                            executables.append(full_path)

        # Store the number of tests found for later assessment.
        # Do not raise an exception if we have no executables as this would
        # cause the testing to abort before the test runner could properly
        # integrate it into the failure report.
        cls.TESTS_DISCOVERED = len(executables)

        # Create the test functions and attach them to the class
        for e in executables:
            cls._build_test_method(e, ref_dir)
307 
308 
@contextlib.contextmanager
def getTempFilePath(ext, expectOutput=True):
    """Return a path suitable for a temporary file and try to delete the
    file on success.

    If the with block completes successfully then the file is deleted,
    if possible; failure results in a printed warning.
    If a file remains when it should not, a RuntimeError exception is
    raised. This exception is also raised if a file is not present on context
    manager exit when one is expected to exist.
    If the block exits with an exception the file is left on disk so it can be
    examined. The file name has a random component such that nested context
    managers can be used with the same file suffix.

    Parameters
    ----------
    ext : `str`
        File name extension, e.g. ``.fits``.
    expectOutput : `bool`, optional
        If `True`, a file should be created within the context manager.
        If `False`, a file should not be present when the context manager
        exits.

    Returns
    -------
    `str`
        Path for a temporary file. The path is a combination of the caller's
        file path and the name of the top-level function.

    Notes
    -----
    ::

        # file tests/testFoo.py
        import unittest
        import lsst.utils.tests
        class FooTestCase(unittest.TestCase):
            def testBasics(self):
                self.runTest()

            def runTest(self):
                with lsst.utils.tests.getTempFilePath(".fits") as tmpFile:
                    # if tests/.tests exists then
                    # tmpFile = "tests/.tests/testFoo_testBasics.fits"
                    # otherwise tmpFile = "testFoo_testBasics.fits"
                    ...
                # at the end of this "with" block the path tmpFile will be
                # deleted, but only if the file exists and the "with"
                # block terminated normally (rather than with an exception)
                ...
    """
    stack = inspect.stack()
    # get name of first function in the file
    # NOTE(review): the walk starts at index 2, presumably to skip this
    # generator frame and the contextlib wrapper frame — confirm if the
    # call chain ever changes.
    for i in range(2, len(stack)):
        frameInfo = inspect.getframeinfo(stack[i][0])
        if i == 2:
            callerFilePath = frameInfo.filename
            callerFuncName = frameInfo.function
        elif callerFilePath == frameInfo.filename:
            # this function called the previous function
            callerFuncName = frameInfo.function
        else:
            break

    callerDir, callerFileNameWithExt = os.path.split(callerFilePath)
    callerFileName = os.path.splitext(callerFileNameWithExt)[0]
    outDir = os.path.join(callerDir, ".tests")
    if not os.path.isdir(outDir):
        # No .tests directory next to the caller: fall back to the
        # current working directory.
        outDir = ""
    prefix = "%s_%s-" % (callerFileName, callerFuncName)
    # NOTE(review): tempfile.mktemp is inherently race-prone; the existence
    # check below mitigates but does not eliminate that window.
    outPath = tempfile.mktemp(dir=outDir, suffix=ext, prefix=prefix)
    if os.path.exists(outPath):
        # There should not be a file there given the randomizer. Warn and remove.
        # Use stacklevel 3 so that the warning is reported from the end of the with block
        warnings.warn("Unexpectedly found pre-existing tempfile named %r" % (outPath,),
                      stacklevel=3)
        try:
            os.remove(outPath)
        except OSError:
            pass

    yield outPath

    # Code below only runs if the with-block body did not raise:
    # a generator-based context manager resumes here on normal exit only.
    fileExists = os.path.exists(outPath)
    if expectOutput:
        if not fileExists:
            raise RuntimeError("Temp file expected named {} but none found".format(outPath))
    else:
        if fileExists:
            raise RuntimeError("Unexpectedly discovered temp file named {}".format(outPath))
    # Try to clean up the file regardless
    if fileExists:
        try:
            os.remove(outPath)
        except OSError as e:
            # Use stacklevel 3 so that the warning is reported from the end of the with block
            warnings.warn("Warning: could not remove file %r: %s" % (outPath, e), stacklevel=3)
407 
408 
class TestCase(unittest.TestCase):
    """Subclass of unittest.TestCase that adds some custom assertions for
    convenience.

    The extra assertion methods (e.g. ``assertFloatsAlmostEqual``) are
    attached to this class by the `inTestCase` decorator defined in this
    module.
    """
413 
414 
def inTestCase(func):
    """A decorator to add a free function to our custom TestCase class, while also
    making it available as a free function.

    Parameters
    ----------
    func : callable
        Function to attach to `TestCase` under its own ``__name__``.

    Returns
    -------
    func : callable
        The input function, unmodified.
    """
    setattr(TestCase, func.__name__, func)
    return func
421 
422 
def debugger(*exceptions):
    """Decorator to enter the debugger when there's an uncaught exception

    To use, just slap a ``@debugger()`` on your function.

    You may provide specific exception classes to catch as arguments to
    the decorator function, e.g.,
    ``@debugger(RuntimeError, NotImplementedError)``.
    This defaults to just `Exception`, for use on `unittest.TestCase`
    methods.

    Code provided by "Rosh Oxymoron" on StackOverflow:
    http://stackoverflow.com/questions/4398967/python-unit-testing-automatically-running-the-debugger-when-a-test-fails

    Notes
    -----
    Consider using ``pytest --pdb`` instead of this decorator.
    """
    if not exceptions:
        exceptions = (Exception, )

    def decorator(f):
        @functools.wraps(f)
        def wrapper(*args, **kwargs):
            try:
                return f(*args, **kwargs)
            except exceptions:
                # Import pdb lazily so it is only loaded when a failure
                # actually triggers the debugger. sys is already imported
                # at module level.
                import pdb
                pdb.post_mortem(sys.exc_info()[2])
        return wrapper
    return decorator
455 
456 
def plotImageDiff(lhs, rhs, bad=None, diff=None, plotFileName=None):
    """Plot the comparison of two 2-d NumPy arrays.

    Parameters
    ----------
    lhs : `numpy.ndarray`
        LHS values to compare; a 2-d NumPy array
    rhs : `numpy.ndarray`
        RHS values to compare; a 2-d NumPy array
    bad : `numpy.ndarray`
        A 2-d boolean NumPy array of values to emphasize in the plots
    diff : `numpy.ndarray`
        difference array; a 2-d NumPy array, or None to show lhs-rhs
    plotFileName : `str`
        Filename to save the plot to. If None, the plot will be displayed in
        a window.

    Notes
    -----
    This method uses `matplotlib` and imports it internally; it should be
    wrapped in a try/except block within packages that do not depend on
    `matplotlib` (including `~lsst.utils`).
    """
    # Lazy import so this module does not require matplotlib.
    from matplotlib import pyplot
    if diff is None:
        diff = lhs - rhs
    pyplot.figure()
    if bad is not None:
        # make an rgba image that's red and transparent where not bad
        badImage = numpy.zeros(bad.shape + (4,), dtype=numpy.uint8)
        badImage[:, :, 0] = 255
        badImage[:, :, 1] = 0
        badImage[:, :, 2] = 0
        badImage[:, :, 3] = 255*bad
    # Shared color scale for lhs/rhs (top row) and for diff (bottom row).
    vmin1 = numpy.minimum(numpy.min(lhs), numpy.min(rhs))
    vmax1 = numpy.maximum(numpy.max(lhs), numpy.max(rhs))
    vmin2 = numpy.min(diff)
    vmax2 = numpy.max(diff)
    # 2x3 grid: top row scaled to the data range, bottom row scaled to the
    # difference range, one column per panel (lhs, rhs, diff).
    for n, (image, title) in enumerate([(lhs, "lhs"), (rhs, "rhs"), (diff, "diff")]):
        pyplot.subplot(2, 3, n + 1)
        im1 = pyplot.imshow(image, cmap=pyplot.cm.gray, interpolation='nearest', origin='lower',
                            vmin=vmin1, vmax=vmax1)
        if bad is not None:
            pyplot.imshow(badImage, alpha=0.2, interpolation='nearest', origin='lower')
        pyplot.axis("off")
        pyplot.title(title)
        pyplot.subplot(2, 3, n + 4)
        im2 = pyplot.imshow(image, cmap=pyplot.cm.gray, interpolation='nearest', origin='lower',
                            vmin=vmin2, vmax=vmax2)
        if bad is not None:
            pyplot.imshow(badImage, alpha=0.2, interpolation='nearest', origin='lower')
        pyplot.axis("off")
        pyplot.title(title)
    pyplot.subplots_adjust(left=0.05, bottom=0.05, top=0.92, right=0.75, wspace=0.05, hspace=0.05)
    # Separate colorbars for the two rows, placed in the reserved right margin.
    cax1 = pyplot.axes([0.8, 0.55, 0.05, 0.4])
    pyplot.colorbar(im1, cax=cax1)
    cax2 = pyplot.axes([0.8, 0.05, 0.05, 0.4])
    pyplot.colorbar(im2, cax=cax2)
    if plotFileName:
        pyplot.savefig(plotFileName)
    else:
        pyplot.show()
519 
520 
@inTestCase
def assertFloatsAlmostEqual(testCase, lhs, rhs, rtol=sys.float_info.epsilon,
                            atol=sys.float_info.epsilon, relTo=None,
                            printFailures=True, plotOnFailure=False,
                            plotFileName=None, invert=False, msg=None,
                            ignoreNaNs=False):
    """Highly-configurable floating point comparisons for scalars and arrays.

    The test assertion will fail if all elements ``lhs`` and ``rhs`` are not
    equal to within the tolerances specified by ``rtol`` and ``atol``.
    More precisely, the comparison is:

    ``abs(lhs - rhs) <= relTo*rtol OR abs(lhs - rhs) <= atol``

    If ``rtol`` or ``atol`` is `None`, that term in the comparison is not
    performed at all.

    When not specified, ``relTo`` is the elementwise maximum of the absolute
    values of ``lhs`` and ``rhs``. If set manually, it should usually be set
    to either ``lhs`` or ``rhs``, or a scalar value typical of what is
    expected.

    Parameters
    ----------
    testCase : `unittest.TestCase`
        Instance the test is part of.
    lhs : scalar or array-like
        LHS value(s) to compare; may be a scalar or array-like of any
        dimension.
    rhs : scalar or array-like
        RHS value(s) to compare; may be a scalar or array-like of any
        dimension.
    rtol : `float`, optional
        Relative tolerance for comparison; defaults to double-precision
        epsilon.
    atol : `float`, optional
        Absolute tolerance for comparison; defaults to double-precision
        epsilon.
    relTo : `float`, optional
        Value to which comparison with rtol is relative.
    printFailures : `bool`, optional
        Upon failure, print all inequal elements as part of the message.
    plotOnFailure : `bool`, optional
        Upon failure, plot the originals and their residual with matplotlib.
        Only 2-d arrays are supported.
    plotFileName : `str`, optional
        Filename to save the plot to. If `None`, the plot will be displayed in
        a window.
    invert : `bool`, optional
        If `True`, invert the comparison and fail only if any elements *are*
        equal. Used to implement `~lsst.utils.tests.assertFloatsNotEqual`,
        which should generally be used instead for clarity.
    msg : `str`, optional
        String to append to the error message when assert fails.
    ignoreNaNs : `bool`, optional
        If `True` (`False` is default) mask out any NaNs from operand arrays
        before performing comparisons if they are in the same locations; NaNs
        in different locations trigger test assertion failures, even when
        ``invert=True``. Scalar NaNs are treated like arrays containing only
        NaNs of the same shape as the other operand, and no comparisons are
        performed if both sides are scalar NaNs.

    Raises
    ------
    AssertionError
        The values are not almost equal.
    """
    if ignoreNaNs:
        lhsMask = numpy.isnan(lhs)
        rhsMask = numpy.isnan(rhs)
        if not numpy.all(lhsMask == rhsMask):
            testCase.fail(f"lhs has {lhsMask.sum()} NaN values and rhs has {rhsMask.sum()} NaN values, "
                          f"in different locations.")
        if numpy.all(lhsMask):
            assert numpy.all(rhsMask), "Should be guaranteed by previous if."
            # All operands are fully NaN (either scalar NaNs or arrays of only
            # NaNs).
            return
        assert not numpy.all(rhsMask), "Should be guaranteed by previous two ifs."
        # If either operand is an array select just its not-NaN values. Note
        # that these expressions are never True for scalar operands, because if
        # they are NaN then the numpy.all checks above will catch them.
        if numpy.any(lhsMask):
            lhs = lhs[numpy.logical_not(lhsMask)]
        if numpy.any(rhsMask):
            rhs = rhs[numpy.logical_not(rhsMask)]
    if not numpy.isfinite(lhs).all():
        testCase.fail("Non-finite values in lhs")
    if not numpy.isfinite(rhs).all():
        testCase.fail("Non-finite values in rhs")
    diff = lhs - rhs
    absDiff = numpy.abs(lhs - rhs)
    if rtol is not None:
        if relTo is None:
            relTo = numpy.maximum(numpy.abs(lhs), numpy.abs(rhs))
        else:
            relTo = numpy.abs(relTo)
        bad = absDiff > rtol*relTo
        if atol is not None:
            # An element only fails if it violates both tolerances.
            bad = numpy.logical_and(bad, absDiff > atol)
    else:
        if atol is None:
            raise ValueError("rtol and atol cannot both be None")
        bad = absDiff > atol
    failed = numpy.any(bad)
    if invert:
        failed = not failed
        bad = numpy.logical_not(bad)
        cmpStr = "=="
        failStr = "are the same"
    else:
        cmpStr = "!="
        failStr = "differ"
    errMsg = []
    if failed:
        if numpy.isscalar(bad):
            if rtol is None:
                errMsg = ["%s %s %s; diff=%s with atol=%s"
                          % (lhs, cmpStr, rhs, absDiff, atol)]
            elif atol is None:
                errMsg = ["%s %s %s; diff=%s/%s=%s with rtol=%s"
                          % (lhs, cmpStr, rhs, absDiff, relTo, absDiff/relTo, rtol)]
            else:
                errMsg = ["%s %s %s; diff=%s/%s=%s with rtol=%s, atol=%s"
                          % (lhs, cmpStr, rhs, absDiff, relTo, absDiff/relTo, rtol, atol)]
        else:
            errMsg = ["%d/%d elements %s with rtol=%s, atol=%s"
                      % (bad.sum(), bad.size, failStr, rtol, atol)]
            if plotOnFailure:
                if len(lhs.shape) != 2 or len(rhs.shape) != 2:
                    raise ValueError("plotOnFailure is only valid for 2-d arrays")
                try:
                    plotImageDiff(lhs, rhs, bad, diff=diff, plotFileName=plotFileName)
                except ImportError:
                    errMsg.append("Failure plot requested but matplotlib could not be imported.")
            if printFailures:
                # Make sure everything is an array if any of them are, so we can treat
                # them the same (diff and absDiff are arrays if either rhs or lhs is),
                # and we don't get here if neither is.
                if numpy.isscalar(relTo):
                    relTo = numpy.ones(bad.shape, dtype=float) * relTo
                if numpy.isscalar(lhs):
                    lhs = numpy.ones(bad.shape, dtype=float) * lhs
                if numpy.isscalar(rhs):
                    rhs = numpy.ones(bad.shape, dtype=float) * rhs
                if rtol is None:
                    for a, b, diff in zip(lhs[bad], rhs[bad], absDiff[bad]):
                        errMsg.append("%s %s %s (diff=%s)" % (a, cmpStr, b, diff))
                else:
                    for a, b, diff, rel in zip(lhs[bad], rhs[bad], absDiff[bad], relTo[bad]):
                        errMsg.append("%s %s %s (diff=%s/%s=%s)" % (a, cmpStr, b, diff, rel, diff/rel))

    if msg is not None:
        errMsg.append(msg)
    testCase.assertFalse(failed, msg="\n".join(errMsg))
677 
678 
@inTestCase
def assertFloatsNotEqual(testCase, lhs, rhs, **kwds):
    """Fail a test if the given floating point values are equal to within the
    given tolerances.

    See `~lsst.utils.tests.assertFloatsAlmostEqual` (called with
    ``invert=True``) for more information.

    Parameters
    ----------
    testCase : `unittest.TestCase`
        Instance the test is part of.
    lhs : scalar or array-like
        LHS value(s) to compare; may be a scalar or array-like of any
        dimension.
    rhs : scalar or array-like
        RHS value(s) to compare; may be a scalar or array-like of any
        dimension.
    **kwds
        Forwarded to `~lsst.utils.tests.assertFloatsAlmostEqual`.

    Raises
    ------
    AssertionError
        The values are almost equal.
    """
    return assertFloatsAlmostEqual(testCase, lhs, rhs, invert=True, **kwds)
704 
705 
@inTestCase
def assertFloatsEqual(testCase, lhs, rhs, **kwds):
    """Assert that lhs == rhs (both numeric types, whether scalar or array).

    See `~lsst.utils.tests.assertFloatsAlmostEqual` (called with
    ``rtol=atol=0``) for more information.

    Parameters
    ----------
    testCase : `unittest.TestCase`
        Instance the test is part of.
    lhs : scalar or array-like
        LHS value(s) to compare; may be a scalar or array-like of any
        dimension.
    rhs : scalar or array-like
        RHS value(s) to compare; may be a scalar or array-like of any
        dimension.
    **kwds
        Forwarded to `~lsst.utils.tests.assertFloatsAlmostEqual`.

    Raises
    ------
    AssertionError
        The values are not equal.
    """
    # Exact comparison: zero tolerance on both the relative and absolute terms.
    return assertFloatsAlmostEqual(testCase, lhs, rhs, rtol=0, atol=0, **kwds)
731 
732 
733 def _settingsIterator(settings):
734  """Return an iterator for the provided test settings
735 
736  Parameters
737  ----------
738  settings : `dict` (`str`: iterable)
739  Lists of test parameters. Each should be an iterable of the same length.
740  If a string is provided as an iterable, it will be converted to a list
741  of a single string.
742 
743  Raises
744  ------
745  AssertionError
746  If the ``settings`` are not of the same length.
747 
748  Yields
749  ------
750  parameters : `dict` (`str`: anything)
751  Set of parameters.
752  """
753  for name, values in settings.items():
754  if isinstance(values, str):
755  # Probably meant as a single-element string, rather than an iterable of chars
756  settings[name] = [values]
757  num = len(next(iter(settings.values()))) # Number of settings
758  for name, values in settings.items():
759  assert len(values) == num, f"Length mismatch for setting {name}: {len(values)} vs {num}"
760  for ii in range(num):
761  values = [settings[kk][ii] for kk in settings]
762  yield dict(zip(settings.keys(), values))
763 
764 
def classParameters(**settings):
    """Class decorator for generating unit tests

    This decorator generates classes with class variables according to the
    supplied ``settings``.

    Parameters
    ----------
    **settings : `dict` (`str`: iterable)
        The lists of test parameters to set as class variables in turn. Each
        should be an iterable of the same length.

    Examples
    --------
    ::

        @classParameters(foo=[1, 2], bar=[3, 4])
        class MyTestCase(unittest.TestCase):
            ...

    will generate two classes, as if you wrote::

        class MyTestCase_1_3(unittest.TestCase):
            foo = 1
            bar = 3
            ...

        class MyTestCase_2_4(unittest.TestCase):
            foo = 2
            bar = 4
            ...

    Note that the values are embedded in the class name.
    """
    def decorator(cls):
        # Install generated subclasses into the module that defined the
        # decorated class, so unittest discovery finds them by name.
        namespace = sys.modules[cls.__module__].__dict__
        for params in _settingsIterator(settings):
            suffix = "_".join(str(value) for value in params.values())
            subclassName = f"{cls.__name__}_{suffix}"
            members = dict(cls.__dict__)
            members.update(params)
            namespace[subclassName] = type(subclassName, (cls,), members)
        # NOTE(review): the decorator returns None (implicitly), so the
        # decorated name is rebound to None in its module — presumably so
        # the parameterless base class is not itself collected; verify.
    return decorator
807 
808 
def methodParameters(**settings):
    """Method decorator for unit tests

    This decorator iterates over the supplied settings, using
    ``TestCase.subTest`` to communicate the values in the event of a failure.

    Parameters
    ----------
    **settings : `dict` (`str`: iterable)
        The lists of test parameters. Each should be an iterable of the same
        length.

    Examples
    --------
    ::

        @methodParameters(foo=[1, 2], bar=[3, 4])
        def testSomething(self, foo, bar):
            ...

    will run::

        testSomething(foo=1, bar=3)
        testSomething(foo=2, bar=4)
    """
    def decorate(method):
        @functools.wraps(method)
        def inner(self, *args, **kwargs):
            # Run the wrapped test once per parameter combination; subTest
            # reports the failing combination without stopping the loop.
            for combination in _settingsIterator(settings):
                kwargs.update(combination)
                with self.subTest(**combination):
                    method(self, *args, **kwargs)
        return inner
    return decorate
843 
844 
@contextlib.contextmanager
def temporaryDirectory():
    """Context manager that creates and destroys a temporary directory.

    The difference from `tempfile.TemporaryDirectory` is that this ignores
    errors when deleting a directory, which may happen with some filesystems.
    """
    tmpdir = tempfile.mkdtemp()
    yield tmpdir
    # Only reached on normal exit: if the with-block raises, the directory
    # is left in place (consistent with getTempFilePath's keep-on-error
    # behavior; presumably intentional for post-mortem inspection).
    shutil.rmtree(tmpdir, ignore_errors=True)
def assertExecutable(self, executable, root_dir=None, args=None, msg=None)
Definition: tests.py:170
def create_executable_tests(cls, ref_file, executables=None)
Definition: tests.py:253
def _build_test_method(cls, executable, root_dir)
Definition: tests.py:221
def assertFloatsEqual(testCase, lhs, rhs, **kwargs)
Definition: tests.py:707
def classParameters(**settings)
Definition: tests.py:765
def assertFloatsAlmostEqual(testCase, lhs, rhs, rtol=sys.float_info.epsilon, atol=sys.float_info.epsilon, relTo=None, printFailures=True, plotOnFailure=False, plotFileName=None, invert=False, msg=None, ignoreNaNs=False)
Definition: tests.py:526
def temporaryDirectory()
Definition: tests.py:846
def methodParameters(**settings)
Definition: tests.py:809
def debugger(*exceptions)
Definition: tests.py:423
def assertFloatsNotEqual(testCase, lhs, rhs, **kwds)
Definition: tests.py:680
def getTempFilePath(ext, expectOutput=True)
Definition: tests.py:310
def plotImageDiff(lhs, rhs, bad=None, diff=None, plotFileName=None)
Definition: tests.py:457
def sort_tests(tests)
Definition: tests.py:66
def inTestCase(func)
Definition: tests.py:415
def suiteClassWrapper(tests)
Definition: tests.py:109
def init()
Definition: tests.py:59