Coverage for python/lsst/utils/tests.py: 29%

323 statements  

coverage.py v7.2.7, created at 2023-06-01 02:29 -0700

1# This file is part of utils. 

2# 

3# Developed for the LSST Data Management System. 

4# This product includes software developed by the LSST Project 

5# (https://www.lsst.org). 

6# See the COPYRIGHT file at the top-level directory of this distribution 

7# for details of code ownership. 

8# 

9# Use of this source code is governed by a 3-clause BSD-style 

10# license that can be found in the LICENSE file. 

11 

12"""Support code for running unit tests""" 

13 

14__all__ = [ 

15 "init", 

16 "MemoryTestCase", 

17 "ExecutablesTestCase", 

18 "getTempFilePath", 

19 "TestCase", 

20 "assertFloatsAlmostEqual", 

21 "assertFloatsNotEqual", 

22 "assertFloatsEqual", 

23 "debugger", 

24 "classParameters", 

25 "methodParameters", 

26 "temporaryDirectory", 

27] 

28 

29import contextlib 

30import functools 

31import gc 

32import inspect 

33import itertools 

34import os 

35import re 

36import shutil 

37import subprocess 

38import sys 

39import tempfile 

40import unittest 

41import warnings 

42from typing import Any, Callable, Dict, Iterator, List, Mapping, Optional, Sequence, Set, Type, Union 

43 

44import numpy 

45import psutil 

46 

47# Initialize the list of open files to an empty set 

48open_files = set() 

49 

50 

51def _get_open_files() -> Set[str]: 

52 """Return a set containing the list of files currently open in this 

53 process. 

54 

55 Returns 

56 ------- 

57 open_files : `set` 

58 Set containing the list of open files. 

59 """ 

60 return set(p.path for p in psutil.Process().open_files()) 

61 

62 

63def init() -> None: 

64 """Initialize the memory tester and file descriptor leak tester.""" 

65 global open_files 

66 # Reset the list of open files 

67 open_files = _get_open_files() 

68 

69 

70def sort_tests(tests) -> unittest.TestSuite: 

71 """Sort supplied test suites such that MemoryTestCases are at the end. 

72 

73 `lsst.utils.tests.MemoryTestCase` tests should always run after any other 

74 tests in the module. 

75 

76 Parameters 

77 ---------- 

78 tests : sequence 

79 Sequence of test suites. 

80 

81 Returns 

82 ------- 

83 suite : `unittest.TestSuite` 

84 A combined `~unittest.TestSuite` with 

85 `~lsst.utils.tests.MemoryTestCase` at the end. 

86 """ 

87 suite = unittest.TestSuite() 

88 memtests = [] 

89 for test_suite in tests: 

90 try: 

91 # Just test the first test method in the suite for MemoryTestCase 

92 # Use loop rather than next as it is possible for a test class 

93 # to not have any test methods and the Python community prefers 

94 # for loops over catching a StopIteration exception. 

95 bases = None 

96 for method in test_suite: 

97 bases = inspect.getmro(method.__class__) 

98 break 

99 if bases is not None and MemoryTestCase in bases: 

100 memtests.append(test_suite) 

101 else: 

102 suite.addTests(test_suite) 

103 except TypeError: 

104 if isinstance(test_suite, MemoryTestCase): 

105 memtests.append(test_suite) 

106 else: 

107 suite.addTest(test_suite) 

108 suite.addTests(memtests) 

109 return suite 

110 

111 

112def suiteClassWrapper(tests): 

113 return unittest.TestSuite(sort_tests(tests)) 

114 

115 

116# Replace the suiteClass callable in the defaultTestLoader 

117# so that we can reorder the tests. This will have 

118# no effect if no memory test cases are found. 

119unittest.defaultTestLoader.suiteClass = suiteClassWrapper 

120 

121 
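# A minimal sketch of the reordering that the hook above enables; the test
# classes defined inside this illustrative helper are assumptions, not part
# of the module's API.
def _example_sort_tests() -> unittest.TestSuite:
    class OrdinaryTests(unittest.TestCase):
        def test_nothing(self) -> None:
            pass

    class LeakTests(MemoryTestCase):
        pass

    loader = unittest.TestLoader()
    suites = [
        loader.loadTestsFromTestCase(LeakTests),
        loader.loadTestsFromTestCase(OrdinaryTests),
    ]
    # MemoryTestCase-derived suites are moved to the end, so the leak checks
    # run only after every other test in the module has finished.
    return sort_tests(suites)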

122class MemoryTestCase(unittest.TestCase): 

123 """Check for resource leaks.""" 

124 

125 ignore_regexps: List[str] = [] 

126 """List of regexps to ignore when checking for open files.""" 

127 

128 @classmethod 

129 def tearDownClass(cls) -> None: 

130 """Reset the leak counter when the tests have been completed""" 

131 init() 

132 

133 def testFileDescriptorLeaks(self) -> None: 

134 """Check if any file descriptors are open since init() called. 

135 

136 Ignores files with certain known path components and any files 

137 that match regexp patterns in class property ``ignore_regexps``. 

138 """ 

139 gc.collect() 

140 global open_files 

141 now_open = _get_open_files() 

142 

143 # Some files are opened out of the control of the stack. 

144 now_open = set( 

145 f 

146 for f in now_open 

147 if not f.endswith(".car") 

148 and not f.startswith("/proc/") 

149 and not f.endswith(".ttf") 

150 and not (f.startswith("/var/lib/") and f.endswith("/passwd")) 

151 and not f.endswith("astropy.log") 

152 and not f.endswith("mime/mime.cache") 

153 and not f.endswith(".sqlite3") 

154 and not any([re.search(r, f) for r in self.ignore_regexps]) 

155 ) 

156 

157 diff = now_open.difference(open_files) 

158 if diff: 

159 for f in diff: 

160 print("File open: %s" % f) 

161 self.fail("Failed to close %d file%s" % (len(diff), "s" if len(diff) != 1 else "")) 

162 

163 
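# A sketch of the conventional layout of a calling test module that enables
# these leak checks; the file name and class names are illustrative only.
#
#     # file tests/test_example.py
#     import unittest
#     import lsst.utils.tests
#
#     class ExampleTestCase(lsst.utils.tests.TestCase):
#         def testSomething(self):
#             ...
#
#     class MemoryTester(lsst.utils.tests.MemoryTestCase):
#         pass
#
#     def setup_module(module):
#         lsst.utils.tests.init()
#
#     if __name__ == "__main__":
#         lsst.utils.tests.init()
#         unittest.main()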

164class ExecutablesTestCase(unittest.TestCase): 

165 """Test that executables can be run and return good status. 

166 

167 The test methods are dynamically created. Callers 

168 must subclass this class in their own test file and invoke 

169 the create_executable_tests() class method to register the tests. 

170 """ 

171 

172 TESTS_DISCOVERED = -1 

173 

174 @classmethod 

175 def setUpClass(cls) -> None: 

176 """Abort testing if automated test creation was enabled and 

177 no tests were found. 

178 """ 

179 if cls.TESTS_DISCOVERED == 0: 

180 raise RuntimeError("No executables discovered.") 

181 

182 def testSanity(self) -> None: 

183 """Ensure that there is at least one test to be 

184 executed. This allows the test runner to trigger the class set up 

185 machinery and check whether any executables were discovered. 

186 """ 

187 pass 

188 

189 def assertExecutable( 

190 self, 

191 executable: str, 

192 root_dir: Optional[str] = None, 

193 args: Optional[Sequence[str]] = None, 

194 msg: Optional[str] = None, 

195 ) -> None: 

196 """Check an executable runs and returns good status. 

197 

198 Prints output to standard out. On bad exit status the test 

199 fails. If the executable cannot be located, the test is skipped. 

200 

201 Parameters 

202 ---------- 

203 executable : `str` 

204 Path to an executable. ``root_dir`` is not used if this is an 

205 absolute path. 

206 root_dir : `str`, optional 

207 Directory containing executable. Ignored if `None`. 

208 args : `list` or `tuple`, optional 

209 Arguments to be provided to the executable. 

210 msg : `str`, optional 

211 Message to use when the test fails. Can be `None` for default 

212 message. 

213 

214 Raises 

215 ------ 

216 AssertionError 

217 The executable did not return 0 exit status. 

218 """ 

219 if root_dir is not None and not os.path.isabs(executable): 

220 executable = os.path.join(root_dir, executable) 

221 

222 # Form the argument list for subprocess 

223 sp_args = [executable] 

224 argstr = "no arguments" 

225 if args is not None: 

226 sp_args.extend(args) 

227 argstr = 'arguments "' + " ".join(args) + '"' 

228 

229 print("Running executable '{}' with {}...".format(executable, argstr)) 

230 if not os.path.exists(executable): 

231 self.skipTest("Executable {} is unexpectedly missing".format(executable)) 

232 failmsg = None 

233 try: 

234 output = subprocess.check_output(sp_args) 

235 except subprocess.CalledProcessError as e: 

236 output = e.output 

237 failmsg = "Bad exit status from '{}': {}".format(executable, e.returncode) 

238 print(output.decode("utf-8")) 

239 if failmsg: 

240 if msg is None: 

241 msg = failmsg 

242 self.fail(msg) 

243 

244 @classmethod 

245 def _build_test_method(cls, executable: str, root_dir: str) -> None: 

246 """Build a test method and attach to class. 

247 

248 A test method is created for the supplied executable located 

249 in the supplied root directory. This method is attached to the class 

250 so that the test runner will discover the test and run it. 

251 

252 Parameters 

253 ---------- 

254 cls : `object` 

255 The class in which to create the tests. 

256 executable : `str` 

257 Name of executable. Can be absolute path. 

258 root_dir : `str` 

259 Path to executable. Not used if executable path is absolute. 

260 """ 

261 if not os.path.isabs(executable): 

262 executable = os.path.abspath(os.path.join(root_dir, executable)) 

263 

264 # Create the test name from the executable path. 

265 test_name = "test_exe_" + executable.replace("/", "_") 

266 

267 # This is the function that will become the test method 

268 def test_executable_runs(*args: Any) -> None: 

269 self = args[0] 

270 self.assertExecutable(executable) 

271 

272 # Give it a name and attach it to the class 

273 test_executable_runs.__name__ = test_name 

274 setattr(cls, test_name, test_executable_runs) 

275 

276 @classmethod 

277 def create_executable_tests(cls, ref_file: str, executables: Optional[Sequence[str]] = None) -> None: 

278 """Discover executables to test and create corresponding test methods. 

279 

280 Scans the directory containing the supplied reference file 

281 (usually ``__file__`` supplied from the test class) to look for 

282 executables. If executables are found a test method is created 

283 for each one. That test method will run the executable and 

284 check the returned value. 

285 

286 Executable scripts with a ``.py`` extension and shared libraries 

287 are ignored by the scanner. 

288 

289 This class method must be called before test discovery. 

290 

291 Parameters 

292 ---------- 

293 ref_file : `str` 

294 Path to a file within the directory to be searched. 

295 If the files are in the same location as the test file, then 

296 ``__file__`` can be used. 

297 executables : `list` or `tuple`, optional 

298 Sequence of executables that can override the automated 

299 detection. If an executable mentioned here is not found, a 

300 skipped test will be created for it, rather than a failed 

301 test. 

302 

303 Examples 

304 -------- 

305 >>> cls.create_executable_tests(__file__) 

306 """ 

307 # Get the search directory from the reference file 

308 ref_dir = os.path.abspath(os.path.dirname(ref_file)) 

309 

310 if executables is None: 

311 # Look for executables to test by walking the tree 

312 executables = [] 

313 for root, dirs, files in os.walk(ref_dir): 

314 for f in files: 

315 # Skip Python files. Shared libraries are executable. 

316 if not f.endswith(".py") and not f.endswith(".so"): 

317 full_path = os.path.join(root, f) 

318 if os.access(full_path, os.X_OK): 

319 executables.append(full_path) 

320 

321 # Store the number of tests found for later assessment. 

322 # Do not raise an exception if we have no executables as this would 

323 # cause the testing to abort before the test runner could properly 

324 # integrate it into the failure report. 

325 cls.TESTS_DISCOVERED = len(executables) 

326 

327 # Create the test functions and attach them to the class 

328 for e in executables: 

329 cls._build_test_method(e, ref_dir) 

330 

331 
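# A minimal sketch of registering executable tests; the subclass name and the
# use of this module's ``__file__`` as the reference file are illustrative
# assumptions.
def _example_register_executable_tests() -> None:
    class BinaryTester(ExecutablesTestCase):
        pass

    # Scan the directory containing the reference file for executables and
    # attach one generated test method per executable found.
    BinaryTester.create_executable_tests(__file__)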

332@contextlib.contextmanager 

333def getTempFilePath(ext: str, expectOutput: bool = True) -> Iterator[str]: 

334 """Return a path suitable for a temporary file and try to delete the 

335 file on success 

336 

337 If the with block completes successfully then the file is deleted, 

338 if possible; failure results in a printed warning. 

339 If a file remains when it should not, a RuntimeError exception is 

340 raised. This exception is also raised if a file is not present on context 

341 manager exit when one is expected to exist. 

342 If the block exits with an exception, the file is left on disk so it can be 

343 examined. The file name has a random component such that nested context 

344 managers can be used with the same file suffix. 

345 

346 Parameters 

347 ---------- 

348 ext : `str` 

349 File name extension, e.g. ``.fits``. 

350 expectOutput : `bool`, optional 

351 If `True`, a file should be created within the context manager. 

352 If `False`, a file should not be present when the context manager 

353 exits. 

354 

355 Returns 

356 ------- 

357 path : `str` 

358 Path for a temporary file. The path is a combination of the caller's 

359 file path and the name of the top-level function. 

360 

361 Examples 

362 -------- 

363 .. code-block:: python 

364 

365 # file tests/testFoo.py 

366 import unittest 

367 import lsst.utils.tests 

368 class FooTestCase(unittest.TestCase): 

369 def testBasics(self): 

370 self.runTest() 

371 

372 def runTest(self): 

373 with lsst.utils.tests.getTempFilePath(".fits") as tmpFile: 

374 # if tests/.tests exists then 

375 # tmpFile = "tests/.tests/testFoo_testBasics.fits" 

376 # otherwise tmpFile = "testFoo_testBasics.fits" 

377 ... 

378 # at the end of this "with" block the path tmpFile will be 

379 # deleted, but only if the file exists and the "with" 

380 # block terminated normally (rather than with an exception) 

381 ... 

382 """ 

383 stack = inspect.stack() 

384 # get name of first function in the file 

385 for i in range(2, len(stack)): 

386 frameInfo = inspect.getframeinfo(stack[i][0]) 

387 if i == 2: 

388 callerFilePath = frameInfo.filename 

389 callerFuncName = frameInfo.function 

390 elif callerFilePath == frameInfo.filename: 

391 # this function called the previous function 

392 callerFuncName = frameInfo.function 

393 else: 

394 break 

395 

396 callerDir, callerFileNameWithExt = os.path.split(callerFilePath) 

397 callerFileName = os.path.splitext(callerFileNameWithExt)[0] 

398 outDir = os.path.join(callerDir, ".tests") 

399 if not os.path.isdir(outDir): 

400 outDir = "" 

401 prefix = "%s_%s-" % (callerFileName, callerFuncName) 

402 outPath = tempfile.mktemp(dir=outDir, suffix=ext, prefix=prefix) 

403 if os.path.exists(outPath): 

404 # There should not be a file there given the randomizer. Warn and 

405 # remove. 

406 # Use stacklevel 3 so that the warning is reported from the end of the 

407 # with block 

408 warnings.warn("Unexpectedly found pre-existing tempfile named %r" % (outPath,), stacklevel=3) 

409 try: 

410 os.remove(outPath) 

411 except OSError: 

412 pass 

413 

414 yield outPath 

415 

416 fileExists = os.path.exists(outPath) 

417 if expectOutput: 

418 if not fileExists: 

419 raise RuntimeError("Temp file expected named {} but none found".format(outPath)) 

420 else: 

421 if fileExists: 

422 raise RuntimeError("Unexpectedly discovered temp file named {}".format(outPath)) 

423 # Try to clean up the file regardless 

424 if fileExists: 

425 try: 

426 os.remove(outPath) 

427 except OSError as e: 

428 # Use stacklevel 3 so that the warning is reported from the end of 

429 # the with block. 

430 warnings.warn("Warning: could not remove file %r: %s" % (outPath, e), stacklevel=3) 

431 

432 
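# A small sketch of the ``expectOutput=False`` mode, which asserts that the
# wrapped code did *not* leave a file behind; the helper name and suffix are
# illustrative.
def _example_expect_no_output() -> None:
    with getTempFilePath(".fits", expectOutput=False) as tmpFile:
        # Exercise code that should clean up after itself; a RuntimeError is
        # raised on exit if ``tmpFile`` still exists.
        pass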

433class TestCase(unittest.TestCase): 

434 """Subclass of unittest.TestCase that adds some custom assertions for 

435 convenience. 

436 """ 

437 

438 

439def inTestCase(func: Callable) -> Callable: 

440 """Add a free function to our custom TestCase class, while 

441 also making it available as a free function. 

442 """ 

443 setattr(TestCase, func.__name__, func) 

444 return func 

445 

446 
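# A sketch of how a package can attach its own assertion to TestCase; the
# assertion name and body are illustrative assumptions.
def _example_in_test_case() -> None:
    @inTestCase
    def assertIsPositive(testCase: unittest.TestCase, value: float) -> None:
        testCase.assertGreater(value, 0.0)

    # After decoration the helper is available both as a free function and
    # as ``self.assertIsPositive(...)`` on lsst.utils.tests.TestCase.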

447def debugger(*exceptions): 

448 """Enter the debugger when there's an uncaught exception 

449 

450 To use, just slap a ``@debugger()`` on your function. 

451 

452 You may provide specific exception classes to catch as arguments to 

453 the decorator function, e.g., 

454 ``@debugger(RuntimeError, NotImplementedError)``. 

455 If no exception classes are given, this defaults to catching any `Exception` 

456 (which includes the `AssertionError` raised by failing `unittest.TestCase` assertions). 

457 

458 Code provided by "Rosh Oxymoron" on StackOverflow: 

459 http://stackoverflow.com/questions/4398967/python-unit-testing-automatically-running-the-debugger-when-a-test-fails 

460 

461 Notes 

462 ----- 

463 Consider using ``pytest --pdb`` instead of this decorator. 

464 """ 

465 if not exceptions: 

466 exceptions = (Exception,) 

467 

468 def decorator(f): 

469 @functools.wraps(f) 

470 def wrapper(*args, **kwargs): 

471 try: 

472 return f(*args, **kwargs) 

473 except exceptions: 

474 import pdb 

475 import sys 

476 

477 pdb.post_mortem(sys.exc_info()[2]) 

478 

479 return wrapper 

480 

481 return decorator 

482 

483 
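# A sketch of decorating a test method so that an uncaught exception drops
# into pdb; the test class, method, and exception list are illustrative.
def _example_debugger() -> None:
    class ExampleTests(unittest.TestCase):
        @debugger(AssertionError, RuntimeError)
        def testSomething(self) -> None:
            # pdb.post_mortem() starts if either exception escapes here.
            self.assertEqual(1 + 1, 2)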

484def plotImageDiff( 

485 lhs: numpy.ndarray, 

486 rhs: numpy.ndarray, 

487 bad: Optional[numpy.ndarray] = None, 

488 diff: Optional[numpy.ndarray] = None, 

489 plotFileName: Optional[str] = None, 

490) -> None: 

491 """Plot the comparison of two 2-d NumPy arrays. 

492 

493 Parameters 

494 ---------- 

495 lhs : `numpy.ndarray` 

496 LHS values to compare; a 2-d NumPy array 

497 rhs : `numpy.ndarray` 

498 RHS values to compare; a 2-d NumPy array 

499 bad : `numpy.ndarray` 

500 A 2-d boolean NumPy array of values to emphasize in the plots 

501 diff : `numpy.ndarray` 

502 difference array; a 2-d NumPy array, or None to show lhs-rhs 

503 plotFileName : `str` 

504 Filename to save the plot to. If None, the plot will be displayed in 

505 a window. 

506 

507 Notes 

508 ----- 

509 This method uses `matplotlib` and imports it internally; it should be 

510 wrapped in a try/except block within packages that do not depend on 

511 `matplotlib` (including `~lsst.utils`). 

512 """ 

513 from matplotlib import pyplot 

514 

515 if diff is None: 

516 diff = lhs - rhs 

517 pyplot.figure() 

518 if bad is not None: 

519 # make an rgba image that's red and transparent where not bad 

520 badImage = numpy.zeros(bad.shape + (4,), dtype=numpy.uint8) 

521 badImage[:, :, 0] = 255 

522 badImage[:, :, 1] = 0 

523 badImage[:, :, 2] = 0 

524 badImage[:, :, 3] = 255 * bad 

525 vmin1 = numpy.minimum(numpy.min(lhs), numpy.min(rhs)) 

526 vmax1 = numpy.maximum(numpy.max(lhs), numpy.max(rhs)) 

527 vmin2 = numpy.min(diff) 

528 vmax2 = numpy.max(diff) 

529 for n, (image, title) in enumerate([(lhs, "lhs"), (rhs, "rhs"), (diff, "diff")]): 

530 pyplot.subplot(2, 3, n + 1) 

531 im1 = pyplot.imshow( 

532 image, cmap=pyplot.cm.gray, interpolation="nearest", origin="lower", vmin=vmin1, vmax=vmax1 

533 ) 

534 if bad is not None: 

535 pyplot.imshow(badImage, alpha=0.2, interpolation="nearest", origin="lower") 

536 pyplot.axis("off") 

537 pyplot.title(title) 

538 pyplot.subplot(2, 3, n + 4) 

539 im2 = pyplot.imshow( 

540 image, cmap=pyplot.cm.gray, interpolation="nearest", origin="lower", vmin=vmin2, vmax=vmax2 

541 ) 

542 if bad is not None: 

543 pyplot.imshow(badImage, alpha=0.2, interpolation="nearest", origin="lower") 

544 pyplot.axis("off") 

545 pyplot.title(title) 

546 pyplot.subplots_adjust(left=0.05, bottom=0.05, top=0.92, right=0.75, wspace=0.05, hspace=0.05) 

547 cax1 = pyplot.axes([0.8, 0.55, 0.05, 0.4]) 

548 pyplot.colorbar(im1, cax=cax1) 

549 cax2 = pyplot.axes([0.8, 0.05, 0.05, 0.4]) 

550 pyplot.colorbar(im2, cax=cax2) 

551 if plotFileName: 

552 pyplot.savefig(plotFileName) 

553 else: 

554 pyplot.show() 

555 

556 
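# A sketch of calling the plot helper, guarded for environments without
# matplotlib as the docstring advises; the arrays and file name are
# illustrative.
def _example_plot_image_diff() -> None:
    lhs = numpy.zeros((10, 10), dtype=float)
    rhs = numpy.ones((10, 10), dtype=float)
    bad = numpy.abs(lhs - rhs) > 0.5
    try:
        plotImageDiff(lhs, rhs, bad=bad, plotFileName="diff.png")
    except ImportError:
        # matplotlib is not a dependency of lsst.utils, so skip the plot.
        pass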

557@inTestCase 

558def assertFloatsAlmostEqual( 

559 testCase: unittest.TestCase, 

560 lhs: Union[float, numpy.ndarray], 

561 rhs: Union[float, numpy.ndarray], 

562 rtol: Optional[float] = sys.float_info.epsilon, 

563 atol: Optional[float] = sys.float_info.epsilon, 

564 relTo: Optional[float] = None, 

565 printFailures: bool = True, 

566 plotOnFailure: bool = False, 

567 plotFileName: Optional[str] = None, 

568 invert: bool = False, 

569 msg: Optional[str] = None, 

570 ignoreNaNs: bool = False, 

571) -> None: 

572 """Highly-configurable floating point comparisons for scalars and arrays. 

573 

574 The test assertion will fail unless all elements of ``lhs`` and ``rhs`` 

575 are equal to within the tolerances specified by ``rtol`` and ``atol``. 

576 More precisely, the comparison is: 

577 

578 ``abs(lhs - rhs) <= relTo*rtol OR abs(lhs - rhs) <= atol`` 

579 

580 If ``rtol`` or ``atol`` is `None`, that term in the comparison is not 

581 performed at all. 

582 

583 When not specified, ``relTo`` is the elementwise maximum of the absolute 

584 values of ``lhs`` and ``rhs``. If set manually, it should usually be set 

585 to either ``lhs`` or ``rhs``, or a scalar value typical of what is 

586 expected. 

587 

588 Parameters 

589 ---------- 

590 testCase : `unittest.TestCase` 

591 Instance the test is part of. 

592 lhs : scalar or array-like 

593 LHS value(s) to compare; may be a scalar or array-like of any 

594 dimension. 

595 rhs : scalar or array-like 

596 RHS value(s) to compare; may be a scalar or array-like of any 

597 dimension. 

598 rtol : `float`, optional 

599 Relative tolerance for comparison; defaults to double-precision 

600 epsilon. 

601 atol : `float`, optional 

602 Absolute tolerance for comparison; defaults to double-precision 

603 epsilon. 

604 relTo : `float`, optional 

605 Value to which comparison with rtol is relative. 

606 printFailures : `bool`, optional 

607 Upon failure, print all unequal elements as part of the message. 

608 plotOnFailure : `bool`, optional 

609 Upon failure, plot the originals and their residual with matplotlib. 

610 Only 2-d arrays are supported. 

611 plotFileName : `str`, optional 

612 Filename to save the plot to. If `None`, the plot will be displayed in 

613 a window. 

614 invert : `bool`, optional 

615 If `True`, invert the comparison and fail only if any elements *are* 

616 equal. Used to implement `~lsst.utils.tests.assertFloatsNotEqual`, 

617 which should generally be used instead for clarity. 

618 

619 msg : `str`, optional 

620 String to append to the error message when assert fails. 

621 ignoreNaNs : `bool`, optional 

622 If `True` (`False` is default) mask out any NaNs from operand arrays 

623 before performing comparisons if they are in the same locations; NaNs 

624 in different locations trigger test assertion failures, even when 

625 ``invert=True``. Scalar NaNs are treated like arrays containing only 

626 NaNs of the same shape as the other operand, and no comparisons are 

627 performed if both sides are scalar NaNs. 

628 

629 Raises 

630 ------ 

631 AssertionError 

632 The values are not almost equal. 

633 """ 

634 if ignoreNaNs: 

635 lhsMask = numpy.isnan(lhs) 

636 rhsMask = numpy.isnan(rhs) 

637 if not numpy.all(lhsMask == rhsMask): 

638 testCase.fail( 

639 f"lhs has {lhsMask.sum()} NaN values and rhs has {rhsMask.sum()} NaN values, " 

640 "in different locations." 

641 ) 

642 if numpy.all(lhsMask): 

643 assert numpy.all(rhsMask), "Should be guaranteed by previous if." 

644 # All operands are fully NaN (either scalar NaNs or arrays of only 

645 # NaNs). 

646 return 

647 assert not numpy.all(rhsMask), "Should be guaranteed by previous two ifs." 

648 # If either operand is an array select just its not-NaN values. Note 

649 # that these expressions are never True for scalar operands, because if 

650 # they are NaN then the numpy.all checks above will catch them. 

651 if numpy.any(lhsMask): 

652 lhs = lhs[numpy.logical_not(lhsMask)] 

653 if numpy.any(rhsMask): 

654 rhs = rhs[numpy.logical_not(rhsMask)] 

655 if not numpy.isfinite(lhs).all(): 

656 testCase.fail("Non-finite values in lhs") 

657 if not numpy.isfinite(rhs).all(): 

658 testCase.fail("Non-finite values in rhs") 

659 diff = lhs - rhs 

660 absDiff = numpy.abs(lhs - rhs) 

661 if rtol is not None: 

662 if relTo is None: 

663 relTo = numpy.maximum(numpy.abs(lhs), numpy.abs(rhs)) 

664 else: 

665 relTo = numpy.abs(relTo) 

666 bad = absDiff > rtol * relTo 

667 if atol is not None: 

668 bad = numpy.logical_and(bad, absDiff > atol) 

669 else: 

670 if atol is None: 

671 raise ValueError("rtol and atol cannot both be None") 

672 bad = absDiff > atol 

673 failed = numpy.any(bad) 

674 if invert: 

675 failed = not failed 

676 bad = numpy.logical_not(bad) 

677 cmpStr = "==" 

678 failStr = "are the same" 

679 else: 

680 cmpStr = "!=" 

681 failStr = "differ" 

682 errMsg = [] 

683 if failed: 

684 if numpy.isscalar(bad): 

685 if rtol is None: 

686 errMsg = ["%s %s %s; diff=%s with atol=%s" % (lhs, cmpStr, rhs, absDiff, atol)] 

687 elif atol is None: 

688 errMsg = [ 

689 "%s %s %s; diff=%s/%s=%s with rtol=%s" 

690 % (lhs, cmpStr, rhs, absDiff, relTo, absDiff / relTo, rtol) 

691 ] 

692 else: 

693 errMsg = [ 

694 "%s %s %s; diff=%s/%s=%s with rtol=%s, atol=%s" 

695 % (lhs, cmpStr, rhs, absDiff, relTo, absDiff / relTo, rtol, atol) 

696 ] 

697 else: 

698 errMsg = ["%d/%d elements %s with rtol=%s, atol=%s" % (bad.sum(), bad.size, failStr, rtol, atol)] 

699 if plotOnFailure: 

700 if len(lhs.shape) != 2 or len(rhs.shape) != 2: 

701 raise ValueError("plotOnFailure is only valid for 2-d arrays") 

702 try: 

703 plotImageDiff(lhs, rhs, bad, diff=diff, plotFileName=plotFileName) 

704 except ImportError: 

705 errMsg.append("Failure plot requested but matplotlib could not be imported.") 

706 if printFailures: 

707 # Make sure everything is an array if any of them are, so we 

708 # can treat them the same (diff and absDiff are arrays if 

709 # either rhs or lhs is), and we don't get here if neither is. 

710 if numpy.isscalar(relTo): 

711 relTo = numpy.ones(bad.shape, dtype=float) * relTo 

712 if numpy.isscalar(lhs): 

713 lhs = numpy.ones(bad.shape, dtype=float) * lhs 

714 if numpy.isscalar(rhs): 

715 rhs = numpy.ones(bad.shape, dtype=float) * rhs 

716 if rtol is None: 

717 for a, b, diff in zip(lhs[bad], rhs[bad], absDiff[bad]): 

718 errMsg.append("%s %s %s (diff=%s)" % (a, cmpStr, b, diff)) 

719 else: 

720 for a, b, diff, rel in zip(lhs[bad], rhs[bad], absDiff[bad], relTo[bad]): 

721 errMsg.append("%s %s %s (diff=%s/%s=%s)" % (a, cmpStr, b, diff, rel, diff / rel)) 

722 

723 if msg is not None: 

724 errMsg.append(msg) 

725 testCase.assertFalse(failed, msg="\n".join(errMsg)) 

726 

727 
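# A short sketch of the tolerance logic shown as a free-function call; the
# ``self`` parameter stands in for any unittest.TestCase instance and the
# values are illustrative.
def _example_assert_floats_almost_equal(self: unittest.TestCase) -> None:
    a = numpy.array([1.0, 2.0, 3.0])
    b = a + 1.0e-14
    # Passes: every |a - b| is within rtol * max(|a|, |b|) or within atol.
    assertFloatsAlmostEqual(self, a, b, rtol=1.0e-12, atol=1.0e-12)
    # The same call is available as a method on lsst.utils.tests.TestCase:
    #     self.assertFloatsAlmostEqual(a, b, rtol=1.0e-12, atol=1.0e-12)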

728@inTestCase 

729def assertFloatsNotEqual( 

730 testCase: unittest.TestCase, 

731 lhs: Union[float, numpy.ndarray], 

732 rhs: Union[float, numpy.ndarray], 

733 **kwds: Any, 

734) -> None: 

735 """Fail a test if the given floating point values are equal to within the 

736 given tolerances. 

737 

738 See `~lsst.utils.tests.assertFloatsAlmostEqual` (called with 

739 ``rtol=atol=0``) for more information. 

740 

741 Parameters 

742 ---------- 

743 testCase : `unittest.TestCase` 

744 Instance the test is part of. 

745 lhs : scalar or array-like 

746 LHS value(s) to compare; may be a scalar or array-like of any 

747 dimension. 

748 rhs : scalar or array-like 

749 RHS value(s) to compare; may be a scalar or array-like of any 

750 dimension. 

751 

752 Raises 

753 ------ 

754 AssertionError 

755 The values are almost equal. 

756 """ 

757 return assertFloatsAlmostEqual(testCase, lhs, rhs, invert=True, **kwds) 

758 

759 

760@inTestCase 

761def assertFloatsEqual( 

762 testCase: unittest.TestCase, 

763 lhs: Union[float, numpy.ndarray], 

764 rhs: Union[float, numpy.ndarray], 

765 **kwargs: Any, 

766) -> None: 

767 """ 

768 Assert that lhs == rhs (both numeric types, whether scalar or array). 

769 

770 See `~lsst.utils.tests.assertFloatsAlmostEqual` (called with 

771 ``rtol=atol=0``) for more information. 

772 

773 Parameters 

774 ---------- 

775 testCase : `unittest.TestCase` 

776 Instance the test is part of. 

777 lhs : scalar or array-like 

778 LHS value(s) to compare; may be a scalar or array-like of any 

779 dimension. 

780 rhs : scalar or array-like 

781 RHS value(s) to compare; may be a scalar or array-like of any 

782 dimension. 

783 

784 Raises 

785 ------ 

786 AssertionError 

787 The values are not equal. 

788 """ 

789 return assertFloatsAlmostEqual(testCase, lhs, rhs, rtol=0, atol=0, **kwargs) 

790 

791 
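# A brief sketch contrasting the exact and inverted comparisons; the values
# and the ``self`` parameter are illustrative.
def _example_exact_float_assertions(self: unittest.TestCase) -> None:
    values = numpy.array([0.5, 1.0, 2.0])
    # Exact comparison: rtol=atol=0, so any difference at all is a failure.
    assertFloatsEqual(self, values, values.copy())
    # Inverted comparison: fails only if any element pair is equal.
    assertFloatsNotEqual(self, values, values + 1.0)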

792def _settingsIterator(settings: Dict[str, Sequence[Any]]) -> Iterator[Dict[str, Any]]: 

793 """Return an iterator for the provided test settings 

794 

795 Parameters 

796 ---------- 

797 settings : `dict` (`str`: iterable) 

798 Lists of test parameters. Each should be an iterable of the same 

799 length. If a string is provided as an iterable, it will be converted 

800 to a list of a single string. 

801 

802 Raises 

803 ------ 

804 AssertionError 

805 If the ``settings`` are not of the same length. 

806 

807 Yields 

808 ------ 

809 parameters : `dict` (`str`: anything) 

810 Set of parameters. 

811 """ 

812 for name, values in settings.items(): 

813 if isinstance(values, str): 

814 # Probably meant as a single-element string, rather than an 

815 # iterable of chars. 

816 settings[name] = [values] 

817 num = len(next(iter(settings.values()))) # Number of settings 

818 for name, values in settings.items(): 

819 assert len(values) == num, f"Length mismatch for setting {name}: {len(values)} vs {num}" 

820 for ii in range(num): 

821 values = [settings[kk][ii] for kk in settings] 

822 yield dict(zip(settings, values)) 

823 

824 
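# A tiny sketch of the parameter expansion performed above; the setting
# names and values are illustrative.
def _example_settings_iterator() -> List[Dict[str, Any]]:
    # Returns [{"foo": 1, "bar": "a"}, {"foo": 2, "bar": "b"}]: the i-th
    # value of every setting is combined into the i-th parameter dict.
    return list(_settingsIterator({"foo": [1, 2], "bar": ["a", "b"]}))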

825def classParameters(**settings: Sequence[Any]) -> Callable: 

826 """Class decorator for generating unit tests 

827 

828 This decorator generates classes with class variables according to the 

829 supplied ``settings``. 

830 

831 Parameters 

832 ---------- 

833 **settings : `dict` (`str`: iterable) 

834 The lists of test parameters to set as class variables in turn. Each 

835 should be an iterable of the same length. 

836 

837 Examples 

838 -------- 

839 :: 

840 

841 @classParameters(foo=[1, 2], bar=[3, 4]) 

842 class MyTestCase(unittest.TestCase): 

843 ... 

844 

845 will generate two classes, as if you wrote:: 

846 

847 class MyTestCase_1_3(unittest.TestCase): 

848 foo = 1 

849 bar = 3 

850 ... 

851 

852 class MyTestCase_2_4(unittest.TestCase): 

853 foo = 2 

854 bar = 4 

855 ... 

856 

857 Note that the values are embedded in the class name. 

858 """ 

859 

860 def decorator(cls: Type) -> None: 

861 module = sys.modules[cls.__module__].__dict__ 

862 for params in _settingsIterator(settings): 

863 name = f"{cls.__name__}_{'_'.join(str(vv) for vv in params.values())}" 

864 bindings = dict(cls.__dict__) 

865 bindings.update(params) 

866 module[name] = type(name, (cls,), bindings) 

867 

868 return decorator 

869 

870 

871def methodParameters(**settings: Sequence[Any]) -> Callable: 

872 """Iterate over supplied settings to create subtests automatically. 

873 

874 This decorator iterates over the supplied settings, using 

875 ``TestCase.subTest`` to communicate the values in the event of a failure. 

876 

877 Parameters 

878 ---------- 

879 **settings : `dict` (`str`: iterable) 

880 The lists of test parameters. Each should be an iterable of the same 

881 length. 

882 

883 Examples 

884 -------- 

885 .. code-block:: python 

886 

887 @methodParameters(foo=[1, 2], bar=[3, 4]) 

888 def testSomething(self, foo, bar): 

889 ... 

890 

891 will run: 

892 

893 .. code-block:: python 

894 

895 testSomething(foo=1, bar=3) 

896 testSomething(foo=2, bar=4) 

897 """ 

898 

899 def decorator(func: Callable) -> Callable: 

900 @functools.wraps(func) 

901 def wrapper(self: unittest.TestCase, *args: Any, **kwargs: Any) -> None: 

902 for params in _settingsIterator(settings): 

903 kwargs.update(params) 

904 with self.subTest(**params): 

905 func(self, *args, **kwargs) 

906 

907 return wrapper 

908 

909 return decorator 

910 

911 

912def _cartesianProduct(settings: Mapping[str, Sequence[Any]]) -> Mapping[str, Sequence[Any]]: 

913 """Return the cartesian product of the settings 

914 

915 Parameters 

916 ---------- 

917 settings : `dict` mapping `str` to `iterable` 

918 Parameter combinations. 

919 

920 Returns 

921 ------- 

922 product : `dict` mapping `str` to `iterable` 

923 Parameter combinations covering the cartesian product (all possible 

924 combinations) of the input parameters. 

925 

926 Examples 

927 -------- 

928 .. code-block:: python 

929 

930 cartesianProduct({"foo": [1, 2], "bar": ["black", "white"]}) 

931 

932 will return: 

933 

934 .. code-block:: python 

935 

936 {"foo": [1, 1, 2, 2], "bar": ["black", "white", "black", "white"]} 

937 """ 

938 product: Dict[str, List[Any]] = {kk: [] for kk in settings} 

939 for values in itertools.product(*settings.values()): 

940 for kk, vv in zip(settings.keys(), values): 

941 product[kk].append(vv) 

942 return product 

943 

944 

945def classParametersProduct(**settings: Sequence[Any]) -> Callable: 

946 """Class decorator for generating unit tests 

947 

948 This decorator generates classes with class variables according to the 

949 cartesian product of the supplied ``settings``. 

950 

951 Parameters 

952 ---------- 

953 **settings : `dict` (`str`: iterable) 

954 The lists of test parameters to set as class variables in turn. Each 

955 should be an iterable. 

956 

957 Examples 

958 -------- 

959 .. code-block:: python 

960 

961 @classParametersProduct(foo=[1, 2], bar=[3, 4]) 

962 class MyTestCase(unittest.TestCase): 

963 ... 

964 

965 will generate four classes, as if you wrote: 

966 

967 .. code-block:: python 

968 

969 class MyTestCase_1_3(unittest.TestCase): 

970 foo = 1 

971 bar = 3 

972 ... 

973 

974 class MyTestCase_1_4(unittest.TestCase): 

975 foo = 1 

976 bar = 4 

977 ... 

978 

979 class MyTestCase_2_3(unittest.TestCase): 

980 foo = 2 

981 bar = 3 

982 ... 

983 

984 class MyTestCase_2_4(unittest.TestCase): 

985 foo = 2 

986 bar = 4 

987 ... 

988 

989 Note that the values are embedded in the class name. 

990 """ 

991 return classParameters(**_cartesianProduct(settings)) 

992 

993 

994def methodParametersProduct(**settings: Sequence[Any]) -> Callable: 

995 """Iterate over cartesian product creating sub tests. 

996 

997 This decorator iterates over the cartesian product of the supplied 

998 settings, using `~unittest.TestCase.subTest` to communicate the values in 

999 the event of a failure. 

1000 

1001 Parameters 

1002 ---------- 

1003 **settings : `dict` (`str`: iterable) 

1004 The parameter combinations to test. Each should be an iterable. 

1005 

1006 Examples 

1007 -------- 

1008 

1009 @methodParametersProduct(foo=[1, 2], bar=["black", "white"]) 

1010 def testSomething(self, foo, bar): 

1011 ... 

1012 

1013 will run: 

1014 

1015 testSomething(foo=1, bar="black") 

1016 testSomething(foo=1, bar="white") 

1017 testSomething(foo=2, bar="black") 

1018 testSomething(foo=2, bar="white") 

1019 """ 

1020 return methodParameters(**_cartesianProduct(settings)) 

1021 

1022 

1023@contextlib.contextmanager 

1024def temporaryDirectory() -> Iterator[str]: 

1025 """Context manager that creates and destroys a temporary directory. 

1026 

1027 The difference from `tempfile.TemporaryDirectory` is that this ignores 

1028 errors when deleting a directory, which may happen with some filesystems. 

1029 """ 

1030 tmpdir = tempfile.mkdtemp() 

1031 yield tmpdir 

1032 shutil.rmtree(tmpdir, ignore_errors=True)
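# A short sketch of the context manager above; the file written inside the
# scratch directory is illustrative.
def _example_temporary_directory() -> None:
    with temporaryDirectory() as tmpdir:
        # Work inside a scratch directory that is removed, ignoring errors,
        # after the block completes.
        with open(os.path.join(tmpdir, "scratch.txt"), "w") as fh:
            fh.write("example\n")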