Coverage for python/lsst/utils/tests.py: 30%

Shortcuts on this page

r m x p   toggle line displays

j k   next/prev highlighted chunk

0   (zero) top of page

1   (one) first highlighted chunk

320 statements  

1# This file is part of utils. 

2# 

3# Developed for the LSST Data Management System. 

4# This product includes software developed by the LSST Project 

5# (https://www.lsst.org). 

6# See the COPYRIGHT file at the top-level directory of this distribution 

7# for details of code ownership. 

8# 

9# Use of this source code is governed by a 3-clause BSD-style 

10# license that can be found in the LICENSE file. 

11 

12"""Support code for running unit tests""" 

13 

# Public API of this module. classParametersProduct and
# methodParametersProduct are public, documented decorators defined below,
# so they are exported alongside their non-product counterparts.
__all__ = ["init", "MemoryTestCase", "ExecutablesTestCase", "getTempFilePath",
           "TestCase", "assertFloatsAlmostEqual", "assertFloatsNotEqual", "assertFloatsEqual",
           "debugger", "classParameters", "methodParameters", "classParametersProduct",
           "methodParametersProduct", "temporaryDirectory"]

17 

18import contextlib 

19import gc 

20import inspect 

21import os 

22import subprocess 

23import sys 

24import unittest 

25import warnings 

26import numpy 

27import psutil 

28import functools 

29import tempfile 

30import shutil 

31import itertools 

32 

33from typing import ( 

34 Any, 

35 Callable, 

36 Dict, 

37 Iterator, 

38 List, 

39 Optional, 

40 Mapping, 

41 Set, 

42 Sequence, 

43 Type, 

44 Union, 

45) 

46 

# Baseline of files open in this process, used by the leak checker in
# MemoryTestCase.  Starts empty; init() resets it to the current set of
# open files so later comparisons only flag files opened afterwards.
open_files = set()

49 

50 

def _get_open_files() -> Set[str]:
    """Report every file path currently open in this process.

    Returns
    -------
    open_files : `set`
        The paths of all files the current process has open, as reported
        by `psutil`.
    """
    return {entry.path for entry in psutil.Process().open_files()}

61 

62 

def init() -> None:
    """Initialize the memory tester and file descriptor leak tester.

    Records the files currently open so that subsequent leak checks only
    report files opened after this call.
    """
    global open_files
    open_files = _get_open_files()

68 

69 

def sort_tests(tests) -> unittest.TestSuite:
    """Sort supplied test suites such that MemoryTestCases are at the end.

    `lsst.utils.tests.MemoryTestCase` tests should always run after any other
    tests in the module.

    Parameters
    ----------
    tests : sequence
        Sequence of test suites.

    Returns
    -------
    suite : `unittest.TestSuite`
        A combined `~unittest.TestSuite` with
        `~lsst.utils.tests.MemoryTestCase` at the end.
    """
    ordered = unittest.TestSuite()
    deferred = []
    for item in tests:
        try:
            # Classify the whole suite by peeking at its first test.  A for
            # loop (rather than next()) copes with an empty suite without
            # needing to catch StopIteration.
            mro = None
            for test in item:
                mro = inspect.getmro(test.__class__)
                break
            if mro is not None and MemoryTestCase in mro:
                deferred.append(item)
            else:
                ordered.addTests(item)
        except TypeError:
            # item was not iterable: treat it as a single test case.
            if isinstance(item, MemoryTestCase):
                deferred.append(item)
            else:
                ordered.addTest(item)
    ordered.addTests(deferred)
    return ordered

110 

111 

def suiteClassWrapper(tests):
    """Build a `unittest.TestSuite` with memory tests ordered last."""
    ordered = sort_tests(tests)
    return unittest.TestSuite(ordered)

114 

115 

# Replace the suiteClass callable in the defaultTestLoader so that test
# suites are reordered (MemoryTestCase last) at discovery time.  This is a
# module-import side effect; it is a no-op for modules containing no
# MemoryTestCase subclasses.
unittest.defaultTestLoader.suiteClass = suiteClassWrapper

120 

121 

class MemoryTestCase(unittest.TestCase):
    """Check for resource leaks."""

    @classmethod
    def tearDownClass(cls) -> None:
        """Reset the leak counter when the tests have been completed"""
        init()

    def testFileDescriptorLeaks(self) -> None:
        """Check if any file descriptors are open since init() called."""
        gc.collect()
        global open_files

        def _is_ignored(path: str) -> bool:
            # Files opened outside the control of the code under test
            # (system databases, fonts, caches, logging) are not leaks.
            if path.startswith("/proc/"):
                return True
            if path.startswith("/var/lib/") and path.endswith("/passwd"):
                return True
            return path.endswith((".car", ".ttf", "astropy.log", "mime/mime.cache"))

        now_open = {f for f in _get_open_files() if not _is_ignored(f)}

        leaked = now_open.difference(open_files)
        if leaked:
            for f in leaked:
                print("File open: %s" % f)
            self.fail("Failed to close %d file%s" % (len(leaked), "s" if len(leaked) != 1 else ""))

150 

151 

class ExecutablesTestCase(unittest.TestCase):
    """Test that executables can be run and return good status.

    The test methods are dynamically created. Callers
    must subclass this class in their own test file and invoke
    the create_executable_tests() class method to register the tests.
    """

    # Number of executables found by create_executable_tests().
    # -1 means discovery has not yet been attempted; 0 means discovery ran
    # and found nothing (which setUpClass treats as an error).
    TESTS_DISCOVERED = -1

    @classmethod
    def setUpClass(cls) -> None:
        """Abort testing if automated test creation was enabled and
        no tests were found.
        """
        if cls.TESTS_DISCOVERED == 0:
            raise RuntimeError("No executables discovered.")

    def testSanity(self) -> None:
        """Ensure that there is at least one test to be
        executed. This allows the test runner to trigger the class set up
        machinery to test whether there are some executables to test.
        """
        pass

    def assertExecutable(self, executable: str, root_dir: Optional[str] = None,
                         args: Optional[Sequence[str]] = None, msg: Optional[str] = None) -> None:
        """Check an executable runs and returns good status.

        Prints output to standard out. On bad exit status the test
        fails. If the executable can not be located the test is skipped.

        Parameters
        ----------
        executable : `str`
            Path to an executable. ``root_dir`` is not used if this is an
            absolute path.
        root_dir : `str`, optional
            Directory containing executable. Ignored if `None`.
        args : `list` or `tuple`, optional
            Arguments to be provided to the executable.
        msg : `str`, optional
            Message to use when the test fails. Can be `None` for default
            message.

        Raises
        ------
        AssertionError
            The executable did not return 0 exit status.
        """
        if root_dir is not None and not os.path.isabs(executable):
            executable = os.path.join(root_dir, executable)

        # Form the argument list for subprocess
        sp_args = [executable]
        argstr = "no arguments"
        if args is not None:
            sp_args.extend(args)
            argstr = 'arguments "' + " ".join(args) + '"'

        print("Running executable '{}' with {}...".format(executable, argstr))
        if not os.path.exists(executable):
            self.skipTest("Executable {} is unexpectedly missing".format(executable))
        failmsg = None
        try:
            output = subprocess.check_output(sp_args)
        except subprocess.CalledProcessError as e:
            # Keep the captured output so it can still be printed below,
            # then record the failure for reporting after the print.
            output = e.output
            failmsg = "Bad exit status from '{}': {}".format(executable, e.returncode)
        print(output.decode('utf-8'))
        if failmsg:
            if msg is None:
                msg = failmsg
            self.fail(msg)

    @classmethod
    def _build_test_method(cls, executable: str, root_dir: str) -> None:
        """Build a test method and attach to class.

        A test method is created for the supplied executable located
        in the supplied root directory. This method is attached to the class
        so that the test runner will discover the test and run it.

        Parameters
        ----------
        cls : `object`
            The class in which to create the tests.
        executable : `str`
            Name of executable. Can be absolute path.
        root_dir : `str`
            Path to executable. Not used if executable path is absolute.
        """
        if not os.path.isabs(executable):
            executable = os.path.abspath(os.path.join(root_dir, executable))

        # Create the test name from the executable path.
        test_name = "test_exe_" + executable.replace("/", "_")

        # This is the function that will become the test method.
        # It binds ``executable`` by closure; ``*args`` receives ``self``.
        def test_executable_runs(*args: Any) -> None:
            self = args[0]
            self.assertExecutable(executable)

        # Give it a name and attach it to the class
        test_executable_runs.__name__ = test_name
        setattr(cls, test_name, test_executable_runs)

    @classmethod
    def create_executable_tests(cls, ref_file: str, executables: Optional[Sequence[str]] = None) -> None:
        """Discover executables to test and create corresponding test methods.

        Scans the directory containing the supplied reference file
        (usually ``__file__`` supplied from the test class) to look for
        executables. If executables are found a test method is created
        for each one. That test method will run the executable and
        check the returned value.

        Executable scripts with a ``.py`` extension and shared libraries
        are ignored by the scanner.

        This class method must be called before test discovery.

        Parameters
        ----------
        ref_file : `str`
            Path to a file within the directory to be searched.
            If the files are in the same location as the test file, then
            ``__file__`` can be used.
        executables : `list` or `tuple`, optional
            Sequence of executables that can override the automated
            detection. If an executable mentioned here is not found, a
            skipped test will be created for it, rather than a failed
            test.

        Examples
        --------
        >>> cls.create_executable_tests(__file__)
        """
        # Get the search directory from the reference file
        ref_dir = os.path.abspath(os.path.dirname(ref_file))

        if executables is None:
            # Look for executables to test by walking the tree
            executables = []
            for root, dirs, files in os.walk(ref_dir):
                for f in files:
                    # Skip Python files. Shared libraries are executable.
                    if not f.endswith(".py") and not f.endswith(".so"):
                        full_path = os.path.join(root, f)
                        if os.access(full_path, os.X_OK):
                            executables.append(full_path)

        # Store the number of tests found for later assessment.
        # Do not raise an exception if we have no executables as this would
        # cause the testing to abort before the test runner could properly
        # integrate it into the failure report.
        cls.TESTS_DISCOVERED = len(executables)

        # Create the test functions and attach them to the class
        for e in executables:
            cls._build_test_method(e, ref_dir)

313 

314 

@contextlib.contextmanager
def getTempFilePath(ext: str, expectOutput: bool = True) -> Iterator[str]:
    """Return a path suitable for a temporary file and try to delete the
    file on success

    If the with block completes successfully then the file is deleted,
    if possible; failure results in a printed warning.
    If a file remains when it should not, a RuntimeError exception is
    raised. This exception is also raised if a file is not present on context
    manager exit when one is expected to exist.
    If the block exits with an exception the file is left on disk so it can be
    examined. The file name has a random component such that nested context
    managers can be used with the same file suffix.

    Parameters
    ----------
    ext : `str`
        File name extension, e.g. ``.fits``.
    expectOutput : `bool`, optional
        If `True`, a file should be created within the context manager.
        If `False`, a file should not be present when the context manager
        exits.

    Returns
    -------
    path : `str`
        Path for a temporary file. The path is a combination of the caller's
        file path and the name of the top-level function

    Examples
    --------
    .. code-block:: python

        # file tests/testFoo.py
        import unittest
        import lsst.utils.tests
        class FooTestCase(unittest.TestCase):
            def testBasics(self):
                self.runTest()

            def runTest(self):
                with lsst.utils.tests.getTempFilePath(".fits") as tmpFile:
                    # if tests/.tests exists then
                    # tmpFile = "tests/.tests/testFoo_testBasics.fits"
                    # otherwise tmpFile = "testFoo_testBasics.fits"
                    ...
                # at the end of this "with" block the path tmpFile will be
                # deleted, but only if the file exists and the "with"
                # block terminated normally (rather than with an exception)
                ...
    """
    stack = inspect.stack()
    # Walk up the call stack to find the name of the outermost function in
    # the caller's file; the temp file name is derived from that function
    # so nested helper calls within the same test file share a prefix.
    for i in range(2, len(stack)):
        frameInfo = inspect.getframeinfo(stack[i][0])
        if i == 2:
            callerFilePath = frameInfo.filename
            callerFuncName = frameInfo.function
        elif callerFilePath == frameInfo.filename:
            # this function called the previous function
            callerFuncName = frameInfo.function
        else:
            break

    callerDir, callerFileNameWithExt = os.path.split(callerFilePath)
    callerFileName = os.path.splitext(callerFileNameWithExt)[0]
    # Prefer a ".tests" subdirectory next to the caller; fall back to the
    # current working directory (empty dir string) if it does not exist.
    outDir = os.path.join(callerDir, ".tests")
    if not os.path.isdir(outDir):
        outDir = ""
    prefix = "%s_%s-" % (callerFileName, callerFuncName)
    outPath = tempfile.mktemp(dir=outDir, suffix=ext, prefix=prefix)
    if os.path.exists(outPath):
        # There should not be a file there given the randomizer. Warn and
        # remove.
        # Use stacklevel 3 so that the warning is reported from the end of the
        # with block
        warnings.warn("Unexpectedly found pre-existing tempfile named %r" % (outPath,),
                      stacklevel=3)
        try:
            os.remove(outPath)
        except OSError:
            pass

    yield outPath

    # Code below only runs if the with-body did not raise: on an exception
    # the file is deliberately left behind for post-mortem inspection.
    fileExists = os.path.exists(outPath)
    if expectOutput:
        if not fileExists:
            raise RuntimeError("Temp file expected named {} but none found".format(outPath))
    else:
        if fileExists:
            raise RuntimeError("Unexpectedly discovered temp file named {}".format(outPath))
    # Try to clean up the file regardless
    if fileExists:
        try:
            os.remove(outPath)
        except OSError as e:
            # Use stacklevel 3 so that the warning is reported from the end of
            # the with block.
            warnings.warn("Warning: could not remove file %r: %s" % (outPath, e), stacklevel=3)

415 

416 

class TestCase(unittest.TestCase):
    """A `unittest.TestCase` variant augmented with extra convenience
    assertion methods.
    """

421 

422 

def inTestCase(func: Callable) -> Callable:
    """Register a free function as a method on the custom `TestCase` class
    while leaving it usable as a free function.
    """
    # Attach under the function's own name, then hand it back unchanged so
    # this works as a decorator.
    name = func.__name__
    setattr(TestCase, name, func)
    return func

429 

430 

def debugger(*exceptions):
    """Enter the debugger when there's an uncaught exception

    To use, just slap a ``@debugger()`` on your function.

    You may provide specific exception classes to catch as arguments to
    the decorator function, e.g.,
    ``@debugger(RuntimeError, NotImplementedError)``.
    With no arguments this defaults to ``(Exception,)``, i.e. any uncaught
    exception (which includes the `AssertionError` raised by failing
    `unittest.TestCase` assertions).

    Code provided by "Rosh Oxymoron" on StackOverflow:
    http://stackoverflow.com/questions/4398967/python-unit-testing-automatically-running-the-debugger-when-a-test-fails

    Notes
    -----
    Consider using ``pytest --pdb`` instead of this decorator.

    When an exception is caught, the post-mortem debugger is started; after
    the debugger session ends the exception is swallowed and the wrapped
    function returns `None`.
    """
    if not exceptions:
        exceptions = (Exception, )

    def decorator(f):
        @functools.wraps(f)
        def wrapper(*args, **kwargs):
            try:
                return f(*args, **kwargs)
            except exceptions:
                # Import lazily so pdb is only loaded when actually needed.
                import sys
                import pdb
                pdb.post_mortem(sys.exc_info()[2])
        return wrapper
    return decorator

463 

464 

def plotImageDiff(lhs: numpy.ndarray, rhs: numpy.ndarray, bad: Optional[numpy.ndarray] = None,
                  diff: Optional[numpy.ndarray] = None, plotFileName: Optional[str] = None) -> None:
    """Plot the comparison of two 2-d NumPy arrays.

    Parameters
    ----------
    lhs : `numpy.ndarray`
        LHS values to compare; a 2-d NumPy array
    rhs : `numpy.ndarray`
        RHS values to compare; a 2-d NumPy array
    bad : `numpy.ndarray`
        A 2-d boolean NumPy array of values to emphasize in the plots
    diff : `numpy.ndarray`
        difference array; a 2-d NumPy array, or None to show lhs-rhs
    plotFileName : `str`
        Filename to save the plot to. If None, the plot will be displayed in
        a window.

    Notes
    -----
    This method uses `matplotlib` and imports it internally; it should be
    wrapped in a try/except block within packages that do not depend on
    `matplotlib` (including `~lsst.utils`).
    """
    from matplotlib import pyplot
    if diff is None:
        diff = lhs - rhs
    pyplot.figure()
    if bad is not None:
        # make an rgba image that's red and transparent where not bad
        badImage = numpy.zeros(bad.shape + (4,), dtype=numpy.uint8)
        badImage[:, :, 0] = 255
        badImage[:, :, 1] = 0
        badImage[:, :, 2] = 0
        badImage[:, :, 3] = 255*bad
    # Shared color scales: row 1 uses the joint lhs/rhs range, row 2 uses
    # the range of the difference image.
    vmin1 = numpy.minimum(numpy.min(lhs), numpy.min(rhs))
    vmax1 = numpy.maximum(numpy.max(lhs), numpy.max(rhs))
    vmin2 = numpy.min(diff)
    vmax2 = numpy.max(diff)
    for n, (image, title) in enumerate([(lhs, "lhs"), (rhs, "rhs"), (diff, "diff")]):
        pyplot.subplot(2, 3, n + 1)
        im1 = pyplot.imshow(image, cmap=pyplot.cm.gray, interpolation='nearest', origin='lower',
                            vmin=vmin1, vmax=vmax1)
        if bad is not None:
            pyplot.imshow(badImage, alpha=0.2, interpolation='nearest', origin='lower')
        pyplot.axis("off")
        pyplot.title(title)
        pyplot.subplot(2, 3, n + 4)
        im2 = pyplot.imshow(image, cmap=pyplot.cm.gray, interpolation='nearest', origin='lower',
                            vmin=vmin2, vmax=vmax2)
        if bad is not None:
            pyplot.imshow(badImage, alpha=0.2, interpolation='nearest', origin='lower')
        pyplot.axis("off")
        pyplot.title(title)
    pyplot.subplots_adjust(left=0.05, bottom=0.05, top=0.92, right=0.75, wspace=0.05, hspace=0.05)
    # NOTE(review): im1/im2 are rebound on every loop iteration, so the two
    # colorbars reflect the last (diff) panels only — presumably intended,
    # since both rows share a single scale; confirm if per-panel colorbars
    # were wanted.
    cax1 = pyplot.axes([0.8, 0.55, 0.05, 0.4])
    pyplot.colorbar(im1, cax=cax1)
    cax2 = pyplot.axes([0.8, 0.05, 0.05, 0.4])
    pyplot.colorbar(im2, cax=cax2)
    if plotFileName:
        pyplot.savefig(plotFileName)
    else:
        pyplot.show()

528 

529 

@inTestCase
def assertFloatsAlmostEqual(testCase: unittest.TestCase, lhs: Union[float, numpy.ndarray],
                            rhs: Union[float, numpy.ndarray],
                            rtol: Optional[float] = sys.float_info.epsilon,
                            atol: Optional[float] = sys.float_info.epsilon, relTo: Optional[float] = None,
                            printFailures: bool = True, plotOnFailure: bool = False,
                            plotFileName: Optional[str] = None, invert: bool = False,
                            msg: Optional[str] = None, ignoreNaNs: bool = False) -> None:
    """Highly-configurable floating point comparisons for scalars and arrays.

    The test assertion will fail if all elements ``lhs`` and ``rhs`` are not
    equal to within the tolerances specified by ``rtol`` and ``atol``.
    More precisely, the comparison is:

    ``abs(lhs - rhs) <= relTo*rtol OR abs(lhs - rhs) <= atol``

    If ``rtol`` or ``atol`` is `None`, that term in the comparison is not
    performed at all.

    When not specified, ``relTo`` is the elementwise maximum of the absolute
    values of ``lhs`` and ``rhs``. If set manually, it should usually be set
    to either ``lhs`` or ``rhs``, or a scalar value typical of what is
    expected.

    Parameters
    ----------
    testCase : `unittest.TestCase`
        Instance the test is part of.
    lhs : scalar or array-like
        LHS value(s) to compare; may be a scalar or array-like of any
        dimension.
    rhs : scalar or array-like
        RHS value(s) to compare; may be a scalar or array-like of any
        dimension.
    rtol : `float`, optional
        Relative tolerance for comparison; defaults to double-precision
        epsilon.
    atol : `float`, optional
        Absolute tolerance for comparison; defaults to double-precision
        epsilon.
    relTo : `float`, optional
        Value to which comparison with rtol is relative.
    printFailures : `bool`, optional
        Upon failure, print all inequal elements as part of the message.
    plotOnFailure : `bool`, optional
        Upon failure, plot the originals and their residual with matplotlib.
        Only 2-d arrays are supported.
    plotFileName : `str`, optional
        Filename to save the plot to. If `None`, the plot will be displayed in
        a window.
    invert : `bool`, optional
        If `True`, invert the comparison and fail only if any elements *are*
        equal. Used to implement `~lsst.utils.tests.assertFloatsNotEqual`,
        which should generally be used instead for clarity.
    msg : `str`, optional
        String to append to the error message when assert fails.
    ignoreNaNs : `bool`, optional
        If `True` (`False` is default) mask out any NaNs from operand arrays
        before performing comparisons if they are in the same locations; NaNs
        in different locations trigger test assertion failures, even when
        ``invert=True``. Scalar NaNs are treated like arrays containing only
        NaNs of the same shape as the other operand, and no comparisons are
        performed if both sides are scalar NaNs.

    Raises
    ------
    AssertionError
        The values are not almost equal.
    """
    if ignoreNaNs:
        lhsMask = numpy.isnan(lhs)
        rhsMask = numpy.isnan(rhs)
        if not numpy.all(lhsMask == rhsMask):
            testCase.fail(f"lhs has {lhsMask.sum()} NaN values and rhs has {rhsMask.sum()} NaN values, "
                          f"in different locations.")
        if numpy.all(lhsMask):
            assert numpy.all(rhsMask), "Should be guaranteed by previous if."
            # All operands are fully NaN (either scalar NaNs or arrays of only
            # NaNs).
            return
        assert not numpy.all(rhsMask), "Should be guaranteed by prevoius two ifs."
        # If either operand is an array select just its not-NaN values. Note
        # that these expressions are never True for scalar operands, because if
        # they are NaN then the numpy.all checks above will catch them.
        if numpy.any(lhsMask):
            lhs = lhs[numpy.logical_not(lhsMask)]
        if numpy.any(rhsMask):
            rhs = rhs[numpy.logical_not(rhsMask)]
    if not numpy.isfinite(lhs).all():
        testCase.fail("Non-finite values in lhs")
    if not numpy.isfinite(rhs).all():
        testCase.fail("Non-finite values in rhs")
    diff = lhs - rhs
    absDiff = numpy.abs(lhs - rhs)
    if rtol is not None:
        # Relative comparison: default relTo is the elementwise max of the
        # operands' magnitudes.
        if relTo is None:
            relTo = numpy.maximum(numpy.abs(lhs), numpy.abs(rhs))
        else:
            relTo = numpy.abs(relTo)
        bad = absDiff > rtol*relTo
        if atol is not None:
            # An element only counts as bad if it fails BOTH tolerances.
            bad = numpy.logical_and(bad, absDiff > atol)
    else:
        if atol is None:
            raise ValueError("rtol and atol cannot both be None")
        bad = absDiff > atol
    failed = numpy.any(bad)
    if invert:
        # Inverted mode: fail when nothing differs; "bad" now flags the
        # elements that are (unexpectedly) equal.
        failed = not failed
        bad = numpy.logical_not(bad)
        cmpStr = "=="
        failStr = "are the same"
    else:
        cmpStr = "!="
        failStr = "differ"
    errMsg = []
    if failed:
        if numpy.isscalar(bad):
            # Scalar comparison: report the single pair with the applicable
            # tolerance(s).
            if rtol is None:
                errMsg = ["%s %s %s; diff=%s with atol=%s"
                          % (lhs, cmpStr, rhs, absDiff, atol)]
            elif atol is None:
                errMsg = ["%s %s %s; diff=%s/%s=%s with rtol=%s"
                          % (lhs, cmpStr, rhs, absDiff, relTo, absDiff/relTo, rtol)]
            else:
                errMsg = ["%s %s %s; diff=%s/%s=%s with rtol=%s, atol=%s"
                          % (lhs, cmpStr, rhs, absDiff, relTo, absDiff/relTo, rtol, atol)]
        else:
            errMsg = ["%d/%d elements %s with rtol=%s, atol=%s"
                      % (bad.sum(), bad.size, failStr, rtol, atol)]
            if plotOnFailure:
                if len(lhs.shape) != 2 or len(rhs.shape) != 2:
                    raise ValueError("plotOnFailure is only valid for 2-d arrays")
                try:
                    plotImageDiff(lhs, rhs, bad, diff=diff, plotFileName=plotFileName)
                except ImportError:
                    errMsg.append("Failure plot requested but matplotlib could not be imported.")
            if printFailures:
                # Make sure everything is an array if any of them are, so we
                # can treat them the same (diff and absDiff are arrays if
                # either rhs or lhs is), and we don't get here if neither is.
                if numpy.isscalar(relTo):
                    relTo = numpy.ones(bad.shape, dtype=float) * relTo
                if numpy.isscalar(lhs):
                    lhs = numpy.ones(bad.shape, dtype=float) * lhs
                if numpy.isscalar(rhs):
                    rhs = numpy.ones(bad.shape, dtype=float) * rhs
                if rtol is None:
                    for a, b, diff in zip(lhs[bad], rhs[bad], absDiff[bad]):
                        errMsg.append("%s %s %s (diff=%s)" % (a, cmpStr, b, diff))
                else:
                    for a, b, diff, rel in zip(lhs[bad], rhs[bad], absDiff[bad], relTo[bad]):
                        errMsg.append("%s %s %s (diff=%s/%s=%s)" % (a, cmpStr, b, diff, rel, diff/rel))

    if msg is not None:
        errMsg.append(msg)
    testCase.assertFalse(failed, msg="\n".join(errMsg))

688 

689 

@inTestCase
def assertFloatsNotEqual(testCase: unittest.TestCase, lhs: Union[float, numpy.ndarray],
                         rhs: Union[float, numpy.ndarray], **kwargs: Any) -> None:
    """Fail a test if the given floating point values are equal to within the
    given tolerances.

    This is the inverted form of
    `~lsst.utils.tests.assertFloatsAlmostEqual`; see that function for the
    full set of supported keyword arguments.

    Parameters
    ----------
    testCase : `unittest.TestCase`
        Instance the test is part of.
    lhs : scalar or array-like
        LHS value(s) to compare; may be a scalar or array-like of any
        dimension.
    rhs : scalar or array-like
        RHS value(s) to compare; may be a scalar or array-like of any
        dimension.

    Raises
    ------
    AssertionError
        The values are almost equal.
    """
    return assertFloatsAlmostEqual(testCase, lhs, rhs, invert=True, **kwargs)

716 

717 

@inTestCase
def assertFloatsEqual(testCase: unittest.TestCase, lhs: Union[float, numpy.ndarray],
                      rhs: Union[float, numpy.ndarray], **kwargs: Any) -> None:
    """Assert that lhs == rhs (both numeric types, whether scalar or array).

    Equivalent to calling `~lsst.utils.tests.assertFloatsAlmostEqual` with
    ``rtol=atol=0``; see that function for the remaining keyword arguments.

    Parameters
    ----------
    testCase : `unittest.TestCase`
        Instance the test is part of.
    lhs : scalar or array-like
        LHS value(s) to compare; may be a scalar or array-like of any
        dimension.
    rhs : scalar or array-like
        RHS value(s) to compare; may be a scalar or array-like of any
        dimension.

    Raises
    ------
    AssertionError
        The values are not equal.
    """
    return assertFloatsAlmostEqual(testCase, lhs, rhs, rtol=0, atol=0, **kwargs)

744 

745 

746def _settingsIterator(settings: Dict[str, Sequence[Any]]) -> Iterator[Dict[str, Any]]: 

747 """Return an iterator for the provided test settings 

748 

749 Parameters 

750 ---------- 

751 settings : `dict` (`str`: iterable) 

752 Lists of test parameters. Each should be an iterable of the same 

753 length. If a string is provided as an iterable, it will be converted 

754 to a list of a single string. 

755 

756 Raises 

757 ------ 

758 AssertionError 

759 If the ``settings`` are not of the same length. 

760 

761 Yields 

762 ------ 

763 parameters : `dict` (`str`: anything) 

764 Set of parameters. 

765 """ 

766 for name, values in settings.items(): 

767 if isinstance(values, str): 767 ↛ 770line 767 didn't jump to line 770, because the condition on line 767 was never true

768 # Probably meant as a single-element string, rather than an 

769 # iterable of chars. 

770 settings[name] = [values] 

771 num = len(next(iter(settings.values()))) # Number of settings 

772 for name, values in settings.items(): 

773 assert len(values) == num, f"Length mismatch for setting {name}: {len(values)} vs {num}" 

774 for ii in range(num): 

775 values = [settings[kk][ii] for kk in settings] 

776 yield dict(zip(settings, values)) 

777 

778 

def classParameters(**settings: Sequence[Any]) -> Callable:
    """Class decorator for generating unit tests

    This decorator generates classes with class variables according to the
    supplied ``settings``.

    Parameters
    ----------
    **settings : `dict` (`str`: iterable)
        The lists of test parameters to set as class variables in turn. Each
        should be an iterable of the same length.

    Examples
    --------
    ::

        @classParameters(foo=[1, 2], bar=[3, 4])
        class MyTestCase(unittest.TestCase):
            ...

    will generate two classes, as if you wrote::

        class MyTestCase_1_3(unittest.TestCase):
            foo = 1
            bar = 3
            ...

        class MyTestCase_2_4(unittest.TestCase):
            foo = 2
            bar = 4
            ...

    Note that the values are embedded in the class name.
    """
    def decorator(cls: Type) -> None:
        # Inject each generated subclass into the module that defined the
        # decorated class, so the test loader can discover it by name.
        scope = sys.modules[cls.__module__].__dict__
        for params in _settingsIterator(settings):
            suffix = "_".join(str(vv) for vv in params.values())
            name = f"{cls.__name__}_{suffix}"
            members = dict(cls.__dict__)
            members.update(params)
            scope[name] = type(name, (cls,), members)
    return decorator

821 

822 

def methodParameters(**settings: Sequence[Any]) -> Callable:
    """Iterate over supplied settings to create subtests automatically.

    This decorator iterates over the supplied settings, using
    ``TestCase.subTest`` to communicate the values in the event of a failure.

    Parameters
    ----------
    **settings : `dict` (`str`: iterable)
        The lists of test parameters. Each should be an iterable of the same
        length.

    Examples
    --------
    .. code-block:: python

        @methodParameters(foo=[1, 2], bar=[3, 4])
        def testSomething(self, foo, bar):
            ...

    will run:

    .. code-block:: python

        testSomething(foo=1, bar=3)
        testSomething(foo=2, bar=4)
    """
    def decorator(func: Callable) -> Callable:
        @functools.wraps(func)
        def wrapper(self: unittest.TestCase, *args: Any, **kwargs: Any) -> None:
            # Each parameter set is merged into the call's keyword arguments
            # and reported via subTest so failures identify the combination.
            for params in _settingsIterator(settings):
                kwargs.update(params)
                with self.subTest(**params):
                    func(self, *args, **kwargs)
        return wrapper
    return decorator

859 

860 

861def _cartesianProduct(settings: Mapping[str, Sequence[Any]]) -> Mapping[str, Sequence[Any]]: 

862 """Return the cartesian product of the settings 

863 

864 Parameters 

865 ---------- 

866 settings : `dict` mapping `str` to `iterable` 

867 Parameter combinations. 

868 

869 Returns 

870 ------- 

871 product : `dict` mapping `str` to `iterable` 

872 Parameter combinations covering the cartesian product (all possible 

873 combinations) of the input parameters. 

874 

875 Examples 

876 -------- 

877 .. code-block:: python 

878 

879 cartesianProduct({"foo": [1, 2], "bar": ["black", "white"]}) 

880 

881 will return: 

882 

883 .. code-block:: python 

884 

885 {"foo": [1, 1, 2, 2], "bar": ["black", "white", "black", "white"]} 

886 """ 

887 product: Dict[str, List[Any]] = {kk: [] for kk in settings} 

888 for values in itertools.product(*settings.values()): 

889 for kk, vv in zip(settings.keys(), values): 

890 product[kk].append(vv) 

891 return product 

892 

893 

def classParametersProduct(**settings: Sequence[Any]) -> Callable:
    """Class decorator for generating unit tests

    This decorator generates classes with class variables according to the
    cartesian product of the supplied ``settings``.

    Parameters
    ----------
    **settings : `dict` (`str`: iterable)
        The lists of test parameters to set as class variables in turn. Each
        should be an iterable.

    Examples
    --------
    .. code-block:: python

        @classParametersProduct(foo=[1, 2], bar=[3, 4])
        class MyTestCase(unittest.TestCase):
            ...

    will generate four classes, as if you wrote::

    .. code-block:: python

        class MyTestCase_1_3(unittest.TestCase):
            foo = 1
            bar = 3
            ...

        class MyTestCase_1_4(unittest.TestCase):
            foo = 1
            bar = 4
            ...

        class MyTestCase_2_3(unittest.TestCase):
            foo = 2
            bar = 3
            ...

        class MyTestCase_2_4(unittest.TestCase):
            foo = 2
            bar = 4
            ...

    Note that the values are embedded in the class name.
    """
    # Expand to all combinations, then defer to the plain decorator.
    expanded = _cartesianProduct(settings)
    return classParameters(**expanded)

941 

942 

def methodParametersProduct(**settings: Sequence[Any]) -> Callable:
    """Iterate over cartesian product creating sub tests.

    This decorator iterates over the cartesian product of the supplied
    settings, using `~unittest.TestCase.subTest` to communicate the values in
    the event of a failure.

    Parameters
    ----------
    **settings : `dict` (`str`: iterable)
        The parameter combinations to test. Each should be an iterable.

    Example
    -------

        @methodParametersProduct(foo=[1, 2], bar=["black", "white"])
        def testSomething(self, foo, bar):
            ...

    will run:

        testSomething(foo=1, bar="black")
        testSomething(foo=1, bar="white")
        testSomething(foo=2, bar="black")
        testSomething(foo=2, bar="white")
    """
    # Expand to all combinations, then defer to the plain decorator.
    expanded = _cartesianProduct(settings)
    return methodParameters(**expanded)

970 

971 

@contextlib.contextmanager
def temporaryDirectory() -> Iterator[str]:
    """Context manager that creates and destroys a temporary directory.

    The difference from `tempfile.TemporaryDirectory` is that this ignores
    errors when deleting a directory, which may happen with some filesystems.

    Yields
    ------
    tmpdir : `str`
        Path to the newly-created temporary directory.
    """
    tmpdir = tempfile.mkdtemp()
    try:
        yield tmpdir
    finally:
        # Clean up even if the with-body raised; without the try/finally the
        # directory would leak on any exception. Removal errors are ignored
        # deliberately (see docstring).
        shutil.rmtree(tmpdir, ignore_errors=True)