Hide keyboard shortcuts

Hot-keys on this page

r m x p   toggle line displays

j k   next/prev highlighted chunk

0   (zero) top of page

1   (one) first highlighted chunk

1# This file is part of utils. 

2# 

3# Developed for the LSST Data Management System. 

4# This product includes software developed by the LSST Project 

5# (https://www.lsst.org). 

6# See the COPYRIGHT file at the top-level directory of this distribution 

7# for details of code ownership. 

8# 

9# Use of this source code is governed by a 3-clause BSD-style 

10# license that can be found in the LICENSE file. 

11 

12"""Support code for running unit tests""" 

13 

14__all__ = ["init", "MemoryTestCase", "ExecutablesTestCase", "getTempFilePath", 

15 "TestCase", "assertFloatsAlmostEqual", "assertFloatsNotEqual", "assertFloatsEqual", 

16 "debugger", "classParameters", "methodParameters"] 

17 

18import contextlib 

19import gc 

20import inspect 

21import os 

22import subprocess 

23import sys 

24import unittest 

25import warnings 

26import numpy 

27import psutil 

28import functools 

29import tempfile 

30import shutil 

31import itertools 

32 

33from typing import ( 

34 Any, 

35 Callable, 

36 Dict, 

37 Iterator, 

38 List, 

39 Optional, 

40 Mapping, 

41 Set, 

42 Sequence, 

43 Type, 

44 Union, 

45) 

46 

# Baseline set of paths open in this process; reset by init() and compared
# against by MemoryTestCase.testFileDescriptorLeaks to detect descriptor
# leaks.
open_files = set()

49 

50 

def _get_open_files() -> Set[str]:
    """Return the paths of all files this process currently holds open.

    Returns
    -------
    open_files : `set`
        Set containing the paths of the open files.
    """
    return {entry.path for entry in psutil.Process().open_files()}

61 

62 

def init() -> None:
    """Initialize the memory tester and file descriptor leak tester.

    Snapshots the set of currently open files; later leak checks report
    only files opened after this call.
    """
    global open_files
    # Record the current open files as the new baseline.
    open_files = _get_open_files()

68 

69 

def sort_tests(tests) -> unittest.TestSuite:
    """Sort supplied test suites such that MemoryTestCases are at the end.

    `lsst.utils.tests.MemoryTestCase` tests should always run after any other
    tests in the module.

    Parameters
    ----------
    tests : sequence
        Sequence of test suites.

    Returns
    -------
    suite : `unittest.TestSuite`
        A combined `~unittest.TestSuite` with
        `~lsst.utils.tests.MemoryTestCase` at the end.
    """
    suite = unittest.TestSuite()
    memtests = []
    for test_suite in tests:
        try:
            # Probe only the first test in the suite: every test in a
            # suite shares a class. A loop-and-break is used rather than
            # next() because the suite may contain no test methods and a
            # for loop avoids catching StopIteration.
            first = None
            for member in test_suite:
                first = member
                break
            if first is not None and MemoryTestCase in inspect.getmro(first.__class__):
                memtests.append(test_suite)
            else:
                suite.addTests(test_suite)
        except TypeError:
            # Not iterable: a bare test case rather than a suite.
            if isinstance(test_suite, MemoryTestCase):
                memtests.append(test_suite)
            else:
                suite.addTest(test_suite)
    # Memory tests always go last.
    suite.addTests(memtests)
    return suite

111 

112 

def suiteClassWrapper(tests):
    """Combine test suites into one, with memory tests sorted last.

    Adapter allowing `sort_tests` to serve as a ``suiteClass`` callable
    for a `unittest.TestLoader`.
    """
    return unittest.TestSuite(sort_tests(tests))

115 

116 

# Replace the suiteClass callable in the defaultTestLoader so that we can
# reorder the tests to run MemoryTestCase subclasses last. This has no
# effect if no memory test cases are found.
unittest.defaultTestLoader.suiteClass = suiteClassWrapper

121 

122 

class MemoryTestCase(unittest.TestCase):
    """Check for resource leaks."""

    @classmethod
    def tearDownClass(cls) -> None:
        """Reset the leak counter when the tests have been completed"""
        init()

    def testFileDescriptorLeaks(self) -> None:
        """Check if any file descriptors are open since init() called."""
        gc.collect()
        global open_files
        now_open = _get_open_files()

        def _expected_open(path: str) -> bool:
            # Files opened outside the control of the code under test
            # (system caches, fonts, logs) are not counted as leaks.
            return (path.endswith(".car")
                    or path.startswith("/proc/")
                    or path.endswith(".ttf")
                    or (path.startswith("/var/lib/") and path.endswith("/passwd"))
                    or path.endswith("astropy.log")
                    or path.endswith("mime/mime.cache"))

        now_open = {f for f in now_open if not _expected_open(f)}

        diff = now_open.difference(open_files)
        if diff:
            for f in diff:
                print("File open: %s" % f)
            self.fail("Failed to close %d file%s" % (len(diff), "s" if len(diff) != 1 else ""))

151 

152 

class ExecutablesTestCase(unittest.TestCase):
    """Test that executables can be run and return good status.

    The test methods are created dynamically. Callers must subclass this
    class in their own test file and invoke the create_executable_tests()
    class method to register the tests.
    """

    # Number of executables found by create_executable_tests();
    # -1 means automated discovery has not been run.
    TESTS_DISCOVERED = -1

    @classmethod
    def setUpClass(cls) -> None:
        """Abort testing if automated test creation was enabled and
        no tests were found."""
        if cls.TESTS_DISCOVERED == 0:
            raise RuntimeError("No executables discovered.")

    def testSanity(self) -> None:
        """Guarantee that at least one test exists.

        Ensures the test runner triggers the class set up machinery,
        which checks whether there are executables to test.
        """
        pass

    def assertExecutable(self, executable: str, root_dir: Optional[str] = None,
                         args: Optional[Sequence[str]] = None, msg: Optional[str] = None) -> None:
        """Check that an executable runs and exits with zero status.

        Prints output to standard out. On bad exit status the test
        fails. If the executable can not be located the test is skipped.

        Parameters
        ----------
        executable : `str`
            Path to an executable. ``root_dir`` is not used if this is an
            absolute path.
        root_dir : `str`, optional
            Directory containing executable. Ignored if `None`.
        args : `list` or `tuple`, optional
            Arguments to be provided to the executable.
        msg : `str`, optional
            Message to use when the test fails. Can be `None` for default
            message.

        Raises
        ------
        AssertionError
            The executable did not return 0 exit status.
        """
        if root_dir is not None and not os.path.isabs(executable):
            executable = os.path.join(root_dir, executable)

        # Build the subprocess argument vector together with a
        # human-readable description of the arguments for the log line.
        command = [executable]
        arg_desc = "no arguments"
        if args is not None:
            command.extend(args)
            arg_desc = 'arguments "' + " ".join(args) + '"'

        print("Running executable '{}' with {}...".format(executable, arg_desc))
        if not os.path.exists(executable):
            self.skipTest("Executable {} is unexpectedly missing".format(executable))

        failure_message = None
        try:
            output = subprocess.check_output(command)
        except subprocess.CalledProcessError as e:
            # Still show whatever the process wrote before failing.
            output = e.output
            failure_message = "Bad exit status from '{}': {}".format(executable, e.returncode)
        print(output.decode('utf-8'))

        if failure_message:
            if msg is None:
                msg = failure_message
            self.fail(msg)

    @classmethod
    def _build_test_method(cls, executable: str, root_dir: str) -> None:
        """Create a test method for one executable and attach it to the class.

        The generated method runs the supplied executable (located in the
        supplied root directory) and is attached to the class so the test
        runner will discover and run it.

        Parameters
        ----------
        executable : `str`
            Name of executable. Can be absolute path.
        root_dir : `str`
            Path to executable. Not used if executable path is absolute.
        """
        if not os.path.isabs(executable):
            executable = os.path.abspath(os.path.join(root_dir, executable))

        # Derive a unique, discoverable test name from the full path.
        test_name = "test_exe_" + executable.replace("/", "_")

        # Closure that becomes the test method.
        def test_executable_runs(*args: Any) -> None:
            self = args[0]
            self.assertExecutable(executable)

        # Name it properly and attach it to the class.
        test_executable_runs.__name__ = test_name
        setattr(cls, test_name, test_executable_runs)

    @classmethod
    def create_executable_tests(cls, ref_file: str, executables: Optional[Sequence[str]] = None) -> None:
        """Discover executables to test and create corresponding test methods.

        Scans the directory containing the supplied reference file
        (usually ``__file__`` supplied from the test class) to look for
        executables. For each executable found, a test method is created
        that runs it and checks the returned status.

        Executable scripts with a ``.py`` extension and shared libraries
        are ignored by the scanner.

        This class method must be called before test discovery.

        Parameters
        ----------
        ref_file : `str`
            Path to a file within the directory to be searched.
            If the files are in the same location as the test file, then
            ``__file__`` can be used.
        executables : `list` or `tuple`, optional
            Sequence of executables that can override the automated
            detection. If an executable mentioned here is not found, a
            skipped test will be created for it, rather than a failed
            test.

        Examples
        --------
        >>> cls.create_executable_tests(__file__)
        """
        # Search the directory containing the reference file.
        ref_dir = os.path.abspath(os.path.dirname(ref_file))

        if executables is None:
            # Walk the tree looking for files with the execute bit set,
            # skipping Python sources and shared libraries (which are
            # executable but not runnable programs).
            found = []
            for root, dirs, files in os.walk(ref_dir):
                for candidate in files:
                    if candidate.endswith(".py") or candidate.endswith(".so"):
                        continue
                    full_path = os.path.join(root, candidate)
                    if os.access(full_path, os.X_OK):
                        found.append(full_path)
            executables = found

        # Record the number found so setUpClass can report an empty
        # discovery as a failure. Not raised here so the test runner can
        # integrate the failure into its report.
        cls.TESTS_DISCOVERED = len(executables)

        # Attach one generated test method per executable.
        for exe in executables:
            cls._build_test_method(exe, ref_dir)

314 

315 

@contextlib.contextmanager
def getTempFilePath(ext: str, expectOutput: bool = True) -> Iterator[str]:
    """Return a path suitable for a temporary file and try to delete the
    file on success.

    If the with block completes successfully then the file is deleted,
    if possible; failure results in a printed warning.
    If a file remains when it should not, a RuntimeError exception is
    raised. This exception is also raised if a file is not present on context
    manager exit when one is expected to exist.
    If the block exits with an exception the file is left on disk so it can be
    examined. The file name has a random component such that nested context
    managers can be used with the same file suffix.

    Parameters
    ----------
    ext : `str`
        File name extension, e.g. ``.fits``.
    expectOutput : `bool`, optional
        If `True`, a file should be created within the context manager.
        If `False`, a file should not be present when the context manager
        exits.

    Returns
    -------
    `str`
        Path for a temporary file. The path is a combination of the caller's
        file path and the name of the top-level function.

    Notes
    -----
    ::

        # file tests/testFoo.py
        import unittest
        import lsst.utils.tests
        class FooTestCase(unittest.TestCase):
            def testBasics(self):
                self.runTest()

            def runTest(self):
                with lsst.utils.tests.getTempFilePath(".fits") as tmpFile:
                    # if tests/.tests exists then
                    # tmpFile = "tests/.tests/testFoo_testBasics.fits"
                    # otherwise tmpFile = "testFoo_testBasics.fits"
                    ...
                # at the end of this "with" block the path tmpFile will be
                # deleted, but only if the file exists and the "with"
                # block terminated normally (rather than with an exception)
                ...
    """
    # Walk up the call stack (starting above this function and the
    # contextmanager machinery) to find the outermost caller within the
    # same source file; its file and function names seed the temp name.
    stack = inspect.stack()
    # get name of first function in the file
    for i in range(2, len(stack)):
        frameInfo = inspect.getframeinfo(stack[i][0])
        if i == 2:
            callerFilePath = frameInfo.filename
            callerFuncName = frameInfo.function
        elif callerFilePath == frameInfo.filename:
            # this function called the previous function
            callerFuncName = frameInfo.function
        else:
            break

    callerDir, callerFileNameWithExt = os.path.split(callerFilePath)
    callerFileName = os.path.splitext(callerFileNameWithExt)[0]
    # Prefer a ".tests" directory next to the caller; fall back to the
    # current working directory when it does not exist.
    outDir = os.path.join(callerDir, ".tests")
    if not os.path.isdir(outDir):
        outDir = ""
    prefix = "%s_%s-" % (callerFileName, callerFuncName)
    # NOTE(review): tempfile.mktemp is race-prone; the existence check
    # below mitigates collisions but does not eliminate the race —
    # confirm this is acceptable for test-only usage.
    outPath = tempfile.mktemp(dir=outDir, suffix=ext, prefix=prefix)
    if os.path.exists(outPath):
        # There should not be a file there given the randomizer. Warn and
        # remove.
        # Use stacklevel 3 so that the warning is reported from the end of the
        # with block
        warnings.warn("Unexpectedly found pre-existing tempfile named %r" % (outPath,),
                      stacklevel=3)
        try:
            os.remove(outPath)
        except OSError:
            pass

    yield outPath

    # Deliberately not wrapped in try/finally: if the with-body raised,
    # the file is intentionally left on disk for examination.
    fileExists = os.path.exists(outPath)
    if expectOutput:
        if not fileExists:
            raise RuntimeError("Temp file expected named {} but none found".format(outPath))
    else:
        if fileExists:
            raise RuntimeError("Unexpectedly discovered temp file named {}".format(outPath))
    # Try to clean up the file regardless
    if fileExists:
        try:
            os.remove(outPath)
        except OSError as e:
            # Use stacklevel 3 so that the warning is reported from the end of
            # the with block.
            warnings.warn("Warning: could not remove file %r: %s" % (outPath, e), stacklevel=3)

417 

418 

class TestCase(unittest.TestCase):
    """A `unittest.TestCase` augmented with extra convenience assertions.

    Free functions in this module decorated with `inTestCase` are
    attached to this class as methods.
    """

423 

424 

def inTestCase(func: Callable) -> Callable:
    """Attach a free function to the custom `TestCase` class.

    The function is installed on `TestCase` under its own name and
    returned unchanged, so it stays usable as a free function too.
    """
    setattr(TestCase, func.__name__, func)
    return func

431 

432 

def debugger(*exceptions):
    """Decorator to enter the debugger when there's an uncaught exception

    To use, just slap a ``@debugger()`` on your function.

    You may provide specific exception classes to catch as arguments to
    the decorator function, e.g.,
    ``@debugger(RuntimeError, NotImplementedError)``.
    With no arguments this defaults to `Exception`, which catches any
    uncaught exception (including `AssertionError`, so the bare form is
    usable on `unittest.TestCase` methods).

    Parameters
    ----------
    *exceptions : `type`
        Exception classes that should trigger the debugger. Defaults to
        ``(Exception,)`` when none are supplied.

    Returns
    -------
    decorator : callable
        Function decorator.

    Notes
    -----
    Code provided by "Rosh Oxymoron" on StackOverflow:
    http://stackoverflow.com/questions/4398967/python-unit-testing-automatically-running-the-debugger-when-a-test-fails

    Consider using ``pytest --pdb`` instead of this decorator.
    """
    if not exceptions:
        # Catch everything by default, including AssertionError from
        # failed unittest assertions.
        exceptions = (Exception, )

    def decorator(f):
        @functools.wraps(f)
        def wrapper(*args, **kwargs):
            try:
                return f(*args, **kwargs)
            except exceptions:
                # Import lazily: pdb is only needed on failure. sys is
                # already imported at module scope.
                import pdb
                pdb.post_mortem(sys.exc_info()[2])
        return wrapper
    return decorator

465 

466 

def plotImageDiff(lhs: numpy.ndarray, rhs: numpy.ndarray, bad: Optional[numpy.ndarray] = None,
                  diff: Optional[numpy.ndarray] = None, plotFileName: Optional[str] = None) -> None:
    """Plot the comparison of two 2-d NumPy arrays.

    Parameters
    ----------
    lhs : `numpy.ndarray`
        LHS values to compare; a 2-d NumPy array
    rhs : `numpy.ndarray`
        RHS values to compare; a 2-d NumPy array
    bad : `numpy.ndarray`
        A 2-d boolean NumPy array of values to emphasize in the plots
    diff : `numpy.ndarray`
        difference array; a 2-d NumPy array, or None to show lhs-rhs
    plotFileName : `str`
        Filename to save the plot to. If None, the plot will be displayed in
        a window.

    Notes
    -----
    This method uses `matplotlib` and imports it internally; it should be
    wrapped in a try/except block within packages that do not depend on
    `matplotlib` (including `~lsst.utils`).
    """
    # Import here so matplotlib is only required when a plot is requested.
    from matplotlib import pyplot
    if diff is None:
        diff = lhs - rhs
    pyplot.figure()
    if bad is not None:
        # make an rgba image that's red and transparent where not bad
        badImage = numpy.zeros(bad.shape + (4,), dtype=numpy.uint8)
        badImage[:, :, 0] = 255
        badImage[:, :, 1] = 0
        badImage[:, :, 2] = 0
        badImage[:, :, 3] = 255*bad
    # Shared color scale for the data row (lhs/rhs/diff) ...
    vmin1 = numpy.minimum(numpy.min(lhs), numpy.min(rhs))
    vmax1 = numpy.maximum(numpy.max(lhs), numpy.max(rhs))
    # ... and a second scale based on the difference for the bottom row.
    vmin2 = numpy.min(diff)
    vmax2 = numpy.max(diff)
    for n, (image, title) in enumerate([(lhs, "lhs"), (rhs, "rhs"), (diff, "diff")]):
        # Top row: each image on the shared data scale.
        pyplot.subplot(2, 3, n + 1)
        im1 = pyplot.imshow(image, cmap=pyplot.cm.gray, interpolation='nearest', origin='lower',
                            vmin=vmin1, vmax=vmax1)
        if bad is not None:
            pyplot.imshow(badImage, alpha=0.2, interpolation='nearest', origin='lower')
        pyplot.axis("off")
        pyplot.title(title)
        # Bottom row: the same image on the difference scale.
        pyplot.subplot(2, 3, n + 4)
        im2 = pyplot.imshow(image, cmap=pyplot.cm.gray, interpolation='nearest', origin='lower',
                            vmin=vmin2, vmax=vmax2)
        if bad is not None:
            pyplot.imshow(badImage, alpha=0.2, interpolation='nearest', origin='lower')
        pyplot.axis("off")
        pyplot.title(title)
    pyplot.subplots_adjust(left=0.05, bottom=0.05, top=0.92, right=0.75, wspace=0.05, hspace=0.05)
    # One colorbar per row, placed in the reserved right-hand margin.
    cax1 = pyplot.axes([0.8, 0.55, 0.05, 0.4])
    pyplot.colorbar(im1, cax=cax1)
    cax2 = pyplot.axes([0.8, 0.05, 0.05, 0.4])
    pyplot.colorbar(im2, cax=cax2)
    if plotFileName:
        pyplot.savefig(plotFileName)
    else:
        pyplot.show()

530 

531 

532@inTestCase 

def assertFloatsAlmostEqual(testCase: unittest.TestCase, lhs: Union[float, numpy.ndarray],
                            rhs: Union[float, numpy.ndarray],
                            rtol: Optional[float] = sys.float_info.epsilon,
                            atol: Optional[float] = sys.float_info.epsilon, relTo: Optional[float] = None,
                            printFailures: bool = True, plotOnFailure: bool = False,
                            plotFileName: Optional[str] = None, invert: bool = False,
                            msg: Optional[str] = None, ignoreNaNs: bool = False) -> None:
    """Highly-configurable floating point comparisons for scalars and arrays.

    The test assertion will fail if all elements ``lhs`` and ``rhs`` are not
    equal to within the tolerances specified by ``rtol`` and ``atol``.
    More precisely, the comparison is:

    ``abs(lhs - rhs) <= relTo*rtol OR abs(lhs - rhs) <= atol``

    If ``rtol`` or ``atol`` is `None`, that term in the comparison is not
    performed at all.

    When not specified, ``relTo`` is the elementwise maximum of the absolute
    values of ``lhs`` and ``rhs``. If set manually, it should usually be set
    to either ``lhs`` or ``rhs``, or a scalar value typical of what is
    expected.

    Parameters
    ----------
    testCase : `unittest.TestCase`
        Instance the test is part of.
    lhs : scalar or array-like
        LHS value(s) to compare; may be a scalar or array-like of any
        dimension.
    rhs : scalar or array-like
        RHS value(s) to compare; may be a scalar or array-like of any
        dimension.
    rtol : `float`, optional
        Relative tolerance for comparison; defaults to double-precision
        epsilon.
    atol : `float`, optional
        Absolute tolerance for comparison; defaults to double-precision
        epsilon.
    relTo : `float`, optional
        Value to which comparison with rtol is relative.
    printFailures : `bool`, optional
        Upon failure, print all inequal elements as part of the message.
    plotOnFailure : `bool`, optional
        Upon failure, plot the originals and their residual with matplotlib.
        Only 2-d arrays are supported.
    plotFileName : `str`, optional
        Filename to save the plot to. If `None`, the plot will be displayed in
        a window.
    invert : `bool`, optional
        If `True`, invert the comparison and fail only if any elements *are*
        equal. Used to implement `~lsst.utils.tests.assertFloatsNotEqual`,
        which should generally be used instead for clarity.
    msg : `str`, optional
        String to append to the error message when assert fails.
    ignoreNaNs : `bool`, optional
        If `True` (`False` is default) mask out any NaNs from operand arrays
        before performing comparisons if they are in the same locations; NaNs
        in different locations trigger test assertion failures, even when
        ``invert=True``. Scalar NaNs are treated like arrays containing only
        NaNs of the same shape as the other operand, and no comparisons are
        performed if both sides are scalar NaNs.

    Raises
    ------
    AssertionError
        The values are not almost equal.
    """
    if ignoreNaNs:
        lhsMask = numpy.isnan(lhs)
        rhsMask = numpy.isnan(rhs)
        if not numpy.all(lhsMask == rhsMask):
            testCase.fail(f"lhs has {lhsMask.sum()} NaN values and rhs has {rhsMask.sum()} NaN values, "
                          f"in different locations.")
        if numpy.all(lhsMask):
            assert numpy.all(rhsMask), "Should be guaranteed by previous if."
            # All operands are fully NaN (either scalar NaNs or arrays of only
            # NaNs).
            return
        assert not numpy.all(rhsMask), "Should be guaranteed by previous two ifs."
        # If either operand is an array select just its not-NaN values. Note
        # that these expressions are never True for scalar operands, because
        # if they are NaN then the numpy.all checks above will catch them.
        if numpy.any(lhsMask):
            lhs = lhs[numpy.logical_not(lhsMask)]
        if numpy.any(rhsMask):
            rhs = rhs[numpy.logical_not(rhsMask)]
    if not numpy.isfinite(lhs).all():
        testCase.fail("Non-finite values in lhs")
    if not numpy.isfinite(rhs).all():
        testCase.fail("Non-finite values in rhs")
    diff = lhs - rhs
    absDiff = numpy.abs(diff)
    if rtol is not None:
        if relTo is None:
            relTo = numpy.maximum(numpy.abs(lhs), numpy.abs(rhs))
        else:
            relTo = numpy.abs(relTo)
        bad = absDiff > rtol*relTo
        if atol is not None:
            # An element is bad only if it violates BOTH tolerances.
            bad = numpy.logical_and(bad, absDiff > atol)
    else:
        if atol is None:
            raise ValueError("rtol and atol cannot both be None")
        bad = absDiff > atol
    failed = numpy.any(bad)
    if invert:
        # Inverted mode: "bad" elements are those that compare equal.
        failed = not failed
        bad = numpy.logical_not(bad)
        cmpStr = "=="
        failStr = "are the same"
    else:
        cmpStr = "!="
        failStr = "differ"
    errMsg = []
    if failed:
        if numpy.isscalar(bad):
            if rtol is None:
                errMsg = ["%s %s %s; diff=%s with atol=%s"
                          % (lhs, cmpStr, rhs, absDiff, atol)]
            elif atol is None:
                errMsg = ["%s %s %s; diff=%s/%s=%s with rtol=%s"
                          % (lhs, cmpStr, rhs, absDiff, relTo, absDiff/relTo, rtol)]
            else:
                errMsg = ["%s %s %s; diff=%s/%s=%s with rtol=%s, atol=%s"
                          % (lhs, cmpStr, rhs, absDiff, relTo, absDiff/relTo, rtol, atol)]
        else:
            errMsg = ["%d/%d elements %s with rtol=%s, atol=%s"
                      % (bad.sum(), bad.size, failStr, rtol, atol)]
            if plotOnFailure:
                if len(lhs.shape) != 2 or len(rhs.shape) != 2:
                    raise ValueError("plotOnFailure is only valid for 2-d arrays")
                try:
                    plotImageDiff(lhs, rhs, bad, diff=diff, plotFileName=plotFileName)
                except ImportError:
                    errMsg.append("Failure plot requested but matplotlib could not be imported.")
            if printFailures:
                # Make sure everything is an array if any of them are, so we
                # can treat them the same (diff and absDiff are arrays if
                # either rhs or lhs is), and we don't get here if neither is.
                if numpy.isscalar(relTo):
                    relTo = numpy.ones(bad.shape, dtype=float) * relTo
                if numpy.isscalar(lhs):
                    lhs = numpy.ones(bad.shape, dtype=float) * lhs
                if numpy.isscalar(rhs):
                    rhs = numpy.ones(bad.shape, dtype=float) * rhs
                if rtol is None:
                    for a, b, diff in zip(lhs[bad], rhs[bad], absDiff[bad]):
                        errMsg.append("%s %s %s (diff=%s)" % (a, cmpStr, b, diff))
                else:
                    for a, b, diff, rel in zip(lhs[bad], rhs[bad], absDiff[bad], relTo[bad]):
                        errMsg.append("%s %s %s (diff=%s/%s=%s)" % (a, cmpStr, b, diff, rel, diff/rel))

    if msg is not None:
        errMsg.append(msg)
    testCase.assertFalse(failed, msg="\n".join(errMsg))

690 

691 

692@inTestCase 

def assertFloatsNotEqual(testCase: unittest.TestCase, lhs: Union[float, numpy.ndarray],
                         rhs: Union[float, numpy.ndarray], **kwds: Any) -> None:
    """Fail a test if the given floating point values are equal to within the
    given tolerances.

    Thin wrapper that forwards to
    `~lsst.utils.tests.assertFloatsAlmostEqual` with ``invert=True``; see
    that function for the available keyword arguments.

    Parameters
    ----------
    testCase : `unittest.TestCase`
        Instance the test is part of.
    lhs : scalar or array-like
        LHS value(s) to compare; may be a scalar or array-like of any
        dimension.
    rhs : scalar or array-like
        RHS value(s) to compare; may be a scalar or array-like of any
        dimension.

    Raises
    ------
    AssertionError
        The values are almost equal.
    """
    return assertFloatsAlmostEqual(testCase, lhs, rhs, invert=True, **kwds)

718 

719 

720@inTestCase 

def assertFloatsEqual(testCase: unittest.TestCase, lhs: Union[float, numpy.ndarray],
                      rhs: Union[float, numpy.ndarray], **kwargs: Any) -> None:
    """Assert that lhs == rhs (both numeric types, whether scalar or array).

    Thin wrapper that forwards to
    `~lsst.utils.tests.assertFloatsAlmostEqual` with ``rtol=atol=0``; see
    that function for the available keyword arguments.

    Parameters
    ----------
    testCase : `unittest.TestCase`
        Instance the test is part of.
    lhs : scalar or array-like
        LHS value(s) to compare; may be a scalar or array-like of any
        dimension.
    rhs : scalar or array-like
        RHS value(s) to compare; may be a scalar or array-like of any
        dimension.

    Raises
    ------
    AssertionError
        The values are not equal.
    """
    return assertFloatsAlmostEqual(testCase, lhs, rhs, rtol=0, atol=0, **kwargs)

746 

747 

748def _settingsIterator(settings: Dict[str, Sequence[Any]]) -> Iterator[Dict[str, Any]]: 

749 """Return an iterator for the provided test settings 

750 

751 Parameters 

752 ---------- 

753 settings : `dict` (`str`: iterable) 

754 Lists of test parameters. Each should be an iterable of the same 

755 length. If a string is provided as an iterable, it will be converted 

756 to a list of a single string. 

757 

758 Raises 

759 ------ 

760 AssertionError 

761 If the ``settings`` are not of the same length. 

762 

763 Yields 

764 ------ 

765 parameters : `dict` (`str`: anything) 

766 Set of parameters. 

767 """ 

768 for name, values in settings.items(): 

769 if isinstance(values, str): 769 ↛ 772line 769 didn't jump to line 772, because the condition on line 769 was never true

770 # Probably meant as a single-element string, rather than an 

771 # iterable of chars. 

772 settings[name] = [values] 

773 num = len(next(iter(settings.values()))) # Number of settings 

774 for name, values in settings.items(): 

775 assert len(values) == num, f"Length mismatch for setting {name}: {len(values)} vs {num}" 

776 for ii in range(num): 

777 values = [settings[kk][ii] for kk in settings] 

778 yield dict(zip(settings, values)) 

779 

780 

def classParameters(**settings: Sequence[Any]) -> Callable:
    """Class decorator for generating unit tests.

    For each parameter set drawn from ``settings``, a subclass of the
    decorated class is generated with the parameters installed as class
    variables and added to the defining module so the unittest loader can
    discover it.

    Parameters
    ----------
    **settings : `dict` (`str`: iterable)
        The lists of test parameters to set as class variables in turn. Each
        should be an iterable of the same length.

    Examples
    --------
    ::

        @classParameters(foo=[1, 2], bar=[3, 4])
        class MyTestCase(unittest.TestCase):
            ...

    will generate two classes, as if you wrote::

        class MyTestCase_1_3(unittest.TestCase):
            foo = 1
            bar = 3
            ...

        class MyTestCase_2_4(unittest.TestCase):
            foo = 2
            bar = 4
            ...

    Note that the values are embedded in the class name.
    """
    def decorator(cls: Type) -> None:
        # The decorated class itself is replaced by None (this decorator
        # returns nothing), so only the generated subclasses are picked
        # up by test discovery.
        module = sys.modules[cls.__module__].__dict__
        for params in _settingsIterator(settings):
            suffix = "_".join(str(value) for value in params.values())
            name = f"{cls.__name__}_{suffix}"
            bindings = dict(cls.__dict__)
            bindings.update(params)
            module[name] = type(name, (cls,), bindings)
    return decorator

823 

824 

def methodParameters(**settings: Sequence[Any]) -> Callable:
    """Method decorator for unit tests.

    Runs the decorated test once per parameter set drawn from
    ``settings``, using ``TestCase.subTest`` to communicate the values in
    the event of a failure.

    Parameters
    ----------
    **settings : `dict` (`str`: iterable)
        The lists of test parameters. Each should be an iterable of the same
        length.

    Examples
    --------
    ::

        @methodParameters(foo=[1, 2], bar=[3, 4])
        def testSomething(self, foo, bar):
            ...

    will run::

        testSomething(foo=1, bar=3)
        testSomething(foo=2, bar=4)
    """
    def decorator(func: Callable) -> Callable:
        @functools.wraps(func)
        def wrapper(self: unittest.TestCase, *args: Any, **kwargs: Any) -> None:
            # Each combination is reported individually via subTest.
            for parameters in _settingsIterator(settings):
                kwargs.update(parameters)
                with self.subTest(**parameters):
                    func(self, *args, **kwargs)
        return wrapper
    return decorator

859 

860 

861def _cartesianProduct(settings: Mapping[str, Sequence[Any]]) -> Mapping[str, Sequence[Any]]: 

862 """Return the cartesian product of the settings 

863 

864 Parameters 

865 ---------- 

866 settings : `dict` mapping `str` to `iterable` 

867 Parameter combinations. 

868 

869 Returns 

870 ------- 

871 product : `dict` mapping `str` to `iterable` 

872 Parameter combinations covering the cartesian product (all possible 

873 combinations) of the input parameters. 

874 

875 Example 

876 ------- 

877 

878 cartesianProduct({"foo": [1, 2], "bar": ["black", "white"]}) 

879 

880 returns: 

881 

882 {"foo": [1, 1, 2, 2], "bar": ["black", "white", "black", "white"]} 

883 """ 

884 product: Dict[str, List[Any]] = {kk: [] for kk in settings} 

885 for values in itertools.product(*settings.values()): 

886 for kk, vv in zip(settings.keys(), values): 

887 product[kk].append(vv) 

888 return product 

889 

890 

def classParametersProduct(**settings: Sequence[Any]) -> Callable:
    """Class decorator for generating unit tests.

    Generates classes with class variables set according to the cartesian
    product of the supplied ``settings``; equivalent to calling
    `classParameters` on the expanded combinations.

    Parameters
    ----------
    **settings : `dict` (`str`: iterable)
        The lists of test parameters to set as class variables in turn. Each
        should be an iterable.

    Examples
    --------
    ::

        @classParametersProduct(foo=[1, 2], bar=[3, 4])
        class MyTestCase(unittest.TestCase):
            ...

    will generate four classes, as if you wrote::

        class MyTestCase_1_3(unittest.TestCase):
            foo = 1
            bar = 3
            ...

        class MyTestCase_1_4(unittest.TestCase):
            foo = 1
            bar = 4
            ...

        class MyTestCase_2_3(unittest.TestCase):
            foo = 2
            bar = 3
            ...

        class MyTestCase_2_4(unittest.TestCase):
            foo = 2
            bar = 4
            ...

    Note that the values are embedded in the class name.
    """
    return classParameters(**_cartesianProduct(settings))

936 

937 

def methodParametersProduct(**settings: Sequence[Any]) -> Callable:
    """Method decorator for unit tests.

    Iterates over the cartesian product of the supplied settings, using
    `~unittest.TestCase.subTest` to communicate the values in the event
    of a failure; equivalent to calling `methodParameters` on the
    expanded combinations.

    Parameters
    ----------
    **settings : `dict` (`str`: iterable)
        The parameter combinations to test. Each should be an iterable.

    Example
    -------

        @methodParametersProduct(foo=[1, 2], bar=["black", "white"])
        def testSomething(self, foo, bar):
            ...

    will run:

        testSomething(foo=1, bar="black")
        testSomething(foo=1, bar="white")
        testSomething(foo=2, bar="black")
        testSomething(foo=2, bar="white")
    """
    return methodParameters(**_cartesianProduct(settings))

965 

966 

@contextlib.contextmanager
def temporaryDirectory() -> Iterator[str]:
    """Context manager that creates and destroys a temporary directory.

    The difference from `tempfile.TemporaryDirectory` is that this ignores
    errors when deleting a directory, which may happen with some
    filesystems.

    Yields
    ------
    dir : `str`
        Path of the temporary directory.
    """
    tmpdir = tempfile.mkdtemp()
    try:
        yield tmpdir
    finally:
        # Clean up even when the with-body raises (the original skipped
        # removal on exception, leaking the directory). Deletion errors
        # are deliberately ignored for flaky filesystems.
        shutil.rmtree(tmpdir, ignore_errors=True)