Coverage for python/lsst/utils/tests.py: 29%

320 statements  

coverage.py v6.5.0, created at 2022-10-12 02:19 -0700

1# This file is part of utils. 

2# 

3# Developed for the LSST Data Management System. 

4# This product includes software developed by the LSST Project 

5# (https://www.lsst.org). 

6# See the COPYRIGHT file at the top-level directory of this distribution 

7# for details of code ownership. 

8# 

9# Use of this source code is governed by a 3-clause BSD-style 

10# license that can be found in the LICENSE file. 

11 

12"""Support code for running unit tests""" 

13 

14__all__ = [ 

15 "init", 

16 "MemoryTestCase", 

17 "ExecutablesTestCase", 

18 "getTempFilePath", 

19 "TestCase", 

20 "assertFloatsAlmostEqual", 

21 "assertFloatsNotEqual", 

22 "assertFloatsEqual", 

23 "debugger", 

24 "classParameters", 

25 "methodParameters", 

26 "temporaryDirectory", 

27] 

28 

29import contextlib 

30import functools 

31import gc 

32import inspect 

33import itertools 

34import os 

35import shutil 

36import subprocess 

37import sys 

38import tempfile 

39import unittest 

40import warnings 

41from typing import Any, Callable, Dict, Iterator, List, Mapping, Optional, Sequence, Set, Type, Union 

42 

43import numpy 

44import psutil 

45 

46# Initialize the set of open files to an empty set

47open_files = set() 

48 

49 

50def _get_open_files() -> Set[str]: 

51 """Return a set containing the list of files currently open in this 

52 process. 

53 

54 Returns 

55 ------- 

56 open_files : `set` 

57 The set of open file paths.

58 """ 

59 return set(p.path for p in psutil.Process().open_files()) 

60 

61 

62def init() -> None: 

63 """Initialize the memory tester and file descriptor leak tester.""" 

64 global open_files 

65 # Reset the list of open files 

66 open_files = _get_open_files() 

67 

68 

69def sort_tests(tests) -> unittest.TestSuite: 

70 """Sort supplied test suites such that MemoryTestCases are at the end. 

71 

72 `lsst.utils.tests.MemoryTestCase` tests should always run after any other 

73 tests in the module. 

74 

75 Parameters 

76 ---------- 

77 tests : sequence 

78 Sequence of test suites. 

79 

80 Returns 

81 ------- 

82 suite : `unittest.TestSuite` 

83 A combined `~unittest.TestSuite` with 

84 `~lsst.utils.tests.MemoryTestCase` at the end. 

85 """ 

86 suite = unittest.TestSuite() 

87 memtests = [] 

88 for test_suite in tests: 

89 try: 

90 # Just test the first test method in the suite for MemoryTestCase 

91 # Use loop rather than next as it is possible for a test class 

92 # to not have any test methods and the Python community prefers 

93 # for loops over catching a StopIteration exception. 

94 bases = None 

95 for method in test_suite: 

96 bases = inspect.getmro(method.__class__) 

97 break 

98 if bases is not None and MemoryTestCase in bases: 

99 memtests.append(test_suite) 

100 else: 

101 suite.addTests(test_suite) 

102 except TypeError: 

103 if isinstance(test_suite, MemoryTestCase): 

104 memtests.append(test_suite) 

105 else: 

106 suite.addTest(test_suite) 

107 suite.addTests(memtests) 

108 return suite 

109 

110 
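# Illustrative sketch (not part of this module; FooTestCase and LeakTestCase
# are hypothetical test classes, the latter a MemoryTestCase subclass).
# sort_tests() is normally invoked indirectly through the suiteClass wrapper
# installed below, but it can also be applied directly to a list of suites:
#
#     loader = unittest.TestLoader()
#     suites = [loader.loadTestsFromTestCase(LeakTestCase),
#               loader.loadTestsFromTestCase(FooTestCase)]
#     ordered = sort_tests(suites)  # suites for LeakTestCase now run last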

111def suiteClassWrapper(tests): 

112 return unittest.TestSuite(sort_tests(tests)) 

113 

114 

115# Replace the suiteClass callable in the defaultTestLoader 

116# so that we can reorder the test ordering. This will have 

117# no effect if no memory test cases are found. 

118unittest.defaultTestLoader.suiteClass = suiteClassWrapper 

119 

120 

121class MemoryTestCase(unittest.TestCase): 

122 """Check for resource leaks.""" 

123 

124 @classmethod 

125 def tearDownClass(cls) -> None: 

126 """Reset the leak counter when the tests have been completed""" 

127 init() 

128 

129 def testFileDescriptorLeaks(self) -> None: 

130 """Check if any file descriptors are open since init() called.""" 

131 gc.collect() 

132 global open_files 

133 now_open = _get_open_files() 

134 

135 # Some files are opened out of the control of the stack. 

136 now_open = set( 

137 f 

138 for f in now_open 

139 if not f.endswith(".car") 

140 and not f.startswith("/proc/") 

141 and not f.endswith(".ttf") 

142 and not (f.startswith("/var/lib/") and f.endswith("/passwd")) 

143 and not f.endswith("astropy.log") 

144 and not f.endswith("mime/mime.cache") 

145 ) 

146 

147 diff = now_open.difference(open_files) 

148 if diff: 

149 for f in diff: 

150 print("File open: %s" % f) 

151 self.fail("Failed to close %d file%s" % (len(diff), "s" if len(diff) != 1 else "")) 

152 

153 
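# Usage sketch (not part of this module): the boilerplate commonly seen at the
# bottom of an LSST test file, which enables the leak checks above by calling
# init() and subclassing MemoryTestCase; the subclass name is illustrative.
#
#     class TestMemory(lsst.utils.tests.MemoryTestCase):
#         pass
#
#     def setup_module(module):
#         lsst.utils.tests.init()
#
#     if __name__ == "__main__":
#         lsst.utils.tests.init()
#         unittest.main()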

154class ExecutablesTestCase(unittest.TestCase): 

155 """Test that executables can be run and return good status. 

156 

157 The test methods are dynamically created. Callers 

158 must subclass this class in their own test file and invoke 

159 the create_executable_tests() class method to register the tests. 

160 """ 

161 

162 TESTS_DISCOVERED = -1 

163 

164 @classmethod 

165 def setUpClass(cls) -> None: 

166 """Abort testing if automated test creation was enabled and 

167 no tests were found. 

168 """ 

169 if cls.TESTS_DISCOVERED == 0: 

170 raise RuntimeError("No executables discovered.") 

171 

172 def testSanity(self) -> None: 

173 """Ensure that there is at least one test to be 

174 executed. This allows the test runner to trigger the class set-up

175 machinery and check whether any executables were discovered.

176 """ 

177 pass 

178 

179 def assertExecutable( 

180 self, 

181 executable: str, 

182 root_dir: Optional[str] = None, 

183 args: Optional[Sequence[str]] = None, 

184 msg: Optional[str] = None, 

185 ) -> None: 

186 """Check an executable runs and returns good status. 

187 

188 Prints output to standard out. On bad exit status the test 

189 fails. If the executable cannot be located, the test is skipped.

190 

191 Parameters 

192 ---------- 

193 executable : `str` 

194 Path to an executable. ``root_dir`` is not used if this is an 

195 absolute path. 

196 root_dir : `str`, optional 

197 Directory containing executable. Ignored if `None`. 

198 args : `list` or `tuple`, optional 

199 Arguments to be provided to the executable. 

200 msg : `str`, optional 

201 Message to use when the test fails. Can be `None` for default 

202 message. 

203 

204 Raises 

205 ------ 

206 AssertionError 

207 The executable did not return 0 exit status. 

208 """ 

209 if root_dir is not None and not os.path.isabs(executable): 

210 executable = os.path.join(root_dir, executable) 

211 

212 # Form the argument list for subprocess 

213 sp_args = [executable] 

214 argstr = "no arguments" 

215 if args is not None: 

216 sp_args.extend(args) 

217 argstr = 'arguments "' + " ".join(args) + '"' 

218 

219 print("Running executable '{}' with {}...".format(executable, argstr)) 

220 if not os.path.exists(executable): 

221 self.skipTest("Executable {} is unexpectedly missing".format(executable)) 

222 failmsg = None 

223 try: 

224 output = subprocess.check_output(sp_args) 

225 except subprocess.CalledProcessError as e: 

226 output = e.output 

227 failmsg = "Bad exit status from '{}': {}".format(executable, e.returncode) 

228 print(output.decode("utf-8")) 

229 if failmsg: 

230 if msg is None: 

231 msg = failmsg 

232 self.fail(msg) 

233 

234 @classmethod 

235 def _build_test_method(cls, executable: str, root_dir: str) -> None: 

236 """Build a test method and attach to class. 

237 

238 A test method is created for the supplied executable located

239 in the supplied root directory. This method is attached to the class 

240 so that the test runner will discover the test and run it. 

241 

242 Parameters 

243 ---------- 

244 cls : `object` 

245 The class in which to create the tests. 

246 executable : `str` 

247 Name of executable. Can be absolute path. 

248 root_dir : `str` 

249 Path to executable. Not used if executable path is absolute. 

250 """ 

251 if not os.path.isabs(executable): 251 ↛ 252 (line 251 didn't jump to line 252, because the condition on line 251 was never true)

252 executable = os.path.abspath(os.path.join(root_dir, executable)) 

253 

254 # Create the test name from the executable path. 

255 test_name = "test_exe_" + executable.replace("/", "_") 

256 

257 # This is the function that will become the test method 

258 def test_executable_runs(*args: Any) -> None: 

259 self = args[0] 

260 self.assertExecutable(executable) 

261 

262 # Give it a name and attach it to the class 

263 test_executable_runs.__name__ = test_name 

264 setattr(cls, test_name, test_executable_runs) 

265 

266 @classmethod 

267 def create_executable_tests(cls, ref_file: str, executables: Optional[Sequence[str]] = None) -> None: 

268 """Discover executables to test and create corresponding test methods. 

269 

270 Scans the directory containing the supplied reference file 

271 (usually ``__file__`` supplied from the test class) to look for 

272 executables. If executables are found a test method is created 

273 for each one. That test method will run the executable and 

274 check the returned value. 

275 

276 Executable scripts with a ``.py`` extension and shared libraries 

277 are ignored by the scanner. 

278 

279 This class method must be called before test discovery. 

280 

281 Parameters 

282 ---------- 

283 ref_file : `str` 

284 Path to a file within the directory to be searched. 

285 If the files are in the same location as the test file, then 

286 ``__file__`` can be used. 

287 executables : `list` or `tuple`, optional 

288 Sequence of executables that can override the automated 

289 detection. If an executable mentioned here is not found, a 

290 skipped test will be created for it, rather than a failed 

291 test. 

292 

293 Examples 

294 -------- 

295 >>> cls.create_executable_tests(__file__) 

296 """ 

297 # Get the search directory from the reference file 

298 ref_dir = os.path.abspath(os.path.dirname(ref_file)) 

299 

300 if executables is None: 300 ↛ 315 (line 300 didn't jump to line 315, because the condition on line 300 was never false)

301 # Look for executables to test by walking the tree 

302 executables = [] 

303 for root, dirs, files in os.walk(ref_dir): 

304 for f in files: 

305 # Skip Python files. Shared libraries are executable. 

306 if not f.endswith(".py") and not f.endswith(".so"): 

307 full_path = os.path.join(root, f) 

308 if os.access(full_path, os.X_OK): 

309 executables.append(full_path) 

310 

311 # Store the number of tests found for later assessment. 

312 # Do not raise an exception if we have no executables as this would 

313 # cause the testing to abort before the test runner could properly 

314 # integrate it into the failure report. 

315 cls.TESTS_DISCOVERED = len(executables) 

316 

317 # Create the test functions and attach them to the class 

318 for e in executables: 

319 cls._build_test_method(e, ref_dir) 

320 

321 
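# Usage sketch (not part of this module): a test file registers the
# executables at import time, before test discovery; the class name is
# illustrative.
#
#     class BinaryTester(lsst.utils.tests.ExecutablesTestCase):
#         pass
#
#     BinaryTester.create_executable_tests(__file__)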

322@contextlib.contextmanager 

323def getTempFilePath(ext: str, expectOutput: bool = True) -> Iterator[str]: 

324 """Return a path suitable for a temporary file and try to delete the 

325 file on success.

326 

327 If the with block completes successfully then the file is deleted, 

328 if possible; failure results in a printed warning. 

329 If a file remains when it should not, a RuntimeError exception is

330 raised. This exception is also raised if a file is not present on context 

331 manager exit when one is expected to exist. 

332 If the block exits with an exception the file is left on disk so it can be

333 examined. The file name has a random component such that nested context 

334 managers can be used with the same file suffix. 

335 

336 Parameters 

337 ---------- 

338 ext : `str` 

339 File name extension, e.g. ``.fits``. 

340 expectOutput : `bool`, optional 

341 If `True`, a file should be created within the context manager. 

342 If `False`, a file should not be present when the context manager 

343 exits. 

344 

345 Returns 

346 ------- 

347 path : `str` 

348 Path for a temporary file. The path is a combination of the caller's 

349 file path and the name of the top-level function.

350 

351 Examples 

352 -------- 

353 .. code-block:: python 

354 

355 # file tests/testFoo.py 

356 import unittest 

357 import lsst.utils.tests 

358 class FooTestCase(unittest.TestCase): 

359 def testBasics(self): 

360 self.runTest() 

361 

362 def runTest(self): 

363 with lsst.utils.tests.getTempFilePath(".fits") as tmpFile: 

364 # if tests/.tests exists then 

365 # tmpFile = "tests/.tests/testFoo_testBasics.fits" 

366 # otherwise tmpFile = "testFoo_testBasics.fits" 

367 ... 

368 # at the end of this "with" block the path tmpFile will be 

369 # deleted, but only if the file exists and the "with" 

370 # block terminated normally (rather than with an exception) 

371 ... 

372 """ 

373 stack = inspect.stack() 

374 # get name of first function in the file 

375 for i in range(2, len(stack)): 

376 frameInfo = inspect.getframeinfo(stack[i][0]) 

377 if i == 2: 

378 callerFilePath = frameInfo.filename 

379 callerFuncName = frameInfo.function 

380 elif callerFilePath == frameInfo.filename: 

381 # this function called the previous function 

382 callerFuncName = frameInfo.function 

383 else: 

384 break 

385 

386 callerDir, callerFileNameWithExt = os.path.split(callerFilePath) 

387 callerFileName = os.path.splitext(callerFileNameWithExt)[0] 

388 outDir = os.path.join(callerDir, ".tests") 

389 if not os.path.isdir(outDir): 

390 outDir = "" 

391 prefix = "%s_%s-" % (callerFileName, callerFuncName) 

392 outPath = tempfile.mktemp(dir=outDir, suffix=ext, prefix=prefix) 

393 if os.path.exists(outPath): 

394 # There should not be a file there given the randomizer. Warn and 

395 # remove. 

396 # Use stacklevel 3 so that the warning is reported from the end of the 

397 # with block 

398 warnings.warn("Unexpectedly found pre-existing tempfile named %r" % (outPath,), stacklevel=3) 

399 try: 

400 os.remove(outPath) 

401 except OSError: 

402 pass 

403 

404 yield outPath 

405 

406 fileExists = os.path.exists(outPath) 

407 if expectOutput: 

408 if not fileExists: 

409 raise RuntimeError("Temp file expected named {} but none found".format(outPath)) 

410 else: 

411 if fileExists: 

412 raise RuntimeError("Unexpectedly discovered temp file named {}".format(outPath)) 

413 # Try to clean up the file regardless 

414 if fileExists: 

415 try: 

416 os.remove(outPath) 

417 except OSError as e: 

418 # Use stacklevel 3 so that the warning is reported from the end of 

419 # the with block. 

420 warnings.warn("Warning: could not remove file %r: %s" % (outPath, e), stacklevel=3) 

421 

422 
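# Additional sketch (not part of this module): with expectOutput=False the
# context manager instead checks that no file was left behind;
# runSomethingThatShouldNotWrite is a hypothetical function under test.
#
#     with lsst.utils.tests.getTempFilePath(".fits", expectOutput=False) as tmpFile:
#         runSomethingThatShouldNotWrite(tmpFile)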

423class TestCase(unittest.TestCase): 

424 """Subclass of unittest.TestCase that adds some custom assertions for 

425 convenience. 

426 """ 

427 

428 

429def inTestCase(func: Callable) -> Callable: 

430 """Add a free function to our custom TestCase class, while 

431 also making it available as a free function. 

432 """ 

433 setattr(TestCase, func.__name__, func) 

434 return func 

435 

436 
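# Illustrative sketch (not part of this module): a function decorated with
# inTestCase becomes available both as a method of TestCase and as a free
# function; assertIsPositive is a hypothetical assertion.
#
#     @inTestCase
#     def assertIsPositive(testCase, value):
#         testCase.assertGreater(value, 0)
#
#     class MyTest(lsst.utils.tests.TestCase):
#         def testValue(self):
#             self.assertIsPositive(3)   # via the TestCase method
#             assertIsPositive(self, 3)  # or as a free function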

437def debugger(*exceptions): 

438 """Enter the debugger when there's an uncaught exception 

439 

440 To use, just slap a ``@debugger()`` on your function. 

441 

442 You may provide specific exception classes to catch as arguments to 

443 the decorator function, e.g., 

444 ``@debugger(RuntimeError, NotImplementedError)``. 

445 If no exception classes are given, any `Exception` is caught, which

446 covers the `AssertionError` raised by failing `unittest.TestCase` methods.

447 

448 Code provided by "Rosh Oxymoron" on StackOverflow: 

449 http://stackoverflow.com/questions/4398967/python-unit-testing-automatically-running-the-debugger-when-a-test-fails 

450 

451 Notes 

452 ----- 

453 Consider using ``pytest --pdb`` instead of this decorator. 

454 """ 

455 if not exceptions: 

456 exceptions = (Exception,) 

457 

458 def decorator(f): 

459 @functools.wraps(f) 

460 def wrapper(*args, **kwargs): 

461 try: 

462 return f(*args, **kwargs) 

463 except exceptions: 

464 import pdb 

465 import sys 

466 

467 pdb.post_mortem(sys.exc_info()[2]) 

468 

469 return wrapper 

470 

471 return decorator 

472 

473 
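# Illustrative sketch (not part of this module): decorating a test method so
# that a failed assertion drops into pdb; computeSomething is hypothetical.
#
#     class MyTest(unittest.TestCase):
#         @debugger(AssertionError)
#         def testTricky(self):
#             self.assertEqual(computeSomething(), 42)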

474def plotImageDiff( 

475 lhs: numpy.ndarray, 

476 rhs: numpy.ndarray, 

477 bad: Optional[numpy.ndarray] = None, 

478 diff: Optional[numpy.ndarray] = None, 

479 plotFileName: Optional[str] = None, 

480) -> None: 

481 """Plot the comparison of two 2-d NumPy arrays. 

482 

483 Parameters 

484 ---------- 

485 lhs : `numpy.ndarray` 

486 LHS values to compare; a 2-d NumPy array 

487 rhs : `numpy.ndarray` 

488 RHS values to compare; a 2-d NumPy array 

489 bad : `numpy.ndarray` 

490 A 2-d boolean NumPy array of values to emphasize in the plots 

491 diff : `numpy.ndarray` 

492 difference array; a 2-d NumPy array, or None to show lhs-rhs 

493 plotFileName : `str` 

494 Filename to save the plot to. If None, the plot will be displayed in 

495 a window. 

496 

497 Notes 

498 ----- 

499 This method uses `matplotlib` and imports it internally; it should be 

500 wrapped in a try/except block within packages that do not depend on 

501 `matplotlib` (including `~lsst.utils`). 

502 """ 

503 from matplotlib import pyplot 

504 

505 if diff is None: 

506 diff = lhs - rhs 

507 pyplot.figure() 

508 if bad is not None: 

509 # make an rgba image that's red and transparent where not bad 

510 badImage = numpy.zeros(bad.shape + (4,), dtype=numpy.uint8) 

511 badImage[:, :, 0] = 255 

512 badImage[:, :, 1] = 0 

513 badImage[:, :, 2] = 0 

514 badImage[:, :, 3] = 255 * bad 

515 vmin1 = numpy.minimum(numpy.min(lhs), numpy.min(rhs)) 

516 vmax1 = numpy.maximum(numpy.max(lhs), numpy.max(rhs)) 

517 vmin2 = numpy.min(diff) 

518 vmax2 = numpy.max(diff) 

519 for n, (image, title) in enumerate([(lhs, "lhs"), (rhs, "rhs"), (diff, "diff")]): 

520 pyplot.subplot(2, 3, n + 1) 

521 im1 = pyplot.imshow( 

522 image, cmap=pyplot.cm.gray, interpolation="nearest", origin="lower", vmin=vmin1, vmax=vmax1 

523 ) 

524 if bad is not None: 

525 pyplot.imshow(badImage, alpha=0.2, interpolation="nearest", origin="lower") 

526 pyplot.axis("off") 

527 pyplot.title(title) 

528 pyplot.subplot(2, 3, n + 4) 

529 im2 = pyplot.imshow( 

530 image, cmap=pyplot.cm.gray, interpolation="nearest", origin="lower", vmin=vmin2, vmax=vmax2 

531 ) 

532 if bad is not None: 

533 pyplot.imshow(badImage, alpha=0.2, interpolation="nearest", origin="lower") 

534 pyplot.axis("off") 

535 pyplot.title(title) 

536 pyplot.subplots_adjust(left=0.05, bottom=0.05, top=0.92, right=0.75, wspace=0.05, hspace=0.05) 

537 cax1 = pyplot.axes([0.8, 0.55, 0.05, 0.4]) 

538 pyplot.colorbar(im1, cax=cax1) 

539 cax2 = pyplot.axes([0.8, 0.05, 0.05, 0.4]) 

540 pyplot.colorbar(im2, cax=cax2) 

541 if plotFileName: 

542 pyplot.savefig(plotFileName) 

543 else: 

544 pyplot.show() 

545 

546 
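# Illustrative sketch (not part of this module): compare two noisy images and
# save the resulting six-panel figure; requires matplotlib to be installed.
#
#     rng = numpy.random.default_rng(12345)
#     lhs = rng.normal(size=(50, 50))
#     rhs = lhs + 1e-3 * rng.normal(size=(50, 50))
#     plotImageDiff(lhs, rhs, plotFileName="diff.png")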

547@inTestCase 

548def assertFloatsAlmostEqual( 

549 testCase: unittest.TestCase, 

550 lhs: Union[float, numpy.ndarray], 

551 rhs: Union[float, numpy.ndarray], 

552 rtol: Optional[float] = sys.float_info.epsilon, 

553 atol: Optional[float] = sys.float_info.epsilon, 

554 relTo: Optional[float] = None, 

555 printFailures: bool = True, 

556 plotOnFailure: bool = False, 

557 plotFileName: Optional[str] = None, 

558 invert: bool = False, 

559 msg: Optional[str] = None, 

560 ignoreNaNs: bool = False, 

561) -> None: 

562 """Highly-configurable floating point comparisons for scalars and arrays. 

563 

564 The test assertion will fail unless all elements of ``lhs`` and ``rhs``

565 are equal to within the tolerances specified by ``rtol`` and ``atol``.

566 More precisely, the comparison is: 

567 

568 ``abs(lhs - rhs) <= relTo*rtol OR abs(lhs - rhs) <= atol`` 

569 

570 If ``rtol`` or ``atol`` is `None`, that term in the comparison is not 

571 performed at all. 

572 

573 When not specified, ``relTo`` is the elementwise maximum of the absolute 

574 values of ``lhs`` and ``rhs``. If set manually, it should usually be set 

575 to either ``lhs`` or ``rhs``, or a scalar value typical of what is 

576 expected. 

577 

578 Parameters 

579 ---------- 

580 testCase : `unittest.TestCase` 

581 Instance the test is part of. 

582 lhs : scalar or array-like 

583 LHS value(s) to compare; may be a scalar or array-like of any 

584 dimension. 

585 rhs : scalar or array-like 

586 RHS value(s) to compare; may be a scalar or array-like of any 

587 dimension. 

588 rtol : `float`, optional 

589 Relative tolerance for comparison; defaults to double-precision 

590 epsilon. 

591 atol : `float`, optional 

592 Absolute tolerance for comparison; defaults to double-precision 

593 epsilon. 

594 relTo : `float`, optional 

595 Value to which comparison with rtol is relative. 

596 printFailures : `bool`, optional 

597 Upon failure, print all unequal elements as part of the message.

598 plotOnFailure : `bool`, optional 

599 Upon failure, plot the originals and their residual with matplotlib. 

600 Only 2-d arrays are supported. 

601 plotFileName : `str`, optional 

602 Filename to save the plot to. If `None`, the plot will be displayed in 

603 a window. 

604 invert : `bool`, optional 

605 If `True`, invert the comparison and fail only if all elements *are*

606 equal. Used to implement `~lsst.utils.tests.assertFloatsNotEqual`, 

607 which should generally be used instead for clarity. 

608

609 msg : `str`, optional 

610 String to append to the error message when assert fails. 

611 ignoreNaNs : `bool`, optional 

612 If `True` (`False` is default) mask out any NaNs from operand arrays 

613 before performing comparisons if they are in the same locations; NaNs 

614 in different locations trigger test assertion failures, even when

615 ``invert=True``. Scalar NaNs are treated like arrays containing only 

616 NaNs of the same shape as the other operand, and no comparisons are 

617 performed if both sides are scalar NaNs. 

618 

619 Raises 

620 ------ 

621 AssertionError 

622 The values are not almost equal. 

623 """ 

624 if ignoreNaNs: 

625 lhsMask = numpy.isnan(lhs) 

626 rhsMask = numpy.isnan(rhs) 

627 if not numpy.all(lhsMask == rhsMask): 

628 testCase.fail( 

629 f"lhs has {lhsMask.sum()} NaN values and rhs has {rhsMask.sum()} NaN values, " 

630 f"in different locations." 

631 ) 

632 if numpy.all(lhsMask): 

633 assert numpy.all(rhsMask), "Should be guaranteed by previous if." 

634 # All operands are fully NaN (either scalar NaNs or arrays of only 

635 # NaNs). 

636 return 

637 assert not numpy.all(rhsMask), "Should be guaranteed by previous two ifs."

638 # If either operand is an array select just its not-NaN values. Note 

639 # that these expressions are never True for scalar operands, because if 

640 # they are NaN then the numpy.all checks above will catch them. 

641 if numpy.any(lhsMask): 

642 lhs = lhs[numpy.logical_not(lhsMask)] 

643 if numpy.any(rhsMask): 

644 rhs = rhs[numpy.logical_not(rhsMask)] 

645 if not numpy.isfinite(lhs).all(): 

646 testCase.fail("Non-finite values in lhs") 

647 if not numpy.isfinite(rhs).all(): 

648 testCase.fail("Non-finite values in rhs") 

649 diff = lhs - rhs 

650 absDiff = numpy.abs(lhs - rhs) 

651 if rtol is not None: 

652 if relTo is None: 

653 relTo = numpy.maximum(numpy.abs(lhs), numpy.abs(rhs)) 

654 else: 

655 relTo = numpy.abs(relTo) 

656 bad = absDiff > rtol * relTo 

657 if atol is not None: 

658 bad = numpy.logical_and(bad, absDiff > atol) 

659 else: 

660 if atol is None: 

661 raise ValueError("rtol and atol cannot both be None") 

662 bad = absDiff > atol 

663 failed = numpy.any(bad) 

664 if invert: 

665 failed = not failed 

666 bad = numpy.logical_not(bad) 

667 cmpStr = "==" 

668 failStr = "are the same" 

669 else: 

670 cmpStr = "!=" 

671 failStr = "differ" 

672 errMsg = [] 

673 if failed: 

674 if numpy.isscalar(bad): 

675 if rtol is None: 

676 errMsg = ["%s %s %s; diff=%s with atol=%s" % (lhs, cmpStr, rhs, absDiff, atol)] 

677 elif atol is None: 

678 errMsg = [ 

679 "%s %s %s; diff=%s/%s=%s with rtol=%s" 

680 % (lhs, cmpStr, rhs, absDiff, relTo, absDiff / relTo, rtol) 

681 ] 

682 else: 

683 errMsg = [ 

684 "%s %s %s; diff=%s/%s=%s with rtol=%s, atol=%s" 

685 % (lhs, cmpStr, rhs, absDiff, relTo, absDiff / relTo, rtol, atol) 

686 ] 

687 else: 

688 errMsg = ["%d/%d elements %s with rtol=%s, atol=%s" % (bad.sum(), bad.size, failStr, rtol, atol)] 

689 if plotOnFailure: 

690 if len(lhs.shape) != 2 or len(rhs.shape) != 2: 

691 raise ValueError("plotOnFailure is only valid for 2-d arrays") 

692 try: 

693 plotImageDiff(lhs, rhs, bad, diff=diff, plotFileName=plotFileName) 

694 except ImportError: 

695 errMsg.append("Failure plot requested but matplotlib could not be imported.") 

696 if printFailures: 

697 # Make sure everything is an array if any of them are, so we 

698 # can treat them the same (diff and absDiff are arrays if 

699 # either rhs or lhs is), and we don't get here if neither is. 

700 if numpy.isscalar(relTo): 

701 relTo = numpy.ones(bad.shape, dtype=float) * relTo 

702 if numpy.isscalar(lhs): 

703 lhs = numpy.ones(bad.shape, dtype=float) * lhs 

704 if numpy.isscalar(rhs): 

705 rhs = numpy.ones(bad.shape, dtype=float) * rhs 

706 if rtol is None: 

707 for a, b, diff in zip(lhs[bad], rhs[bad], absDiff[bad]): 

708 errMsg.append("%s %s %s (diff=%s)" % (a, cmpStr, b, diff)) 

709 else: 

710 for a, b, diff, rel in zip(lhs[bad], rhs[bad], absDiff[bad], relTo[bad]): 

711 errMsg.append("%s %s %s (diff=%s/%s=%s)" % (a, cmpStr, b, diff, rel, diff / rel)) 

712 

713 if msg is not None: 

714 errMsg.append(msg) 

715 testCase.assertFalse(failed, msg="\n".join(errMsg)) 

716 

717 
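# Illustrative sketch (not part of this module): a purely relative comparison,
# disabling the absolute term of ``abs(lhs - rhs) <= relTo*rtol OR
# abs(lhs - rhs) <= atol`` by passing atol=None.
#
#     class MyTest(lsst.utils.tests.TestCase):
#         def testAlmostEqual(self):
#             a = numpy.array([1.0, 2.0, 3.0])
#             b = a * (1 + 1e-9)
#             self.assertFloatsAlmostEqual(a, b, rtol=1e-8, atol=None)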

718@inTestCase 

719def assertFloatsNotEqual( 

720 testCase: unittest.TestCase, 

721 lhs: Union[float, numpy.ndarray], 

722 rhs: Union[float, numpy.ndarray], 

723 **kwds: Any, 

724) -> None: 

725 """Fail a test if the given floating point values are equal to within the 

726 given tolerances. 

727 

728 See `~lsst.utils.tests.assertFloatsAlmostEqual` (called with 

729 ``rtol=atol=0``) for more information. 

730 

731 Parameters 

732 ---------- 

733 testCase : `unittest.TestCase` 

734 Instance the test is part of. 

735 lhs : scalar or array-like 

736 LHS value(s) to compare; may be a scalar or array-like of any 

737 dimension. 

738 rhs : scalar or array-like 

739 RHS value(s) to compare; may be a scalar or array-like of any 

740 dimension. 

741 

742 Raises 

743 ------ 

744 AssertionError 

745 The values are almost equal. 

746 """ 

747 return assertFloatsAlmostEqual(testCase, lhs, rhs, invert=True, **kwds) 

748 

749 

750@inTestCase 

751def assertFloatsEqual( 

752 testCase: unittest.TestCase, 

753 lhs: Union[float, numpy.ndarray], 

754 rhs: Union[float, numpy.ndarray], 

755 **kwargs: Any, 

756) -> None: 

757 """ 

758 Assert that lhs == rhs (both numeric types, whether scalar or array). 

759 

760 See `~lsst.utils.tests.assertFloatsAlmostEqual` (called with 

761 ``rtol=atol=0``) for more information. 

762 

763 Parameters 

764 ---------- 

765 testCase : `unittest.TestCase` 

766 Instance the test is part of. 

767 lhs : scalar or array-like 

768 LHS value(s) to compare; may be a scalar or array-like of any 

769 dimension. 

770 rhs : scalar or array-like 

771 RHS value(s) to compare; may be a scalar or array-like of any 

772 dimension. 

773 

774 Raises 

775 ------ 

776 AssertionError 

777 The values are not equal. 

778 """ 

779 return assertFloatsAlmostEqual(testCase, lhs, rhs, rtol=0, atol=0, **kwargs) 

780 

781 

782def _settingsIterator(settings: Dict[str, Sequence[Any]]) -> Iterator[Dict[str, Any]]: 

783 """Return an iterator for the provided test settings 

784 

785 Parameters 

786 ---------- 

787 settings : `dict` (`str`: iterable) 

788 Lists of test parameters. Each should be an iterable of the same 

789 length. If a string is provided as an iterable, it will be converted 

790 to a single-element list containing that string.

791 

792 Raises 

793 ------ 

794 AssertionError 

795 If the ``settings`` are not of the same length. 

796 

797 Yields 

798 ------ 

799 parameters : `dict` (`str`: anything) 

800 Set of parameters. 

801 """ 

802 for name, values in settings.items(): 

803 if isinstance(values, str): 803 ↛ 806 (line 803 didn't jump to line 806, because the condition on line 803 was never true)

804 # Probably meant as a single-element string, rather than an 

805 # iterable of chars. 

806 settings[name] = [values] 

807 num = len(next(iter(settings.values()))) # Number of settings 

808 for name, values in settings.items(): 

809 assert len(values) == num, f"Length mismatch for setting {name}: {len(values)} vs {num}" 

810 for ii in range(num): 

811 values = [settings[kk][ii] for kk in settings] 

812 yield dict(zip(settings, values)) 

813 

814 
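# Illustrative sketch (not part of this module): the iterator pairs the i-th
# value of each setting, so
#
#     list(_settingsIterator({"foo": [1, 2], "bar": ["a", "b"]}))
#
# yields [{"foo": 1, "bar": "a"}, {"foo": 2, "bar": "b"}].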

815def classParameters(**settings: Sequence[Any]) -> Callable: 

816 """Class decorator for generating unit tests 

817 

818 This decorator generates classes with class variables according to the 

819 supplied ``settings``. 

820 

821 Parameters 

822 ---------- 

823 **settings : `dict` (`str`: iterable) 

824 The lists of test parameters to set as class variables in turn. Each 

825 should be an iterable of the same length. 

826 

827 Examples 

828 -------- 

829 :: 

830 

831 @classParameters(foo=[1, 2], bar=[3, 4]) 

832 class MyTestCase(unittest.TestCase): 

833 ... 

834 

835 will generate two classes, as if you wrote:: 

836 

837 class MyTestCase_1_3(unittest.TestCase): 

838 foo = 1 

839 bar = 3 

840 ... 

841 

842 class MyTestCase_2_4(unittest.TestCase): 

843 foo = 2 

844 bar = 4 

845 ... 

846 

847 Note that the values are embedded in the class name. 

848 """ 

849 

850 def decorator(cls: Type) -> None: 

851 module = sys.modules[cls.__module__].__dict__ 

852 for params in _settingsIterator(settings): 

853 name = f"{cls.__name__}_{'_'.join(str(vv) for vv in params.values())}" 

854 bindings = dict(cls.__dict__) 

855 bindings.update(params) 

856 module[name] = type(name, (cls,), bindings) 

857 

858 return decorator 

859 

860 

861def methodParameters(**settings: Sequence[Any]) -> Callable: 

862 """Iterate over supplied settings to create subtests automatically. 

863 

864 This decorator iterates over the supplied settings, using 

865 ``TestCase.subTest`` to communicate the values in the event of a failure. 

866 

867 Parameters 

868 ---------- 

869 **settings : `dict` (`str`: iterable) 

870 The lists of test parameters. Each should be an iterable of the same 

871 length. 

872 

873 Examples 

874 -------- 

875 .. code-block:: python 

876 

877 @methodParameters(foo=[1, 2], bar=[3, 4]) 

878 def testSomething(self, foo, bar): 

879 ... 

880 

881 will run: 

882 

883 .. code-block:: python 

884 

885 testSomething(foo=1, bar=3) 

886 testSomething(foo=2, bar=4) 

887 """ 

888 

889 def decorator(func: Callable) -> Callable: 

890 @functools.wraps(func) 

891 def wrapper(self: unittest.TestCase, *args: Any, **kwargs: Any) -> None: 

892 for params in _settingsIterator(settings): 

893 kwargs.update(params) 

894 with self.subTest(**params): 

895 func(self, *args, **kwargs) 

896 

897 return wrapper 

898 

899 return decorator 

900 

901 

902def _cartesianProduct(settings: Mapping[str, Sequence[Any]]) -> Mapping[str, Sequence[Any]]: 

903 """Return the cartesian product of the settings 

904 

905 Parameters 

906 ---------- 

907 settings : `dict` mapping `str` to `iterable` 

908 Parameter combinations. 

909 

910 Returns 

911 ------- 

912 product : `dict` mapping `str` to `iterable` 

913 Parameter combinations covering the cartesian product (all possible 

914 combinations) of the input parameters. 

915 

916 Examples 

917 -------- 

918 .. code-block:: python 

919 

920 cartesianProduct({"foo": [1, 2], "bar": ["black", "white"]}) 

921 

922 will return: 

923 

924 .. code-block:: python 

925 

926 {"foo": [1, 1, 2, 2], "bar": ["black", "white", "black", "white"]} 

927 """ 

928 product: Dict[str, List[Any]] = {kk: [] for kk in settings} 

929 for values in itertools.product(*settings.values()): 

930 for kk, vv in zip(settings.keys(), values): 

931 product[kk].append(vv) 

932 return product 

933 

934 

935def classParametersProduct(**settings: Sequence[Any]) -> Callable: 

936 """Class decorator for generating unit tests 

937 

938 This decorator generates classes with class variables according to the 

939 cartesian product of the supplied ``settings``. 

940 

941 Parameters 

942 ---------- 

943 **settings : `dict` (`str`: iterable) 

944 The lists of test parameters to set as class variables in turn. Each 

945 should be an iterable. 

946 

947 Examples 

948 -------- 

949 .. code-block:: python 

950 

951 @classParametersProduct(foo=[1, 2], bar=[3, 4]) 

952 class MyTestCase(unittest.TestCase): 

953 ... 

954 

955 will generate four classes, as if you wrote:

956 

957 .. code-block:: python 

958 

959 class MyTestCase_1_3(unittest.TestCase): 

960 foo = 1 

961 bar = 3 

962 ... 

963 

964 class MyTestCase_1_4(unittest.TestCase): 

965 foo = 1 

966 bar = 4 

967 ... 

968 

969 class MyTestCase_2_3(unittest.TestCase): 

970 foo = 2 

971 bar = 3 

972 ... 

973 

974 class MyTestCase_2_4(unittest.TestCase): 

975 foo = 2 

976 bar = 4 

977 ... 

978 

979 Note that the values are embedded in the class name. 

980 """ 

981 return classParameters(**_cartesianProduct(settings)) 

982 

983 

984def methodParametersProduct(**settings: Sequence[Any]) -> Callable: 

985 """Iterate over cartesian product creating sub tests. 

986 

987 This decorator iterates over the cartesian product of the supplied 

988 settings, using `~unittest.TestCase.subTest` to communicate the values in 

989 the event of a failure. 

990 

991 Parameters 

992 ---------- 

993 **settings : `dict` (`str`: iterable) 

994 The parameter combinations to test. Each should be an iterable. 

995 

996 Examples

997 --------

998 

999 @methodParametersProduct(foo=[1, 2], bar=["black", "white"]) 

1000 def testSomething(self, foo, bar): 

1001 ... 

1002 

1003 will run: 

1004 

1005 testSomething(foo=1, bar="black") 

1006 testSomething(foo=1, bar="white") 

1007 testSomething(foo=2, bar="black") 

1008 testSomething(foo=2, bar="white") 

1009 """ 

1010 return methodParameters(**_cartesianProduct(settings)) 

1011 

1012 

1013@contextlib.contextmanager 

1014def temporaryDirectory() -> Iterator[str]: 

1015 """Context manager that creates and destroys a temporary directory. 

1016 

1017 The difference from `tempfile.TemporaryDirectory` is that this ignores 

1018 errors when deleting a directory, which may happen with some filesystems. 

1019 """ 

1020 tmpdir = tempfile.mkdtemp() 

1021 yield tmpdir 

1022 shutil.rmtree(tmpdir, ignore_errors=True)
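# Usage sketch (not part of this module): any failure to remove the directory
# on exit is silently ignored.
#
#     with lsst.utils.tests.temporaryDirectory() as tmpdir:
#         path = os.path.join(tmpdir, "scratch.txt")
#         with open(path, "w") as fh:
#             fh.write("scratch data")
#     # tmpdir and its contents have been removed here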