#
# LSST Data Management System
#
# Copyright 2008-2017 AURA/LSST.
#
# This product includes software developed by the
# LSST Project (http://www.lsst.org/).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the LSST License Statement and
# the GNU General Public License along with this program. If not,
# see <https://www.lsstcorp.org/LegalNotices/>.
#

"""Support code for running unit tests"""

import contextlib
import gc
import inspect
import os
import subprocess
import sys
import unittest
import warnings
import numpy
import psutil
import functools
import tempfile

__all__ = ["init", "MemoryTestCase", "ExecutablesTestCase", "getTempFilePath",
           "TestCase", "assertFloatsAlmostEqual", "assertFloatsNotEqual", "assertFloatsEqual",
           "debugger", "classParameters", "methodParameters"]

# Initialize the list of open files to an empty set
open_files = set()

def _get_open_files():
    """Return a set containing the list of files currently open in this
    process.

    Returns
    -------
    open_files : `set`
        Set containing the list of open files.
    """
    return set(p.path for p in psutil.Process().open_files())


def init():
    """Initialize the memory tester and file descriptor leak tester."""
    global open_files
    # Reset the list of open files
    open_files = _get_open_files()

def sort_tests(tests):
    """Sort supplied test suites such that MemoryTestCases are at the end.

    `lsst.utils.tests.MemoryTestCase` tests should always run after any other
    tests in the module.

    Parameters
    ----------
    tests : sequence
        Sequence of test suites.

    Returns
    -------
    suite : `unittest.TestSuite`
        A combined `~unittest.TestSuite` with
        `~lsst.utils.tests.MemoryTestCase` at the end.
    """

    suite = unittest.TestSuite()
    memtests = []
    for test_suite in tests:
        try:
            # Just test the first test method in the suite for MemoryTestCase.
            # Use a loop rather than ``next`` as it is possible for a test
            # class to not have any test methods, and the Python community
            # prefers for loops over catching a StopIteration exception.
            bases = None
            for method in test_suite:
                bases = inspect.getmro(method.__class__)
                break
            if bases is not None and MemoryTestCase in bases:
                memtests.append(test_suite)
            else:
                suite.addTests(test_suite)
        except TypeError:
            if isinstance(test_suite, MemoryTestCase):
                memtests.append(test_suite)
            else:
                suite.addTest(test_suite)
    suite.addTests(memtests)
    return suite

def suiteClassWrapper(tests):
    return unittest.TestSuite(sort_tests(tests))


# Replace the suiteClass callable in the defaultTestLoader
# so that we can reorder the tests. This will have no effect
# if no memory test cases are found.
unittest.defaultTestLoader.suiteClass = suiteClassWrapper

class MemoryTestCase(unittest.TestCase):
    """Check for resource leaks."""

    @classmethod
    def tearDownClass(cls):
        """Reset the leak counter when the tests have been completed."""
        init()

    def testFileDescriptorLeaks(self):
        """Check whether any file descriptors have been opened since init()
        was called."""
        gc.collect()
        global open_files
        now_open = _get_open_files()

        # Some files are opened outside the control of the stack.
        now_open = set(f for f in now_open if not f.endswith(".car") and
                       not f.startswith("/proc/") and
                       not f.endswith(".ttf") and
                       not (f.startswith("/var/lib/") and f.endswith("/passwd")) and
                       not f.endswith("astropy.log"))

        diff = now_open.difference(open_files)
        if diff:
            for f in diff:
                print("File open: %s" % f)
            self.fail("Failed to close %d file%s" % (len(diff), "s" if len(diff) != 1 else ""))
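# Example (illustrative sketch, not part of this module's API): a test module
# can opt into the leak checks by subclassing MemoryTestCase and resetting the
# file-descriptor baseline with init(); sort_tests() above ensures the subclass
# runs after every other test in the module.
#
#     import lsst.utils.tests
#
#     class MyTestCase(lsst.utils.tests.TestCase):
#         def testSomething(self):
#             ...
#
#     class MemoryTester(lsst.utils.tests.MemoryTestCase):
#         pass
#
#     def setup_module(module):
#         lsst.utils.tests.init()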

class ExecutablesTestCase(unittest.TestCase):
    """Test that executables can be run and return good status.

    The test methods are dynamically created. Callers
    must subclass this class in their own test file and invoke
    the create_executable_tests() class method to register the tests.
    """
    TESTS_DISCOVERED = -1

    @classmethod
    def setUpClass(cls):
        """Abort testing if automated test creation was enabled and
        no tests were found."""

        if cls.TESTS_DISCOVERED == 0:
            raise Exception("No executables discovered.")

    def testSanity(self):
        """This test exists to ensure that there is at least one test to be
        executed. This allows the test runner to trigger the class set up
        machinery to test whether there are some executables to test."""
        pass

    def assertExecutable(self, executable, root_dir=None, args=None, msg=None):
        """Check an executable runs and returns good status.

        Prints output to standard out. On bad exit status the test
        fails. If the executable cannot be located the test is skipped.

        Parameters
        ----------
        executable : `str`
            Path to an executable. ``root_dir`` is not used if this is an
            absolute path.
        root_dir : `str`, optional
            Directory containing executable. Ignored if `None`.
        args : `list` or `tuple`, optional
            Arguments to be provided to the executable.
        msg : `str`, optional
            Message to use when the test fails. Can be `None` for default
            message.

        Raises
        ------
        AssertionError
            The executable did not return 0 exit status.
        """

        if root_dir is not None and not os.path.isabs(executable):
            executable = os.path.join(root_dir, executable)

        # Form the argument list for subprocess
        sp_args = [executable]
        argstr = "no arguments"
        if args is not None:
            sp_args.extend(args)
            argstr = 'arguments "' + " ".join(args) + '"'

        print("Running executable '{}' with {}...".format(executable, argstr))
        if not os.path.exists(executable):
            self.skipTest("Executable {} is unexpectedly missing".format(executable))
        failmsg = None
        try:
            output = subprocess.check_output(sp_args)
        except subprocess.CalledProcessError as e:
            output = e.output
            failmsg = "Bad exit status from '{}': {}".format(executable, e.returncode)
        print(output.decode('utf-8'))
        if failmsg:
            if msg is None:
                msg = failmsg
            self.fail(msg)

    @classmethod
    def _build_test_method(cls, executable, root_dir):
        """Build a test method and attach to class.

        A test method is created for the supplied executable located
        in the supplied root directory. This method is attached to the class
        so that the test runner will discover the test and run it.

        Parameters
        ----------
        cls : `object`
            The class in which to create the tests.
        executable : `str`
            Name of executable. Can be absolute path.
        root_dir : `str`
            Path to executable. Not used if executable path is absolute.
        """
        if not os.path.isabs(executable):
            executable = os.path.abspath(os.path.join(root_dir, executable))

        # Create the test name from the executable path.
        test_name = "test_exe_" + executable.replace("/", "_")

        # This is the function that will become the test method
        def test_executable_runs(*args):
            self = args[0]
            self.assertExecutable(executable)

        # Give it a name and attach it to the class
        test_executable_runs.__name__ = test_name
        setattr(cls, test_name, test_executable_runs)

    @classmethod
    def create_executable_tests(cls, ref_file, executables=None):
        """Discover executables to test and create corresponding test methods.

        Scans the directory containing the supplied reference file
        (usually ``__file__`` supplied from the test class) to look for
        executables. If executables are found a test method is created
        for each one. That test method will run the executable and
        check the returned value.

        Executable scripts with a ``.py`` extension and shared libraries
        are ignored by the scanner.

        This class method must be called before test discovery.

        Parameters
        ----------
        ref_file : `str`
            Path to a file within the directory to be searched.
            If the files are in the same location as the test file, then
            ``__file__`` can be used.
        executables : `list` or `tuple`, optional
            Sequence of executables that can override the automated
            detection. If an executable mentioned here is not found, a
            skipped test will be created for it, rather than a failed
            test.

        Examples
        --------
        >>> cls.create_executable_tests(__file__)
        """

        # Get the search directory from the reference file
        ref_dir = os.path.abspath(os.path.dirname(ref_file))

        if executables is None:
            # Look for executables to test by walking the tree
            executables = []
            for root, dirs, files in os.walk(ref_dir):
                for f in files:
                    # Skip Python files. Shared libraries are executable.
                    if not f.endswith(".py") and not f.endswith(".so"):
                        full_path = os.path.join(root, f)
                        if os.access(full_path, os.X_OK):
                            executables.append(full_path)

        # Store the number of tests found for later assessment.
        # Do not raise an exception if we have no executables as this would
        # cause the testing to abort before the test runner could properly
        # integrate it into the failure report.
        cls.TESTS_DISCOVERED = len(executables)

        # Create the test functions and attach them to the class
        for e in executables:
            cls._build_test_method(e, ref_dir)
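# Example (illustrative sketch; the class name and directory layout are
# hypothetical): a package would typically register its executable tests in a
# file under tests/, calling create_executable_tests at module level so that it
# runs before test discovery.
#
#     import lsst.utils.tests
#
#     class MyBinariesTestCase(lsst.utils.tests.ExecutablesTestCase):
#         """Run every executable found next to this test file."""
#
#     MyBinariesTestCase.create_executable_tests(__file__)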

@contextlib.contextmanager
def getTempFilePath(ext, expectOutput=True):
    """Return a path suitable for a temporary file and try to delete the
    file on success.

    If the with block completes successfully then the file is deleted,
    if possible; failure results in a printed warning.
    If a file remains when it should not, a RuntimeError exception is
    raised. This exception is also raised if a file is not present on context
    manager exit when one is expected to exist.
    If the block exits with an exception the file is left on disk so it can be
    examined. The file name has a random component such that nested context
    managers can be used with the same file suffix.

    Parameters
    ----------
    ext : `str`
        File name extension, e.g. ``.fits``.
    expectOutput : `bool`, optional
        If `True`, a file should be created within the context manager.
        If `False`, a file should not be present when the context manager
        exits.

    Returns
    -------
    `str`
        Path for a temporary file. The path is a combination of the caller's
        file path and the name of the top-level function.

    Notes
    -----
    ::

        # file tests/testFoo.py
        import unittest
        import lsst.utils.tests
        class FooTestCase(unittest.TestCase):
            def testBasics(self):
                self.runTest()

            def runTest(self):
                with lsst.utils.tests.getTempFilePath(".fits") as tmpFile:
                    # if tests/.tests exists then
                    # tmpFile = "tests/.tests/testFoo_testBasics.fits"
                    # otherwise tmpFile = "testFoo_testBasics.fits"
                    ...
                # at the end of this "with" block the path tmpFile will be
                # deleted, but only if the file exists and the "with"
                # block terminated normally (rather than with an exception)
                ...
    """
    stack = inspect.stack()
    # get name of first function in the file
    for i in range(2, len(stack)):
        frameInfo = inspect.getframeinfo(stack[i][0])
        if i == 2:
            callerFilePath = frameInfo.filename
            callerFuncName = frameInfo.function
        elif callerFilePath == frameInfo.filename:
            # this function called the previous function
            callerFuncName = frameInfo.function
        else:
            break

    callerDir, callerFileNameWithExt = os.path.split(callerFilePath)
    callerFileName = os.path.splitext(callerFileNameWithExt)[0]
    outDir = os.path.join(callerDir, ".tests")
    if not os.path.isdir(outDir):
        outDir = ""
    prefix = "%s_%s-" % (callerFileName, callerFuncName)
    outPath = tempfile.mktemp(dir=outDir, suffix=ext, prefix=prefix)
    if os.path.exists(outPath):
        # There should not be a file there given the randomizer. Warn and remove.
        # Use stacklevel 3 so that the warning is reported from the end of the with block
        warnings.warn("Unexpectedly found pre-existing tempfile named %r" % (outPath,),
                      stacklevel=3)
        try:
            os.remove(outPath)
        except OSError:
            pass

    yield outPath

    fileExists = os.path.exists(outPath)
    if expectOutput:
        if not fileExists:
            raise RuntimeError("Temp file expected named {} but none found".format(outPath))
    else:
        if fileExists:
            raise RuntimeError("Unexpectedly discovered temp file named {}".format(outPath))
    # Try to clean up the file regardless
    if fileExists:
        try:
            os.remove(outPath)
        except OSError as e:
            # Use stacklevel 3 so that the warning is reported from the end of the with block
            warnings.warn("Warning: could not remove file %r: %s" % (outPath, e), stacklevel=3)
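# Example (illustrative sketch): the ``expectOutput`` flag can also be used to
# assert that code does *not* leave a file behind.
#
#     with lsst.utils.tests.getTempFilePath(".fits", expectOutput=False) as tmpFile:
#         runSomethingThatShouldNotWrite(tmpFile)  # hypothetical helper
#     # RuntimeError is raised on exit if tmpFile unexpectedly exists.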

class TestCase(unittest.TestCase):
    """Subclass of unittest.TestCase that adds some custom assertions for
    convenience.
    """


def inTestCase(func):
    """A decorator to add a free function to our custom TestCase class, while also
    making it available as a free function.
    """
    setattr(TestCase, func.__name__, func)
    return func
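# Example (illustrative sketch): functions decorated with @inTestCase below,
# such as assertFloatsAlmostEqual, can be used either way.
#
#     class MyTestCase(lsst.utils.tests.TestCase):
#         def testValues(self):
#             # as a method added to TestCase by the decorator ...
#             self.assertFloatsAlmostEqual(3.14, 3.14)
#             # ... or as a free function taking the test case explicitly
#             lsst.utils.tests.assertFloatsAlmostEqual(self, 3.14, 3.14)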

def debugger(*exceptions):
    """Decorator to enter the debugger when there's an uncaught exception

    To use, just slap a ``@debugger()`` on your function.

    You may provide specific exception classes to catch as arguments to
    the decorator function, e.g.,
    ``@debugger(RuntimeError, NotImplementedError)``.
    If no exception classes are given, any exception derived from `Exception`
    is caught; this is suitable for use on `unittest.TestCase` methods.

    Code provided by "Rosh Oxymoron" on StackOverflow:
    http://stackoverflow.com/questions/4398967/python-unit-testing-automatically-running-the-debugger-when-a-test-fails

    Notes
    -----
    Consider using ``pytest --pdb`` instead of this decorator.
    """
    if not exceptions:
        exceptions = (Exception, )

    def decorator(f):
        @functools.wraps(f)
        def wrapper(*args, **kwargs):
            try:
                return f(*args, **kwargs)
            except exceptions:
                import sys
                import pdb
                pdb.post_mortem(sys.exc_info()[2])
        return wrapper
    return decorator
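# Example (illustrative sketch): dropping into pdb when a flaky test raises.
#
#     class MyTestCase(lsst.utils.tests.TestCase):
#         @lsst.utils.tests.debugger(RuntimeError)
#         def testFlakyThing(self):
#             runFlakyThing()  # hypothetical; pdb.post_mortem starts if it raises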

def plotImageDiff(lhs, rhs, bad=None, diff=None, plotFileName=None):
    """Plot the comparison of two 2-d NumPy arrays.

    Parameters
    ----------
    lhs : `numpy.ndarray`
        LHS values to compare; a 2-d NumPy array
    rhs : `numpy.ndarray`
        RHS values to compare; a 2-d NumPy array
    bad : `numpy.ndarray`
        A 2-d boolean NumPy array of values to emphasize in the plots
    diff : `numpy.ndarray`
        difference array; a 2-d NumPy array, or None to show lhs-rhs
    plotFileName : `str`
        Filename to save the plot to. If None, the plot will be displayed in
        a window.

    Notes
    -----
    This method uses `matplotlib` and imports it internally; it should be
    wrapped in a try/except block within packages that do not depend on
    `matplotlib` (including `~lsst.utils`).
    """
    from matplotlib import pyplot
    if diff is None:
        diff = lhs - rhs
    pyplot.figure()
    if bad is not None:
        # make an rgba image that's red and transparent where not bad
        badImage = numpy.zeros(bad.shape + (4,), dtype=numpy.uint8)
        badImage[:, :, 0] = 255
        badImage[:, :, 1] = 0
        badImage[:, :, 2] = 0
        badImage[:, :, 3] = 255*bad
    vmin1 = numpy.minimum(numpy.min(lhs), numpy.min(rhs))
    vmax1 = numpy.maximum(numpy.max(lhs), numpy.max(rhs))
    vmin2 = numpy.min(diff)
    vmax2 = numpy.max(diff)
    for n, (image, title) in enumerate([(lhs, "lhs"), (rhs, "rhs"), (diff, "diff")]):
        pyplot.subplot(2, 3, n + 1)
        im1 = pyplot.imshow(image, cmap=pyplot.cm.gray, interpolation='nearest', origin='lower',
                            vmin=vmin1, vmax=vmax1)
        if bad is not None:
            pyplot.imshow(badImage, alpha=0.2, interpolation='nearest', origin='lower')
        pyplot.axis("off")
        pyplot.title(title)
        pyplot.subplot(2, 3, n + 4)
        im2 = pyplot.imshow(image, cmap=pyplot.cm.gray, interpolation='nearest', origin='lower',
                            vmin=vmin2, vmax=vmax2)
        if bad is not None:
            pyplot.imshow(badImage, alpha=0.2, interpolation='nearest', origin='lower')
        pyplot.axis("off")
        pyplot.title(title)
    pyplot.subplots_adjust(left=0.05, bottom=0.05, top=0.92, right=0.75, wspace=0.05, hspace=0.05)
    cax1 = pyplot.axes([0.8, 0.55, 0.05, 0.4])
    pyplot.colorbar(im1, cax=cax1)
    cax2 = pyplot.axes([0.8, 0.05, 0.05, 0.4])
    pyplot.colorbar(im2, cax=cax2)
    if plotFileName:
        pyplot.savefig(plotFileName)
    else:
        pyplot.show()
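# Example (illustrative sketch): the Notes above suggest guarding the import,
# which is the pattern assertFloatsAlmostEqual below uses when a failure plot
# is requested.
#
#     try:
#         plotImageDiff(lhs, rhs, plotFileName="diff.png")
#     except ImportError:
#         print("matplotlib is not available; skipping the failure plot")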

@inTestCase
def assertFloatsAlmostEqual(testCase, lhs, rhs, rtol=sys.float_info.epsilon,
                            atol=sys.float_info.epsilon, relTo=None,
                            printFailures=True, plotOnFailure=False,
                            plotFileName=None, invert=False, msg=None):
    """Highly-configurable floating point comparisons for scalars and arrays.

    The test assertion will fail unless all elements of ``lhs`` and ``rhs``
    are equal to within the tolerances specified by ``rtol`` and ``atol``.
    More precisely, the comparison is:

    ``abs(lhs - rhs) <= relTo*rtol OR abs(lhs - rhs) <= atol``

    If ``rtol`` or ``atol`` is `None`, that term in the comparison is not
    performed at all.

    When not specified, ``relTo`` is the elementwise maximum of the absolute
    values of ``lhs`` and ``rhs``. If set manually, it should usually be set
    to either ``lhs`` or ``rhs``, or a scalar value typical of what is
    expected.

    Parameters
    ----------
    testCase : `unittest.TestCase`
        Instance the test is part of.
    lhs : scalar or array-like
        LHS value(s) to compare; may be a scalar or array-like of any
        dimension.
    rhs : scalar or array-like
        RHS value(s) to compare; may be a scalar or array-like of any
        dimension.
    rtol : `float`, optional
        Relative tolerance for comparison; defaults to double-precision
        epsilon.
    atol : `float`, optional
        Absolute tolerance for comparison; defaults to double-precision
        epsilon.
    relTo : `float`, optional
        Value to which comparison with rtol is relative.
    printFailures : `bool`, optional
        Upon failure, print all unequal elements as part of the message.
    plotOnFailure : `bool`, optional
        Upon failure, plot the originals and their residual with matplotlib.
        Only 2-d arrays are supported.
    plotFileName : `str`, optional
        Filename to save the plot to. If `None`, the plot will be displayed in
        a window.
    invert : `bool`, optional
        If `True`, invert the comparison and fail only if any elements *are*
        equal. Used to implement `~lsst.utils.tests.assertFloatsNotEqual`,
        which should generally be used instead for clarity.
    msg : `str`, optional
        String to append to the error message when assert fails.

    Raises
    ------
    AssertionError
        The values are not almost equal.
    """
    if not numpy.isfinite(lhs).all():
        testCase.fail("Non-finite values in lhs")
    if not numpy.isfinite(rhs).all():
        testCase.fail("Non-finite values in rhs")
    diff = lhs - rhs
    absDiff = numpy.abs(lhs - rhs)
    if rtol is not None:
        if relTo is None:
            relTo = numpy.maximum(numpy.abs(lhs), numpy.abs(rhs))
        else:
            relTo = numpy.abs(relTo)
        bad = absDiff > rtol*relTo
        if atol is not None:
            bad = numpy.logical_and(bad, absDiff > atol)
    else:
        if atol is None:
            raise ValueError("rtol and atol cannot both be None")
        bad = absDiff > atol
    failed = numpy.any(bad)
    if invert:
        failed = not failed
        bad = numpy.logical_not(bad)
        cmpStr = "=="
        failStr = "are the same"
    else:
        cmpStr = "!="
        failStr = "differ"
    errMsg = []
    if failed:
        if numpy.isscalar(bad):
            if rtol is None:
                errMsg = ["%s %s %s; diff=%s with atol=%s"
                          % (lhs, cmpStr, rhs, absDiff, atol)]
            elif atol is None:
                errMsg = ["%s %s %s; diff=%s/%s=%s with rtol=%s"
                          % (lhs, cmpStr, rhs, absDiff, relTo, absDiff/relTo, rtol)]
            else:
                errMsg = ["%s %s %s; diff=%s/%s=%s with rtol=%s, atol=%s"
                          % (lhs, cmpStr, rhs, absDiff, relTo, absDiff/relTo, rtol, atol)]
        else:
            errMsg = ["%d/%d elements %s with rtol=%s, atol=%s"
                      % (bad.sum(), bad.size, failStr, rtol, atol)]
            if plotOnFailure:
                if len(lhs.shape) != 2 or len(rhs.shape) != 2:
                    raise ValueError("plotOnFailure is only valid for 2-d arrays")
                try:
                    plotImageDiff(lhs, rhs, bad, diff=diff, plotFileName=plotFileName)
                except ImportError:
                    errMsg.append("Failure plot requested but matplotlib could not be imported.")
            if printFailures:
                # Make sure everything is an array if any of them are, so we can treat
                # them the same (diff and absDiff are arrays if either rhs or lhs is),
                # and we don't get here if neither is.
                if numpy.isscalar(relTo):
                    relTo = numpy.ones(bad.shape, dtype=float) * relTo
                if numpy.isscalar(lhs):
                    lhs = numpy.ones(bad.shape, dtype=float) * lhs
                if numpy.isscalar(rhs):
                    rhs = numpy.ones(bad.shape, dtype=float) * rhs
                if rtol is None:
                    for a, b, diff in zip(lhs[bad], rhs[bad], absDiff[bad]):
                        errMsg.append("%s %s %s (diff=%s)" % (a, cmpStr, b, diff))
                else:
                    for a, b, diff, rel in zip(lhs[bad], rhs[bad], absDiff[bad], relTo[bad]):
                        errMsg.append("%s %s %s (diff=%s/%s=%s)" % (a, cmpStr, b, diff, rel, diff/rel))

    if msg is not None:
        errMsg.append(msg)
    testCase.assertFalse(failed, msg="\n".join(errMsg))
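# Worked example (illustrative sketch) of the tolerance logic documented above:
# a pair passes if it satisfies *either* the relative or the absolute test.
#
#     self.assertFloatsAlmostEqual(100.0, 100.0 + 1e-13)
#     # fails: absDiff ~= 1e-13, rtol*relTo ~= 2.2e-16 * 100 ~= 2.2e-14,
#     # atol ~= 2.2e-16, so neither tolerance is satisfied
#
#     self.assertFloatsAlmostEqual(100.0, 100.0 + 1e-13, rtol=1e-14)
#     # passes: rtol*relTo = 1e-14 * 100 = 1e-12, which exceeds the difference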

@inTestCase
def assertFloatsNotEqual(testCase, lhs, rhs, **kwds):
    """Fail a test if the given floating point values are equal to within the
    given tolerances.

    See `~lsst.utils.tests.assertFloatsAlmostEqual` (called with
    ``invert=True``) for more information.

    Parameters
    ----------
    testCase : `unittest.TestCase`
        Instance the test is part of.
    lhs : scalar or array-like
        LHS value(s) to compare; may be a scalar or array-like of any
        dimension.
    rhs : scalar or array-like
        RHS value(s) to compare; may be a scalar or array-like of any
        dimension.

    Raises
    ------
    AssertionError
        The values are almost equal.
    """
    return assertFloatsAlmostEqual(testCase, lhs, rhs, invert=True, **kwds)

@inTestCase
def assertFloatsEqual(testCase, lhs, rhs, **kwargs):
    """
    Assert that lhs == rhs (both numeric types, whether scalar or array).

    See `~lsst.utils.tests.assertFloatsAlmostEqual` (called with
    ``rtol=atol=0``) for more information.

    Parameters
    ----------
    testCase : `unittest.TestCase`
        Instance the test is part of.
    lhs : scalar or array-like
        LHS value(s) to compare; may be a scalar or array-like of any
        dimension.
    rhs : scalar or array-like
        RHS value(s) to compare; may be a scalar or array-like of any
        dimension.

    Raises
    ------
    AssertionError
        The values are not equal.
    """
    return assertFloatsAlmostEqual(testCase, lhs, rhs, rtol=0, atol=0, **kwargs)

def _settingsIterator(settings):
    """Return an iterator for the provided test settings

    Parameters
    ----------
    settings : `dict` (`str`: iterable)
        Lists of test parameters. Each should be an iterable of the same length.
        If a string is provided as an iterable, it will be converted to a list
        of a single string.

    Raises
    ------
    AssertionError
        If the ``settings`` are not of the same length.

    Yields
    ------
    parameters : `dict` (`str`: anything)
        Set of parameters.
    """
    for name, values in settings.items():
        if isinstance(values, str):
            # Probably meant as a single-element string, rather than an iterable of chars
            settings[name] = [values]
    num = len(next(iter(settings.values())))  # Number of settings
    for name, values in settings.items():
        assert len(values) == num, f"Length mismatch for setting {name}: {len(values)} vs {num}"
    for ii in range(num):
        values = [settings[kk][ii] for kk in settings]
        yield dict(zip(settings.keys(), values))
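# Example (illustrative sketch) of what _settingsIterator yields; this is the
# machinery behind classParameters and methodParameters below.
#
#     list(_settingsIterator({"foo": [1, 2], "bar": [3, 4]}))
#     # --> [{"foo": 1, "bar": 3}, {"foo": 2, "bar": 4}]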

def classParameters(**settings):
    """Class decorator for generating unit tests

    This decorator generates classes with class variables according to the
    supplied ``settings``.

    Parameters
    ----------
    **settings : `dict` (`str`: iterable)
        The lists of test parameters to set as class variables in turn. Each
        should be an iterable of the same length.

    Examples
    --------
    ::

        @classParameters(foo=[1, 2], bar=[3, 4])
        class MyTestCase(unittest.TestCase):
            ...

    will generate two classes, as if you wrote::

        class MyTestCase_1_3(unittest.TestCase):
            foo = 1
            bar = 3
            ...

        class MyTestCase_2_4(unittest.TestCase):
            foo = 2
            bar = 4
            ...

    Note that the values are embedded in the class name.
    """
    def decorator(cls):
        module = sys.modules[cls.__module__].__dict__
        for params in _settingsIterator(settings):
            name = f"{cls.__name__}_{'_'.join(str(vv) for vv in params.values())}"
            bindings = dict(cls.__dict__)
            bindings.update(params)
            module[name] = type(name, (cls,), bindings)
    return decorator

def methodParameters(**settings):
    """Method decorator for unit tests

    This decorator iterates over the supplied settings, using
    ``TestCase.subTest`` to communicate the values in the event of a failure.

    Parameters
    ----------
    **settings : `dict` (`str`: iterable)
        The lists of test parameters. Each should be an iterable of the same
        length.

    Examples
    --------
    ::

        @methodParameters(foo=[1, 2], bar=[3, 4])
        def testSomething(self, foo, bar):
            ...

    will run::

        testSomething(foo=1, bar=3)
        testSomething(foo=2, bar=4)
    """
    def decorator(func):
        @functools.wraps(func)
        def wrapper(self, *args, **kwargs):
            for params in _settingsIterator(settings):
                kwargs.update(params)
                with self.subTest(**params):
                    func(self, *args, **kwargs)
        return wrapper
    return decorator