Hide keyboard shortcuts

Hot-keys on this page

r m x p   toggle line displays

j k   next/prev highlighted chunk

0   (zero) top of page

1   (one) first highlighted chunk

1# This file is part of daf_butler. 

2# 

3# Developed for the LSST Data Management System. 

4# This product includes software developed by the LSST Project 

5# (http://www.lsst.org). 

6# See the COPYRIGHT file at the top-level directory of this distribution 

7# for details of code ownership. 

8# 

9# This program is free software: you can redistribute it and/or modify 

10# it under the terms of the GNU General Public License as published by 

11# the Free Software Foundation, either version 3 of the License, or 

12# (at your option) any later version. 

13# 

14# This program is distributed in the hope that it will be useful, 

15# but WITHOUT ANY WARRANTY; without even the implied warranty of 

16# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 

17# GNU General Public License for more details. 

18# 

19# You should have received a copy of the GNU General Public License 

20# along with this program. If not, see <http://www.gnu.org/licenses/>. 

21 

22import unittest 

23import copy 

24from dataclasses import dataclass 

25import os 

26import pickle 

27from random import Random 

28import itertools 

29from typing import Iterator, Optional 

30 

31from lsst.daf.butler import ( 

32 DataCoordinate, 

33 DataCoordinateSequence, 

34 DataCoordinateSet, 

35 Dimension, 

36 DimensionGraph, 

37 DimensionUniverse, 

38 NamedKeyDict, 

39 NamedValueSet, 

40 Registry, 

41 SpatialRegionDatabaseRepresentation, 

42 TimespanDatabaseRepresentation, 

43 YamlRepoImportBackend, 

44) 

45from lsst.daf.butler.registry import RegistryConfig 

46 

# Absolute-or-relative normalized path to the YAML dimension-data export
# bundled with the test data for this package.
DIMENSION_DATA_FILE = os.path.normpath(
    os.path.join(os.path.dirname(__file__), "data", "registry", "hsc-rc2-subset.yaml")
)

49 

50 

def loadDimensionData() -> DataCoordinateSequence:
    """Load dimension data from an export file included in the code repository.

    Returns
    -------
    dataIds : `DataCoordinateSequence`
        A sequence containing all data IDs in the export file, expanded to
        include dimension records.
    """
    # Create an in-memory SQLite database and Registry just to import the YAML
    # data and retrieve it as a set of DataCoordinate objects.
    config = RegistryConfig()
    config["db"] = "sqlite://"
    registry = Registry.createFromConfig(config)
    # Specify the encoding explicitly so parsing the YAML export does not
    # depend on the platform default encoding.
    with open(DIMENSION_DATA_FILE, "r", encoding="utf-8") as stream:
        backend = YamlRepoImportBackend(stream, registry)
        backend.register()
        backend.load(datastore=None)
    dimensions = DimensionGraph(registry.dimensions, names=["visit", "detector", "tract", "patch"])
    return registry.queryDataIds(dimensions).expanded().toSequence()

70 

71 

class DimensionTestCase(unittest.TestCase):
    """Tests for dimensions.

    Everything here depends on the definitions in ``config/dimensions.yaml``,
    either to check that those definitions are read in correctly or simply as
    generic data for exercising various operations.
    """

    def setUp(self):
        self.universe = DimensionUniverse()

    def checkGraphInvariants(self, graph):
        elements = list(graph.elements)
        for index, element in enumerate(elements):
            # Graphs compare under the ordering operators the way sets do.
            self.assertLessEqual(element.graph, graph)
            # Elements compare under the ordering operators according to
            # their position in the DimensionUniverse (topological order
            # with deterministic tiebreakers): everything earlier in the
            # list sorts below this element, everything later sorts above.
            for earlier in elements[:index]:
                self.assertLess(earlier, element)
                self.assertLessEqual(earlier, element)
            for later in elements[index + 1:]:
                self.assertGreater(later, element)
                self.assertGreaterEqual(later, element)
            if isinstance(element, Dimension):
                self.assertEqual(element.graph.required, element.required)
        self.assertEqual(DimensionGraph(self.universe, graph.required), graph)
        self.assertCountEqual(
            graph.required,
            [
                dimension
                for dimension in graph.dimensions
                if not any(dimension in other.graph.implied for other in graph.elements)
            ],
        )
        self.assertCountEqual(graph.implied, graph.dimensions - graph.required)
        self.assertCountEqual(
            graph.dimensions,
            [element for element in graph.elements if isinstance(element, Dimension)],
        )
        self.assertCountEqual(graph.dimensions, itertools.chain(graph.required, graph.implied))
        # Verify the primary key traversal order: an element must appear
        # after everything it requires, and an element implied by another in
        # the graph must appear after at least one of the elements that
        # imply it.
        seen = NamedValueSet()
        for element in graph.primaryKeyTraversalOrder:
            with self.subTest(required=graph.required, implied=graph.implied, element=element):
                seen.add(element)
                self.assertLessEqual(element.graph.required, seen)
                if element in graph.implied:
                    self.assertTrue(any(element in s.implied for s in seen))
        self.assertCountEqual(seen, graph.elements)

    def testConfigRead(self):
        expected = {
            "instrument", "visit", "visit_system", "exposure", "detector",
            "physical_filter", "band", "subfilter",
            "skymap", "tract", "patch",
        } | {f"htm{level}" for level in range(25)}
        self.assertEqual(self.universe.getStaticDimensions().names, expected)

    def testGraphs(self):
        self.checkGraphInvariants(self.universe.empty)
        for element in self.universe.getStaticElements():
            self.checkGraphInvariants(element.graph)

    def testInstrumentDimensions(self):
        graph = DimensionGraph(self.universe, names=("exposure", "detector", "visit"))
        self.assertCountEqual(
            graph.dimensions.names,
            ("instrument", "exposure", "detector",
             "visit", "physical_filter", "band", "visit_system"),
        )
        self.assertCountEqual(graph.required.names, ("instrument", "exposure", "detector", "visit"))
        self.assertCountEqual(graph.implied.names, ("physical_filter", "band", "visit_system"))
        self.assertCountEqual(
            graph.elements.names - graph.dimensions.names,
            ("visit_detector_region", "visit_definition"),
        )
        self.assertCountEqual(graph.governors.names, {"instrument"})

    def testCalibrationDimensions(self):
        graph = DimensionGraph(self.universe, names=("physical_filter", "detector"))
        self.assertCountEqual(
            graph.dimensions.names,
            ("instrument", "detector", "physical_filter", "band"),
        )
        self.assertCountEqual(graph.required.names, ("instrument", "detector", "physical_filter"))
        self.assertCountEqual(graph.implied.names, ("band",))
        self.assertCountEqual(graph.elements.names, graph.dimensions.names)
        self.assertCountEqual(graph.governors.names, {"instrument"})

    def testObservationDimensions(self):
        graph = DimensionGraph(self.universe, names=("exposure", "detector", "visit"))
        self.assertCountEqual(
            graph.dimensions.names,
            ("instrument", "detector", "visit", "exposure",
             "physical_filter", "band", "visit_system"),
        )
        self.assertCountEqual(graph.required.names, ("instrument", "detector", "exposure", "visit"))
        self.assertCountEqual(graph.implied.names, ("physical_filter", "band", "visit_system"))
        self.assertCountEqual(
            graph.elements.names - graph.dimensions.names,
            ("visit_detector_region", "visit_definition"),
        )
        self.assertCountEqual(graph.spatial.names, ("observation_regions",))
        self.assertCountEqual(graph.temporal.names, ("observation_timespans",))
        self.assertCountEqual(graph.governors.names, {"instrument"})
        self.assertEqual(graph.spatial.names, {"observation_regions"})
        self.assertEqual(graph.temporal.names, {"observation_timespans"})
        self.assertEqual(next(iter(graph.spatial)).governor, self.universe["instrument"])
        self.assertEqual(next(iter(graph.temporal)).governor, self.universe["instrument"])

    def testSkyMapDimensions(self):
        graph = DimensionGraph(self.universe, names=("patch",))
        self.assertCountEqual(graph.dimensions.names, ("skymap", "tract", "patch"))
        self.assertCountEqual(graph.required.names, ("skymap", "tract", "patch"))
        self.assertCountEqual(graph.implied.names, ())
        self.assertCountEqual(graph.elements.names, graph.dimensions.names)
        self.assertCountEqual(graph.spatial.names, ("skymap_regions",))
        self.assertCountEqual(graph.governors.names, {"skymap"})
        self.assertEqual(graph.spatial.names, {"skymap_regions"})
        self.assertEqual(next(iter(graph.spatial)).governor, self.universe["skymap"])

    def testSubsetCalculation(self):
        """Test that independent spatial and temporal options are computed
        correctly.
        """
        graph = DimensionGraph(
            self.universe,
            names=("visit", "detector", "tract", "patch", "htm7", "exposure"),
        )
        self.assertCountEqual(graph.spatial.names, ("observation_regions", "skymap_regions", "htm"))
        self.assertCountEqual(graph.temporal.names, ("observation_timespans",))

    def testSchemaGeneration(self):
        tableSpecs = NamedKeyDict({})
        for element in self.universe.getStaticElements():
            if element.hasTable and element.viewOf is None:
                tableSpecs[element] = element.RecordClass.fields.makeTableSpec(
                    RegionReprClass=SpatialRegionDatabaseRepresentation,
                    TimespanReprClass=TimespanDatabaseRepresentation.Compound,
                )
        for element, tableSpec in tableSpecs.items():
            for dep in element.required:
                with self.subTest(element=element.name, dep=dep.name):
                    if dep != element:
                        # Required dependency: its primary key becomes a
                        # primary-key field of this element's table.
                        self.assertIn(dep.name, tableSpec.fields)
                        field = tableSpec.fields[dep.name]
                        self.assertEqual(field.dtype, dep.primaryKey.dtype)
                        self.assertEqual(field.length, dep.primaryKey.length)
                        self.assertEqual(field.nbytes, dep.primaryKey.nbytes)
                        self.assertFalse(field.nullable)
                        self.assertTrue(field.primaryKey)
                    else:
                        # The element itself: its own primary key field.
                        self.assertIn(element.primaryKey.name, tableSpec.fields)
                        field = tableSpec.fields[element.primaryKey.name]
                        self.assertEqual(field.dtype, dep.primaryKey.dtype)
                        self.assertEqual(field.length, dep.primaryKey.length)
                        self.assertEqual(field.nbytes, dep.primaryKey.nbytes)
                        self.assertFalse(field.nullable)
                        self.assertTrue(field.primaryKey)
            for dep in element.implied:
                with self.subTest(element=element.name, dep=dep.name):
                    # Implied dependencies are regular (non-key) fields.
                    self.assertIn(dep.name, tableSpec.fields)
                    self.assertEqual(tableSpec.fields[dep.name].dtype, dep.primaryKey.dtype)
                    self.assertFalse(tableSpec.fields[dep.name].primaryKey)
            for foreignKey in tableSpec.foreignKeys:
                self.assertIn(foreignKey.table, tableSpecs)
                self.assertIn(foreignKey.table, element.graph.dimensions.names)
                self.assertEqual(len(foreignKey.source), len(foreignKey.target))
                targetFields = tableSpecs[foreignKey.table].fields
                for source, target in zip(foreignKey.source, foreignKey.target):
                    self.assertIn(source, tableSpec.fields.names)
                    self.assertIn(target, targetFields.names)
                    self.assertEqual(tableSpec.fields[source].dtype, targetFields[target].dtype)
                    self.assertEqual(tableSpec.fields[source].length, targetFields[target].length)
                    self.assertEqual(tableSpec.fields[source].nbytes, targetFields[target].nbytes)

    def testPickling(self):
        # Within a single process, pickling and copying must both hand back
        # the identical universe object (cross-process behavior is
        # impossible to check here).
        original = DimensionUniverse()
        for clone in (
            pickle.loads(pickle.dumps(original)),
            copy.copy(original),
            copy.deepcopy(original),
        ):
            self.assertIs(original, clone)
        for element in original.getStaticElements():
            self.assertIs(element, pickle.loads(pickle.dumps(element)))
            graph = element.graph
            self.assertIs(graph, pickle.loads(pickle.dumps(graph)))

252 

253 

@dataclass
class SplitByStateFlags:
    """A struct that separates data IDs with different states but the same
    values.
    """

    minimal: Optional[DataCoordinateSequence] = None
    """Data IDs that only contain values for required dimensions.

    `DataCoordinateSequence.hasFull()` will return `True` for this if and only
    if ``minimal.graph.implied`` has no elements.
    `DataCoordinate.hasRecords()` will always return `False`.
    """

    complete: Optional[DataCoordinateSequence] = None
    """Data IDs that contain values for all dimensions.

    `DataCoordinateSequence.hasFull()` will always return `True` and
    `DataCoordinate.hasRecords()` will always return `False` for this
    attribute.
    """

    expanded: Optional[DataCoordinateSequence] = None
    """Data IDs that contain values for all dimensions as well as records.

    `DataCoordinateSequence.hasFull()` and `DataCoordinate.hasRecords()` will
    always return `True` for this attribute.
    """

    def chain(self, n: Optional[int] = None) -> Iterator:
        """Iterate over the data IDs of different types.

        Parameters
        ----------
        n : `int`, optional
            If provided (`None` is default), iterate over only the ``nth``
            data ID in each attribute.

        Yields
        ------
        dataId : `DataCoordinate`
            A data ID from one of the attributes in this struct.
        """
        # Select either everything or just the nth entry of each sequence.
        selector = slice(None) if n is None else slice(n, n + 1)
        for sequence in (self.minimal, self.complete, self.expanded):
            if sequence is not None:
                yield from sequence[selector]

306 

307 

class DataCoordinateTestCase(unittest.TestCase):
    """Tests for `DataCoordinate` and its container classes, driven by data
    IDs loaded from the export file in the test data directory.
    """

    # Fixed seed so failures are reproducible across runs.
    RANDOM_SEED = 10

    @classmethod
    def setUpClass(cls):
        cls.allDataIds = loadDimensionData()

    def setUp(self):
        self.rng = Random(self.RANDOM_SEED)

    def randomDataIds(self, n: int, dataIds: Optional[DataCoordinateSequence] = None):
        """Select random data IDs from those loaded from test data.

        Parameters
        ----------
        n : `int`
            Number of data IDs to select.
        dataIds : `DataCoordinateSequence`, optional
            Data IDs to select from.  Defaults to ``self.allDataIds``.

        Returns
        -------
        selected : `DataCoordinateSequence`
            ``n`` Data IDs randomly selected from ``dataIds`` without
            replacement.
        """
        if dataIds is None:
            dataIds = self.allDataIds
        return DataCoordinateSequence(self.rng.sample(dataIds, n),
                                      graph=dataIds.graph,
                                      hasFull=dataIds.hasFull(),
                                      hasRecords=dataIds.hasRecords(),
                                      check=False)

    def randomDimensionSubset(self, n: int = 3, graph: Optional[DimensionGraph] = None) -> DimensionGraph:
        """Generate a random `DimensionGraph` that has a subset of the
        dimensions in a given one.

        Parameters
        ----------
        n : `int`
            Number of dimensions to select, before automatic expansion by
            `DimensionGraph`.
        graph : `DimensionGraph`, optional
            Dimensions to select from.  Defaults to
            ``self.allDataIds.graph``.

        Returns
        -------
        selected : `DimensionGraph`
            ``n`` or more dimensions randomly selected from ``graph`` without
            replacement.
        """
        if graph is None:
            graph = self.allDataIds.graph
        # Clamp the sample size to the population size: ``min`` (not ``max``)
        # is needed both so Random.sample does not raise ValueError when
        # ``n`` exceeds the number of dimensions and so this actually yields
        # a proper subset instead of always selecting every dimension.
        return DimensionGraph(
            graph.universe,
            names=self.rng.sample(list(graph.dimensions.names), min(n, len(graph.dimensions)))
        )

    def splitByStateFlags(self, dataIds: Optional[DataCoordinateSequence] = None, *,
                          expanded: bool = True,
                          complete: bool = True,
                          minimal: bool = True) -> SplitByStateFlags:
        """Given a sequence of data IDs, generate new equivalent sequences
        containing less information.

        Parameters
        ----------
        dataIds : `DataCoordinateSequence`, optional.
            Data IDs to start from.  Defaults to ``self.allDataIds``.
            ``dataIds.hasRecords()`` and ``dataIds.hasFull()`` must both
            return `True`.
        expanded : `bool`, optional
            If `True` (default) include the original data IDs that contain
            all information in the result.
        complete : `bool`, optional
            If `True` (default) include data IDs for which ``hasFull()``
            returns `True` but ``hasRecords()`` does not.
        minimal : `bool`, optional
            If `True` (default) include data IDs that only contain values for
            required dimensions, for which ``hasFull()`` may not return
            `True`.

        Returns
        -------
        split : `SplitByStateFlags`
            A dataclass holding the indicated data IDs in attributes that
            correspond to the boolean keyword arguments.
        """
        if dataIds is None:
            dataIds = self.allDataIds
        assert dataIds.hasFull() and dataIds.hasRecords()
        result = SplitByStateFlags(expanded=dataIds)
        if complete:
            result.complete = DataCoordinateSequence(
                [DataCoordinate.standardize(e.full.byName(), graph=dataIds.graph) for e in result.expanded],
                graph=dataIds.graph
            )
            self.assertTrue(result.complete.hasFull())
            self.assertFalse(result.complete.hasRecords())
        if minimal:
            result.minimal = DataCoordinateSequence(
                [DataCoordinate.standardize(e.byName(), graph=dataIds.graph) for e in result.expanded],
                graph=dataIds.graph
            )
            self.assertEqual(result.minimal.hasFull(), not dataIds.graph.implied)
            self.assertFalse(result.minimal.hasRecords())
        if not expanded:
            result.expanded = None
        return result

    def testMappingInterface(self):
        """Test that the mapping interface in `DataCoordinate` and (when
        applicable) its ``full`` property are self-consistent and consistent
        with the ``graph`` property.
        """
        for n in range(5):
            dimensions = self.randomDimensionSubset()
            dataIds = self.randomDataIds(n=1).subset(dimensions)
            split = self.splitByStateFlags(dataIds)
            for dataId in split.chain():
                with self.subTest(dataId=dataId):
                    self.assertEqual(list(dataId.values()), [dataId[d] for d in dataId.keys()])
                    self.assertEqual(list(dataId.values()), [dataId[d.name] for d in dataId.keys()])
                    self.assertEqual(dataId.keys(), dataId.graph.required)
            for dataId in itertools.chain(split.complete, split.expanded):
                with self.subTest(dataId=dataId):
                    self.assertTrue(dataId.hasFull())
                    self.assertEqual(dataId.graph.dimensions, dataId.full.keys())
                    self.assertEqual(list(dataId.full.values()), [dataId[k] for k in dataId.graph.dimensions])

    def testEquality(self):
        """Test that different `DataCoordinate` instances with different state
        flags can be compared with each other and other mappings.
        """
        dataIds = self.randomDataIds(n=2)
        split = self.splitByStateFlags(dataIds)
        # Iterate over all combinations of different states of DataCoordinate,
        # with the same underlying data ID values.
        for a0, b0 in itertools.combinations(split.chain(0), 2):
            self.assertEqual(a0, b0)
            self.assertEqual(a0, b0.byName())
            self.assertEqual(a0.byName(), b0)
        # Same thing, for a different data ID value.
        for a1, b1 in itertools.combinations(split.chain(1), 2):
            self.assertEqual(a1, b1)
            self.assertEqual(a1, b1.byName())
            self.assertEqual(a1.byName(), b1)
        # Iterate over all combinations of different states of DataCoordinate,
        # with different underlying data ID values.  Note: compare only the
        # current pair (a0, b1) in both directions; the original version of
        # this loop asserted on stale a1/b0 variables left over from the
        # loops above.
        for a0, b1 in itertools.product(split.chain(0), split.chain(1)):
            self.assertNotEqual(a0, b1)
            self.assertNotEqual(b1, a0)
            self.assertNotEqual(a0, b1.byName())
            self.assertNotEqual(a0.byName(), b1)
            self.assertNotEqual(b1, a0.byName())
            self.assertNotEqual(b1.byName(), a0)

    def testStandardize(self):
        """Test constructing a DataCoordinate from many different kinds of
        input via `DataCoordinate.standardize` and `DataCoordinate.subset`.
        """
        for n in range(5):
            dimensions = self.randomDimensionSubset()
            dataIds = self.randomDataIds(n=1).subset(dimensions)
            split = self.splitByStateFlags(dataIds)
            for dataId in split.chain():
                # Passing in any kind of DataCoordinate alone just returns
                # that object.
                self.assertIs(dataId, DataCoordinate.standardize(dataId))
                # Same if we also explicitly pass the dimensions we want.
                self.assertIs(dataId, DataCoordinate.standardize(dataId, graph=dataId.graph))
                # Same if we pass the dimensions and some irrelevant
                # kwargs.
                self.assertIs(dataId, DataCoordinate.standardize(dataId, graph=dataId.graph, htm7=12))
                # Test constructing a new data ID from this one with a
                # subset of the dimensions.
                # This is not possible for some combinations of
                # dimensions if hasFull is False (see
                # `DataCoordinate.subset` docs).
                newDimensions = self.randomDimensionSubset(n=1, graph=dataId.graph)
                if dataId.hasFull() or dataId.graph.required.issuperset(newDimensions.required):
                    newDataIds = [
                        dataId.subset(newDimensions),
                        DataCoordinate.standardize(dataId, graph=newDimensions),
                        DataCoordinate.standardize(dataId, graph=newDimensions, htm7=12),
                    ]
                    for newDataId in newDataIds:
                        with self.subTest(newDataId=newDataId, type=type(dataId)):
                            commonKeys = dataId.keys() & newDataId.keys()
                            self.assertTrue(commonKeys)
                            self.assertEqual(
                                [newDataId[k] for k in commonKeys],
                                [dataId[k] for k in commonKeys],
                            )
                            # This should never "downgrade" from
                            # Complete to Minimal or Expanded to Complete.
                            if dataId.hasRecords():
                                self.assertTrue(newDataId.hasRecords())
                            if dataId.hasFull():
                                self.assertTrue(newDataId.hasFull())
            # Start from a complete data ID, and pass its values in via
            # several different ways that should be equivalent.
            for dataId in split.complete:
                # Split the keys (dimension names) into two random subsets, so
                # we can pass some as kwargs below.
                keys1 = set(self.rng.sample(list(dataId.graph.dimensions.names),
                                            len(dataId.graph.dimensions) // 2))
                keys2 = dataId.graph.dimensions.names - keys1
                newCompleteDataIds = [
                    DataCoordinate.standardize(dataId.full.byName(), universe=dataId.universe),
                    DataCoordinate.standardize(dataId.full.byName(), graph=dataId.graph),
                    DataCoordinate.standardize(DataCoordinate.makeEmpty(dataId.graph.universe),
                                               **dataId.full.byName()),
                    DataCoordinate.standardize(DataCoordinate.makeEmpty(dataId.graph.universe),
                                               graph=dataId.graph, **dataId.full.byName()),
                    DataCoordinate.standardize(**dataId.full.byName(), universe=dataId.universe),
                    DataCoordinate.standardize(graph=dataId.graph, **dataId.full.byName()),
                    DataCoordinate.standardize(
                        {k: dataId[k] for k in keys1},
                        universe=dataId.universe,
                        **{k: dataId[k] for k in keys2}
                    ),
                    DataCoordinate.standardize(
                        {k: dataId[k] for k in keys1},
                        graph=dataId.graph,
                        **{k: dataId[k] for k in keys2}
                    ),
                ]
                for newDataId in newCompleteDataIds:
                    with self.subTest(dataId=dataId, newDataId=newDataId, type=type(dataId)):
                        self.assertEqual(dataId, newDataId)
                        self.assertTrue(newDataId.hasFull())

    def testRegions(self):
        """Test that data IDs for a few known dimensions have the expected
        regions.
        """
        for dataId in self.randomDataIds(n=4).subset(
                DimensionGraph(self.allDataIds.universe, names=["visit"])):
            self.assertIsNotNone(dataId.region)
            self.assertEqual(dataId.graph.spatial.names, {"observation_regions"})
            self.assertEqual(dataId.region, dataId.records["visit"].region)
        for dataId in self.randomDataIds(n=4).subset(
                DimensionGraph(self.allDataIds.universe, names=["visit", "detector"])):
            self.assertIsNotNone(dataId.region)
            self.assertEqual(dataId.graph.spatial.names, {"observation_regions"})
            self.assertEqual(dataId.region, dataId.records["visit_detector_region"].region)
        for dataId in self.randomDataIds(n=4).subset(
                DimensionGraph(self.allDataIds.universe, names=["tract"])):
            self.assertIsNotNone(dataId.region)
            self.assertEqual(dataId.graph.spatial.names, {"skymap_regions"})
            self.assertEqual(dataId.region, dataId.records["tract"].region)
        for dataId in self.randomDataIds(n=4).subset(
                DimensionGraph(self.allDataIds.universe, names=["patch"])):
            self.assertIsNotNone(dataId.region)
            self.assertEqual(dataId.graph.spatial.names, {"skymap_regions"})
            self.assertEqual(dataId.region, dataId.records["patch"].region)

    def testTimespans(self):
        """Test that data IDs for a few known dimensions have the expected
        timespans.
        """
        for dataId in self.randomDataIds(n=4).subset(
                DimensionGraph(self.allDataIds.universe, names=["visit"])):
            self.assertIsNotNone(dataId.timespan)
            self.assertEqual(dataId.graph.temporal.names, {"observation_timespans"})
            self.assertEqual(dataId.timespan, dataId.records["visit"].timespan)

    def testIterableStatusFlags(self):
        """Test that DataCoordinateSet and DataCoordinateSequence compute
        their hasFull and hasRecords flags correctly from their elements.
        """
        dataIds = self.randomDataIds(n=10)
        split = self.splitByStateFlags(dataIds)
        for cls in (DataCoordinateSet, DataCoordinateSequence):
            self.assertTrue(cls(split.expanded, graph=dataIds.graph, check=True).hasFull())
            self.assertTrue(cls(split.expanded, graph=dataIds.graph, check=False).hasFull())
            self.assertTrue(cls(split.expanded, graph=dataIds.graph, check=True).hasRecords())
            self.assertTrue(cls(split.expanded, graph=dataIds.graph, check=False).hasRecords())
            self.assertTrue(cls(split.complete, graph=dataIds.graph, check=True).hasFull())
            self.assertTrue(cls(split.complete, graph=dataIds.graph, check=False).hasFull())
            self.assertFalse(cls(split.complete, graph=dataIds.graph, check=True).hasRecords())
            self.assertFalse(cls(split.complete, graph=dataIds.graph, check=False).hasRecords())
            with self.assertRaises(ValueError):
                cls(split.complete, graph=dataIds.graph, hasRecords=True, check=True)
            self.assertEqual(cls(split.minimal, graph=dataIds.graph, check=True).hasFull(),
                             not dataIds.graph.implied)
            self.assertEqual(cls(split.minimal, graph=dataIds.graph, check=False).hasFull(),
                             not dataIds.graph.implied)
            self.assertFalse(cls(split.minimal, graph=dataIds.graph, check=True).hasRecords())
            self.assertFalse(cls(split.minimal, graph=dataIds.graph, check=False).hasRecords())
            with self.assertRaises(ValueError):
                cls(split.minimal, graph=dataIds.graph, hasRecords=True, check=True)
            if dataIds.graph.implied:
                with self.assertRaises(ValueError):
                    cls(split.minimal, graph=dataIds.graph, hasFull=True, check=True)

    def testSetOperations(self):
        """Test for self-consistency across DataCoordinateSet's operations.
        """
        c = self.randomDataIds(n=10).toSet()
        a = self.randomDataIds(n=20).toSet() | c
        b = self.randomDataIds(n=20).toSet() | c
        # Make sure we don't have a particularly unlucky random seed, since
        # that would make a lot of this test uninteresting.
        self.assertNotEqual(a, b)
        self.assertGreater(len(a), 0)
        self.assertGreater(len(b), 0)
        # The rest of the tests should not depend on the random seed.
        self.assertEqual(a, a)
        self.assertNotEqual(a, a.toSequence())
        self.assertEqual(a, a.toSequence().toSet())
        self.assertEqual(b, b)
        self.assertNotEqual(b, b.toSequence())
        self.assertEqual(b, b.toSequence().toSet())
        self.assertEqual(a & b, a.intersection(b))
        self.assertLessEqual(a & b, a)
        self.assertLessEqual(a & b, b)
        self.assertEqual(a | b, a.union(b))
        self.assertGreaterEqual(a | b, a)
        self.assertGreaterEqual(a | b, b)
        self.assertEqual(a - b, a.difference(b))
        self.assertLessEqual(a - b, a)
        self.assertLessEqual(b - a, b)
        self.assertEqual(a ^ b, a.symmetric_difference(b))
        self.assertGreaterEqual(a ^ b, (a | b) - (a & b))

635 

636 

# Allow the test suite to be run directly as a script.
if __name__ == "__main__":
    unittest.main()