# This file is part of daf_butler.
#
# Developed for the LSST Data Management System.
# This product includes software developed by the LSST Project
# (http://www.lsst.org).
# See the COPYRIGHT file at the top-level directory of this distribution
# for details of code ownership.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations

__all__ = ["RegistryTests"]

from abc import ABC, abstractmethod
import os
import re

import astropy.time
import sqlalchemy
from typing import Optional

from ...core import (
    DataCoordinate,
    DatasetRef,
    DatasetType,
    DimensionGraph,
    NamedValueSet,
    StorageClass,
    ddl,
    YamlRepoImportBackend
)
from .._registry import (
    CollectionType,
    ConflictingDefinitionError,
    InconsistentDataIdError,
    Registry,
    RegistryConfig,
)
from ..wildcards import DatasetTypeRestriction
from ..interfaces import MissingCollectionError, ButlerAttributeExistsError


class RegistryTests(ABC):
    """Generic tests for the `Registry` class that can be subclassed to
    generate tests for different configurations.
    """

    collectionsManager: Optional[str] = None
    """Name of the collections manager class. If a subclass provides a value
    for this member, it overrides the name specified in the default
    configuration (`str`).
    """

    @classmethod
    @abstractmethod
    def getDataDir(cls) -> str:
        """Return the root directory containing test data YAML files.
        """
        raise NotImplementedError()

    def makeRegistryConfig(self) -> RegistryConfig:
        """Create a `RegistryConfig` used to create a registry.

        This method should be called by a subclass from `makeRegistry`.
        The returned instance will be pre-configured based on the values of
        class members, and default-configured for all other parameters.
        Subclasses that need default configuration should just instantiate
        `RegistryConfig` directly.
        """
        config = RegistryConfig()
        if self.collectionsManager:
            config["managers"]["collections"] = self.collectionsManager
        return config

    @abstractmethod
    def makeRegistry(self) -> Registry:
        """Return the Registry instance to be tested.
        """
        raise NotImplementedError()

    def loadData(self, registry: Registry, filename: str):
        """Load registry test data from ``getDataDir/<filename>``,
        which should be a YAML import/export file.
        """
        with open(os.path.join(self.getDataDir(), filename), 'r') as stream:
            backend = YamlRepoImportBackend(stream, registry)
            backend.register()
            backend.load(datastore=None)

    def assertRowCount(self, registry: Registry, table: str, count: int):
        """Check the number of rows in a table.
        """
        # TODO: all tests that rely on this method should be rewritten, as it
        # needs to depend on Registry implementation details to have any
        # chance of working.
        sql = sqlalchemy.sql.select(
            [sqlalchemy.sql.func.count()]
        ).select_from(
            getattr(registry._tables, table)
        )
        self.assertEqual(registry._db.query(sql).scalar(), count)

    def testOpaque(self):
        """Tests for `Registry.registerOpaqueTable`,
        `Registry.insertOpaqueData`, `Registry.fetchOpaqueData`, and
        `Registry.deleteOpaqueData`.
        """
        registry = self.makeRegistry()
        table = "opaque_table_for_testing"
        registry.registerOpaqueTable(
            table,
            spec=ddl.TableSpec(
                fields=[
                    ddl.FieldSpec("id", dtype=sqlalchemy.BigInteger, primaryKey=True),
                    ddl.FieldSpec("name", dtype=sqlalchemy.String, length=16, nullable=False),
                    ddl.FieldSpec("count", dtype=sqlalchemy.SmallInteger, nullable=True),
                ],
            )
        )
        rows = [
            {"id": 1, "name": "one", "count": None},
            {"id": 2, "name": "two", "count": 5},
            {"id": 3, "name": "three", "count": 6},
        ]
        registry.insertOpaqueData(table, *rows)
        self.assertCountEqual(rows, list(registry.fetchOpaqueData(table)))
        self.assertEqual(rows[0:1], list(registry.fetchOpaqueData(table, id=1)))
        self.assertEqual(rows[1:2], list(registry.fetchOpaqueData(table, name="two")))
        self.assertEqual([], list(registry.fetchOpaqueData(table, id=1, name="two")))
        registry.deleteOpaqueData(table, id=3)
        self.assertCountEqual(rows[:2], list(registry.fetchOpaqueData(table)))
        registry.deleteOpaqueData(table)
        self.assertEqual([], list(registry.fetchOpaqueData(table)))

    def testDatasetType(self):
        """Tests for `Registry.registerDatasetType` and
        `Registry.getDatasetType`.
        """
        registry = self.makeRegistry()
        # Check valid insert
        datasetTypeName = "test"
        storageClass = StorageClass("testDatasetType")
        registry.storageClasses.registerStorageClass(storageClass)
        dimensions = registry.dimensions.extract(("instrument", "visit"))
        differentDimensions = registry.dimensions.extract(("instrument", "patch"))
        inDatasetType = DatasetType(datasetTypeName, dimensions, storageClass)
        # Inserting for the first time should return True
        self.assertTrue(registry.registerDatasetType(inDatasetType))
        outDatasetType1 = registry.getDatasetType(datasetTypeName)
        self.assertEqual(outDatasetType1, inDatasetType)

        # Re-inserting should work
        self.assertFalse(registry.registerDatasetType(inDatasetType))
        # Except when they are not identical
        with self.assertRaises(ConflictingDefinitionError):
            nonIdenticalDatasetType = DatasetType(datasetTypeName, differentDimensions, storageClass)
            registry.registerDatasetType(nonIdenticalDatasetType)

        # Template can be None
        datasetTypeName = "testNoneTemplate"
        storageClass = StorageClass("testDatasetType2")
        registry.storageClasses.registerStorageClass(storageClass)
        dimensions = registry.dimensions.extract(("instrument", "visit"))
        inDatasetType = DatasetType(datasetTypeName, dimensions, storageClass)
        registry.registerDatasetType(inDatasetType)
        outDatasetType2 = registry.getDatasetType(datasetTypeName)
        self.assertEqual(outDatasetType2, inDatasetType)

        allTypes = set(registry.queryDatasetTypes())
        self.assertEqual(allTypes, {outDatasetType1, outDatasetType2})

    def testDimensions(self):
        """Tests for `Registry.insertDimensionData`,
        `Registry.syncDimensionData`, and `Registry.expandDataId`.
        """
        registry = self.makeRegistry()
        dimensionName = "instrument"
        dimension = registry.dimensions[dimensionName]
        dimensionValue = {"name": "DummyCam", "visit_max": 10, "exposure_max": 10, "detector_max": 2,
                          "class_name": "lsst.obs.base.Instrument"}
        registry.insertDimensionData(dimensionName, dimensionValue)
        # Inserting the same value twice should fail
        with self.assertRaises(sqlalchemy.exc.IntegrityError):
            registry.insertDimensionData(dimensionName, dimensionValue)
        # expandDataId should retrieve the record we just inserted
        self.assertEqual(
            registry.expandDataId(
                instrument="DummyCam",
                graph=dimension.graph
            ).records[dimensionName].toDict(),
            dimensionValue
        )
        # expandDataId should raise if there is no record with the given ID.
        with self.assertRaises(LookupError):
            registry.expandDataId({"instrument": "Unknown"}, graph=dimension.graph)
        # abstract_filter doesn't have a table; insert should fail.
        with self.assertRaises(TypeError):
            registry.insertDimensionData("abstract_filter", {"abstract_filter": "i"})
        dimensionName2 = "physical_filter"
        dimension2 = registry.dimensions[dimensionName2]
        dimensionValue2 = {"name": "DummyCam_i", "abstract_filter": "i"}
        # Missing required dependency ("instrument") should fail
        with self.assertRaises(sqlalchemy.exc.IntegrityError):
            registry.insertDimensionData(dimensionName2, dimensionValue2)
        # Adding required dependency should fix the failure
        dimensionValue2["instrument"] = "DummyCam"
        registry.insertDimensionData(dimensionName2, dimensionValue2)
        # expandDataId should retrieve the record we just inserted.
        self.assertEqual(
            registry.expandDataId(
                instrument="DummyCam", physical_filter="DummyCam_i",
                graph=dimension2.graph
            ).records[dimensionName2].toDict(),
            dimensionValue2
        )
        # Use syncDimensionData to insert a new record successfully.
        dimensionName3 = "detector"
        dimensionValue3 = {"instrument": "DummyCam", "id": 1, "full_name": "one",
                           "name_in_raft": "zero", "purpose": "SCIENCE"}
        self.assertTrue(registry.syncDimensionData(dimensionName3, dimensionValue3))
        # Sync that again. Note that one field ("raft") is NULL, and that
        # should be okay.
        self.assertFalse(registry.syncDimensionData(dimensionName3, dimensionValue3))
        # Now try that sync with the same primary key but a different value.
        # This should fail.
        with self.assertRaises(ConflictingDefinitionError):
            registry.syncDimensionData(
                dimensionName3,
                {"instrument": "DummyCam", "id": 1, "full_name": "one",
                 "name_in_raft": "four", "purpose": "SCIENCE"}
            )

    def testDataIdRelationships(self):
        """Test that `Registry.expandDataId` raises an exception when the
        given keys are inconsistent.
        """
        registry = self.makeRegistry()
        self.loadData(registry, "base.yaml")
        # Insert a few more dimension records for the next test.
        registry.insertDimensionData(
            "exposure",
            {"instrument": "Cam1", "id": 1, "name": "one", "physical_filter": "Cam1-G"},
        )
        registry.insertDimensionData(
            "exposure",
            {"instrument": "Cam1", "id": 2, "name": "two", "physical_filter": "Cam1-G"},
        )
        registry.insertDimensionData(
            "visit_system",
            {"instrument": "Cam1", "id": 0, "name": "one-to-one"},
        )
        registry.insertDimensionData(
            "visit",
            {"instrument": "Cam1", "id": 1, "name": "one", "physical_filter": "Cam1-G", "visit_system": 0},
        )
        registry.insertDimensionData(
            "visit_definition",
            {"instrument": "Cam1", "visit": 1, "exposure": 1, "visit_system": 0},
        )
        with self.assertRaises(InconsistentDataIdError):
            registry.expandDataId(
                {"instrument": "Cam1", "visit": 1, "exposure": 2},
            )

    def testDataset(self):
        """Basic tests for `Registry.insertDatasets`, `Registry.getDataset`,
        and `Registry.removeDatasets`.
        """
        registry = self.makeRegistry()
        self.loadData(registry, "base.yaml")
        run = "test"
        registry.registerRun(run)
        datasetType = registry.getDatasetType("permabias")
        dataId = {"instrument": "Cam1", "detector": 2}
        ref, = registry.insertDatasets(datasetType, dataIds=[dataId], run=run)
        outRef = registry.getDataset(ref.id)
        self.assertIsNotNone(ref.id)
        self.assertEqual(ref, outRef)
        with self.assertRaises(ConflictingDefinitionError):
            registry.insertDatasets(datasetType, dataIds=[dataId], run=run)
        registry.removeDatasets([ref])
        self.assertIsNone(registry.findDataset(datasetType, dataId, collections=[run]))

    def testFindDataset(self):
        """Tests for `Registry.findDataset`.
        """
        registry = self.makeRegistry()
        self.loadData(registry, "base.yaml")
        run = "test"
        datasetType = registry.getDatasetType("permabias")
        dataId = {"instrument": "Cam1", "detector": 4}
        registry.registerRun(run)
        inputRef, = registry.insertDatasets(datasetType, dataIds=[dataId], run=run)
        outputRef = registry.findDataset(datasetType, dataId, collections=[run])
        self.assertEqual(outputRef, inputRef)
        # Check that retrieval with invalid dataId raises
        with self.assertRaises(LookupError):
            dataId = {"instrument": "Cam1"}  # no detector
            registry.findDataset(datasetType, dataId, collections=run)
        # Check that different dataIds match to different datasets
        dataId1 = {"instrument": "Cam1", "detector": 1}
        inputRef1, = registry.insertDatasets(datasetType, dataIds=[dataId1], run=run)
        dataId2 = {"instrument": "Cam1", "detector": 2}
        inputRef2, = registry.insertDatasets(datasetType, dataIds=[dataId2], run=run)
        self.assertEqual(registry.findDataset(datasetType, dataId1, collections=run), inputRef1)
        self.assertEqual(registry.findDataset(datasetType, dataId2, collections=run), inputRef2)
        self.assertNotEqual(registry.findDataset(datasetType, dataId1, collections=run), inputRef2)
        self.assertNotEqual(registry.findDataset(datasetType, dataId2, collections=run), inputRef1)
        # Check that requesting a non-existing dataId returns None
        nonExistingDataId = {"instrument": "Cam1", "detector": 3}
        self.assertIsNone(registry.findDataset(datasetType, nonExistingDataId, collections=run))

    def testDatasetTypeComponentQueries(self):
        """Test component options when querying for dataset types.
        """
        registry = self.makeRegistry()
        self.loadData(registry, "base.yaml")
        self.loadData(registry, "datasets.yaml")
        # Test querying for dataset types with different inputs.
        # First query for all dataset types; components should only be
        # included when components=True.
        self.assertEqual(
            {"permabias", "permaflat"},
            NamedValueSet(registry.queryDatasetTypes()).names
        )
        self.assertEqual(
            {"permabias", "permaflat"},
            NamedValueSet(registry.queryDatasetTypes(components=False)).names
        )
        self.assertLess(
            {"permabias", "permaflat", "permabias.wcs", "permaflat.photoCalib"},
            NamedValueSet(registry.queryDatasetTypes(components=True)).names
        )
        # Use a pattern that can match either parent or components. Again,
        # components are only returned if components=True.
        self.assertEqual(
            {"permabias"},
            NamedValueSet(registry.queryDatasetTypes(re.compile(".+bias.*"))).names
        )
        self.assertEqual(
            {"permabias"},
            NamedValueSet(registry.queryDatasetTypes(re.compile(".+bias.*"), components=False)).names
        )
        self.assertLess(
            {"permabias", "permabias.wcs"},
            NamedValueSet(registry.queryDatasetTypes(re.compile(".+bias.*"), components=True)).names
        )
        # This pattern matches only a component. In this case we also return
        # that component dataset type if components=None.
        self.assertEqual(
            {"permabias.wcs"},
            NamedValueSet(registry.queryDatasetTypes(re.compile(r".+bias\.wcs"))).names
        )
        self.assertEqual(
            set(),
            NamedValueSet(registry.queryDatasetTypes(re.compile(r".+bias\.wcs"), components=False)).names
        )
        self.assertEqual(
            {"permabias.wcs"},
            NamedValueSet(registry.queryDatasetTypes(re.compile(r".+bias\.wcs"), components=True)).names
        )

    def testComponentLookups(self):
        """Test searching for component datasets via their parents.
        """
        registry = self.makeRegistry()
        self.loadData(registry, "base.yaml")
        self.loadData(registry, "datasets.yaml")
        # Test getting the child dataset type (which does still exist in the
        # Registry), and check for consistency with
        # DatasetRef.makeComponentRef.
        collection = "imported_g"
        parentType = registry.getDatasetType("permabias")
        childType = registry.getDatasetType("permabias.wcs")
        parentRefResolved = registry.findDataset(parentType, collections=collection,
                                                 instrument="Cam1", detector=1)
        self.assertIsInstance(parentRefResolved, DatasetRef)
        self.assertEqual(childType, parentRefResolved.makeComponentRef("wcs").datasetType)
        # Search for a single dataset with findDataset.
        childRef1 = registry.findDataset("permabias.wcs", collections=collection,
                                         dataId=parentRefResolved.dataId)
        self.assertEqual(childRef1, parentRefResolved.makeComponentRef("wcs"))
        # Search for detector data IDs constrained by component dataset
        # existence with queryDimensions.
        dataIds = set(registry.queryDimensions(
            ["detector"],
            datasets=["permabias.wcs"],
            collections=collection,
            expand=False,
        ))
        self.assertEqual(
            dataIds,
            {
                DataCoordinate.standardize(instrument="Cam1", detector=d, graph=parentType.dimensions)
                for d in (1, 2, 3)
            }
        )
        # Search for multiple datasets of a single type with queryDatasets.
        childRefs2 = set(registry.queryDatasets(
            "permabias.wcs",
            collections=collection,
            expand=False,
        ))
        self.assertEqual(
            {ref.unresolved() for ref in childRefs2},
            {DatasetRef(childType, dataId) for dataId in dataIds}
        )

    def testCollections(self):
        """Tests for registry methods that manage collections.
        """
        registry = self.makeRegistry()
        self.loadData(registry, "base.yaml")
        self.loadData(registry, "datasets.yaml")
        run1 = "imported_g"
        run2 = "imported_r"
        datasetType = "permabias"
        # Find some datasets via their run's collection.
        dataId1 = {"instrument": "Cam1", "detector": 1}
        ref1 = registry.findDataset(datasetType, dataId1, collections=run1)
        self.assertIsNotNone(ref1)
        dataId2 = {"instrument": "Cam1", "detector": 2}
        ref2 = registry.findDataset(datasetType, dataId2, collections=run1)
        self.assertIsNotNone(ref2)
        # Associate those into a new collection, then look for them there.
        tag1 = "tag1"
        registry.registerCollection(tag1, type=CollectionType.TAGGED)
        registry.associate(tag1, [ref1, ref2])
        self.assertEqual(registry.findDataset(datasetType, dataId1, collections=tag1), ref1)
        self.assertEqual(registry.findDataset(datasetType, dataId2, collections=tag1), ref2)
        # Disassociate one and verify that we can't find it there anymore...
        registry.disassociate(tag1, [ref1])
        self.assertIsNone(registry.findDataset(datasetType, dataId1, collections=tag1))
        # ...but we can still find ref2 in tag1, and ref1 in the run.
        self.assertEqual(registry.findDataset(datasetType, dataId1, collections=run1), ref1)
        self.assertEqual(registry.findDataset(datasetType, dataId2, collections=tag1), ref2)
        collections = set(registry.queryCollections())
        self.assertEqual(collections, {run1, run2, tag1})
        # Associate both refs into tag1 again; ref2 is already there, but that
        # should be a harmless no-op.
        registry.associate(tag1, [ref1, ref2])
        self.assertEqual(registry.findDataset(datasetType, dataId1, collections=tag1), ref1)
        self.assertEqual(registry.findDataset(datasetType, dataId2, collections=tag1), ref2)
        # Get a different dataset (from a different run) that has the same
        # dataset type and data ID as ref2.
        ref2b = registry.findDataset(datasetType, dataId2, collections=run2)
        self.assertNotEqual(ref2, ref2b)
        # Attempting to associate that into tag1 should be an error.
        with self.assertRaises(ConflictingDefinitionError):
            registry.associate(tag1, [ref2b])
        # That error shouldn't have messed up what we had before.
        self.assertEqual(registry.findDataset(datasetType, dataId1, collections=tag1), ref1)
        self.assertEqual(registry.findDataset(datasetType, dataId2, collections=tag1), ref2)
        # Attempt to associate the conflicting dataset again, this time
        # alongside a dataset that isn't already in the collection and
        # wouldn't itself cause a conflict. This should also fail without
        # modifying anything.
        dataId3 = {"instrument": "Cam1", "detector": 3}
        ref3 = registry.findDataset(datasetType, dataId3, collections=run1)
        with self.assertRaises(ConflictingDefinitionError):
            registry.associate(tag1, [ref3, ref2b])
        self.assertEqual(registry.findDataset(datasetType, dataId1, collections=tag1), ref1)
        self.assertEqual(registry.findDataset(datasetType, dataId2, collections=tag1), ref2)
        self.assertIsNone(registry.findDataset(datasetType, dataId3, collections=tag1))
        # Register a chained collection that searches:
        # 1. 'tag1'
        # 2. 'run1', but only for the permaflat dataset
        # 3. 'run2'
        chain1 = "chain1"
        registry.registerCollection(chain1, type=CollectionType.CHAINED)
        self.assertIs(registry.getCollectionType(chain1), CollectionType.CHAINED)
        # Chained collection exists, but has no collections in it.
        self.assertFalse(registry.getCollectionChain(chain1))
        # If we query for all collections, we should get the chained collection
        # only if we don't ask to flatten it (i.e. yield only its children).
        self.assertEqual(set(registry.queryCollections(flattenChains=False)), {tag1, run1, run2, chain1})
        self.assertEqual(set(registry.queryCollections(flattenChains=True)), {tag1, run1, run2})
        # Attempt to set its child collections to something circular; that
        # should fail.
        with self.assertRaises(ValueError):
            registry.setCollectionChain(chain1, [tag1, chain1])
        # Add the child collections.
        registry.setCollectionChain(chain1, [tag1, (run1, "permaflat"), run2])
        self.assertEqual(
            list(registry.getCollectionChain(chain1)),
            [(tag1, DatasetTypeRestriction.any),
             (run1, DatasetTypeRestriction.fromExpression("permaflat")),
             (run2, DatasetTypeRestriction.any)]
        )
        # Searching for dataId1 or dataId2 in the chain should return ref1 and
        # ref2, because both are in tag1.
        self.assertEqual(registry.findDataset(datasetType, dataId1, collections=chain1), ref1)
        self.assertEqual(registry.findDataset(datasetType, dataId2, collections=chain1), ref2)
        # Now disassociate ref2 from tag1. The search (for permabias) with
        # dataId2 in chain1 should then:
        # 1. not find it in tag1
        # 2. not find it in run1, because the search there is restricted to
        #    permaflat
        # 3. find a different dataset in run2
        registry.disassociate(tag1, [ref2])
        ref2b = registry.findDataset(datasetType, dataId2, collections=chain1)
        self.assertNotEqual(ref2b, ref2)
        self.assertEqual(ref2b, registry.findDataset(datasetType, dataId2, collections=run2))
        # Look in the chain for a permaflat that is in run1; we should get the
        # same ref as if we'd searched run1 directly.
        dataId3 = {"instrument": "Cam1", "detector": 2, "physical_filter": "Cam1-G"}
        self.assertEqual(registry.findDataset("permaflat", dataId3, collections=chain1),
                         registry.findDataset("permaflat", dataId3, collections=run1))
        # Define a new chain so we can test recursive chains.
        chain2 = "chain2"
        registry.registerCollection(chain2, type=CollectionType.CHAINED)
        registry.setCollectionChain(chain2, [(run2, "permabias"), chain1])
        # Searching for permabias with dataId1 should find it via tag1 in
        # chain2, recursing, because it is not in run2.
        self.assertIsNone(registry.findDataset(datasetType, dataId1, collections=run2))
        self.assertEqual(registry.findDataset(datasetType, dataId1, collections=chain2), ref1)
        # Searching for permabias with dataId2 should find it in run2 (ref2b).
        self.assertEqual(registry.findDataset(datasetType, dataId2, collections=chain2), ref2b)
        # Search for a permaflat that is in run2. It should not be found at
        # the front of chain2, because of the restriction to permabias on
        # run2 there, but it should be found at the end of chain1.
        dataId4 = {"instrument": "Cam1", "detector": 3, "physical_filter": "Cam1-R2"}
        ref4 = registry.findDataset("permaflat", dataId4, collections=run2)
        self.assertIsNotNone(ref4)
        self.assertEqual(ref4, registry.findDataset("permaflat", dataId4, collections=chain2))
        # Deleting a collection that's part of a CHAINED collection is not
        # allowed, and is exception-safe.
        with self.assertRaises(Exception):
            registry.removeCollection(run2)
        self.assertEqual(registry.getCollectionType(run2), CollectionType.RUN)
        with self.assertRaises(Exception):
            registry.removeCollection(chain1)
        self.assertEqual(registry.getCollectionType(chain1), CollectionType.CHAINED)
        # Actually remove chain2, test that it's gone by asking for its type.
        registry.removeCollection(chain2)
        with self.assertRaises(MissingCollectionError):
            registry.getCollectionType(chain2)
        # Actually remove run2 and chain1, which should work now.
        registry.removeCollection(chain1)
        registry.removeCollection(run2)
        with self.assertRaises(MissingCollectionError):
            registry.getCollectionType(run2)
        with self.assertRaises(MissingCollectionError):
            registry.getCollectionType(chain1)
        # Remove tag1 as well, just to test that we can remove TAGGED
        # collections.
        registry.removeCollection(tag1)
        with self.assertRaises(MissingCollectionError):
            registry.getCollectionType(tag1)

    def testBasicTransaction(self):
        """Test that all operations within a single transaction block are
        rolled back if an exception propagates out of the block.
        """
        registry = self.makeRegistry()
        storageClass = StorageClass("testDatasetType")
        registry.storageClasses.registerStorageClass(storageClass)
        with registry.transaction():
            registry.insertDimensionData("instrument", {"name": "Cam1", "class_name": "A"})
        with self.assertRaises(ValueError):
            with registry.transaction():
                registry.insertDimensionData("instrument", {"name": "Cam2"})
                raise ValueError("Oops, something went wrong")
        # Cam1 should exist
        self.assertEqual(registry.expandDataId(instrument="Cam1").records["instrument"].class_name, "A")
        # But Cam2 and Cam3 should both not exist
        with self.assertRaises(LookupError):
            registry.expandDataId(instrument="Cam2")
        with self.assertRaises(LookupError):
            registry.expandDataId(instrument="Cam3")

    def testNestedTransaction(self):
        """Test that operations within a transaction block are not rolled back
        if an exception propagates out of an inner transaction block and is
        then caught.
        """
        registry = self.makeRegistry()
        dimension = registry.dimensions["instrument"]
        dataId1 = {"instrument": "DummyCam"}
        dataId2 = {"instrument": "DummyCam2"}
        checkpointReached = False
        with registry.transaction():
            # This should be added and (ultimately) committed.
            registry.insertDimensionData(dimension, dataId1)
            with self.assertRaises(sqlalchemy.exc.IntegrityError):
                with registry.transaction():
                    # This does not conflict, and should succeed (but not
                    # be committed).
                    registry.insertDimensionData(dimension, dataId2)
                    checkpointReached = True
                    # This should conflict and raise, triggering a rollback
                    # of the previous insertion within the same transaction
                    # context, but not the original insertion in the outer
                    # block.
                    registry.insertDimensionData(dimension, dataId1)
        self.assertTrue(checkpointReached)
        self.assertIsNotNone(registry.expandDataId(dataId1, graph=dimension.graph))
        with self.assertRaises(LookupError):
            registry.expandDataId(dataId2, graph=dimension.graph)

    def testInstrumentDimensions(self):
        """Test queries involving only instrument dimensions, with no joins to
        skymap."""
        registry = self.makeRegistry()

        # need a bunch of dimensions and datasets for test
        registry.insertDimensionData(
            "instrument",
            dict(name="DummyCam", visit_max=25, exposure_max=300, detector_max=6)
        )
        registry.insertDimensionData(
            "physical_filter",
            dict(instrument="DummyCam", name="dummy_r", abstract_filter="r"),
            dict(instrument="DummyCam", name="dummy_i", abstract_filter="i"),
        )
        registry.insertDimensionData(
            "detector",
            *[dict(instrument="DummyCam", id=i, full_name=str(i)) for i in range(1, 6)]
        )
        registry.insertDimensionData(
            "visit_system",
            dict(instrument="DummyCam", id=1, name="default"),
        )
        registry.insertDimensionData(
            "visit",
            dict(instrument="DummyCam", id=10, name="ten", physical_filter="dummy_i", visit_system=1),
            dict(instrument="DummyCam", id=11, name="eleven", physical_filter="dummy_r", visit_system=1),
637 dict(instrument="DummyCam", id=20, name="twelve", physical_filter="dummy_r", visit_system=1),
        )
        registry.insertDimensionData(
            "exposure",
            dict(instrument="DummyCam", id=100, name="100", physical_filter="dummy_i"),
            dict(instrument="DummyCam", id=101, name="101", physical_filter="dummy_i"),
            dict(instrument="DummyCam", id=110, name="110", physical_filter="dummy_r"),
            dict(instrument="DummyCam", id=111, name="111", physical_filter="dummy_r"),
            dict(instrument="DummyCam", id=200, name="200", physical_filter="dummy_r"),
            dict(instrument="DummyCam", id=201, name="201", physical_filter="dummy_r"),
        )
        registry.insertDimensionData(
            "visit_definition",
            dict(instrument="DummyCam", exposure=100, visit_system=1, visit=10),
            dict(instrument="DummyCam", exposure=101, visit_system=1, visit=10),
            dict(instrument="DummyCam", exposure=110, visit_system=1, visit=11),
            dict(instrument="DummyCam", exposure=111, visit_system=1, visit=11),
            dict(instrument="DummyCam", exposure=200, visit_system=1, visit=20),
            dict(instrument="DummyCam", exposure=201, visit_system=1, visit=20),
        )
        # dataset types
        run1 = "test1_r"
        run2 = "test2_r"
        tagged2 = "test2_t"
        registry.registerRun(run1)
        registry.registerRun(run2)
        registry.registerCollection(tagged2)
        storageClass = StorageClass("testDataset")
        registry.storageClasses.registerStorageClass(storageClass)
        rawType = DatasetType(name="RAW",
                              dimensions=registry.dimensions.extract(("instrument", "exposure", "detector")),
                              storageClass=storageClass)
        registry.registerDatasetType(rawType)
        calexpType = DatasetType(name="CALEXP",
                                 dimensions=registry.dimensions.extract(("instrument", "visit", "detector")),
                                 storageClass=storageClass)
        registry.registerDatasetType(calexpType)

        # add pre-existing datasets
        for exposure in (100, 101, 110, 111):
            for detector in (1, 2, 3):
                # note that only 3 of 5 detectors have datasets
                dataId = dict(instrument="DummyCam", exposure=exposure, detector=detector)
                ref, = registry.insertDatasets(rawType, dataIds=[dataId], run=run1)
                # exposures 100 and 101 appear in both run1 and tagged2.
                # 100 has different datasets in the different collections;
                # 101 has the same dataset in both collections.
                if exposure == 100:
                    ref, = registry.insertDatasets(rawType, dataIds=[dataId], run=run2)
                if exposure in (100, 101):
                    registry.associate(tagged2, [ref])
        # Add pre-existing datasets to tagged2.
        for exposure in (200, 201):
            for detector in (3, 4, 5):
                # note that only 3 of 5 detectors have datasets
                dataId = dict(instrument="DummyCam", exposure=exposure, detector=detector)
                ref, = registry.insertDatasets(rawType, dataIds=[dataId], run=run2)
                registry.associate(tagged2, [ref])

        dimensions = DimensionGraph(
            registry.dimensions,
            dimensions=(rawType.dimensions.required | calexpType.dimensions.required)
        )
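        # The union of the two dataset types' required dimensions is
        # {instrument, detector, exposure, visit}, so each data ID returned
        # below should carry all four keys.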
        # Test that single dim string works as well as list of str
        rows = list(registry.queryDimensions("visit", datasets=rawType, collections=run1, expand=True))
        rowsI = list(registry.queryDimensions(["visit"], datasets=rawType, collections=run1, expand=True))
        self.assertEqual(rows, rowsI)
        # with empty expression
        rows = list(registry.queryDimensions(dimensions, datasets=rawType, collections=run1, expand=True))
        self.assertEqual(len(rows), 4*3)  # 4 exposures times 3 detectors
        for dataId in rows:
            self.assertCountEqual(dataId.keys(), ("instrument", "detector", "exposure", "visit"))
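            # A packer encodes a data ID into a single integer; the round-trip
            # must reproduce the standardized data ID, and the two packers
            # must encode the same data ID differently.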
            packer1 = registry.dimensions.makePacker("visit_detector", dataId)
            packer2 = registry.dimensions.makePacker("exposure_detector", dataId)
            self.assertEqual(packer1.unpack(packer1.pack(dataId)),
                             DataCoordinate.standardize(dataId, graph=packer1.dimensions))
            self.assertEqual(packer2.unpack(packer2.pack(dataId)),
                             DataCoordinate.standardize(dataId, graph=packer2.dimensions))
            self.assertNotEqual(packer1.pack(dataId), packer2.pack(dataId))
        self.assertCountEqual(set(dataId["exposure"] for dataId in rows),
                              (100, 101, 110, 111))
        self.assertCountEqual(set(dataId["visit"] for dataId in rows), (10, 11))
        self.assertCountEqual(set(dataId["detector"] for dataId in rows), (1, 2, 3))

        # second collection
        rows = list(registry.queryDimensions(dimensions, datasets=rawType, collections=tagged2))
        self.assertEqual(len(rows), 4*3)  # 4 exposures times 3 detectors
        for dataId in rows:
            self.assertCountEqual(dataId.keys(), ("instrument", "detector", "exposure", "visit"))
        self.assertCountEqual(set(dataId["exposure"] for dataId in rows),
                              (100, 101, 200, 201))
        self.assertCountEqual(set(dataId["visit"] for dataId in rows), (10, 20))
        self.assertCountEqual(set(dataId["detector"] for dataId in rows), (1, 2, 3, 4, 5))

        # with two input datasets
        rows = list(registry.queryDimensions(dimensions, datasets=rawType, collections=[run1, tagged2]))
        self.assertEqual(len(set(rows)), 6*3)  # 6 exposures times 3 detectors; set needed to de-dupe
        for dataId in rows:
            self.assertCountEqual(dataId.keys(), ("instrument", "detector", "exposure", "visit"))
        self.assertCountEqual(set(dataId["exposure"] for dataId in rows),
                              (100, 101, 110, 111, 200, 201))
        self.assertCountEqual(set(dataId["visit"] for dataId in rows), (10, 11, 20))
        self.assertCountEqual(set(dataId["detector"] for dataId in rows), (1, 2, 3, 4, 5))

        # limit to single visit
        rows = list(registry.queryDimensions(dimensions, datasets=rawType, collections=run1,
                                             where="visit = 10"))
        self.assertEqual(len(rows), 2*3)  # 2 exposures times 3 detectors
        self.assertCountEqual(set(dataId["exposure"] for dataId in rows), (100, 101))
        self.assertCountEqual(set(dataId["visit"] for dataId in rows), (10,))
        self.assertCountEqual(set(dataId["detector"] for dataId in rows), (1, 2, 3))

        # more limiting expression, using link names instead of Table.column
        rows = list(registry.queryDimensions(dimensions, datasets=rawType, collections=run1,
                                             where="visit = 10 and detector > 1"))
        self.assertEqual(len(rows), 2*2)  # 2 exposures times 2 detectors
        self.assertCountEqual(set(dataId["exposure"] for dataId in rows), (100, 101))
        self.assertCountEqual(set(dataId["visit"] for dataId in rows), (10,))
        self.assertCountEqual(set(dataId["detector"] for dataId in rows), (2, 3))

        # expression excludes everything
        rows = list(registry.queryDimensions(dimensions, datasets=rawType, collections=run1,
                                             where="visit > 1000"))
        self.assertEqual(len(rows), 0)

        # Select by physical_filter; it is not in the dimensions, but it is
        # part of the full expression, so it should work too.
        rows = list(registry.queryDimensions(dimensions, datasets=rawType, collections=run1,
                                             where="physical_filter = 'dummy_r'"))
        self.assertEqual(len(rows), 2*3)  # 2 exposures times 3 detectors
        self.assertCountEqual(set(dataId["exposure"] for dataId in rows), (110, 111))
        self.assertCountEqual(set(dataId["visit"] for dataId in rows), (11,))
        self.assertCountEqual(set(dataId["detector"] for dataId in rows), (1, 2, 3))

    def testSkyMapDimensions(self):
        """Tests involving only skymap dimensions, no joins to instrument."""
        registry = self.makeRegistry()

        # Need a bunch of dimensions and datasets for the test; we want
        # "abstract_filter" in the test, so we also have to add
        # physical_filter dimensions.
        registry.insertDimensionData(
            "instrument",
            dict(instrument="DummyCam")
        )
        registry.insertDimensionData(
            "physical_filter",
            dict(instrument="DummyCam", name="dummy_r", abstract_filter="r"),
            dict(instrument="DummyCam", name="dummy_i", abstract_filter="i"),
        )
        registry.insertDimensionData(
            "skymap",
            dict(name="DummyMap", hash="sha!".encode("utf8"))
        )
        for tract in range(10):
            registry.insertDimensionData("tract", dict(skymap="DummyMap", id=tract))
            registry.insertDimensionData(
                "patch",
                *[dict(skymap="DummyMap", tract=tract, id=patch, cell_x=0, cell_y=0)
                  for patch in range(10)]
            )

        # dataset types
        run = "test"
        registry.registerRun(run)
        storageClass = StorageClass("testDataset")
        registry.storageClasses.registerStorageClass(storageClass)
        calexpType = DatasetType(name="deepCoadd_calexp",
                                 dimensions=registry.dimensions.extract(("skymap", "tract", "patch",
                                                                         "abstract_filter")),
                                 storageClass=storageClass)
        registry.registerDatasetType(calexpType)
        mergeType = DatasetType(name="deepCoadd_mergeDet",
                                dimensions=registry.dimensions.extract(("skymap", "tract", "patch")),
                                storageClass=storageClass)
        registry.registerDatasetType(mergeType)
        measType = DatasetType(name="deepCoadd_meas",
                               dimensions=registry.dimensions.extract(("skymap", "tract", "patch",
                                                                       "abstract_filter")),
                               storageClass=storageClass)
        registry.registerDatasetType(measType)

        dimensions = DimensionGraph(
            registry.dimensions,
            dimensions=(calexpType.dimensions.required | mergeType.dimensions.required
                        | measType.dimensions.required)
        )
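        # The union of the required dimensions here is
        # {skymap, tract, patch, abstract_filter}, so every data ID returned
        # below should carry those four keys.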

        # add pre-existing datasets
        for tract in (1, 3, 5):
            for patch in (2, 4, 6, 7):
                dataId = dict(skymap="DummyMap", tract=tract, patch=patch)
                registry.insertDatasets(mergeType, dataIds=[dataId], run=run)
                for aFilter in ("i", "r"):
                    dataId = dict(skymap="DummyMap", tract=tract, patch=patch, abstract_filter=aFilter)
                    registry.insertDatasets(calexpType, dataIds=[dataId], run=run)

        # with empty expression
        rows = list(registry.queryDimensions(dimensions,
                                             datasets=[calexpType, mergeType], collections=run))
        self.assertEqual(len(rows), 3*4*2)  # 3 tracts x 4 patches x 2 filters
        for dataId in rows:
            self.assertCountEqual(dataId.keys(), ("skymap", "tract", "patch", "abstract_filter"))
        self.assertCountEqual(set(dataId["tract"] for dataId in rows), (1, 3, 5))
        self.assertCountEqual(set(dataId["patch"] for dataId in rows), (2, 4, 6, 7))
        self.assertCountEqual(set(dataId["abstract_filter"] for dataId in rows), ("i", "r"))

        # limit to 2 tracts and 2 patches
        rows = list(registry.queryDimensions(dimensions,
                                             datasets=[calexpType, mergeType], collections=run,
                                             where="tract IN (1, 5) AND patch IN (2, 7)"))
        self.assertEqual(len(rows), 2*2*2)  # 2 tracts x 2 patches x 2 filters
        self.assertCountEqual(set(dataId["tract"] for dataId in rows), (1, 5))
        self.assertCountEqual(set(dataId["patch"] for dataId in rows), (2, 7))
        self.assertCountEqual(set(dataId["abstract_filter"] for dataId in rows), ("i", "r"))

        # limit to single filter
        rows = list(registry.queryDimensions(dimensions,
                                             datasets=[calexpType, mergeType], collections=run,
                                             where="abstract_filter = 'i'"))
        self.assertEqual(len(rows), 3*4*1)  # 3 tracts x 4 patches x 1 filter
        self.assertCountEqual(set(dataId["tract"] for dataId in rows), (1, 3, 5))
        self.assertCountEqual(set(dataId["patch"] for dataId in rows), (2, 4, 6, 7))
        self.assertCountEqual(set(dataId["abstract_filter"] for dataId in rows), ("i",))

        # The expression excludes everything; specifying a non-existent skymap
        # is not a fatal error, just operator error.
        rows = list(registry.queryDimensions(dimensions,
                                             datasets=[calexpType, mergeType], collections=run,
                                             where="skymap = 'Mars'"))
        self.assertEqual(len(rows), 0)

    def testSpatialMatch(self):
        """Test involving a spatial match using join tables.

        Note that a realistic test needs reasonably-defined skypix and regions
        in the registry tables, which is hard to implement in this simple
        test. So we do not actually fill the registry with any data, and all
        queries will return an empty result, but this is still useful for
        coverage of the code that generates the query.
        """
        registry = self.makeRegistry()

        # dataset types
        collection = "test"
        registry.registerRun(name=collection)
        storageClass = StorageClass("testDataset")
        registry.storageClasses.registerStorageClass(storageClass)

        calexpType = DatasetType(name="CALEXP",
                                 dimensions=registry.dimensions.extract(("instrument", "visit", "detector")),
                                 storageClass=storageClass)
        registry.registerDatasetType(calexpType)

        coaddType = DatasetType(name="deepCoadd_calexp",
                                dimensions=registry.dimensions.extract(("skymap", "tract", "patch",
                                                                        "abstract_filter")),
                                storageClass=storageClass)
        registry.registerDatasetType(coaddType)

        dimensions = DimensionGraph(
            registry.dimensions,
            dimensions=(calexpType.dimensions.required | coaddType.dimensions.required)
        )
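        # The union spans both instrument (visit, detector) and skymap
        # (tract, patch) dimensions, so the generated query has to relate
        # them spatially, via their regions and a common skypix dimension.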

        # Without data this should run OK but return an empty set.
        rows = list(registry.queryDimensions(dimensions, datasets=calexpType, collections=collection))
        self.assertEqual(len(rows), 0)

    def testCalibrationLabelIndirection(self):
        """Test that we can look up datasets with calibration_label dimensions
        from a data ID with exposure dimensions.
        """

        def _dt(iso_string):
            return astropy.time.Time(iso_string, format="iso", scale="utc")

        registry = self.makeRegistry()

        flat = DatasetType(
            "flat",
            registry.dimensions.extract(
                ["instrument", "detector", "physical_filter", "calibration_label"]
            ),
            "ImageU"
        )
        registry.registerDatasetType(flat)
        registry.insertDimensionData("instrument", dict(name="DummyCam"))
        registry.insertDimensionData(
            "physical_filter",
            dict(instrument="DummyCam", name="dummy_i", abstract_filter="i"),
        )
        registry.insertDimensionData(
            "detector",
            *[dict(instrument="DummyCam", id=i, full_name=str(i)) for i in (1, 2, 3, 4, 5)]
        )
        registry.insertDimensionData(
            "exposure",
            dict(instrument="DummyCam", id=100, name="100", physical_filter="dummy_i",
                 datetime_begin=_dt("2005-12-15 02:00:00"), datetime_end=_dt("2005-12-15 03:00:00")),
            dict(instrument="DummyCam", id=101, name="101", physical_filter="dummy_i",
                 datetime_begin=_dt("2005-12-16 02:00:00"), datetime_end=_dt("2005-12-16 03:00:00")),
        )
        registry.insertDimensionData(
            "calibration_label",
            dict(instrument="DummyCam", name="first_night",
                 datetime_begin=_dt("2005-12-15 01:00:00"), datetime_end=_dt("2005-12-15 04:00:00")),
            dict(instrument="DummyCam", name="second_night",
                 datetime_begin=_dt("2005-12-16 01:00:00"), datetime_end=_dt("2005-12-16 04:00:00")),
            dict(instrument="DummyCam", name="both_nights",
                 datetime_begin=_dt("2005-12-15 01:00:00"), datetime_end=_dt("2005-12-16 04:00:00")),
        )
        # Different flats for different nights for detectors 1-3 in first
        # collection.
        run1 = "calibs1"
        registry.registerRun(run1)
        for detector in (1, 2, 3):
            registry.insertDatasets(flat, [dict(instrument="DummyCam", calibration_label="first_night",
                                                physical_filter="dummy_i", detector=detector)],
                                    run=run1)
            registry.insertDatasets(flat, [dict(instrument="DummyCam", calibration_label="second_night",
                                                physical_filter="dummy_i", detector=detector)],
                                    run=run1)
        # The same flat for both nights for detectors 3-5 (so detector 3 has
        # multiple valid flats) in second collection.
        run2 = "calib2"
        registry.registerRun(run2)
        for detector in (3, 4, 5):
            registry.insertDatasets(flat, [dict(instrument="DummyCam", calibration_label="both_nights",
                                                physical_filter="dummy_i", detector=detector)],
                                    run=run2)
        # Perform queries for individual exposure+detector combinations, which
        # should always return exactly one flat.
        for exposure in (100, 101):
            for detector in (1, 2, 3):
                with self.subTest(exposure=exposure, detector=detector):
                    rows = list(registry.queryDatasets("flat", collections=[run1],
                                                       instrument="DummyCam",
                                                       exposure=exposure,
                                                       detector=detector))
                    self.assertEqual(len(rows), 1)
            for detector in (3, 4, 5):
                with self.subTest(exposure=exposure, detector=detector):
                    rows = registry.queryDatasets("flat", collections=[run2],
                                                  instrument="DummyCam",
                                                  exposure=exposure,
                                                  detector=detector)
                    self.assertEqual(len(list(rows)), 1)
            for detector in (1, 2, 4, 5):
                with self.subTest(exposure=exposure, detector=detector):
                    rows = registry.queryDatasets("flat", collections=[run1, run2],
                                                  instrument="DummyCam",
                                                  exposure=exposure,
                                                  detector=detector)
                    self.assertEqual(len(list(rows)), 1)
            for detector in (3,):
                with self.subTest(exposure=exposure, detector=detector):
                    rows = registry.queryDatasets("flat", collections=[run1, run2],
                                                  instrument="DummyCam",
                                                  exposure=exposure,
                                                  detector=detector)
                    self.assertEqual(len(list(rows)), 2)

    def testAbstractFilterQuery(self):
        """Test that we can run a query that just lists the known
        abstract_filters. This is tricky because abstract_filter is
        backed by a query against physical_filter.
        """
        registry = self.makeRegistry()
        registry.insertDimensionData("instrument", dict(name="DummyCam"))
        registry.insertDimensionData(
            "physical_filter",
            dict(instrument="DummyCam", name="dummy_i", abstract_filter="i"),
            dict(instrument="DummyCam", name="dummy_i2", abstract_filter="i"),
            dict(instrument="DummyCam", name="dummy_r", abstract_filter="r"),
        )
        rows = list(registry.queryDimensions(["abstract_filter"]))
        self.assertCountEqual(
            rows,
            [DataCoordinate.standardize(abstract_filter="i", universe=registry.dimensions),
             DataCoordinate.standardize(abstract_filter="r", universe=registry.dimensions)]
        )

    def testAttributeManager(self):
        """Test basic functionality of attribute manager.
        """
        # number of attributes with schema versions in a fresh database
        VERSION_COUNT = 0

        registry = self.makeRegistry()
        attributes = registry._attributes

        # check what get() returns for a non-existing key
        self.assertIsNone(attributes.get("attr"))
        self.assertEqual(attributes.get("attr", ""), "")
        self.assertEqual(attributes.get("attr", "Value"), "Value")
        self.assertEqual(len(list(attributes.items())), VERSION_COUNT)

        # cannot store empty key or value
        with self.assertRaises(ValueError):
            attributes.set("", "value")
        with self.assertRaises(ValueError):
            attributes.set("attr", "")

        # set value of non-existing key
        attributes.set("attr", "value")
        self.assertEqual(len(list(attributes.items())), VERSION_COUNT + 1)
        self.assertEqual(attributes.get("attr"), "value")

        # update value of existing key
        with self.assertRaises(ButlerAttributeExistsError):
            attributes.set("attr", "value2")

        attributes.set("attr", "value2", force=True)
        self.assertEqual(len(list(attributes.items())), VERSION_COUNT + 1)
        self.assertEqual(attributes.get("attr"), "value2")

        # delete existing key
        self.assertTrue(attributes.delete("attr"))
        self.assertEqual(len(list(attributes.items())), VERSION_COUNT)

        # delete non-existing key
        self.assertFalse(attributes.delete("non-attr"))

        # store a bunch of keys and get the list back
        data = [
            ("version.core", "1.2.3"),
            ("version.dimensions", "3.2.1"),
            ("config.managers.opaque", "ByNameOpaqueTableStorageManager"),
        ]
        for key, value in data:
            attributes.set(key, value)
        items = dict(attributes.items())
        for key, value in data:
            self.assertEqual(items[key], value)