# NOTE: this listing was recovered from an HTML coverage report
# (tests/test_simpleButler.py, 15% line coverage at capture time);
# viewer hot-key chrome and per-line numbering have been stripped.
# This file is part of daf_butler.
#
# Developed for the LSST Data Management System.
# This product includes software developed by the LSST Project
# (http://www.lsst.org).
# See the COPYRIGHT file at the top-level directory of this distribution
# for details of code ownership.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
22from __future__ import annotations
24import os
25import tempfile
26from typing import Any
27import unittest
28import uuid
30try:
31 import numpy as np
32except ImportError:
33 np = None
35import astropy.time
37from lsst.daf.butler import (
38 Butler,
39 ButlerConfig,
40 CollectionType,
41 DatasetRef,
42 DatasetType,
43 Registry,
44 Timespan,
45)
46from lsst.daf.butler.registry import RegistryConfig, RegistryDefaults, ConflictingDefinitionError
47from lsst.daf.butler.tests import DatastoreMock
48from lsst.daf.butler.tests.utils import makeTestTempDir, removeTestTempDir
# Absolute path to the directory holding this test module; test data files
# are resolved relative to it (data/registry/*.yaml).
TESTDIR = os.path.abspath(os.path.dirname(__file__))
class SimpleButlerTestCase(unittest.TestCase):
    """Tests for butler (including import/export functionality) that should not
    depend on the Registry Database backend or Datastore implementation, and
    can instead utilize an in-memory SQLite Registry and a mocked Datastore.
    """

    # Fully-qualified name of the dataset-record storage manager class to
    # configure into the registry; subclasses override this to test the
    # UUID-based manager.
    datasetsManager = \
        "lsst.daf.butler.registry.datasets.byDimensions.ByDimensionsDatasetRecordStorageManager"
    # Name of the YAML file (under data/registry) whose datasets are imported.
    datasetsImportFile = "datasets.yaml"
    # Python type of the dataset IDs this manager produces (int or uuid.UUID).
    datasetsIdType = int
65 def setUp(self):
66 self.root = makeTestTempDir(TESTDIR)
68 def tearDown(self):
69 removeTestTempDir(self.root)
71 def makeButler(self, **kwargs: Any) -> Butler:
72 """Return new Butler instance on each call.
73 """
74 config = ButlerConfig()
76 # make separate temporary directory for registry of this instance
77 tmpdir = tempfile.mkdtemp(dir=self.root)
78 config["registry", "db"] = f"sqlite:///{tmpdir}/gen3.sqlite3"
79 config["registry", "managers", "datasets"] = self.datasetsManager
80 config["root"] = self.root
82 # have to make a registry first
83 registryConfig = RegistryConfig(config.get("registry"))
84 Registry.createFromConfig(registryConfig)
86 butler = Butler(config, **kwargs)
87 DatastoreMock.apply(butler)
88 return butler
90 def comparableRef(self, ref: DatasetRef) -> DatasetRef:
91 """Return a DatasetRef that can be compared to a DatasetRef from
92 other repository.
94 For repositories that do not support round-trip of ID values this
95 method returns unresolved DatasetRef, for round-trip-safe repos it
96 returns unchanged ref.
97 """
98 return ref if self.datasetsIdType is uuid.UUID else ref.unresolved()
100 def testReadBackwardsCompatibility(self):
101 """Test that we can read an export file written by a previous version
102 and commit to the daf_butler git repo.
104 Notes
105 -----
106 At present this export file includes only dimension data, not datasets,
107 which greatly limits the usefulness of this test. We should address
108 this at some point, but I think it's best to wait for the changes to
109 the export format required for CALIBRATION collections to land.
110 """
111 butler = self.makeButler(writeable=True)
112 butler.import_(filename=os.path.join(TESTDIR, "data", "registry", "hsc-rc2-subset.yaml"))
113 # Spot-check a few things, but the most important test is just that
114 # the above does not raise.
115 self.assertGreaterEqual(
116 set(record.id for record in butler.registry.queryDimensionRecords("detector", instrument="HSC")),
117 set(range(104)), # should have all science CCDs; may have some focus ones.
118 )
119 self.assertGreaterEqual(
120 {
121 (record.id, record.physical_filter)
122 for record in butler.registry.queryDimensionRecords("visit", instrument="HSC")
123 },
124 {
125 (27136, 'HSC-Z'),
126 (11694, 'HSC-G'),
127 (23910, 'HSC-R'),
128 (11720, 'HSC-Y'),
129 (23900, 'HSC-R'),
130 (22646, 'HSC-Y'),
131 (1248, 'HSC-I'),
132 (19680, 'HSC-I'),
133 (1240, 'HSC-I'),
134 (424, 'HSC-Y'),
135 (19658, 'HSC-I'),
136 (344, 'HSC-Y'),
137 (1218, 'HSC-R'),
138 (1190, 'HSC-Z'),
139 (23718, 'HSC-R'),
140 (11700, 'HSC-G'),
141 (26036, 'HSC-G'),
142 (23872, 'HSC-R'),
143 (1170, 'HSC-Z'),
144 (1876, 'HSC-Y'),
145 }
146 )
148 def testDatasetTransfers(self):
149 """Test exporting all datasets from a repo and then importing them all
150 back in again.
151 """
152 # Import data to play with.
153 butler1 = self.makeButler(writeable=True)
154 butler1.import_(filename=os.path.join(TESTDIR, "data", "registry", "base.yaml"))
155 butler1.import_(filename=os.path.join(TESTDIR, "data", "registry", self.datasetsImportFile))
156 with tempfile.NamedTemporaryFile(mode='w', suffix=".yaml") as file:
157 # Export all datasets.
158 with butler1.export(filename=file.name) as exporter:
159 exporter.saveDatasets(
160 butler1.registry.queryDatasets(..., collections=...)
161 )
162 # Import it all again.
163 butler2 = self.makeButler(writeable=True)
164 butler2.import_(filename=file.name)
165 datasets1 = list(butler1.registry.queryDatasets(..., collections=...))
166 datasets2 = list(butler2.registry.queryDatasets(..., collections=...))
167 self.assertTrue(all(isinstance(ref.id, self.datasetsIdType) for ref in datasets1))
168 self.assertTrue(all(isinstance(ref.id, self.datasetsIdType) for ref in datasets2))
169 self.assertCountEqual(
170 [self.comparableRef(ref) for ref in datasets1],
171 [self.comparableRef(ref) for ref in datasets2],
172 )
174 def testDatasetImportTwice(self):
175 """Test exporting all datasets from a repo and then importing them all
176 back in again twice.
177 """
178 if self.datasetsIdType is not uuid.UUID:
179 self.skipTest("This test can only work for UUIDs")
180 # Import data to play with.
181 butler1 = self.makeButler(writeable=True)
182 butler1.import_(filename=os.path.join(TESTDIR, "data", "registry", "base.yaml"))
183 butler1.import_(filename=os.path.join(TESTDIR, "data", "registry", self.datasetsImportFile))
184 with tempfile.NamedTemporaryFile(mode='w', suffix=".yaml", delete=False) as file:
185 # Export all datasets.
186 with butler1.export(filename=file.name) as exporter:
187 exporter.saveDatasets(
188 butler1.registry.queryDatasets(..., collections=...)
189 )
190 butler2 = self.makeButler(writeable=True)
191 # Import it once.
192 butler2.import_(filename=file.name)
193 # Import it again, but ignore all dimensions
194 dimensions = set(
195 dimension.name for dimension in butler2.registry.dimensions.getStaticDimensions())
196 butler2.import_(filename=file.name, skip_dimensions=dimensions)
197 datasets1 = list(butler1.registry.queryDatasets(..., collections=...))
198 datasets2 = list(butler2.registry.queryDatasets(..., collections=...))
199 self.assertTrue(all(isinstance(ref.id, self.datasetsIdType) for ref in datasets1))
200 self.assertTrue(all(isinstance(ref.id, self.datasetsIdType) for ref in datasets2))
201 self.assertCountEqual(
202 [self.comparableRef(ref) for ref in datasets1],
203 [self.comparableRef(ref) for ref in datasets2],
204 )
206 def testDatasetImportReuseIds(self):
207 """Test for import that should preserve dataset IDs.
209 This test assumes that dataset IDs in datasets YAML are different from
210 what auto-incremental insert would produce.
211 """
212 if self.datasetsIdType is not int:
213 self.skipTest("This test can only work for UUIDs")
214 # Import data to play with.
215 butler = self.makeButler(writeable=True)
216 butler.import_(filename=os.path.join(TESTDIR, "data", "registry", "base.yaml"))
217 filename = os.path.join(TESTDIR, "data", "registry", self.datasetsImportFile)
218 butler.import_(filename=filename, reuseIds=True)
219 datasets = list(butler.registry.queryDatasets(..., collections=...))
220 self.assertTrue(all(isinstance(ref.id, self.datasetsIdType) for ref in datasets))
221 # IDs are copied from YAML, list needs to be updated if file contents
222 # is changed.
223 self.assertCountEqual(
224 [ref.id for ref in datasets],
225 [1001, 1002, 1003, 1010, 1020, 1030, 2001, 2002, 2003, 2010, 2020, 2030, 2040],
226 )
228 # Try once again, it will raise
229 with self.assertRaises(ConflictingDefinitionError):
230 butler.import_(filename=filename, reuseIds=True)
232 def testCollectionTransfers(self):
233 """Test exporting and then importing collections of various types.
234 """
235 # Populate a registry with some datasets.
236 butler1 = self.makeButler(writeable=True)
237 butler1.import_(filename=os.path.join(TESTDIR, "data", "registry", "base.yaml"))
238 butler1.import_(filename=os.path.join(TESTDIR, "data", "registry", self.datasetsImportFile))
239 registry1 = butler1.registry
240 # Add some more collections.
241 registry1.registerRun("run1")
242 registry1.registerCollection("tag1", CollectionType.TAGGED)
243 registry1.registerCollection("calibration1", CollectionType.CALIBRATION)
244 registry1.registerCollection("chain1", CollectionType.CHAINED)
245 registry1.registerCollection("chain2", CollectionType.CHAINED)
246 registry1.setCollectionChain("chain1", ["tag1", "run1", "chain2"])
247 registry1.setCollectionChain("chain2", ["calibration1", "run1"])
248 # Associate some datasets into the TAGGED and CALIBRATION collections.
249 flats1 = list(registry1.queryDatasets("flat", collections=...))
250 registry1.associate("tag1", flats1)
251 t1 = astropy.time.Time('2020-01-01T01:00:00', format="isot", scale="tai")
252 t2 = astropy.time.Time('2020-01-01T02:00:00', format="isot", scale="tai")
253 t3 = astropy.time.Time('2020-01-01T03:00:00', format="isot", scale="tai")
254 bias2a = registry1.findDataset("bias", instrument="Cam1", detector=2, collections="imported_g")
255 bias3a = registry1.findDataset("bias", instrument="Cam1", detector=3, collections="imported_g")
256 bias2b = registry1.findDataset("bias", instrument="Cam1", detector=2, collections="imported_r")
257 bias3b = registry1.findDataset("bias", instrument="Cam1", detector=3, collections="imported_r")
258 registry1.certify("calibration1", [bias2a, bias3a], Timespan(t1, t2))
259 registry1.certify("calibration1", [bias2b], Timespan(t2, None))
260 registry1.certify("calibration1", [bias3b], Timespan(t2, t3))
262 with tempfile.NamedTemporaryFile(mode='w', suffix=".yaml") as file:
263 # Export all collections, and some datasets.
264 with butler1.export(filename=file.name) as exporter:
265 # Sort results to put chain1 before chain2, which is
266 # intentionally not topological order.
267 for collection in sorted(registry1.queryCollections()):
268 exporter.saveCollection(collection)
269 exporter.saveDatasets(flats1)
270 exporter.saveDatasets([bias2a, bias2b, bias3a, bias3b])
271 # Import them into a new registry.
272 butler2 = self.makeButler(writeable=True)
273 butler2.import_(filename=file.name)
274 registry2 = butler2.registry
275 # Check that it all round-tripped, starting with the collections
276 # themselves.
277 self.assertIs(registry2.getCollectionType("run1"), CollectionType.RUN)
278 self.assertIs(registry2.getCollectionType("tag1"), CollectionType.TAGGED)
279 self.assertIs(registry2.getCollectionType("calibration1"), CollectionType.CALIBRATION)
280 self.assertIs(registry2.getCollectionType("chain1"), CollectionType.CHAINED)
281 self.assertIs(registry2.getCollectionType("chain2"), CollectionType.CHAINED)
282 self.assertEqual(
283 list(registry2.getCollectionChain("chain1")),
284 ["tag1", "run1", "chain2"],
285 )
286 self.assertEqual(
287 list(registry2.getCollectionChain("chain2")),
288 ["calibration1", "run1"],
289 )
290 # Check that tag collection contents are the same.
291 self.maxDiff = None
292 self.assertCountEqual(
293 [self.comparableRef(ref) for ref in registry1.queryDatasets(..., collections="tag1")],
294 [self.comparableRef(ref) for ref in registry2.queryDatasets(..., collections="tag1")],
295 )
296 # Check that calibration collection contents are the same.
297 self.assertCountEqual(
298 [(self.comparableRef(assoc.ref), assoc.timespan)
299 for assoc in registry1.queryDatasetAssociations("bias", collections="calibration1")],
300 [(self.comparableRef(assoc.ref), assoc.timespan)
301 for assoc in registry2.queryDatasetAssociations("bias", collections="calibration1")],
302 )
304 def testButlerGet(self):
305 """Test that butler.get can work with different variants."""
307 # Import data to play with.
308 butler = self.makeButler(writeable=True)
309 butler.import_(filename=os.path.join(TESTDIR, "data", "registry", "base.yaml"))
310 butler.import_(filename=os.path.join(TESTDIR, "data", "registry", self.datasetsImportFile))
312 # Find the DatasetRef for a flat
313 coll = "imported_g"
314 flat2g = butler.registry.findDataset("flat", instrument="Cam1", detector=2, physical_filter="Cam1-G",
315 collections=coll)
317 # Create a numpy integer to check that works fine
318 detector_np = np.int64(2) if np else 2
319 print(type(detector_np))
321 # Try to get it using different variations of dataId + keyword
322 # arguments
323 # Note that instrument.class_name does not work
324 variants = (
325 (None, {"instrument": "Cam1", "detector": 2, "physical_filter": "Cam1-G"}),
326 (None, {"instrument": "Cam1", "detector": detector_np, "physical_filter": "Cam1-G"}),
327 ({"instrument": "Cam1", "detector": 2, "physical_filter": "Cam1-G"}, {}),
328 ({"instrument": "Cam1", "detector": detector_np, "physical_filter": "Cam1-G"}, {}),
329 ({"instrument": "Cam1", "detector": 2}, {"physical_filter": "Cam1-G"}),
330 ({"detector.full_name": "Ab"}, {"instrument": "Cam1", "physical_filter": "Cam1-G"}),
331 ({"full_name": "Ab"}, {"instrument": "Cam1", "physical_filter": "Cam1-G"}),
332 (None, {"full_name": "Ab", "instrument": "Cam1", "physical_filter": "Cam1-G"}),
333 ({"name_in_raft": "b", "raft": "A"}, {"instrument": "Cam1", "physical_filter": "Cam1-G"}),
334 ({"name_in_raft": "b"}, {"raft": "A", "instrument": "Cam1", "physical_filter": "Cam1-G"}),
335 (None, {"name_in_raft": "b", "raft": "A", "instrument": "Cam1", "physical_filter": "Cam1-G"}),
336 ({"detector.name_in_raft": "b", "detector.raft": "A"},
337 {"instrument": "Cam1", "physical_filter": "Cam1-G"}),
338 ({"detector.name_in_raft": "b", "detector.raft": "A",
339 "instrument": "Cam1", "physical_filter": "Cam1-G"}, {}),
340 )
342 for dataId, kwds in variants:
343 try:
344 flat_id, _ = butler.get("flat", dataId=dataId, collections=coll, **kwds)
345 except Exception as e:
346 raise type(e)(f"{str(e)}: dataId={dataId}, kwds={kwds}") from e
347 self.assertEqual(flat_id, flat2g.id, msg=f"DataId: {dataId}, kwds: {kwds}")
349 def testGetCalibration(self):
350 """Test that `Butler.get` can be used to fetch from
351 `~CollectionType.CALIBRATION` collections if the data ID includes
352 extra dimensions with temporal information.
353 """
354 # Import data to play with.
355 butler = self.makeButler(writeable=True)
356 butler.import_(filename=os.path.join(TESTDIR, "data", "registry", "base.yaml"))
357 butler.import_(filename=os.path.join(TESTDIR, "data", "registry", self.datasetsImportFile))
358 # Certify some biases into a CALIBRATION collection.
359 registry = butler.registry
360 registry.registerCollection("calibs", CollectionType.CALIBRATION)
361 t1 = astropy.time.Time('2020-01-01T01:00:00', format="isot", scale="tai")
362 t2 = astropy.time.Time('2020-01-01T02:00:00', format="isot", scale="tai")
363 t3 = astropy.time.Time('2020-01-01T03:00:00', format="isot", scale="tai")
364 bias2a = registry.findDataset("bias", instrument="Cam1", detector=2, collections="imported_g")
365 bias3a = registry.findDataset("bias", instrument="Cam1", detector=3, collections="imported_g")
366 bias2b = registry.findDataset("bias", instrument="Cam1", detector=2, collections="imported_r")
367 bias3b = registry.findDataset("bias", instrument="Cam1", detector=3, collections="imported_r")
368 registry.certify("calibs", [bias2a, bias3a], Timespan(t1, t2))
369 registry.certify("calibs", [bias2b], Timespan(t2, None))
370 registry.certify("calibs", [bias3b], Timespan(t2, t3))
371 # Insert some exposure dimension data.
372 registry.insertDimensionData(
373 "exposure",
374 {
375 "instrument": "Cam1",
376 "id": 3,
377 "obs_id": "three",
378 "timespan": Timespan(t1, t2),
379 "physical_filter": "Cam1-G",
380 "day_obs": 20201114,
381 "seq_num": 55,
382 },
383 {
384 "instrument": "Cam1",
385 "id": 4,
386 "obs_id": "four",
387 "timespan": Timespan(t2, t3),
388 "physical_filter": "Cam1-G",
389 "day_obs": 20211114,
390 "seq_num": 42,
391 },
392 )
393 # Get some biases from raw-like data IDs.
394 bias2a_id, _ = butler.get("bias", {"instrument": "Cam1", "exposure": 3, "detector": 2},
395 collections="calibs")
396 self.assertEqual(bias2a_id, bias2a.id)
397 bias3b_id, _ = butler.get("bias", {"instrument": "Cam1", "exposure": 4, "detector": 3},
398 collections="calibs")
399 self.assertEqual(bias3b_id, bias3b.id)
401 # Get using the kwarg form
402 bias3b_id, _ = butler.get("bias",
403 instrument="Cam1", exposure=4, detector=3,
404 collections="calibs")
405 self.assertEqual(bias3b_id, bias3b.id)
407 # Do it again but using the record information
408 bias2a_id, _ = butler.get("bias", {"instrument": "Cam1", "exposure.obs_id": "three",
409 "detector.full_name": "Ab"},
410 collections="calibs")
411 self.assertEqual(bias2a_id, bias2a.id)
412 bias3b_id, _ = butler.get("bias", {"exposure.obs_id": "four",
413 "detector.full_name": "Ba"},
414 collections="calibs", instrument="Cam1")
415 self.assertEqual(bias3b_id, bias3b.id)
417 # And again but this time using the alternate value rather than
418 # the primary.
419 bias3b_id, _ = butler.get("bias", {"exposure": "four",
420 "detector": "Ba"},
421 collections="calibs", instrument="Cam1")
422 self.assertEqual(bias3b_id, bias3b.id)
424 # And again but this time using the alternate value rather than
425 # the primary and do it in the keyword arguments.
426 bias3b_id, _ = butler.get("bias",
427 exposure="four", detector="Ba",
428 collections="calibs", instrument="Cam1")
429 self.assertEqual(bias3b_id, bias3b.id)
431 # Now with implied record columns
432 bias3b_id, _ = butler.get("bias", day_obs=20211114, seq_num=42,
433 raft="B", name_in_raft="a",
434 collections="calibs", instrument="Cam1")
435 self.assertEqual(bias3b_id, bias3b.id)
437 def testRegistryDefaults(self):
438 """Test that we can default the collections and some data ID keys when
439 constructing a butler.
441 Many tests that use default run already exist in ``test_butler.py``, so
442 that isn't tested here. And while most of this functionality is
443 implemented in `Registry`, we test it here instead of
444 ``daf/butler/tests/registry.py`` because it shouldn't depend on the
445 database backend at all.
446 """
447 butler = self.makeButler(writeable=True)
448 butler.import_(filename=os.path.join(TESTDIR, "data", "registry", "base.yaml"))
449 butler.import_(filename=os.path.join(TESTDIR, "data", "registry", self.datasetsImportFile))
450 # Need to actually set defaults later, not at construction, because
451 # we need to import the instrument before we can use it as a default.
452 # Don't set a default instrument value for data IDs, because 'Cam1'
453 # should be inferred by virtue of that being the only value in the
454 # input collections.
455 butler.registry.defaults = RegistryDefaults(collections=["imported_g"])
456 # Use findDataset without collections or instrument.
457 ref = butler.registry.findDataset("flat", detector=2, physical_filter="Cam1-G")
458 # Do the same with Butler.get; this should ultimately invoke a lot of
459 # the same code, so it's a bit circular, but mostly we're checking that
460 # it works at all.
461 dataset_id, _ = butler.get("flat", detector=2, physical_filter="Cam1-G")
462 self.assertEqual(ref.id, dataset_id)
463 # Query for datasets. Test defaulting the data ID in both kwargs and
464 # in the WHERE expression.
465 queried_refs_1 = set(butler.registry.queryDatasets("flat", detector=2, physical_filter="Cam1-G"))
466 self.assertEqual({ref}, queried_refs_1)
467 queried_refs_2 = set(butler.registry.queryDatasets("flat",
468 where="detector=2 AND physical_filter='Cam1-G'"))
469 self.assertEqual({ref}, queried_refs_2)
470 # Query for data IDs with a dataset constraint.
471 queried_data_ids = set(butler.registry.queryDataIds({"instrument", "detector", "physical_filter"},
472 datasets={"flat"},
473 detector=2, physical_filter="Cam1-G"))
474 self.assertEqual({ref.dataId}, queried_data_ids)
475 # Add another instrument to the repo, and a dataset that uses it to
476 # the `imported_g` collection.
477 butler.registry.insertDimensionData("instrument", {"name": "Cam2"})
478 camera = DatasetType(
479 "camera",
480 dimensions=butler.registry.dimensions["instrument"].graph,
481 storageClass="Camera",
482 )
483 butler.registry.registerDatasetType(camera)
484 butler.registry.insertDatasets(camera, [{"instrument": "Cam2"}], run="imported_g")
485 # Initialize a new butler with `imported_g` as its default run.
486 # This should not have a default instrument, because there are two.
487 # Pass run instead of collections; this should set both.
488 butler2 = Butler(butler=butler, run="imported_g")
489 self.assertEqual(list(butler2.registry.defaults.collections), ["imported_g"])
490 self.assertEqual(butler2.registry.defaults.run, "imported_g")
491 self.assertFalse(butler2.registry.defaults.dataId)
492 # Initialize a new butler with an instrument default explicitly given.
493 # Set collections instead of run, which should then be None.
494 butler3 = Butler(butler=butler, collections=["imported_g"], instrument="Cam2")
495 self.assertEqual(list(butler3.registry.defaults.collections), ["imported_g"])
496 self.assertIsNone(butler3.registry.defaults.run, None)
497 self.assertEqual(butler3.registry.defaults.dataId.byName(), {"instrument": "Cam2"})
499 def testJson(self):
500 """Test JSON serialization mediated by registry.
501 """
502 butler = self.makeButler(writeable=True)
503 butler.import_(filename=os.path.join(TESTDIR, "data", "registry", "base.yaml"))
504 butler.import_(filename=os.path.join(TESTDIR, "data", "registry", self.datasetsImportFile))
505 # Need to actually set defaults later, not at construction, because
506 # we need to import the instrument before we can use it as a default.
507 # Don't set a default instrument value for data IDs, because 'Cam1'
508 # should be inferred by virtue of that being the only value in the
509 # input collections.
510 butler.registry.defaults = RegistryDefaults(collections=["imported_g"])
511 # Use findDataset without collections or instrument.
512 ref = butler.registry.findDataset("flat", detector=2, physical_filter="Cam1-G")
514 # Transform the ref and dataset type to and from JSON
515 # and check that it can be reconstructed properly
517 # Do it with the ref and a component ref in minimal and standard form
518 compRef = ref.makeComponentRef("wcs")
520 for test_item in (ref, ref.datasetType, compRef, compRef.datasetType):
521 for minimal in (False, True):
522 json_str = test_item.to_json(minimal=minimal)
523 from_json = type(test_item).from_json(json_str, registry=butler.registry)
524 self.assertEqual(from_json, test_item, msg=f"From JSON '{json_str}' using registry")
526 # for minimal=False case also do a test without registry
527 if not minimal:
528 from_json = type(test_item).from_json(json_str, universe=butler.registry.dimensions)
529 self.assertEqual(from_json, test_item, msg=f"From JSON '{json_str}' using universe")
531 def testJsonDimensionRecords(self):
532 # Dimension Records
533 butler = self.makeButler(writeable=True)
534 butler.import_(filename=os.path.join(TESTDIR, "data", "registry", "hsc-rc2-subset.yaml"))
536 for dimension in ("detector", "visit"):
537 records = butler.registry.queryDimensionRecords(dimension, instrument="HSC")
538 for r in records:
539 for minimal in (True, False):
540 json_str = r.to_json(minimal=minimal)
541 r_json = type(r).from_json(json_str, registry=butler.registry)
542 self.assertEqual(r_json, r)
543 # Also check equality of each of the components as dicts
544 self.assertEqual(r_json.toDict(), r.toDict())
class SimpleButlerUUIDTestCase(SimpleButlerTestCase):
    """Same as SimpleButlerTestCase but uses UUID-based datasets manager and
    loads datasets from YAML file with UUIDs.
    """

    datasetsManager = \
        "lsst.daf.butler.registry.datasets.byDimensions.ByDimensionsDatasetRecordStorageManagerUUID"
    datasetsImportFile = "datasets-uuid.yaml"
    datasetsIdType = uuid.UUID
class SimpleButlerMixedUUIDTestCase(SimpleButlerTestCase):
    """Same as SimpleButlerTestCase but uses UUID-based datasets manager and
    loads datasets from YAML file with integer IDs.
    """

    datasetsManager = \
        "lsst.daf.butler.registry.datasets.byDimensions.ByDimensionsDatasetRecordStorageManagerUUID"
    # Integer IDs in the import file are converted to UUIDs by this manager.
    datasetsImportFile = "datasets.yaml"
    datasetsIdType = uuid.UUID
if __name__ == "__main__":
    unittest.main()