Coverage for tests/test_simpleButler.py : 14%

# This file is part of daf_butler.
#
# Developed for the LSST Data Management System.
# This product includes software developed by the LSST Project
# (http://www.lsst.org).
# See the COPYRIGHT file at the top-level directory of this distribution
# for details of code ownership.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

from __future__ import annotations

import os
import tempfile
from typing import Any
import unittest
import uuid
import re

try:
    import numpy as np
except ImportError:
    np = None

import astropy.time

from lsst.daf.butler import (
    Butler,
    ButlerConfig,
    CollectionType,
    DatasetRef,
    DatasetType,
    Registry,
    Timespan,
)
from lsst.daf.butler.registry import RegistryConfig, RegistryDefaults, ConflictingDefinitionError
from lsst.daf.butler.tests import DatastoreMock
from lsst.daf.butler.tests.utils import makeTestTempDir, removeTestTempDir


TESTDIR = os.path.abspath(os.path.dirname(__file__))


class SimpleButlerTestCase(unittest.TestCase):
    """Tests for Butler (including import/export functionality) that should
    not depend on the Registry database backend or Datastore implementation,
    and can instead use an in-memory SQLite Registry and a mocked Datastore.
    """

    datasetsManager = \
        "lsst.daf.butler.registry.datasets.byDimensions.ByDimensionsDatasetRecordStorageManager"
    datasetsImportFile = "datasets.yaml"
    datasetsIdType = int
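
    # Subclasses at the bottom of this file override these three attributes
    # to run the same tests with the UUID-based datasets manager and/or a
    # datasets YAML file that stores UUIDs instead of integer IDs.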

    def setUp(self):
        self.root = makeTestTempDir(TESTDIR)

    def tearDown(self):
        removeTestTempDir(self.root)

    def makeButler(self, **kwargs: Any) -> Butler:
        """Return a new Butler instance on each call.
        """
        config = ButlerConfig()

        # Make a separate temporary directory for the registry of this
        # instance.
        tmpdir = tempfile.mkdtemp(dir=self.root)
        config["registry", "db"] = f"sqlite:///{tmpdir}/gen3.sqlite3"
        config["registry", "managers", "datasets"] = self.datasetsManager
        config["root"] = self.root

        # We have to create a registry first.
        registryConfig = RegistryConfig(config.get("registry"))
        Registry.createFromConfig(registryConfig)

        butler = Butler(config, **kwargs)
        DatastoreMock.apply(butler)
        return butler
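
    # Note: DatastoreMock.apply() stubs out the datastore, so no files are
    # ever read or written; as the tests below show, the mocked Butler.get
    # returns a tuple whose first element is the dataset ID (hence the
    # ``flat_id, _ = butler.get(...)`` unpacking used throughout).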

    def comparableRef(self, ref: DatasetRef) -> DatasetRef:
        """Return a DatasetRef that can be compared to a DatasetRef from
        another repository.

        For repositories that do not support round-tripping of ID values
        this method returns an unresolved DatasetRef; for round-trip-safe
        repositories it returns the ref unchanged.
        """
        return ref if self.datasetsIdType is uuid.UUID else ref.unresolved()
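
    # For example, the integer-ID manager assigns fresh auto-increment IDs on
    # import, so two repositories holding "the same" dataset can disagree on
    # its ID and only unresolved refs (dataset type + data ID, no ID) compare
    # equal; UUIDs survive export/import, so resolved refs can be compared
    # directly.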

    def testReadBackwardsCompatibility(self):
        """Test that we can read an export file written by a previous version
        and committed to the daf_butler git repo.

        Notes
        -----
        At present this export file includes only dimension data, not
        datasets, which greatly limits the usefulness of this test. We should
        address this at some point, but I think it's best to wait for the
        changes to the export format required for CALIBRATION collections to
        land.
        """
        butler = self.makeButler(writeable=True)
        butler.import_(filename=os.path.join(TESTDIR, "data", "registry", "hsc-rc2-subset.yaml"))
        # Spot-check a few things, but the most important test is just that
        # the above does not raise.
        self.assertGreaterEqual(
            set(record.id for record in butler.registry.queryDimensionRecords("detector", instrument="HSC")),
            set(range(104)),  # should have all science CCDs; may have some focus ones.
        )
        self.assertGreaterEqual(
            {
                (record.id, record.physical_filter)
                for record in butler.registry.queryDimensionRecords("visit", instrument="HSC")
            },
            {
                (27136, 'HSC-Z'),
                (11694, 'HSC-G'),
                (23910, 'HSC-R'),
                (11720, 'HSC-Y'),
                (23900, 'HSC-R'),
                (22646, 'HSC-Y'),
                (1248, 'HSC-I'),
                (19680, 'HSC-I'),
                (1240, 'HSC-I'),
                (424, 'HSC-Y'),
                (19658, 'HSC-I'),
                (344, 'HSC-Y'),
                (1218, 'HSC-R'),
                (1190, 'HSC-Z'),
                (23718, 'HSC-R'),
                (11700, 'HSC-G'),
                (26036, 'HSC-G'),
                (23872, 'HSC-R'),
                (1170, 'HSC-Z'),
                (1876, 'HSC-Y'),
            }
        )

    def testDatasetTransfers(self):
        """Test exporting all datasets from a repo and then importing them
        all back in again.
        """
        # Import data to play with.
        butler1 = self.makeButler(writeable=True)
        butler1.import_(filename=os.path.join(TESTDIR, "data", "registry", "base.yaml"))
        butler1.import_(filename=os.path.join(TESTDIR, "data", "registry", self.datasetsImportFile))
        with tempfile.NamedTemporaryFile(mode='w', suffix=".yaml") as file:
            # Export all datasets.
            with butler1.export(filename=file.name) as exporter:
                exporter.saveDatasets(
                    butler1.registry.queryDatasets(..., collections=...)
                )
            # Import it all again.
            butler2 = self.makeButler(writeable=True)
            butler2.import_(filename=file.name)
        datasets1 = list(butler1.registry.queryDatasets(..., collections=...))
        datasets2 = list(butler2.registry.queryDatasets(..., collections=...))
        self.assertTrue(all(isinstance(ref.id, self.datasetsIdType) for ref in datasets1))
        self.assertTrue(all(isinstance(ref.id, self.datasetsIdType) for ref in datasets2))
        self.assertCountEqual(
            [self.comparableRef(ref) for ref in datasets1],
            [self.comparableRef(ref) for ref in datasets2],
        )

    def testDatasetImportTwice(self):
        """Test exporting all datasets from a repo and then importing them
        all back in again twice.
        """
        if self.datasetsIdType is not uuid.UUID:
            self.skipTest("This test can only work for UUIDs")
        # Import data to play with.
        butler1 = self.makeButler(writeable=True)
        butler1.import_(filename=os.path.join(TESTDIR, "data", "registry", "base.yaml"))
        butler1.import_(filename=os.path.join(TESTDIR, "data", "registry", self.datasetsImportFile))
        with tempfile.NamedTemporaryFile(mode='w', suffix=".yaml", delete=False) as file:
            # Export all datasets.
            with butler1.export(filename=file.name) as exporter:
                exporter.saveDatasets(
                    butler1.registry.queryDatasets(..., collections=...)
                )
        butler2 = self.makeButler(writeable=True)
        # Import it once.
        butler2.import_(filename=file.name)
        # Import it again, but skip all dimensions, which were already
        # registered by the first import.
        dimensions = set(
            dimension.name for dimension in butler2.registry.dimensions.getStaticDimensions())
        butler2.import_(filename=file.name, skip_dimensions=dimensions)
        datasets1 = list(butler1.registry.queryDatasets(..., collections=...))
        datasets2 = list(butler2.registry.queryDatasets(..., collections=...))
        self.assertTrue(all(isinstance(ref.id, self.datasetsIdType) for ref in datasets1))
        self.assertTrue(all(isinstance(ref.id, self.datasetsIdType) for ref in datasets2))
        self.assertCountEqual(
            [self.comparableRef(ref) for ref in datasets1],
            [self.comparableRef(ref) for ref in datasets2],
        )

    def testDatasetImportReuseIds(self):
        """Test for import that should preserve dataset IDs.

        This test assumes that the dataset IDs in the datasets YAML file are
        different from what auto-incrementing inserts would produce.
        """
        if self.datasetsIdType is not int:
            self.skipTest("This test can only work for integer dataset IDs")
        # Import data to play with.
        butler = self.makeButler(writeable=True)
        butler.import_(filename=os.path.join(TESTDIR, "data", "registry", "base.yaml"))
        filename = os.path.join(TESTDIR, "data", "registry", self.datasetsImportFile)
        butler.import_(filename=filename, reuseIds=True)
        datasets = list(butler.registry.queryDatasets(..., collections=...))
        self.assertTrue(all(isinstance(ref.id, self.datasetsIdType) for ref in datasets))
        # IDs are copied from the YAML file; this list needs to be updated if
        # the file contents change.
        self.assertCountEqual(
            [ref.id for ref in datasets],
            [1001, 1002, 1003, 1010, 1020, 1030, 2001, 2002, 2003, 2010, 2020, 2030, 2040],
        )

        # Importing once again will raise because the datasets already exist.
        with self.assertRaises(ConflictingDefinitionError):
            butler.import_(filename=filename, reuseIds=True)

    def testCollectionTransfers(self):
        """Test exporting and then importing collections of various types.
        """
        # Populate a registry with some datasets.
        butler1 = self.makeButler(writeable=True)
        butler1.import_(filename=os.path.join(TESTDIR, "data", "registry", "base.yaml"))
        butler1.import_(filename=os.path.join(TESTDIR, "data", "registry", self.datasetsImportFile))
        registry1 = butler1.registry
        # Add some more collections.
        registry1.registerRun("run1")
        registry1.registerCollection("tag1", CollectionType.TAGGED)
        registry1.registerCollection("calibration1", CollectionType.CALIBRATION)
        registry1.registerCollection("chain1", CollectionType.CHAINED)
        registry1.registerCollection("chain2", CollectionType.CHAINED)
        registry1.setCollectionChain("chain1", ["tag1", "run1", "chain2"])
        registry1.setCollectionChain("chain2", ["calibration1", "run1"])
        # Associate some datasets into the TAGGED and CALIBRATION collections.
        flats1 = list(registry1.queryDatasets("flat", collections=...))
        registry1.associate("tag1", flats1)
        t1 = astropy.time.Time('2020-01-01T01:00:00', format="isot", scale="tai")
        t2 = astropy.time.Time('2020-01-01T02:00:00', format="isot", scale="tai")
        t3 = astropy.time.Time('2020-01-01T03:00:00', format="isot", scale="tai")
        bias2a = registry1.findDataset("bias", instrument="Cam1", detector=2, collections="imported_g")
        bias3a = registry1.findDataset("bias", instrument="Cam1", detector=3, collections="imported_g")
        bias2b = registry1.findDataset("bias", instrument="Cam1", detector=2, collections="imported_r")
        bias3b = registry1.findDataset("bias", instrument="Cam1", detector=3, collections="imported_r")
        registry1.certify("calibration1", [bias2a, bias3a], Timespan(t1, t2))
        registry1.certify("calibration1", [bias2b], Timespan(t2, None))
        registry1.certify("calibration1", [bias3b], Timespan(t2, t3))

        with tempfile.NamedTemporaryFile(mode='w', suffix=".yaml") as file:
            # Export all collections, and some datasets.
            with butler1.export(filename=file.name) as exporter:
                # Sort results to put chain1 before chain2, which is
                # intentionally not topological order.
                for collection in sorted(registry1.queryCollections()):
                    exporter.saveCollection(collection)
                exporter.saveDatasets(flats1)
                exporter.saveDatasets([bias2a, bias2b, bias3a, bias3b])
            # Import them into a new registry.
            butler2 = self.makeButler(writeable=True)
            butler2.import_(filename=file.name)
        registry2 = butler2.registry
        # Check that it all round-tripped, starting with the collections
        # themselves.
        self.assertIs(registry2.getCollectionType("run1"), CollectionType.RUN)
        self.assertIs(registry2.getCollectionType("tag1"), CollectionType.TAGGED)
        self.assertIs(registry2.getCollectionType("calibration1"), CollectionType.CALIBRATION)
        self.assertIs(registry2.getCollectionType("chain1"), CollectionType.CHAINED)
        self.assertIs(registry2.getCollectionType("chain2"), CollectionType.CHAINED)
        self.assertEqual(
            list(registry2.getCollectionChain("chain1")),
            ["tag1", "run1", "chain2"],
        )
        self.assertEqual(
            list(registry2.getCollectionChain("chain2")),
            ["calibration1", "run1"],
        )
        # Check that tag collection contents are the same.
        self.maxDiff = None
        self.assertCountEqual(
            [self.comparableRef(ref) for ref in registry1.queryDatasets(..., collections="tag1")],
            [self.comparableRef(ref) for ref in registry2.queryDatasets(..., collections="tag1")],
        )
        # Check that calibration collection contents are the same.
        self.assertCountEqual(
            [(self.comparableRef(assoc.ref), assoc.timespan)
             for assoc in registry1.queryDatasetAssociations("bias", collections="calibration1")],
            [(self.comparableRef(assoc.ref), assoc.timespan)
             for assoc in registry2.queryDatasetAssociations("bias", collections="calibration1")],
        )

    def testButlerGet(self):
        """Test that butler.get can work with different variants."""

        # Import data to play with.
        butler = self.makeButler(writeable=True)
        butler.import_(filename=os.path.join(TESTDIR, "data", "registry", "base.yaml"))
        butler.import_(filename=os.path.join(TESTDIR, "data", "registry", self.datasetsImportFile))

        # Find the DatasetRef for a flat.
        coll = "imported_g"
        flat2g = butler.registry.findDataset("flat", instrument="Cam1", detector=2, physical_filter="Cam1-G",
                                             collections=coll)

        # Create a numpy integer to check that it works fine.
        detector_np = np.int64(2) if np else 2
        print(type(detector_np))

        # Try to get it using different variations of dataId + keyword
        # arguments.
        # Note that instrument.class_name does not work.
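        # The variants below exercise: plain dimension values, a numpy
        # integer, record fields with an explicit dimension prefix
        # (e.g. "detector.full_name"), the same fields without the prefix,
        # and each of these split between the dataId mapping and keyword
        # arguments.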
        variants = (
            (None, {"instrument": "Cam1", "detector": 2, "physical_filter": "Cam1-G"}),
            (None, {"instrument": "Cam1", "detector": detector_np, "physical_filter": "Cam1-G"}),
            ({"instrument": "Cam1", "detector": 2, "physical_filter": "Cam1-G"}, {}),
            ({"instrument": "Cam1", "detector": detector_np, "physical_filter": "Cam1-G"}, {}),
            ({"instrument": "Cam1", "detector": 2}, {"physical_filter": "Cam1-G"}),
            ({"detector.full_name": "Ab"}, {"instrument": "Cam1", "physical_filter": "Cam1-G"}),
            ({"full_name": "Ab"}, {"instrument": "Cam1", "physical_filter": "Cam1-G"}),
            (None, {"full_name": "Ab", "instrument": "Cam1", "physical_filter": "Cam1-G"}),
            ({"name_in_raft": "b", "raft": "A"}, {"instrument": "Cam1", "physical_filter": "Cam1-G"}),
            ({"name_in_raft": "b"}, {"raft": "A", "instrument": "Cam1", "physical_filter": "Cam1-G"}),
            (None, {"name_in_raft": "b", "raft": "A", "instrument": "Cam1", "physical_filter": "Cam1-G"}),
            ({"detector.name_in_raft": "b", "detector.raft": "A"},
             {"instrument": "Cam1", "physical_filter": "Cam1-G"}),
            ({"detector.name_in_raft": "b", "detector.raft": "A",
              "instrument": "Cam1", "physical_filter": "Cam1-G"}, {}),
        )

        for dataId, kwds in variants:
            try:
                flat_id, _ = butler.get("flat", dataId=dataId, collections=coll, **kwds)
            except Exception as e:
                raise type(e)(f"{str(e)}: dataId={dataId}, kwds={kwds}") from e
            self.assertEqual(flat_id, flat2g.id, msg=f"DataId: {dataId}, kwds: {kwds}")

    def testGetCalibration(self):
        """Test that `Butler.get` can be used to fetch from
        `~CollectionType.CALIBRATION` collections if the data ID includes
        extra dimensions with temporal information.
        """
        # Import data to play with.
        butler = self.makeButler(writeable=True)
        butler.import_(filename=os.path.join(TESTDIR, "data", "registry", "base.yaml"))
        butler.import_(filename=os.path.join(TESTDIR, "data", "registry", self.datasetsImportFile))
        # Certify some biases into a CALIBRATION collection.
        registry = butler.registry
        registry.registerCollection("calibs", CollectionType.CALIBRATION)
        t1 = astropy.time.Time('2020-01-01T01:00:00', format="isot", scale="tai")
        t2 = astropy.time.Time('2020-01-01T02:00:00', format="isot", scale="tai")
        t3 = astropy.time.Time('2020-01-01T03:00:00', format="isot", scale="tai")
        bias2a = registry.findDataset("bias", instrument="Cam1", detector=2, collections="imported_g")
        bias3a = registry.findDataset("bias", instrument="Cam1", detector=3, collections="imported_g")
        bias2b = registry.findDataset("bias", instrument="Cam1", detector=2, collections="imported_r")
        bias3b = registry.findDataset("bias", instrument="Cam1", detector=3, collections="imported_r")
        registry.certify("calibs", [bias2a, bias3a], Timespan(t1, t2))
        registry.certify("calibs", [bias2b], Timespan(t2, None))
        registry.certify("calibs", [bias3b], Timespan(t2, t3))
        # Insert some exposure dimension data.
        registry.insertDimensionData(
            "exposure",
            {
                "instrument": "Cam1",
                "id": 3,
                "obs_id": "three",
                "timespan": Timespan(t1, t2),
                "physical_filter": "Cam1-G",
                "day_obs": 20201114,
                "seq_num": 55,
            },
            {
                "instrument": "Cam1",
                "id": 4,
                "obs_id": "four",
                "timespan": Timespan(t2, t3),
                "physical_filter": "Cam1-G",
                "day_obs": 20211114,
                "seq_num": 42,
            },
        )
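        # Exposure 3 spans [t1, t2) and so falls in the first certification
        # epoch (bias2a, bias3a); exposure 4 spans [t2, t3) and falls in the
        # later epochs (bias2b, bias3b). The lookups below should resolve
        # accordingly.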
        # Get some biases from raw-like data IDs.
        bias2a_id, _ = butler.get("bias", {"instrument": "Cam1", "exposure": 3, "detector": 2},
                                  collections="calibs")
        self.assertEqual(bias2a_id, bias2a.id)
        bias3b_id, _ = butler.get("bias", {"instrument": "Cam1", "exposure": 4, "detector": 3},
                                  collections="calibs")
        self.assertEqual(bias3b_id, bias3b.id)

        # Get using the kwarg form.
        bias3b_id, _ = butler.get("bias",
                                  instrument="Cam1", exposure=4, detector=3,
                                  collections="calibs")
        self.assertEqual(bias3b_id, bias3b.id)

        # Do it again but using the record information.
        bias2a_id, _ = butler.get("bias", {"instrument": "Cam1", "exposure.obs_id": "three",
                                           "detector.full_name": "Ab"},
                                  collections="calibs")
        self.assertEqual(bias2a_id, bias2a.id)
        bias3b_id, _ = butler.get("bias", {"exposure.obs_id": "four",
                                           "detector.full_name": "Ba"},
                                  collections="calibs", instrument="Cam1")
        self.assertEqual(bias3b_id, bias3b.id)

        # And again, but this time using the alternate value rather than
        # the primary.
        bias3b_id, _ = butler.get("bias", {"exposure": "four",
                                           "detector": "Ba"},
                                  collections="calibs", instrument="Cam1")
        self.assertEqual(bias3b_id, bias3b.id)

        # And again, but this time using the alternate value rather than
        # the primary and passing it via keyword arguments.
        bias3b_id, _ = butler.get("bias",
                                  exposure="four", detector="Ba",
                                  collections="calibs", instrument="Cam1")
        self.assertEqual(bias3b_id, bias3b.id)

        # Now with implied record columns.
        bias3b_id, _ = butler.get("bias", day_obs=20211114, seq_num=42,
                                  raft="B", name_in_raft="a",
                                  collections="calibs", instrument="Cam1")
        self.assertEqual(bias3b_id, bias3b.id)

    def testRegistryDefaults(self):
        """Test that we can default the collections and some data ID keys
        when constructing a butler.

        Many tests that use the default run already exist in
        ``test_butler.py``, so that isn't tested here. And while most of this
        functionality is implemented in `Registry`, we test it here instead
        of ``daf/butler/tests/registry.py`` because it shouldn't depend on
        the database backend at all.
        """
        butler = self.makeButler(writeable=True)
        butler.import_(filename=os.path.join(TESTDIR, "data", "registry", "base.yaml"))
        butler.import_(filename=os.path.join(TESTDIR, "data", "registry", self.datasetsImportFile))
        # Need to actually set defaults later, not at construction, because
        # we need to import the instrument before we can use it as a default.
        # Don't set a default instrument value for data IDs, because 'Cam1'
        # should be inferred by virtue of that being the only value in the
        # input collections.
        butler.registry.defaults = RegistryDefaults(collections=["imported_g"])
        # Use findDataset without collections or instrument.
        ref = butler.registry.findDataset("flat", detector=2, physical_filter="Cam1-G")
        # Do the same with Butler.get; this should ultimately invoke a lot of
        # the same code, so it's a bit circular, but mostly we're checking
        # that it works at all.
        dataset_id, _ = butler.get("flat", detector=2, physical_filter="Cam1-G")
        self.assertEqual(ref.id, dataset_id)
        # Query for datasets. Test defaulting the data ID in both kwargs and
        # in the WHERE expression.
        queried_refs_1 = set(butler.registry.queryDatasets("flat", detector=2, physical_filter="Cam1-G"))
        self.assertEqual({ref}, queried_refs_1)
        queried_refs_2 = set(butler.registry.queryDatasets("flat",
                                                           where="detector=2 AND physical_filter='Cam1-G'"))
        self.assertEqual({ref}, queried_refs_2)
        # Query for data IDs with a dataset constraint.
        queried_data_ids = set(butler.registry.queryDataIds({"instrument", "detector", "physical_filter"},
                                                            datasets={"flat"},
                                                            detector=2, physical_filter="Cam1-G"))
        self.assertEqual({ref.dataId}, queried_data_ids)
        # Add another instrument to the repo, and a dataset that uses it to
        # the `imported_g` collection.
        butler.registry.insertDimensionData("instrument", {"name": "Cam2"})
        camera = DatasetType(
            "camera",
            dimensions=butler.registry.dimensions["instrument"].graph,
            storageClass="Camera",
        )
        butler.registry.registerDatasetType(camera)
        butler.registry.insertDatasets(camera, [{"instrument": "Cam2"}], run="imported_g")
        # Initialize a new butler with `imported_g` as its default run.
        # This should not have a default instrument, because there are two.
        # Pass run instead of collections; this should set both.
        butler2 = Butler(butler=butler, run="imported_g")
        self.assertEqual(list(butler2.registry.defaults.collections), ["imported_g"])
        self.assertEqual(butler2.registry.defaults.run, "imported_g")
        self.assertFalse(butler2.registry.defaults.dataId)
        # Initialize a new butler with an instrument default explicitly given.
        # Set collections instead of run, which should then be None.
        butler3 = Butler(butler=butler, collections=["imported_g"], instrument="Cam2")
        self.assertEqual(list(butler3.registry.defaults.collections), ["imported_g"])
        self.assertIsNone(butler3.registry.defaults.run)
        self.assertEqual(butler3.registry.defaults.dataId.byName(), {"instrument": "Cam2"})

    def testJson(self):
        """Test JSON serialization mediated by registry.
        """
        butler = self.makeButler(writeable=True)
        butler.import_(filename=os.path.join(TESTDIR, "data", "registry", "base.yaml"))
        butler.import_(filename=os.path.join(TESTDIR, "data", "registry", self.datasetsImportFile))
        # Need to actually set defaults later, not at construction, because
        # we need to import the instrument before we can use it as a default.
        # Don't set a default instrument value for data IDs, because 'Cam1'
        # should be inferred by virtue of that being the only value in the
        # input collections.
        butler.registry.defaults = RegistryDefaults(collections=["imported_g"])
        # Use findDataset without collections or instrument.
        ref = butler.registry.findDataset("flat", detector=2, physical_filter="Cam1-G")

        # Transform the ref and dataset type to and from JSON
        # and check that it can be reconstructed properly.

        # Do it with the ref and a component ref in minimal and standard form.
        compRef = ref.makeComponentRef("wcs")
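
        # A note on the two forms: the minimal serialization keeps only
        # identifiers and so needs a registry to be reconstructed, while the
        # standard form carries enough information to be rebuilt from just
        # the dimension universe (exercised for minimal=False below).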

        for test_item in (ref, ref.datasetType, compRef, compRef.datasetType):
            for minimal in (False, True):
                json_str = test_item.to_json(minimal=minimal)
                from_json = type(test_item).from_json(json_str, registry=butler.registry)
                self.assertEqual(from_json, test_item, msg=f"From JSON '{json_str}' using registry")

                # for minimal=False case also do a test without registry
                if not minimal:
                    from_json = type(test_item).from_json(json_str, universe=butler.registry.dimensions)
                    self.assertEqual(from_json, test_item, msg=f"From JSON '{json_str}' using universe")

    def testJsonDimensionRecords(self):
        # Dimension Records
        butler = self.makeButler(writeable=True)
        butler.import_(filename=os.path.join(TESTDIR, "data", "registry", "hsc-rc2-subset.yaml"))

        for dimension in ("detector", "visit"):
            records = butler.registry.queryDimensionRecords(dimension, instrument="HSC")
            for r in records:
                for minimal in (True, False):
                    json_str = r.to_json(minimal=minimal)
                    r_json = type(r).from_json(json_str, registry=butler.registry)
                    self.assertEqual(r_json, r)
                    # Also check equality of each of the components as dicts.
                    self.assertEqual(r_json.toDict(), r.toDict())

    def testWildcardQueries(self):
        """Test that different collection type queries work."""

        # Import data to play with.
        butler = self.makeButler(writeable=True)
        butler.import_(filename=os.path.join(TESTDIR, "data", "registry", "base.yaml"))

        # Create some collections.
        created = {"collection", "u/user/test", "coll3"}
        for collection in created:
            butler.registry.registerCollection(collection, type=CollectionType.RUN)

        collections = butler.registry.queryCollections()
        self.assertEqual(set(collections), created)
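
        # Each entry below pairs a collection-search expression with the set
        # of collections it is expected to match: exact names, the ``...``
        # wildcard, shell-style globs, compiled regular expressions, and
        # tuples mixing several of these forms.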
        expressions = (
            ("collection", {"collection"}),
            (..., created),
            ("*", created),
            (("collection", "*"), created),
            ("u/*", {"u/user/test"}),
            (re.compile("u.*"), {"u/user/test"}),
            (re.compile(".*oll.*"), {"collection", "coll3"}),
            ("*oll*", {"collection", "coll3"}),
            ((re.compile(r".*\d$"), "u/user/test"), {"coll3", "u/user/test"}),
            ("*[0-9]", {"coll3"}),
        )
        for expression, expected in expressions:
            result = butler.registry.queryCollections(expression)
            self.assertEqual(set(result), expected)


class SimpleButlerUUIDTestCase(SimpleButlerTestCase):
    """Same as SimpleButlerTestCase but uses a UUID-based datasets manager
    and loads datasets from a YAML file with UUIDs.
    """

    datasetsManager = \
        "lsst.daf.butler.registry.datasets.byDimensions.ByDimensionsDatasetRecordStorageManagerUUID"
    datasetsImportFile = "datasets-uuid.yaml"
    datasetsIdType = uuid.UUID


class SimpleButlerMixedUUIDTestCase(SimpleButlerTestCase):
    """Same as SimpleButlerTestCase but uses a UUID-based datasets manager
    while loading datasets from a YAML file with integer IDs.
    """

    datasetsManager = \
        "lsst.daf.butler.registry.datasets.byDimensions.ByDimensionsDatasetRecordStorageManagerUUID"
    datasetsImportFile = "datasets.yaml"
    datasetsIdType = uuid.UUID


if __name__ == "__main__":
    unittest.main()