1# This file is part of daf_butler.
2#
3# Developed for the LSST Data Management System.
4# This product includes software developed by the LSST Project
5# (http://www.lsst.org).
6# See the COPYRIGHT file at the top-level directory of this distribution
7# for details of code ownership.
8#
9# This program is free software: you can redistribute it and/or modify
10# it under the terms of the GNU General Public License as published by
11# the Free Software Foundation, either version 3 of the License, or
12# (at your option) any later version.
13#
14# This program is distributed in the hope that it will be useful,
15# but WITHOUT ANY WARRANTY; without even the implied warranty of
16# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17# GNU General Public License for more details.
18#
19# You should have received a copy of the GNU General Public License
20# along with this program. If not, see <http://www.gnu.org/licenses/>.
22from __future__ import annotations
24import os
25import tempfile
26from typing import Any
27import unittest
28import uuid
29import re
31try:
32 import numpy as np
33except ImportError:
34 np = None
36import astropy.time
38from lsst.daf.butler import (
39 Butler,
40 ButlerConfig,
41 CollectionType,
42 DatasetRef,
43 DatasetType,
44 Registry,
45 Timespan,
46)
47from lsst.daf.butler.registry import RegistryConfig, RegistryDefaults, ConflictingDefinitionError
48from lsst.daf.butler.tests import DatastoreMock
49from lsst.daf.butler.tests.utils import makeTestTempDir, removeTestTempDir
52TESTDIR = os.path.abspath(os.path.dirname(__file__))
class SimpleButlerTestCase(unittest.TestCase):
    """Tests for butler (including import/export functionality) that should not
    depend on the Registry Database backend or Datastore implementation, and
    can instead utilize an in-memory SQLite Registry and a mocked Datastore.
    """

    # Fully-qualified name of the datasets manager to configure into the
    # registry; subclasses override this to exercise the UUID-based manager.
    datasetsManager = \
        "lsst.daf.butler.registry.datasets.byDimensions.ByDimensionsDatasetRecordStorageManager"
    # YAML export file (under data/registry) used to populate test datasets.
    datasetsImportFile = "datasets.yaml"
    # Python type of the dataset IDs produced by ``datasetsManager``.
    datasetsIdType = int

    def setUp(self):
        # Fresh temporary directory per test; every makeButler() call creates
        # its own registry database inside it.
        self.root = makeTestTempDir(TESTDIR)

    def tearDown(self):
        removeTestTempDir(self.root)

    def makeButler(self, **kwargs: Any) -> Butler:
        """Return new Butler instance on each call.
        """
        config = ButlerConfig()

        # make separate temporary directory for registry of this instance
        tmpdir = tempfile.mkdtemp(dir=self.root)
        config["registry", "db"] = f"sqlite:///{tmpdir}/gen3.sqlite3"
        config["registry", "managers", "datasets"] = self.datasetsManager
        config["root"] = self.root

        # have to make a registry first
        registryConfig = RegistryConfig(config.get("registry"))
        Registry.createFromConfig(registryConfig)

        butler = Butler(config, **kwargs)
        # Replace the real datastore with a mock so no files are written.
        DatastoreMock.apply(butler)
        return butler

    def comparableRef(self, ref: DatasetRef) -> DatasetRef:
        """Return a DatasetRef that can be compared to a DatasetRef from
        other repository.

        For repositories that do not support round-trip of ID values this
        method returns unresolved DatasetRef, for round-trip-safe repos it
        returns unchanged ref.
        """
        return ref if self.datasetsIdType is uuid.UUID else ref.unresolved()

    def testReadBackwardsCompatibility(self):
        """Test that we can read an export file written by a previous version
        and commit to the daf_butler git repo.

        Notes
        -----
        At present this export file includes only dimension data, not datasets,
        which greatly limits the usefulness of this test.  We should address
        this at some point, but I think it's best to wait for the changes to
        the export format required for CALIBRATION collections to land.
        """
        butler = self.makeButler(writeable=True)
        butler.import_(filename=os.path.join(TESTDIR, "data", "registry", "hsc-rc2-subset.yaml"))
        # Spot-check a few things, but the most important test is just that
        # the above does not raise.
        self.assertGreaterEqual(
            set(record.id for record in butler.registry.queryDimensionRecords("detector", instrument="HSC")),
            set(range(104)),  # should have all science CCDs; may have some focus ones.
        )
        self.assertGreaterEqual(
            {
                (record.id, record.physical_filter)
                for record in butler.registry.queryDimensionRecords("visit", instrument="HSC")
            },
            {
                (27136, 'HSC-Z'),
                (11694, 'HSC-G'),
                (23910, 'HSC-R'),
                (11720, 'HSC-Y'),
                (23900, 'HSC-R'),
                (22646, 'HSC-Y'),
                (1248, 'HSC-I'),
                (19680, 'HSC-I'),
                (1240, 'HSC-I'),
                (424, 'HSC-Y'),
                (19658, 'HSC-I'),
                (344, 'HSC-Y'),
                (1218, 'HSC-R'),
                (1190, 'HSC-Z'),
                (23718, 'HSC-R'),
                (11700, 'HSC-G'),
                (26036, 'HSC-G'),
                (23872, 'HSC-R'),
                (1170, 'HSC-Z'),
                (1876, 'HSC-Y'),
            }
        )

    def testDatasetTransfers(self):
        """Test exporting all datasets from a repo and then importing them all
        back in again.
        """
        # Import data to play with.
        butler1 = self.makeButler(writeable=True)
        butler1.import_(filename=os.path.join(TESTDIR, "data", "registry", "base.yaml"))
        butler1.import_(filename=os.path.join(TESTDIR, "data", "registry", self.datasetsImportFile))
        with tempfile.NamedTemporaryFile(mode='w', suffix=".yaml") as file:
            # Export all datasets.
            with butler1.export(filename=file.name) as exporter:
                exporter.saveDatasets(
                    butler1.registry.queryDatasets(..., collections=...)
                )
            # Import it all again.
            butler2 = self.makeButler(writeable=True)
            butler2.import_(filename=file.name)
        datasets1 = list(butler1.registry.queryDatasets(..., collections=...))
        datasets2 = list(butler2.registry.queryDatasets(..., collections=...))
        self.assertTrue(all(isinstance(ref.id, self.datasetsIdType) for ref in datasets1))
        self.assertTrue(all(isinstance(ref.id, self.datasetsIdType) for ref in datasets2))
        self.assertCountEqual(
            [self.comparableRef(ref) for ref in datasets1],
            [self.comparableRef(ref) for ref in datasets2],
        )

    def testComponentExport(self):
        """Test exporting component datasets and then importing them.

        This test intentionally does not depend on whether just the component
        is exported and then imported vs. the full composite dataset, because
        I don't want it to assume more than it needs to about the
        implementation.
        """
        # Import data to play with.
        butler1 = self.makeButler(writeable=True)
        butler1.import_(filename=os.path.join(TESTDIR, "data", "registry", "base.yaml"))
        butler1.import_(filename=os.path.join(TESTDIR, "data", "registry", self.datasetsImportFile))
        with tempfile.NamedTemporaryFile(mode='w', suffix=".yaml") as file:
            # Export all datasets.
            with butler1.export(filename=file.name) as exporter:
                exporter.saveDatasets(
                    butler1.registry.queryDatasets("flat.psf", collections=...)
                )
            # Import it all again.
            butler2 = self.makeButler(writeable=True)
            butler2.import_(filename=file.name)
        datasets1 = list(butler1.registry.queryDatasets("flat.psf", collections=...))
        datasets2 = list(butler2.registry.queryDatasets("flat.psf", collections=...))
        self.assertTrue(all(isinstance(ref.id, self.datasetsIdType) for ref in datasets1))
        self.assertTrue(all(isinstance(ref.id, self.datasetsIdType) for ref in datasets2))
        self.assertCountEqual(
            [self.comparableRef(ref) for ref in datasets1],
            [self.comparableRef(ref) for ref in datasets2],
        )

    def testDatasetImportTwice(self):
        """Test exporting all datasets from a repo and then importing them all
        back in again twice.
        """
        if self.datasetsIdType is not uuid.UUID:
            self.skipTest("This test can only work for UUIDs")
        # Import data to play with.
        butler1 = self.makeButler(writeable=True)
        butler1.import_(filename=os.path.join(TESTDIR, "data", "registry", "base.yaml"))
        butler1.import_(filename=os.path.join(TESTDIR, "data", "registry", self.datasetsImportFile))
        # delete=False because the export file is re-read twice below; we
        # remove it ourselves once done.
        with tempfile.NamedTemporaryFile(mode='w', suffix=".yaml", delete=False) as file:
            # Export all datasets.
            with butler1.export(filename=file.name) as exporter:
                exporter.saveDatasets(
                    butler1.registry.queryDatasets(..., collections=...)
                )
            butler2 = self.makeButler(writeable=True)
            # Import it once.
            butler2.import_(filename=file.name)
            # Import it again, but ignore all dimensions
            dimensions = set(
                dimension.name for dimension in butler2.registry.dimensions.getStaticDimensions())
            butler2.import_(filename=file.name, skip_dimensions=dimensions)
        # Clean up the non-auto-deleted temporary file.
        os.unlink(file.name)
        datasets1 = list(butler1.registry.queryDatasets(..., collections=...))
        datasets2 = list(butler2.registry.queryDatasets(..., collections=...))
        self.assertTrue(all(isinstance(ref.id, self.datasetsIdType) for ref in datasets1))
        self.assertTrue(all(isinstance(ref.id, self.datasetsIdType) for ref in datasets2))
        self.assertCountEqual(
            [self.comparableRef(ref) for ref in datasets1],
            [self.comparableRef(ref) for ref in datasets2],
        )

    def testDatasetImportReuseIds(self):
        """Test for import that should preserve dataset IDs.

        This test assumes that dataset IDs in datasets YAML are different from
        what auto-incremental insert would produce.
        """
        if self.datasetsIdType is not int:
            # reuseIds only applies to the integer-ID manager.
            self.skipTest("This test can only work for integer dataset IDs")
        # Import data to play with.
        butler = self.makeButler(writeable=True)
        butler.import_(filename=os.path.join(TESTDIR, "data", "registry", "base.yaml"))
        filename = os.path.join(TESTDIR, "data", "registry", self.datasetsImportFile)
        butler.import_(filename=filename, reuseIds=True)
        datasets = list(butler.registry.queryDatasets(..., collections=...))
        self.assertTrue(all(isinstance(ref.id, self.datasetsIdType) for ref in datasets))
        # IDs are copied from YAML, list needs to be updated if file contents
        # is changed.
        self.assertCountEqual(
            [ref.id for ref in datasets],
            [1001, 1002, 1003, 1010, 1020, 1030, 2001, 2002, 2003, 2010, 2020, 2030, 2040],
        )

        # Try once again, it will raise
        with self.assertRaises(ConflictingDefinitionError):
            butler.import_(filename=filename, reuseIds=True)

    def testCollectionTransfers(self):
        """Test exporting and then importing collections of various types.
        """
        # Populate a registry with some datasets.
        butler1 = self.makeButler(writeable=True)
        butler1.import_(filename=os.path.join(TESTDIR, "data", "registry", "base.yaml"))
        butler1.import_(filename=os.path.join(TESTDIR, "data", "registry", self.datasetsImportFile))
        registry1 = butler1.registry
        # Add some more collections.
        registry1.registerRun("run1")
        registry1.registerCollection("tag1", CollectionType.TAGGED)
        registry1.registerCollection("calibration1", CollectionType.CALIBRATION)
        registry1.registerCollection("chain1", CollectionType.CHAINED)
        registry1.registerCollection("chain2", CollectionType.CHAINED)
        registry1.setCollectionChain("chain1", ["tag1", "run1", "chain2"])
        registry1.setCollectionChain("chain2", ["calibration1", "run1"])
        # Associate some datasets into the TAGGED and CALIBRATION collections.
        flats1 = list(registry1.queryDatasets("flat", collections=...))
        registry1.associate("tag1", flats1)
        t1 = astropy.time.Time('2020-01-01T01:00:00', format="isot", scale="tai")
        t2 = astropy.time.Time('2020-01-01T02:00:00', format="isot", scale="tai")
        t3 = astropy.time.Time('2020-01-01T03:00:00', format="isot", scale="tai")
        bias2a = registry1.findDataset("bias", instrument="Cam1", detector=2, collections="imported_g")
        bias3a = registry1.findDataset("bias", instrument="Cam1", detector=3, collections="imported_g")
        bias2b = registry1.findDataset("bias", instrument="Cam1", detector=2, collections="imported_r")
        bias3b = registry1.findDataset("bias", instrument="Cam1", detector=3, collections="imported_r")
        registry1.certify("calibration1", [bias2a, bias3a], Timespan(t1, t2))
        registry1.certify("calibration1", [bias2b], Timespan(t2, None))
        registry1.certify("calibration1", [bias3b], Timespan(t2, t3))

        with tempfile.NamedTemporaryFile(mode='w', suffix=".yaml") as file:
            # Export all collections, and some datasets.
            with butler1.export(filename=file.name) as exporter:
                # Sort results to put chain1 before chain2, which is
                # intentionally not topological order.
                for collection in sorted(registry1.queryCollections()):
                    exporter.saveCollection(collection)
                exporter.saveDatasets(flats1)
                exporter.saveDatasets([bias2a, bias2b, bias3a, bias3b])
            # Import them into a new registry.
            butler2 = self.makeButler(writeable=True)
            butler2.import_(filename=file.name)
        registry2 = butler2.registry
        # Check that it all round-tripped, starting with the collections
        # themselves.
        self.assertIs(registry2.getCollectionType("run1"), CollectionType.RUN)
        self.assertIs(registry2.getCollectionType("tag1"), CollectionType.TAGGED)
        self.assertIs(registry2.getCollectionType("calibration1"), CollectionType.CALIBRATION)
        self.assertIs(registry2.getCollectionType("chain1"), CollectionType.CHAINED)
        self.assertIs(registry2.getCollectionType("chain2"), CollectionType.CHAINED)
        self.assertEqual(
            list(registry2.getCollectionChain("chain1")),
            ["tag1", "run1", "chain2"],
        )
        self.assertEqual(
            list(registry2.getCollectionChain("chain2")),
            ["calibration1", "run1"],
        )
        # Check that tag collection contents are the same.
        self.maxDiff = None
        self.assertCountEqual(
            [self.comparableRef(ref) for ref in registry1.queryDatasets(..., collections="tag1")],
            [self.comparableRef(ref) for ref in registry2.queryDatasets(..., collections="tag1")],
        )
        # Check that calibration collection contents are the same.
        self.assertCountEqual(
            [(self.comparableRef(assoc.ref), assoc.timespan)
             for assoc in registry1.queryDatasetAssociations("bias", collections="calibration1")],
            [(self.comparableRef(assoc.ref), assoc.timespan)
             for assoc in registry2.queryDatasetAssociations("bias", collections="calibration1")],
        )

    def testButlerGet(self):
        """Test that butler.get can work with different variants."""

        # Import data to play with.
        butler = self.makeButler(writeable=True)
        butler.import_(filename=os.path.join(TESTDIR, "data", "registry", "base.yaml"))
        butler.import_(filename=os.path.join(TESTDIR, "data", "registry", self.datasetsImportFile))

        # Find the DatasetRef for a flat
        coll = "imported_g"
        flat2g = butler.registry.findDataset("flat", instrument="Cam1", detector=2, physical_filter="Cam1-G",
                                             collections=coll)

        # Create a numpy integer to check that works fine
        detector_np = np.int64(2) if np else 2

        # Try to get it using different variations of dataId + keyword
        # arguments
        # Note that instrument.class_name does not work
        variants = (
            (None, {"instrument": "Cam1", "detector": 2, "physical_filter": "Cam1-G"}),
            (None, {"instrument": "Cam1", "detector": detector_np, "physical_filter": "Cam1-G"}),
            ({"instrument": "Cam1", "detector": 2, "physical_filter": "Cam1-G"}, {}),
            ({"instrument": "Cam1", "detector": detector_np, "physical_filter": "Cam1-G"}, {}),
            ({"instrument": "Cam1", "detector": 2}, {"physical_filter": "Cam1-G"}),
            ({"detector.full_name": "Ab"}, {"instrument": "Cam1", "physical_filter": "Cam1-G"}),
            ({"full_name": "Ab"}, {"instrument": "Cam1", "physical_filter": "Cam1-G"}),
            (None, {"full_name": "Ab", "instrument": "Cam1", "physical_filter": "Cam1-G"}),
            (None, {"detector": "Ab", "instrument": "Cam1", "physical_filter": "Cam1-G"}),
            ({"name_in_raft": "b", "raft": "A"}, {"instrument": "Cam1", "physical_filter": "Cam1-G"}),
            ({"name_in_raft": "b"}, {"raft": "A", "instrument": "Cam1", "physical_filter": "Cam1-G"}),
            (None, {"name_in_raft": "b", "raft": "A", "instrument": "Cam1", "physical_filter": "Cam1-G"}),
            ({"detector.name_in_raft": "b", "detector.raft": "A"},
             {"instrument": "Cam1", "physical_filter": "Cam1-G"}),
            ({"detector.name_in_raft": "b", "detector.raft": "A",
              "instrument": "Cam1", "physical_filter": "Cam1-G"}, {}),
        )

        for dataId, kwds in variants:
            try:
                flat_id, _ = butler.get("flat", dataId=dataId, collections=coll, **kwds)
            except Exception as e:
                # Re-raise with the failing variant attached so the test
                # failure identifies which dataId/kwargs combination broke.
                raise type(e)(f"{str(e)}: dataId={dataId}, kwds={kwds}") from e
            self.assertEqual(flat_id, flat2g.id, msg=f"DataId: {dataId}, kwds: {kwds}")

    def testGetCalibration(self):
        """Test that `Butler.get` can be used to fetch from
        `~CollectionType.CALIBRATION` collections if the data ID includes
        extra dimensions with temporal information.
        """
        # Import data to play with.
        butler = self.makeButler(writeable=True)
        butler.import_(filename=os.path.join(TESTDIR, "data", "registry", "base.yaml"))
        butler.import_(filename=os.path.join(TESTDIR, "data", "registry", self.datasetsImportFile))
        # Certify some biases into a CALIBRATION collection.
        registry = butler.registry
        registry.registerCollection("calibs", CollectionType.CALIBRATION)
        t1 = astropy.time.Time('2020-01-01T01:00:00', format="isot", scale="tai")
        t2 = astropy.time.Time('2020-01-01T02:00:00', format="isot", scale="tai")
        t3 = astropy.time.Time('2020-01-01T03:00:00', format="isot", scale="tai")
        bias2a = registry.findDataset("bias", instrument="Cam1", detector=2, collections="imported_g")
        bias3a = registry.findDataset("bias", instrument="Cam1", detector=3, collections="imported_g")
        bias2b = registry.findDataset("bias", instrument="Cam1", detector=2, collections="imported_r")
        bias3b = registry.findDataset("bias", instrument="Cam1", detector=3, collections="imported_r")
        registry.certify("calibs", [bias2a, bias3a], Timespan(t1, t2))
        registry.certify("calibs", [bias2b], Timespan(t2, None))
        registry.certify("calibs", [bias3b], Timespan(t2, t3))
        # Insert some exposure dimension data.
        registry.insertDimensionData(
            "exposure",
            {
                "instrument": "Cam1",
                "id": 3,
                "obs_id": "three",
                "timespan": Timespan(t1, t2),
                "physical_filter": "Cam1-G",
                "day_obs": 20201114,
                "seq_num": 55,
            },
            {
                "instrument": "Cam1",
                "id": 4,
                "obs_id": "four",
                "timespan": Timespan(t2, t3),
                "physical_filter": "Cam1-G",
                "day_obs": 20211114,
                "seq_num": 42,
            },
        )
        # Get some biases from raw-like data IDs.
        bias2a_id, _ = butler.get("bias", {"instrument": "Cam1", "exposure": 3, "detector": 2},
                                  collections="calibs")
        self.assertEqual(bias2a_id, bias2a.id)
        bias3b_id, _ = butler.get("bias", {"instrument": "Cam1", "exposure": 4, "detector": 3},
                                  collections="calibs")
        self.assertEqual(bias3b_id, bias3b.id)

        # Get using the kwarg form
        bias3b_id, _ = butler.get("bias",
                                  instrument="Cam1", exposure=4, detector=3,
                                  collections="calibs")
        self.assertEqual(bias3b_id, bias3b.id)

        # Do it again but using the record information
        bias2a_id, _ = butler.get("bias", {"instrument": "Cam1", "exposure.obs_id": "three",
                                           "detector.full_name": "Ab"},
                                  collections="calibs")
        self.assertEqual(bias2a_id, bias2a.id)
        bias3b_id, _ = butler.get("bias", {"exposure.obs_id": "four",
                                           "detector.full_name": "Ba"},
                                  collections="calibs", instrument="Cam1")
        self.assertEqual(bias3b_id, bias3b.id)

        # And again but this time using the alternate value rather than
        # the primary.
        bias3b_id, _ = butler.get("bias", {"exposure": "four",
                                           "detector": "Ba"},
                                  collections="calibs", instrument="Cam1")
        self.assertEqual(bias3b_id, bias3b.id)

        # And again but this time using the alternate value rather than
        # the primary and do it in the keyword arguments.
        bias3b_id, _ = butler.get("bias",
                                  exposure="four", detector="Ba",
                                  collections="calibs", instrument="Cam1")
        self.assertEqual(bias3b_id, bias3b.id)

        # Now with implied record columns
        bias3b_id, _ = butler.get("bias", day_obs=20211114, seq_num=42,
                                  raft="B", name_in_raft="a",
                                  collections="calibs", instrument="Cam1")
        self.assertEqual(bias3b_id, bias3b.id)

    def testRegistryDefaults(self):
        """Test that we can default the collections and some data ID keys when
        constructing a butler.

        Many tests that use default run already exist in ``test_butler.py``, so
        that isn't tested here.  And while most of this functionality is
        implemented in `Registry`, we test it here instead of
        ``daf/butler/tests/registry.py`` because it shouldn't depend on the
        database backend at all.
        """
        butler = self.makeButler(writeable=True)
        butler.import_(filename=os.path.join(TESTDIR, "data", "registry", "base.yaml"))
        butler.import_(filename=os.path.join(TESTDIR, "data", "registry", self.datasetsImportFile))
        # Need to actually set defaults later, not at construction, because
        # we need to import the instrument before we can use it as a default.
        # Don't set a default instrument value for data IDs, because 'Cam1'
        # should be inferred by virtue of that being the only value in the
        # input collections.
        butler.registry.defaults = RegistryDefaults(collections=["imported_g"])
        # Use findDataset without collections or instrument.
        ref = butler.registry.findDataset("flat", detector=2, physical_filter="Cam1-G")
        # Do the same with Butler.get; this should ultimately invoke a lot of
        # the same code, so it's a bit circular, but mostly we're checking that
        # it works at all.
        dataset_id, _ = butler.get("flat", detector=2, physical_filter="Cam1-G")
        self.assertEqual(ref.id, dataset_id)
        # Query for datasets.  Test defaulting the data ID in both kwargs and
        # in the WHERE expression.
        queried_refs_1 = set(butler.registry.queryDatasets("flat", detector=2, physical_filter="Cam1-G"))
        self.assertEqual({ref}, queried_refs_1)
        queried_refs_2 = set(butler.registry.queryDatasets("flat",
                                                           where="detector=2 AND physical_filter='Cam1-G'"))
        self.assertEqual({ref}, queried_refs_2)
        # Query for data IDs with a dataset constraint.
        queried_data_ids = set(butler.registry.queryDataIds({"instrument", "detector", "physical_filter"},
                                                            datasets={"flat"},
                                                            detector=2, physical_filter="Cam1-G"))
        self.assertEqual({ref.dataId}, queried_data_ids)
        # Add another instrument to the repo, and a dataset that uses it to
        # the `imported_g` collection.
        butler.registry.insertDimensionData("instrument", {"name": "Cam2"})
        camera = DatasetType(
            "camera",
            dimensions=butler.registry.dimensions["instrument"].graph,
            storageClass="Camera",
        )
        butler.registry.registerDatasetType(camera)
        butler.registry.insertDatasets(camera, [{"instrument": "Cam2"}], run="imported_g")
        # Initialize a new butler with `imported_g` as its default run.
        # This should not have a default instrument, because there are two.
        # Pass run instead of collections; this should set both.
        butler2 = Butler(butler=butler, run="imported_g")
        self.assertEqual(list(butler2.registry.defaults.collections), ["imported_g"])
        self.assertEqual(butler2.registry.defaults.run, "imported_g")
        self.assertFalse(butler2.registry.defaults.dataId)
        # Initialize a new butler with an instrument default explicitly given.
        # Set collections instead of run, which should then be None.
        butler3 = Butler(butler=butler, collections=["imported_g"], instrument="Cam2")
        self.assertEqual(list(butler3.registry.defaults.collections), ["imported_g"])
        self.assertIsNone(butler3.registry.defaults.run)
        self.assertEqual(butler3.registry.defaults.dataId.byName(), {"instrument": "Cam2"})

    def testJson(self):
        """Test JSON serialization mediated by registry.
        """
        butler = self.makeButler(writeable=True)
        butler.import_(filename=os.path.join(TESTDIR, "data", "registry", "base.yaml"))
        butler.import_(filename=os.path.join(TESTDIR, "data", "registry", self.datasetsImportFile))
        # Need to actually set defaults later, not at construction, because
        # we need to import the instrument before we can use it as a default.
        # Don't set a default instrument value for data IDs, because 'Cam1'
        # should be inferred by virtue of that being the only value in the
        # input collections.
        butler.registry.defaults = RegistryDefaults(collections=["imported_g"])
        # Use findDataset without collections or instrument.
        ref = butler.registry.findDataset("flat", detector=2, physical_filter="Cam1-G")

        # Transform the ref and dataset type to and from JSON
        # and check that it can be reconstructed properly

        # Do it with the ref and a component ref in minimal and standard form
        compRef = ref.makeComponentRef("wcs")

        for test_item in (ref, ref.datasetType, compRef, compRef.datasetType):
            for minimal in (False, True):
                json_str = test_item.to_json(minimal=minimal)
                from_json = type(test_item).from_json(json_str, registry=butler.registry)
                self.assertEqual(from_json, test_item, msg=f"From JSON '{json_str}' using registry")

                # for minimal=False case also do a test without registry
                if not minimal:
                    from_json = type(test_item).from_json(json_str, universe=butler.registry.dimensions)
                    self.assertEqual(from_json, test_item, msg=f"From JSON '{json_str}' using universe")

    def testJsonDimensionRecords(self):
        """Test JSON round-trip of dimension records through the registry."""
        # Dimension Records
        butler = self.makeButler(writeable=True)
        butler.import_(filename=os.path.join(TESTDIR, "data", "registry", "hsc-rc2-subset.yaml"))

        for dimension in ("detector", "visit"):
            records = butler.registry.queryDimensionRecords(dimension, instrument="HSC")
            for r in records:
                for minimal in (True, False):
                    json_str = r.to_json(minimal=minimal)
                    r_json = type(r).from_json(json_str, registry=butler.registry)
                    self.assertEqual(r_json, r)
                    # Also check equality of each of the components as dicts
                    self.assertEqual(r_json.toDict(), r.toDict())

    def testWildcardQueries(self):
        """Test that different collection type queries work."""

        # Import data to play with.
        butler = self.makeButler(writeable=True)
        butler.import_(filename=os.path.join(TESTDIR, "data", "registry", "base.yaml"))

        # Create some collections
        created = {"collection", "u/user/test", "coll3"}
        for collection in created:
            butler.registry.registerCollection(collection, type=CollectionType.RUN)

        collections = butler.registry.queryCollections()
        self.assertEqual(set(collections), created)

        # Each entry pairs a collection expression (string glob, regex,
        # ellipsis, or tuple thereof) with the expected matching names.
        expressions = (
            ("collection", {"collection"}),
            (..., created),
            ("*", created),
            (("collection", "*"), created),
            ("u/*", {"u/user/test"}),
            (re.compile("u.*"), {"u/user/test"}),
            (re.compile(".*oll.*"), {"collection", "coll3"}),
            ("*oll*", {"collection", "coll3"}),
            ((re.compile(r".*\d$"), "u/user/test"), {"coll3", "u/user/test"}),
            ("*[0-9]", {"coll3"}),
        )
        for expression, expected in expressions:
            result = butler.registry.queryCollections(expression)
            self.assertEqual(set(result), expected)
class SimpleButlerUUIDTestCase(SimpleButlerTestCase):
    """Variant of `SimpleButlerTestCase` that configures the UUID-based
    datasets manager and imports datasets from a YAML file whose dataset
    IDs are UUIDs.
    """

    datasetsManager = (
        "lsst.daf.butler.registry.datasets.byDimensions."
        "ByDimensionsDatasetRecordStorageManagerUUID"
    )
    datasetsImportFile = "datasets-uuid.yaml"
    datasetsIdType = uuid.UUID
class SimpleButlerMixedUUIDTestCase(SimpleButlerTestCase):
    """Variant of `SimpleButlerTestCase` that configures the UUID-based
    datasets manager but imports datasets from a YAML file whose dataset
    IDs are integers (so the manager must assign fresh UUIDs).
    """

    datasetsManager = (
        "lsst.daf.butler.registry.datasets.byDimensions."
        "ByDimensionsDatasetRecordStorageManagerUUID"
    )
    datasetsImportFile = "datasets.yaml"
    datasetsIdType = uuid.UUID
# Standard script entry point: run all test cases in this module.
# (Stripped coverage-report annotation text that had been fused onto this
# line by the HTML extraction.)
if __name__ == "__main__":
    unittest.main()