Coverage for python/lsst/daf/butler/registry/queries/_query_datasets.py: 36%
50 statements
# This file is part of daf_butler.
#
# Developed for the LSST Data Management System.
# This product includes software developed by the LSST Project
# (http://www.lsst.org).
# See the COPYRIGHT file at the top-level directory of this distribution
# for details of code ownership.
#
# This software is dual licensed under the GNU General Public License and also
# under a 3-clause BSD license. Recipients may choose which of these licenses
# to use; please see the files gpl-3.0.txt and/or bsd_license.txt,
# respectively. If you choose the GPL option then the following text applies
# (but note that there is still no warranty even if you opt for BSD instead):
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

from __future__ import annotations

from collections.abc import Iterable, Iterator

from ..._butler import Butler
from ..._dataset_ref import DatasetRef
from ..._dataset_type import DatasetType
from ...dimensions import DimensionGroup
from ...queries import DatasetRefQueryResults, Query
from ._query_common import CommonQueryArguments, LegacyQueryResultsMixin
from ._results import DataCoordinateQueryResults, ParentDatasetQueryResults


class QueryDriverDatasetRefQueryResults(
    LegacyQueryResultsMixin[DatasetRefQueryResults], ParentDatasetQueryResults
):
44 """Implementation of the legacy ``DimensionRecordQueryResults`` interface
45 using the new query system.
47 Parameters
48 ----------
49 butler : `Butler`
50 Butler object used to execute queries.
51 args : `CommonQueryArguments`
52 User-facing arguments forwarded from
53 ``registry.queryDatasets``.
54 dataset_type : `DatasetType`
55 Type of datasets to search for.
56 find_first : `bool`
57 If `True`, for each result data ID, only yield one `DatasetRef` from
58 the first collection in which a dataset of that dataset type appears
59 (according to the order of ``collections`` passed in).
60 extra_dimensions : `DimensionGroup` | `None`
61 Dimensions to include in the query (in addition to those used
62 to identify the queried dataset type(s)), either to constrain
63 the resulting datasets to those for which a matching dimension
64 exists, or to relate the dataset type's dimensions to dimensions
65 referenced by the ``dataId`` or ``where`` arguments.
66 doomed_by : `list` [ `str` ]
67 List of messages explaining reasons why this query might not return
68 any results.
69 expanded : `bool`
70 `True` if the query will generate "expanded" DatasetRefs that include
71 dimension records associated with the data IDs.
72 """

    def __init__(
        self,
        butler: Butler,
        args: CommonQueryArguments,
        *,
        dataset_type: DatasetType,
        find_first: bool,
        extra_dimensions: DimensionGroup | None,
        doomed_by: list[str],
        expanded: bool,
    ) -> None:
        LegacyQueryResultsMixin.__init__(self, butler, args)
        ParentDatasetQueryResults.__init__(self)
        self._dataset_type = dataset_type
        self._find_first = find_first
        self._extra_dimensions = extra_dimensions
        self._doomed_by = doomed_by
        self._expanded = expanded
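
    # Join any extra dimensions into the query, run the dataset search against
    # the configured collections (honoring find_first), and attach dimension
    # records when expanded results were requested.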
    def _build_result(self, query: Query) -> DatasetRefQueryResults:
        if self._extra_dimensions:
            query = query.join_dimensions(self._extra_dimensions)
        results = query.datasets(self._dataset_type, self._args.collections, find_first=self._find_first)
        if self._expanded:
            return results.with_dimension_records()
        else:
            return results

    def __iter__(self) -> Iterator[DatasetRef]:
        # We have to eagerly fetch the results to prevent
        # leaking the resources associated with QueryDriver.
        return iter(list(self._iterate_rows()))
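
    # Stream refs from the underlying query, overriding the storage class on
    # any ref whose dataset type's storage class differs from that of the
    # dataset type this results object was constructed with.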
    def _iterate_rows(self) -> Iterator[DatasetRef]:
        with self._build_query() as result:
            target_storage_class = self._dataset_type.storageClass_name
            for ref in result:
                if ref.datasetType.storageClass_name != target_storage_class:
                    yield ref.overrideStorageClass(target_storage_class)
                else:
                    yield ref
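
    # The legacy interface's "parent" dataset type is the dataset type this
    # results object was constructed with.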
    @property
    def parentDatasetType(self) -> DatasetType:
        return self._dataset_type
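
    # Expose the matching data IDs by re-running the same query constrained to
    # this single dataset type, over the dataset type's own dimensions.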
    @property
    def dataIds(self) -> DataCoordinateQueryResults:
        from ._query_data_coordinates import QueryDriverDataCoordinateQueryResults

        args = self._args.replaceDatasetTypes([self._dataset_type.name])

        return QueryDriverDataCoordinateQueryResults(
            self._butler, dimensions=self._dataset_type.dimensions, expanded=self._expanded, args=args
        )
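
    # Results are already grouped by a single (parent) dataset type, so the
    # only group to yield is this object itself.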
    def byParentDatasetType(self) -> Iterator[ParentDatasetQueryResults]:
        yield self
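
    # Return a copy of this results object configured to attach dimension
    # records to each DatasetRef's data ID.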
    def expanded(self) -> QueryDriverDatasetRefQueryResults:
        return QueryDriverDatasetRefQueryResults(
            self._butler,
            self._args,
            dataset_type=self._dataset_type,
            find_first=self._find_first,
            extra_dimensions=self._extra_dimensions,
            doomed_by=self._doomed_by,
            expanded=True,
        )
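
    # Combine any messages from the base implementation with the "doomed"
    # messages recorded when this results object was created.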
    def explain_no_results(self, execute: bool = True) -> Iterable[str]:
        messages = list(super().explain_no_results(execute))
        messages.extend(self._doomed_by)
        return messages