Coverage for python/lsst/daf/butler/registry/queries/_sql_query_backend.py: 18%

109 statements  

coverage.py v7.3.2, created at 2023-12-06 10:53 +0000

# This file is part of daf_butler.
#
# Developed for the LSST Data Management System.
# This product includes software developed by the LSST Project
# (http://www.lsst.org).
# See the COPYRIGHT file at the top-level directory of this distribution
# for details of code ownership.
#
# This software is dual licensed under the GNU General Public License and also
# under a 3-clause BSD license. Recipients may choose which of these licenses
# to use; please see the files gpl-3.0.txt and/or bsd_license.txt,
# respectively. If you choose the GPL option then the following text applies
# (but note that there is still no warranty even if you opt for BSD instead):
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations

__all__ = ("SqlQueryBackend",)

from collections.abc import Iterable, Mapping, Sequence, Set
from typing import TYPE_CHECKING, Any, cast

from lsst.daf.relation import ColumnError, ColumnExpression, ColumnTag, Join, Predicate, Relation

from ..._column_categorization import ColumnCategorization
from ..._column_tags import DimensionKeyColumnTag, DimensionRecordColumnTag
from ..._dataset_type import DatasetType
from ...dimensions import DataCoordinate, DimensionGroup, DimensionRecord, DimensionUniverse
from .._collection_type import CollectionType
from .._exceptions import DataIdValueError
from ..interfaces import CollectionRecord, Database
from ._query_backend import QueryBackend
from ._sql_query_context import SqlQueryContext

if TYPE_CHECKING:
    from ..managers import RegistryManagerInstances


class SqlQueryBackend(QueryBackend[SqlQueryContext]):
    """An implementation of `QueryBackend` for `SqlRegistry`.

    Parameters
    ----------
    db : `Database`
        Object that abstracts the database engine.
    managers : `RegistryManagerInstances`
        Struct containing the manager objects that back a `SqlRegistry`.
    """

    def __init__(
        self,
        db: Database,
        managers: RegistryManagerInstances,
    ):
        self._db = db
        self._managers = managers

    @property
    def universe(self) -> DimensionUniverse:
        # Docstring inherited.
        return self._managers.dimensions.universe

    def context(self) -> SqlQueryContext:
        # Docstring inherited.
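        # A fresh context is created for each query; it bundles the database
        # connection with the column-type information held by the managers.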
        return SqlQueryContext(self._db, self._managers.column_types)

    def get_collection_name(self, key: Any) -> str:
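        # Look up the collection record for this key and return its name.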
        return self._managers.collections[key].name

    def resolve_collection_wildcard(
        self,
        expression: Any,
        *,
        collection_types: Set[CollectionType] = CollectionType.all(),
        done: set[str] | None = None,
        flatten_chains: bool = True,
        include_chains: bool | None = None,
    ) -> list[CollectionRecord]:
        # Docstring inherited.
        return self._managers.collections.resolve_wildcard(
            expression,
            collection_types=collection_types,
            done=done,
            flatten_chains=flatten_chains,
            include_chains=include_chains,
        )

    def resolve_dataset_type_wildcard(
        self,
        expression: Any,
        components: bool | None = None,
        missing: list[str] | None = None,
        explicit_only: bool = False,
        components_deprecated: bool = True,
    ) -> dict[DatasetType, list[str | None]]:
        # Docstring inherited.
        return self._managers.datasets.resolve_wildcard(
            expression, components, missing, explicit_only, components_deprecated
        )

    def filter_dataset_collections(
        self,
        dataset_types: Iterable[DatasetType],
        collections: Sequence[CollectionRecord],
        *,
        governor_constraints: Mapping[str, Set[str]],
        rejections: list[str] | None = None,
    ) -> dict[DatasetType, list[CollectionRecord]]:
        # Docstring inherited.
        result: dict[DatasetType, list[CollectionRecord]] = {
            dataset_type: [] for dataset_type in dataset_types
        }
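        # Fetch dataset-type summaries for all collections in a single call,
        # then test each (dataset type, collection) pair against them.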
        summaries = self._managers.datasets.fetch_summaries(collections, result.keys())
        for dataset_type, filtered_collections in result.items():
            for collection_record in collections:
                if not dataset_type.isCalibration() and collection_record.type is CollectionType.CALIBRATION:
                    if rejections is not None:
                        rejections.append(
                            f"Not searching for non-calibration dataset of type {dataset_type.name!r} "
                            f"in CALIBRATION collection {collection_record.name!r}."
                        )
                else:
                    collection_summary = summaries[collection_record.key]
                    if collection_summary.is_compatible_with(
                        dataset_type,
                        governor_constraints,
                        rejections=rejections,
                        name=collection_record.name,
                    ):
                        filtered_collections.append(collection_record)
        return result

    def _make_dataset_query_relation_impl(
        self,
        dataset_type: DatasetType,
        collections: Sequence[CollectionRecord],
        columns: Set[str],
        context: SqlQueryContext,
    ) -> Relation:
        # Docstring inherited.
        assert len(collections) > 0, (
            "Caller is responsible for handling the case of all collections being rejected (we can't "
            "write a good error message without knowing why collections were rejected)."
        )
        dataset_storage = self._managers.datasets.find(dataset_type.name)
        if dataset_storage is None:
            # Unrecognized dataset type means no results.
            return self.make_doomed_dataset_relation(
                dataset_type,
                columns,
                messages=[
                    f"Dataset type {dataset_type.name!r} is not registered, "
                    "so no instances of it can exist in any collection."
                ],
                context=context,
            )
        else:
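            # The storage object knows how to build a relation over these
            # collections with the requested columns.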
            return dataset_storage.make_relation(
                *collections,
                columns=columns,
                context=context,
            )

    def make_dimension_relation(
        self,
        dimensions: DimensionGroup,
        columns: Set[ColumnTag],
        context: SqlQueryContext,
        *,
        initial_relation: Relation | None = None,
        initial_join_max_columns: frozenset[ColumnTag] | None = None,
        initial_dimension_relationships: Set[frozenset[str]] | None = None,
        spatial_joins: Iterable[tuple[str, str]] = (),
        governor_constraints: Mapping[str, Set[str]],
    ) -> Relation:
        # Docstring inherited.

        default_join = Join(max_columns=initial_join_max_columns)

        # Set up the relation variable we'll update as we join more relations
        # in, and ensure it is in the SQL engine.
        relation = context.make_initial_relation(initial_relation)

        if initial_dimension_relationships is None:
            relationships = self.extract_dimension_relationships(relation)
        else:
            relationships = set(initial_dimension_relationships)

        # Make a mutable copy of the columns argument.
        columns_required = set(columns)

        # Sort spatial joins to put those involving the commonSkyPix dimension
        # first, since those join subqueries might get reused in implementing
        # other joins later.
        spatial_joins = list(spatial_joins)
        spatial_joins.sort(key=lambda j: self.universe.commonSkyPix.name not in j)

        # Next we'll handle spatial joins, since those can require refinement
        # predicates that will need region columns to be included in the
        # relations we'll join.
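        # Start from a trivially-true predicate and AND in region-overlap
        # refinements as the spatial joins below require them.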
        predicate: Predicate = Predicate.literal(True)
        for element1, element2 in spatial_joins:
            (overlaps, needs_refinement) = self._managers.dimensions.make_spatial_join_relation(
                element1,
                element2,
                context=context,
                governor_constraints=governor_constraints,
                existing_relationships=relationships,
            )
            if needs_refinement:
                predicate = predicate.logical_and(
                    context.make_spatial_region_overlap_predicate(
                        ColumnExpression.reference(DimensionRecordColumnTag(element1, "region")),
                        ColumnExpression.reference(DimensionRecordColumnTag(element2, "region")),
                    )
                )
                columns_required.add(DimensionRecordColumnTag(element1, "region"))
                columns_required.add(DimensionRecordColumnTag(element2, "region"))
            relation = relation.join(overlaps)
            relationships.add(
                frozenset(self.universe[element1].dimensions.names | self.universe[element2].dimensions.names)
            )

        # All skypix columns need to come from either the initial_relation or
        # a spatial join, since we need all dimension key columns present in
        # the SQL engine, and skypix regions are added by postprocessing in
        # the native iteration engine.
        for skypix_dimension_name in dimensions.skypix:
            if DimensionKeyColumnTag(skypix_dimension_name) not in relation.columns:
                raise NotImplementedError(
                    f"Cannot construct query involving skypix dimension {skypix_dimension_name} unless "
                    "it is part of a dataset subquery, spatial join, or other initial relation."
                )

        # Before joining in new tables to provide columns, attempt to restore
        # them from the given relation by weakening projections applied to it.
        relation, _ = context.restore_columns(relation, columns_required)

        # Categorize columns not yet included in the relation to associate
        # them with dimension elements and detect bad inputs.
        missing_columns = ColumnCategorization.from_iterable(columns_required - relation.columns)
        if not (missing_columns.dimension_keys <= dimensions.names):
            raise ColumnError(
                "Cannot add dimension key column(s) "
                f"{{{', '.join(name for name in missing_columns.dimension_keys)}}} "
                f"that were not included in the given dimensions {dimensions}."
            )
        if missing_columns.datasets:
            raise ColumnError(
                f"Unexpected dataset columns {missing_columns.datasets} in call to make_dimension_relation; "
                "use make_dataset_query_relation or make_dataset_search_relation instead, or filter them "
                "out if they have already been added or will be added later."
            )
        for element_name in missing_columns.dimension_records:
            if element_name not in dimensions.elements.names:
                raise ColumnError(
                    f"Cannot join dimension element {element_name} whose dimensions are not a "
                    f"subset of {dimensions}."
                )

        # Iterate over all dimension elements whose relations definitely have
        # to be joined in. The order doesn't matter as long as we can assume
        # the database query optimizer is going to try to reorder them anyway.
        for element_name in dimensions.elements:
            columns_still_needed = missing_columns.dimension_records[element_name]
            element = self.universe[element_name]
            # Two separate conditions are in play here:
            # - if we need a record column (not just key columns) from this
            #   element, we have to join in its relation;
            # - if the element establishes a relationship between key columns
            #   that wasn't already established by the initial relation, we
            #   always join that element's relation. Any element with implied
            #   dependencies or the alwaysJoin flag establishes such a
            #   relationship.
            if columns_still_needed or (
                (element.alwaysJoin or element.implied)
                and frozenset(element.dimensions.names) not in relationships
            ):
                storage = self._managers.dimensions[element_name]
                relation = storage.join(relation, default_join, context)

        # At this point we've joined in all of the element relations that
        # definitely need to be included, but we may not have all of the
        # dimension key columns in the query that we want. To fill out that
        # set, we iterate over just the given DimensionGroup's dimensions (not
        # all dimension *elements*) in reverse topological order. That order
        # should reduce the total number of tables we bring in, since each
        # dimension will bring in keys for its required dependencies before we
        # get to those required dependencies.
        for dimension_name in reversed(dimensions.names.as_tuple()):
            if DimensionKeyColumnTag(dimension_name) not in relation.columns:
                storage = self._managers.dimensions[dimension_name]
                relation = storage.join(relation, default_join, context)

        # Add the predicates we constructed earlier, with a transfer to native
        # iteration first if necessary.
        if not predicate.as_trivial():
            relation = relation.with_rows_satisfying(
                predicate, preferred_engine=context.iteration_engine, transfer=True
            )

        # Finally, project the new relation down to just the columns in the
        # initial relation, the dimension key columns, and the new columns
        # requested.
        columns_kept = set(columns)
        if initial_relation is not None:
            columns_kept.update(initial_relation.columns)
        columns_kept.update(DimensionKeyColumnTag.generate(dimensions.names))
        relation = relation.with_only_columns(columns_kept, preferred_engine=context.preferred_engine)

        return relation

    def resolve_governor_constraints(
        self, dimensions: DimensionGroup, constraints: Mapping[str, Set[str]], context: SqlQueryContext
    ) -> Mapping[str, Set[str]]:
        # Docstring inherited.
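        # Validate any caller-supplied governor values against the cached
        # records; governors without an explicit constraint are expanded to
        # the full set of known values.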
        result: dict[str, Set[str]] = {}
        for dimension_name in dimensions.governors:
            storage = self._managers.dimensions[dimension_name]
            records = storage.get_record_cache(context)
            assert records is not None, "Governor dimensions are always cached."
            all_values = {cast(str, data_id[dimension_name]) for data_id in records}
            if (constraint_values := constraints.get(dimension_name)) is not None:
                if not (constraint_values <= all_values):
                    raise DataIdValueError(
                        f"Unknown values specified for governor dimension {dimension_name}: "
                        f"{constraint_values - all_values}."
                    )
                result[dimension_name] = constraint_values
            else:
                result[dimension_name] = all_values
        return result

    def get_dimension_record_cache(
        self,
        element_name: str,
        context: SqlQueryContext,
    ) -> Mapping[DataCoordinate, DimensionRecord] | None:
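        # Return the record cache for this element's storage, if it has one.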
        return self._managers.dimensions[element_name].get_record_cache(context)
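
# A minimal usage sketch (hypothetical, not part of this module: `db` and
# `managers` would normally be supplied by `SqlRegistry`, and the use of
# `context()` as a context manager and of `DimensionUniverse.conform` to
# build a `DimensionGroup` are assumptions about the surrounding API):
#
#     backend = SqlQueryBackend(db, managers)
#     collections = backend.resolve_collection_wildcard(...)
#     with backend.context() as context:
#         relation = backend.make_dimension_relation(
#             backend.universe.conform(["detector"]),
#             columns=set(),
#             context=context,
#             governor_constraints={},
#         )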