Coverage for python/lsst/daf/butler/registry/dimensions/governor.py: 92%
80 statements
coverage.py v7.3.1, created at 2023-10-02 07:59 +0000

# This file is part of daf_butler.
#
# Developed for the LSST Data Management System.
# This product includes software developed by the LSST Project
# (http://www.lsst.org).
# See the COPYRIGHT file at the top-level directory of this distribution
# for details of code ownership.
#
# This software is dual licensed under the GNU General Public License and also
# under a 3-clause BSD license. Recipients may choose which of these licenses
# to use; please see the files gpl-3.0.txt and/or bsd_license.txt,
# respectively. If you choose the GPL option then the following text applies
# (but note that there is still no warranty even if you opt for BSD instead):
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations

__all__ = ["BasicGovernorDimensionRecordStorage"]

from collections.abc import Callable, Mapping
from typing import Any, cast

import sqlalchemy
from lsst.daf.relation import Relation

from ...core import DataCoordinate, DimensionRecord, GovernorDimension
from .. import queries
from ..interfaces import Database, GovernorDimensionRecordStorage, StaticTablesContext


class BasicGovernorDimensionRecordStorage(GovernorDimensionRecordStorage):
    """A record storage implementation for `GovernorDimension` that
    aggressively fetches and caches all values from the database.

    Parameters
    ----------
    db : `Database`
        Interface to the database engine and namespace that will hold these
        dimension records.
    dimension : `GovernorDimension`
        The dimension whose records this storage will manage.
    table : `sqlalchemy.schema.Table`
        The logical table for the dimension.
    """

    def __init__(
        self,
        db: Database,
        dimension: GovernorDimension,
        table: sqlalchemy.schema.Table,
    ):
        self._db = db
        self._dimension = dimension
        self._table = table
        # We need to allow the cache to be None so we have some recourse when
        # it is cleared as part of transaction rollback - we can't run queries
        # to repopulate it at that point, so we defer that until next use.
        self._cache: dict[DataCoordinate, DimensionRecord] | None = None
        self._callbacks: list[Callable[[DimensionRecord], None]] = []

    @classmethod
    def initialize(
        cls,
        db: Database,
        element: GovernorDimension,
        *,
        context: StaticTablesContext | None = None,
        config: Mapping[str, Any],
    ) -> GovernorDimensionRecordStorage:
        # Docstring inherited from GovernorDimensionRecordStorage.
        spec = element.RecordClass.fields.makeTableSpec(
            TimespanReprClass=db.getTimespanRepresentation(),
        )
        if context is not None:  # coverage: never false in the measured run; else branch not exercised
            table = context.addTable(element.name, spec)
        else:
            table = db.ensureTableExists(element.name, spec)
        return cls(db, element, table)

    @property
    def element(self) -> GovernorDimension:
        # Docstring inherited from DimensionRecordStorage.element.
        return self._dimension

    @property
    def table(self) -> sqlalchemy.schema.Table:
        return self._table

    def registerInsertionListener(self, callback: Callable[[DimensionRecord], None]) -> None:
        # Docstring inherited from GovernorDimensionRecordStorage.
        self._callbacks.append(callback)

    def clearCaches(self) -> None:
        # Docstring inherited from DimensionRecordStorage.clearCaches.
        self._cache = None
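
    # Note on the private `_sized` flag used by `make_relation` below: when the
    # record cache is being populated, `get_record_cache` calls
    # `make_relation(context, _sized=False)` so the leaf relation is built
    # without consulting the cache (consulting it there would recurse back into
    # `get_record_cache`). With `_sized=True` the cached record count is used to
    # give the relation exact `min_rows`/`max_rows` bounds.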

    def make_relation(self, context: queries.SqlQueryContext, _sized: bool = True) -> Relation:
        # Docstring inherited.
        payload = self._build_sql_payload(self._table, context.column_types)
        if _sized:
            cache = self.get_record_cache(context)
        return context.sql_engine.make_leaf(
            payload.columns_available.keys(),
            name=self.element.name,
            payload=payload,
            min_rows=len(cache) if _sized else 0,
            max_rows=len(cache) if _sized else None,
        )

    def insert(self, *records: DimensionRecord, replace: bool = False, skip_existing: bool = False) -> None:
        # Docstring inherited from DimensionRecordStorage.insert.
        elementRows = [record.toDict() for record in records]
        with self._db.transaction():
            if replace:  # coverage: never true in the measured run; replace path not exercised
                self._db.replace(self._table, *elementRows)
            elif skip_existing:
                self._db.ensure(self._table, *elementRows, primary_key_only=True)
            else:
                self._db.insert(self._table, *elementRows)
            for record in records:
                # We really shouldn't ever get into a situation where the
                # record here differs from the one in the DB, but the last
                # thing we want is to make it harder to debug by making the
                # cache different from the DB.
                if self._cache is not None:
                    if skip_existing:
                        self._cache.setdefault(record.dataId, record)
                    else:
                        self._cache[record.dataId] = record
                for callback in self._callbacks:
                    callback(record)
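
    # `sync` below is the idempotent counterpart to `insert`: it ensures a
    # single governor record exists, optionally updating a mismatched row when
    # `update=True`, and returns whatever `Database.sync` reports about whether
    # anything was inserted or changed. As with `insert`, listeners registered
    # via `registerInsertionListener` are notified when a row is inserted or
    # updated.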

    def sync(self, record: DimensionRecord, update: bool = False) -> bool | dict[str, Any]:
        # Docstring inherited from DimensionRecordStorage.sync.
        compared = record.toDict()
        keys = {}
        for name in record.fields.required.names:
            keys[name] = compared.pop(name)
        with self._db.transaction():
            _, inserted_or_updated = self._db.sync(
                self._table,
                keys=keys,
                compared=compared,
                update=update,
            )
            if inserted_or_updated:  # coverage: never false in the measured run
                if self._cache is not None:  # coverage: never true in the measured run
                    self._cache[record.dataId] = record
                for callback in self._callbacks:
                    callback(record)
        return inserted_or_updated

    def fetch_one(self, data_id: DataCoordinate, context: queries.SqlQueryContext) -> DimensionRecord | None:
        # Docstring inherited.
        cache = self.get_record_cache(context)
        return cache.get(data_id)

    def get_record_cache(self, context: queries.SqlQueryContext) -> Mapping[DataCoordinate, DimensionRecord]:
        # Docstring inherited.
        if self._cache is None:
            reader = queries.DimensionRecordReader(self.element)
            cache = {}
            for row in context.fetch_iterable(self.make_relation(context, _sized=False)):
                record = reader.read(row)
                cache[record.dataId] = record
            self._cache = cache
        return cast(Mapping[DataCoordinate, DimensionRecord], self._cache)

    def digestTables(self) -> list[sqlalchemy.schema.Table]:
        # Docstring inherited from DimensionRecordStorage.digestTables.
        return [self._table]
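

# --- Illustrative usage sketch; not part of governor.py itself. ---
# A minimal, hedged example of how the storage class above is typically driven,
# assuming a configured `Database`, a `GovernorDimension` (e.g. "instrument"),
# a `queries.SqlQueryContext`, and an already-built `DimensionRecord` are all
# supplied by the caller; none of that setup is shown here, and the registry's
# real wiring differs in detail.
def example_governor_storage_usage(
    db: Database,
    dimension: GovernorDimension,
    context: queries.SqlQueryContext,
    record: DimensionRecord,
) -> DimensionRecord | None:
    # Create (or attach to) the governor table and build the storage object.
    # `config` is required by the signature but unused by this implementation.
    storage = BasicGovernorDimensionRecordStorage.initialize(db, dimension, config={})
    # React to new governor values (e.g. a registry manager adding
    # per-instrument tables) by registering a listener.
    storage.registerInsertionListener(lambda rec: print(f"new {dimension.name}: {rec.dataId}"))
    # Idempotently ensure the record exists; `insert` is the non-idempotent
    # alternative.
    storage.sync(record)
    # All records are cached aggressively, so lookups do not hit the database
    # once the cache has been populated.
    return storage.fetch_one(record.dataId, context)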