# This file is part of pipe_base.
#
# Developed for the LSST Data Management System.
# This product includes software developed by the LSST Project
# (http://www.lsst.org).
# See the COPYRIGHT file at the top-level directory of this distribution
# for details of code ownership.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

from __future__ import annotations

__all__ = ("DESERIALIZER_MAP",)

import json
import lzma
import pickle
import struct
import uuid
from abc import ABC, abstractmethod
from collections import defaultdict
from collections.abc import Callable
from dataclasses import dataclass
from types import SimpleNamespace
from typing import TYPE_CHECKING, ClassVar, cast

import networkx as nx
from lsst.daf.butler import (
    DatasetRef,
    DatasetType,
    DimensionConfig,
    DimensionUniverse,
    Quantum,
    SerializedDimensionRecord,
)
from lsst.utils import doImportType

from ..config import PipelineTaskConfig
from ..pipeline import TaskDef
from ..pipelineTask import PipelineTask
from ._implDetails import DatasetTypeName, _DatasetTracker
from .quantumNode import QuantumNode, SerializedQuantumNode

if TYPE_CHECKING:
    from .graph import QuantumGraph


class StructSizeDescriptor:
    """A class-level property that reports the size (number of bytes) of a
    deserializer's format string.
    """

    def __get__(self, inst: DeserializerBase | None, owner: type[DeserializerBase]) -> int:
        return struct.calcsize(owner.FMT_STRING())
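
# For example, a deserializer whose FMT_STRING() returns ">QQ" (two big-endian
# unsigned 64-bit integers) reports structSize == struct.calcsize(">QQ") == 16.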


@dataclass
class DeserializerBase(ABC):
    @classmethod
    @abstractmethod
    def FMT_STRING(cls) -> str:
        raise NotImplementedError("Base class does not implement this method")

    structSize: ClassVar[StructSizeDescriptor]

    preambleSize: int
    sizeBytes: bytes

    def __init_subclass__(cls) -> None:
        # attach the size descriptor
        cls.structSize = StructSizeDescriptor()
        super().__init_subclass__()

    def unpackHeader(self, rawHeader: bytes) -> str | None:
        """Transform the raw bytes corresponding to the header of a save into
        a string of the header information.

        Parameters
        ----------
        rawHeader : bytes
            The bytes that are to be parsed into the header information.
            These are the bytes after the preamble and structSize number of
            bytes and before the headerSize bytes.

        Returns
        -------
        header : `str` or `None`
            Header information as a string. Returns `None` if the save format
            has no header string implementation (such as save format 1, which
            is all pickle).
        """
        raise NotImplementedError("Base class does not implement this method")

    @property
    def headerSize(self) -> int:
        """Return the number of bytes from the beginning of the file to the
        end of the metadata.
        """
        raise NotImplementedError("Base class does not implement this method")

    def readHeaderInfo(self, rawHeader: bytes) -> SimpleNamespace:
        """Parse the supplied raw bytes into the header information and the
        byte ranges of specific TaskDefs and QuantumNodes.

        Parameters
        ----------
        rawHeader : bytes
            The bytes that are to be parsed into the header information.
            These are the bytes after the preamble and structSize number of
            bytes and before the headerSize bytes.
        """
        raise NotImplementedError("Base class does not implement this method")

    def constructGraph(
        self,
        nodes: set[uuid.UUID],
        _readBytes: Callable[[int, int], bytes],
        universe: DimensionUniverse | None = None,
    ) -> QuantumGraph:
        """Construct a graph from the deserialized information.

        Parameters
        ----------
        nodes : `set` of `uuid.UUID`
            The nodes to include in the graph.
        _readBytes : callable
            A callable that can be used to read bytes from the file handle.
            The callable takes two ints, start and stop, to use as the
            numerical bounds to read, and returns a byte stream.
        universe : `~lsst.daf.butler.DimensionUniverse`
            The singleton of all dimensions known to the middleware registry.
        """
        raise NotImplementedError("Base class does not implement this method")

    def description(self) -> str:
        """Return the description of the serialized data format."""
        raise NotImplementedError("Base class does not implement this method")
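
# A minimal sketch of a ``_readBytes`` callable of the kind ``constructGraph``
# expects, assuming a seekable open binary file handle ``stream`` (a
# hypothetical name, not part of this module):
#
#     def _readBytes(start: int, stop: int) -> bytes:
#         stream.seek(start)
#         return stream.read(stop - start)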


Version1Description = """
The save file starts with the first few bytes corresponding to the magic bytes
in the QuantumGraph: `qgraph4\xf6\xe8\xa9`.

The next few bytes are 2 big endian unsigned 64 bit integers.

The first unsigned 64 bit integer corresponds to the number of bytes of a
python mapping of TaskDef labels to the byte ranges in the save file where the
definition can be loaded.

The second unsigned 64 bit integer corresponds to the number of bytes of a
python mapping of QuantumGraph Node number to the byte ranges in the save file
where the node can be loaded. The byte range is indexed starting after
the `header` bytes of the magic bytes, size bytes, and bytes of the two
mappings.

Each of the above mappings is pickled and then lzma compressed, so to
deserialize the bytes, lzma decompression must be performed first and the
result passed to the python pickle loader.

As stated above, each map contains byte ranges of the corresponding
data structure. These bytes are also lzma compressed pickles, and should
be deserialized in a similar manner. The byte range is indexed starting after
the `header` bytes of the magic bytes, size bytes, and bytes of the two
mappings.

In addition to the TaskDef byte locations, the TaskDef map also contains
an additional key '__GraphBuildID'. The value associated with this is the
unique id assigned to the graph at its creation time.
"""


@dataclass
class DeserializerV1(DeserializerBase):
    @classmethod
    def FMT_STRING(cls) -> str:
        return ">QQ"

    def __post_init__(self) -> None:
        self.taskDefMapSize, self.nodeMapSize = struct.unpack(self.FMT_STRING(), self.sizeBytes)

    @property
    def headerSize(self) -> int:
        return self.preambleSize + self.structSize + self.taskDefMapSize + self.nodeMapSize

    def readHeaderInfo(self, rawHeader: bytes) -> SimpleNamespace:
        returnValue = SimpleNamespace()
        returnValue.taskDefMap = pickle.loads(rawHeader[: self.taskDefMapSize])
        returnValue._buildId = returnValue.taskDefMap["__GraphBuildID"]
        returnValue.map = pickle.loads(rawHeader[self.taskDefMapSize :])
        returnValue.metadata = None
        self.returnValue = returnValue
        return returnValue

    def unpackHeader(self, rawHeader: bytes) -> str | None:
        return None

    def constructGraph(
        self,
        nodes: set[uuid.UUID],
        _readBytes: Callable[[int, int], bytes],
        universe: DimensionUniverse | None = None,
    ) -> QuantumGraph:
        # need to import here to avoid cyclic imports
        from . import QuantumGraph

        quanta: defaultdict[TaskDef, set[Quantum]] = defaultdict(set)
        quantumToNodeId: dict[Quantum, uuid.UUID] = {}
        loadedTaskDef = {}
        # loop over the nodes specified above
        for node in nodes:
            # Get the bytes to read from the map
            start, stop = self.returnValue.map[node]
            start += self.headerSize
            stop += self.headerSize

            # read the specified bytes (this may be overloaded by subclasses);
            # bytes are compressed, so decompress them
            dump = lzma.decompress(_readBytes(start, stop))

            # reconstruct node
            qNode = pickle.loads(dump)
            object.__setattr__(qNode, "nodeId", uuid.uuid4())

            # Read the saved node's task. If it has already been loaded,
            # attach it; if not, read in the taskDef first, and then load it.
            nodeTask = qNode.taskDef
            if nodeTask not in loadedTaskDef:
                # Get the byte ranges corresponding to this taskDef
                start, stop = self.returnValue.taskDefMap[nodeTask]
                start += self.headerSize
                stop += self.headerSize

                # load the taskDef (this method call may be overloaded by
                # subclasses); bytes are compressed, so decompress them
                taskDef = pickle.loads(lzma.decompress(_readBytes(start, stop)))
                loadedTaskDef[nodeTask] = taskDef
            # Explicitly override the "frozen-ness" of nodes to attach the
            # taskDef back onto the un-persisted node
            object.__setattr__(qNode, "taskDef", loadedTaskDef[nodeTask])
            quanta[qNode.taskDef].add(qNode.quantum)

            # record the node for later processing
            quantumToNodeId[qNode.quantum] = qNode.nodeId

        # construct an empty new QuantumGraph object, and run the associated
        # creation method with the un-persisted data
        qGraph = object.__new__(QuantumGraph)
        qGraph._buildGraphs(
            quanta,
            _quantumToNodeId=quantumToNodeId,
            _buildId=self.returnValue._buildId,
            metadata=self.returnValue.metadata,
            universe=universe,
        )
        return qGraph

    def description(self) -> str:
        return Version1Description


Version2Description = """
The save file starts with the first few bytes corresponding to the magic bytes
in the QuantumGraph: `qgraph4\xf6\xe8\xa9`.

The next few bytes are a big endian unsigned long long.

The unsigned long long corresponds to the number of bytes of a python mapping
of header information. This mapping is encoded into json and then lzma
compressed, meaning the operations must be performed in the opposite order to
deserialize.

The json encoded header mapping contains 4 fields: TaskDefs, GraphBuildID,
Nodes, and Metadata.

The `TaskDefs` key corresponds to a value which is a mapping of Task label to
task data. The task data is a mapping of key to value, where the only key is
`bytes`, which corresponds to a tuple of the start, stop byte range (indexed
after all the header bytes).

The `GraphBuildID` key corresponds to a string that is the unique id assigned
to this graph when it was created.

The `Nodes` key is like the `TaskDefs` key except it corresponds to
QuantumNodes instead of TaskDefs. Another important difference is that JSON
formatting does not allow using numbers as keys, and this mapping is keyed by
the node number. Thus it is stored in JSON as two equal length lists, the first
being the keys, and the second the values associated with those keys.

The `Metadata` key is a mapping of strings to associated values. This metadata
may be anything that is important to be transported alongside the graph.

As stated above, each map contains byte ranges of the corresponding
data structure. These bytes are also lzma compressed pickles, and should
be deserialized in a similar manner.
"""


@dataclass
class DeserializerV2(DeserializerBase):
    @classmethod
    def FMT_STRING(cls) -> str:
        return ">Q"

    def __post_init__(self) -> None:
        (self.mapSize,) = struct.unpack(self.FMT_STRING(), self.sizeBytes)

    @property
    def headerSize(self) -> int:
        return self.preambleSize + self.structSize + self.mapSize

    def readHeaderInfo(self, rawHeader: bytes) -> SimpleNamespace:
        uncompressedHeaderMap = self.unpackHeader(rawHeader)
        if uncompressedHeaderMap is None:
            raise ValueError(
                "This error is not possible because self.unpackHeader cannot return None,"
                " but is done to satisfy type checkers"
            )
        header = json.loads(uncompressedHeaderMap)
        returnValue = SimpleNamespace()
        returnValue.taskDefMap = header["TaskDefs"]
        returnValue._buildId = header["GraphBuildID"]
        returnValue.map = dict(header["Nodes"])
        returnValue.metadata = header["Metadata"]
        self.returnValue = returnValue
        return returnValue

    def unpackHeader(self, rawHeader: bytes) -> str | None:
        return lzma.decompress(rawHeader).decode()

    def constructGraph(
        self,
        nodes: set[uuid.UUID],
        _readBytes: Callable[[int, int], bytes],
        universe: DimensionUniverse | None = None,
    ) -> QuantumGraph:
        # need to import here to avoid cyclic imports
        from . import QuantumGraph

        quanta: defaultdict[TaskDef, set[Quantum]] = defaultdict(set)
        quantumToNodeId: dict[Quantum, uuid.UUID] = {}
        loadedTaskDef = {}
        # loop over the nodes specified above
        for node in nodes:
            # Get the bytes to read from the map
            start, stop = self.returnValue.map[node]["bytes"]
            start += self.headerSize
            stop += self.headerSize

            # read the specified bytes (this may be overloaded by subclasses);
            # bytes are compressed, so decompress them
            dump = lzma.decompress(_readBytes(start, stop))

            # reconstruct node
            qNode = pickle.loads(dump)
            object.__setattr__(qNode, "nodeId", uuid.uuid4())

            # Read the saved node's task. If it has already been loaded,
            # attach it; if not, read in the taskDef first, and then load it.
            nodeTask = qNode.taskDef
            if nodeTask not in loadedTaskDef:
                # Get the byte ranges corresponding to this taskDef
                start, stop = self.returnValue.taskDefMap[nodeTask]["bytes"]
                start += self.headerSize
                stop += self.headerSize

                # load the taskDef (this method call may be overloaded by
                # subclasses); bytes are compressed, so decompress them
                taskDef = pickle.loads(lzma.decompress(_readBytes(start, stop)))
                loadedTaskDef[nodeTask] = taskDef
            # Explicitly override the "frozen-ness" of nodes to attach the
            # taskDef back onto the un-persisted node
            object.__setattr__(qNode, "taskDef", loadedTaskDef[nodeTask])
            quanta[qNode.taskDef].add(qNode.quantum)

            # record the node for later processing
            quantumToNodeId[qNode.quantum] = qNode.nodeId

        # construct an empty new QuantumGraph object, and run the associated
        # creation method with the un-persisted data
        qGraph = object.__new__(QuantumGraph)
        qGraph._buildGraphs(
            quanta,
            _quantumToNodeId=quantumToNodeId,
            _buildId=self.returnValue._buildId,
            metadata=self.returnValue.metadata,
            universe=universe,
        )
        return qGraph

    def description(self) -> str:
        return Version2Description


Version3Description = """
The save file starts with the first few bytes corresponding to the magic bytes
in the QuantumGraph: `qgraph4\xf6\xe8\xa9`.

The next few bytes are a big endian unsigned long long.

The unsigned long long corresponds to the number of bytes of a mapping
of header information. This mapping is encoded into json and then lzma
compressed, meaning the operations must be performed in the opposite order to
deserialize.

The json encoded header mapping contains 5 fields: GraphBuildID, TaskDefs,
Nodes, Metadata, and DimensionRecords.

The `GraphBuildID` key corresponds to a string that is the unique id assigned
to this graph when it was created.

The `TaskDefs` key corresponds to a value which is a mapping of Task label to
task data. The task data is a mapping of key to value. The keys of this mapping
are `bytes`, `inputs`, and `outputs`.

The `TaskDefs` `bytes` key corresponds to a tuple of the start, stop byte range
(indexed after all the header bytes). This byte range corresponds to a lzma
compressed json mapping. This mapping has keys of `taskName`, corresponding to
a fully qualified python class; `config`, a pex_config string that is used to
configure the class; and `label`, which corresponds to a string that uniquely
identifies the task within a given execution pipeline.

The `TaskDefs` `inputs` key is associated with a list of tuples where each
tuple is a label of a task that is considered to come before a given task, and
the name of the dataset that is shared between the tasks (think node and edge
in a graph sense).

The `TaskDefs` `outputs` key is like `inputs` except the values in the list
correspond to all the output connections of a task.

The `Nodes` key is also a json mapping with keys corresponding to the UUIDs of
QuantumNodes. The value associated with each of these keys is another mapping
with the keys `bytes`, `inputs`, and `outputs`.

The `Nodes` `bytes` key corresponds to a tuple of the start, stop byte range
(indexed after all the header bytes). These bytes are a lzma compressed json
mapping which contains many sub elements; this mapping will be referred to
as the SerializedQuantumNode (related to the python class it corresponds to).

SerializedQuantumNodes have 3 keys: `quantum`, corresponding to a json mapping
(described below) referred to as a SerializedQuantum; `taskLabel`, a string
which corresponds to a label in the `TaskDefs` mapping; and `nodeId`.

A SerializedQuantum has many keys: taskName, dataId, datasetTypeMapping,
initInputs, inputs, outputs, and dimensionRecords.

The `Nodes` mapping is like the `TaskDefs` mapping except it corresponds to
QuantumNodes instead of TaskDefs, and the keys of the mapping are string
representations of the UUIDs of the QuantumNodes.

The `Metadata` key is a mapping of strings to associated values. This metadata
may be anything that is important to be transported alongside the graph.

As stated above, each map contains byte ranges of the corresponding
data structure. These bytes are also lzma compressed json, and should
be deserialized in a similar manner.
"""


@dataclass
class DeserializerV3(DeserializerBase):
    @classmethod
    def FMT_STRING(cls) -> str:
        return ">Q"

    def __post_init__(self) -> None:
        self.infoSize: int
        (self.infoSize,) = struct.unpack(self.FMT_STRING(), self.sizeBytes)

    @property
    def headerSize(self) -> int:
        return self.preambleSize + self.structSize + self.infoSize

    def readHeaderInfo(self, rawHeader: bytes) -> SimpleNamespace:
        uncompressedinfoMap = self.unpackHeader(rawHeader)
        assert uncompressedinfoMap is not None  # for python typing; this variable can't be None
        infoMap = json.loads(uncompressedinfoMap)
        infoMappings = SimpleNamespace()
        infoMappings.taskDefMap = infoMap["TaskDefs"]
        infoMappings._buildId = infoMap["GraphBuildID"]
        infoMappings.map = {uuid.UUID(k): v for k, v in infoMap["Nodes"]}
        infoMappings.metadata = infoMap["Metadata"]
        infoMappings.dimensionRecords = {}
        for k, v in infoMap["DimensionRecords"].items():
            infoMappings.dimensionRecords[int(k)] = SerializedDimensionRecord(**v)
        # It is important that this be a get call, so that it supports
        # versions of saved quantum graphs that might not have a saved
        # universe, without changing the save format
        if (universeConfig := infoMap.get("universe")) is not None:
            universe = DimensionUniverse(config=DimensionConfig(universeConfig))
        else:
            universe = DimensionUniverse()
        infoMappings.universe = universe
        infoMappings.globalInitOutputRefs = []
        if (json_refs := infoMap.get("GlobalInitOutputRefs")) is not None:
            infoMappings.globalInitOutputRefs = [
                DatasetRef.from_json(json_ref, universe=universe) for json_ref in json_refs
            ]
        infoMappings.registryDatasetTypes = []
        if (json_refs := infoMap.get("RegistryDatasetTypes")) is not None:
            infoMappings.registryDatasetTypes = [
                DatasetType.from_json(json_ref, universe=universe) for json_ref in json_refs
            ]
        self.infoMappings = infoMappings
        return infoMappings

    def unpackHeader(self, rawHeader: bytes) -> str | None:
        return lzma.decompress(rawHeader).decode()

    def constructGraph(
        self,
        nodes: set[uuid.UUID],
        _readBytes: Callable[[int, int], bytes],
        universe: DimensionUniverse | None = None,
    ) -> QuantumGraph:
        # need to import here to avoid cyclic imports
        from . import QuantumGraph

        graph = nx.DiGraph()
        loadedTaskDef: dict[str, TaskDef] = {}
        container = {}
        datasetDict = _DatasetTracker(createInverse=True)
        taskToQuantumNode: defaultdict[TaskDef, set[QuantumNode]] = defaultdict(set)
        initInputRefs: dict[TaskDef, list[DatasetRef]] = {}
        initOutputRefs: dict[TaskDef, list[DatasetRef]] = {}

        if universe is not None:
            if not universe.isCompatibleWith(self.infoMappings.universe):
                saved = self.infoMappings.universe
                raise RuntimeError(
                    f"The saved dimension universe ({saved.namespace}@v{saved.version}) is not "
                    f"compatible with the supplied universe ({universe.namespace}@v{universe.version})."
                )
        else:
            universe = self.infoMappings.universe

        for node in nodes:
            start, stop = self.infoMappings.map[node]["bytes"]
            start, stop = start + self.headerSize, stop + self.headerSize
            # Read in the bytes corresponding to the node to load and
            # decompress them
            dump = json.loads(lzma.decompress(_readBytes(start, stop)))

            # Turn the json back into the pydantic model
            nodeDeserialized = SerializedQuantumNode.direct(**dump)
            del dump

            # Attach the dictionary of dimension records to the pydantic
            # model. These are stored separately because they are repeated
            # over and over, and storing them once saves a lot of space and
            # time.
            nodeDeserialized.quantum.dimensionRecords = self.infoMappings.dimensionRecords
            # get the label for the current task
            nodeTaskLabel = nodeDeserialized.taskLabel

            if nodeTaskLabel not in loadedTaskDef:
                # Get the byte ranges corresponding to this taskDef
                start, stop = self.infoMappings.taskDefMap[nodeTaskLabel]["bytes"]
                start, stop = start + self.headerSize, stop + self.headerSize

                # bytes are compressed, so decompress them
                taskDefDump = json.loads(lzma.decompress(_readBytes(start, stop)))
                taskClass: type[PipelineTask] = doImportType(taskDefDump["taskName"])
                config: PipelineTaskConfig = taskClass.ConfigClass()
                config.loadFromStream(taskDefDump["config"])
                # Rebuild TaskDef
                recreatedTaskDef = TaskDef(
                    taskName=taskDefDump["taskName"],
                    taskClass=taskClass,
                    config=config,
                    label=taskDefDump["label"],
                )
                loadedTaskDef[nodeTaskLabel] = recreatedTaskDef

                # initInputRefs and initOutputRefs are optional
                if (refs := taskDefDump.get("initInputRefs")) is not None:
                    initInputRefs[recreatedTaskDef] = [
                        cast(DatasetRef, DatasetRef.from_json(ref, universe=universe)) for ref in refs
                    ]
                if (refs := taskDefDump.get("initOutputRefs")) is not None:
                    initOutputRefs[recreatedTaskDef] = [
                        cast(DatasetRef, DatasetRef.from_json(ref, universe=universe)) for ref in refs
                    ]

                # rebuild the mappings that associate dataset type names with
                # TaskDefs
                for _, input in self.infoMappings.taskDefMap[nodeTaskLabel]["inputs"]:
                    datasetDict.addConsumer(DatasetTypeName(input), recreatedTaskDef)

                added = set()
                for outputConnection in self.infoMappings.taskDefMap[nodeTaskLabel]["outputs"]:
                    typeName = outputConnection[1]
                    if typeName not in added:
                        added.add(typeName)
                        datasetDict.addProducer(DatasetTypeName(typeName), recreatedTaskDef)

            # Reconstitute the node, passing in the dictionaries for the
            # loaded TaskDefs and dimension records. These are used to ensure
            # that each unique record is only loaded once.
            qnode = QuantumNode.from_simple(nodeDeserialized, loadedTaskDef, universe)
            container[qnode.nodeId] = qnode
            taskToQuantumNode[loadedTaskDef[nodeTaskLabel]].add(qnode)

            # recreate the relations between each node from stored info
            graph.add_node(qnode)
            for id in self.infoMappings.map[qnode.nodeId]["inputs"]:
                # uuid is stored as a string; turn it back into a uuid
                id = uuid.UUID(id)
                # If the id is not yet in the container, don't make a
                # connection. This is not an issue, because once it is, that
                # id will add the reverse connection.
                if id in container:
                    graph.add_edge(container[id], qnode)
            for id in self.infoMappings.map[qnode.nodeId]["outputs"]:
                # uuid is stored as a string; turn it back into a uuid
                id = uuid.UUID(id)
                # If the id is not yet in the container, don't make a
                # connection. This is not an issue, because once it is, that
                # id will add the reverse connection.
                if id in container:
                    graph.add_edge(qnode, container[id])

        newGraph = object.__new__(QuantumGraph)
        newGraph._metadata = self.infoMappings.metadata
        newGraph._buildId = self.infoMappings._buildId
        newGraph._datasetDict = datasetDict
        newGraph._nodeIdMap = container
        newGraph._count = len(nodes)
        newGraph._taskToQuantumNode = dict(taskToQuantumNode.items())
        newGraph._taskGraph = datasetDict.makeNetworkXGraph()
        newGraph._connectedQuanta = graph
        newGraph._initInputRefs = initInputRefs
        newGraph._initOutputRefs = initOutputRefs
        newGraph._globalInitOutputRefs = self.infoMappings.globalInitOutputRefs
        newGraph._registryDatasetTypes = self.infoMappings.registryDatasetTypes
        newGraph._universe = universe
        return newGraph


DESERIALIZER_MAP: dict[int, type[DeserializerBase]] = {
    1: DeserializerV1,
    2: DeserializerV2,
    3: DeserializerV3,
}
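
# A minimal sketch of dispatching on a save-format version number with
# DESERIALIZER_MAP; ``version``, ``preambleSize``, ``sizeBytes``,
# ``rawHeader``, ``nodes``, and ``_readBytes`` are hypothetical values a
# caller would have read from a save file:
#
#     deserializerClass = DESERIALIZER_MAP[version]
#     deserializer = deserializerClass(preambleSize, sizeBytes)
#     headerInfo = deserializer.readHeaderInfo(rawHeader)
#     qgraph = deserializer.constructGraph(nodes, _readBytes)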