Coverage for python/lsst/pipe/base/graph/_versionDeserializers.py: 31%
250 statements
coverage.py v7.3.2, created at 2023-11-18 10:50 +0000
# This file is part of pipe_base.
#
# Developed for the LSST Data Management System.
# This product includes software developed by the LSST Project
# (http://www.lsst.org).
# See the COPYRIGHT file at the top-level directory of this distribution
# for details of code ownership.
#
# This software is dual licensed under the GNU General Public License and also
# under a 3-clause BSD license. Recipients may choose which of these licenses
# to use; please see the files gpl-3.0.txt and/or bsd_license.txt,
# respectively. If you choose the GPL option then the following text applies
# (but note that there is still no warranty even if you opt for BSD instead):
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations

__all__ = ("DESERIALIZER_MAP",)

import json
import lzma
import pickle
import struct
import uuid
from abc import ABC, abstractmethod
from collections import defaultdict
from collections.abc import Callable
from dataclasses import dataclass
from types import SimpleNamespace
from typing import TYPE_CHECKING, ClassVar, cast

import networkx as nx
from lsst.daf.butler import (
    DatasetRef,
    DatasetType,
    DimensionConfig,
    DimensionUniverse,
    Quantum,
    SerializedDimensionRecord,
)
from lsst.utils import doImportType

from ..config import PipelineTaskConfig
from ..pipeline import TaskDef
from ..pipelineTask import PipelineTask
from ._implDetails import DatasetTypeName, _DatasetTracker
from .quantumNode import QuantumNode, SerializedQuantumNode

if TYPE_CHECKING:
    from .graph import QuantumGraph


class StructSizeDescriptor:
    """A class-level property that reports the size (number of bytes) of
    a deserializer's struct format string.
    """

    def __get__(self, inst: DeserializerBase | None, owner: type[DeserializerBase]) -> int:
        return struct.calcsize(owner.FMT_STRING())
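
# Illustrative note (editor's addition, not part of the original module):
# for DeserializerV1 below, FMT_STRING() returns ">QQ" (two big-endian
# unsigned 64-bit integers), so ``DeserializerV1.structSize`` evaluates to
# struct.calcsize(">QQ") == 16, while the ">Q" format used by
# DeserializerV2/V3 yields 8.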


@dataclass
class DeserializerBase(ABC):
    @classmethod
    @abstractmethod
    def FMT_STRING(cls) -> str:
        raise NotImplementedError("Base class does not implement this method")

    structSize: ClassVar[StructSizeDescriptor]

    preambleSize: int
    sizeBytes: bytes

    def __init_subclass__(cls) -> None:
        # attach the size descriptor
        cls.structSize = StructSizeDescriptor()
        super().__init_subclass__()

    def unpackHeader(self, rawHeader: bytes) -> str | None:
        """Transform the raw bytes corresponding to the header of a save
        into a string of the header information.

        Parameters
        ----------
        rawHeader : bytes
            The bytes that are to be parsed into the header information.
            These are the bytes after the preamble and struct-size bytes
            and before the headerSize bytes.

        Returns
        -------
        header : `str` or `None`
            Header information as a string. Returns `None` if the save
            format has no header string implementation (such as save
            format 1, which is all pickle).
        """
        raise NotImplementedError("Base class does not implement this method")
    @property
    def headerSize(self) -> int:
        """The number of bytes from the beginning of the file to the end
        of the metadata.
        """
        raise NotImplementedError("Base class does not implement this method")

    def readHeaderInfo(self, rawHeader: bytes) -> SimpleNamespace:
        """Parse the supplied raw bytes into the header information and
        the byte ranges of specific TaskDefs and QuantumNodes.

        Parameters
        ----------
        rawHeader : bytes
            The bytes that are to be parsed into the header information.
            These are the bytes after the preamble and struct-size bytes
            and before the headerSize bytes.
        """
        raise NotImplementedError("Base class does not implement this method")

    def constructGraph(
        self,
        nodes: set[uuid.UUID],
        _readBytes: Callable[[int, int], bytes],
        universe: DimensionUniverse | None = None,
    ) -> QuantumGraph:
        """Construct a graph from the deserialized information.

        Parameters
        ----------
        nodes : `set` of `uuid.UUID`
            The nodes to include in the graph.
        _readBytes : callable
            A callable that can be used to read bytes from the file
            handle. The callable takes two ints, start and stop, to use
            as the numerical bounds to read, and returns a byte stream.
        universe : `~lsst.daf.butler.DimensionUniverse`, optional
            The singleton of all dimensions known to the middleware
            registry.
        """
        raise NotImplementedError("Base class does not implement this method")

    def description(self) -> str:
        """Return the description of the serialized data format."""
        raise NotImplementedError("Base class does not implement this method")


Version1Description = """
The save file starts with the first few bytes corresponding to the magic bytes
in the QuantumGraph: `qgraph4\xf6\xe8\xa9`.

The next few bytes are 2 big endian unsigned 64 bit integers.

The first unsigned 64 bit integer corresponds to the number of bytes of a
python mapping of TaskDef labels to the byte ranges in the save file where the
definition can be loaded.

The second unsigned 64 bit integer corresponds to the number of bytes of a
python mapping of QuantumGraph node number to the byte ranges in the save file
where the node can be loaded. The byte range is indexed starting after
the `header` bytes of the magic bytes, size bytes, and bytes of the two
mappings.

Each of the above mappings is pickled and then lzma compressed, so to
deserialize the bytes, lzma decompression must be performed first and the
result passed to the python pickle loader.

As stated above, each map contains byte ranges of the corresponding
data structure. These bytes are also lzma compressed pickles, and should
be deserialized in a similar manner. The byte range is indexed starting after
the `header` bytes of the magic bytes, size bytes, and bytes of the two
mappings.

In addition to the TaskDef byte locations, the TaskDef map also contains
an additional key '__GraphBuildID'. The value associated with this key is the
unique id assigned to the graph at its creation time.
"""


@dataclass
class DeserializerV1(DeserializerBase):
    @classmethod
    def FMT_STRING(cls) -> str:
        return ">QQ"

    def __post_init__(self) -> None:
        self.taskDefMapSize, self.nodeMapSize = struct.unpack(self.FMT_STRING(), self.sizeBytes)

    @property
    def headerSize(self) -> int:
        return self.preambleSize + self.structSize + self.taskDefMapSize + self.nodeMapSize

    def readHeaderInfo(self, rawHeader: bytes) -> SimpleNamespace:
        returnValue = SimpleNamespace()
        returnValue.taskDefMap = pickle.loads(rawHeader[: self.taskDefMapSize])
        returnValue._buildId = returnValue.taskDefMap["__GraphBuildID"]
        returnValue.map = pickle.loads(rawHeader[self.taskDefMapSize :])
        returnValue.metadata = None
        self.returnValue = returnValue
        return returnValue

    def unpackHeader(self, rawHeader: bytes) -> str | None:
        return None

    def constructGraph(
        self,
        nodes: set[uuid.UUID],
        _readBytes: Callable[[int, int], bytes],
        universe: DimensionUniverse | None = None,
    ) -> QuantumGraph:
        # need to import here to avoid cyclic imports
        from . import QuantumGraph

        quanta: defaultdict[TaskDef, set[Quantum]] = defaultdict(set)
        quantumToNodeId: dict[Quantum, uuid.UUID] = {}
        loadedTaskDef = {}
        # loop over the nodes specified above
        for node in nodes:
            # Get the byte range to read from the map
            start, stop = self.returnValue.map[node]
            start += self.headerSize
            stop += self.headerSize

            # read the specified bytes (this call may be overloaded by
            # subclasses); the bytes are compressed, so decompress them
            dump = lzma.decompress(_readBytes(start, stop))

            # reconstruct node
            qNode = pickle.loads(dump)
            object.__setattr__(qNode, "nodeId", uuid.uuid4())

            # Look up the saved node's task. If it has already been
            # loaded, attach it; if not, read in the taskDef first and
            # then attach it.
            nodeTask = qNode.taskDef
            if nodeTask not in loadedTaskDef:
                # Get the byte ranges corresponding to this taskDef
                start, stop = self.returnValue.taskDefMap[nodeTask]
                start += self.headerSize
                stop += self.headerSize

                # load the taskDef (this call may be overloaded by
                # subclasses); the bytes are compressed, so decompress them
                taskDef = pickle.loads(lzma.decompress(_readBytes(start, stop)))
                loadedTaskDef[nodeTask] = taskDef
            # Explicitly override the "frozen-ness" of nodes to attach the
            # taskDef back onto the un-persisted node
            object.__setattr__(qNode, "taskDef", loadedTaskDef[nodeTask])
            quanta[qNode.taskDef].add(qNode.quantum)

            # record the node for later processing
            quantumToNodeId[qNode.quantum] = qNode.nodeId

        # construct an empty new QuantumGraph object and run the associated
        # creation method with the un-persisted data
        qGraph = object.__new__(QuantumGraph)
        qGraph._buildGraphs(
            quanta,
            _quantumToNodeId=quantumToNodeId,
            _buildId=self.returnValue._buildId,
            metadata=self.returnValue.metadata,
            universe=universe,
        )
        return qGraph

    def description(self) -> str:
        return Version1Description


Version2Description = """
The save file starts with the first few bytes corresponding to the magic bytes
in the QuantumGraph: `qgraph4\xf6\xe8\xa9`.

The next few bytes are a big endian unsigned long long.

The unsigned long long corresponds to the number of bytes of a python mapping
of header information. This mapping is encoded into json and then lzma
compressed, meaning the operations must be performed in the opposite order to
deserialize.

The json encoded header mapping contains 4 fields: TaskDefs, GraphBuildID,
Nodes, and Metadata.

The `TaskDefs` key corresponds to a value which is a mapping of task label to
task data. The task data is a mapping of key to value, where the only key is
`bytes`, which corresponds to a tuple of the start, stop byte range (indexed
after all the header bytes).

The `GraphBuildID` key corresponds to a string that is the unique id assigned
to this graph when it was created.

The `Nodes` key is like the `TaskDefs` key except that it corresponds to
QuantumNodes instead of TaskDefs. Another important difference is that JSON
formatting does not allow using numbers as keys, and this mapping is keyed by
the node number. Thus it is stored in JSON as two equal length lists, the
first being the keys, and the second the values associated with those keys.

The `Metadata` key is a mapping of strings to associated values. This metadata
may be anything that is important to be transported alongside the graph.

As stated above, each map contains byte ranges of the corresponding
data structure. These bytes are also lzma compressed pickles, and should
be deserialized in a similar manner.
"""


@dataclass
class DeserializerV2(DeserializerBase):
    @classmethod
    def FMT_STRING(cls) -> str:
        return ">Q"

    def __post_init__(self) -> None:
        (self.mapSize,) = struct.unpack(self.FMT_STRING(), self.sizeBytes)

    @property
    def headerSize(self) -> int:
        return self.preambleSize + self.structSize + self.mapSize

    def readHeaderInfo(self, rawHeader: bytes) -> SimpleNamespace:
        uncompressedHeaderMap = self.unpackHeader(rawHeader)
        if uncompressedHeaderMap is None:
            raise ValueError(
                "This error is not possible because self.unpackHeader cannot return None,"
                " but is done to satisfy type checkers"
            )
        header = json.loads(uncompressedHeaderMap)
        returnValue = SimpleNamespace()
        returnValue.taskDefMap = header["TaskDefs"]
        returnValue._buildId = header["GraphBuildID"]
        returnValue.map = dict(header["Nodes"])
        returnValue.metadata = header["Metadata"]
        self.returnValue = returnValue
        return returnValue

    def unpackHeader(self, rawHeader: bytes) -> str | None:
        return lzma.decompress(rawHeader).decode()

    def constructGraph(
        self,
        nodes: set[uuid.UUID],
        _readBytes: Callable[[int, int], bytes],
        universe: DimensionUniverse | None = None,
    ) -> QuantumGraph:
        # need to import here to avoid cyclic imports
        from . import QuantumGraph

        quanta: defaultdict[TaskDef, set[Quantum]] = defaultdict(set)
        quantumToNodeId: dict[Quantum, uuid.UUID] = {}
        loadedTaskDef = {}
        # loop over the nodes specified above
        for node in nodes:
            # Get the byte range to read from the map
            start, stop = self.returnValue.map[node]["bytes"]
            start += self.headerSize
            stop += self.headerSize

            # read the specified bytes (this call may be overloaded by
            # subclasses); the bytes are compressed, so decompress them
            dump = lzma.decompress(_readBytes(start, stop))

            # reconstruct node
            qNode = pickle.loads(dump)
            object.__setattr__(qNode, "nodeId", uuid.uuid4())

            # Look up the saved node's task. If it has already been
            # loaded, attach it; if not, read in the taskDef first and
            # then attach it.
            nodeTask = qNode.taskDef
            if nodeTask not in loadedTaskDef:
                # Get the byte ranges corresponding to this taskDef
                start, stop = self.returnValue.taskDefMap[nodeTask]["bytes"]
                start += self.headerSize
                stop += self.headerSize

                # load the taskDef (this call may be overloaded by
                # subclasses); the bytes are compressed, so decompress them
                taskDef = pickle.loads(lzma.decompress(_readBytes(start, stop)))
                loadedTaskDef[nodeTask] = taskDef
            # Explicitly override the "frozen-ness" of nodes to attach the
            # taskDef back onto the un-persisted node
            object.__setattr__(qNode, "taskDef", loadedTaskDef[nodeTask])
            quanta[qNode.taskDef].add(qNode.quantum)

            # record the node for later processing
            quantumToNodeId[qNode.quantum] = qNode.nodeId

        # construct an empty new QuantumGraph object and run the associated
        # creation method with the un-persisted data
        qGraph = object.__new__(QuantumGraph)
        qGraph._buildGraphs(
            quanta,
            _quantumToNodeId=quantumToNodeId,
            _buildId=self.returnValue._buildId,
            metadata=self.returnValue.metadata,
            universe=universe,
        )
        return qGraph

    def description(self) -> str:
        return Version2Description


Version3Description = """
The save file starts with the first few bytes corresponding to the magic bytes
in the QuantumGraph: `qgraph4\xf6\xe8\xa9`.

The next few bytes are a big endian unsigned long long.

The unsigned long long corresponds to the number of bytes of a mapping
of header information. This mapping is encoded into json and then lzma
compressed, meaning the operations must be performed in the opposite order to
deserialize.

The json encoded header mapping contains 5 fields: GraphBuildID, TaskDefs,
Nodes, Metadata, and DimensionRecords.

The `GraphBuildID` key corresponds to a string that is the unique id assigned
to this graph when it was created.

The `TaskDefs` key corresponds to a value which is a mapping of task label to
task data. The task data is a mapping of key to value. The keys of this
mapping are `bytes`, `inputs`, and `outputs`.

The `TaskDefs` `bytes` key corresponds to a tuple of the start, stop byte
range (indexed after all the header bytes). This byte range corresponds to an
lzma compressed json mapping. This mapping has the keys `taskName`,
corresponding to a fully qualified python class; `config`, a pex_config
string that is used to configure the class; and `label`, which corresponds to
a string that uniquely identifies the task within a given execution pipeline.

The `TaskDefs` `inputs` key is associated with a list of tuples where each
tuple is the label of a task that comes before the given task, and the name
of the dataset that is shared between the tasks (think node and edge in a
graph sense).

The `TaskDefs` `outputs` key is like `inputs` except the values in the list
correspond to all the output connections of a task.

The `Nodes` key is also a json mapping with keys corresponding to the UUIDs
of QuantumNodes. The value associated with each key is another mapping with
the keys `bytes`, `inputs`, and `outputs`.

The `Nodes` `bytes` key corresponds to a tuple of the start, stop byte range
(indexed after all the header bytes). These bytes are an lzma compressed json
mapping which contains many sub elements; this mapping will be referred to as
a SerializedQuantumNode (related to the python class it corresponds to).

SerializedQuantumNodes have 3 keys: `quantum`, corresponding to a json
mapping (described below) referred to as a SerializedQuantum; `taskLabel`, a
string which corresponds to a label in the `TaskDefs` mapping; and `nodeId`.

A SerializedQuantum has many keys: taskName, dataId, datasetTypeMapping,
initInputs, inputs, outputs, and dimensionRecords.

The `Nodes` `inputs` and `outputs` keys are like those of `TaskDefs` except
that they refer to QuantumNodes instead of TaskDefs: each is a list of string
representations of the UUIDs of the connected QuantumNodes.

The `Metadata` key is a mapping of strings to associated values. This metadata
may be anything that is important to be transported alongside the graph.

As stated above, each map contains byte ranges of the corresponding
data structure. These bytes are lzma compressed json, and should
be deserialized in a similar manner.
"""


@dataclass
class DeserializerV3(DeserializerBase):
    @classmethod
    def FMT_STRING(cls) -> str:
        return ">Q"

    def __post_init__(self) -> None:
        self.infoSize: int
        (self.infoSize,) = struct.unpack(self.FMT_STRING(), self.sizeBytes)

    @property
    def headerSize(self) -> int:
        return self.preambleSize + self.structSize + self.infoSize

    def readHeaderInfo(self, rawHeader: bytes) -> SimpleNamespace:
        uncompressedinfoMap = self.unpackHeader(rawHeader)
        assert uncompressedinfoMap is not None  # for python typing; this variable can't be None
        infoMap = json.loads(uncompressedinfoMap)
        infoMappings = SimpleNamespace()
        infoMappings.taskDefMap = infoMap["TaskDefs"]
        infoMappings._buildId = infoMap["GraphBuildID"]
        infoMappings.map = {uuid.UUID(k): v for k, v in infoMap["Nodes"]}
        infoMappings.metadata = infoMap["Metadata"]
        infoMappings.dimensionRecords = {}
        for k, v in infoMap["DimensionRecords"].items():
            infoMappings.dimensionRecords[int(k)] = SerializedDimensionRecord(**v)
        # It is important that this be a get call, so that it supports
        # versions of saved quantum graphs that might not have a saved
        # universe, without changing the save format
        if (universeConfig := infoMap.get("universe")) is not None:
            universe = DimensionUniverse(config=DimensionConfig(universeConfig))
        else:
            universe = DimensionUniverse()
        infoMappings.universe = universe
        infoMappings.globalInitOutputRefs = []
        if (json_refs := infoMap.get("GlobalInitOutputRefs")) is not None:
            infoMappings.globalInitOutputRefs = [
                DatasetRef.from_json(json_ref, universe=universe) for json_ref in json_refs
            ]
        infoMappings.registryDatasetTypes = []
        if (json_refs := infoMap.get("RegistryDatasetTypes")) is not None:
            infoMappings.registryDatasetTypes = [
                DatasetType.from_json(json_ref, universe=universe) for json_ref in json_refs
            ]
        self.infoMappings = infoMappings
        return infoMappings

    def unpackHeader(self, rawHeader: bytes) -> str | None:
        return lzma.decompress(rawHeader).decode()
    def constructGraph(
        self,
        nodes: set[uuid.UUID],
        _readBytes: Callable[[int, int], bytes],
        universe: DimensionUniverse | None = None,
    ) -> QuantumGraph:
        # need to import here to avoid cyclic imports
        from . import QuantumGraph

        graph = nx.DiGraph()
        loadedTaskDef: dict[str, TaskDef] = {}
        container = {}
        datasetDict = _DatasetTracker(createInverse=True)
        taskToQuantumNode: defaultdict[TaskDef, set[QuantumNode]] = defaultdict(set)
        initInputRefs: dict[TaskDef, list[DatasetRef]] = {}
        initOutputRefs: dict[TaskDef, list[DatasetRef]] = {}

        if universe is not None:
            if not universe.isCompatibleWith(self.infoMappings.universe):
                saved = self.infoMappings.universe
                raise RuntimeError(
                    f"The saved dimension universe ({saved.namespace}@v{saved.version}) is not "
                    f"compatible with the supplied universe ({universe.namespace}@v{universe.version})."
                )
        else:
            universe = self.infoMappings.universe

        for node in nodes:
            start, stop = self.infoMappings.map[node]["bytes"]
            start, stop = start + self.headerSize, stop + self.headerSize
            # Read in the bytes corresponding to the node to load and
            # decompress them
            dump = json.loads(lzma.decompress(_readBytes(start, stop)))

            # Turn the json back into the pydantic model
            nodeDeserialized = SerializedQuantumNode.direct(**dump)
            del dump

            # Attach the dictionary of dimension records to the pydantic
            # model. These are stored separately because they are repeated
            # over and over, and keeping one copy saves a lot of space and
            # time.
            nodeDeserialized.quantum.dimensionRecords = self.infoMappings.dimensionRecords
            # get the label for the current task
            nodeTaskLabel = nodeDeserialized.taskLabel

            if nodeTaskLabel not in loadedTaskDef:
                # Get the byte ranges corresponding to this taskDef
                start, stop = self.infoMappings.taskDefMap[nodeTaskLabel]["bytes"]
                start, stop = start + self.headerSize, stop + self.headerSize

                # bytes are compressed, so decompress them
                taskDefDump = json.loads(lzma.decompress(_readBytes(start, stop)))
                taskClass: type[PipelineTask] = doImportType(taskDefDump["taskName"])
                config: PipelineTaskConfig = taskClass.ConfigClass()
                config.loadFromStream(taskDefDump["config"])
                # Rebuild TaskDef
                recreatedTaskDef = TaskDef(
                    taskName=taskDefDump["taskName"],
                    taskClass=taskClass,
                    config=config,
                    label=taskDefDump["label"],
                )
                loadedTaskDef[nodeTaskLabel] = recreatedTaskDef

                # initInputRefs and initOutputRefs are optional
                if (refs := taskDefDump.get("initInputRefs")) is not None:
                    initInputRefs[recreatedTaskDef] = [
                        cast(DatasetRef, DatasetRef.from_json(ref, universe=universe)) for ref in refs
                    ]
                if (refs := taskDefDump.get("initOutputRefs")) is not None:
                    initOutputRefs[recreatedTaskDef] = [
                        cast(DatasetRef, DatasetRef.from_json(ref, universe=universe)) for ref in refs
                    ]

                # rebuild the mappings that associate dataset type names
                # with TaskDefs
                for _, input in self.infoMappings.taskDefMap[nodeTaskLabel]["inputs"]:
                    datasetDict.addConsumer(DatasetTypeName(input), recreatedTaskDef)

                added = set()
                for outputConnection in self.infoMappings.taskDefMap[nodeTaskLabel]["outputs"]:
                    typeName = outputConnection[1]
                    if typeName not in added:
                        added.add(typeName)
                        datasetDict.addProducer(DatasetTypeName(typeName), recreatedTaskDef)

            # Reconstitute the node, passing in the dictionaries for the
            # loaded TaskDefs and dimension records. These are used to
            # ensure that each unique record is only loaded once.
            qnode = QuantumNode.from_simple(nodeDeserialized, loadedTaskDef, universe)
            container[qnode.nodeId] = qnode
            taskToQuantumNode[loadedTaskDef[nodeTaskLabel]].add(qnode)

            # recreate the relations between each node from stored info
            graph.add_node(qnode)
            for id in self.infoMappings.map[qnode.nodeId]["inputs"]:
                # uuid is stored as a string; turn it back into a uuid
                id = uuid.UUID(id)
                # If the id is not yet in the container, don't make a
                # connection. This is not a problem: once it is, that id
                # will add the reverse connection.
                if id in container:
                    graph.add_edge(container[id], qnode)
            for id in self.infoMappings.map[qnode.nodeId]["outputs"]:
                # uuid is stored as a string; turn it back into a uuid
                id = uuid.UUID(id)
                # If the id is not yet in the container, don't make a
                # connection. This is not a problem: once it is, that id
                # will add the reverse connection.
                if id in container:
                    graph.add_edge(qnode, container[id])

        newGraph = object.__new__(QuantumGraph)
        newGraph._metadata = self.infoMappings.metadata
        newGraph._buildId = self.infoMappings._buildId
        newGraph._datasetDict = datasetDict
        newGraph._nodeIdMap = container
        newGraph._count = len(nodes)
        newGraph._taskToQuantumNode = dict(taskToQuantumNode.items())
        newGraph._taskGraph = datasetDict.makeNetworkXGraph()
        newGraph._connectedQuanta = graph
        newGraph._initInputRefs = initInputRefs
        newGraph._initOutputRefs = initOutputRefs
        newGraph._globalInitOutputRefs = self.infoMappings.globalInitOutputRefs
        newGraph._registryDatasetTypes = self.infoMappings.registryDatasetTypes
        newGraph._universe = universe
        return newGraph


DESERIALIZER_MAP: dict[int, type[DeserializerBase]] = {
    1: DeserializerV1,
    2: DeserializerV2,
    3: DeserializerV3,
}
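
# Illustrative sketch (editor's addition, not part of the original module):
# a loader is expected to select a deserializer class by the save-format
# version read from the file and construct it with the preamble size and the
# raw size bytes. The variable names here are hypothetical:
#
#     deserializerClass = DESERIALIZER_MAP[saveVersion]
#     deserializer = deserializerClass(preambleSize, sizeBytes)
#     headerInfo = deserializer.readHeaderInfo(rawHeader)
#     qgraph = deserializer.constructGraph(nodeIds, readBytes, universe)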