Coverage for python/lsst/pipe/base/graph/_versionDeserializers.py: 31%
250 statements

# This file is part of pipe_base.
#
# Developed for the LSST Data Management System.
# This product includes software developed by the LSST Project
# (http://www.lsst.org).
# See the COPYRIGHT file at the top-level directory of this distribution
# for details of code ownership.
#
# This software is dual licensed under the GNU General Public License and also
# under a 3-clause BSD license. Recipients may choose which of these licenses
# to use; please see the files gpl-3.0.txt and/or bsd_license.txt,
# respectively. If you choose the GPL option then the following text applies
# (but note that there is still no warranty even if you opt for BSD instead):
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations

__all__ = ("DESERIALIZER_MAP",)

import json
import lzma
import pickle
import struct
import uuid
from abc import ABC, abstractmethod
from collections import defaultdict
from collections.abc import Callable
from dataclasses import dataclass
from types import SimpleNamespace
from typing import TYPE_CHECKING, ClassVar, cast

import networkx as nx
from lsst.daf.butler import (
    DatasetRef,
    DatasetType,
    DimensionConfig,
    DimensionUniverse,
    Quantum,
    SerializedDimensionRecord,
)
from lsst.utils import doImportType

from ..config import PipelineTaskConfig
from ..pipeline import TaskDef
from ..pipelineTask import PipelineTask
from ._implDetails import DatasetTypeName, _DatasetTracker
from .quantumNode import QuantumNode, SerializedQuantumNode

if TYPE_CHECKING:
    from .graph import QuantumGraph


class StructSizeDescriptor:
    """Class-level property. It exists to report the size
    (number of bytes) of whatever the format string is for a deserializer.
    """

    def __get__(self, inst: DeserializerBase | None, owner: type[DeserializerBase]) -> int:
        return struct.calcsize(owner.FMT_STRING())
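

# For example, given the format strings defined on the concrete
# deserializers below, the descriptor evaluates at class scope:
#
#     DeserializerV1.structSize == struct.calcsize(">QQ")  # 16 bytes
#     DeserializerV2.structSize == struct.calcsize(">Q")   # 8 bytes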


@dataclass
class DeserializerBase(ABC):
    @classmethod
    @abstractmethod
    def FMT_STRING(cls) -> str:
        raise NotImplementedError("Base class does not implement this method")

    structSize: ClassVar[StructSizeDescriptor]

    preambleSize: int
    sizeBytes: bytes

    def __init_subclass__(cls) -> None:
        # attach the size descriptor
        cls.structSize = StructSizeDescriptor()
        super().__init_subclass__()

    def unpackHeader(self, rawHeader: bytes) -> str | None:
        """Transform the raw bytes corresponding to the header of a save into
        a string of the header information.

        Parameters
        ----------
        rawHeader : bytes
            The bytes that are to be parsed into the header information. These
            are the bytes after the preamble and structSize number of bytes
            and before the headerSize bytes.

        Returns
        -------
        header : `str` or `None`
            Header information as a string. Returns `None` if the save format
            has no header string implementation (such as save format 1, which
            is all pickle).
        """
        raise NotImplementedError("Base class does not implement this method")

    @property
    def headerSize(self) -> int:
        """Return the number of bytes from the beginning of the file to the
        end of the metadata.
        """
        raise NotImplementedError("Base class does not implement this method")

    def readHeaderInfo(self, rawHeader: bytes) -> SimpleNamespace:
        """Parse the supplied raw bytes into the header information and
        byte ranges of specific TaskDefs and QuantumNodes.

        Parameters
        ----------
        rawHeader : bytes
            The bytes that are to be parsed into the header information. These
            are the bytes after the preamble and structSize number of bytes
            and before the headerSize bytes.
        """
        raise NotImplementedError("Base class does not implement this method")

    def constructGraph(
        self,
        nodes: set[uuid.UUID],
        _readBytes: Callable[[int, int], bytes],
        universe: DimensionUniverse | None = None,
    ) -> QuantumGraph:
        """Construct a graph from the deserialized information.

        Parameters
        ----------
        nodes : `set` of `uuid.UUID`
            The nodes to include in the graph.
        _readBytes : callable
            A callable that can be used to read bytes from the file handle.
            The callable will take two ints, start and stop, to use as the
            numerical bounds to read, and returns a byte stream.
        universe : `~lsst.daf.butler.DimensionUniverse`
            The singleton of all dimensions known to the middleware registry.
        """
        raise NotImplementedError("Base class does not implement this method")

    def description(self) -> str:
        """Return the description of the serialized data format.

        Returns
        -------
        desc : `str`
            Description of serialized data format.
        """
        raise NotImplementedError("Base class does not implement this method")
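

# A minimal sketch of a ``_readBytes`` callable satisfying the interface that
# ``constructGraph`` documents above, assuming an open binary stream; the
# actual callable is supplied by the QuantumGraph load machinery, and
# ``_example_make_read_bytes`` is a hypothetical helper shown for
# illustration only.
def _example_make_read_bytes(stream) -> Callable[[int, int], bytes]:
    def _readBytes(start: int, stop: int) -> bytes:
        # seek to the absolute start offset and read up to stop
        stream.seek(start)
        return stream.read(stop - start)

    return _readBytes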


Version1Description = """
The save file starts with the first few bytes corresponding to the magic bytes
in the QuantumGraph: `qgraph4\xf6\xe8\xa9`.

The next few bytes are 2 big endian unsigned 64 bit integers.

The first unsigned 64 bit integer corresponds to the number of bytes of a
python mapping of TaskDef labels to the byte ranges in the save file where the
definition can be loaded.

The second unsigned 64 bit integer corresponds to the number of bytes of a
python mapping of QuantumGraph Node number to the byte ranges in the save file
where the node can be loaded. The byte range is indexed starting after
the `header` bytes of the magic bytes, size bytes, and bytes of the two
mappings.

Each of the above mappings is pickled and then lzma compressed, so to
deserialize the bytes, lzma decompression must be performed first and the
result passed to the python pickle loader.

As stated above, each map contains byte ranges of the corresponding
datastructure. These bytes are also lzma compressed pickles, and should
be deserialized in a similar manner. The byte range is indexed starting after
the `header` bytes of the magic bytes, size bytes, and bytes of the two
mappings.

In addition to the TaskDef byte locations, the TaskDef map also contains
an additional key '__GraphBuildID'. The value associated with this is the
unique id assigned to the graph at its creation time.
"""


@dataclass
class DeserializerV1(DeserializerBase):
    @classmethod
    def FMT_STRING(cls) -> str:
        return ">QQ"

    def __post_init__(self) -> None:
        self.taskDefMapSize, self.nodeMapSize = struct.unpack(self.FMT_STRING(), self.sizeBytes)

    @property
    def headerSize(self) -> int:
        return self.preambleSize + self.structSize + self.taskDefMapSize + self.nodeMapSize

    def readHeaderInfo(self, rawHeader: bytes) -> SimpleNamespace:
        returnValue = SimpleNamespace()
        returnValue.taskDefMap = pickle.loads(rawHeader[: self.taskDefMapSize])
        returnValue._buildId = returnValue.taskDefMap["__GraphBuildID"]
        returnValue.map = pickle.loads(rawHeader[self.taskDefMapSize :])
        returnValue.metadata = None
        self.returnValue = returnValue
        return returnValue

    def unpackHeader(self, rawHeader: bytes) -> str | None:
        return None

    def constructGraph(
        self,
        nodes: set[uuid.UUID],
        _readBytes: Callable[[int, int], bytes],
        universe: DimensionUniverse | None = None,
    ) -> QuantumGraph:
        # need to import here to avoid cyclic imports
        from . import QuantumGraph

        quanta: defaultdict[TaskDef, set[Quantum]] = defaultdict(set)
        quantumToNodeId: dict[Quantum, uuid.UUID] = {}
        loadedTaskDef = {}
        # loop over the nodes specified above
        for node in nodes:
            # Get the bytes to read from the map
            start, stop = self.returnValue.map[node]
            start += self.headerSize
            stop += self.headerSize

            # read the specified bytes; they are compressed, so decompress
            # them
            dump = lzma.decompress(_readBytes(start, stop))

            # reconstruct node
            qNode = pickle.loads(dump)
            object.__setattr__(qNode, "nodeId", uuid.uuid4())

            # look up the saved node's taskDef. If it has been loaded, attach
            # it; if not, read in the taskDef first, and then load it
            nodeTask = qNode.taskDef
            if nodeTask not in loadedTaskDef:
                # Get the byte ranges corresponding to this taskDef
                start, stop = self.returnValue.taskDefMap[nodeTask]
                start += self.headerSize
                stop += self.headerSize

                # load the taskDef; the bytes are compressed, so decompress
                # them
                taskDef = pickle.loads(lzma.decompress(_readBytes(start, stop)))
                loadedTaskDef[nodeTask] = taskDef
            # Explicitly override the "frozen-ness" of nodes to attach the
            # taskDef back onto the un-persisted node
            object.__setattr__(qNode, "taskDef", loadedTaskDef[nodeTask])
            quanta[qNode.taskDef].add(qNode.quantum)

            # record the node for later processing
            quantumToNodeId[qNode.quantum] = qNode.nodeId

        # construct an empty new QuantumGraph object, and run the associated
        # creation method with the un-persisted data
        qGraph = object.__new__(QuantumGraph)
        qGraph._buildGraphs(
            quanta,
            _quantumToNodeId=quantumToNodeId,
            _buildId=self.returnValue._buildId,
            metadata=self.returnValue.metadata,
            universe=universe,
        )
        return qGraph

    def description(self) -> str:
        return Version1Description


Version2Description = """
The save file starts with the first few bytes corresponding to the magic bytes
in the QuantumGraph: `qgraph4\xf6\xe8\xa9`.

The next few bytes are a big endian unsigned long long.

The unsigned long long corresponds to the number of bytes of a python mapping
of header information. This mapping is encoded into json and then lzma
compressed, meaning the operations must be performed in the opposite order to
deserialize.

The json encoded header mapping contains 4 fields: TaskDefs, GraphBuildID,
Nodes, and Metadata.

The `TaskDefs` key corresponds to a value which is a mapping of Task label to
task data. The task data is a mapping of key to value, where the only key is
`bytes`, and it corresponds to a tuple of the start, stop byte range
(indexed after all the header bytes).

The `GraphBuildID` key corresponds with a string that is the unique id assigned
to this graph when it was created.

The `Nodes` key is like the `TaskDefs` key except it corresponds to
QuantumNodes instead of TaskDefs. Another important difference is that JSON
formatting does not allow using numbers as keys, and this mapping is keyed by
the node number. Thus it is stored in JSON as two equal length lists, the first
being the keys, and the second the values associated with those keys.

The `Metadata` key is a mapping of strings to associated values. This metadata
may be anything that is important to be transported alongside the graph.

As stated above, each map contains byte ranges of the corresponding
datastructure. These bytes are also lzma compressed pickles, and should
be deserialized in a similar manner.
"""


@dataclass
class DeserializerV2(DeserializerBase):
    @classmethod
    def FMT_STRING(cls) -> str:
        return ">Q"

    def __post_init__(self) -> None:
        (self.mapSize,) = struct.unpack(self.FMT_STRING(), self.sizeBytes)

    @property
    def headerSize(self) -> int:
        return self.preambleSize + self.structSize + self.mapSize

    def readHeaderInfo(self, rawHeader: bytes) -> SimpleNamespace:
        uncompressedHeaderMap = self.unpackHeader(rawHeader)
        if uncompressedHeaderMap is None:
            raise ValueError(
                "This error should not be possible, because self.unpackHeader cannot return None;"
                " the check exists to satisfy type checkers"
            )
        header = json.loads(uncompressedHeaderMap)
        returnValue = SimpleNamespace()
        returnValue.taskDefMap = header["TaskDefs"]
        returnValue._buildId = header["GraphBuildID"]
        returnValue.map = dict(header["Nodes"])
        returnValue.metadata = header["Metadata"]
        self.returnValue = returnValue
        return returnValue

    def unpackHeader(self, rawHeader: bytes) -> str | None:
        return lzma.decompress(rawHeader).decode()

    def constructGraph(
        self,
        nodes: set[uuid.UUID],
        _readBytes: Callable[[int, int], bytes],
        universe: DimensionUniverse | None = None,
    ) -> QuantumGraph:
        # need to import here to avoid cyclic imports
        from . import QuantumGraph

        quanta: defaultdict[TaskDef, set[Quantum]] = defaultdict(set)
        quantumToNodeId: dict[Quantum, uuid.UUID] = {}
        loadedTaskDef = {}
        # loop over the nodes specified above
        for node in nodes:
            # Get the bytes to read from the map
            start, stop = self.returnValue.map[node]["bytes"]
            start += self.headerSize
            stop += self.headerSize

            # read the specified bytes; they are compressed, so decompress
            # them
            dump = lzma.decompress(_readBytes(start, stop))

            # reconstruct node
            qNode = pickle.loads(dump)
            object.__setattr__(qNode, "nodeId", uuid.uuid4())

            # look up the saved node's taskDef. If it has been loaded, attach
            # it; if not, read in the taskDef first, and then load it
            nodeTask = qNode.taskDef
            if nodeTask not in loadedTaskDef:
                # Get the byte ranges corresponding to this taskDef
                start, stop = self.returnValue.taskDefMap[nodeTask]["bytes"]
                start += self.headerSize
                stop += self.headerSize

                # load the taskDef; the bytes are compressed, so decompress
                # them
                taskDef = pickle.loads(lzma.decompress(_readBytes(start, stop)))
                loadedTaskDef[nodeTask] = taskDef
            # Explicitly override the "frozen-ness" of nodes to attach the
            # taskDef back onto the un-persisted node
            object.__setattr__(qNode, "taskDef", loadedTaskDef[nodeTask])
            quanta[qNode.taskDef].add(qNode.quantum)

            # record the node for later processing
            quantumToNodeId[qNode.quantum] = qNode.nodeId

        # construct an empty new QuantumGraph object, and run the associated
        # creation method with the un-persisted data
        qGraph = object.__new__(QuantumGraph)
        qGraph._buildGraphs(
            quanta,
            _quantumToNodeId=quantumToNodeId,
            _buildId=self.returnValue._buildId,
            metadata=self.returnValue.metadata,
            universe=universe,
        )
        return qGraph

    def description(self) -> str:
        return Version2Description


Version3Description = """
The save file starts with the first few bytes corresponding to the magic bytes
in the QuantumGraph: `qgraph4\xf6\xe8\xa9`.

The next few bytes are a big endian unsigned long long.

The unsigned long long corresponds to the number of bytes of a mapping
of header information. This mapping is encoded into json and then lzma
compressed, meaning the operations must be performed in the opposite order to
deserialize.

The json encoded header mapping contains 5 fields: GraphBuildID, TaskDefs,
Nodes, Metadata, and DimensionRecords.

The `GraphBuildID` key corresponds with a string that is the unique id assigned
to this graph when it was created.

The `TaskDefs` key corresponds to a value which is a mapping of Task label to
task data. The task data is a mapping of key to value. The keys of this mapping
are `bytes`, `inputs`, and `outputs`.

The `TaskDefs` `bytes` key corresponds to a tuple of the start, stop byte range
(indexed after all the header bytes). This byte range corresponds to a lzma
compressed json mapping. This mapping has keys of `taskName`, corresponding to
a fully qualified python class, `config`, a pex_config string that is used to
configure the class, and `label`, which corresponds to a string that uniquely
identifies the task within a given execution pipeline.

The `TaskDefs` `inputs` key is associated with a list of tuples where each
tuple is the label of a task that comes before the given task, and
the name of the dataset that is shared between the tasks (think node and edge
in a graph sense).

The `TaskDefs` `outputs` key is like `inputs` except the values in the list
correspond to all the output connections of a task.

The `Nodes` key is also a json mapping with keys corresponding to the UUIDs of
QuantumNodes. The values associated with these keys are another mapping with
the keys `bytes`, `inputs`, and `outputs`.

The `Nodes` `bytes` key corresponds to a tuple of the start, stop byte range
(indexed after all the header bytes). These bytes are a lzma compressed json
mapping which contains many sub elements; this mapping will be referred to as
the SerializedQuantumNode (related to the python class it corresponds to).

SerializedQuantumNodes have 3 keys: `quantum`, corresponding to a json mapping
(described below) referred to as a SerializedQuantum, `taskLabel`, a string
which corresponds to a label in the `TaskDefs` mapping, and `nodeId`.

A SerializedQuantum has many keys: taskName, dataId, datasetTypeMapping,
initInputs, inputs, outputs, and dimensionRecords.

The `Nodes` `inputs` and `outputs` keys are each associated with a list of
string representations of the UUIDs of the QuantumNodes that are direct
inputs to or outputs of the given node.

The `Metadata` key is a mapping of strings to associated values. This metadata
may be anything that is important to be transported alongside the graph.

As stated above, each map contains byte ranges of the corresponding
datastructure. These bytes are also lzma compressed json, and should
be deserialized in a similar manner.
"""


@dataclass
class DeserializerV3(DeserializerBase):
    @classmethod
    def FMT_STRING(cls) -> str:
        return ">Q"

    def __post_init__(self) -> None:
        self.infoSize: int
        (self.infoSize,) = struct.unpack(self.FMT_STRING(), self.sizeBytes)

    @property
    def headerSize(self) -> int:
        return self.preambleSize + self.structSize + self.infoSize

    def readHeaderInfo(self, rawHeader: bytes) -> SimpleNamespace:
        uncompressedinfoMap = self.unpackHeader(rawHeader)
        assert uncompressedinfoMap is not None  # for python typing; this variant can't be None
        infoMap = json.loads(uncompressedinfoMap)
        infoMappings = SimpleNamespace()
        infoMappings.taskDefMap = infoMap["TaskDefs"]
        infoMappings._buildId = infoMap["GraphBuildID"]
        infoMappings.map = {uuid.UUID(k): v for k, v in infoMap["Nodes"]}
        infoMappings.metadata = infoMap["Metadata"]
        infoMappings.dimensionRecords = {}
        for k, v in infoMap["DimensionRecords"].items():
            infoMappings.dimensionRecords[int(k)] = SerializedDimensionRecord(**v)
        # It is important that this is a `get` call, so that versions of
        # saved quantum graphs that lack a saved universe are still supported
        # without changing the save format
        if (universeConfig := infoMap.get("universe")) is not None:
            universe = DimensionUniverse(config=DimensionConfig(universeConfig))
        else:
            universe = DimensionUniverse()
        infoMappings.universe = universe
        infoMappings.globalInitOutputRefs = []
        if (json_refs := infoMap.get("GlobalInitOutputRefs")) is not None:
            infoMappings.globalInitOutputRefs = [
                DatasetRef.from_json(json_ref, universe=universe) for json_ref in json_refs
            ]
        infoMappings.registryDatasetTypes = []
        if (json_refs := infoMap.get("RegistryDatasetTypes")) is not None:
            infoMappings.registryDatasetTypes = [
                DatasetType.from_json(json_ref, universe=universe) for json_ref in json_refs
            ]
        self.infoMappings = infoMappings
        return infoMappings

    def unpackHeader(self, rawHeader: bytes) -> str | None:
        return lzma.decompress(rawHeader).decode()

    def constructGraph(
        self,
        nodes: set[uuid.UUID],
        _readBytes: Callable[[int, int], bytes],
        universe: DimensionUniverse | None = None,
    ) -> QuantumGraph:
        # need to import here to avoid cyclic imports
        from . import QuantumGraph

        graph = nx.DiGraph()
        loadedTaskDef: dict[str, TaskDef] = {}
        container = {}
        datasetDict = _DatasetTracker(createInverse=True)
        taskToQuantumNode: defaultdict[TaskDef, set[QuantumNode]] = defaultdict(set)
        initInputRefs: dict[TaskDef, list[DatasetRef]] = {}
        initOutputRefs: dict[TaskDef, list[DatasetRef]] = {}

        if universe is not None:
            if not universe.isCompatibleWith(self.infoMappings.universe):
                saved = self.infoMappings.universe
                raise RuntimeError(
                    f"The saved dimension universe ({saved.namespace}@v{saved.version}) is not "
                    f"compatible with the supplied universe ({universe.namespace}@v{universe.version})."
                )
        else:
            universe = self.infoMappings.universe

        for node in nodes:
            start, stop = self.infoMappings.map[node]["bytes"]
            start, stop = start + self.headerSize, stop + self.headerSize
            # Read in the bytes corresponding to the node to load and
            # decompress them
            dump = json.loads(lzma.decompress(_readBytes(start, stop)))

            # Turn the json back into the pydantic model
            nodeDeserialized = SerializedQuantumNode.direct(**dump)
            del dump

            # attach the dictionary of dimension records to the pydantic
            # model. These are stored separately because they are repeated
            # over and over, and storing them once saves a lot of space and
            # time.
            nodeDeserialized.quantum.dimensionRecords = self.infoMappings.dimensionRecords
            # get the label for the current task
            nodeTaskLabel = nodeDeserialized.taskLabel

            if nodeTaskLabel not in loadedTaskDef:
                # Get the byte ranges corresponding to this taskDef
                start, stop = self.infoMappings.taskDefMap[nodeTaskLabel]["bytes"]
                start, stop = start + self.headerSize, stop + self.headerSize

                # bytes are compressed, so decompress them
                taskDefDump = json.loads(lzma.decompress(_readBytes(start, stop)))
                taskClass: type[PipelineTask] = doImportType(taskDefDump["taskName"])
                config: PipelineTaskConfig = taskClass.ConfigClass()
                config.loadFromStream(taskDefDump["config"])
                # Rebuild TaskDef
                recreatedTaskDef = TaskDef(
                    taskName=taskDefDump["taskName"],
                    taskClass=taskClass,
                    config=config,
                    label=taskDefDump["label"],
                )
                loadedTaskDef[nodeTaskLabel] = recreatedTaskDef

                # initInputRefs and initOutputRefs are optional
                if (refs := taskDefDump.get("initInputRefs")) is not None:
                    initInputRefs[recreatedTaskDef] = [
                        cast(DatasetRef, DatasetRef.from_json(ref, universe=universe)) for ref in refs
                    ]
                if (refs := taskDefDump.get("initOutputRefs")) is not None:
                    initOutputRefs[recreatedTaskDef] = [
                        cast(DatasetRef, DatasetRef.from_json(ref, universe=universe)) for ref in refs
                    ]

                # rebuild the mappings that associate dataset type names with
                # TaskDefs
                for _, input in self.infoMappings.taskDefMap[nodeTaskLabel]["inputs"]:
                    datasetDict.addConsumer(DatasetTypeName(input), recreatedTaskDef)

                added = set()
                for outputConnection in self.infoMappings.taskDefMap[nodeTaskLabel]["outputs"]:
                    typeName = outputConnection[1]
                    if typeName not in added:
                        added.add(typeName)
                        datasetDict.addProducer(DatasetTypeName(typeName), recreatedTaskDef)

            # reconstitute the node, passing in the dictionaries for the
            # loaded TaskDefs and dimension records. These are used to ensure
            # that each unique record is only loaded once
            qnode = QuantumNode.from_simple(nodeDeserialized, loadedTaskDef, universe)
            container[qnode.nodeId] = qnode
            taskToQuantumNode[loadedTaskDef[nodeTaskLabel]].add(qnode)

            # recreate the relations between each node from stored info
            graph.add_node(qnode)
            for id in self.infoMappings.map[qnode.nodeId]["inputs"]:
                # uuid is stored as a string; turn it back into a uuid
                id = uuid.UUID(id)
                # if the id is not yet in the container, don't make a
                # connection. This is not an issue, because once it is, that
                # id will add the reverse connection
                if id in container:
                    graph.add_edge(container[id], qnode)
            for id in self.infoMappings.map[qnode.nodeId]["outputs"]:
                # uuid is stored as a string; turn it back into a uuid
                id = uuid.UUID(id)
                # if the id is not yet in the container, don't make a
                # connection. This is not an issue, because once it is, that
                # id will add the reverse connection
                if id in container:
                    graph.add_edge(qnode, container[id])

        newGraph = object.__new__(QuantumGraph)
        newGraph._metadata = self.infoMappings.metadata
        newGraph._buildId = self.infoMappings._buildId
        newGraph._datasetDict = datasetDict
        newGraph._nodeIdMap = container
        newGraph._count = len(nodes)
        newGraph._taskToQuantumNode = dict(taskToQuantumNode.items())
        newGraph._taskGraph = datasetDict.makeNetworkXGraph()
        newGraph._connectedQuanta = graph
        newGraph._initInputRefs = initInputRefs
        newGraph._initOutputRefs = initOutputRefs
        newGraph._globalInitOutputRefs = self.infoMappings.globalInitOutputRefs
        newGraph._registryDatasetTypes = self.infoMappings.registryDatasetTypes
        newGraph._universe = universe
        return newGraph


DESERIALIZER_MAP: dict[int, type[DeserializerBase]] = {
    1: DeserializerV1,
    2: DeserializerV2,
    3: DeserializerV3,
}
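

# A minimal sketch of how the map above can be used to pick a deserializer
# for a given save, assuming the caller has already read the save format
# version and the FMT_STRING-sized size bytes from the file
# (``_example_make_deserializer`` is a hypothetical helper; the real
# dispatch lives in the QuantumGraph load code).
def _example_make_deserializer(
    saveVersion: int, preambleSize: int, sizeBytes: bytes
) -> DeserializerBase:
    # look up the deserializer class registered for this save version
    deserializerClass = DESERIALIZER_MAP[saveVersion]
    # the dataclass fields (preambleSize, sizeBytes) feed __post_init__,
    # which unpacks the size integer(s) according to FMT_STRING
    return deserializerClass(preambleSize, sizeBytes)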