Coverage for python/lsst/pipe/base/graph/_versionDeserializers.py: 26% (262 statements)
# This file is part of pipe_base.
#
# Developed for the LSST Data Management System.
# This product includes software developed by the LSST Project
# (http://www.lsst.org).
# See the COPYRIGHT file at the top-level directory of this distribution
# for details of code ownership.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations

__all__ = ("DESERIALIZER_MAP",)

import json
import lzma
import pickle
import struct
import uuid
import warnings
from abc import ABC, abstractmethod
from collections import defaultdict
from dataclasses import dataclass
from types import SimpleNamespace
from typing import (
    TYPE_CHECKING,
    Callable,
    ClassVar,
    DefaultDict,
    Dict,
    List,
    Optional,
    Set,
    Tuple,
    Type,
    cast,
)

import networkx as nx
from lsst.daf.butler import (
    DatasetRef,
    DatasetType,
    DimensionConfig,
    DimensionRecord,
    DimensionUniverse,
    Quantum,
    SerializedDimensionRecord,
    UnresolvedRefWarning,
)
from lsst.utils import doImportType

from ..config import PipelineTaskConfig
from ..pipeline import TaskDef
from ..pipelineTask import PipelineTask
from ._implDetails import DatasetTypeName, _DatasetTracker
from .quantumNode import QuantumNode, SerializedQuantumNode

if TYPE_CHECKING:
    from .graph import QuantumGraph


class StructSizeDescriptor:
    """This is basically a class-level property. It exists to report the size
    (number of bytes) of whatever the struct format string is for a
    deserializer.
    """

    def __get__(self, inst: Optional[DeserializerBase], owner: Type[DeserializerBase]) -> int:
        return struct.calcsize(owner.FMT_STRING())
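
# For example (an illustration, not part of the original module): because
# DeserializerV1.FMT_STRING() below returns ">QQ" (two big endian unsigned
# 64 bit integers), DeserializerV1.structSize evaluates to
# struct.calcsize(">QQ") == 16.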


@dataclass
class DeserializerBase(ABC):
    @classmethod
    @abstractmethod
    def FMT_STRING(cls) -> str:  # noqa: N805 # flake8 wants self
        raise NotImplementedError("Base class does not implement this method")

    structSize: ClassVar[StructSizeDescriptor]

    preambleSize: int
    sizeBytes: bytes

    def __init_subclass__(cls) -> None:
        # attach the size descriptor
        cls.structSize = StructSizeDescriptor()
        super().__init_subclass__()

    def unpackHeader(self, rawHeader: bytes) -> Optional[str]:
        """Transform the raw bytes corresponding to the header of a save into
        a string of the header information. Returns `None` if the save format
        has no header string implementation (such as save format 1, which is
        all pickle).

        Parameters
        ----------
        rawHeader : bytes
            The bytes that are to be parsed into the header information. These
            are the bytes after the preamble and structSize number of bytes
            and before the headerSize bytes.
        """
        raise NotImplementedError("Base class does not implement this method")

    @property
    def headerSize(self) -> int:
        """Return the number of bytes from the beginning of the file to the
        end of the metadata.
        """
        raise NotImplementedError("Base class does not implement this method")

    def readHeaderInfo(self, rawHeader: bytes) -> SimpleNamespace:
        """Parse the supplied raw bytes into the header information and the
        byte ranges of specific TaskDefs and QuantumNodes.

        Parameters
        ----------
        rawHeader : bytes
            The bytes that are to be parsed into the header information. These
            are the bytes after the preamble and structSize number of bytes
            and before the headerSize bytes.
        """
        raise NotImplementedError("Base class does not implement this method")

    def constructGraph(
        self,
        nodes: set[uuid.UUID],
        _readBytes: Callable[[int, int], bytes],
        universe: Optional[DimensionUniverse] = None,
    ) -> QuantumGraph:
        """Construct a graph from the deserialized information.

        Parameters
        ----------
        nodes : `set` of `uuid.UUID`
            The nodes to include in the graph.
        _readBytes : callable
            A callable that can be used to read bytes from the file handle.
            The callable takes two ints, start and stop, to use as the
            numerical bounds to read, and returns a byte stream.
        universe : `~lsst.daf.butler.DimensionUniverse`
            The singleton of all dimensions known to the middleware registry.
        """
        raise NotImplementedError("Base class does not implement this method")

    def description(self) -> str:
        """Return the description of the serialized data format."""
        raise NotImplementedError("Base class does not implement this method")


Version1Description = """
The save file starts with the first few bytes corresponding to the magic bytes
in the QuantumGraph: `qgraph4\xf6\xe8\xa9`.

The next few bytes are 2 big endian unsigned 64 bit integers.

The first unsigned 64 bit integer corresponds to the number of bytes of a
python mapping of TaskDef labels to the byte ranges in the save file where the
definition can be loaded.

The second unsigned 64 bit integer corresponds to the number of bytes of a
python mapping of QuantumGraph node number to the byte ranges in the save file
where the node can be loaded. The byte range is indexed starting after
the `header` bytes of the magic bytes, size bytes, and bytes of the two
mappings.

Each of the above mappings is a python pickle; to deserialize it, pass the
bytes directly to the python pickle loader.

As stated above, each map contains byte ranges of the corresponding
data structure. These bytes are lzma compressed pickles, so to deserialize
them, lzma decompression must be performed first and the result passed to the
python pickle loader. The byte range is indexed starting after the `header`
bytes of the magic bytes, size bytes, and bytes of the two mappings.

In addition to the TaskDef byte locations, the TaskDef map also contains
an additional key '__GraphBuildID'. The value associated with this is the
unique id assigned to the graph at its creation time.
"""


@dataclass
class DeserializerV1(DeserializerBase):
    @classmethod
    def FMT_STRING(cls) -> str:
        return ">QQ"

    def __post_init__(self) -> None:
        self.taskDefMapSize, self.nodeMapSize = struct.unpack(self.FMT_STRING(), self.sizeBytes)

    @property
    def headerSize(self) -> int:
        return self.preambleSize + self.structSize + self.taskDefMapSize + self.nodeMapSize

    def readHeaderInfo(self, rawHeader: bytes) -> SimpleNamespace:
        returnValue = SimpleNamespace()
        returnValue.taskDefMap = pickle.loads(rawHeader[: self.taskDefMapSize])
        returnValue._buildId = returnValue.taskDefMap["__GraphBuildID"]
        returnValue.map = pickle.loads(rawHeader[self.taskDefMapSize :])
        returnValue.metadata = None
        self.returnValue = returnValue
        return returnValue

    def unpackHeader(self, rawHeader: bytes) -> Optional[str]:
        return None

    def constructGraph(
        self,
        nodes: set[uuid.UUID],
        _readBytes: Callable[[int, int], bytes],
        universe: Optional[DimensionUniverse] = None,
    ) -> QuantumGraph:
        # need to import here to avoid cyclic imports
        from . import QuantumGraph

        quanta: DefaultDict[TaskDef, Set[Quantum]] = defaultdict(set)
        quantumToNodeId: Dict[Quantum, uuid.UUID] = {}
        loadedTaskDef = {}
        # loop over the specified nodes
        for node in nodes:
            # Get the byte range to read from the map
            start, stop = self.returnValue.map[node]
            start += self.headerSize
            stop += self.headerSize

            # read the specified bytes; they are compressed, so decompress
            # them
            dump = lzma.decompress(_readBytes(start, stop))

            # reconstruct the node
            qNode = pickle.loads(dump)
            object.__setattr__(qNode, "nodeId", uuid.uuid4())

            # Look at the saved node's task. If it has already been loaded,
            # attach it; if not, read in the taskDef first, then attach it.
            nodeTask = qNode.taskDef
            if nodeTask not in loadedTaskDef:
                # Get the byte range corresponding to this taskDef
                start, stop = self.returnValue.taskDefMap[nodeTask]
                start += self.headerSize
                stop += self.headerSize

                # load the taskDef; the bytes are compressed, so decompress
                # them first
                taskDef = pickle.loads(lzma.decompress(_readBytes(start, stop)))
                loadedTaskDef[nodeTask] = taskDef
            # Explicitly work around the "frozen-ness" of nodes to attach the
            # taskDef back onto the un-persisted node
            object.__setattr__(qNode, "taskDef", loadedTaskDef[nodeTask])
            quanta[qNode.taskDef].add(qNode.quantum)

            # record the node for later processing
            quantumToNodeId[qNode.quantum] = qNode.nodeId

        # construct an empty new QuantumGraph object, and run the associated
        # creation method with the un-persisted data
        qGraph = object.__new__(QuantumGraph)
        qGraph._buildGraphs(
            quanta,
            _quantumToNodeId=quantumToNodeId,
            _buildId=self.returnValue._buildId,
            metadata=self.returnValue.metadata,
            universe=universe,
        )
        return qGraph

    def description(self) -> str:
        return Version1Description


Version2Description = """
The save file starts with the first few bytes corresponding to the magic bytes
in the QuantumGraph: `qgraph4\xf6\xe8\xa9`.

The next few bytes are a big endian unsigned long long.

The unsigned long long corresponds to the number of bytes of a python mapping
of header information. This mapping is encoded into json and then lzma
compressed, meaning the operations must be performed in the opposite order to
deserialize.

The json encoded header mapping contains 4 fields: TaskDefs, GraphBuildID,
Nodes, and Metadata.

The `TaskDefs` key corresponds to a value which is a mapping of task label to
task data. The task data is a mapping of key to value, where the only key is
`bytes`, and it corresponds to a tuple of the start, stop byte range
(indexed after all the header bytes).

The `GraphBuildID` key corresponds with a string that is the unique id assigned
to this graph when it was created.

The `Nodes` key is like the `TaskDefs` key except it corresponds to
QuantumNodes instead of TaskDefs. Another important difference is that JSON
formatting does not allow using numbers as keys, and this mapping is keyed by
the node number. Thus it is stored in JSON as two equal length lists, the first
being the keys, and the second the values associated with those keys.

The `Metadata` key is a mapping of strings to associated values. This metadata
may be anything that is important to be transported alongside the graph.

As stated above, each map contains byte ranges of the corresponding
data structure. These bytes are also lzma compressed pickles, and should
be deserialized in a similar manner.
"""


@dataclass
class DeserializerV2(DeserializerBase):
    @classmethod
    def FMT_STRING(cls) -> str:
        return ">Q"

    def __post_init__(self) -> None:
        (self.mapSize,) = struct.unpack(self.FMT_STRING(), self.sizeBytes)

    @property
    def headerSize(self) -> int:
        return self.preambleSize + self.structSize + self.mapSize

    def readHeaderInfo(self, rawHeader: bytes) -> SimpleNamespace:
        uncompressedHeaderMap = self.unpackHeader(rawHeader)
        if uncompressedHeaderMap is None:
            raise ValueError(
                "This error is not possible because self.unpackHeader cannot return None,"
                " but is done to satisfy type checkers"
            )
        header = json.loads(uncompressedHeaderMap)
        returnValue = SimpleNamespace()
        returnValue.taskDefMap = header["TaskDefs"]
        returnValue._buildId = header["GraphBuildID"]
        returnValue.map = dict(header["Nodes"])
        returnValue.metadata = header["Metadata"]
        self.returnValue = returnValue
        return returnValue

    def unpackHeader(self, rawHeader: bytes) -> Optional[str]:
        return lzma.decompress(rawHeader).decode()

    def constructGraph(
        self,
        nodes: set[uuid.UUID],
        _readBytes: Callable[[int, int], bytes],
        universe: Optional[DimensionUniverse] = None,
    ) -> QuantumGraph:
        # need to import here to avoid cyclic imports
        from . import QuantumGraph

        quanta: DefaultDict[TaskDef, Set[Quantum]] = defaultdict(set)
        quantumToNodeId: Dict[Quantum, uuid.UUID] = {}
        loadedTaskDef = {}
        # loop over the specified nodes
        for node in nodes:
            # Get the byte range to read from the map
            start, stop = self.returnValue.map[node]["bytes"]
            start += self.headerSize
            stop += self.headerSize

            # read the specified bytes; they are compressed, so decompress
            # them
            dump = lzma.decompress(_readBytes(start, stop))

            # reconstruct the node
            qNode = pickle.loads(dump)
            object.__setattr__(qNode, "nodeId", uuid.uuid4())

            # Look at the saved node's task. If it has already been loaded,
            # attach it; if not, read in the taskDef first, then attach it.
            nodeTask = qNode.taskDef
            if nodeTask not in loadedTaskDef:
                # Get the byte range corresponding to this taskDef
                start, stop = self.returnValue.taskDefMap[nodeTask]["bytes"]
                start += self.headerSize
                stop += self.headerSize

                # load the taskDef; the bytes are compressed, so decompress
                # them first
                taskDef = pickle.loads(lzma.decompress(_readBytes(start, stop)))
                loadedTaskDef[nodeTask] = taskDef
            # Explicitly work around the "frozen-ness" of nodes to attach the
            # taskDef back onto the un-persisted node
            object.__setattr__(qNode, "taskDef", loadedTaskDef[nodeTask])
            quanta[qNode.taskDef].add(qNode.quantum)

            # record the node for later processing
            quantumToNodeId[qNode.quantum] = qNode.nodeId

        # construct an empty new QuantumGraph object, and run the associated
        # creation method with the un-persisted data
        qGraph = object.__new__(QuantumGraph)
        qGraph._buildGraphs(
            quanta,
            _quantumToNodeId=quantumToNodeId,
            _buildId=self.returnValue._buildId,
            metadata=self.returnValue.metadata,
            universe=universe,
        )
        return qGraph

    def description(self) -> str:
        return Version2Description


Version3Description = """
The save file starts with the first few bytes corresponding to the magic bytes
in the QuantumGraph: `qgraph4\xf6\xe8\xa9`.

The next few bytes are a big endian unsigned long long.

The unsigned long long corresponds to the number of bytes of a mapping
of header information. This mapping is encoded into json and then lzma
compressed, meaning the operations must be performed in the opposite order to
deserialize.

The json encoded header mapping contains 5 fields: GraphBuildID, TaskDefs,
Nodes, Metadata, and DimensionRecords.

The `GraphBuildID` key corresponds with a string that is the unique id assigned
to this graph when it was created.

The `TaskDefs` key corresponds to a value which is a mapping of task label to
task data. The task data is a mapping of key to value. The keys of this mapping
are `bytes`, `inputs`, and `outputs`.

The `TaskDefs` `bytes` key corresponds to a tuple of the start, stop byte
range (indexed after all the header bytes). This byte range corresponds to an
lzma compressed json mapping. This mapping has the keys `taskName`,
corresponding to a fully qualified python class; `config`, a pex_config string
that is used to configure the class; and `label`, which corresponds to a
string that uniquely identifies the task within a given execution pipeline.

The `TaskDefs` `inputs` key is associated with a list of tuples where each
tuple is the label of a task that comes before the given task, and the name of
the dataset that is shared between the tasks (think node and edge in a graph
sense).

The `TaskDefs` `outputs` key is like `inputs` except the values in the list
correspond to all the output connections of a task.

The `Nodes` key is also a json mapping whose keys are string representations
of the UUIDs of QuantumNodes. The value associated with each of these keys is
another mapping with the keys `bytes`, `inputs`, and `outputs`.

The `Nodes` `bytes` key corresponds to a tuple of the start, stop byte range
(indexed after all the header bytes). These bytes are an lzma compressed json
mapping which contains many sub elements; this mapping will be referred to as
a SerializedQuantumNode (related to the python class it corresponds to).

SerializedQuantumNodes have 3 keys: `quantum`, corresponding to a json mapping
(described below) referred to as a SerializedQuantum; `taskLabel`, a string
which corresponds to a label in the `TaskDefs` mapping; and `nodeId`.

A SerializedQuantum has many keys: taskName, dataId, datasetTypeMapping,
initInputs, inputs, outputs, and dimensionRecords.

The `Nodes` `inputs` and `outputs` keys are like those of the `TaskDefs`
mapping, except they correspond to QuantumNodes instead of TaskDefs, and the
values are string representations of the UUIDs of the connected QuantumNodes.

The `Metadata` key is a mapping of strings to associated values. This metadata
may be anything that is important to be transported alongside the graph.

As stated above, each map contains byte ranges of the corresponding
data structure. These bytes are lzma compressed json, and should
be deserialized in a similar manner.
"""


@dataclass
class DeserializerV3(DeserializerBase):
    @classmethod
    def FMT_STRING(cls) -> str:
        return ">Q"

    def __post_init__(self) -> None:
        self.infoSize: int
        (self.infoSize,) = struct.unpack(self.FMT_STRING(), self.sizeBytes)

    @property
    def headerSize(self) -> int:
        return self.preambleSize + self.structSize + self.infoSize

    def readHeaderInfo(self, rawHeader: bytes) -> SimpleNamespace:
        uncompressedinfoMap = self.unpackHeader(rawHeader)
        assert uncompressedinfoMap is not None  # for python typing; this variant can't be None
        infoMap = json.loads(uncompressedinfoMap)
        infoMappings = SimpleNamespace()
        infoMappings.taskDefMap = infoMap["TaskDefs"]
        infoMappings._buildId = infoMap["GraphBuildID"]
        infoMappings.map = {uuid.UUID(k): v for k, v in infoMap["Nodes"]}
        infoMappings.metadata = infoMap["Metadata"]
        infoMappings.dimensionRecords = {}
        for k, v in infoMap["DimensionRecords"].items():
            infoMappings.dimensionRecords[int(k)] = SerializedDimensionRecord(**v)
        # It is important that this is a get call, so that it supports
        # versions of saved quantum graphs that might not have a saved
        # universe, without changing the save format
        if (universeConfig := infoMap.get("universe")) is not None:
            universe = DimensionUniverse(config=DimensionConfig(universeConfig))
        else:
            universe = DimensionUniverse()
        infoMappings.universe = universe
        infoMappings.globalInitOutputRefs = []
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", category=UnresolvedRefWarning)
            if (json_refs := infoMap.get("GlobalInitOutputRefs")) is not None:
                infoMappings.globalInitOutputRefs = [
                    DatasetRef.from_json(json_ref, universe=universe) for json_ref in json_refs
                ]
        infoMappings.registryDatasetTypes = []
        if (json_refs := infoMap.get("RegistryDatasetTypes")) is not None:
            infoMappings.registryDatasetTypes = [
                DatasetType.from_json(json_ref, universe=universe) for json_ref in json_refs
            ]
        self.infoMappings = infoMappings
        return infoMappings

    def unpackHeader(self, rawHeader: bytes) -> Optional[str]:
        return lzma.decompress(rawHeader).decode()

    def constructGraph(
        self,
        nodes: set[uuid.UUID],
        _readBytes: Callable[[int, int], bytes],
        universe: Optional[DimensionUniverse] = None,
    ) -> QuantumGraph:
        # need to import here to avoid cyclic imports
        from . import QuantumGraph

        graph = nx.DiGraph()
        loadedTaskDef: Dict[str, TaskDef] = {}
        container = {}
        datasetDict = _DatasetTracker[DatasetTypeName, TaskDef](createInverse=True)
        taskToQuantumNode: DefaultDict[TaskDef, Set[QuantumNode]] = defaultdict(set)
        reconstitutedDimensions: Dict[int, Tuple[str, DimensionRecord]] = {}
        initInputRefs: Dict[TaskDef, List[DatasetRef]] = {}
        initOutputRefs: Dict[TaskDef, List[DatasetRef]] = {}

        if universe is not None:
            if not universe.isCompatibleWith(self.infoMappings.universe):
                saved = self.infoMappings.universe
                raise RuntimeError(
                    f"The saved dimension universe ({saved.namespace}@v{saved.version}) is not "
                    f"compatible with the supplied universe ({universe.namespace}@v{universe.version})."
                )
        else:
            universe = self.infoMappings.universe

        for node in nodes:
            start, stop = self.infoMappings.map[node]["bytes"]
            start, stop = start + self.headerSize, stop + self.headerSize
            # Read in the bytes corresponding to the node to load and
            # decompress them
            dump = json.loads(lzma.decompress(_readBytes(start, stop)))

            # Turn the json back into the pydantic model
            nodeDeserialized = SerializedQuantumNode.direct(**dump)
            # Attach the dictionary of dimension records to the pydantic
            # model. These are stored separately because they are repeated
            # over and over, and storing them once saves a lot of space and
            # time.
            nodeDeserialized.quantum.dimensionRecords = self.infoMappings.dimensionRecords
            # get the label for the current task
            nodeTaskLabel = nodeDeserialized.taskLabel

            if nodeTaskLabel not in loadedTaskDef:
                # Get the byte range corresponding to this taskDef
                start, stop = self.infoMappings.taskDefMap[nodeTaskLabel]["bytes"]
                start, stop = start + self.headerSize, stop + self.headerSize

                # bytes are compressed, so decompress them
                taskDefDump = json.loads(lzma.decompress(_readBytes(start, stop)))
                taskClass: Type[PipelineTask] = doImportType(taskDefDump["taskName"])
                config: PipelineTaskConfig = taskClass.ConfigClass()
                config.loadFromStream(taskDefDump["config"])
                # Rebuild the TaskDef
                recreatedTaskDef = TaskDef(
                    taskName=taskDefDump["taskName"],
                    taskClass=taskClass,
                    config=config,
                    label=taskDefDump["label"],
                )
                loadedTaskDef[nodeTaskLabel] = recreatedTaskDef

                # initInputRefs and initOutputRefs are optional
                with warnings.catch_warnings():
                    warnings.simplefilter("ignore", category=UnresolvedRefWarning)
                    if (refs := taskDefDump.get("initInputRefs")) is not None:
                        initInputRefs[recreatedTaskDef] = [
                            cast(DatasetRef, DatasetRef.from_json(ref, universe=universe)) for ref in refs
                        ]
                    if (refs := taskDefDump.get("initOutputRefs")) is not None:
                        initOutputRefs[recreatedTaskDef] = [
                            cast(DatasetRef, DatasetRef.from_json(ref, universe=universe)) for ref in refs
                        ]

                # rebuild the mappings that associate dataset type names with
                # TaskDefs
                for _, input in self.infoMappings.taskDefMap[nodeTaskLabel]["inputs"]:
                    datasetDict.addConsumer(DatasetTypeName(input), recreatedTaskDef)

                added = set()
                for outputConnection in self.infoMappings.taskDefMap[nodeTaskLabel]["outputs"]:
                    typeName = outputConnection[1]
                    if typeName not in added:
                        added.add(typeName)
                        datasetDict.addProducer(DatasetTypeName(typeName), recreatedTaskDef)

            # Reconstitute the node, passing in the dictionaries for the
            # loaded TaskDefs and dimension records. These are used to ensure
            # that each unique record is only loaded once
            qnode = QuantumNode.from_simple(nodeDeserialized, loadedTaskDef, universe, reconstitutedDimensions)
            container[qnode.nodeId] = qnode
            taskToQuantumNode[loadedTaskDef[nodeTaskLabel]].add(qnode)

            # recreate the relations between each node from the stored info
            graph.add_node(qnode)
            for id in self.infoMappings.map[qnode.nodeId]["inputs"]:
                # the uuid is stored as a string; turn it back into a uuid
                id = uuid.UUID(id)
                # If the id is not yet in the container, don't make a
                # connection. This is not an issue, because once it is, that
                # id will add the reverse connection.
                if id in container:
                    graph.add_edge(container[id], qnode)
            for id in self.infoMappings.map[qnode.nodeId]["outputs"]:
                # the uuid is stored as a string; turn it back into a uuid
                id = uuid.UUID(id)
                # If the id is not yet in the container, don't make a
                # connection. This is not an issue, because once it is, that
                # id will add the reverse connection.
                if id in container:
                    graph.add_edge(qnode, container[id])

        newGraph = object.__new__(QuantumGraph)
        newGraph._metadata = self.infoMappings.metadata
        newGraph._buildId = self.infoMappings._buildId
        newGraph._datasetDict = datasetDict
        newGraph._nodeIdMap = container
        newGraph._count = len(nodes)
        newGraph._taskToQuantumNode = dict(taskToQuantumNode.items())
        newGraph._taskGraph = datasetDict.makeNetworkXGraph()
        newGraph._connectedQuanta = graph
        newGraph._initInputRefs = initInputRefs
        newGraph._initOutputRefs = initOutputRefs
        newGraph._globalInitOutputRefs = self.infoMappings.globalInitOutputRefs
        newGraph._registryDatasetTypes = self.infoMappings.registryDatasetTypes
        newGraph._universe = universe
        return newGraph


DESERIALIZER_MAP: dict[int, Type[DeserializerBase]] = {
    1: DeserializerV1,
    2: DeserializerV2,
    3: DeserializerV3,
}
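

# A minimal sketch, not part of the original module, of how a caller might use
# DESERIALIZER_MAP: look up the deserializer class for the save format version
# recorded in a file and instantiate it with the preamble size and the raw
# size bytes. The names used here are hypothetical illustrations.
def _exampleSelectDeserializer(saveVersion: int, preambleSize: int, sizeBytes: bytes) -> DeserializerBase:
    deserializerClass = DESERIALIZER_MAP[saveVersion]
    return deserializerClass(preambleSize, sizeBytes)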