# This file is part of pipe_base.
#
# Developed for the LSST Data Management System.
# This product includes software developed by the LSST Project
# (http://www.lsst.org).
# See the COPYRIGHT file at the top-level directory of this distribution
# for details of code ownership.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations

__all__ = ("DESERIALIZER_MAP",)

import json
import lzma
import pickle
import struct
import uuid
from abc import ABC, abstractmethod
from collections import defaultdict
from dataclasses import dataclass
from types import SimpleNamespace
from typing import (
    TYPE_CHECKING,
    Callable,
    ClassVar,
    DefaultDict,
    Dict,
    List,
    Optional,
    Set,
    Tuple,
    Type,
    cast,
)

import networkx as nx
from lsst.daf.butler import (
    DatasetRef,
    DimensionConfig,
    DimensionRecord,
    DimensionUniverse,
    Quantum,
    SerializedDimensionRecord,
)
from lsst.utils import doImportType

from ..config import PipelineTaskConfig
from ..pipeline import TaskDef
from ..pipelineTask import PipelineTask
from ._implDetails import DatasetTypeName, _DatasetTracker
from .quantumNode import QuantumNode, SerializedQuantumNode

if TYPE_CHECKING:
    from .graph import QuantumGraph


class StructSizeDescriptor:
    """This is essentially a class-level property. It exists to report the
    size (number of bytes) of whatever the format string is for a
    deserializer.
    """

    def __get__(self, inst: Optional[DeserializerBase], owner: Type[DeserializerBase]) -> int:
        return struct.calcsize(owner.FMT_STRING())


@dataclass
class DeserializerBase(ABC):
    @classmethod
    @abstractmethod
    def FMT_STRING(cls) -> str:  # noqa: N805 # flake8 wants self
        raise NotImplementedError("Base class does not implement this method")

    structSize: ClassVar[StructSizeDescriptor]

    preambleSize: int
    sizeBytes: bytes

    def __init_subclass__(cls) -> None:
        # attach the size descriptor
        cls.structSize = StructSizeDescriptor()
        super().__init_subclass__()

    def unpackHeader(self, rawHeader: bytes) -> Optional[str]:
        """Transform the raw bytes corresponding to the header of a save into
        a string of the header information. Returns `None` if the save format
        has no header string implementation (such as save format 1, which is
        all pickle).

        Parameters
        ----------
        rawHeader : bytes
            The bytes that are to be parsed into the header information. These
            are the bytes after the preamble and ``structSize`` number of
            bytes and before the ``headerSize`` bytes.
        """
        raise NotImplementedError("Base class does not implement this method")

    @property
    def headerSize(self) -> int:
        """Returns the number of bytes from the beginning of the file to the
        end of the metadata.
        """
        raise NotImplementedError("Base class does not implement this method")

    def readHeaderInfo(self, rawHeader: bytes) -> SimpleNamespace:
        """Parse the supplied raw bytes into the header information and the
        byte ranges of specific TaskDefs and QuantumNodes.

        Parameters
        ----------
        rawHeader : bytes
            The bytes that are to be parsed into the header information. These
            are the bytes after the preamble and ``structSize`` number of
            bytes and before the ``headerSize`` bytes.
        """
        raise NotImplementedError("Base class does not implement this method")

    def constructGraph(
        self,
        nodes: set[uuid.UUID],
        _readBytes: Callable[[int, int], bytes],
        universe: Optional[DimensionUniverse] = None,
    ) -> QuantumGraph:
        """Construct a graph from the deserialized information.

        Parameters
        ----------
        nodes : `set` of `uuid.UUID`
            The nodes to include in the graph.
        _readBytes : callable
            A callable that can be used to read bytes from the file handle.
            The callable takes two ints, start and stop, to use as the
            numerical bounds to read, and returns a byte stream.
        universe : `~lsst.daf.butler.DimensionUniverse`
            The singleton of all dimensions known to the middleware registry.
        """
        raise NotImplementedError("Base class does not implement this method")

    def description(self) -> str:
        """Return the description of the serialized data format."""
        raise NotImplementedError("Base class does not implement this method")


Version1Description = """
The save file starts with the first few bytes corresponding to the magic bytes
in the QuantumGraph: `qgraph4\xf6\xe8\xa9`.

The next few bytes are 2 big endian unsigned 64 bit integers.

The first unsigned 64 bit integer corresponds to the number of bytes of a
python mapping of TaskDef labels to the byte ranges in the save file where the
definition can be loaded.

The second unsigned 64 bit integer corresponds to the number of bytes of a
python mapping of QuantumGraph Node number to the byte ranges in the save file
where the node can be loaded. The byte range is indexed starting after
the `header` bytes of the magic bytes, size bytes, and bytes of the two
mappings.

Each of the above mappings is pickled and then lzma compressed, so to
deserialize the bytes, lzma decompression must be performed first and the
result passed to the python pickle loader.

As stated above, each map contains byte ranges of the corresponding
data structure. These bytes are also lzma compressed pickles, and should
be deserialized in a similar manner. The byte range is indexed starting after
the `header` bytes of the magic bytes, size bytes, and bytes of the two
mappings.

In addition to the TaskDef byte locations, the TaskDef map also contains
an additional key '__GraphBuildID'. The value associated with this key is the
unique id assigned to the graph at its creation time.
"""


@dataclass
class DeserializerV1(DeserializerBase):
    @classmethod
    def FMT_STRING(cls) -> str:
        return ">QQ"

    def __post_init__(self) -> None:
        self.taskDefMapSize, self.nodeMapSize = struct.unpack(self.FMT_STRING(), self.sizeBytes)

    @property
    def headerSize(self) -> int:
        return self.preambleSize + self.structSize + self.taskDefMapSize + self.nodeMapSize

    def readHeaderInfo(self, rawHeader: bytes) -> SimpleNamespace:
        returnValue = SimpleNamespace()
        returnValue.taskDefMap = pickle.loads(rawHeader[: self.taskDefMapSize])
        returnValue._buildId = returnValue.taskDefMap["__GraphBuildID"]
        returnValue.map = pickle.loads(rawHeader[self.taskDefMapSize :])
        returnValue.metadata = None
        self.returnValue = returnValue
        return returnValue

    def unpackHeader(self, rawHeader: bytes) -> Optional[str]:
        return None

    def constructGraph(
        self,
        nodes: set[uuid.UUID],
        _readBytes: Callable[[int, int], bytes],
        universe: Optional[DimensionUniverse] = None,
    ) -> QuantumGraph:
        # need to import here to avoid cyclic imports
        from . import QuantumGraph

        quanta: DefaultDict[TaskDef, Set[Quantum]] = defaultdict(set)
        quantumToNodeId: Dict[Quantum, uuid.UUID] = {}
        loadedTaskDef = {}
        # loop over the nodes specified above
        for node in nodes:
            # Get the bytes to read from the map
            start, stop = self.returnValue.map[node]
            start += self.headerSize
            stop += self.headerSize

            # read the specified bytes; they are compressed, so decompress
            # them
            dump = lzma.decompress(_readBytes(start, stop))

            # reconstruct node
            qNode = pickle.loads(dump)
            object.__setattr__(qNode, "nodeId", uuid.uuid4())

            # Look up the task associated with the saved node. If it has
            # already been loaded, attach it; if not, read in the taskDef
            # first, and then attach it.
            nodeTask = qNode.taskDef
            if nodeTask not in loadedTaskDef:
                # Get the byte ranges corresponding to this taskDef
                start, stop = self.returnValue.taskDefMap[nodeTask]
                start += self.headerSize
                stop += self.headerSize

                # load the taskDef; bytes are compressed, so decompress them
                taskDef = pickle.loads(lzma.decompress(_readBytes(start, stop)))
                loadedTaskDef[nodeTask] = taskDef
            # Explicitly override the "frozen-ness" of nodes to attach the
            # taskDef back onto the un-persisted node
            object.__setattr__(qNode, "taskDef", loadedTaskDef[nodeTask])
            quanta[qNode.taskDef].add(qNode.quantum)

            # record the node for later processing
            quantumToNodeId[qNode.quantum] = qNode.nodeId

        # construct an empty new QuantumGraph object, and run the associated
        # creation method with the un-persisted data
        qGraph = object.__new__(QuantumGraph)
        qGraph._buildGraphs(
            quanta,
            _quantumToNodeId=quantumToNodeId,
            _buildId=self.returnValue._buildId,
            metadata=self.returnValue.metadata,
            universe=universe,
        )
        return qGraph

    def description(self) -> str:
        return Version1Description


Version2Description = """
The save file starts with the first few bytes corresponding to the magic bytes
in the QuantumGraph: `qgraph4\xf6\xe8\xa9`.

The next few bytes are a big endian unsigned long long.

The unsigned long long corresponds to the number of bytes of a python mapping
of header information. This mapping is encoded into json and then lzma
compressed, meaning the operations must be performed in the opposite order to
deserialize.

The json encoded header mapping contains 4 fields: TaskDefs, GraphBuildId,
Nodes, and Metadata.

The `TaskDefs` key corresponds to a value which is a mapping of Task label to
task data. The task data is a mapping of key to value, where the only key is
`bytes`, and it corresponds to a tuple of the start, stop byte range (indexed
after all the header bytes).

The `GraphBuildId` key corresponds with a string that is the unique id
assigned to this graph when it was created.

The `Nodes` key is like the `TaskDefs` key except it corresponds to
QuantumNodes instead of TaskDefs. Another important difference is that JSON
formatting does not allow using numbers as keys, and this mapping is keyed by
the node number. Thus it is stored in JSON as two equal length lists, the
first being the keys, and the second the values associated with those keys.

The `Metadata` key is a mapping of strings to associated values. This metadata
may be anything that is important to be transported alongside the graph.

As stated above, each map contains byte ranges of the corresponding
data structure. These bytes are also lzma compressed pickles, and should
be deserialized in a similar manner.
"""


@dataclass
class DeserializerV2(DeserializerBase):
    @classmethod
    def FMT_STRING(cls) -> str:
        return ">Q"

    def __post_init__(self) -> None:
        (self.mapSize,) = struct.unpack(self.FMT_STRING(), self.sizeBytes)

    @property
    def headerSize(self) -> int:
        return self.preambleSize + self.structSize + self.mapSize

    def readHeaderInfo(self, rawHeader: bytes) -> SimpleNamespace:
        uncompressedHeaderMap = self.unpackHeader(rawHeader)
        if uncompressedHeaderMap is None:
            raise ValueError(
                "This error is not possible because self.unpackHeader cannot return None,"
                " but is done to satisfy type checkers"
            )
        header = json.loads(uncompressedHeaderMap)
        returnValue = SimpleNamespace()
        returnValue.taskDefMap = header["TaskDefs"]
        returnValue._buildId = header["GraphBuildID"]
        returnValue.map = dict(header["Nodes"])
        returnValue.metadata = header["Metadata"]
        self.returnValue = returnValue
        return returnValue

    def unpackHeader(self, rawHeader: bytes) -> Optional[str]:
        return lzma.decompress(rawHeader).decode()

    def constructGraph(
        self,
        nodes: set[uuid.UUID],
        _readBytes: Callable[[int, int], bytes],
        universe: Optional[DimensionUniverse] = None,
    ) -> QuantumGraph:
        # need to import here to avoid cyclic imports
        from . import QuantumGraph

        quanta: DefaultDict[TaskDef, Set[Quantum]] = defaultdict(set)
        quantumToNodeId: Dict[Quantum, uuid.UUID] = {}
        loadedTaskDef = {}
        # loop over the nodes specified above
        for node in nodes:
            # Get the bytes to read from the map
            start, stop = self.returnValue.map[node]["bytes"]
            start += self.headerSize
            stop += self.headerSize

            # read the specified bytes; they are compressed, so decompress
            # them
            dump = lzma.decompress(_readBytes(start, stop))

            # reconstruct node
            qNode = pickle.loads(dump)
            object.__setattr__(qNode, "nodeId", uuid.uuid4())

            # Look up the task associated with the saved node. If it has
            # already been loaded, attach it; if not, read in the taskDef
            # first, and then attach it.
            nodeTask = qNode.taskDef
            if nodeTask not in loadedTaskDef:
                # Get the byte ranges corresponding to this taskDef
                start, stop = self.returnValue.taskDefMap[nodeTask]["bytes"]
                start += self.headerSize
                stop += self.headerSize

                # load the taskDef; bytes are compressed, so decompress them
                taskDef = pickle.loads(lzma.decompress(_readBytes(start, stop)))
                loadedTaskDef[nodeTask] = taskDef
            # Explicitly override the "frozen-ness" of nodes to attach the
            # taskDef back onto the un-persisted node
            object.__setattr__(qNode, "taskDef", loadedTaskDef[nodeTask])
            quanta[qNode.taskDef].add(qNode.quantum)

            # record the node for later processing
            quantumToNodeId[qNode.quantum] = qNode.nodeId

        # construct an empty new QuantumGraph object, and run the associated
        # creation method with the un-persisted data
        qGraph = object.__new__(QuantumGraph)
        qGraph._buildGraphs(
            quanta,
            _quantumToNodeId=quantumToNodeId,
            _buildId=self.returnValue._buildId,
            metadata=self.returnValue.metadata,
            universe=universe,
        )
        return qGraph

    def description(self) -> str:
        return Version2Description


Version3Description = """
The save file starts with the first few bytes corresponding to the magic bytes
in the QuantumGraph: `qgraph4\xf6\xe8\xa9`.

The next few bytes are a big endian unsigned long long.

The unsigned long long corresponds to the number of bytes of a mapping
of header information. This mapping is encoded into json and then lzma
compressed, meaning the operations must be performed in the opposite order to
deserialize.

The json encoded header mapping contains 5 fields: GraphBuildId, TaskDefs,
Nodes, Metadata, and DimensionRecords.

The `GraphBuildId` key corresponds with a string that is the unique id
assigned to this graph when it was created.

The `TaskDefs` key corresponds to a value which is a mapping of Task label to
task data. The task data is a mapping of key to value. The keys of this
mapping are `bytes`, `inputs`, and `outputs`.

The `TaskDefs` `bytes` key corresponds to a tuple of the start, stop byte
range (indexed after all the header bytes). This byte range corresponds to a
lzma compressed json mapping. This mapping has keys of `taskName`,
corresponding to a fully qualified python class, `config`, a pex_config
string that is used to configure the class, and `label`, which corresponds to
a string that uniquely identifies the task within a given execution pipeline.

The `TaskDefs` `inputs` key is associated with a list of tuples where each
tuple is the label of a task that is considered to come before a given task,
and the name of the dataset that is shared between the tasks (think node and
edge in a graph sense).

The `TaskDefs` `outputs` key is like `inputs` except the values in the list
correspond to all the output connections of a task.

The `Nodes` key is also a json mapping with keys corresponding to the UUIDs
of QuantumNodes. The values associated with these keys are another mapping
with the keys `bytes`, `inputs`, and `outputs`.

The `Nodes` `bytes` key corresponds to a tuple of the start, stop byte range
(indexed after all the header bytes). These bytes are a lzma compressed json
mapping which contains many sub elements; this mapping will be referred to
as a SerializedQuantumNode (related to the python class it corresponds to).

SerializedQuantumNodes have 3 keys: `quantum`, corresponding to a json
mapping (described below) referred to as a SerializedQuantum; `taskLabel`, a
string which corresponds to a label in the `TaskDefs` mapping; and `nodeId`.

A SerializedQuantum has many keys: taskName, dataId, datasetTypeMapping,
initInputs, inputs, outputs, and dimensionRecords.

The `Nodes` `inputs` and `outputs` keys are like the `TaskDefs` ones except
they correspond to QuantumNodes instead of TaskDefs, and the keys of the
mappings are string representations of the UUIDs of the QuantumNodes.

The `Metadata` key is a mapping of strings to associated values. This metadata
may be anything that is important to be transported alongside the graph.

As stated above, each map contains byte ranges of the corresponding
data structure. These bytes are also lzma compressed json, and should
be deserialized in a similar manner.
"""


@dataclass
class DeserializerV3(DeserializerBase):
    @classmethod
    def FMT_STRING(cls) -> str:
        return ">Q"

    def __post_init__(self) -> None:
        self.infoSize: int
        (self.infoSize,) = struct.unpack(self.FMT_STRING(), self.sizeBytes)

    @property
    def headerSize(self) -> int:
        return self.preambleSize + self.structSize + self.infoSize

    def readHeaderInfo(self, rawHeader: bytes) -> SimpleNamespace:
        uncompressedinfoMap = self.unpackHeader(rawHeader)
        assert uncompressedinfoMap is not None  # for python typing, this variant can't be None
        infoMap = json.loads(uncompressedinfoMap)
        infoMappings = SimpleNamespace()
        infoMappings.taskDefMap = infoMap["TaskDefs"]
        infoMappings._buildId = infoMap["GraphBuildID"]
        infoMappings.map = {uuid.UUID(k): v for k, v in infoMap["Nodes"]}
        infoMappings.metadata = infoMap["Metadata"]
        infoMappings.dimensionRecords = {}
        for k, v in infoMap["DimensionRecords"].items():
            infoMappings.dimensionRecords[int(k)] = SerializedDimensionRecord(**v)
        # It is important that this be a get call, so that saved quantum
        # graphs that might not have a saved universe are still supported
        # without changing the save format.
        if (universeConfig := infoMap.get("universe")) is not None:
            universe = DimensionUniverse(config=DimensionConfig(universeConfig))
        else:
            universe = DimensionUniverse()
        infoMappings.universe = universe
        self.infoMappings = infoMappings
        return infoMappings

    def unpackHeader(self, rawHeader: bytes) -> Optional[str]:
        return lzma.decompress(rawHeader).decode()

    def constructGraph(
        self,
        nodes: set[uuid.UUID],
        _readBytes: Callable[[int, int], bytes],
        universe: Optional[DimensionUniverse] = None,
    ) -> QuantumGraph:
        # need to import here to avoid cyclic imports
        from . import QuantumGraph

        graph = nx.DiGraph()
        loadedTaskDef: Dict[str, TaskDef] = {}
        container = {}
        datasetDict = _DatasetTracker[DatasetTypeName, TaskDef](createInverse=True)
        taskToQuantumNode: DefaultDict[TaskDef, Set[QuantumNode]] = defaultdict(set)
        recontitutedDimensions: Dict[int, Tuple[str, DimensionRecord]] = {}
        initInputRefs: Dict[TaskDef, List[DatasetRef]] = {}
        initOutputRefs: Dict[TaskDef, List[DatasetRef]] = {}

        if universe is not None:
            if not universe.isCompatibleWith(self.infoMappings.universe):
                saved = self.infoMappings.universe
                raise RuntimeError(
                    f"The saved dimension universe ({saved.namespace}@v{saved.version}) is not "
                    f"compatible with the supplied universe ({universe.namespace}@v{universe.version})."
                )
        else:
            universe = self.infoMappings.universe

        for node in nodes:
            start, stop = self.infoMappings.map[node]["bytes"]
            start, stop = start + self.headerSize, stop + self.headerSize
            # Read in the bytes corresponding to the node to load and
            # decompress them
            dump = json.loads(lzma.decompress(_readBytes(start, stop)))

            # Turn the json back into the pydantic model
            nodeDeserialized = SerializedQuantumNode.direct(**dump)
            # attach the dictionary of dimension records to the pydantic
            # model; these are stored separately because they are repeated
            # over and over, and this saves a lot of space and time.
            nodeDeserialized.quantum.dimensionRecords = self.infoMappings.dimensionRecords
            # get the label for the current task
            nodeTaskLabel = nodeDeserialized.taskLabel

            if nodeTaskLabel not in loadedTaskDef:
                # Get the byte ranges corresponding to this taskDef
                start, stop = self.infoMappings.taskDefMap[nodeTaskLabel]["bytes"]
                start, stop = start + self.headerSize, stop + self.headerSize

                # bytes are compressed, so decompress them
                taskDefDump = json.loads(lzma.decompress(_readBytes(start, stop)))
                taskClass: Type[PipelineTask] = doImportType(taskDefDump["taskName"])
                config: PipelineTaskConfig = taskClass.ConfigClass()
                config.loadFromStream(taskDefDump["config"])
                # Rebuild TaskDef
                recreatedTaskDef = TaskDef(
                    taskName=taskDefDump["taskName"],
                    taskClass=taskClass,
                    config=config,
                    label=taskDefDump["label"],
                )
                loadedTaskDef[nodeTaskLabel] = recreatedTaskDef

                # initInputRefs and initOutputRefs are optional
                if (refs := taskDefDump.get("initInputRefs")) is not None:
                    initInputRefs[recreatedTaskDef] = [
                        cast(DatasetRef, DatasetRef.from_json(ref, universe=universe)) for ref in refs
                    ]
                if (refs := taskDefDump.get("initOutputRefs")) is not None:
                    initOutputRefs[recreatedTaskDef] = [
                        cast(DatasetRef, DatasetRef.from_json(ref, universe=universe)) for ref in refs
                    ]

                # rebuild the mappings that associate dataset type names with
                # TaskDefs
                for _, input in self.infoMappings.taskDefMap[nodeTaskLabel]["inputs"]:
                    datasetDict.addConsumer(DatasetTypeName(input), recreatedTaskDef)

                added = set()
                for outputConnection in self.infoMappings.taskDefMap[nodeTaskLabel]["outputs"]:
                    typeName = outputConnection[1]
                    if typeName not in added:
                        added.add(typeName)
                        datasetDict.addProducer(DatasetTypeName(typeName), recreatedTaskDef)

            # reconstitute the node, passing in the dictionaries for the
            # loaded TaskDefs and dimension records. These are used to ensure
            # that each unique record is only loaded once
            qnode = QuantumNode.from_simple(nodeDeserialized, loadedTaskDef, universe, recontitutedDimensions)
            container[qnode.nodeId] = qnode
            taskToQuantumNode[loadedTaskDef[nodeTaskLabel]].add(qnode)

            # recreate the relations between each node from stored info
            graph.add_node(qnode)
            for id in self.infoMappings.map[qnode.nodeId]["inputs"]:
                # uuid is stored as a string; turn it back into a uuid
                id = uuid.UUID(id)
                # If the id is not yet in the container, don't make a
                # connection. This is not an issue, because once it is, that
                # id will add the reverse connection.
                if id in container:
                    graph.add_edge(container[id], qnode)
            for id in self.infoMappings.map[qnode.nodeId]["outputs"]:
                # uuid is stored as a string; turn it back into a uuid
                id = uuid.UUID(id)
                # If the id is not yet in the container, don't make a
                # connection. This is not an issue, because once it is, that
                # id will add the reverse connection.
                if id in container:
                    graph.add_edge(qnode, container[id])

        newGraph = object.__new__(QuantumGraph)
        newGraph._metadata = self.infoMappings.metadata
        newGraph._buildId = self.infoMappings._buildId
        newGraph._datasetDict = datasetDict
        newGraph._nodeIdMap = container
        newGraph._count = len(nodes)
        newGraph._taskToQuantumNode = dict(taskToQuantumNode.items())
        newGraph._taskGraph = datasetDict.makeNetworkXGraph()
        newGraph._connectedQuanta = graph
        newGraph._initInputRefs = initInputRefs
        newGraph._initOutputRefs = initOutputRefs
        return newGraph


DESERIALIZER_MAP = {1: DeserializerV1, 2: DeserializerV2, 3: DeserializerV3}
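
# Illustrative usage (a hedged sketch; every name except DESERIALIZER_MAP is
# hypothetical): a loader that has already read the save version, preamble,
# and size bytes from a file would dispatch through this mapping to pick the
# right deserializer:
#
#     deserializerClass = DESERIALIZER_MAP[saveVersion]
#     deserializer = deserializerClass(preambleSize, sizeBytes)
#     headerInfo = deserializer.readHeaderInfo(rawHeader)
#     qgraph = deserializer.constructGraph(nodes, _readBytes, universe)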