Coverage for python/lsst/pipe/base/graph/_versionDeserializers.py: 28%

248 statements  

« prev     ^ index     » next       coverage.py v6.4.4, created at 2022-08-27 02:39 -0700

1# This file is part of pipe_base. 

2# 

3# Developed for the LSST Data Management System. 

4# This product includes software developed by the LSST Project 

5# (http://www.lsst.org). 

6# See the COPYRIGHT file at the top-level directory of this distribution 

7# for details of code ownership. 

8# 

9# This program is free software: you can redistribute it and/or modify 

10# it under the terms of the GNU General Public License as published by 

11# the Free Software Foundation, either version 3 of the License, or 

12# (at your option) any later version. 

13# 

14# This program is distributed in the hope that it will be useful, 

15# but WITHOUT ANY WARRANTY; without even the implied warranty of 

16# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 

17# GNU General Public License for more details. 

18# 

19# You should have received a copy of the GNU General Public License 

20# along with this program. If not, see <http://www.gnu.org/licenses/>. 

21from __future__ import annotations 

22 

23__all__ = ("DESERIALIZER_MAP",) 

24 

25import json 

26import lzma 

27import pickle 

28import struct 

29import uuid 

30from abc import ABC, abstractmethod 

31from collections import defaultdict 

32from dataclasses import dataclass 

33from types import SimpleNamespace 

34from typing import ( 

35 TYPE_CHECKING, 

36 Callable, 

37 ClassVar, 

38 DefaultDict, 

39 Dict, 

40 List, 

41 Optional, 

42 Set, 

43 Tuple, 

44 Type, 

45 cast, 

46) 

47 

48import networkx as nx 

49from lsst.daf.butler import ( 

50 DatasetRef, 

51 DimensionConfig, 

52 DimensionRecord, 

53 DimensionUniverse, 

54 Quantum, 

55 SerializedDimensionRecord, 

56) 

57from lsst.utils import doImportType 

58 

59from ..config import PipelineTaskConfig 

60from ..pipeline import TaskDef 

61from ..pipelineTask import PipelineTask 

62from ._implDetails import DatasetTypeName, _DatasetTracker 

63from .quantumNode import QuantumNode, SerializedQuantumNode 

64 

# Import only for type annotations: a runtime import would create a cycle,
# since the deserializers below import QuantumGraph locally inside
# constructGraph for the same reason.
if TYPE_CHECKING:
    from .graph import QuantumGraph

67 

68 

class StructSizeDescriptor:
    """A class-level property reporting the byte size of a deserializer's
    size struct.

    Accessing this attribute on a `DeserializerBase` subclass (or one of its
    instances) yields ``struct.calcsize`` applied to that class's
    ``FMT_STRING()``.
    """

    def __get__(self, inst: Optional[DeserializerBase], owner: Type[DeserializerBase]) -> int:
        # The owner class declares its binary layout via FMT_STRING(); the
        # size is always derived from that format, never stored.
        fmt = owner.FMT_STRING()
        return struct.calcsize(fmt)

76 

77 

# MyPy doesn't seem to like the idea of an abstract dataclass. It seems to
# work, but maybe we're doing something that isn't really supported (or maybe
# I misunderstood the error message).
@dataclass  # type: ignore
class DeserializerBase(ABC):
    """Base class for the per-save-format-version QuantumGraph deserializers.

    Parameters
    ----------
    preambleSize : `int`
        The number of bytes of the magic-byte preamble at the start of the
        save file.
    sizeBytes : `bytes`
        The raw bytes, immediately following the preamble, that encode the
        size(s) of the header section; interpreted according to
        `FMT_STRING`.
    """

    @classmethod
    @abstractmethod
    def FMT_STRING(cls) -> str:  # noqa: N805 # flake8 wants self
        """Return the `struct` format string describing how ``sizeBytes``
        is packed for this save-format version.
        """
        raise NotImplementedError("Base class does not implement this method")

    # Class-level "property" (see StructSizeDescriptor): evaluates to
    # struct.calcsize(FMT_STRING()); attached in __init_subclass__.
    structSize: ClassVar[StructSizeDescriptor]

    preambleSize: int
    sizeBytes: bytes

    def __init_subclass__(cls) -> None:
        # attach the size descriptor to every concrete deserializer
        cls.structSize = StructSizeDescriptor()
        super().__init_subclass__()

    def unpackHeader(self, rawHeader: bytes) -> Optional[str]:
        """Transforms the raw bytes corresponding to the header of a save into
        a string of the header information. Returns none if the save format has
        no header string implementation (such as save format 1 that is all
        pickle)

        Parameters
        ----------
        rawHeader : bytes
            The bytes that are to be parsed into the header information. These
            are the bytes after the preamble and structSize number of bytes
            and before the headerSize bytes
        """
        raise NotImplementedError("Base class does not implement this method")

    @property
    def headerSize(self) -> int:
        """Returns the number of bytes from the beginning of the file to the
        end of the metadata.
        """
        raise NotImplementedError("Base class does not implement this method")

    def readHeaderInfo(self, rawHeader: bytes) -> SimpleNamespace:
        """Parse the supplied raw bytes into the header information and
        byte ranges of specific TaskDefs and QuantumNodes

        Parameters
        ----------
        rawHeader : bytes
            The bytes that are to be parsed into the header information. These
            are the bytes after the preamble and structSize number of bytes
            and before the headerSize bytes
        """
        raise NotImplementedError("Base class does not implement this method")

    def constructGraph(
        self,
        nodes: set[uuid.UUID],
        _readBytes: Callable[[int, int], bytes],
        universe: Optional[DimensionUniverse] = None,
    ) -> QuantumGraph:
        """Constructs a graph from the deserialized information.

        Parameters
        ----------
        nodes : `set` of `uuid.UUID`
            The nodes to include in the graph
        _readBytes : callable
            A callable that can be used to read bytes from the file handle.
            The callable will take two ints, start and stop, to use as the
            numerical bounds to read and returns a byte stream.
        universe : `~lsst.daf.butler.DimensionUniverse`
            The singleton of all dimensions known to the middleware registry
        """
        raise NotImplementedError("Base class does not implement this method")

    def description(self) -> str:
        """Return the description of the serialized data format"""
        raise NotImplementedError("Base class does not implement this method")

157 

158 

# Human-readable description of save-format version 1, returned by
# DeserializerV1.description().
Version1Description = """
The save file starts with the first few bytes corresponding to the magic bytes
in the QuantumGraph: `qgraph4\xf6\xe8\xa9`.

The next few bytes are 2 big endian unsigned 64 bit integers.

The first unsigned 64 bit integer corresponds to the number of bytes of a
python mapping of TaskDef labels to the byte ranges in the save file where the
definition can be loaded.

The second unsigned 64 bit integer corresponds to the number of bytes of a
python mapping of QuantumGraph Node number to the byte ranges in the save file
where the node can be loaded. The byte range is indexed starting after
the `header` bytes of the magic bytes, size bytes, and bytes of the two
mappings.

Each of the above mappings are pickled and then lzma compressed, so to
deserialize the bytes, first lzma decompression must be performed and the
results passed to python pickle loader.

As stated above, each map contains byte ranges of the corresponding
data structure. These bytes are also lzma compressed pickles, and should
be deserialized in a similar manner. The byte range is indexed starting after
the `header` bytes of the magic bytes, size bytes, and bytes of the two
mappings.

In addition to the TaskDef byte locations, the TaskDef map also contains
an additional key '__GraphBuildID'. The value associated with this is the
unique id assigned to the graph at its creation time.
"""

189 

190 

@dataclass
class DeserializerV1(DeserializerBase):
    """Deserializer for save-format version 1.

    The header consists of two pickled, lzma compressed mappings (TaskDef
    labels and node numbers, each to byte ranges); every TaskDef and
    QuantumNode payload is itself an lzma compressed pickle. See
    `Version1Description` for the full layout.
    """

    @classmethod
    def FMT_STRING(cls) -> str:
        # Two big endian unsigned 64 bit integers: the byte sizes of the
        # taskDef map and the node map.
        return ">QQ"

    def __post_init__(self) -> None:
        # Decode the sizes of the two header mappings from the size bytes.
        self.taskDefMapSize, self.nodeMapSize = struct.unpack(self.FMT_STRING(), self.sizeBytes)

    @property
    def headerSize(self) -> int:
        # Everything before the payload region: preamble + size struct +
        # both header maps.
        return self.preambleSize + self.structSize + self.taskDefMapSize + self.nodeMapSize

    def readHeaderInfo(self, rawHeader: bytes) -> SimpleNamespace:
        """Unpickle the two header mappings and record them on ``self`` for
        later use by `constructGraph`.
        """
        returnValue = SimpleNamespace()
        returnValue.taskDefMap = pickle.loads(rawHeader[: self.taskDefMapSize])
        # The build id lives under a reserved key inside the taskDef map.
        returnValue._buildId = returnValue.taskDefMap["__GraphBuildID"]
        returnValue.map = pickle.loads(rawHeader[self.taskDefMapSize :])
        # Format 1 has no metadata section.
        returnValue.metadata = None
        self.returnValue = returnValue
        return returnValue

    def unpackHeader(self, rawHeader: bytes) -> Optional[str]:
        # Format 1 is all pickle; there is no string form of the header.
        return None

    def constructGraph(
        self,
        nodes: set[uuid.UUID],
        _readBytes: Callable[[int, int], bytes],
        universe: Optional[DimensionUniverse] = None,
    ) -> QuantumGraph:
        """Rebuild a `QuantumGraph` from the pickled node and TaskDef
        payloads whose byte ranges were read by `readHeaderInfo`.
        """
        # need to import here to avoid cyclic imports
        from . import QuantumGraph

        quanta: DefaultDict[TaskDef, Set[Quantum]] = defaultdict(set)
        quantumToNodeId: Dict[Quantum, uuid.UUID] = {}
        loadedTaskDef = {}
        # loop over the nodes specified above
        for node in nodes:
            # Get the bytes to read from the map
            start, stop = self.returnValue.map[node]
            start += self.headerSize
            stop += self.headerSize

            # read the specified bytes, will be overloaded by subclasses
            # bytes are compressed, so decompress them
            dump = lzma.decompress(_readBytes(start, stop))

            # reconstruct node
            qNode = pickle.loads(dump)
            # Format 1 saves have no persistent node ids, so a fresh uuid is
            # assigned on every load.
            object.__setattr__(qNode, "nodeId", uuid.uuid4())

            # read the saved node, name. If it has been loaded, attach it, if
            # not read in the taskDef first, and then load it
            nodeTask = qNode.taskDef
            if nodeTask not in loadedTaskDef:
                # Get the byte ranges corresponding to this taskDef
                start, stop = self.returnValue.taskDefMap[nodeTask]
                start += self.headerSize
                stop += self.headerSize

                # load the taskDef, this method call will be overloaded by
                # subclasses.
                # bytes are compressed, so decompress them
                taskDef = pickle.loads(lzma.decompress(_readBytes(start, stop)))
                loadedTaskDef[nodeTask] = taskDef
            # Explicitly overload the "frozen-ness" of nodes to attach the
            # taskDef back into the un-persisted node
            object.__setattr__(qNode, "taskDef", loadedTaskDef[nodeTask])
            quanta[qNode.taskDef].add(qNode.quantum)

            # record the node for later processing
            quantumToNodeId[qNode.quantum] = qNode.nodeId

        # construct an empty new QuantumGraph object, and run the associated
        # creation method with the un-persisted data
        qGraph = object.__new__(QuantumGraph)
        qGraph._buildGraphs(
            quanta,
            _quantumToNodeId=quantumToNodeId,
            _buildId=self.returnValue._buildId,
            metadata=self.returnValue.metadata,
            universe=universe,
        )
        return qGraph

    def description(self) -> str:
        """Return the description of the serialized data format"""
        return Version1Description

279 

280 

# Human-readable description of save-format version 2, returned by
# DeserializerV2.description().
Version2Description = """
The save file starts with the first few bytes corresponding to the magic bytes
in the QuantumGraph: `qgraph4\xf6\xe8\xa9`.

The next few bytes are a big endian unsigned long long.

The unsigned long long corresponds to the number of bytes of a python mapping
of header information. This mapping is encoded into json and then lzma
compressed, meaning the operations must be performed in the opposite order to
deserialize.

The json encoded header mapping contains 4 fields: TaskDefs, GraphBuildID,
Nodes, and Metadata.

The `TaskDefs` key corresponds to a value which is a mapping of Task label to
task data. The task data is a mapping of key to value, where the only key is
`bytes` and it corresponds to a tuple of a byte range of the start, stop
bytes (indexed after all the header bytes)

The `GraphBuildID` corresponds with a string that is the unique id assigned to
this graph when it was created.

The `Nodes` key is like the `TaskDefs` key except it corresponds to
QuantumNodes instead of TaskDefs. Another important difference is that JSON
formatting does not allow using numbers as keys, and this mapping is keyed by
the node number. Thus it is stored in JSON as two equal length lists, the first
being the keys, and the second the values associated with those keys.

The `Metadata` key is a mapping of strings to associated values. This metadata
may be anything that is important to be transported alongside the graph.

As stated above, each map contains byte ranges of the corresponding
data structure. These bytes are also lzma compressed pickles, and should
be deserialized in a similar manner.
"""

316 

317 

@dataclass
class DeserializerV2(DeserializerBase):
    """Deserializer for save-format version 2.

    The header is a single lzma compressed, json encoded mapping; each
    TaskDef and QuantumNode payload is an lzma compressed pickle. See
    `Version2Description` for the full layout.
    """

    @classmethod
    def FMT_STRING(cls) -> str:
        # One big endian unsigned 64 bit integer: the size in bytes of the
        # compressed header mapping.
        return ">Q"

    def __post_init__(self) -> None:
        # Decode the header map size from the size bytes.
        (self.mapSize,) = struct.unpack(self.FMT_STRING(), self.sizeBytes)

    @property
    def headerSize(self) -> int:
        # Everything before the payload region: preamble + size struct +
        # the header map.
        return self.preambleSize + self.structSize + self.mapSize

    def readHeaderInfo(self, rawHeader: bytes) -> SimpleNamespace:
        """Decode the json header mapping and record its pieces on ``self``
        for later use by `constructGraph`.
        """
        uncompressedHeaderMap = self.unpackHeader(rawHeader)
        if uncompressedHeaderMap is None:
            raise ValueError(
                "This error is not possible because self.unpackHeader cannot return None,"
                " but is done to satisfy type checkers"
            )
        header = json.loads(uncompressedHeaderMap)
        returnValue = SimpleNamespace()
        returnValue.taskDefMap = header["TaskDefs"]
        returnValue._buildId = header["GraphBuildID"]
        # Nodes are stored as parallel key/value lists (json cannot key a
        # mapping by number); dict() re-pairs them.
        returnValue.map = dict(header["Nodes"])
        returnValue.metadata = header["Metadata"]
        self.returnValue = returnValue
        return returnValue

    def unpackHeader(self, rawHeader: bytes) -> Optional[str]:
        # The header is an lzma compressed utf-8 json document.
        return lzma.decompress(rawHeader).decode()

    def constructGraph(
        self,
        nodes: set[uuid.UUID],
        _readBytes: Callable[[int, int], bytes],
        universe: Optional[DimensionUniverse] = None,
    ) -> QuantumGraph:
        """Rebuild a `QuantumGraph` from the pickled node and TaskDef
        payloads whose byte ranges were read by `readHeaderInfo`.
        """
        # need to import here to avoid cyclic imports
        from . import QuantumGraph

        quanta: DefaultDict[TaskDef, Set[Quantum]] = defaultdict(set)
        quantumToNodeId: Dict[Quantum, uuid.UUID] = {}
        loadedTaskDef = {}
        # loop over the nodes specified above
        for node in nodes:
            # Get the bytes to read from the map
            start, stop = self.returnValue.map[node]["bytes"]
            start += self.headerSize
            stop += self.headerSize

            # read the specified bytes, will be overloaded by subclasses
            # bytes are compressed, so decompress them
            dump = lzma.decompress(_readBytes(start, stop))

            # reconstruct node
            qNode = pickle.loads(dump)
            # Format 2 saves have no persistent node ids, so a fresh uuid is
            # assigned on every load.
            object.__setattr__(qNode, "nodeId", uuid.uuid4())

            # read the saved node, name. If it has been loaded, attach it, if
            # not read in the taskDef first, and then load it
            nodeTask = qNode.taskDef
            if nodeTask not in loadedTaskDef:
                # Get the byte ranges corresponding to this taskDef
                start, stop = self.returnValue.taskDefMap[nodeTask]["bytes"]
                start += self.headerSize
                stop += self.headerSize

                # load the taskDef, this method call will be overloaded by
                # subclasses.
                # bytes are compressed, so decompress them
                taskDef = pickle.loads(lzma.decompress(_readBytes(start, stop)))
                loadedTaskDef[nodeTask] = taskDef
            # Explicitly overload the "frozen-ness" of nodes to attach the
            # taskDef back into the un-persisted node
            object.__setattr__(qNode, "taskDef", loadedTaskDef[nodeTask])
            quanta[qNode.taskDef].add(qNode.quantum)

            # record the node for later processing
            quantumToNodeId[qNode.quantum] = qNode.nodeId

        # construct an empty new QuantumGraph object, and run the associated
        # creation method with the un-persisted data
        qGraph = object.__new__(QuantumGraph)
        qGraph._buildGraphs(
            quanta,
            _quantumToNodeId=quantumToNodeId,
            _buildId=self.returnValue._buildId,
            metadata=self.returnValue.metadata,
            universe=universe,
        )
        return qGraph

    def description(self) -> str:
        """Return the description of the serialized data format"""
        return Version2Description

413 

414 

# Human-readable description of save-format version 3, returned by
# DeserializerV3.description().
Version3Description = """
The save file starts with the first few bytes corresponding to the magic bytes
in the QuantumGraph: `qgraph4\xf6\xe8\xa9`.

The next few bytes are a big endian unsigned long long.

The unsigned long long corresponds to the number of bytes of a mapping
of header information. This mapping is encoded into json and then lzma
compressed, meaning the operations must be performed in the opposite order to
deserialize.

The json encoded header mapping contains 5 fields: GraphBuildID, TaskDefs,
Nodes, Metadata, and DimensionRecords. It may also contain an optional
`universe` key holding a serialized DimensionConfig describing the dimension
universe the graph was saved with.

The `GraphBuildID` key corresponds with a string that is the unique id assigned
to this graph when it was created.

The `TaskDefs` key corresponds to a value which is a mapping of Task label to
task data. The task data is a mapping of key to value. The keys of this mapping
are `bytes`, `inputs`, and `outputs`.

The `TaskDefs` `bytes` key corresponds to a tuple of a byte range of the
start, stop bytes (indexed after all the header bytes). This byte range
corresponds to a lzma compressed json mapping. This mapping has keys of
`taskName`, corresponding to a fully qualified python class, `config` a
pex_config string that is used to configure the class, and `label` which
corresponds to a string that uniquely identifies the task within a given
execution pipeline.

The `TaskDefs` `inputs` key is associated with a list of tuples where each
tuple is a label of a task that is considered coming before a given task, and
the name of the dataset that is shared between the tasks (think node and edge
in a graph sense).

The `TaskDefs` `outputs` key is like inputs except the values in a list
correspond to all the output connections of a task.

The `Nodes` key is also a json mapping with keys corresponding to the UUIDs of
QuantumNodes. The values associated with these keys is another mapping with
the keys `bytes`, `inputs`, and `outputs`.

`Nodes` key `bytes` corresponds to a tuple of a byte range of the start, stop
bytes (indexed after all the header bytes). These bytes are a lzma compressed
json mapping which contains many sub elements, this mapping will be referred to
as the SerializedQuantumNode (related to the python class it corresponds to).

SerializedQuantumNodes have 3 keys, `quantum` corresponding to a json mapping
(described below) referred to as a SerializedQuantum, `taskLabel` a string
which corresponds to a label in the `TaskDefs` mapping, and `nodeId`.

A SerializedQuantum has many keys; taskName, dataId, datasetTypeMapping,
initInputs, inputs, outputs, dimensionRecords.

The `Nodes` mapping is like the `TaskDefs` mapping except it corresponds to
QuantumNodes instead of TaskDefs, and the keys of the mappings are string
representations of the UUIDs of the QuantumNodes.

The `Metadata` key is a mapping of strings to associated values. This metadata
may be anything that is important to be transported alongside the graph.

As stated above, each map contains byte ranges of the corresponding
data structure. These bytes are also lzma compressed json, and should
be deserialized in a similar manner.
"""

479 

480 

@dataclass
class DeserializerV3(DeserializerBase):
    """Deserializer for save-format version 3.

    The header is a single lzma compressed, json encoded mapping; each
    TaskDef and QuantumNode payload is an lzma compressed json document
    (no pickle). See `Version3Description` for the full layout.

    Fixes relative to the previous revision: this class now overrides
    ``description()`` (previously the base-class stub raised
    `NotImplementedError` and `Version3Description` was never used).
    """

    @classmethod
    def FMT_STRING(cls) -> str:
        # One big endian unsigned 64 bit integer: the size in bytes of the
        # compressed header mapping.
        return ">Q"

    def __post_init__(self) -> None:
        self.infoSize: int
        # Decode the header info size from the size bytes.
        (self.infoSize,) = struct.unpack(self.FMT_STRING(), self.sizeBytes)

    @property
    def headerSize(self) -> int:
        # Everything before the payload region: preamble + size struct +
        # the header info mapping.
        return self.preambleSize + self.structSize + self.infoSize

    def readHeaderInfo(self, rawHeader: bytes) -> SimpleNamespace:
        """Decode the json header mapping, including the saved dimension
        records and (optionally) the saved dimension universe, and record
        everything on ``self`` for later use by `constructGraph`.
        """
        uncompressedinfoMap = self.unpackHeader(rawHeader)
        assert uncompressedinfoMap is not None  # for python typing, this variant can't be None
        infoMap = json.loads(uncompressedinfoMap)
        infoMappings = SimpleNamespace()
        infoMappings.taskDefMap = infoMap["TaskDefs"]
        infoMappings._buildId = infoMap["GraphBuildID"]
        # Node ids are stored as strings in json; key the map by real UUIDs.
        infoMappings.map = {uuid.UUID(k): v for k, v in infoMap["Nodes"]}
        infoMappings.metadata = infoMap["Metadata"]
        infoMappings.dimensionRecords = {}
        for k, v in infoMap["DimensionRecords"].items():
            infoMappings.dimensionRecords[int(k)] = SerializedDimensionRecord(**v)
        # This is important to be a get call here, so that it supports versions
        # of saved quantum graph that might not have a saved universe without
        # changing save format
        if (universeConfig := infoMap.get("universe")) is not None:
            universe = DimensionUniverse(config=DimensionConfig(universeConfig))
        else:
            universe = DimensionUniverse()
        infoMappings.universe = universe
        self.infoMappings = infoMappings
        return infoMappings

    def unpackHeader(self, rawHeader: bytes) -> Optional[str]:
        # The header is an lzma compressed utf-8 json document.
        return lzma.decompress(rawHeader).decode()

    def constructGraph(
        self,
        nodes: set[uuid.UUID],
        _readBytes: Callable[[int, int], bytes],
        universe: Optional[DimensionUniverse] = None,
    ) -> QuantumGraph:
        """Rebuild a `QuantumGraph` from the json node and TaskDef payloads
        whose byte ranges were read by `readHeaderInfo`, reconnecting node
        relations and dataset-type producer/consumer mappings.
        """
        # need to import here to avoid cyclic imports
        from . import QuantumGraph

        graph = nx.DiGraph()
        loadedTaskDef: Dict[str, TaskDef] = {}
        container = {}
        datasetDict = _DatasetTracker[DatasetTypeName, TaskDef](createInverse=True)
        taskToQuantumNode: DefaultDict[TaskDef, Set[QuantumNode]] = defaultdict(set)
        reconstitutedDimensions: Dict[int, Tuple[str, DimensionRecord]] = {}
        initInputRefs: Dict[TaskDef, List[DatasetRef]] = {}
        initOutputRefs: Dict[TaskDef, List[DatasetRef]] = {}

        # A caller-supplied universe must be compatible with the one the
        # graph was saved with; otherwise fall back to the saved universe.
        if universe is not None:
            if not universe.isCompatibleWith(self.infoMappings.universe):
                saved = self.infoMappings.universe
                raise RuntimeError(
                    f"The saved dimension universe ({saved.namespace}@v{saved.version}) is not "
                    f"compatible with the supplied universe ({universe.namespace}@v{universe.version})."
                )
        else:
            universe = self.infoMappings.universe

        for node in nodes:
            start, stop = self.infoMappings.map[node]["bytes"]
            start, stop = start + self.headerSize, stop + self.headerSize
            # Read in the bytes corresponding to the node to load and
            # decompress it
            dump = json.loads(lzma.decompress(_readBytes(start, stop)))

            # Turn the json back into the pydantic model
            nodeDeserialized = SerializedQuantumNode.direct(**dump)
            # attach the dictionary of dimension records to the pydantic model
            # these are stored separately because they are stored over and
            # over and this saves a lot of space and time.
            nodeDeserialized.quantum.dimensionRecords = self.infoMappings.dimensionRecords
            # get the label for the current task
            nodeTaskLabel = nodeDeserialized.taskLabel

            if nodeTaskLabel not in loadedTaskDef:
                # Get the byte ranges corresponding to this taskDef
                start, stop = self.infoMappings.taskDefMap[nodeTaskLabel]["bytes"]
                start, stop = start + self.headerSize, stop + self.headerSize

                # bytes are compressed, so decompress them
                taskDefDump = json.loads(lzma.decompress(_readBytes(start, stop)))
                taskClass: Type[PipelineTask] = doImportType(taskDefDump["taskName"])
                config: PipelineTaskConfig = taskClass.ConfigClass()
                config.loadFromStream(taskDefDump["config"])
                # Rebuild TaskDef
                recreatedTaskDef = TaskDef(
                    taskName=taskDefDump["taskName"],
                    taskClass=taskClass,
                    config=config,
                    label=taskDefDump["label"],
                )
                loadedTaskDef[nodeTaskLabel] = recreatedTaskDef

                # initInputRefs and initOutputRefs are optional
                if (refs := taskDefDump.get("initInputRefs")) is not None:
                    initInputRefs[recreatedTaskDef] = [
                        cast(DatasetRef, DatasetRef.from_json(ref, universe=universe)) for ref in refs
                    ]
                if (refs := taskDefDump.get("initOutputRefs")) is not None:
                    initOutputRefs[recreatedTaskDef] = [
                        cast(DatasetRef, DatasetRef.from_json(ref, universe=universe)) for ref in refs
                    ]

                # rebuild the mappings that associate dataset type names with
                # TaskDefs
                for _, input in self.infoMappings.taskDefMap[nodeTaskLabel]["inputs"]:
                    datasetDict.addConsumer(DatasetTypeName(input), recreatedTaskDef)

                # only register each output dataset type name once per task
                added = set()
                for outputConnection in self.infoMappings.taskDefMap[nodeTaskLabel]["outputs"]:
                    typeName = outputConnection[1]
                    if typeName not in added:
                        added.add(typeName)
                        datasetDict.addProducer(DatasetTypeName(typeName), recreatedTaskDef)

            # reconstitute the node, passing in the dictionaries for the
            # loaded TaskDefs and dimension records. These are used to ensure
            # that each unique record is only loaded once
            qnode = QuantumNode.from_simple(nodeDeserialized, loadedTaskDef, universe, reconstitutedDimensions)
            container[qnode.nodeId] = qnode
            taskToQuantumNode[loadedTaskDef[nodeTaskLabel]].add(qnode)

            # recreate the relations between each node from stored info
            graph.add_node(qnode)
            for id in self.infoMappings.map[qnode.nodeId]["inputs"]:
                # uuid is stored as a string, turn it back into a uuid
                id = uuid.UUID(id)
                # if the id is not yet in the container, don't make a
                # connection; this is not an issue, because once it is, that
                # id will add the reverse connection
                if id in container:
                    graph.add_edge(container[id], qnode)
            for id in self.infoMappings.map[qnode.nodeId]["outputs"]:
                # uuid is stored as a string, turn it back into a uuid
                id = uuid.UUID(id)
                # if the id is not yet in the container, don't make a
                # connection; this is not an issue, because once it is, that
                # id will add the reverse connection
                if id in container:
                    graph.add_edge(qnode, container[id])

        # Assemble the QuantumGraph directly from the reconstituted pieces
        # (bypassing __init__, which would rebuild everything from scratch).
        newGraph = object.__new__(QuantumGraph)
        newGraph._metadata = self.infoMappings.metadata
        newGraph._buildId = self.infoMappings._buildId
        newGraph._datasetDict = datasetDict
        newGraph._nodeIdMap = container
        newGraph._count = len(nodes)
        newGraph._taskToQuantumNode = dict(taskToQuantumNode.items())
        newGraph._taskGraph = datasetDict.makeNetworkXGraph()
        newGraph._connectedQuanta = graph
        newGraph._initInputRefs = initInputRefs
        newGraph._initOutputRefs = initOutputRefs
        return newGraph

    def description(self) -> str:
        """Return the description of the serialized data format"""
        return Version3Description

644 

645 

# Mapping from save-format version number to the deserializer class that
# understands that format; this is the module's public API (see __all__).
DESERIALIZER_MAP = {1: DeserializerV1, 2: DeserializerV2, 3: DeserializerV3}