Coverage for python/lsst/pipe/tasks/postprocess.py: 31%

805 statements  

coverage.py v6.4.1, created at 2022-06-14 12:35 +0000

1# This file is part of pipe_tasks 

2# 

3# Developed for the LSST Data Management System. 

4# This product includes software developed by the LSST Project 

5# (https://www.lsst.org). 

6# See the COPYRIGHT file at the top-level directory of this distribution 

7# for details of code ownership. 

8# 

9# This program is free software: you can redistribute it and/or modify 

10# it under the terms of the GNU General Public License as published by 

11# the Free Software Foundation, either version 3 of the License, or 

12# (at your option) any later version. 

13# 

14# This program is distributed in the hope that it will be useful, 

15# but WITHOUT ANY WARRANTY; without even the implied warranty of 

16# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 

17# GNU General Public License for more details. 

18# 

19# You should have received a copy of the GNU General Public License 

20# along with this program. If not, see <https://www.gnu.org/licenses/>. 

21 

22import functools 

23import pandas as pd 

24from collections import defaultdict 

25import logging 

26import numpy as np 

27import numbers 

28import os 

29 

30import lsst.geom 

31import lsst.pex.config as pexConfig 

32import lsst.pipe.base as pipeBase 

33import lsst.daf.base as dafBase 

34from lsst.obs.base import ExposureIdInfo 

35from lsst.pipe.base import connectionTypes 

36import lsst.afw.table as afwTable 

37from lsst.meas.base import SingleFrameMeasurementTask 

38from lsst.pipe.base import CmdLineTask, ArgumentParser, DataIdContainer 

39from lsst.coadd.utils.coaddDataIdContainer import CoaddDataIdContainer 

40from lsst.daf.butler import DeferredDatasetHandle, DataCoordinate 

41from lsst.skymap import BaseSkyMap 

42 

43from .parquetTable import ParquetTable 

44from .multiBandUtils import makeMergeArgumentParser, MergeSourcesRunner 

45from .functors import CompositeFunctor, Column 

46 

47log = logging.getLogger(__name__) 

48 

49 

50def flattenFilters(df, noDupCols=['coord_ra', 'coord_dec'], camelCase=False, inputBands=None): 

51 """Flatten a dataframe with a multilevel column index.

52 """ 

53 newDf = pd.DataFrame() 

54 # band is the level 0 index 

55 dfBands = df.columns.unique(level=0).values 

56 for band in dfBands: 

57 subdf = df[band] 

58 columnFormat = '{0}{1}' if camelCase else '{0}_{1}' 

59 newColumns = {c: columnFormat.format(band, c) 

60 for c in subdf.columns if c not in noDupCols} 

61 cols = list(newColumns.keys()) 

62 newDf = pd.concat([newDf, subdf[cols].rename(columns=newColumns)], axis=1) 

63 

64 # Band must be present in the input and output or else column is all NaN: 

65 presentBands = dfBands if inputBands is None else list(set(inputBands).intersection(dfBands)) 

66 # Get the unexploded columns from any present band's partition 

67 noDupDf = df[presentBands[0]][noDupCols] 

68 newDf = pd.concat([noDupDf, newDf], axis=1) 

69 return newDf 

70 
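# Minimal usage sketch (illustrative only; the column names below are hypothetical):
# flattenFilters turns a band-keyed multilevel column index into flat, band-prefixed
# column names, keeping the noDupCols only once.
def _flattenFiltersExample():
    columns = pd.MultiIndex.from_tuples(
        [(band, col) for band in ('g', 'r')
         for col in ('coord_ra', 'coord_dec', 'PsFlux')],
        names=('band', 'column'))
    df = pd.DataFrame(np.ones((3, len(columns))), columns=columns)
    flat = flattenFilters(df)
    # flat has 'coord_ra' and 'coord_dec' once, plus 'g_PsFlux' and 'r_PsFlux'
    # (or 'gPsFlux'/'rPsFlux' with camelCase=True).
    return flat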

71 

72class WriteObjectTableConnections(pipeBase.PipelineTaskConnections, 

73 defaultTemplates={"coaddName": "deep"}, 

74 dimensions=("tract", "patch", "skymap")): 

75 inputCatalogMeas = connectionTypes.Input( 

76 doc="Catalog of source measurements on the deepCoadd.", 

77 dimensions=("tract", "patch", "band", "skymap"), 

78 storageClass="SourceCatalog", 

79 name="{coaddName}Coadd_meas", 

80 multiple=True 

81 ) 

82 inputCatalogForcedSrc = connectionTypes.Input( 

83 doc="Catalog of forced measurements (shape and position parameters held fixed) on the deepCoadd.", 

84 dimensions=("tract", "patch", "band", "skymap"), 

85 storageClass="SourceCatalog", 

86 name="{coaddName}Coadd_forced_src", 

87 multiple=True 

88 ) 

89 inputCatalogRef = connectionTypes.Input( 

90 doc="Catalog marking the primary detection (which band provides a good shape and position) " 

91 "for each detection in deepCoadd_mergeDet.", 

92 dimensions=("tract", "patch", "skymap"), 

93 storageClass="SourceCatalog", 

94 name="{coaddName}Coadd_ref" 

95 ) 

96 outputCatalog = connectionTypes.Output( 

97 doc="A vertical concatenation of the deepCoadd_{ref|meas|forced_src} catalogs, " 

98 "stored as a DataFrame with a multi-level column index per-patch.", 

99 dimensions=("tract", "patch", "skymap"), 

100 storageClass="DataFrame", 

101 name="{coaddName}Coadd_obj" 

102 ) 

103 

104 

105class WriteObjectTableConfig(pipeBase.PipelineTaskConfig, 

106 pipelineConnections=WriteObjectTableConnections): 

107 engine = pexConfig.Field( 

108 dtype=str, 

109 default="pyarrow", 

110 doc="Parquet engine for writing (pyarrow or fastparquet)" 

111 ) 

112 coaddName = pexConfig.Field( 

113 dtype=str, 

114 default="deep", 

115 doc="Name of coadd" 

116 ) 

117 

118 

119class WriteObjectTableTask(CmdLineTask, pipeBase.PipelineTask): 

120 """Write filter-merged source tables to parquet 

121 """ 

122 _DefaultName = "writeObjectTable" 

123 ConfigClass = WriteObjectTableConfig 

124 RunnerClass = MergeSourcesRunner 

125 

126 # Names of table datasets to be merged 

127 inputDatasets = ('forced_src', 'meas', 'ref') 

128 

129 # Tag of output dataset written by `MergeSourcesTask.write` 

130 outputDataset = 'obj' 

131 

132 def __init__(self, butler=None, schema=None, **kwargs): 

133 # It is a shame that this class can't use the default init for CmdLineTask 

134 # But to do so would require its own special task runner, which is many 

135 # more lines of specialization, so this is how it is for now 

136 super().__init__(**kwargs) 

137 

138 def runDataRef(self, patchRefList): 

139 """! 

140 @brief Merge coadd sources from multiple bands. Calls @ref `run` which must be defined in 

141 subclasses that inherit from MergeSourcesTask. 

142 @param[in] patchRefList list of data references for each filter 

143 """ 

144 catalogs = dict(self.readCatalog(patchRef) for patchRef in patchRefList) 

145 dataId = patchRefList[0].dataId 

146 mergedCatalog = self.run(catalogs, tract=dataId['tract'], patch=dataId['patch']) 

147 self.write(patchRefList[0], ParquetTable(dataFrame=mergedCatalog)) 

148 

149 def runQuantum(self, butlerQC, inputRefs, outputRefs): 

150 inputs = butlerQC.get(inputRefs) 

151 

152 measDict = {ref.dataId['band']: {'meas': cat} for ref, cat in 

153 zip(inputRefs.inputCatalogMeas, inputs['inputCatalogMeas'])} 

154 forcedSourceDict = {ref.dataId['band']: {'forced_src': cat} for ref, cat in 

155 zip(inputRefs.inputCatalogForcedSrc, inputs['inputCatalogForcedSrc'])} 

156 

157 catalogs = {} 

158 for band in measDict.keys(): 

159 catalogs[band] = {'meas': measDict[band]['meas'], 

160 'forced_src': forcedSourceDict[band]['forced_src'], 

161 'ref': inputs['inputCatalogRef']} 

162 dataId = butlerQC.quantum.dataId 

163 df = self.run(catalogs=catalogs, tract=dataId['tract'], patch=dataId['patch']) 

164 outputs = pipeBase.Struct(outputCatalog=df) 

165 butlerQC.put(outputs, outputRefs) 

166 

167 @classmethod 

168 def _makeArgumentParser(cls): 

169 """Create a suitable ArgumentParser. 

170 

171 We will use the ArgumentParser to get a list of data 

172 references for patches; the RunnerClass will sort them into lists 

173 of data references for the same patch. 

174 

175 This references the first of self.inputDatasets, rather than 

176 self.inputDataset. 

177 """ 

178 return makeMergeArgumentParser(cls._DefaultName, cls.inputDatasets[0]) 

179 

180 def readCatalog(self, patchRef): 

181 """Read input catalogs 

182 

183 Read all the input datasets given by the 'inputDatasets' 

184 attribute. 

185 

186 Parameters 

187 ---------- 

188 patchRef : `lsst.daf.persistence.ButlerDataRef` 

189 Data reference for patch 

190 

191 Returns 

192 ------- 

193 Tuple consisting of band name and a dict of catalogs, keyed by 

194 dataset name 

195 """ 

196 band = patchRef.get(self.config.coaddName + "Coadd_filter", immediate=True).bandLabel 

197 catalogDict = {} 

198 for dataset in self.inputDatasets: 

199 catalog = patchRef.get(self.config.coaddName + "Coadd_" + dataset, immediate=True) 

200 self.log.info("Read %d sources from %s for band %s: %s", 

201 len(catalog), dataset, band, patchRef.dataId) 

202 catalogDict[dataset] = catalog 

203 return band, catalogDict 

204 

205 def run(self, catalogs, tract, patch): 

206 """Merge multiple catalogs. 

207 

208 Parameters 

209 ---------- 

210 catalogs : `dict` 

211 Mapping from filter names to dict of catalogs. 

212 tract : int 

213 tractId to use for the tractId column 

214 patch : str 

215 patchId to use for the patchId column 

216 

217 Returns 

218 ------- 

219 catalog : `pandas.DataFrame` 

220 Merged dataframe 

221 """ 
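# The `catalogs` mapping passed in looks like (band keys here are hypothetical):
#     {'g': {'meas': <SourceCatalog>, 'forced_src': <SourceCatalog>, 'ref': <SourceCatalog>},
#      'r': {...}}
# Each per-band, per-dataset catalog becomes one block of the multi-level column
# index built below.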

222 

223 dfs = [] 

224 for filt, tableDict in catalogs.items(): 

225 for dataset, table in tableDict.items(): 

226 # Convert afwTable to pandas DataFrame 

227 df = table.asAstropy().to_pandas().set_index('id', drop=True) 

228 

229 # Sort columns by name, to ensure matching schema among patches 

230 df = df.reindex(sorted(df.columns), axis=1) 

231 df['tractId'] = tract 

232 df['patchId'] = patch 

233 

234 # Make columns a 3-level MultiIndex 

235 df.columns = pd.MultiIndex.from_tuples([(dataset, filt, c) for c in df.columns], 

236 names=('dataset', 'band', 'column')) 

237 dfs.append(df) 

238 

239 catalog = functools.reduce(lambda d1, d2: d1.join(d2), dfs) 

240 return catalog 

241 

242 def write(self, patchRef, catalog): 

243 """Write the output. 

244 

245 Parameters 

246 ---------- 

247 catalog : `ParquetTable` 

248 Catalog to write 

249 patchRef : `lsst.daf.persistence.ButlerDataRef` 

250 Data reference for patch 

251 """ 

252 patchRef.put(catalog, self.config.coaddName + "Coadd_" + self.outputDataset) 

253 # since the filter isn't actually part of the data ID for the dataset we're saving, 

254 # it's confusing to see it in the log message, even if the butler simply ignores it. 

255 mergeDataId = patchRef.dataId.copy() 

256 del mergeDataId["filter"] 

257 self.log.info("Wrote merged catalog: %s", mergeDataId) 

258 

259 def writeMetadata(self, dataRefList): 

260 """No metadata to write, and not sure how to write it for a list of dataRefs. 

261 """ 

262 pass 

263 

264 

265class WriteSourceTableConnections(pipeBase.PipelineTaskConnections, 

266 defaultTemplates={"catalogType": ""}, 

267 dimensions=("instrument", "visit", "detector")): 

268 

269 catalog = connectionTypes.Input( 

270 doc="Input full-depth catalog of sources produced by CalibrateTask", 

271 name="{catalogType}src", 

272 storageClass="SourceCatalog", 

273 dimensions=("instrument", "visit", "detector") 

274 ) 

275 outputCatalog = connectionTypes.Output( 

276 doc="Catalog of sources, `src` in Parquet format. The 'id' column is " 

277 "replaced with an index; all other columns are unchanged.", 

278 name="{catalogType}source", 

279 storageClass="DataFrame", 

280 dimensions=("instrument", "visit", "detector") 

281 ) 

282 

283 

284class WriteSourceTableConfig(pipeBase.PipelineTaskConfig, 

285 pipelineConnections=WriteSourceTableConnections): 

286 pass 

287 

288 

289class WriteSourceTableTask(CmdLineTask, pipeBase.PipelineTask): 

290 """Write source table to parquet 

291 """ 

292 _DefaultName = "writeSourceTable" 

293 ConfigClass = WriteSourceTableConfig 

294 

295 def runQuantum(self, butlerQC, inputRefs, outputRefs): 

296 inputs = butlerQC.get(inputRefs) 

297 inputs['ccdVisitId'] = butlerQC.quantum.dataId.pack("visit_detector") 

298 result = self.run(**inputs).table 

299 outputs = pipeBase.Struct(outputCatalog=result.toDataFrame()) 

300 butlerQC.put(outputs, outputRefs) 

301 

302 def run(self, catalog, ccdVisitId=None, **kwargs): 

303 """Convert `src` catalog to parquet 

304 

305 Parameters 

306 ---------- 

307 catalog: `afwTable.SourceCatalog` 

308 catalog to be converted 

309 ccdVisitId: `int` 

310 ccdVisitId to be added as a column 

311 

312 Returns 

313 ------- 

314 result : `lsst.pipe.base.Struct` 

315 ``table`` 

316 `ParquetTable` version of the input catalog 

317 """ 

318 self.log.info("Generating parquet table from src catalog ccdVisitId=%s", ccdVisitId) 

319 df = catalog.asAstropy().to_pandas().set_index('id', drop=True) 

320 df['ccdVisitId'] = ccdVisitId 

321 return pipeBase.Struct(table=ParquetTable(dataFrame=df)) 

322 

323 

324class WriteRecalibratedSourceTableConnections(WriteSourceTableConnections, 

325 defaultTemplates={"catalogType": "", 

326 "skyWcsName": "jointcal", 

327 "photoCalibName": "fgcm"}, 

328 dimensions=("instrument", "visit", "detector", "skymap")): 

329 skyMap = connectionTypes.Input( 

330 doc="skyMap needed to choose which tract-level calibrations to use when multiple available", 

331 name=BaseSkyMap.SKYMAP_DATASET_TYPE_NAME, 

332 storageClass="SkyMap", 

333 dimensions=("skymap",), 

334 ) 

335 exposure = connectionTypes.Input( 

336 doc="Input exposure to perform photometry on.", 

337 name="calexp", 

338 storageClass="ExposureF", 

339 dimensions=["instrument", "visit", "detector"], 

340 ) 

341 externalSkyWcsTractCatalog = connectionTypes.Input( 

342 doc=("Per-tract, per-visit wcs calibrations. These catalogs use the detector " 

343 "id for the catalog id, sorted on id for fast lookup."), 

344 name="{skyWcsName}SkyWcsCatalog", 

345 storageClass="ExposureCatalog", 

346 dimensions=["instrument", "visit", "tract"], 

347 multiple=True 

348 ) 

349 externalSkyWcsGlobalCatalog = connectionTypes.Input( 

350 doc=("Per-visit wcs calibrations computed globally (with no tract information). " 

351 "These catalogs use the detector id for the catalog id, sorted on id for " 

352 "fast lookup."), 

353 name="{skyWcsName}SkyWcsCatalog", 

354 storageClass="ExposureCatalog", 

355 dimensions=["instrument", "visit"], 

356 ) 

357 externalPhotoCalibTractCatalog = connectionTypes.Input( 

358 doc=("Per-tract, per-visit photometric calibrations. These catalogs use the " 

359 "detector id for the catalog id, sorted on id for fast lookup."), 

360 name="{photoCalibName}PhotoCalibCatalog", 

361 storageClass="ExposureCatalog", 

362 dimensions=["instrument", "visit", "tract"], 

363 multiple=True 

364 ) 

365 externalPhotoCalibGlobalCatalog = connectionTypes.Input( 

366 doc=("Per-visit photometric calibrations computed globally (with no tract " 

367 "information). These catalogs use the detector id for the catalog id, " 

368 "sorted on id for fast lookup."), 

369 name="{photoCalibName}PhotoCalibCatalog", 

370 storageClass="ExposureCatalog", 

371 dimensions=["instrument", "visit"], 

372 ) 

373 

374 def __init__(self, *, config=None): 

375 super().__init__(config=config) 

376 # Same connection boilerplate as all other applications of 

377 # Global/Tract calibrations 

378 if config.doApplyExternalSkyWcs and config.doReevaluateSkyWcs: 

379 if config.useGlobalExternalSkyWcs: 

380 self.inputs.remove("externalSkyWcsTractCatalog") 

381 else: 

382 self.inputs.remove("externalSkyWcsGlobalCatalog") 

383 else: 

384 self.inputs.remove("externalSkyWcsTractCatalog") 

385 self.inputs.remove("externalSkyWcsGlobalCatalog") 

386 if config.doApplyExternalPhotoCalib and config.doReevaluatePhotoCalib: 

387 if config.useGlobalExternalPhotoCalib: 

388 self.inputs.remove("externalPhotoCalibTractCatalog") 

389 else: 

390 self.inputs.remove("externalPhotoCalibGlobalCatalog") 

391 else: 

392 self.inputs.remove("externalPhotoCalibTractCatalog") 

393 self.inputs.remove("externalPhotoCalibGlobalCatalog") 

394 
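# Note on the connection trimming above (illustrative summary, not additional logic):
# a tract-level catalog such as externalSkyWcsTractCatalog is kept only when both
# doApplyExternalSkyWcs and doReevaluateSkyWcs are True and useGlobalExternalSkyWcs
# is False; otherwise the global catalog (or neither) is kept. The PhotoCalib
# connections follow the same pattern.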

395 

396class WriteRecalibratedSourceTableConfig(WriteSourceTableConfig, 

397 pipelineConnections=WriteRecalibratedSourceTableConnections): 

398 

399 doReevaluatePhotoCalib = pexConfig.Field( 

400 dtype=bool, 

401 default=True, 

402 doc=("Add or replace local photoCalib columns from either the calexp.photoCalib or jointcal/FGCM") 

403 ) 

404 doReevaluateSkyWcs = pexConfig.Field( 

405 dtype=bool, 

406 default=True, 

407 doc=("Add or replace local WCS columns from either the calexp.wcs or jointcal") 

408 ) 

409 doApplyExternalPhotoCalib = pexConfig.Field( 

410 dtype=bool, 

411 default=True, 

412 doc=("Whether to apply external photometric calibration via an " 

413 "`lsst.afw.image.PhotoCalib` object. Uses the " 

414 "``externalPhotoCalibName`` field to determine which calibration " 

415 "to load."), 

416 ) 

417 doApplyExternalSkyWcs = pexConfig.Field( 

418 dtype=bool, 

419 default=True, 

420 doc=("Whether to apply external astrometric calibration via an " 

421 "`lsst.afw.geom.SkyWcs` object. Uses ``externalSkyWcsName`` " 

422 "field to determine which calibration to load."), 

423 ) 

424 useGlobalExternalPhotoCalib = pexConfig.Field( 

425 dtype=bool, 

426 default=True, 

427 doc=("When using doApplyExternalPhotoCalib, use 'global' calibrations " 

428 "that are not run per-tract. When False, use per-tract photometric " 

429 "calibration files.") 

430 ) 

431 useGlobalExternalSkyWcs = pexConfig.Field( 

432 dtype=bool, 

433 default=False, 

434 doc=("When using doApplyExternalSkyWcs, use 'global' calibrations " 

435 "that are not run per-tract. When False, use per-tract wcs " 

436 "files.") 

437 ) 

438 

439 def validate(self): 

440 super().validate() 

441 if self.doApplyExternalSkyWcs and not self.doReevaluateSkyWcs: 

442 log.warning("doApplyExternalSkyWcs=True but doReevaluateSkyWcs=False. " 

443 "External SkyWcs will not be read or evaluated.") 

444 if self.doApplyExternalPhotoCalib and not self.doReevaluatePhotoCalib: 

445 log.warning("doApplyExternalPhotoCalib=True but doReevaluatePhotoCalib=False. " 

446 "External PhotoCalib will not be read or evaluated.") 

447 

448 

449class WriteRecalibratedSourceTableTask(WriteSourceTableTask): 

450 """Write source table to parquet 

451 """ 

452 _DefaultName = "writeRecalibratedSourceTable" 

453 ConfigClass = WriteRecalibratedSourceTableConfig 

454 

455 def runQuantum(self, butlerQC, inputRefs, outputRefs): 

456 inputs = butlerQC.get(inputRefs) 

457 inputs['ccdVisitId'] = butlerQC.quantum.dataId.pack("visit_detector") 

458 inputs['exposureIdInfo'] = ExposureIdInfo.fromDataId(butlerQC.quantum.dataId, "visit_detector") 

459 

460 if self.config.doReevaluatePhotoCalib or self.config.doReevaluateSkyWcs: 

461 if self.config.doApplyExternalPhotoCalib or self.config.doApplyExternalSkyWcs: 

462 inputs['exposure'] = self.attachCalibs(inputRefs, **inputs) 

463 

464 inputs['catalog'] = self.addCalibColumns(**inputs) 

465 

466 result = self.run(**inputs).table 

467 outputs = pipeBase.Struct(outputCatalog=result.toDataFrame()) 

468 butlerQC.put(outputs, outputRefs) 

469 

470 def attachCalibs(self, inputRefs, skyMap, exposure, externalSkyWcsGlobalCatalog=None, 

471 externalSkyWcsTractCatalog=None, externalPhotoCalibGlobalCatalog=None, 

472 externalPhotoCalibTractCatalog=None, **kwargs): 

473 """Apply external calibrations to exposure per configuration 

474 

475 When multiple tract-level calibrations overlap, select the one whose tract 

476 center is closest to the detector center. 

477 

478 Parameters 

479 ---------- 

480 inputRefs : `lsst.pipe.base.InputQuantizedConnection` 

481 Input references, used here for the dataIds of the tract-level calibs. 

482 skyMap : `lsst.skymap.SkyMap` 

483 exposure : `lsst.afw.image.exposure.Exposure` 

484 Input exposure to adjust calibrations. 

485 externalSkyWcsGlobalCatalog : `lsst.afw.table.ExposureCatalog`, optional 

486 Exposure catalog with external skyWcs to be applied per config 

487 externalSkyWcsTractCatalog : `lsst.afw.table.ExposureCatalog`, optional 

488 Exposure catalog with external skyWcs to be applied per config 

489 externalPhotoCalibGlobalCatalog : `lsst.afw.table.ExposureCatalog`, optional 

490 Exposure catalog with external photoCalib to be applied per config 

491 externalPhotoCalibTractCatalog : `lsst.afw.table.ExposureCatalog`, optional 

492 Exposure catalog with external photoCalib to be applied per config 

493 

494 Returns 

495 ------- 

496 exposure : `lsst.afw.image.exposure.Exposure` 

497 Exposure with adjusted calibrations. 

498 """ 

499 if not self.config.doApplyExternalSkyWcs: 

500 # Do not modify the exposure's SkyWcs 

501 externalSkyWcsCatalog = None 

502 elif self.config.useGlobalExternalSkyWcs: 

503 # Use the global external SkyWcs 

504 externalSkyWcsCatalog = externalSkyWcsGlobalCatalog 

505 self.log.info('Applying global SkyWcs') 

506 else: 

507 # use tract-level external SkyWcs from the closest overlapping tract 

508 inputRef = getattr(inputRefs, 'externalSkyWcsTractCatalog') 

509 tracts = [ref.dataId['tract'] for ref in inputRef] 

510 if len(tracts) == 1: 

511 ind = 0 

512 self.log.info('Applying tract-level SkyWcs from tract %s', tracts[ind]) 

513 else: 

514 ind = self.getClosestTract(tracts, skyMap, 

515 exposure.getBBox(), exposure.getWcs()) 

516 self.log.info('Multiple overlapping externalSkyWcsTractCatalogs found (%s). ' 

517 'Applying closest to detector center: tract=%s', str(tracts), tracts[ind]) 

518 

519 externalSkyWcsCatalog = externalSkyWcsTractCatalog[ind] 

520 

521 if not self.config.doApplyExternalPhotoCalib: 

522 # Do not modify the exposure's PhotoCalib 

523 externalPhotoCalibCatalog = None 

524 elif self.config.useGlobalExternalPhotoCalib: 

525 # Use the global external PhotoCalib 

526 externalPhotoCalibCatalog = externalPhotoCalibGlobalCatalog 

527 self.log.info('Applying global PhotoCalib') 

528 else: 

529 # use tract-level external PhotoCalib from the closest overlapping tract 

530 inputRef = getattr(inputRefs, 'externalPhotoCalibTractCatalog') 

531 tracts = [ref.dataId['tract'] for ref in inputRef] 

532 if len(tracts) == 1: 

533 ind = 0 

534 self.log.info('Applying tract-level PhotoCalib from tract %s', tracts[ind]) 

535 else: 

536 ind = self.getClosestTract(tracts, skyMap, 

537 exposure.getBBox(), exposure.getWcs()) 

538 self.log.info('Multiple overlapping externalPhotoCalibTractCatalogs found (%s). ' 

539 'Applying closest to detector center: tract=%s', str(tracts), tracts[ind]) 

540 

541 externalPhotoCalibCatalog = externalPhotoCalibTractCatalog[ind] 

542 

543 return self.prepareCalibratedExposure(exposure, externalSkyWcsCatalog, externalPhotoCalibCatalog) 

544 

545 def getClosestTract(self, tracts, skyMap, bbox, wcs): 

546 """Find the index of the tract closest to the detector from a list of tractIds. 

547 

548 Parameters 

549 ---------- 

550 tracts: `list` [`int`] 

551 Iterable of integer tractIds 

552 skyMap : `lsst.skymap.SkyMap` 

553 skyMap to lookup tract geometry and wcs 

554 bbox : `lsst.geom.Box2I` 

555 Detector bbox, the center of which will be compared to tract centers 

556 wcs : `lsst.afw.geom.SkyWcs` 

557 Detector Wcs object to map the detector center to SkyCoord 

558 

559 Returns 

560 ------- 

561 index : `int` 

562 """ 

563 if len(tracts) == 1: 

564 return 0 

565 

566 center = wcs.pixelToSky(bbox.getCenter()) 

567 sep = [] 

568 for tractId in tracts: 

569 tract = skyMap[tractId] 

570 tractCenter = tract.getWcs().pixelToSky(tract.getBBox().getCenter()) 

571 sep.append(center.separation(tractCenter)) 

572 

573 return np.argmin(sep) 

574 

575 def prepareCalibratedExposure(self, exposure, externalSkyWcsCatalog=None, externalPhotoCalibCatalog=None): 

576 """Prepare a calibrated exposure and apply external calibrations 

577 if so configured. 

578 

579 Parameters 

580 ---------- 

581 exposure : `lsst.afw.image.exposure.Exposure` 

582 Input exposure to adjust calibrations. 

583 externalSkyWcsCatalog : `lsst.afw.table.ExposureCatalog`, optional 

584 Exposure catalog with external skyWcs to be applied 

585 if config.doApplyExternalSkyWcs=True. Catalog uses the detector id 

586 for the catalog id, sorted on id for fast lookup. 

587 externalPhotoCalibCatalog : `lsst.afw.table.ExposureCatalog`, optional 

588 Exposure catalog with external photoCalib to be applied 

589 if config.doApplyExternalPhotoCalib=True. Catalog uses the detector 

590 id for the catalog id, sorted on id for fast lookup. 

591 

592 Returns 

593 ------- 

594 exposure : `lsst.afw.image.exposure.Exposure` 

595 Exposure with adjusted calibrations. 

596 """ 

597 detectorId = exposure.getInfo().getDetector().getId() 

598 

599 if externalPhotoCalibCatalog is not None: 

600 row = externalPhotoCalibCatalog.find(detectorId) 

601 if row is None: 

602 self.log.warning("Detector id %s not found in externalPhotoCalibCatalog; " 

603 "Using original photoCalib.", detectorId) 

604 else: 

605 photoCalib = row.getPhotoCalib() 

606 if photoCalib is None: 

607 self.log.warning("Detector id %s has None for photoCalib in externalPhotoCalibCatalog; " 

608 "Using original photoCalib.", detectorId) 

609 else: 

610 exposure.setPhotoCalib(photoCalib) 

611 

612 if externalSkyWcsCatalog is not None: 

613 row = externalSkyWcsCatalog.find(detectorId) 

614 if row is None: 

615 self.log.warning("Detector id %s not found in externalSkyWcsCatalog; " 

616 "Using original skyWcs.", detectorId) 

617 else: 

618 skyWcs = row.getWcs() 

619 if skyWcs is None: 

620 self.log.warning("Detector id %s has None for skyWcs in externalSkyWcsCatalog; " 

621 "Using original skyWcs.", detectorId) 

622 else: 

623 exposure.setWcs(skyWcs) 

624 

625 return exposure 

626 

627 def addCalibColumns(self, catalog, exposure, exposureIdInfo, **kwargs): 

628 """Add or replace columns with calibs evaluated at each centroid 

629 

630 Add or replace 'base_LocalWcs' and 'base_LocalPhotoCalib' columns in 

631 a source catalog, by rerunning the plugins. 

632 

633 Parameters 

634 ---------- 

635 catalog : `lsst.afw.table.SourceCatalog` 

636 catalog to which calib columns will be added 

637 exposure : `lsst.afw.image.exposure.Exposure` 

638 Exposure with attached PhotoCalibs and SkyWcs attributes to be 

639 reevaluated at local centroids. Pixels are not required. 

640 exposureIdInfo : `lsst.obs.base.ExposureIdInfo` 

641 

642 Returns 

643 ------- 

644 newCat: `lsst.afw.table.SourceCatalog` 

645 Source Catalog with requested local calib columns 

646 """ 

647 measureConfig = SingleFrameMeasurementTask.ConfigClass() 

648 measureConfig.doReplaceWithNoise = False 

649 

650 measureConfig.plugins.names = [] 

651 if self.config.doReevaluateSkyWcs: 

652 measureConfig.plugins.names.add('base_LocalWcs') 

653 self.log.info("Re-evaluating base_LocalWcs plugin") 

654 if self.config.doReevaluatePhotoCalib: 

655 measureConfig.plugins.names.add('base_LocalPhotoCalib') 

656 self.log.info("Re-evaluating base_LocalPhotoCalib plugin") 

657 pluginsNotToCopy = tuple(measureConfig.plugins.names) 

658 

659 # Create a new schema and catalog 

660 # Copy all columns from original except for the ones to reevaluate 

661 aliasMap = catalog.schema.getAliasMap() 

662 mapper = afwTable.SchemaMapper(catalog.schema) 

663 for item in catalog.schema: 

664 if not item.field.getName().startswith(pluginsNotToCopy): 

665 mapper.addMapping(item.key) 

666 

667 schema = mapper.getOutputSchema() 

668 measurement = SingleFrameMeasurementTask(config=measureConfig, schema=schema) 

669 schema.setAliasMap(aliasMap) 

670 newCat = afwTable.SourceCatalog(schema) 

671 newCat.extend(catalog, mapper=mapper) 

672 

673 measurement.run(measCat=newCat, exposure=exposure, exposureId=exposureIdInfo.expId) 

674 

675 return newCat 

676 

677 

678class PostprocessAnalysis(object): 

679 """Calculate columns from ParquetTable 

680 

681 This object manages and organizes an arbitrary set of computations 

682 on a catalog. The catalog is defined by a 

683 `lsst.pipe.tasks.parquetTable.ParquetTable` object (or list thereof), such as a 

684 `deepCoadd_obj` dataset, and the computations are defined by a collection 

685 of `lsst.pipe.tasks.functors.Functor` objects (or, equivalently, 

686 a `CompositeFunctor`). 

687 

688 After the object is initialized, accessing the `.df` attribute (which 

689 holds the `pandas.DataFrame` containing the results of the calculations) triggers 

690 computation of said dataframe. 

691 

692 One of the conveniences of using this object is the ability to define a desired common 

693 filter for all functors. This enables the same functor collection to be passed to 

694 several different `PostprocessAnalysis` objects without having to change the original 

695 functor collection, since the `filt` keyword argument of this object triggers an 

696 overwrite of the `filt` property for all functors in the collection. 

697 

698 This object also allows a list of refFlags to be passed, and defines a set of default 

699 refFlags that are always included even if not requested. 

700 

701 If a list of `ParquetTable` objects is passed, rather than a single one, then the 

702 calculations will be mapped over all the input catalogs. In principle, it should 

703 be straightforward to parallelize this activity, but initial tests have failed 

704 (see TODO in code comments). 

705 

706 Parameters 

707 ---------- 

708 parq : `lsst.pipe.tasks.parquetTable.ParquetTable` (or list of such) 

709 Source catalog(s) for computation 

710 

711 functors : `list`, `dict`, or `lsst.pipe.tasks.functors.CompositeFunctor` 

712 Computations to do (functors that act on `parq`). 

713 If a dict, the output 

714 DataFrame will have columns keyed accordingly. 

715 If a list, the column keys will come from the 

716 `.shortname` attribute of each functor. 

717 

718 filt : `str` (optional) 

719 Filter in which to calculate. If provided, 

720 this will overwrite any existing `.filt` attribute 

721 of the provided functors. 

722 

723 flags : `list` (optional) 

724 List of flags (per-band) to include in output table. 

725 Taken from the `meas` dataset if applied to a multilevel Object Table. 

726 

727 refFlags : `list` (optional) 

728 List of refFlags (only reference band) to include in output table. 

729 

730 forcedFlags : `list` (optional) 

731 List of flags (per-band) to include in output table. 

732 Taken from the ``forced_src`` dataset if applied to a 

733 multilevel Object Table. Intended for flags from measurement plugins 

734 only run during multi-band forced-photometry. 

735 """ 

736 _defaultRefFlags = [] 

737 _defaultFuncs = () 

738 

739 def __init__(self, parq, functors, filt=None, flags=None, refFlags=None, forcedFlags=None): 

740 self.parq = parq 

741 self.functors = functors 

742 

743 self.filt = filt 

744 self.flags = list(flags) if flags is not None else [] 

745 self.forcedFlags = list(forcedFlags) if forcedFlags is not None else [] 

746 self.refFlags = list(self._defaultRefFlags) 

747 if refFlags is not None: 

748 self.refFlags += list(refFlags) 

749 

750 self._df = None 

751 

752 @property 

753 def defaultFuncs(self): 

754 funcs = dict(self._defaultFuncs) 

755 return funcs 

756 

757 @property 

758 def func(self): 

759 additionalFuncs = self.defaultFuncs 

760 additionalFuncs.update({flag: Column(flag, dataset='forced_src') for flag in self.forcedFlags}) 

761 additionalFuncs.update({flag: Column(flag, dataset='ref') for flag in self.refFlags}) 

762 additionalFuncs.update({flag: Column(flag, dataset='meas') for flag in self.flags}) 

763 

764 if isinstance(self.functors, CompositeFunctor): 

765 func = self.functors 

766 else: 

767 func = CompositeFunctor(self.functors) 

768 

769 func.funcDict.update(additionalFuncs) 

770 func.filt = self.filt 

771 

772 return func 

773 

774 @property 

775 def noDupCols(self): 

776 return [name for name, func in self.func.funcDict.items() if func.noDup or func.dataset == 'ref'] 

777 

778 @property 

779 def df(self): 

780 if self._df is None: 

781 self.compute() 

782 return self._df 

783 

784 def compute(self, dropna=False, pool=None): 

785 # map over multiple parquet tables 

786 if type(self.parq) in (list, tuple): 

787 if pool is None: 

788 dflist = [self.func(parq, dropna=dropna) for parq in self.parq] 

789 else: 

790 # TODO: Figure out why this doesn't work (pyarrow pickling issues?) 

791 dflist = pool.map(functools.partial(self.func, dropna=dropna), self.parq) 

792 self._df = pd.concat(dflist) 

793 else: 

794 self._df = self.func(self.parq, dropna=dropna) 

795 

796 return self._df 

797 
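# Minimal usage sketch (assumed inputs; functor and column names are illustrative):
# evaluate a small functor collection against a deepCoadd_obj-style ParquetTable.
# Accessing `.df` triggers the computation.
def _postprocessAnalysisExample(parq):
    funcs = {'ra': Column('coord_ra', dataset='ref'),
             'dec': Column('coord_dec', dataset='ref')}
    analysis = PostprocessAnalysis(parq, funcs, filt='i',
                                   flags=['base_PixelFlags_flag'])
    return analysis.df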

798 

799class TransformCatalogBaseConnections(pipeBase.PipelineTaskConnections, 

800 dimensions=()): 

801 """Expected Connections for subclasses of TransformCatalogBaseTask. 

802 

803 Must be subclassed. 

804 """ 

805 inputCatalog = connectionTypes.Input( 

806 name="", 

807 storageClass="DataFrame", 

808 ) 

809 outputCatalog = connectionTypes.Output( 

810 name="", 

811 storageClass="DataFrame", 

812 ) 

813 

814 

815class TransformCatalogBaseConfig(pipeBase.PipelineTaskConfig, 

816 pipelineConnections=TransformCatalogBaseConnections): 

817 functorFile = pexConfig.Field( 

818 dtype=str, 

819 doc="Path to YAML file specifying Science Data Model functors to use " 

820 "when copying columns and computing calibrated values.", 

821 default=None, 

822 optional=True 

823 ) 

824 primaryKey = pexConfig.Field( 

825 dtype=str, 

826 doc="Name of column to be set as the DataFrame index. If None, the index " 

827 "will be named `id`.", 

828 default=None, 

829 optional=True 

830 ) 

831 columnsFromDataId = pexConfig.ListField( 

832 dtype=str, 

833 default=None, 

834 optional=True, 

835 doc="Columns to extract from the dataId", 

836 ) 

837 

838 

839class TransformCatalogBaseTask(pipeBase.PipelineTask): 

840 """Base class for transforming/standardizing a catalog 

841 

842 by applying functors that convert units and apply calibrations. 

843 The purpose of this task is to perform a set of computations on 

844 an input `ParquetTable` dataset (such as `deepCoadd_obj`) and write the 

845 results to a new dataset (which needs to be declared in an `outputDataset` 

846 attribute). 

847 

848 The calculations to be performed are defined in a YAML file that specifies 

849 a set of functors to be computed, provided as 

850 a `--functorFile` config parameter. An example of such a YAML file 

851 is the following: 

852 

853 funcs: 

854 psfMag: 

855 functor: Mag 

856 args: 

857 - base_PsfFlux 

858 filt: HSC-G 

859 dataset: meas 

860 cmodel_magDiff: 

861 functor: MagDiff 

862 args: 

863 - modelfit_CModel 

864 - base_PsfFlux 

865 filt: HSC-G 

866 gauss_magDiff: 

867 functor: MagDiff 

868 args: 

869 - base_GaussianFlux 

870 - base_PsfFlux 

871 filt: HSC-G 

872 count: 

873 functor: Column 

874 args: 

875 - base_InputCount_value 

876 filt: HSC-G 

877 deconvolved_moments: 

878 functor: DeconvolvedMoments 

879 filt: HSC-G 

880 dataset: forced_src 

881 refFlags: 

882 - calib_psfUsed 

883 - merge_measurement_i 

884 - merge_measurement_r 

885 - merge_measurement_z 

886 - merge_measurement_y 

887 - merge_measurement_g 

888 - base_PixelFlags_flag_inexact_psfCenter 

889 - detect_isPrimary 

890 

891 The names for each entry under "funcs" will become the names of columns in the 

892 output dataset. All the functors referenced are defined in `lsst.pipe.tasks.functors`. 

893 Positional arguments to be passed to each functor are in the `args` list, 

894 and any additional entries for each column other than "functor" or "args" (e.g., `'filt'`, 

895 `'dataset'`) are treated as keyword arguments to be passed to the functor initialization. 

896 

897 The "flags" entry is the default shortcut for `Column` functors. 

898 All columns listed under "flags" will be copied to the output table 

899 untransformed. They can be of any datatype. 

900 In the special case of transforming a multi-level object table with 

901 band and dataset indices (deepCoadd_obj), these will be taken from the 

902 `meas` dataset and exploded out per band. 

903 

904 There are two special shortcuts that only apply when transforming 

905 multi-level Object (deepCoadd_obj) tables: 

906 - The "refFlags" entry is a shortcut for `Column` functors 

907 taken from the `'ref'` dataset if transforming an ObjectTable. 

908 - The "forcedFlags" entry is a shortcut for `Column` functors 

909 taken from the ``forced_src`` dataset if transforming an ObjectTable. 

910 These are expanded out per band. 

911 

912 

913 This task uses the `lsst.pipe.tasks.postprocess.PostprocessAnalysis` object 

914 to organize and execute the calculations. 

915 

916 """ 

917 @property 

918 def _DefaultName(self): 

919 raise NotImplementedError('Subclass must define "_DefaultName" attribute') 

920 

921 @property 

922 def outputDataset(self): 

923 raise NotImplementedError('Subclass must define "outputDataset" attribute') 

924 

925 @property 

926 def inputDataset(self): 

927 raise NotImplementedError('Subclass must define "inputDataset" attribute') 

928 

929 @property 

930 def ConfigClass(self): 

931 raise NotImplementedError('Subclass must define "ConfigClass" attribute') 

932 

933 def __init__(self, *args, **kwargs): 

934 super().__init__(*args, **kwargs) 

935 if self.config.functorFile: 

936 self.log.info('Loading transform functor definitions from %s', 

937 self.config.functorFile) 

938 self.funcs = CompositeFunctor.from_file(self.config.functorFile) 

939 self.funcs.update(dict(PostprocessAnalysis._defaultFuncs)) 

940 else: 

941 self.funcs = None 

942 

943 def runQuantum(self, butlerQC, inputRefs, outputRefs): 

944 inputs = butlerQC.get(inputRefs) 

945 if self.funcs is None: 

946 raise ValueError("config.functorFile is None. " 

947 "Must be a valid path to yaml in order to run Task as a PipelineTask.") 

948 result = self.run(parq=inputs['inputCatalog'], funcs=self.funcs, 

949 dataId=outputRefs.outputCatalog.dataId.full) 

950 outputs = pipeBase.Struct(outputCatalog=result) 

951 butlerQC.put(outputs, outputRefs) 

952 

953 def run(self, parq, funcs=None, dataId=None, band=None): 

954 """Do postprocessing calculations 

955 

956 Takes a `ParquetTable` object and dataId, 

957 returns a dataframe with results of postprocessing calculations. 

958 

959 Parameters 

960 ---------- 

961 parq : `lsst.pipe.tasks.parquetTable.ParquetTable` 

962 ParquetTable from which calculations are done. 

963 funcs : `lsst.pipe.tasks.functors.Functors` 

964 Functors to apply to the table's columns 

965 dataId : dict, optional 

966 Used to add the columns configured via ``columnsFromDataId`` (e.g. a `patchId` column) to the output dataframe. 

967 band : `str`, optional 

968 Filter band that is being processed. 

969 

970 Returns 

971 ------- 

972 `pandas.DataFrame` 

973 

974 """ 

975 self.log.info("Transforming/standardizing the source table dataId: %s", dataId) 

976 

977 df = self.transform(band, parq, funcs, dataId).df 

978 self.log.info("Made a table of %d columns and %d rows", len(df.columns), len(df)) 

979 return df 

980 

981 def getFunctors(self): 

982 return self.funcs 

983 

984 def getAnalysis(self, parq, funcs=None, band=None): 

985 if funcs is None: 

986 funcs = self.funcs 

987 analysis = PostprocessAnalysis(parq, funcs, filt=band) 

988 return analysis 

989 

990 def transform(self, band, parq, funcs, dataId): 

991 analysis = self.getAnalysis(parq, funcs=funcs, band=band) 

992 df = analysis.df 

993 if dataId and self.config.columnsFromDataId: 

994 for key in self.config.columnsFromDataId: 

995 if key in dataId: 

996 df[str(key)] = dataId[key] 

997 else: 

998 raise ValueError(f"'{key}' in config.columnsFromDataId not found in dataId: {dataId}") 

999 

1000 if self.config.primaryKey: 

1001 if df.index.name != self.config.primaryKey and self.config.primaryKey in df: 

1002 df.reset_index(inplace=True, drop=True) 

1003 df.set_index(self.config.primaryKey, inplace=True) 

1004 

1005 return pipeBase.Struct( 

1006 df=df, 

1007 analysis=analysis 

1008 ) 

1009 

1010 def write(self, df, parqRef): 

1011 parqRef.put(ParquetTable(dataFrame=df), self.outputDataset) 

1012 

1013 def writeMetadata(self, dataRef): 

1014 """No metadata to write. 

1015 """ 

1016 pass 

1017 
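# Minimal sketch (hypothetical file path): how a functor YAML file like the one in
# the TransformCatalogBaseTask docstring becomes a CompositeFunctor, mirroring what
# __init__ does when config.functorFile is set.
def _loadFunctorsExample(functorFile="/path/to/Object.yaml"):
    funcs = CompositeFunctor.from_file(functorFile)
    funcs.update(dict(PostprocessAnalysis._defaultFuncs))
    return funcs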

1018 

1019class TransformObjectCatalogConnections(pipeBase.PipelineTaskConnections, 

1020 defaultTemplates={"coaddName": "deep"}, 

1021 dimensions=("tract", "patch", "skymap")): 

1022 inputCatalog = connectionTypes.Input( 

1023 doc="The vertical concatenation of the deepCoadd_{ref|meas|forced_src} catalogs, " 

1024 "stored as a DataFrame with a multi-level column index per-patch.", 

1025 dimensions=("tract", "patch", "skymap"), 

1026 storageClass="DataFrame", 

1027 name="{coaddName}Coadd_obj", 

1028 deferLoad=True, 

1029 ) 

1030 outputCatalog = connectionTypes.Output( 

1031 doc="Per-Patch Object Table of columns transformed from the deepCoadd_obj table per the standard " 

1032 "data model.", 

1033 dimensions=("tract", "patch", "skymap"), 

1034 storageClass="DataFrame", 

1035 name="objectTable" 

1036 ) 

1037 

1038 

1039class TransformObjectCatalogConfig(TransformCatalogBaseConfig, 

1040 pipelineConnections=TransformObjectCatalogConnections): 

1041 coaddName = pexConfig.Field( 

1042 dtype=str, 

1043 default="deep", 

1044 doc="Name of coadd" 

1045 ) 

1046 # TODO: remove in DM-27177 

1047 filterMap = pexConfig.DictField( 

1048 keytype=str, 

1049 itemtype=str, 

1050 default={}, 

1051 doc=("Dictionary mapping full filter name to short one for column name munging. " 

1052 "These filters determine the output columns no matter what filters the " 

1053 "input data actually contain."), 

1054 deprecated=("Coadds are now identified by the band, so this transform is unused. " 

1055 "Will be removed after v22.") 

1056 ) 

1057 outputBands = pexConfig.ListField( 

1058 dtype=str, 

1059 default=None, 

1060 optional=True, 

1061 doc=("These bands and only these bands will appear in the output," 

1062 " NaN-filled if the input does not include them." 

1063 " If None, then use all bands found in the input.") 

1064 ) 

1065 camelCase = pexConfig.Field( 

1066 dtype=bool, 

1067 default=False, 

1068 doc=("Write per-band column names with camelCase, else underscore. " 

1069 "For example: gPsFlux instead of g_PsFlux.") 

1070 ) 

1071 multilevelOutput = pexConfig.Field( 

1072 dtype=bool, 

1073 default=False, 

1074 doc=("Whether results dataframe should have a multilevel column index (True) or be flat " 

1075 "and name-munged (False).") 

1076 ) 

1077 goodFlags = pexConfig.ListField( 

1078 dtype=str, 

1079 default=[], 

1080 doc=("List of 'good' flags that should be set False when populating empty tables. " 

1081 "All other flags are considered to be 'bad' flags and will be set to True.") 

1082 ) 

1083 floatFillValue = pexConfig.Field( 

1084 dtype=float, 

1085 default=np.nan, 

1086 doc="Fill value for float fields when populating empty tables." 

1087 ) 

1088 integerFillValue = pexConfig.Field( 

1089 dtype=int, 

1090 default=-1, 

1091 doc="Fill value for integer fields when populating empty tables." 

1092 ) 

1093 

1094 def setDefaults(self): 

1095 super().setDefaults() 

1096 self.functorFile = os.path.join('$PIPE_TASKS_DIR', 'schemas', 'Object.yaml') 

1097 self.primaryKey = 'objectId' 

1098 self.columnsFromDataId = ['tract', 'patch'] 

1099 self.goodFlags = ['calib_astrometry_used', 

1100 'calib_photometry_reserved', 

1101 'calib_photometry_used', 

1102 'calib_psf_candidate', 

1103 'calib_psf_reserved', 

1104 'calib_psf_used'] 

1105 

1106 

1107class TransformObjectCatalogTask(TransformCatalogBaseTask): 

1108 """Produce a flattened Object Table to match the format specified in 

1109 sdm_schemas. 

1110 

1111 Do the same set of postprocessing calculations on all bands 

1112 

1113 This is identical to `TransformCatalogBaseTask`, except that it does the 

1114 specified functor calculations for all filters present in the 

1115 input `deepCoadd_obj` table. Any specific `"filt"` keywords specified 

1116 by the YAML file will be superseded. 

1117 """ 

1118 _DefaultName = "transformObjectCatalog" 

1119 ConfigClass = TransformObjectCatalogConfig 

1120 

1121 def run(self, parq, funcs=None, dataId=None, band=None): 

1122 # NOTE: band kwarg is ignored here. 

1123 dfDict = {} 

1124 analysisDict = {} 

1125 templateDf = pd.DataFrame() 

1126 

1127 if isinstance(parq, DeferredDatasetHandle): 

1128 columns = parq.get(component='columns') 

1129 inputBands = columns.unique(level=1).values 

1130 else: 

1131 inputBands = parq.columnLevelNames['band'] 

1132 

1133 outputBands = self.config.outputBands if self.config.outputBands else inputBands 

1134 

1135 # Perform transform for data of filters that exist in parq. 

1136 for inputBand in inputBands: 

1137 if inputBand not in outputBands: 

1138 self.log.info("Ignoring %s band data in the input", inputBand) 

1139 continue 

1140 self.log.info("Transforming the catalog of band %s", inputBand) 

1141 result = self.transform(inputBand, parq, funcs, dataId) 

1142 dfDict[inputBand] = result.df 

1143 analysisDict[inputBand] = result.analysis 

1144 if templateDf.empty: 

1145 templateDf = result.df 

1146 

1147 # Put filler values in columns of other wanted bands 

1148 for filt in outputBands: 

1149 if filt not in dfDict: 

1150 self.log.info("Adding empty columns for band %s", filt) 

1151 dfTemp = templateDf.copy() 

1152 for col in dfTemp.columns: 

1153 testValue = dfTemp[col].values[0] 

1154 if isinstance(testValue, (np.bool_, pd.BooleanDtype)): 

1155 # Boolean flag type, check if it is a "good" flag 

1156 if col in self.config.goodFlags: 

1157 fillValue = False 

1158 else: 

1159 fillValue = True 

1160 elif isinstance(testValue, numbers.Integral): 

1161 # Checking numbers.Integral catches all flavors 

1162 # of python, numpy, pandas, etc. integers. 

1163 # We must ensure this is not an unsigned integer. 

1164 if isinstance(testValue, np.unsignedinteger): 

1165 raise ValueError("Parquet tables may not have unsigned integer columns.") 

1166 else: 

1167 fillValue = self.config.integerFillValue 

1168 else: 

1169 fillValue = self.config.floatFillValue 

1170 dfTemp[col].values[:] = fillValue 

1171 dfDict[filt] = dfTemp 

1172 

1173 # This makes a multilevel column index, with band as first level 

1174 df = pd.concat(dfDict, axis=1, names=['band', 'column']) 

1175 

1176 if not self.config.multilevelOutput: 

1177 noDupCols = list(set.union(*[set(v.noDupCols) for v in analysisDict.values()])) 

1178 if self.config.primaryKey in noDupCols: 

1179 noDupCols.remove(self.config.primaryKey) 

1180 if dataId and self.config.columnsFromDataId: 

1181 noDupCols += self.config.columnsFromDataId 

1182 df = flattenFilters(df, noDupCols=noDupCols, camelCase=self.config.camelCase, 

1183 inputBands=inputBands) 

1184 

1185 self.log.info("Made a table of %d columns and %d rows", len(df.columns), len(df)) 

1186 

1187 return df 

1188 
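# Illustrative note (hypothetical column names): with multilevelOutput=False the
# band-level columns are flattened by flattenFilters(), e.g. ('g', 'psfMag') becomes
# 'g_psfMag' (or 'gPsfMag' with camelCase=True), while noDupCols such as the 'tract'
# and 'patch' dataId columns appear once, unprefixed.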

1189 

1190class TractObjectDataIdContainer(CoaddDataIdContainer): 

1191 

1192 def makeDataRefList(self, namespace): 

1193 """Make self.refList from self.idList 

1194 

1195 Generate a list of data references given tract and/or patch. 

1196 This was adapted from `TractQADataIdContainer`, which was 

1197 `TractDataIdContainer` modified to not require "filter". 

1198 Only existing dataRefs are returned. 

1199 """ 

1200 def getPatchRefList(tract): 

1201 return [namespace.butler.dataRef(datasetType=self.datasetType, 

1202 tract=tract.getId(), 

1203 patch="%d,%d" % patch.getIndex()) for patch in tract] 

1204 

1205 tractRefs = defaultdict(list) # Data references for each tract 

1206 for dataId in self.idList: 

1207 skymap = self.getSkymap(namespace) 

1208 

1209 if "tract" in dataId: 

1210 tractId = dataId["tract"] 

1211 if "patch" in dataId: 

1212 tractRefs[tractId].append(namespace.butler.dataRef(datasetType=self.datasetType, 

1213 tract=tractId, 

1214 patch=dataId['patch'])) 

1215 else: 

1216 tractRefs[tractId] += getPatchRefList(skymap[tractId]) 

1217 else: 

1218 tractRefs = dict((tract.getId(), tractRefs.get(tract.getId(), []) + getPatchRefList(tract)) 

1219 for tract in skymap) 

1220 outputRefList = [] 

1221 for tractRefList in tractRefs.values(): 

1222 existingRefs = [ref for ref in tractRefList if ref.datasetExists()] 

1223 outputRefList.append(existingRefs) 

1224 

1225 self.refList = outputRefList 

1226 

1227 

1228class ConsolidateObjectTableConnections(pipeBase.PipelineTaskConnections, 

1229 dimensions=("tract", "skymap")): 

1230 inputCatalogs = connectionTypes.Input( 

1231 doc="Per-Patch objectTables conforming to the standard data model.", 

1232 name="objectTable", 

1233 storageClass="DataFrame", 

1234 dimensions=("tract", "patch", "skymap"), 

1235 multiple=True, 

1236 ) 

1237 outputCatalog = connectionTypes.Output( 

1238 doc="Per-tract vertical concatenation of the input objectTables", 

1239 name="objectTable_tract", 

1240 storageClass="DataFrame", 

1241 dimensions=("tract", "skymap"), 

1242 ) 

1243 

1244 

1245class ConsolidateObjectTableConfig(pipeBase.PipelineTaskConfig, 

1246 pipelineConnections=ConsolidateObjectTableConnections): 

1247 coaddName = pexConfig.Field( 

1248 dtype=str, 

1249 default="deep", 

1250 doc="Name of coadd" 

1251 ) 

1252 

1253 

1254class ConsolidateObjectTableTask(CmdLineTask, pipeBase.PipelineTask): 

1255 """Write patch-merged source tables to a tract-level parquet file 

1256 

1257 Concatenates the per-patch `objectTable` list into a per-tract `objectTable_tract` 

1258 """ 

1259 _DefaultName = "consolidateObjectTable" 

1260 ConfigClass = ConsolidateObjectTableConfig 

1261 

1262 inputDataset = 'objectTable' 

1263 outputDataset = 'objectTable_tract' 

1264 

1265 def runQuantum(self, butlerQC, inputRefs, outputRefs): 

1266 inputs = butlerQC.get(inputRefs) 

1267 self.log.info("Concatenating %s per-patch Object Tables", 

1268 len(inputs['inputCatalogs'])) 

1269 df = pd.concat(inputs['inputCatalogs']) 

1270 butlerQC.put(pipeBase.Struct(outputCatalog=df), outputRefs) 

1271 

1272 @classmethod 

1273 def _makeArgumentParser(cls): 

1274 parser = ArgumentParser(name=cls._DefaultName) 

1275 

1276 parser.add_id_argument("--id", cls.inputDataset, 

1277 help="data ID, e.g. --id tract=12345", 

1278 ContainerClass=TractObjectDataIdContainer) 

1279 return parser 

1280 

1281 def runDataRef(self, patchRefList): 

1282 df = pd.concat([patchRef.get().toDataFrame() for patchRef in patchRefList]) 

1283 patchRefList[0].put(ParquetTable(dataFrame=df), self.outputDataset) 

1284 

1285 def writeMetadata(self, dataRef): 

1286 """No metadata to write. 

1287 """ 

1288 pass 

1289 

1290 

1291class TransformSourceTableConnections(pipeBase.PipelineTaskConnections, 

1292 defaultTemplates={"catalogType": ""}, 

1293 dimensions=("instrument", "visit", "detector")): 

1294 

1295 inputCatalog = connectionTypes.Input( 

1296 doc="Wide input catalog of sources produced by WriteSourceTableTask", 

1297 name="{catalogType}source", 

1298 storageClass="DataFrame", 

1299 dimensions=("instrument", "visit", "detector"), 

1300 deferLoad=True 

1301 ) 

1302 outputCatalog = connectionTypes.Output( 

1303 doc="Narrower, per-detector Source Table transformed and converted per a " 

1304 "specified set of functors", 

1305 name="{catalogType}sourceTable", 

1306 storageClass="DataFrame", 

1307 dimensions=("instrument", "visit", "detector") 

1308 ) 

1309 

1310 

1311class TransformSourceTableConfig(TransformCatalogBaseConfig, 

1312 pipelineConnections=TransformSourceTableConnections): 

1313 

1314 def setDefaults(self): 

1315 super().setDefaults() 

1316 self.functorFile = os.path.join('$PIPE_TASKS_DIR', 'schemas', 'Source.yaml') 

1317 self.primaryKey = 'sourceId' 

1318 self.columnsFromDataId = ['visit', 'detector', 'band', 'physical_filter'] 

1319 

1320 

1321class TransformSourceTableTask(TransformCatalogBaseTask): 

1322 """Transform/standardize a source catalog 

1323 """ 

1324 _DefaultName = "transformSourceTable" 

1325 ConfigClass = TransformSourceTableConfig 

1326 

1327 

1328class ConsolidateVisitSummaryConnections(pipeBase.PipelineTaskConnections, 

1329 dimensions=("instrument", "visit",), 

1330 defaultTemplates={"calexpType": ""}): 

1331 calexp = connectionTypes.Input( 

1332 doc="Processed exposures used for metadata", 

1333 name="{calexpType}calexp", 

1334 storageClass="ExposureF", 

1335 dimensions=("instrument", "visit", "detector"), 

1336 deferLoad=True, 

1337 multiple=True, 

1338 ) 

1339 visitSummary = connectionTypes.Output( 

1340 doc=("Per-visit consolidated exposure metadata. These catalogs use " 

1341 "detector id for the id and are sorted for fast lookups of a " 

1342 "detector."), 

1343 name="{calexpType}visitSummary", 

1344 storageClass="ExposureCatalog", 

1345 dimensions=("instrument", "visit"), 

1346 ) 

1347 

1348 

1349class ConsolidateVisitSummaryConfig(pipeBase.PipelineTaskConfig, 

1350 pipelineConnections=ConsolidateVisitSummaryConnections): 

1351 """Config for ConsolidateVisitSummaryTask""" 

1352 pass 

1353 

1354 

1355class ConsolidateVisitSummaryTask(pipeBase.PipelineTask, pipeBase.CmdLineTask): 

1356 """Task to consolidate per-detector visit metadata. 

1357 

1358 This task aggregates the following metadata from all the detectors in a 

1359 single visit into an exposure catalog: 

1360 - The visitInfo. 

1361 - The wcs. 

1362 - The photoCalib. 

1363 - The physical_filter and band (if available). 

1364 - The psf size, shape, and effective area at the center of the detector. 

1365 - The corners of the bounding box in right ascension/declination. 

1366 

1367 Other quantities such as Detector, Psf, ApCorrMap, and TransmissionCurve 

1368 are not persisted here because of storage concerns, and because of their 

1369 limited utility as summary statistics. 

1370 

1371 Tests for this task are performed in ci_hsc_gen3. 

1372 """ 

1373 _DefaultName = "consolidateVisitSummary" 

1374 ConfigClass = ConsolidateVisitSummaryConfig 

1375 

1376 @classmethod 

1377 def _makeArgumentParser(cls): 

1378 parser = ArgumentParser(name=cls._DefaultName) 

1379 

1380 parser.add_id_argument("--id", "calexp", 

1381 help="data ID, e.g. --id visit=12345", 

1382 ContainerClass=VisitDataIdContainer) 

1383 return parser 

1384 

1385 def writeMetadata(self, dataRef): 

1386 """No metadata to persist, so override to remove metadata persistence. 

1387 """ 

1388 pass 

1389 

1390 def writeConfig(self, butler, clobber=False, doBackup=True): 

1391 """No config to persist, so override to remove config persistence. 

1392 """ 

1393 pass 

1394 

1395 def runDataRef(self, dataRefList): 

1396 visit = dataRefList[0].dataId['visit'] 

1397 

1398 self.log.debug("Concatenating metadata from %d per-detector calexps (visit %d)", 

1399 len(dataRefList), visit) 

1400 

1401 expCatalog = self._combineExposureMetadata(visit, dataRefList, isGen3=False) 

1402 

1403 dataRefList[0].put(expCatalog, 'visitSummary', visit=visit) 

1404 

1405 def runQuantum(self, butlerQC, inputRefs, outputRefs): 

1406 dataRefs = butlerQC.get(inputRefs.calexp) 

1407 visit = dataRefs[0].dataId.byName()['visit'] 

1408 

1409 self.log.debug("Concatenating metadata from %d per-detector calexps (visit %d)", 

1410 len(dataRefs), visit) 

1411 

1412 expCatalog = self._combineExposureMetadata(visit, dataRefs) 

1413 

1414 butlerQC.put(expCatalog, outputRefs.visitSummary) 

1415 

1416 def _combineExposureMetadata(self, visit, dataRefs, isGen3=True): 

1417 """Make a combined exposure catalog from a list of dataRefs. 

1418 These dataRefs must point to exposures with wcs, summaryStats, 

1419 and other visit metadata. 

1420 

1421 Parameters 

1422 ---------- 

1423 visit : `int` 

1424 Visit identification number. 

1425 dataRefs : `list` 

1426 List of dataRefs in visit. May be list of 

1427 `lsst.daf.persistence.ButlerDataRef` (Gen2) or 

1428 `lsst.daf.butler.DeferredDatasetHandle` (Gen3). 

1429 isGen3 : `bool`, optional 

1430 Specifies if this is a Gen3 list of datarefs. 

1431 

1432 Returns 

1433 ------- 

1434 visitSummary : `lsst.afw.table.ExposureCatalog` 

1435 Exposure catalog with per-detector summary information. 

1436 """ 

1437 schema = self._makeVisitSummarySchema() 

1438 cat = afwTable.ExposureCatalog(schema) 

1439 cat.resize(len(dataRefs)) 

1440 

1441 cat['visit'] = visit 

1442 

1443 for i, dataRef in enumerate(dataRefs): 

1444 if isGen3: 

1445 visitInfo = dataRef.get(component='visitInfo') 

1446 filterLabel = dataRef.get(component='filter') 

1447 summaryStats = dataRef.get(component='summaryStats') 

1448 detector = dataRef.get(component='detector') 

1449 wcs = dataRef.get(component='wcs') 

1450 photoCalib = dataRef.get(component='photoCalib') 

1451 detector = dataRef.get(component='detector') 

1452 bbox = dataRef.get(component='bbox') 

1453 validPolygon = dataRef.get(component='validPolygon') 

1454 else: 

1455 # Note that we need to read the calexp because there is 

1456 # no magic access to the psf except through the exposure. 

1457 gen2_read_bbox = lsst.geom.BoxI(lsst.geom.PointI(0, 0), lsst.geom.PointI(1, 1)) 

1458 exp = dataRef.get(datasetType='calexp_sub', bbox=gen2_read_bbox) 

1459 visitInfo = exp.getInfo().getVisitInfo() 

1460 filterLabel = dataRef.get("calexp_filter") 

1461 summaryStats = exp.getInfo().getSummaryStats() 

1462 wcs = exp.getWcs() 

1463 photoCalib = exp.getPhotoCalib() 

1464 detector = exp.getDetector() 

1465 bbox = dataRef.get(datasetType='calexp_bbox') 

1466 validPolygon = exp.getInfo().getValidPolygon() 

1467 

1468 rec = cat[i] 

1469 rec.setBBox(bbox) 

1470 rec.setVisitInfo(visitInfo) 

1471 rec.setWcs(wcs) 

1472 rec.setPhotoCalib(photoCalib) 

1473 rec.setValidPolygon(validPolygon) 

1474 

1475 rec['physical_filter'] = filterLabel.physicalLabel if filterLabel.hasPhysicalLabel() else "" 

1476 rec['band'] = filterLabel.bandLabel if filterLabel.hasBandLabel() else "" 

1477 rec.setId(detector.getId()) 

1478 rec['psfSigma'] = summaryStats.psfSigma 

1479 rec['psfIxx'] = summaryStats.psfIxx 

1480 rec['psfIyy'] = summaryStats.psfIyy 

1481 rec['psfIxy'] = summaryStats.psfIxy 

1482 rec['psfArea'] = summaryStats.psfArea 

1483 rec['raCorners'][:] = summaryStats.raCorners 

1484 rec['decCorners'][:] = summaryStats.decCorners 

1485 rec['ra'] = summaryStats.ra 

1486 rec['decl'] = summaryStats.decl 

1487 rec['zenithDistance'] = summaryStats.zenithDistance 

1488 rec['zeroPoint'] = summaryStats.zeroPoint 

1489 rec['skyBg'] = summaryStats.skyBg 

1490 rec['skyNoise'] = summaryStats.skyNoise 

1491 rec['meanVar'] = summaryStats.meanVar 

1492 rec['astromOffsetMean'] = summaryStats.astromOffsetMean 

1493 rec['astromOffsetStd'] = summaryStats.astromOffsetStd 

1494 rec['nPsfStar'] = summaryStats.nPsfStar 

1495 rec['psfStarDeltaE1Median'] = summaryStats.psfStarDeltaE1Median 

1496 rec['psfStarDeltaE2Median'] = summaryStats.psfStarDeltaE2Median 

1497 rec['psfStarDeltaE1Scatter'] = summaryStats.psfStarDeltaE1Scatter 

1498 rec['psfStarDeltaE2Scatter'] = summaryStats.psfStarDeltaE2Scatter 

1499 rec['psfStarDeltaSizeMedian'] = summaryStats.psfStarDeltaSizeMedian 

1500 rec['psfStarDeltaSizeScatter'] = summaryStats.psfStarDeltaSizeScatter 

1501 rec['psfStarScaledDeltaSizeScatter'] = summaryStats.psfStarScaledDeltaSizeScatter 

1502 

1503 metadata = dafBase.PropertyList() 

1504 metadata.add("COMMENT", "Catalog id is detector id, sorted.") 

1505 # We are looping over existing datarefs, so the following is true 

1506 metadata.add("COMMENT", "Only detectors with data have entries.") 

1507 cat.setMetadata(metadata) 

1508 

1509 cat.sort() 

1510 return cat 

1511 

1512 def _makeVisitSummarySchema(self): 

1513 """Make the schema for the visitSummary catalog.""" 

1514 schema = afwTable.ExposureTable.makeMinimalSchema() 

1515 schema.addField('visit', type='L', doc='Visit number') 

1516 schema.addField('physical_filter', type='String', size=32, doc='Physical filter') 

1517 schema.addField('band', type='String', size=32, doc='Name of band') 

1518 schema.addField('psfSigma', type='F', 

1519 doc='PSF model second-moments determinant radius (center of chip) (pixel)') 

1520 schema.addField('psfArea', type='F', 

1521 doc='PSF model effective area (center of chip) (pixel**2)') 

1522 schema.addField('psfIxx', type='F', 

1523 doc='PSF model Ixx (center of chip) (pixel**2)') 

1524 schema.addField('psfIyy', type='F', 

1525 doc='PSF model Iyy (center of chip) (pixel**2)') 

1526 schema.addField('psfIxy', type='F', 

1527 doc='PSF model Ixy (center of chip) (pixel**2)') 

1528 schema.addField('raCorners', type='ArrayD', size=4, 

1529 doc='Right Ascension of bounding box corners (degrees)') 

1530 schema.addField('decCorners', type='ArrayD', size=4, 

1531 doc='Declination of bounding box corners (degrees)') 

1532 schema.addField('ra', type='D', 

1533 doc='Right Ascension of bounding box center (degrees)') 

1534 schema.addField('decl', type='D', 

1535 doc='Declination of bounding box center (degrees)') 

1536 schema.addField('zenithDistance', type='F', 

1537 doc='Zenith distance of bounding box center (degrees)') 

1538 schema.addField('zeroPoint', type='F', 

1539 doc='Mean zeropoint in detector (mag)') 

1540 schema.addField('skyBg', type='F', 

1541 doc='Average sky background (ADU)') 

1542 schema.addField('skyNoise', type='F', 

1543 doc='Average sky noise (ADU)') 

1544 schema.addField('meanVar', type='F', 

1545 doc='Mean variance of the weight plane (ADU**2)') 

1546 schema.addField('astromOffsetMean', type='F', 

1547 doc='Mean offset of astrometric calibration matches (arcsec)') 

1548 schema.addField('astromOffsetStd', type='F', 

1549 doc='Standard deviation of offsets of astrometric calibration matches (arcsec)') 

1550 schema.addField('nPsfStar', type='I', doc='Number of stars used for PSF model') 

1551 schema.addField('psfStarDeltaE1Median', type='F', 

1552 doc='Median E1 residual (starE1 - psfE1) for psf stars') 

1553 schema.addField('psfStarDeltaE2Median', type='F', 

1554 doc='Median E2 residual (starE2 - psfE2) for psf stars') 

1555 schema.addField('psfStarDeltaE1Scatter', type='F', 

1556 doc='Scatter (via MAD) of E1 residual (starE1 - psfE1) for psf stars') 

1557 schema.addField('psfStarDeltaE2Scatter', type='F', 

1558 doc='Scatter (via MAD) of E2 residual (starE2 - psfE2) for psf stars') 

1559 schema.addField('psfStarDeltaSizeMedian', type='F', 

1560 doc='Median size residual (starSize - psfSize) for psf stars (pixel)') 

1561 schema.addField('psfStarDeltaSizeScatter', type='F', 

1562 doc='Scatter (via MAD) of size residual (starSize - psfSize) for psf stars (pixel)') 

1563 schema.addField('psfStarScaledDeltaSizeScatter', type='F', 

1564 doc='Scatter (via MAD) of size residual scaled by median size squared') 

1565 

1566 return schema 

1567 

1568
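# The following is a minimal, illustrative sketch (not part of this module) of how
# the per-visit summary catalog produced above might be inspected. The repo path,
# collection, instrument, and visit values are placeholders.
def exampleInspectVisitSummary(repo="/path/to/repo", collections="example/run",
                               instrument="HSC", visit=12345):
    import lsst.daf.butler as dafButler

    butler = dafButler.Butler(repo, collections=collections)
    visitSummary = butler.get("visitSummary", instrument=instrument, visit=visit)
    # One row per detector; the record id is the detector id (see the catalog metadata).
    for rec in visitSummary:
        print(rec.getId(), rec["band"], rec["psfSigma"], rec["zeroPoint"])
    # The catalog can also be converted to an astropy table for columnar access.
    return visitSummary.asAstropy()
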

1569class VisitDataIdContainer(DataIdContainer): 

1570 """DataIdContainer that groups sensor-level id's by visit 

1571 """ 

1572 

1573 def makeDataRefList(self, namespace): 

1574 """Make self.refList from self.idList 

1575 

1576 Generate a list of data references grouped by visit. 

1577 

1578 Parameters 

1579 ---------- 

1580 namespace : `argparse.Namespace` 

1581 Namespace used by `lsst.pipe.base.CmdLineTask` to parse command line arguments 

1582 """ 

1583 # Group by visits 

1584 visitRefs = defaultdict(list) 

1585 for dataId in self.idList: 

1586 if "visit" in dataId: 

1587 visitId = dataId["visit"] 

1588 # Append every dataRef in this subset to the group for this visit 

1589 subset = namespace.butler.subset(self.datasetType, dataId=dataId) 

1590 visitRefs[visitId].extend([dataRef for dataRef in subset]) 

1591 

1592 outputRefList = [] 

1593 for refList in visitRefs.values(): 

1594 existingRefs = [ref for ref in refList if ref.datasetExists()] 

1595 if existingRefs: 

1596 outputRefList.append(existingRefs) 

1597 

1598 self.refList = outputRefList 

1599 

1600 
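# Illustrative sketch of the grouping performed by VisitDataIdContainer above:
# sensor-level data IDs are bucketed by their 'visit' value, so each element of
# refList ends up being the list of per-detector references for one visit. The
# data IDs below are made up for illustration.
from collections import defaultdict


def exampleGroupByVisit(dataIds):
    visitGroups = defaultdict(list)
    for dataId in dataIds:
        if "visit" in dataId:
            visitGroups[dataId["visit"]].append(dataId)
    return list(visitGroups.values())

# exampleGroupByVisit([{"visit": 1, "detector": 10}, {"visit": 1, "detector": 11},
#                      {"visit": 2, "detector": 10}])
# -> [[{'visit': 1, 'detector': 10}, {'visit': 1, 'detector': 11}],
#     [{'visit': 2, 'detector': 10}]]
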

1601class ConsolidateSourceTableConnections(pipeBase.PipelineTaskConnections, 

1602 defaultTemplates={"catalogType": ""}, 

1603 dimensions=("instrument", "visit")): 

1604 inputCatalogs = connectionTypes.Input( 

1605 doc="Input per-detector Source Tables", 

1606 name="{catalogType}sourceTable", 

1607 storageClass="DataFrame", 

1608 dimensions=("instrument", "visit", "detector"), 

1609 multiple=True 

1610 ) 

1611 outputCatalog = connectionTypes.Output( 

1612 doc="Per-visit concatenation of Source Table", 

1613 name="{catalogType}sourceTable_visit", 

1614 storageClass="DataFrame", 

1615 dimensions=("instrument", "visit") 

1616 ) 

1617 

1618 

1619class ConsolidateSourceTableConfig(pipeBase.PipelineTaskConfig, 

1620 pipelineConnections=ConsolidateSourceTableConnections): 

1621 pass 

1622 

1623 

1624class ConsolidateSourceTableTask(CmdLineTask, pipeBase.PipelineTask): 

1625 """Concatenate `sourceTable` list into a per-visit `sourceTable_visit` 

1626 """ 

1627 _DefaultName = 'consolidateSourceTable' 

1628 ConfigClass = ConsolidateSourceTableConfig 

1629 

1630 inputDataset = 'sourceTable' 

1631 outputDataset = 'sourceTable_visit' 

1632 

1633 def runQuantum(self, butlerQC, inputRefs, outputRefs): 

1634 from .makeCoaddTempExp import reorderRefs 

1635 

1636 detectorOrder = [ref.dataId['detector'] for ref in inputRefs.inputCatalogs] 

1637 detectorOrder.sort() 

1638 inputRefs = reorderRefs(inputRefs, detectorOrder, dataIdKey='detector') 

1639 inputs = butlerQC.get(inputRefs) 

1640 self.log.info("Concatenating %s per-detector Source Tables", 

1641 len(inputs['inputCatalogs'])) 

1642 df = pd.concat(inputs['inputCatalogs']) 

1643 butlerQC.put(pipeBase.Struct(outputCatalog=df), outputRefs) 

1644 

1645 def runDataRef(self, dataRefList): 

1646 self.log.info("Concatenating %s per-detector Source Tables", len(dataRefList)) 

1647 df = pd.concat([dataRef.get().toDataFrame() for dataRef in dataRefList]) 

1648 dataRefList[0].put(ParquetTable(dataFrame=df), self.outputDataset) 

1649 

1650 @classmethod 

1651 def _makeArgumentParser(cls): 

1652 parser = ArgumentParser(name=cls._DefaultName) 

1653 

1654 parser.add_id_argument("--id", cls.inputDataset, 

1655 help="data ID, e.g. --id visit=12345", 

1656 ContainerClass=VisitDataIdContainer) 

1657 return parser 

1658 

1659 def writeMetadata(self, dataRef): 

1660 """No metadata to write. 

1661 """ 

1662 pass 

1663 

1664 def writeConfig(self, butler, clobber=False, doBackup=True): 

1665 """No config to write. 

1666 """ 

1667 pass 

1668 

1669 
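# Minimal sketch of what ConsolidateSourceTableTask does with its inputs: each
# per-detector source table is a plain DataFrame, and the per-visit table is simply
# their row-wise concatenation. Column names and values are made up for illustration.
import pandas as pd

det10 = pd.DataFrame({"sourceId": [1, 2], "psfFlux": [10.0, 20.0]}).set_index("sourceId")
det11 = pd.DataFrame({"sourceId": [3], "psfFlux": [30.0]}).set_index("sourceId")
perVisit = pd.concat([det10, det11])  # rows from all detectors, one table per visit
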

1670class MakeCcdVisitTableConnections(pipeBase.PipelineTaskConnections, 

1671 dimensions=("instrument",), 

1672 defaultTemplates={"calexpType": ""}): 

1673 visitSummaryRefs = connectionTypes.Input( 

1674 doc="Data references for per-visit consolidated exposure metadata from ConsolidateVisitSummaryTask", 

1675 name="{calexpType}visitSummary", 

1676 storageClass="ExposureCatalog", 

1677 dimensions=("instrument", "visit"), 

1678 multiple=True, 

1679 deferLoad=True, 

1680 ) 

1681 outputCatalog = connectionTypes.Output( 

1682 doc="CCD and Visit metadata table", 

1683 name="ccdVisitTable", 

1684 storageClass="DataFrame", 

1685 dimensions=("instrument",) 

1686 ) 

1687 

1688 

1689class MakeCcdVisitTableConfig(pipeBase.PipelineTaskConfig, 

1690 pipelineConnections=MakeCcdVisitTableConnections): 

1691 pass 

1692 

1693 

1694class MakeCcdVisitTableTask(CmdLineTask, pipeBase.PipelineTask): 

1695 """Produce a `ccdVisitTable` from the `visitSummary` exposure catalogs. 

1696 """ 

1697 _DefaultName = 'makeCcdVisitTable' 

1698 ConfigClass = MakeCcdVisitTableConfig 

1699 

1700 def run(self, visitSummaryRefs): 

1701 """ Make a table of ccd information from the `visitSummary` catalogs. 

1702 Parameters 

1703 ---------- 

1704 visitSummaryRefs : `list` of `lsst.daf.butler.DeferredDatasetHandle` 

1705 List of DeferredDatasetHandles pointing to exposure catalogs with 

1706 per-detector summary information. 

1707 Returns 

1708 ------- 

1709 result : `lsst.pipe.base.Struct` 

1710 Results struct with attribute: 

1711 ``outputCatalog`` 

1712 Catalog of CCD and visit information. 

1713 """ 

1714 ccdEntries = [] 

1715 for visitSummaryRef in visitSummaryRefs: 

1716 visitSummary = visitSummaryRef.get() 

1717 visitInfo = visitSummary[0].getVisitInfo() 

1718 

1719 ccdEntry = {} 

1720 summaryTable = visitSummary.asAstropy() 

1721 selectColumns = ['id', 'visit', 'physical_filter', 'band', 'ra', 'decl', 'zenithDistance', 

1722 'zeroPoint', 'psfSigma', 'skyBg', 'skyNoise'] 

1723 ccdEntry = summaryTable[selectColumns].to_pandas().set_index('id') 

1724 # 'visit' is the human-readable visit number. 

1725 # 'visitId' is the key to the visit table; here they are the same. 

1726 # Technically one should join with the visit table to get 'visit'. 

1727 ccdEntry = ccdEntry.rename(columns={"visit": "visitId"}) 

1728 dataIds = [DataCoordinate.standardize(visitSummaryRef.dataId, detector=id) for id in 

1729 summaryTable['id']] 

1730 packer = visitSummaryRef.dataId.universe.makePacker('visit_detector', visitSummaryRef.dataId) 

1731 ccdVisitIds = [packer.pack(dataId) for dataId in dataIds] 

1732 ccdEntry['ccdVisitId'] = ccdVisitIds 

1733 ccdEntry['detector'] = summaryTable['id'] 

1734 pixToArcseconds = np.array([vR.getWcs().getPixelScale().asArcseconds() for vR in visitSummary]) 

1735 ccdEntry["seeing"] = visitSummary['psfSigma'] * np.sqrt(8 * np.log(2)) * pixToArcseconds 

1736 

1737 ccdEntry["skyRotation"] = visitInfo.getBoresightRotAngle().asDegrees() 

1738 ccdEntry["expMidpt"] = visitInfo.getDate().toPython() 

1739 ccdEntry["expMidptMJD"] = visitInfo.getDate().get(dafBase.DateTime.MJD) 

1740 expTime = visitInfo.getExposureTime() 

1741 ccdEntry['expTime'] = expTime 

1742 ccdEntry["obsStart"] = ccdEntry["expMidpt"] - 0.5 * pd.Timedelta(seconds=expTime) 

1743 expTime_days = expTime / (60*60*24) 

1744 ccdEntry["obsStartMJD"] = ccdEntry["expMidptMJD"] - 0.5 * expTime_days 

1745 ccdEntry['darkTime'] = visitInfo.getDarkTime() 

1746 ccdEntry['xSize'] = summaryTable['bbox_max_x'] - summaryTable['bbox_min_x'] 

1747 ccdEntry['ySize'] = summaryTable['bbox_max_y'] - summaryTable['bbox_min_y'] 

1748 ccdEntry['llcra'] = summaryTable['raCorners'][:, 0] 

1749 ccdEntry['llcdec'] = summaryTable['decCorners'][:, 0] 

1750 ccdEntry['ulcra'] = summaryTable['raCorners'][:, 1] 

1751 ccdEntry['ulcdec'] = summaryTable['decCorners'][:, 1] 

1752 ccdEntry['urcra'] = summaryTable['raCorners'][:, 2] 

1753 ccdEntry['urcdec'] = summaryTable['decCorners'][:, 2] 

1754 ccdEntry['lrcra'] = summaryTable['raCorners'][:, 3] 

1755 ccdEntry['lrcdec'] = summaryTable['decCorners'][:, 3] 

1756 # TODO: DM-30618, Add raftName, nExposures, ccdTemp, binX, binY, and flags, 

1757 # and decide if WCS, and llcx, llcy, ulcx, ulcy, etc. values are actually wanted. 

1758 ccdEntries.append(ccdEntry) 

1759 

1760 outputCatalog = pd.concat(ccdEntries) 

1761 outputCatalog.set_index('ccdVisitId', inplace=True, verify_integrity=True) 

1762 return pipeBase.Struct(outputCatalog=outputCatalog) 

1763 

1764 
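# Worked example of the seeing conversion used in MakeCcdVisitTableTask.run above:
# psfSigma is the Gaussian-equivalent sigma in pixels, and FWHM = sigma * sqrt(8 ln 2)
# (about 2.355 * sigma); multiplying by the pixel scale gives the FWHM in arcseconds.
# The numbers below are illustrative only.
import numpy as np

psfSigma = 1.7       # pixels
pixelScale = 0.168   # arcsec / pixel (roughly an HSC-like plate scale)
seeing = psfSigma * np.sqrt(8 * np.log(2)) * pixelScale
# seeing ~= 1.7 * 2.3548 * 0.168 ~= 0.67 arcsec
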

1765class MakeVisitTableConnections(pipeBase.PipelineTaskConnections, 

1766 dimensions=("instrument",), 

1767 defaultTemplates={"calexpType": ""}): 

1768 visitSummaries = connectionTypes.Input( 

1769 doc="Per-visit consolidated exposure metadata from ConsolidateVisitSummaryTask", 

1770 name="{calexpType}visitSummary", 

1771 storageClass="ExposureCatalog", 

1772 dimensions=("instrument", "visit",), 

1773 multiple=True, 

1774 deferLoad=True, 

1775 ) 

1776 outputCatalog = connectionTypes.Output( 

1777 doc="Visit metadata table", 

1778 name="visitTable", 

1779 storageClass="DataFrame", 

1780 dimensions=("instrument",) 

1781 ) 

1782 

1783 

1784class MakeVisitTableConfig(pipeBase.PipelineTaskConfig, 

1785 pipelineConnections=MakeVisitTableConnections): 

1786 pass 

1787 

1788 

1789class MakeVisitTableTask(CmdLineTask, pipeBase.PipelineTask): 

1790 """Produce a `visitTable` from the `visitSummary` exposure catalogs. 

1791 """ 

1792 _DefaultName = 'makeVisitTable' 

1793 ConfigClass = MakeVisitTableConfig 

1794 

1795 def run(self, visitSummaries): 

1796 """ Make a table of visit information from the `visitSummary` catalogs 

1797 

1798 Parameters 

1799 ---------- 

1800 visitSummaries : `list` of `lsst.daf.butler.DeferredDatasetHandle` 

1801 List of handles for exposure catalogs with per-detector summary information. 

1802 Returns 

1803 ------- 

1804 result : `lsst.pipe.base.Struct` 

1805 Results struct with attribute: 

1806 ``outputCatalog`` 

1807 Catalog of visit information. 

1808 """ 

1809 visitEntries = [] 

1810 for visitSummary in visitSummaries: 

1811 visitSummary = visitSummary.get() 

1812 visitRow = visitSummary[0] 

1813 visitInfo = visitRow.getVisitInfo() 

1814 

1815 visitEntry = {} 

1816 visitEntry["visitId"] = visitRow['visit'] 

1817 visitEntry["visit"] = visitRow['visit'] 

1818 visitEntry["physical_filter"] = visitRow['physical_filter'] 

1819 visitEntry["band"] = visitRow['band'] 

1820 raDec = visitInfo.getBoresightRaDec() 

1821 visitEntry["ra"] = raDec.getRa().asDegrees() 

1822 visitEntry["decl"] = raDec.getDec().asDegrees() 

1823 visitEntry["skyRotation"] = visitInfo.getBoresightRotAngle().asDegrees() 

1824 azAlt = visitInfo.getBoresightAzAlt() 

1825 visitEntry["azimuth"] = azAlt.getLongitude().asDegrees() 

1826 visitEntry["altitude"] = azAlt.getLatitude().asDegrees() 

1827 visitEntry["zenithDistance"] = 90 - azAlt.getLatitude().asDegrees() 

1828 visitEntry["airmass"] = visitInfo.getBoresightAirmass() 

1829 expTime = visitInfo.getExposureTime() 

1830 visitEntry["expTime"] = expTime 

1831 visitEntry["expMidpt"] = visitInfo.getDate().toPython() 

1832 visitEntry["expMidptMJD"] = visitInfo.getDate().get(dafBase.DateTime.MJD) 

1833 visitEntry["obsStart"] = visitEntry["expMidpt"] - 0.5 * pd.Timedelta(seconds=expTime) 

1834 expTime_days = expTime / (60*60*24) 

1835 visitEntry["obsStartMJD"] = visitEntry["expMidptMJD"] - 0.5 * expTime_days 

1836 visitEntries.append(visitEntry) 

1837 

1838 # TODO: DM-30623, Add programId, exposureType, cameraTemp, mirror1Temp, mirror2Temp, 

1839 # mirror3Temp, domeTemp, externalTemp, dimmSeeing, pwvGPS, pwvMW, flags, nExposures 

1840 

1841 outputCatalog = pd.DataFrame(data=visitEntries) 

1842 outputCatalog.set_index('visitId', inplace=True, verify_integrity=True) 

1843 return pipeBase.Struct(outputCatalog=outputCatalog) 

1844 

1845 
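# Worked example of the observation-start arithmetic used in the two tasks above:
# the visit date is the exposure midpoint, so the start is half the exposure time
# earlier, both as a timestamp and in MJD (days). All values are placeholders.
import pandas as pd

expTime = 30.0                                   # seconds
expMidpt = pd.Timestamp("2022-06-14T12:00:00")   # exposure midpoint (placeholder)
expMidptMJD = 59744.5                            # matching MJD for that UTC instant

obsStart = expMidpt - 0.5 * pd.Timedelta(seconds=expTime)   # 11:59:45
obsStartMJD = expMidptMJD - 0.5 * expTime / (60 * 60 * 24)  # 15 s earlier, in days
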

1846class WriteForcedSourceTableConnections(pipeBase.PipelineTaskConnections, 

1847 dimensions=("instrument", "visit", "detector", "skymap", "tract")): 

1848 

1849 inputCatalog = connectionTypes.Input( 

1850 doc="Primary per-detector, single-epoch forced-photometry catalog. " 

1851 "By default, it is the output of ForcedPhotCcdTask on calexps", 

1852 name="forced_src", 

1853 storageClass="SourceCatalog", 

1854 dimensions=("instrument", "visit", "detector", "skymap", "tract") 

1855 ) 

1856 inputCatalogDiff = connectionTypes.Input( 

1857 doc="Secondary multi-epoch, per-detector, forced photometry catalog. " 

1858 "By default, it is the output of ForcedPhotCcdTask run on image differences.", 

1859 name="forced_diff", 

1860 storageClass="SourceCatalog", 

1861 dimensions=("instrument", "visit", "detector", "skymap", "tract") 

1862 ) 

1863 outputCatalog = connectionTypes.Output( 

1864 doc="InputCatalogs horizonatally joined on `objectId` in Parquet format", 

1865 name="mergedForcedSource", 

1866 storageClass="DataFrame", 

1867 dimensions=("instrument", "visit", "detector", "skymap", "tract") 

1868 ) 

1869 

1870 

1871class WriteForcedSourceTableConfig(pipeBase.PipelineTaskConfig, 

1872 pipelineConnections=WriteForcedSourceTableConnections): 

1873 key = lsst.pex.config.Field( 

1874 doc="Column on which to join the two input tables on and make the primary key of the output", 

1875 dtype=str, 

1876 default="objectId", 

1877 ) 

1878 

1879 

1880class WriteForcedSourceTableTask(pipeBase.PipelineTask): 

1881 """Merge and convert per-detector forced source catalogs to parquet 

1882 

1883 Because the predecessor ForcedPhotCcdTask operates per-detector, 

1884 per-tract (i.e., it has tract in its dimensions), detectors 

1885 on the tract boundary may have multiple forced source catalogs. 

1886 

1887 The successor task TransformForcedSourceTable runs per-patch 

1888 and temporally aggregates the overlapping mergedForcedSource catalogs from 

1889 all available epochs. 

1890 """ 

1891 _DefaultName = "writeForcedSourceTable" 

1892 ConfigClass = WriteForcedSourceTableConfig 

1893 

1894 def runQuantum(self, butlerQC, inputRefs, outputRefs): 

1895 inputs = butlerQC.get(inputRefs) 

1896 # Add ccdVisitId to allow joining with CcdVisitTable 

1897 inputs['ccdVisitId'] = butlerQC.quantum.dataId.pack("visit_detector") 

1898 inputs['band'] = butlerQC.quantum.dataId.full['band'] 

1899 outputs = self.run(**inputs) 

1900 butlerQC.put(outputs, outputRefs) 

1901 

1902 def run(self, inputCatalog, inputCatalogDiff, ccdVisitId=None, band=None): 

1903 dfs = [] 

1904 for table, dataset, in zip((inputCatalog, inputCatalogDiff), ('calexp', 'diff')): 

1905 df = table.asAstropy().to_pandas().set_index(self.config.key, drop=False) 

1906 df = df.reindex(sorted(df.columns), axis=1) 

1907 df['ccdVisitId'] = ccdVisitId if ccdVisitId else pd.NA 

1908 df['band'] = band if band else pd.NA 

1909 df.columns = pd.MultiIndex.from_tuples([(dataset, c) for c in df.columns], 

1910 names=('dataset', 'column')) 

1911 

1912 dfs.append(df) 

1913 

1914 outputCatalog = functools.reduce(lambda d1, d2: d1.join(d2), dfs) 

1915 return pipeBase.Struct(outputCatalog=outputCatalog) 

1916 

1917 
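# Minimal sketch of the column layout produced by WriteForcedSourceTableTask.run above:
# each input catalog becomes a DataFrame whose columns carry a two-level
# ('dataset', 'column') MultiIndex, and the frames are joined on objectId.
# Column names and values below are made up for illustration.
import functools
import pandas as pd

calexp = pd.DataFrame({"objectId": [1, 2], "psfFlux": [5.0, 6.0]}).set_index("objectId", drop=False)
diff = pd.DataFrame({"objectId": [1, 2], "psfFlux": [0.1, -0.2]}).set_index("objectId", drop=False)

dfs = []
for df, dataset in ((calexp, "calexp"), (diff, "diff")):
    df = df.copy()
    df.columns = pd.MultiIndex.from_tuples([(dataset, c) for c in df.columns],
                                           names=("dataset", "column"))
    dfs.append(df)

merged = functools.reduce(lambda d1, d2: d1.join(d2), dfs)
# merged has columns ('calexp', 'objectId'), ('calexp', 'psfFlux'),
# ('diff', 'objectId'), ('diff', 'psfFlux'), indexed by objectId.
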

1918class TransformForcedSourceTableConnections(pipeBase.PipelineTaskConnections, 

1919 dimensions=("instrument", "skymap", "patch", "tract")): 

1920 

1921 inputCatalogs = connectionTypes.Input( 

1922 doc="Parquet table of merged ForcedSources produced by WriteForcedSourceTableTask", 

1923 name="mergedForcedSource", 

1924 storageClass="DataFrame", 

1925 dimensions=("instrument", "visit", "detector", "skymap", "tract"), 

1926 multiple=True, 

1927 deferLoad=True 

1928 ) 

1929 referenceCatalog = connectionTypes.Input( 

1930 doc="Reference catalog which was used to seed the forcedPhot. Columns " 

1931 "objectId, detect_isPrimary, detect_isTractInner, detect_isPatchInner " 

1932 "are expected.", 

1933 name="objectTable", 

1934 storageClass="DataFrame", 

1935 dimensions=("tract", "patch", "skymap"), 

1936 deferLoad=True 

1937 ) 

1938 outputCatalog = connectionTypes.Output( 

1939 doc="Narrower, temporally-aggregated, per-patch ForcedSource Table transformed and converted per a " 

1940 "specified set of functors", 

1941 name="forcedSourceTable", 

1942 storageClass="DataFrame", 

1943 dimensions=("tract", "patch", "skymap") 

1944 ) 

1945 

1946 

1947class TransformForcedSourceTableConfig(TransformCatalogBaseConfig, 

1948 pipelineConnections=TransformForcedSourceTableConnections): 

1949 referenceColumns = pexConfig.ListField( 

1950 dtype=str, 

1951 default=["detect_isPrimary", "detect_isTractInner", "detect_isPatchInner"], 

1952 optional=True, 

1953 doc="Columns to pull from reference catalog", 

1954 ) 

1955 keyRef = lsst.pex.config.Field( 

1956 doc="Column on which to join the two input tables on and make the primary key of the output", 

1957 dtype=str, 

1958 default="objectId", 

1959 ) 

1960 key = lsst.pex.config.Field( 

1961 doc="Rename the output DataFrame index to this name", 

1962 dtype=str, 

1963 default="forcedSourceId", 

1964 ) 

1965 

1966 def setDefaults(self): 

1967 super().setDefaults() 

1968 self.functorFile = os.path.join('$PIPE_TASKS_DIR', 'schemas', 'ForcedSource.yaml') 

1969 self.columnsFromDataId = ['tract', 'patch'] 

1970 

1971 

1972class TransformForcedSourceTableTask(TransformCatalogBaseTask): 

1973 """Transform/standardize a ForcedSource catalog 

1974 

1975 Transforms each wide, per-detector forcedSource parquet table per the 

1976 specification file (per-camera defaults found in ForcedSource.yaml). 

1977 All epochs that overlap the patch are aggregated into one per-patch 

1978 narrow parquet file. 

1979 

1980 No de-duplication of rows is performed. Duplicate-resolution flags are 

1981 pulled in from the referenceCatalog: `detect_isPrimary`, 

1982 `detect_isTractInner`, and `detect_isPatchInner`, so that the user may 

1983 de-duplicate for analysis or compare duplicates for QA. 

1984 

1985 The resulting table includes multiple bands. Epochs (MJDs) and other useful 

1986 per-visit quantities can be retrieved by joining with the CcdVisitTable on 

1987 ccdVisitId. 

1988 """ 

1989 _DefaultName = "transformForcedSourceTable" 

1990 ConfigClass = TransformForcedSourceTableConfig 

1991 

1992 def runQuantum(self, butlerQC, inputRefs, outputRefs): 

1993 inputs = butlerQC.get(inputRefs) 

1994 if self.funcs is None: 

1995 raise ValueError("config.functorFile is None. " 

1996 "Must be a valid path to yaml in order to run Task as a PipelineTask.") 

1997 outputs = self.run(inputs['inputCatalogs'], inputs['referenceCatalog'], funcs=self.funcs, 

1998 dataId=outputRefs.outputCatalog.dataId.full) 

1999 

2000 butlerQC.put(outputs, outputRefs) 

2001 

2002 def run(self, inputCatalogs, referenceCatalog, funcs=None, dataId=None, band=None): 

2003 dfs = [] 

2004 ref = referenceCatalog.get(parameters={"columns": self.config.referenceColumns}) 

2005 self.log.info("Aggregating %s input catalogs" % (len(inputCatalogs))) 

2006 for handle in inputCatalogs: 

2007 result = self.transform(None, handle, funcs, dataId) 

2008 # Filter for only rows that were detected on (overlap) the patch 

2009 dfs.append(result.df.join(ref, how='inner')) 

2010 

2011 outputCatalog = pd.concat(dfs) 

2012 

2013 # Now that the join on config.keyRef is done, rebuild the index: 

2014 # first label the current index with config.keyRef (the object id), 

2015 outputCatalog.index.rename(self.config.keyRef, inplace=True) 

2016 # then move it back into the ordinary columns, 

2017 outputCatalog.reset_index(inplace=True) 

2018 # set forcedSourceId (computed per ForcedSource.yaml) as the new, unique index, 

2019 outputCatalog.set_index("forcedSourceId", inplace=True, verify_integrity=True) 

2020 # and finally rename that index to config.key. 

2021 outputCatalog.index.rename(self.config.key, inplace=True) 

2022 

2023 self.log.info("Made a table of %d columns and %d rows", 

2024 len(outputCatalog.columns), len(outputCatalog)) 

2025 return pipeBase.Struct(outputCatalog=outputCatalog) 

2026 

2027 
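# Minimal sketch of the index manipulation at the end of
# TransformForcedSourceTableTask.run above: the join key (objectId) is moved back
# into the columns and forcedSourceId becomes the unique index, renamed per
# config.key. The tiny frame below uses made-up values.
import pandas as pd

df = pd.DataFrame({"forcedSourceId": [101, 102], "psfFlux": [1.0, 2.0]},
                  index=[11, 11])

df.index.rename("objectId", inplace=True)        # label the join key (config.keyRef)
df.reset_index(inplace=True)                     # keep objectId as an ordinary column
df.set_index("forcedSourceId", inplace=True, verify_integrity=True)
df.index.rename("forcedSourceId", inplace=True)  # config.key (same name by default)
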

2028class ConsolidateTractConnections(pipeBase.PipelineTaskConnections, 

2029 defaultTemplates={"catalogType": ""}, 

2030 dimensions=("instrument", "tract")): 

2031 inputCatalogs = connectionTypes.Input( 

2032 doc="Input per-patch DataFrame Tables to be concatenated", 

2033 name="{catalogType}ForcedSourceTable", 

2034 storageClass="DataFrame", 

2035 dimensions=("tract", "patch", "skymap"), 

2036 multiple=True, 

2037 ) 

2038 

2039 outputCatalog = connectionTypes.Output( 

2040 doc="Output per-tract concatenation of DataFrame Tables", 

2041 name="{catalogType}ForcedSourceTable_tract", 

2042 storageClass="DataFrame", 

2043 dimensions=("tract", "skymap"), 

2044 ) 

2045 

2046 

2047class ConsolidateTractConfig(pipeBase.PipelineTaskConfig, 

2048 pipelineConnections=ConsolidateTractConnections): 

2049 pass 

2050 

2051 

2052class ConsolidateTractTask(CmdLineTask, pipeBase.PipelineTask): 

2053 """Concatenate any per-patch, dataframe list into a single 

2054 per-tract DataFrame 

2055 """ 

2056 _DefaultName = 'ConsolidateTract' 

2057 ConfigClass = ConsolidateTractConfig 

2058 

2059 def runQuantum(self, butlerQC, inputRefs, outputRefs): 

2060 inputs = butlerQC.get(inputRefs) 

2061 # No check that at least one inputCatalog exists: if none did, the quantum graph would be empty 

2062 self.log.info("Concatenating %s per-patch %s Tables", 

2063 len(inputs['inputCatalogs']), 

2064 inputRefs.inputCatalogs[0].datasetType.name) 

2065 df = pd.concat(inputs['inputCatalogs']) 

2066 butlerQC.put(pipeBase.Struct(outputCatalog=df), outputRefs)