Coverage for python/lsst/pipe/tasks/finalizeCharacterization.py: 17%

242 statements  

coverage.py v6.4, created at 2022-05-24 03:25 -0700

1# 

2# LSST Data Management System 

3# Copyright 2008-2022 AURA/LSST. 

4# 

5# This product includes software developed by the 

6# LSST Project (http://www.lsst.org/). 

7# 

8# This program is free software: you can redistribute it and/or modify 

9# it under the terms of the GNU General Public License as published by 

10# the Free Software Foundation, either version 3 of the License, or 

11# (at your option) any later version. 

12# 

13# This program is distributed in the hope that it will be useful, 

14# but WITHOUT ANY WARRANTY; without even the implied warranty of 

15# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 

16# GNU General Public License for more details. 

17# 

18# You should have received a copy of the LSST License Statement and 

19# the GNU General Public License along with this program. If not, 

20# see <http://www.lsstcorp.org/LegalNotices/>. 

21# 

22"""Task to run a finalized image characterization, using additional data. 

23""" 

24import numpy as np 

25import esutil 

26import pandas as pd 

27 

28import lsst.pex.config as pexConfig 

29import lsst.pipe.base as pipeBase 

30import lsst.daf.base as dafBase 

31import lsst.afw.table as afwTable 

32import lsst.meas.algorithms as measAlg 

33import lsst.meas.extensions.piff.piffPsfDeterminer # noqa: F401 

34from lsst.meas.algorithms import MeasureApCorrTask 

35from lsst.meas.base import SingleFrameMeasurementTask, ApplyApCorrTask 

36from lsst.meas.algorithms.sourceSelector import sourceSelectorRegistry 

37 

38from .reserveIsolatedStars import ReserveIsolatedStarsTask 

39 

40__all__ = ['FinalizeCharacterizationConnections', 

41 'FinalizeCharacterizationConfig', 

42 'FinalizeCharacterizationTask'] 

43 

44 

45class FinalizeCharacterizationConnections(pipeBase.PipelineTaskConnections, 

46 dimensions=('instrument', 'visit',), 

47 defaultTemplates={}): 

48 src_schema = pipeBase.connectionTypes.InitInput( 

49 doc='Input schema used for src catalogs.', 

50 name='src_schema', 

51 storageClass='SourceCatalog', 

52 ) 

53 srcs = pipeBase.connectionTypes.Input( 

54 doc='Source catalogs for the visit', 

55 name='src', 

56 storageClass='SourceCatalog', 

57 dimensions=('instrument', 'visit', 'detector'), 

58 deferLoad=True, 

59 multiple=True, 

60 ) 

61 calexps = pipeBase.connectionTypes.Input( 

62 doc='Calexps for the visit', 

63 name='calexp', 

64 storageClass='ExposureF', 

65 dimensions=('instrument', 'visit', 'detector'), 

66 deferLoad=True, 

67 multiple=True, 

68 ) 

69 isolated_star_cats = pipeBase.connectionTypes.Input( 

70 doc=('Catalog of isolated stars with average positions, number of associated ' 

71 'sources, and indexes to the isolated_star_sources catalogs.'), 

72 name='isolated_star_cat', 

73 storageClass='DataFrame', 

74 dimensions=('instrument', 'tract', 'skymap'), 

75 deferLoad=True, 

76 multiple=True, 

77 ) 

78 isolated_star_sources = pipeBase.connectionTypes.Input( 

79 doc=('Catalog of isolated star sources with sourceIds, and indexes to the ' 

80 'isolated_star_cats catalogs.'), 

81 name='isolated_star_sources', 

82 storageClass='DataFrame', 

83 dimensions=('instrument', 'tract', 'skymap'), 

84 deferLoad=True, 

85 multiple=True, 

86 ) 

87 finalized_psf_ap_corr_cat = pipeBase.connectionTypes.Output( 

88 doc=('Per-visit finalized psf models and aperture corrections. This '

89 'catalog uses detector id for the id and is sorted for fast '

90 'lookups of a detector.'),

91 name='finalized_psf_ap_corr_catalog', 

92 storageClass='ExposureCatalog', 

93 dimensions=('instrument', 'visit'), 

94 ) 

95 finalized_src_table = pipeBase.connectionTypes.Output( 

96 doc=('Per-visit catalog of measurements for psf/flag/etc.'), 

97 name='finalized_src_table', 

98 storageClass='DataFrame', 

99 dimensions=('instrument', 'visit'), 

100 ) 

101 

102 
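The per-visit output defined above uses the detector id as the catalog id and is kept sorted, so a downstream reader can look up a single detector directly. A minimal sketch (not part of the module) of reading it back: the repository path, collection, instrument, visit, and detector id are placeholders, and the dataset name is simply the connection default above.

from lsst.daf.butler import Butler

butler = Butler('/path/to/repo', collections=['placeholder/collection'])
cat = butler.get('finalized_psf_ap_corr_catalog', instrument='HSC', visit=1234)

# The catalog id is the detector id and the catalog is sorted,
# so find() is a fast per-detector lookup; it returns None if the
# detector has no entry.
record = cat.find(42)
if record is not None:
    psf = record.getPsf()                 # may be None if PSF determination failed
    ap_corr_map = record.getApCorrMap()   # may be None if ap corr failed
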

103class FinalizeCharacterizationConfig(pipeBase.PipelineTaskConfig, 

104 pipelineConnections=FinalizeCharacterizationConnections): 

105 """Configuration for FinalizeCharacterizationTask.""" 

106 source_selector = sourceSelectorRegistry.makeField( 

107 doc="How to select sources", 

108 default="science" 

109 ) 

110 id_column = pexConfig.Field( 

111 doc='Name of column in isolated_star_sources with source id.', 

112 dtype=str, 

113 default='sourceId', 

114 ) 

115 reserve_selection = pexConfig.ConfigurableField( 

116 target=ReserveIsolatedStarsTask, 

117 doc='Task to select reserved stars', 

118 ) 

119 make_psf_candidates = pexConfig.ConfigurableField( 

120 target=measAlg.MakePsfCandidatesTask, 

121 doc='Task to make psf candidates from selected stars.', 

122 ) 

123 psf_determiner = measAlg.psfDeterminerRegistry.makeField( 

124 'PSF Determination algorithm', 

125 default='piff' 

126 ) 

127 measurement = pexConfig.ConfigurableField( 

128 target=SingleFrameMeasurementTask, 

129 doc='Measure sources for aperture corrections' 

130 ) 

131 measure_ap_corr = pexConfig.ConfigurableField( 

132 target=MeasureApCorrTask, 

133 doc="Subtask to measure aperture corrections" 

134 ) 

135 apply_ap_corr = pexConfig.ConfigurableField( 

136 target=ApplyApCorrTask, 

137 doc="Subtask to apply aperture corrections" 

138 ) 

139 

140 def setDefaults(self): 

141 super().setDefaults() 

142 

143 source_selector = self.source_selector['science'] 

144 source_selector.setDefaults() 

145 

146 # We use the source selector only to apply flag and

147 # signal-to-noise cuts. Isolated, unresolved sources are handled

148 # by the isolated star catalog.

149 

150 source_selector.doFlags = True 

151 source_selector.doSignalToNoise = True 

152 source_selector.doFluxLimit = False 

153 source_selector.doUnresolved = False 

154 source_selector.doIsolated = False 

155 

156 source_selector.signalToNoise.minimum = 20.0 

157 source_selector.signalToNoise.maximum = 1000.0 

158 

159 source_selector.signalToNoise.fluxField = 'base_GaussianFlux_instFlux' 

160 source_selector.signalToNoise.errField = 'base_GaussianFlux_instFluxErr' 

161 

162 source_selector.flags.bad = ['base_PixelFlags_flag_edge', 

163 'base_PixelFlags_flag_interpolatedCenter', 

164 'base_PixelFlags_flag_saturatedCenter', 

165 'base_PixelFlags_flag_crCenter', 

166 'base_PixelFlags_flag_bad', 

167 'base_PixelFlags_flag_interpolated', 

168 'base_PixelFlags_flag_saturated', 

169 'slot_Centroid_flag', 

170 'base_GaussianFlux_flag'] 

171 

172 self.measure_ap_corr.sourceSelector['flagged'].field = 'calib_psf_used' 

173 

174 import lsst.meas.modelfit # noqa: F401 

175 import lsst.meas.extensions.photometryKron # noqa: F401 

176 import lsst.meas.extensions.convolved # noqa: F401 

177 import lsst.meas.extensions.gaap # noqa: F401 

178 import lsst.meas.extensions.shapeHSM # noqa: F401 

179 

180 # Set up measurement defaults 

181 self.measurement.plugins.names = [ 

182 'base_PsfFlux', 

183 'base_GaussianFlux', 

184 'modelfit_DoubleShapeletPsfApprox', 

185 'modelfit_CModel', 

186 'ext_photometryKron_KronFlux', 

187 'ext_convolved_ConvolvedFlux', 

188 'ext_gaap_GaapFlux', 

189 'ext_shapeHSM_HsmShapeRegauss', 

190 'ext_shapeHSM_HsmSourceMoments', 

191 'ext_shapeHSM_HsmPsfMoments', 

192 'ext_shapeHSM_HsmSourceMomentsRound', 

193 ] 

194 self.measurement.slots.modelFlux = 'modelfit_CModel' 

195 self.measurement.plugins['ext_convolved_ConvolvedFlux'].seeing.append(8.0) 

196 self.measurement.plugins['ext_gaap_GaapFlux'].sigmas = [ 

197 0.5, 

198 0.7, 

199 1.0, 

200 1.5, 

201 2.5, 

202 3.0 

203 ] 

204 self.measurement.plugins['ext_gaap_GaapFlux'].doPsfPhotometry = True 

205 self.measurement.slots.shape = 'ext_shapeHSM_HsmSourceMoments' 

206 self.measurement.slots.psfShape = 'ext_shapeHSM_HsmPsfMoments' 

207 self.measurement.plugins['ext_shapeHSM_HsmShapeRegauss'].deblendNChild = "" 

208 # Turn off the centroid, aperture flux, and calibration flux slots

209 # (for which we use the input src catalog measurements).

210 self.measurement.slots.centroid = None 

211 self.measurement.slots.apFlux = None 

212 self.measurement.slots.calibFlux = None 

213 

214 names = self.measurement.plugins['ext_convolved_ConvolvedFlux'].getAllResultNames() 

215 self.measure_ap_corr.allowFailure += names 

216 names = self.measurement.plugins["ext_gaap_GaapFlux"].getAllGaapResultNames() 

217 self.measure_ap_corr.allowFailure += names 

218 

219 
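The defaults set above are ordinary pex_config fields, so they can be adjusted without code changes through a task config override (for example a file passed to pipetask via --config-file under whatever label the pipeline gives this task). A minimal sketch that touches only fields appearing in setDefaults above; the values are illustrative:

# Hypothetical override file; 'config' is this task's config instance.
config.source_selector['science'].signalToNoise.minimum = 50.0
config.source_selector['science'].signalToNoise.maximum = 500.0
config.measurement.plugins['ext_gaap_GaapFlux'].sigmas = [0.7, 1.0, 1.5]
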

220class FinalizeCharacterizationTask(pipeBase.PipelineTask): 

221 """Run final characterization on exposures.""" 

222 ConfigClass = FinalizeCharacterizationConfig 

223 _DefaultName = 'finalize_characterization' 

224 

225 def __init__(self, initInputs=None, **kwargs): 

226 super().__init__(initInputs=initInputs, **kwargs) 

227 

228 self.schema_mapper, self.schema = self._make_output_schema_mapper( 

229 initInputs['src_schema'].schema 

230 ) 

231 

232 self.makeSubtask('reserve_selection') 

233 self.makeSubtask('source_selector') 

234 self.makeSubtask('make_psf_candidates') 

235 self.makeSubtask('psf_determiner') 

236 self.makeSubtask('measurement', schema=self.schema) 

237 self.makeSubtask('measure_ap_corr', schema=self.schema) 

238 self.makeSubtask('apply_ap_corr', schema=self.schema) 

239 

240 # Only log warnings and more severe messages from the source_selector

241 self.source_selector.log.setLevel(self.source_selector.log.WARN) 

242 

243 def runQuantum(self, butlerQC, inputRefs, outputRefs): 

244 input_handle_dict = butlerQC.get(inputRefs) 

245 

246 band = butlerQC.quantum.dataId['band'] 

247 visit = butlerQC.quantum.dataId['visit'] 

248 

249 src_dict_temp = {handle.dataId['detector']: handle 

250 for handle in input_handle_dict['srcs']} 

251 calexp_dict_temp = {handle.dataId['detector']: handle 

252 for handle in input_handle_dict['calexps']} 

253 isolated_star_cat_dict_temp = {handle.dataId['tract']: handle 

254 for handle in input_handle_dict['isolated_star_cats']} 

255 isolated_star_source_dict_temp = {handle.dataId['tract']: handle 

256 for handle in input_handle_dict['isolated_star_sources']} 

257 # TODO: Sort until DM-31701 is done and we have deterministic 

258 # dataset ordering. 

259 src_dict = {detector: src_dict_temp[detector] for 

260 detector in sorted(src_dict_temp.keys())} 

261 calexp_dict = {detector: calexp_dict_temp[detector] for 

262 detector in sorted(calexp_dict_temp.keys())} 

263 isolated_star_cat_dict = {tract: isolated_star_cat_dict_temp[tract] for 

264 tract in sorted(isolated_star_cat_dict_temp.keys())} 

265 isolated_star_source_dict = {tract: isolated_star_source_dict_temp[tract] for 

266 tract in sorted(isolated_star_source_dict_temp.keys())} 

267 

268 struct = self.run(visit, 

269 band, 

270 isolated_star_cat_dict, 

271 isolated_star_source_dict, 

272 src_dict, 

273 calexp_dict) 

274 

275 butlerQC.put(struct.psf_ap_corr_cat, 

276 outputRefs.finalized_psf_ap_corr_cat) 

277 butlerQC.put(pd.DataFrame(struct.output_table), 

278 outputRefs.finalized_src_table) 

279 

280 def run(self, visit, band, isolated_star_cat_dict, isolated_star_source_dict, src_dict, calexp_dict): 

281 """ 

282 Run the FinalizeCharacterizationTask. 

283 

284 Parameters 

285 ---------- 

286 visit : `int` 

287 Visit number. Used in the output catalogs. 

288 band : `str` 

289 Band name. Used to select reserved stars. 

290 isolated_star_cat_dict : `dict` 

291 Per-tract dict of isolated star catalog handles. 

292 isolated_star_source_dict : `dict` 

293 Per-tract dict of isolated star source catalog handles. 

294 src_dict : `dict` 

295 Per-detector dict of src catalog handles. 

296 calexp_dict : `dict` 

297 Per-detector dict of calibrated exposure handles. 

298 

299 Returns 

300 ------- 

301 struct : `lsst.pipe.base.Struct`

302 Struct with outputs for persistence. 

303 """ 

304 # We do not need the isolated star table in this task. 

305 # However, it is used in tests to confirm consistency of indexes. 

306 _, isolated_source_table = self.concat_isolated_star_cats( 

307 band, 

308 isolated_star_cat_dict, 

309 isolated_star_source_dict 

310 ) 

311 

312 exposure_cat_schema = afwTable.ExposureTable.makeMinimalSchema() 

313 exposure_cat_schema.addField('visit', type='L', doc='Visit number') 

314 

315 metadata = dafBase.PropertyList() 

316 metadata.add("COMMENT", "Catalog id is detector id, sorted.") 

317 metadata.add("COMMENT", "Only detectors with data have entries.") 

318 

319 psf_ap_corr_cat = afwTable.ExposureCatalog(exposure_cat_schema) 

320 psf_ap_corr_cat.setMetadata(metadata) 

321 

322 measured_src_tables = [] 

323 

324 for detector in src_dict: 

325 src = src_dict[detector].get() 

326 exposure = calexp_dict[detector].get() 

327 

328 psf, ap_corr_map, measured_src = self.compute_psf_and_ap_corr_map( 

329 visit, 

330 detector, 

331 exposure, 

332 src, 

333 isolated_source_table 

334 ) 

335 

336 # And now we package it together... 

337 record = psf_ap_corr_cat.addNew() 

338 record['id'] = int(detector) 

339 record['visit'] = visit 

340 if psf is not None: 

341 record.setPsf(psf) 

342 if ap_corr_map is not None: 

343 record.setApCorrMap(ap_corr_map) 

344 

345 measured_src['visit'][:] = visit 

346 measured_src['detector'][:] = detector 

347 

348 measured_src_tables.append(measured_src.asAstropy().as_array()) 

349 

350 measured_src_table = np.concatenate(measured_src_tables) 

351 

352 return pipeBase.Struct(psf_ap_corr_cat=psf_ap_corr_cat, 

353 output_table=measured_src_table) 

354 

355 def _make_output_schema_mapper(self, input_schema): 

356 """Make the schema mapper from the input schema to the output schema. 

357 

358 Parameters 

359 ---------- 

360 input_schema : `lsst.afw.table.Schema` 

361 Input schema. 

362 

363 Returns 

364 ------- 

365 mapper : `lsst.afw.table.SchemaMapper` 

366 Schema mapper 

367 output_schema : `lsst.afw.table.Schema` 

368 Output schema (with alias map) 

369 """ 

370 mapper = afwTable.SchemaMapper(input_schema) 

371 mapper.addMinimalSchema(afwTable.SourceTable.makeMinimalSchema()) 

372 mapper.addMapping(input_schema['slot_Centroid_x'].asKey()) 

373 mapper.addMapping(input_schema['slot_Centroid_y'].asKey()) 

374 

375 # The aperture fields may be used by the psf determiner. 

376 aper_fields = input_schema.extract('base_CircularApertureFlux_*') 

377 for field, item in aper_fields.items(): 

378 mapper.addMapping(item.key) 

379 

380 # The following two may be redundant, but then the mapping is a no-op. 

381 apflux_fields = input_schema.extract('slot_ApFlux_*') 

382 for field, item in apflux_fields.items(): 

383 mapper.addMapping(item.key) 

384 

385 calibflux_fields = input_schema.extract('slot_CalibFlux_*') 

386 for field, item in calibflux_fields.items(): 

387 mapper.addMapping(item.key) 

388 

389 mapper.addMapping( 

390 input_schema[self.config.source_selector.active.signalToNoise.fluxField].asKey(), 

391 'calib_psf_selection_flux') 

392 mapper.addMapping( 

393 input_schema[self.config.source_selector.active.signalToNoise.errField].asKey(), 

394 'calib_psf_selection_flux_err') 

395 

396 output_schema = mapper.getOutputSchema() 

397 

398 output_schema.addField( 

399 'calib_psf_candidate', 

400 type='Flag', 

401 doc=('set if the source was a candidate for PSF determination, '

402 'as determined by FinalizeCharacterizationTask.'),

403 ) 

404 output_schema.addField( 

405 'calib_psf_reserved', 

406 type='Flag', 

407 doc=('set if source was reserved from PSF determination by ' 

408 'FinalizeCharacterizationTask.'), 

409 ) 

410 output_schema.addField( 

411 'calib_psf_used', 

412 type='Flag', 

413 doc=('set if source was used in the PSF determination by ' 

414 'FinalizeCharacterizationTask.'), 

415 ) 

416 output_schema.addField( 

417 'visit', 

418 type=np.int64, 

419 doc='Visit number for the sources.', 

420 ) 

421 output_schema.addField( 

422 'detector', 

423 type=np.int32, 

424 doc='Detector number for the sources.', 

425 ) 

426 

427 alias_map = input_schema.getAliasMap() 

428 alias_map_output = afwTable.AliasMap() 

429 alias_map_output.set('slot_Centroid', alias_map.get('slot_Centroid')) 

430 alias_map_output.set('slot_ApFlux', alias_map.get('slot_ApFlux')) 

431 alias_map_output.set('slot_CalibFlux', alias_map.get('slot_CalibFlux')) 

432 

433 output_schema.setAliasMap(alias_map_output) 

434 

435 return mapper, output_schema 

436 

437 def _make_selection_schema_mapper(self, input_schema): 

438 """Make the schema mapper from the input schema to the selection schema. 

439 

440 Parameters 

441 ---------- 

442 input_schema : `lsst.afw.table.Schema` 

443 Input schema. 

444 

445 Returns 

446 ------- 

447 mapper : `lsst.afw.table.SchemaMapper` 

448 Schema mapper 

449 selection_schema : `lsst.afw.table.Schema` 

450 Selection schema (with alias map) 

451 """ 

452 mapper = afwTable.SchemaMapper(input_schema) 

453 mapper.addMinimalSchema(input_schema) 

454 

455 selection_schema = mapper.getOutputSchema() 

456 

457 selection_schema.setAliasMap(input_schema.getAliasMap()) 

458 

459 return mapper, selection_schema 

460 

461 def concat_isolated_star_cats(self, band, isolated_star_cat_dict, isolated_star_source_dict): 

462 """ 

463 Concatenate isolated star catalogs and make reserve selection. 

464 

465 Parameters 

466 ---------- 

467 band : `str` 

468 Band name. Used to select reserved stars. 

469 isolated_star_cat_dict : `dict` 

470 Per-tract dict of isolated star catalog handles. 

471 isolated_star_source_dict : `dict` 

472 Per-tract dict of isolated star source catalog handles. 

473 

474 Returns 

475 ------- 

476 isolated_table : `np.ndarray` (N,) 

477 Table of isolated stars, with indexes to isolated sources. 

478 isolated_source_table : `np.ndarray` (M,) 

479 Table of isolated sources, with indexes to isolated stars. 

480 """ 

481 isolated_tables = [] 

482 isolated_sources = [] 

483 merge_cat_counter = 0 

484 merge_source_counter = 0 

485 

486 for tract in isolated_star_cat_dict: 

487 df_cat = isolated_star_cat_dict[tract].get() 

488 table_cat = df_cat.to_records() 

489 

490 df_source = isolated_star_source_dict[tract].get( 

491 parameters={'columns': [self.config.id_column, 

492 'obj_index']} 

493 ) 

494 table_source = df_source.to_records() 

495 

496 # Cut isolated star table to those observed in this band, and adjust indexes 

497 (use_band,) = (table_cat[f'nsource_{band}'] > 0).nonzero() 

498 

499 if len(use_band) == 0: 

500 # There are no sources in this band in this tract. 

501 self.log.info("No sources found in %s band in tract %d.", band, tract) 

502 continue 

503 

504 # With the following matching: 

505 # table_source[b] <-> table_cat[use_band[a]] 

506 obj_index = table_source['obj_index'][:] 

507 a, b = esutil.numpy_util.match(use_band, obj_index) 

508 

509 # Update indexes and cut to band-selected stars/sources 

510 table_source['obj_index'][b] = a 

511 _, index_new = np.unique(a, return_index=True) 

512 table_cat[f'source_cat_index_{band}'][use_band] = index_new 

513 

514 # After the following cuts, the catalogs have the following properties: 

515 # - table_cat only contains isolated stars that have at least one source 

516 # in ``band``. 

517 # - table_source only contains ``band`` sources. 

518 # - The slice table_cat["source_cat_index_{band}"]: table_cat["source_cat_index_{band}"] 

519 #   + table_cat["nsource_{band}"]

520 # applied to table_source will give all the sources associated with the star (see the index sketch after this method).

521 # - For each source, table_source["obj_index"] points to the index of the associated 

522 # isolated star. 

523 table_source = table_source[b] 

524 table_cat = table_cat[use_band] 

525 

526 # Add reserved flag column to tables 

527 table_cat = np.lib.recfunctions.append_fields( 

528 table_cat, 

529 'reserved', 

530 np.zeros(table_cat.size, dtype=bool), 

531 usemask=False 

532 ) 

533 table_source = np.lib.recfunctions.append_fields( 

534 table_source, 

535 'reserved', 

536 np.zeros(table_source.size, dtype=bool), 

537 usemask=False 

538 ) 

539 

540 # Get reserve star flags 

541 table_cat['reserved'][:] = self.reserve_selection.run( 

542 len(table_cat), 

543 extra=f'{band}_{tract}', 

544 ) 

545 table_source['reserved'][:] = table_cat['reserved'][table_source['obj_index']] 

546 

547 # Offset indexes to account for tract merging 

548 table_cat[f'source_cat_index_{band}'] += merge_source_counter 

549 table_source['obj_index'] += merge_cat_counter 

550 

551 isolated_tables.append(table_cat) 

552 isolated_sources.append(table_source) 

553 

554 merge_cat_counter += len(table_cat) 

555 merge_source_counter += len(table_source) 

556 

557 isolated_table = np.concatenate(isolated_tables) 

558 isolated_source_table = np.concatenate(isolated_sources) 

559 

560 return isolated_table, isolated_source_table 

561 
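The index sketch referenced in the comments above, illustrating the invariants of the two tables returned by concat_isolated_star_cats. This is not part of the module: 'task', the two handle dicts, and the 'i' band are placeholders.

import numpy as np

isolated_table, isolated_source_table = task.concat_isolated_star_cats(
    'i', isolated_star_cat_dict, isolated_star_source_dict)

# Sources for star j occupy a contiguous slice of the source table...
j = 0
start = isolated_table['source_cat_index_i'][j]
stop = start + isolated_table['nsource_i'][j]
star_sources = isolated_source_table[start: stop]

# ...and every source points back at its star through obj_index.
assert np.all(star_sources['obj_index'] == j)
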

562 def compute_psf_and_ap_corr_map(self, visit, detector, exposure, src, isolated_source_table): 

563 """Compute psf model and aperture correction map for a single exposure. 

564 

565 Parameters 

566 ---------- 

567 visit : `int` 

568 Visit number (for logging). 

569 detector : `int` 

570 Detector number (for logging). 

571 exposure : `lsst.afw.image.ExposureF`
    Calibrated exposure for this detector.

572 src : `lsst.afw.table.SourceCatalog`
    Source catalog from single-frame processing of this detector.

573 isolated_source_table : `np.ndarray`
    Table of isolated star sources (with ``reserved`` flags), as
    returned by `concat_isolated_star_cats`.

574 

575 Returns 

576 ------- 

577 psf : `lsst.meas.algorithms.ImagePsf` 

578 PSF Model 

579 ap_corr_map : `lsst.afw.image.ApCorrMap` 

580 Aperture correction map. 

581 measured_src : `lsst.afw.table.SourceCatalog` 

582 Updated source catalog with measurements, flags and aperture corrections. 

583 """ 

584 # Apply source selector (s/n, flags, etc.) 

585 good_src = self.source_selector.selectSources(src) 

586 

587 # Cut down input src to the selected sources 

588 # We use a separate schema/mapper here from the output/measurement catalog because of

589 # clashes between fields from the previous measurement run and those that need to be rerun with

590 # the new psf model. This may be slightly inefficient but keeps input

591 # and output values cleanly separated. 

592 selection_mapper, selection_schema = self._make_selection_schema_mapper(src.schema) 

593 

594 selected_src = afwTable.SourceCatalog(selection_schema) 

595 selected_src.reserve(good_src.selected.sum()) 

596 selected_src.extend(src[good_src.selected], mapper=selection_mapper) 

597 

598 # The calib flags have been copied from the input table, 

599 # and we reset them here just to ensure they aren't propagated. 

600 selected_src['calib_psf_candidate'] = np.zeros(len(selected_src), dtype=bool) 

601 selected_src['calib_psf_used'] = np.zeros(len(selected_src), dtype=bool) 

602 selected_src['calib_psf_reserved'] = np.zeros(len(selected_src), dtype=bool) 

603 

604 # Find the isolated sources and set flags 

605 matched_src, matched_iso = esutil.numpy_util.match( 

606 selected_src['id'], 

607 isolated_source_table[self.config.id_column] 

608 ) 

609 

610 matched_arr = np.zeros(len(selected_src), dtype=bool) 

611 matched_arr[matched_src] = True 

612 selected_src['calib_psf_candidate'] = matched_arr 

613 

614 reserved_arr = np.zeros(len(selected_src), dtype=bool) 

615 reserved_arr[matched_src] = isolated_source_table['reserved'][matched_iso] 

616 selected_src['calib_psf_reserved'] = reserved_arr 

617 

618 selected_src = selected_src[selected_src['calib_psf_candidate']].copy(deep=True) 

619 

620 # Make the measured source catalog as well, based on the selected catalog. 

621 measured_src = afwTable.SourceCatalog(self.schema) 

622 measured_src.reserve(len(selected_src)) 

623 measured_src.extend(selected_src, mapper=self.schema_mapper) 

624 

625 # We need to copy over the calib_psf flags because they were not in the mapper 

626 measured_src['calib_psf_candidate'] = selected_src['calib_psf_candidate'] 

627 measured_src['calib_psf_reserved'] = selected_src['calib_psf_reserved'] 

628 

629 # Select the psf candidates from the selection catalog 

630 try: 

631 psf_selection_result = self.make_psf_candidates.run(selected_src, exposure=exposure) 

632 except Exception as e: 

633 self.log.warning('Failed to make psf candidates for visit %d, detector %d: %s', 

634 visit, detector, e) 

635 return None, None, measured_src 

636 

637 psf_cand_cat = psf_selection_result.goodStarCat 

638 

639 # Make list of psf candidates to send to the determiner 

640 # (omitting those marked as reserved) 

641 psf_determiner_list = [cand for cand, use 

642 in zip(psf_selection_result.psfCandidates, 

643 ~psf_cand_cat['calib_psf_reserved']) if use] 

644 flag_key = psf_cand_cat.schema['calib_psf_used'].asKey() 

645 try: 

646 psf, cell_set = self.psf_determiner.determinePsf(exposure, 

647 psf_determiner_list, 

648 self.metadata, 

649 flagKey=flag_key) 

650 except Exception as e: 

651 self.log.warning('Failed to determine psf for visit %d, detector %d: %s', 

652 visit, detector, e) 

653 return None, None, measured_src 

654 

655 # At this point, we need to transfer the psf used flag from the selection 

656 # catalog to the measurement catalog. 

657 matched_selected, matched_measured = esutil.numpy_util.match( 

658 selected_src['id'], 

659 measured_src['id'] 

660 ) 

661 measured_used = np.zeros(len(measured_src), dtype=bool) 

662 measured_used[matched_measured] = selected_src['calib_psf_used'][matched_selected] 

663 measured_src['calib_psf_used'] = measured_used 

664 

665 # Next, we do the measurement on all the psf candidate, used, and reserved stars. 

666 try: 

667 self.measurement.run(measCat=measured_src, exposure=exposure) 

668 except Exception as e: 

669 self.log.warning('Failed to make measurements for visit %d, detector %d: %s', 

670 visit, detector, e) 

671 return psf, None, measured_src 

672 

673 # And finally the ap corr map. 

674 try: 

675 ap_corr_map = self.measure_ap_corr.run(exposure=exposure, 

676 catalog=measured_src).apCorrMap 

677 except Exception as e: 

678 self.log.warning('Failed to compute aperture corrections for visit %d, detector %d: %s', 

679 visit, detector, e) 

680 return psf, None, measured_src 

681 

682 self.apply_ap_corr.run(catalog=measured_src, apCorrMap=ap_corr_map) 

683 

684 return psf, ap_corr_map, measured_src
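
The calib_psf_candidate/used/reserved flags set here end up as columns of the per-visit finalized_src_table DataFrame, so a downstream consumer can split the PSF training and reserve samples directly. A minimal sketch, assuming 'df' is that DataFrame as read from a butler (as in the earlier sketch):

# 'df' is the finalized_src_table DataFrame for one visit.
used = df[df['calib_psf_used']]          # stars the PSF model was fit to
reserved = df[df['calib_psf_reserved']]  # stars withheld from the fit for validation
n_per_detector = df.groupby('detector').size()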