Coverage for python/lsst/pipe/tasks/finalizeCharacterization.py: 18%


239 statements  

#
# LSST Data Management System
# Copyright 2008-2022 AURA/LSST.
#
# This product includes software developed by the
# LSST Project (http://www.lsst.org/).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the LSST License Statement and
# the GNU General Public License along with this program. If not,
# see <http://www.lsstcorp.org/LegalNotices/>.
#
"""Task to run a finalized image characterization, using additional data.
"""
import numpy as np
import esutil
import pandas as pd

import lsst.pex.config as pexConfig
import lsst.pipe.base as pipeBase
import lsst.daf.base as dafBase
import lsst.afw.table as afwTable
import lsst.meas.algorithms as measAlg
import lsst.meas.extensions.piff.piffPsfDeterminer  # noqa: F401
from lsst.meas.algorithms import MeasureApCorrTask
from lsst.meas.base import SingleFrameMeasurementTask, ApplyApCorrTask
from lsst.meas.algorithms.sourceSelector import sourceSelectorRegistry

from .reserveIsolatedStars import ReserveIsolatedStarsTask

__all__ = ['FinalizeCharacterizationConnections',
           'FinalizeCharacterizationConfig',
           'FinalizeCharacterizationTask']


class FinalizeCharacterizationConnections(pipeBase.PipelineTaskConnections,
                                          dimensions=('instrument', 'visit',),
                                          defaultTemplates={}):
    src_schema = pipeBase.connectionTypes.InitInput(
        doc='Input schema used for src catalogs.',
        name='src_schema',
        storageClass='SourceCatalog',
    )
    srcs = pipeBase.connectionTypes.Input(
        doc='Source catalogs for the visit',
        name='src',
        storageClass='SourceCatalog',
        dimensions=('instrument', 'visit', 'detector'),
        deferLoad=True,
        multiple=True,
    )
    calexps = pipeBase.connectionTypes.Input(
        doc='Calexps for the visit',
        name='calexp',
        storageClass='ExposureF',
        dimensions=('instrument', 'visit', 'detector'),
        deferLoad=True,
        multiple=True,
    )
    isolated_star_cats = pipeBase.connectionTypes.Input(
        doc=('Catalog of isolated stars with average positions, number of associated '
             'sources, and indexes to the isolated_star_sources catalogs.'),
        name='isolated_star_cat',
        storageClass='DataFrame',
        dimensions=('instrument', 'tract', 'skymap'),
        deferLoad=True,
        multiple=True,
    )
    isolated_star_sources = pipeBase.connectionTypes.Input(
        doc=('Catalog of isolated star sources with sourceIds, and indexes to the '
             'isolated_star_cats catalogs.'),
        name='isolated_star_sources',
        storageClass='DataFrame',
        dimensions=('instrument', 'tract', 'skymap'),
        deferLoad=True,
        multiple=True,
    )
    finalized_psf_ap_corr_cat = pipeBase.connectionTypes.Output(
        doc=('Per-visit finalized psf models and aperture corrections. This '
             'catalog uses the detector id for the id and is sorted for fast '
             'lookups of a detector.'),
        name='finalized_psf_ap_corr_catalog',
        storageClass='ExposureCatalog',
        dimensions=('instrument', 'visit'),
    )
    finalized_src_table = pipeBase.connectionTypes.Output(
        doc=('Per-visit catalog of measurements for psf/flag/etc.'),
        name='finalized_src_table',
        storageClass='DataFrame',
        dimensions=('instrument', 'visit'),
    )


class FinalizeCharacterizationConfig(pipeBase.PipelineTaskConfig,
                                     pipelineConnections=FinalizeCharacterizationConnections):
    """Configuration for FinalizeCharacterizationTask."""
    source_selector = sourceSelectorRegistry.makeField(
        doc="How to select sources",
        default="science"
    )
    id_column = pexConfig.Field(
        doc='Name of column in isolated_star_sources with source id.',
        dtype=str,
        default='sourceId',
    )
    reserve_selection = pexConfig.ConfigurableField(
        target=ReserveIsolatedStarsTask,
        doc='Task to select reserved stars',
    )
    make_psf_candidates = pexConfig.ConfigurableField(
        target=measAlg.MakePsfCandidatesTask,
        doc='Task to make psf candidates from selected stars.',
    )
    psf_determiner = measAlg.psfDeterminerRegistry.makeField(
        'PSF Determination algorithm',
        default='piff'
    )
    measurement = pexConfig.ConfigurableField(
        target=SingleFrameMeasurementTask,
        doc='Measure sources for aperture corrections'
    )
    measure_ap_corr = pexConfig.ConfigurableField(
        target=MeasureApCorrTask,
        doc="Subtask to measure aperture corrections"
    )
    apply_ap_corr = pexConfig.ConfigurableField(
        target=ApplyApCorrTask,
        doc="Subtask to apply aperture corrections"
    )

    def setDefaults(self):
        super().setDefaults()

        source_selector = self.source_selector['science']
        source_selector.setDefaults()

        # We use the source selector only to select out flagged objects
        # and signal-to-noise. Isolated, unresolved sources are handled
        # by the isolated star catalog.

        source_selector.doFlags = True
        source_selector.doSignalToNoise = True
        source_selector.doFluxLimit = False
        source_selector.doUnresolved = False
        source_selector.doIsolated = False

        source_selector.signalToNoise.minimum = 20.0
        source_selector.signalToNoise.maximum = 1000.0

        source_selector.signalToNoise.fluxField = 'base_GaussianFlux_instFlux'
        source_selector.signalToNoise.errField = 'base_GaussianFlux_instFluxErr'

        source_selector.flags.bad = ['base_PixelFlags_flag_edge',
                                     'base_PixelFlags_flag_interpolatedCenter',
                                     'base_PixelFlags_flag_saturatedCenter',
                                     'base_PixelFlags_flag_crCenter',
                                     'base_PixelFlags_flag_bad',
                                     'base_PixelFlags_flag_interpolated',
                                     'base_PixelFlags_flag_saturated',
                                     'slot_Centroid_flag',
                                     'base_GaussianFlux_flag']

        self.measure_ap_corr.sourceSelector['flagged'].field = 'calib_psf_used'

        import lsst.meas.modelfit  # noqa: F401
        import lsst.meas.extensions.photometryKron  # noqa: F401
        import lsst.meas.extensions.convolved  # noqa: F401
        import lsst.meas.extensions.gaap  # noqa: F401
        import lsst.meas.extensions.shapeHSM  # noqa: F401

        # Set up measurement defaults
        self.measurement.plugins.names = [
            'base_PsfFlux',
            'base_GaussianFlux',
            'modelfit_DoubleShapeletPsfApprox',
            'modelfit_CModel',
            'ext_photometryKron_KronFlux',
            'ext_convolved_ConvolvedFlux',
            'ext_gaap_GaapFlux',
            'ext_shapeHSM_HsmShapeRegauss',
            'ext_shapeHSM_HsmSourceMoments',
            'ext_shapeHSM_HsmPsfMoments',
            'ext_shapeHSM_HsmSourceMomentsRound',
        ]
        self.measurement.slots.modelFlux = 'modelfit_CModel'
        self.measurement.plugins['ext_convolved_ConvolvedFlux'].seeing.append(8.0)
        self.measurement.plugins['ext_gaap_GaapFlux'].sigmas = [
            0.5,
            0.7,
            1.0,
            1.5,
            2.5,
            3.0
        ]
        self.measurement.plugins['ext_gaap_GaapFlux'].doPsfPhotometry = True
        self.measurement.slots.shape = 'ext_shapeHSM_HsmSourceMoments'
        self.measurement.slots.psfShape = 'ext_shapeHSM_HsmPsfMoments'
        self.measurement.plugins['ext_shapeHSM_HsmShapeRegauss'].deblendNChild = ""
        # Turn off the measurement slots for centroid and the aperture/calib
        # fluxes (we use the input src catalog measurements for these).
        self.measurement.slots.centroid = None
        self.measurement.slots.apFlux = None
        self.measurement.slots.calibFlux = None

        names = self.measurement.plugins['ext_convolved_ConvolvedFlux'].getAllResultNames()
        self.measure_ap_corr.allowFailure += names
        names = self.measurement.plugins["ext_gaap_GaapFlux"].getAllGaapResultNames()
        self.measure_ap_corr.allowFailure += names
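
# A hypothetical override sketch (not part of the task): how the defaults set
# above might be adjusted in a downstream config override or interactive
# session. The values are illustrative only; the fields are ones already
# referenced in setDefaults.
#
#     config = FinalizeCharacterizationConfig()
#     config.source_selector['science'].signalToNoise.minimum = 50.0
#     config.source_selector['science'].signalToNoise.maximum = 500.0
#     config.id_column = 'sourceId'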

class FinalizeCharacterizationTask(pipeBase.PipelineTask):
    """Run final characterization on exposures."""
    ConfigClass = FinalizeCharacterizationConfig
    _DefaultName = 'finalize_characterization'

    def __init__(self, initInputs=None, **kwargs):
        super().__init__(initInputs=initInputs, **kwargs)

        self.schema_mapper, self.schema = self._make_output_schema_mapper(
            initInputs['src_schema'].schema
        )

        self.makeSubtask('reserve_selection')
        self.makeSubtask('source_selector')
        self.makeSubtask('make_psf_candidates')
        self.makeSubtask('psf_determiner')
        self.makeSubtask('measurement', schema=self.schema)
        self.makeSubtask('measure_ap_corr', schema=self.schema)
        self.makeSubtask('apply_ap_corr', schema=self.schema)

        # Only log warning and fatal errors from the source_selector
        self.source_selector.log.setLevel(self.source_selector.log.WARN)

    def runQuantum(self, butlerQC, inputRefs, outputRefs):
        input_handle_dict = butlerQC.get(inputRefs)

        band = butlerQC.quantum.dataId['band']
        visit = butlerQC.quantum.dataId['visit']

        src_dict_temp = {handle.dataId['detector']: handle
                         for handle in input_handle_dict['srcs']}
        calexp_dict_temp = {handle.dataId['detector']: handle
                            for handle in input_handle_dict['calexps']}
        isolated_star_cat_dict_temp = {handle.dataId['tract']: handle
                                       for handle in input_handle_dict['isolated_star_cats']}
        isolated_star_source_dict_temp = {handle.dataId['tract']: handle
                                          for handle in input_handle_dict['isolated_star_sources']}
        # TODO: Sort until DM-31701 is done and we have deterministic
        # dataset ordering.
        src_dict = {detector: src_dict_temp[detector] for
                    detector in sorted(src_dict_temp.keys())}
        calexp_dict = {detector: calexp_dict_temp[detector] for
                       detector in sorted(calexp_dict_temp.keys())}
        isolated_star_cat_dict = {tract: isolated_star_cat_dict_temp[tract] for
                                  tract in sorted(isolated_star_cat_dict_temp.keys())}
        isolated_star_source_dict = {tract: isolated_star_source_dict_temp[tract] for
                                     tract in sorted(isolated_star_source_dict_temp.keys())}

        struct = self.run(visit,
                          band,
                          isolated_star_cat_dict,
                          isolated_star_source_dict,
                          src_dict,
                          calexp_dict)

        butlerQC.put(struct.psf_ap_corr_cat,
                     outputRefs.finalized_psf_ap_corr_cat)
        butlerQC.put(pd.DataFrame(struct.output_table),
                     outputRefs.finalized_src_table)

    def run(self, visit, band, isolated_star_cat_dict, isolated_star_source_dict, src_dict, calexp_dict):
        """
        Run the FinalizeCharacterizationTask.

        Parameters
        ----------
        visit : `int`
            Visit number. Used in the output catalogs.
        band : `str`
            Band name. Used to select reserved stars.
        isolated_star_cat_dict : `dict`
            Per-tract dict of isolated star catalog handles.
        isolated_star_source_dict : `dict`
            Per-tract dict of isolated star source catalog handles.
        src_dict : `dict`
            Per-detector dict of src catalog handles.
        calexp_dict : `dict`
            Per-detector dict of calibrated exposure handles.

        Returns
        -------
        struct : `lsst.pipe.base.Struct`
            Struct with outputs for persistence.
        """
        # We do not need the isolated star table in this task.
        # However, it is used in tests to confirm consistency of indexes.
        _, isolated_source_table = self.concat_isolated_star_cats(
            band,
            isolated_star_cat_dict,
            isolated_star_source_dict
        )

        exposure_cat_schema = afwTable.ExposureTable.makeMinimalSchema()
        exposure_cat_schema.addField('visit', type='L', doc='Visit number')

        metadata = dafBase.PropertyList()
        metadata.add("COMMENT", "Catalog id is detector id, sorted.")
        metadata.add("COMMENT", "Only detectors with data have entries.")

        psf_ap_corr_cat = afwTable.ExposureCatalog(exposure_cat_schema)
        psf_ap_corr_cat.setMetadata(metadata)

        measured_src_tables = []

        for detector in src_dict:
            src = src_dict[detector].get()
            exposure = calexp_dict[detector].get()

            psf, ap_corr_map, measured_src = self.compute_psf_and_ap_corr_map(
                visit,
                detector,
                exposure,
                src,
                isolated_source_table
            )

            # And now we package it together...
            record = psf_ap_corr_cat.addNew()
            record['id'] = int(detector)
            record['visit'] = visit
            if psf is not None:
                record.setPsf(psf)
            if ap_corr_map is not None:
                record.setApCorrMap(ap_corr_map)

            measured_src['visit'][:] = visit
            measured_src['detector'][:] = detector

            measured_src_tables.append(measured_src.asAstropy().as_array())

        measured_src_table = np.concatenate(measured_src_tables)

        return pipeBase.Struct(psf_ap_corr_cat=psf_ap_corr_cat,
                               output_table=measured_src_table)

    def _make_output_schema_mapper(self, input_schema):
        """Make the schema mapper from the input schema to the output schema.

        Parameters
        ----------
        input_schema : `lsst.afw.table.Schema`
            Input schema.

        Returns
        -------
        mapper : `lsst.afw.table.SchemaMapper`
            Schema mapper
        output_schema : `lsst.afw.table.Schema`
            Output schema (with alias map)
        """
        mapper = afwTable.SchemaMapper(input_schema)
        mapper.addMinimalSchema(afwTable.SourceTable.makeMinimalSchema())
        mapper.addMapping(input_schema['slot_Centroid_x'].asKey())
        mapper.addMapping(input_schema['slot_Centroid_y'].asKey())

        # The aperture fields may be used by the psf determiner.
        aper_fields = input_schema.extract('base_CircularApertureFlux_*')
        for field, item in aper_fields.items():
            mapper.addMapping(item.key)

        # The following two may be redundant, but then the mapping is a no-op.
        apflux_fields = input_schema.extract('slot_ApFlux_*')
        for field, item in apflux_fields.items():
            mapper.addMapping(item.key)

        calibflux_fields = input_schema.extract('slot_CalibFlux_*')
        for field, item in calibflux_fields.items():
            mapper.addMapping(item.key)

        mapper.addMapping(
            input_schema[self.config.source_selector.active.signalToNoise.fluxField].asKey(),
            'calib_psf_selection_flux')
        mapper.addMapping(
            input_schema[self.config.source_selector.active.signalToNoise.errField].asKey(),
            'calib_psf_selection_flux_err')

        output_schema = mapper.getOutputSchema()

        output_schema.addField(
            'calib_psf_candidate',
            type='Flag',
            doc=('set if the source was a candidate for PSF determination, '
                 'as determined from FinalizeCharacterizationTask.'),
        )
        output_schema.addField(
            'calib_psf_reserved',
            type='Flag',
            doc=('set if source was reserved from PSF determination by '
                 'FinalizeCharacterizationTask.'),
        )
        output_schema.addField(
            'calib_psf_used',
            type='Flag',
            doc=('set if source was used in the PSF determination by '
                 'FinalizeCharacterizationTask.'),
        )
        output_schema.addField(
            'visit',
            type=np.int64,
            doc='Visit number for the sources.',
        )
        output_schema.addField(
            'detector',
            type=np.int32,
            doc='Detector number for the sources.',
        )

        alias_map = input_schema.getAliasMap()
        alias_map_output = afwTable.AliasMap()
        alias_map_output.set('slot_Centroid', alias_map.get('slot_Centroid'))
        alias_map_output.set('slot_ApFlux', alias_map.get('slot_ApFlux'))
        alias_map_output.set('slot_CalibFlux', alias_map.get('slot_CalibFlux'))

        output_schema.setAliasMap(alias_map_output)

        return mapper, output_schema

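    # For orientation only (a sketch mirroring compute_psf_and_ap_corr_map
    # below): the mapper/schema pair built above is used to populate the
    # measurement catalog via the usual afw.table pattern.
    #
    #     measured_src = afwTable.SourceCatalog(self.schema)
    #     measured_src.reserve(len(selected_src))
    #     measured_src.extend(selected_src, mapper=self.schema_mapper)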

    def _make_selection_schema_mapper(self, input_schema):
        """Make the schema mapper from the input schema to the selection schema.

        Parameters
        ----------
        input_schema : `lsst.afw.table.Schema`
            Input schema.

        Returns
        -------
        mapper : `lsst.afw.table.SchemaMapper`
            Schema mapper
        selection_schema : `lsst.afw.table.Schema`
            Selection schema (with alias map)
        """
        mapper = afwTable.SchemaMapper(input_schema)
        mapper.addMinimalSchema(input_schema)

        selection_schema = mapper.getOutputSchema()

        selection_schema.setAliasMap(input_schema.getAliasMap())

        return mapper, selection_schema

    def concat_isolated_star_cats(self, band, isolated_star_cat_dict, isolated_star_source_dict):
        """
        Concatenate isolated star catalogs and make reserve selection.

        Parameters
        ----------
        band : `str`
            Band name. Used to select reserved stars.
        isolated_star_cat_dict : `dict`
            Per-tract dict of isolated star catalog handles.
        isolated_star_source_dict : `dict`
            Per-tract dict of isolated star source catalog handles.

        Returns
        -------
        isolated_table : `np.ndarray` (N,)
            Table of isolated stars, with indexes to isolated sources.
        isolated_source_table : `np.ndarray` (M,)
            Table of isolated sources, with indexes to isolated stars.
        """
        isolated_tables = []
        isolated_sources = []
        merge_cat_counter = 0
        merge_source_counter = 0

        for tract in isolated_star_cat_dict:
            df_cat = isolated_star_cat_dict[tract].get()
            table_cat = df_cat.to_records()

            df_source = isolated_star_source_dict[tract].get(
                parameters={'columns': [self.config.id_column,
                                        'obj_index']}
            )
            table_source = df_source.to_records()

            # Cut isolated star table to those observed in this band, and adjust indexes
            (use_band,) = (table_cat[f'nsource_{band}'] > 0).nonzero()

            # With the following matching:
            #   table_source[b] <-> table_cat[use_band[a]]
            obj_index = table_source['obj_index'][:]
            a, b = esutil.numpy_util.match(use_band, obj_index)
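
            # A hedged, doctest-style illustration (hypothetical values) of the
            # match semantics used here: esutil.numpy_util.match returns index
            # arrays such that the matched elements are equal pairwise.
            #
            #     >>> import numpy as np
            #     >>> import esutil
            #     >>> x = np.array([2, 5, 7])
            #     >>> y = np.array([5, 2, 5, 9])
            #     >>> i, j = esutil.numpy_util.match(x, y)
            #     >>> bool(np.all(x[i] == y[j]))
            #     True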

            # Update indexes and cut to band-selected stars/sources
            table_source['obj_index'][b] = a
            _, index_new = np.unique(a, return_index=True)
            table_cat[f'source_cat_index_{band}'][use_band] = index_new
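
            # Hypothetical illustration of the return_index trick above:
            # np.unique gives, for each distinct star index, the position of
            # its first associated source.
            #
            #     >>> np.unique(np.array([0, 1, 1, 2, 2, 2]), return_index=True)
            #     (array([0, 1, 2]), array([0, 1, 3]))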

            # After the following cuts, the catalogs have the following properties:
            # - table_cat only contains isolated stars that have at least one source
            #   in ``band``.
            # - table_source only contains ``band`` sources.
            # - The slice table_cat["source_cat_index_{band}"]: table_cat["source_cat_index_{band}"]
            #   + table_cat["nsource_{band}"]
            #   applied to table_source will give all the sources associated with the star.
            # - For each source, table_source["obj_index"] points to the index of the associated
            #   isolated star.
            table_source = table_source[b]
            table_cat = table_cat[use_band]

            # Add reserved flag column to tables
            table_cat = np.lib.recfunctions.append_fields(
                table_cat,
                'reserved',
                np.zeros(table_cat.size, dtype=bool),
                usemask=False
            )
            table_source = np.lib.recfunctions.append_fields(
                table_source,
                'reserved',
                np.zeros(table_source.size, dtype=bool),
                usemask=False
            )

            # Get reserve star flags
            table_cat['reserved'][:] = self.reserve_selection.run(
                len(table_cat),
                extra=f'{band}_{tract}',
            )
            table_source['reserved'][:] = table_cat['reserved'][table_source['obj_index']]

            # Offset indexes to account for tract merging
            table_cat[f'source_cat_index_{band}'] += merge_source_counter
            table_source['obj_index'] += merge_cat_counter

            isolated_tables.append(table_cat)
            isolated_sources.append(table_source)

            merge_cat_counter += len(table_cat)
            merge_source_counter += len(table_source)

        isolated_table = np.concatenate(isolated_tables)
        isolated_source_table = np.concatenate(isolated_sources)

        return isolated_table, isolated_source_table

    def compute_psf_and_ap_corr_map(self, visit, detector, exposure, src, isolated_source_table):
        """Compute psf model and aperture correction map for a single exposure.

        Parameters
        ----------
        visit : `int`
            Visit number (for logging).
        detector : `int`
            Detector number (for logging).
        exposure : `lsst.afw.image.ExposureF`
            Exposure on which to compute the psf model and aperture corrections.
        src : `lsst.afw.table.SourceCatalog`
            Input source catalog for the exposure.
        isolated_source_table : `np.ndarray`
            Table of isolated star sources, including reserved flags.

        Returns
        -------
        psf : `lsst.meas.algorithms.ImagePsf`
            PSF Model
        ap_corr_map : `lsst.afw.image.ApCorrMap`
            Aperture correction map.
        measured_src : `lsst.afw.table.SourceCatalog`
            Updated source catalog with measurements, flags and aperture corrections.
        """
        # Apply source selector (s/n, flags, etc.)
        good_src = self.source_selector.selectSources(src)

        # Cut down the input src catalog to the selected sources.
        # We use a separate schema/mapper here from the one used for the output/measurement
        # catalog because of clashes between fields that were previously run and those that
        # need to be rerun with the new psf model. This may be slightly inefficient, but it
        # keeps input and output values cleanly separated.
        selection_mapper, selection_schema = self._make_selection_schema_mapper(src.schema)

        selected_src = afwTable.SourceCatalog(selection_schema)
        selected_src.reserve(good_src.selected.sum())
        selected_src.extend(src[good_src.selected], mapper=selection_mapper)

        # The calib flags have been copied from the input table,
        # and we reset them here just to ensure they aren't propagated.
        selected_src['calib_psf_candidate'] = np.zeros(len(selected_src), dtype=bool)
        selected_src['calib_psf_used'] = np.zeros(len(selected_src), dtype=bool)
        selected_src['calib_psf_reserved'] = np.zeros(len(selected_src), dtype=bool)

        # Find the isolated sources and set flags
        matched_src, matched_iso = esutil.numpy_util.match(
            selected_src['id'],
            isolated_source_table[self.config.id_column]
        )

        matched_arr = np.zeros(len(selected_src), dtype=bool)
        matched_arr[matched_src] = True
        selected_src['calib_psf_candidate'] = matched_arr

        reserved_arr = np.zeros(len(selected_src), dtype=bool)
        reserved_arr[matched_src] = isolated_source_table['reserved'][matched_iso]
        selected_src['calib_psf_reserved'] = reserved_arr

        selected_src = selected_src[selected_src['calib_psf_candidate']].copy(deep=True)

        # Make the measured source catalog as well, based on the selected catalog.
        measured_src = afwTable.SourceCatalog(self.schema)
        measured_src.reserve(len(selected_src))
        measured_src.extend(selected_src, mapper=self.schema_mapper)

        # We need to copy over the calib_psf flags because they were not in the mapper
        measured_src['calib_psf_candidate'] = selected_src['calib_psf_candidate']
        measured_src['calib_psf_reserved'] = selected_src['calib_psf_reserved']

        # Select the psf candidates from the selection catalog
        try:
            psf_selection_result = self.make_psf_candidates.run(selected_src, exposure=exposure)
        except Exception as e:
            self.log.warning('Failed to make psf candidates for visit %d, detector %d: %s',
                             visit, detector, e)
            return None, None, measured_src

        psf_cand_cat = psf_selection_result.goodStarCat

        # Make list of psf candidates to send to the determiner
        # (omitting those marked as reserved)
        psf_determiner_list = [cand for cand, use
                               in zip(psf_selection_result.psfCandidates,
                                      ~psf_cand_cat['calib_psf_reserved']) if use]
        flag_key = psf_cand_cat.schema['calib_psf_used'].asKey()
        try:
            psf, cell_set = self.psf_determiner.determinePsf(exposure,
                                                             psf_determiner_list,
                                                             self.metadata,
                                                             flagKey=flag_key)
        except Exception as e:
            self.log.warning('Failed to determine psf for visit %d, detector %d: %s',
                             visit, detector, e)
            return None, None, measured_src

        # At this point, we need to transfer the psf used flag from the selection
        # catalog to the measurement catalog.
        matched_selected, matched_measured = esutil.numpy_util.match(
            selected_src['id'],
            measured_src['id']
        )
        measured_used = np.zeros(len(measured_src), dtype=bool)
        measured_used[matched_measured] = selected_src['calib_psf_used'][matched_selected]
        measured_src['calib_psf_used'] = measured_used

        # Next, we do the measurement on all the psf candidate, used, and reserved stars.
        try:
            self.measurement.run(measCat=measured_src, exposure=exposure)
        except Exception as e:
            self.log.warning('Failed to make measurements for visit %d, detector %d: %s',
                             visit, detector, e)
            return psf, None, measured_src

        # And finally the ap corr map.
        try:
            ap_corr_map = self.measure_ap_corr.run(exposure=exposure,
                                                   catalog=measured_src).apCorrMap
        except Exception as e:
            self.log.warning('Failed to compute aperture corrections for visit %d, detector %d: %s',
                             visit, detector, e)
            return psf, None, measured_src

        self.apply_ap_corr.run(catalog=measured_src, apCorrMap=ap_corr_map)

        return psf, ap_corr_map, measured_src
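

# A minimal downstream sketch (not part of this module) of reading the
# persisted outputs of this task. It assumes a configured
# `lsst.daf.butler.Butler` named ``butler`` and an existing dataId; the
# instrument and visit values are placeholders.
#
#     cat = butler.get('finalized_psf_ap_corr_catalog', instrument='MyCam', visit=1234)
#     for record in cat:
#         detector_id = record['id']           # catalog id is the detector id
#         psf = record.getPsf()                # may be None if determination failed
#         ap_corr_map = record.getApCorrMap()  # may be None on failure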