#
# LSST Data Management System
# Copyright 2008-2022 AURA/LSST.
#
# This product includes software developed by the
# LSST Project (http://www.lsst.org/).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the LSST License Statement and
# the GNU General Public License along with this program.  If not,
# see <http://www.lsstcorp.org/LegalNotices/>.
#
"""Task to run a finalized image characterization, using additional data.
"""
import numpy as np
import esutil
import pandas as pd

import lsst.pex.config as pexConfig
import lsst.pipe.base as pipeBase
import lsst.daf.base as dafBase
import lsst.afw.table as afwTable
import lsst.meas.algorithms as measAlg
import lsst.meas.extensions.psfex.psfexPsfDeterminer  # noqa: F401
from lsst.meas.algorithms import MeasureApCorrTask
from lsst.meas.base import SingleFrameMeasurementTask, ApplyApCorrTask
from lsst.meas.algorithms.sourceSelector import sourceSelectorRegistry

from .reserveIsolatedStars import ReserveIsolatedStarsTask

__all__ = ['FinalizeCharacterizationConnections',
           'FinalizeCharacterizationConfig',
           'FinalizeCharacterizationTask']


class FinalizeCharacterizationConnections(pipeBase.PipelineTaskConnections,
                                          dimensions=('instrument', 'visit',),
                                          defaultTemplates={}):
    src_schema = pipeBase.connectionTypes.InitInput(
        doc='Input schema used for src catalogs.',
        name='src_schema',
        storageClass='SourceCatalog',
    )
    srcs = pipeBase.connectionTypes.Input(
        doc='Source catalogs for the visit',
        name='src',
        storageClass='SourceCatalog',
        dimensions=('instrument', 'visit', 'detector'),
        deferLoad=True,
        multiple=True,
    )
    calexps = pipeBase.connectionTypes.Input(
        doc='Calexps for the visit',
        name='calexp',
        storageClass='ExposureF',
        dimensions=('instrument', 'visit', 'detector'),
        deferLoad=True,
        multiple=True,
    )
    isolated_star_cats = pipeBase.connectionTypes.Input(
        doc=('Catalog of isolated stars with average positions, number of associated '
             'sources, and indexes to the isolated_star_sources catalogs.'),
        name='isolated_star_cat',
        storageClass='DataFrame',
        dimensions=('instrument', 'tract', 'skymap'),
        deferLoad=True,
        multiple=True,
    )
    isolated_star_sources = pipeBase.connectionTypes.Input(
        doc=('Catalog of isolated star sources with sourceIds, and indexes to the '
             'isolated_star_cats catalogs.'),
        name='isolated_star_sources',
        storageClass='DataFrame',
        dimensions=('instrument', 'tract', 'skymap'),
        deferLoad=True,
        multiple=True,
    )
    finalized_psf_ap_corr_cat = pipeBase.connectionTypes.Output(
        doc=('Per-visit finalized psf models and aperture corrections.  This '
             'catalog uses the detector id for the record id, and is sorted '
             'for fast lookups of a detector.'),
        name='finalized_psf_ap_corr_catalog',
        storageClass='ExposureCatalog',
        dimensions=('instrument', 'visit'),
    )
    finalized_src_table = pipeBase.connectionTypes.Output(
        doc=('Per-visit catalog of measurements for psf/flag/etc.'),
        name='finalized_src_table',
        storageClass='DataFrame',
        dimensions=('instrument', 'visit'),
    )


class FinalizeCharacterizationConfig(pipeBase.PipelineTaskConfig,
                                     pipelineConnections=FinalizeCharacterizationConnections):
    """Configuration for FinalizeCharacterizationTask."""
    source_selector = sourceSelectorRegistry.makeField(
        doc="How to select sources",
        default="science"
    )
    id_column = pexConfig.Field(
        doc='Name of column in isolated_star_sources with source id.',
        dtype=str,
        default='sourceId',
    )
    reserve_selection = pexConfig.ConfigurableField(
        target=ReserveIsolatedStarsTask,
        doc='Task to select reserved stars',
    )
    make_psf_candidates = pexConfig.ConfigurableField(
        target=measAlg.MakePsfCandidatesTask,
        doc='Task to make psf candidates from selected stars.',
    )
    psf_determiner = measAlg.psfDeterminerRegistry.makeField(
        'PSF Determination algorithm',
        default='psfex'
    )
    measurement = pexConfig.ConfigurableField(
        target=SingleFrameMeasurementTask,
        doc='Measure sources for aperture corrections'
    )
    measure_ap_corr = pexConfig.ConfigurableField(
        target=MeasureApCorrTask,
        doc="Subtask to measure aperture corrections"
    )
    apply_ap_corr = pexConfig.ConfigurableField(
        target=ApplyApCorrTask,
        doc="Subtask to apply aperture corrections"
    )

    def setDefaults(self):
        super().setDefaults()

        source_selector = self.source_selector['science']
        source_selector.setDefaults()

        # The source selector is used only to reject flagged objects and to
        # apply a signal-to-noise cut.  Isolated, unresolved sources are
        # handled by the isolated star catalog.

        source_selector.doFlags = True
        source_selector.doSignalToNoise = True
        source_selector.doFluxLimit = False
        source_selector.doUnresolved = False
        source_selector.doIsolated = False

        source_selector.signalToNoise.minimum = 20.0
        source_selector.signalToNoise.maximum = 1000.0

        source_selector.signalToNoise.fluxField = 'base_GaussianFlux_instFlux'
        source_selector.signalToNoise.errField = 'base_GaussianFlux_instFluxErr'

        source_selector.flags.bad = ['base_PixelFlags_flag_edge',
                                     'base_PixelFlags_flag_interpolatedCenter',
                                     'base_PixelFlags_flag_saturatedCenter',
                                     'base_PixelFlags_flag_crCenter',
                                     'base_PixelFlags_flag_bad',
                                     'base_PixelFlags_flag_interpolated',
                                     'base_PixelFlags_flag_saturated',
                                     'slot_Centroid_flag',
                                     'base_GaussianFlux_flag']

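        # Aperture corrections are measured only from stars flagged as
        # 'final_psf_used', i.e. the stars actually used in the PSF fit
        # (the flag is set by the psf determiner in compute_psf_and_ap_corr_map).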

        self.measure_ap_corr.sourceSelector['flagged'].field = 'final_psf_used'

        import lsst.meas.modelfit  # noqa: F401
        import lsst.meas.extensions.photometryKron  # noqa: F401
        import lsst.meas.extensions.convolved  # noqa: F401
        import lsst.meas.extensions.gaap  # noqa: F401
        import lsst.meas.extensions.shapeHSM  # noqa: F401

        # Set up measurement defaults
        self.measurement.plugins.names = [
            'base_PsfFlux',
            'base_GaussianFlux',
            'modelfit_DoubleShapeletPsfApprox',
            'modelfit_CModel',
            'ext_photometryKron_KronFlux',
            'ext_convolved_ConvolvedFlux',
            'ext_gaap_GaapFlux',
            'ext_shapeHSM_HsmShapeRegauss',
            'ext_shapeHSM_HsmSourceMoments',
            'ext_shapeHSM_HsmPsfMoments',
            'ext_shapeHSM_HsmSourceMomentsRound',
        ]
        self.measurement.slots.modelFlux = 'modelfit_CModel'
        self.measurement.plugins['ext_convolved_ConvolvedFlux'].seeing.append(8.0)
        self.measurement.plugins['ext_gaap_GaapFlux'].sigmas = [
            0.5,
            0.7,
            1.0,
            1.5,
            2.5,
            3.0
        ]
        self.measurement.plugins['ext_gaap_GaapFlux'].doPsfPhotometry = True
        self.measurement.slots.shape = 'ext_shapeHSM_HsmSourceMoments'
        self.measurement.slots.psfShape = 'ext_shapeHSM_HsmPsfMoments'
        self.measurement.plugins['ext_shapeHSM_HsmShapeRegauss'].deblendNChild = ""
        # Turn off the centroid, apFlux, and calibFlux slots for measurement;
        # these values are carried over from the input src catalog instead.
        self.measurement.slots.centroid = None
        self.measurement.slots.apFlux = None
        self.measurement.slots.calibFlux = None

        names = self.measurement.plugins['ext_convolved_ConvolvedFlux'].getAllResultNames()
        self.measure_ap_corr.allowFailure += names
        names = self.measurement.plugins["ext_gaap_GaapFlux"].getAllGaapResultNames()
        self.measure_ap_corr.allowFailure += names


class FinalizeCharacterizationTask(pipeBase.PipelineTask):
    """Run final characterization (PSF models and aperture corrections) on exposures."""
    ConfigClass = FinalizeCharacterizationConfig
    _DefaultName = 'finalize_characterization'

    def __init__(self, initInputs=None, **kwargs):
        super().__init__(initInputs=initInputs, **kwargs)

        self.schema_mapper, self.schema = self._make_output_schema_mapper(
            initInputs['src_schema'].schema
        )

        self.makeSubtask('reserve_selection')
        self.makeSubtask('source_selector')
        self.makeSubtask('make_psf_candidates')
        self.makeSubtask('psf_determiner')
        self.makeSubtask('measurement', schema=self.schema)
        self.makeSubtask('measure_ap_corr', schema=self.schema)
        self.makeSubtask('apply_ap_corr', schema=self.schema)

        # Only log warning and fatal errors from the source_selector
        self.source_selector.log.setLevel(self.source_selector.log.WARN)

    def runQuantum(self, butlerQC, inputRefs, outputRefs):
        input_handle_dict = butlerQC.get(inputRefs)

        band = butlerQC.quantum.dataId['band']
        visit = butlerQC.quantum.dataId['visit']

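        # The src and calexp inputs are deferred-load handles; key them by
        # detector, and key the isolated star inputs by tract.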

        src_dict_temp = {handle.dataId['detector']: handle
                         for handle in input_handle_dict['srcs']}
        calexp_dict_temp = {handle.dataId['detector']: handle
                            for handle in input_handle_dict['calexps']}
        isolated_star_cat_dict_temp = {handle.dataId['tract']: handle
                                       for handle in input_handle_dict['isolated_star_cats']}
        isolated_star_source_dict_temp = {handle.dataId['tract']: handle
                                          for handle in input_handle_dict['isolated_star_sources']}
        # TODO: Sort until DM-31701 is done and we have deterministic
        # dataset ordering.
        src_dict = {detector: src_dict_temp[detector] for
                    detector in sorted(src_dict_temp.keys())}
        calexp_dict = {detector: calexp_dict_temp[detector] for
                       detector in sorted(calexp_dict_temp.keys())}
        isolated_star_cat_dict = {tract: isolated_star_cat_dict_temp[tract] for
                                  tract in sorted(isolated_star_cat_dict_temp.keys())}
        isolated_star_source_dict = {tract: isolated_star_source_dict_temp[tract] for
                                     tract in sorted(isolated_star_source_dict_temp.keys())}

        struct = self.run(visit,
                          band,
                          isolated_star_cat_dict,
                          isolated_star_source_dict,
                          src_dict,
                          calexp_dict)

        butlerQC.put(struct.psf_ap_corr_cat,
                     outputRefs.finalized_psf_ap_corr_cat)
        butlerQC.put(pd.DataFrame(struct.output_table),
                     outputRefs.finalized_src_table)

    def run(self, visit, band, isolated_star_cat_dict, isolated_star_source_dict, src_dict, calexp_dict):
        """
        Run the FinalizeCharacterizationTask.

        Parameters
        ----------
        visit : `int`
            Visit number.  Used in the output catalogs.
        band : `str`
            Band name.  Used to select reserved stars.
        isolated_star_cat_dict : `dict`
            Per-tract dict of isolated star catalog handles.
        isolated_star_source_dict : `dict`
            Per-tract dict of isolated star source catalog handles.
        src_dict : `dict`
            Per-detector dict of src catalog handles.
        calexp_dict : `dict`
            Per-detector dict of calibrated exposure handles.

        Returns
        -------
        struct : `lsst.pipe.base.Struct`
            Struct with outputs for persistence: ``psf_ap_corr_cat`` (per-detector
            psf models and aperture corrections) and ``output_table`` (table of
            source measurements).
        """
        # We do not need the isolated star table in this task.
        # However, it is used in tests to confirm consistency of indexes.
        _, isolated_source_table = self.concat_isolated_star_cats(
            band,
            isolated_star_cat_dict,
            isolated_star_source_dict
        )

        exposure_cat_schema = afwTable.ExposureTable.makeMinimalSchema()
        exposure_cat_schema.addField('visit', type='I', doc='Visit number')

        metadata = dafBase.PropertyList()
        metadata.add("COMMENT", "Catalog id is detector id, sorted.")
        metadata.add("COMMENT", "Only detectors with data have entries.")

        psf_ap_corr_cat = afwTable.ExposureCatalog(exposure_cat_schema)
        psf_ap_corr_cat.setMetadata(metadata)

        measured_src_tables = []

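        # Compute the psf model and aperture correction map independently for
        # each detector with data, and accumulate the per-detector measurement
        # tables for the visit-level output catalog.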

        for detector in src_dict:
            src = src_dict[detector].get()
            exposure = calexp_dict[detector].get()

            psf, ap_corr_map, measured_src = self.compute_psf_and_ap_corr_map(
                visit,
                detector,
                exposure,
                src,
                isolated_source_table
            )

            # And now we package it together...
            record = psf_ap_corr_cat.addNew()
            record['id'] = int(detector)
            record['visit'] = visit
            if psf is not None:
                record.setPsf(psf)
            if ap_corr_map is not None:
                record.setApCorrMap(ap_corr_map)

            measured_src['visit'][:] = visit
            measured_src['detector'][:] = detector

            measured_src_tables.append(measured_src.asAstropy().as_array())

        measured_src_table = np.concatenate(measured_src_tables)

        return pipeBase.Struct(psf_ap_corr_cat=psf_ap_corr_cat,
                               output_table=measured_src_table)

    def _make_output_schema_mapper(self, input_schema):
        """Make the schema mapper from the input schema to the output schema.

        Parameters
        ----------
        input_schema : `lsst.afw.table.Schema`
            Input schema.

        Returns
        -------
        mapper : `lsst.afw.table.SchemaMapper`
            Schema mapper
        output_schema : `lsst.afw.table.Schema`
            Output schema (with alias map)
        """
        mapper = afwTable.SchemaMapper(input_schema)
        mapper.addMinimalSchema(afwTable.SourceTable.makeMinimalSchema())
        mapper.addMapping(input_schema['slot_Centroid_x'].asKey())
        mapper.addMapping(input_schema['slot_Centroid_y'].asKey())

        # The aperture fields may be used by the psf determiner.
        aper_fields = input_schema.extract('base_CircularApertureFlux_*')
        for field, item in aper_fields.items():
            mapper.addMapping(item.key)

        # The following two may be redundant, but then the mapping is a no-op.
        apflux_fields = input_schema.extract('slot_ApFlux_*')
        for field, item in apflux_fields.items():
            mapper.addMapping(item.key)

        calibflux_fields = input_schema.extract('slot_CalibFlux_*')
        for field, item in calibflux_fields.items():
            mapper.addMapping(item.key)

        mapper.addMapping(
            input_schema[self.config.source_selector.active.signalToNoise.fluxField].asKey(),
            'final_psf_selection_flux')
        mapper.addMapping(
            input_schema[self.config.source_selector.active.signalToNoise.errField].asKey(),
            'final_psf_selection_flux_err')

        output_schema = mapper.getOutputSchema()

        output_schema.addField(
            'final_psf_candidate',
            type='Flag',
            doc=('set if the source was a candidate for PSF determination, '
                 'as determined from FinalizeCharacterizationTask.'),
        )
        output_schema.addField(
            'final_psf_reserved',
            type='Flag',
            doc=('set if source was reserved from PSF determination by '
                 'FinalizeCharacterizationTask.'),
        )
        output_schema.addField(
            'final_psf_used',
            type='Flag',
            doc=('set if source was used in the PSF determination by '
                 'FinalizeCharacterizationTask.'),
        )
        output_schema.addField(
            'visit',
            type=np.int32,
            doc='Visit number for the sources.',
        )
        output_schema.addField(
            'detector',
            type=np.int32,
            doc='Detector number for the sources.',
        )

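        # Carry over just the slot aliases that are needed here (centroid and
        # the aperture/calibration fluxes), so that the mapped slot columns
        # resolve in the output schema.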

        alias_map = input_schema.getAliasMap()
        alias_map_output = afwTable.AliasMap()
        alias_map_output.set('slot_Centroid', alias_map.get('slot_Centroid'))
        alias_map_output.set('slot_ApFlux', alias_map.get('slot_ApFlux'))
        alias_map_output.set('slot_CalibFlux', alias_map.get('slot_CalibFlux'))

        output_schema.setAliasMap(alias_map_output)

        return mapper, output_schema

    def _make_selection_schema_mapper(self, input_schema):
        """Make the schema mapper from the input schema to the selection schema.

        Parameters
        ----------
        input_schema : `lsst.afw.table.Schema`
            Input schema.

        Returns
        -------
        mapper : `lsst.afw.table.SchemaMapper`
            Schema mapper
        selection_schema : `lsst.afw.table.Schema`
            Selection schema (with alias map)
        """
        mapper = afwTable.SchemaMapper(input_schema)
        mapper.addMinimalSchema(input_schema)

        selection_schema = mapper.getOutputSchema()

        selection_schema.addField(
            'final_psf_candidate',
            type='Flag',
            doc=('set if the source was a candidate for PSF determination, '
                 'as determined from FinalizeCharacterizationTask.'),
        )
        selection_schema.addField(
            'final_psf_reserved',
            type='Flag',
            doc=('set if source was reserved from PSF determination by '
                 'FinalizeCharacterizationTask.'),
        )
        selection_schema.addField(
            'final_psf_used',
            type='Flag',
            doc=('set if source was used in the PSF determination by '
                 'FinalizeCharacterizationTask.'),
        )

        selection_schema.setAliasMap(input_schema.getAliasMap())

        return mapper, selection_schema

    def concat_isolated_star_cats(self, band, isolated_star_cat_dict, isolated_star_source_dict):
        """
        Concatenate isolated star catalogs and make reserve selection.

        Parameters
        ----------
        band : `str`
            Band name.  Used to select reserved stars.
        isolated_star_cat_dict : `dict`
            Per-tract dict of isolated star catalog handles.
        isolated_star_source_dict : `dict`
            Per-tract dict of isolated star source catalog handles.

        Returns
        -------
        isolated_table : `np.ndarray` (N,)
            Table of isolated stars, with indexes to isolated sources.
        isolated_source_table : `np.ndarray` (M,)
            Table of isolated sources, with indexes to isolated stars.
        """
        isolated_tables = []
        isolated_sources = []
        merge_cat_counter = 0
        merge_source_counter = 0

        for tract in isolated_star_cat_dict:
            df_cat = isolated_star_cat_dict[tract].get()
            table_cat = df_cat.to_records()

            df_source = isolated_star_source_dict[tract].get(
                parameters={'columns': [self.config.id_column,
                                        'obj_index']}
            )
            table_source = df_source.to_records()

            # Cut isolated star table to those observed in this band, and adjust indexes
            (use_band,) = (table_cat[f'nsource_{band}'] > 0).nonzero()

            # With the following matching:
            #   table_source[b] <-> table_cat[use_band[a]]
            obj_index = table_source['obj_index'][:]
            a, b = esutil.numpy_util.match(use_band, obj_index)

            # Update indexes and cut to band-selected stars/sources
            table_source['obj_index'][b] = a
            _, index_new = np.unique(a, return_index=True)
            table_cat[f'source_cat_index_{band}'][use_band] = index_new

            # After the following cuts, the catalogs have the following properties:
            # - table_cat only contains isolated stars that have at least one source
            #   in ``band``.
            # - table_source only contains ``band`` sources.
            # - The slice table_cat["source_cat_index_{band}"]: table_cat["source_cat_index_{band}"]
            #   + table_cat["nsource_{band}"]
            #   applied to table_source will give all the sources associated with the star.
            # - For each source, table_source["obj_index"] points to the index of the associated
            #   isolated star.
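            # Illustrative example (``i`` is a hypothetical star index after these cuts):
            #   start = table_cat[f'source_cat_index_{band}'][i]
            #   stop = start + table_cat[f'nsource_{band}'][i]
            #   table_source[start: stop] then holds the ``band`` sources for star i,
            #   and table_source['obj_index'][start: stop] == i.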

            table_source = table_source[b]
            table_cat = table_cat[use_band]

            # Add reserved flag column to tables
            table_cat = np.lib.recfunctions.append_fields(
                table_cat,
                'reserved',
                np.zeros(table_cat.size, dtype=bool),
                usemask=False
            )
            table_source = np.lib.recfunctions.append_fields(
                table_source,
                'reserved',
                np.zeros(table_source.size, dtype=bool),
                usemask=False
            )

            # Get reserve star flags
            table_cat['reserved'][:] = self.reserve_selection.run(
                len(table_cat),
                extra=f'{band}_{tract}',
            )
            table_source['reserved'][:] = table_cat['reserved'][table_source['obj_index']]

            # Offset indexes to account for tract merging
            table_cat[f'source_cat_index_{band}'] += merge_source_counter
            table_source['obj_index'] += merge_cat_counter

            isolated_tables.append(table_cat)
            isolated_sources.append(table_source)

            merge_cat_counter += len(table_cat)
            merge_source_counter += len(table_source)

        isolated_table = np.concatenate(isolated_tables)
        isolated_source_table = np.concatenate(isolated_sources)

        return isolated_table, isolated_source_table

    def compute_psf_and_ap_corr_map(self, visit, detector, exposure, src, isolated_source_table):
        """Compute psf model and aperture correction map for a single exposure.

        Parameters
        ----------
        visit : `int`
            Visit number (for logging).
        detector : `int`
            Detector number (for logging).
        exposure : `lsst.afw.image.ExposureF`
            Exposure on which to determine the psf model and aperture corrections.
        src : `lsst.afw.table.SourceCatalog`
            Catalog of measured sources for this detector.
        isolated_source_table : `np.ndarray`
            Table of isolated star sources for the visit, including reserved flags.

        Returns
        -------
        psf : `lsst.meas.algorithms.ImagePsf`
            PSF Model
        ap_corr_map : `lsst.afw.image.ApCorrMap`
            Aperture correction map.
        measured_src : `lsst.afw.table.SourceCatalog`
            Updated source catalog with measurements, flags and aperture corrections.
        """
        # Apply source selector (s/n, flags, etc.)
        good_src = self.source_selector.selectSources(src)

        # Cut down the input src catalog to the selected sources.
        # We use a separate schema/mapper here from the one used for the
        # output/measurement catalog because of clashes between fields that
        # were previously run and those that need to be rerun with the new
        # psf model.  This may be slightly inefficient but keeps input and
        # output values cleanly separated.
        selection_mapper, selection_schema = self._make_selection_schema_mapper(src.schema)

        selected_src = afwTable.SourceCatalog(selection_schema)
        selected_src.reserve(good_src.selected.sum())
        selected_src.extend(src[good_src.selected], mapper=selection_mapper)

        # Find the isolated sources and set flags
        matched_src, matched_iso = esutil.numpy_util.match(
            selected_src['id'],
            isolated_source_table[self.config.id_column]
        )

        matched_arr = np.zeros(len(selected_src), dtype=bool)
        matched_arr[matched_src] = True
        selected_src['final_psf_candidate'] = matched_arr

        reserved_arr = np.zeros(len(selected_src), dtype=bool)
        reserved_arr[matched_src] = isolated_source_table['reserved'][matched_iso]
        selected_src['final_psf_reserved'] = reserved_arr

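        # Keep only the candidates that matched the isolated star table (this
        # includes both stars to be used in the fit and the reserved stars);
        # everything downstream operates on this subset.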

        selected_src = selected_src[selected_src['final_psf_candidate']].copy(deep=True)

        # Make the measured source catalog as well, based on the selected catalog.
        measured_src = afwTable.SourceCatalog(self.schema)
        measured_src.reserve(len(selected_src))
        measured_src.extend(selected_src, mapper=self.schema_mapper)

        # We need to copy over the final_psf flags because they were not in the mapper.
        measured_src['final_psf_candidate'] = selected_src['final_psf_candidate']
        measured_src['final_psf_reserved'] = selected_src['final_psf_reserved']

        # Select the psf candidates from the selection catalog
        try:
            psf_selection_result = self.make_psf_candidates.run(selected_src, exposure=exposure)
        except Exception as e:
            self.log.warning('Failed to make psf candidates for visit %d, detector %d: %s',
                             visit, detector, e)
            return None, None, measured_src

        psf_cand_cat = psf_selection_result.goodStarCat

        # Make list of psf candidates to send to the determiner
        # (omitting those marked as reserved)
        psf_determiner_list = [cand for cand, use
                               in zip(psf_selection_result.psfCandidates,
                                      ~psf_cand_cat['final_psf_reserved']) if use]
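        # The psf determiner will set this flag on the candidates it actually
        # uses in the fit (via the flagKey argument below).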

        flag_key = psf_cand_cat.schema['final_psf_used'].asKey()
        try:
            psf, cell_set = self.psf_determiner.determinePsf(exposure,
                                                             psf_determiner_list,
                                                             self.metadata,
                                                             flagKey=flag_key)
        except Exception as e:
            self.log.warning('Failed to determine psf for visit %d, detector %d: %s',
                             visit, detector, e)
            return None, None, measured_src

        # At this point, we need to transfer the psf used flag from the selection
        # catalog to the measurement catalog.
        matched_selected, matched_measured = esutil.numpy_util.match(
            selected_src['id'],
            measured_src['id']
        )
        measured_used = np.zeros(len(measured_src), dtype=bool)
        measured_used[matched_measured] = selected_src['final_psf_used'][matched_selected]
        measured_src['final_psf_used'] = measured_used

        # Next, run measurement on all of the psf candidate stars (both those
        # used in the fit and those reserved).
        try:
            self.measurement.run(measCat=measured_src, exposure=exposure)
        except Exception as e:
            self.log.warning('Failed to make measurements for visit %d, detector %d: %s',
                             visit, detector, e)
            return psf, None, measured_src

        # And finally the ap corr map.
        try:
            ap_corr_map = self.measure_ap_corr.run(exposure=exposure,
                                                   catalog=measured_src).apCorrMap
        except Exception as e:
            self.log.warning('Failed to compute aperture corrections for visit %d, detector %d: %s',
                             visit, detector, e)
            return psf, None, measured_src

        self.apply_ap_corr.run(catalog=measured_src, apCorrMap=ap_corr_map)

        return psf, ap_corr_map, measured_src