Coverage for python/lsst/pipe/tasks/finalizeCharacterization.py: 14% of 256 statements

# This file is part of pipe_tasks.
#
# Developed for the LSST Data Management System.
# This product includes software developed by the LSST Project
# (https://www.lsst.org).
# See the COPYRIGHT file at the top-level directory of this distribution
# for details of code ownership.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.

"""Task to run a finalized image characterization, using additional data.
"""

__all__ = ['FinalizeCharacterizationConnections',
           'FinalizeCharacterizationConfig',
           'FinalizeCharacterizationTask']

import numpy as np
import esutil
import pandas as pd

import lsst.pex.config as pexConfig
import lsst.pipe.base as pipeBase
import lsst.daf.base as dafBase
import lsst.afw.table as afwTable
import lsst.meas.algorithms as measAlg
import lsst.meas.extensions.piff.piffPsfDeterminer  # noqa: F401
from lsst.meas.algorithms import MeasureApCorrTask
from lsst.meas.base import SingleFrameMeasurementTask, ApplyApCorrTask
from lsst.meas.algorithms.sourceSelector import sourceSelectorRegistry

from .reserveIsolatedStars import ReserveIsolatedStarsTask


class FinalizeCharacterizationConnections(pipeBase.PipelineTaskConnections,
                                           dimensions=('instrument', 'visit',),
                                           defaultTemplates={}):
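    """Connections for FinalizeCharacterizationTask."""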
    src_schema = pipeBase.connectionTypes.InitInput(
        doc='Input schema used for src catalogs.',
        name='src_schema',
        storageClass='SourceCatalog',
    )
    srcs = pipeBase.connectionTypes.Input(
        doc='Source catalogs for the visit',
        name='src',
        storageClass='SourceCatalog',
        dimensions=('instrument', 'visit', 'detector'),
        deferLoad=True,
        multiple=True,
    )
    calexps = pipeBase.connectionTypes.Input(
        doc='Calexps for the visit',
        name='calexp',
        storageClass='ExposureF',
        dimensions=('instrument', 'visit', 'detector'),
        deferLoad=True,
        multiple=True,
    )
    isolated_star_cats = pipeBase.connectionTypes.Input(
        doc=('Catalog of isolated stars with average positions, number of associated '
             'sources, and indexes to the isolated_star_sources catalogs.'),
        name='isolated_star_cat',
        storageClass='DataFrame',
        dimensions=('instrument', 'tract', 'skymap'),
        deferLoad=True,
        multiple=True,
    )
    isolated_star_sources = pipeBase.connectionTypes.Input(
        doc=('Catalog of isolated star sources with sourceIds, and indexes to the '
             'isolated_star_cats catalogs.'),
        name='isolated_star_sources',
        storageClass='DataFrame',
        dimensions=('instrument', 'tract', 'skymap'),
        deferLoad=True,
        multiple=True,
    )
    finalized_psf_ap_corr_cat = pipeBase.connectionTypes.Output(
        doc=('Per-visit finalized psf models and aperture corrections. This '
             'catalog uses detector id for the id and is sorted for fast '
             'lookups of a detector.'),
        name='finalized_psf_ap_corr_catalog',
        storageClass='ExposureCatalog',
        dimensions=('instrument', 'visit'),
    )
    finalized_src_table = pipeBase.connectionTypes.Output(
        doc=('Per-visit catalog of measurements for psf/flag/etc.'),
        name='finalized_src_table',
        storageClass='DataFrame',
        dimensions=('instrument', 'visit'),
    )


class FinalizeCharacterizationConfig(pipeBase.PipelineTaskConfig,
                                     pipelineConnections=FinalizeCharacterizationConnections):
    """Configuration for FinalizeCharacterizationTask."""
    source_selector = sourceSelectorRegistry.makeField(
        doc="How to select sources",
        default="science"
    )
    id_column = pexConfig.Field(
        doc='Name of column in isolated_star_sources with source id.',
        dtype=str,
        default='sourceId',
    )
    reserve_selection = pexConfig.ConfigurableField(
        target=ReserveIsolatedStarsTask,
        doc='Task to select reserved stars',
    )
    make_psf_candidates = pexConfig.ConfigurableField(
        target=measAlg.MakePsfCandidatesTask,
        doc='Task to make psf candidates from selected stars.',
    )
    psf_determiner = measAlg.psfDeterminerRegistry.makeField(
        'PSF Determination algorithm',
        default='piff'
    )
    measurement = pexConfig.ConfigurableField(
        target=SingleFrameMeasurementTask,
        doc='Measure sources for aperture corrections'
    )
    measure_ap_corr = pexConfig.ConfigurableField(
        target=MeasureApCorrTask,
        doc="Subtask to measure aperture corrections"
    )
    apply_ap_corr = pexConfig.ConfigurableField(
        target=ApplyApCorrTask,
        doc="Subtask to apply aperture corrections"
    )

    def setDefaults(self):
        super().setDefaults()

        source_selector = self.source_selector['science']
        source_selector.setDefaults()

        # We use the source selector only to remove flagged objects and to
        # apply a signal-to-noise cut. Isolated, unresolved sources are
        # handled by the isolated star catalog.

        source_selector.doFlags = True
        source_selector.doSignalToNoise = True
        source_selector.doFluxLimit = False
        source_selector.doUnresolved = False
        source_selector.doIsolated = False

        source_selector.signalToNoise.minimum = 20.0
        source_selector.signalToNoise.maximum = 1000.0

        source_selector.signalToNoise.fluxField = 'base_GaussianFlux_instFlux'
        source_selector.signalToNoise.errField = 'base_GaussianFlux_instFluxErr'

        source_selector.flags.bad = ['base_PixelFlags_flag_edge',
                                     'base_PixelFlags_flag_interpolatedCenter',
                                     'base_PixelFlags_flag_saturatedCenter',
                                     'base_PixelFlags_flag_crCenter',
                                     'base_PixelFlags_flag_bad',
                                     'base_PixelFlags_flag_interpolated',
                                     'base_PixelFlags_flag_saturated',
                                     'slot_Centroid_flag',
                                     'base_GaussianFlux_flag']

        # Configure aperture correction to select only high s/n sources (that
        # were used in the psf modeling) to avoid background problems when
        # computing the aperture correction map.
        self.measure_ap_corr.sourceSelector = 'science'

        ap_selector = self.measure_ap_corr.sourceSelector['science']
        # We do not need to filter flags or unresolved because we have used
        # the filtered isolated stars as an input
        ap_selector.doFlags = False
        ap_selector.doUnresolved = False

        import lsst.meas.modelfit  # noqa: F401
        import lsst.meas.extensions.photometryKron  # noqa: F401
        import lsst.meas.extensions.convolved  # noqa: F401
        import lsst.meas.extensions.gaap  # noqa: F401
        import lsst.meas.extensions.shapeHSM  # noqa: F401

        # Set up measurement defaults
        self.measurement.plugins.names = [
            'base_PsfFlux',
            'base_GaussianFlux',
            'modelfit_DoubleShapeletPsfApprox',
            'modelfit_CModel',
            'ext_photometryKron_KronFlux',
            'ext_convolved_ConvolvedFlux',
            'ext_gaap_GaapFlux',
            'ext_shapeHSM_HsmShapeRegauss',
            'ext_shapeHSM_HsmSourceMoments',
            'ext_shapeHSM_HsmPsfMoments',
            'ext_shapeHSM_HsmSourceMomentsRound',
        ]
        self.measurement.slots.modelFlux = 'modelfit_CModel'
        self.measurement.plugins['ext_convolved_ConvolvedFlux'].seeing.append(8.0)
        self.measurement.plugins['ext_gaap_GaapFlux'].sigmas = [
            0.5,
            0.7,
            1.0,
            1.5,
            2.5,
            3.0
        ]
        self.measurement.plugins['ext_gaap_GaapFlux'].doPsfPhotometry = True
        self.measurement.slots.shape = 'ext_shapeHSM_HsmSourceMoments'
        self.measurement.slots.psfShape = 'ext_shapeHSM_HsmPsfMoments'
        self.measurement.plugins['ext_shapeHSM_HsmShapeRegauss'].deblendNChild = ""

        # Keep track of which footprints contain streaks
        self.measurement.plugins['base_PixelFlags'].masksFpAnywhere = ['STREAK']
        self.measurement.plugins['base_PixelFlags'].masksFpCenter = ['STREAK']

        # Turn off slot setting for measurement for centroid and shape
        # (for which we use the input src catalog measurements)
        self.measurement.slots.centroid = None
        self.measurement.slots.apFlux = None
        self.measurement.slots.calibFlux = None

        names = self.measurement.plugins['ext_convolved_ConvolvedFlux'].getAllResultNames()
        self.measure_ap_corr.allowFailure += names
        names = self.measurement.plugins["ext_gaap_GaapFlux"].getAllGaapResultNames()
        self.measure_ap_corr.allowFailure += names
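
# A minimal, illustrative example of overriding this configuration in Python
# (field names are taken from the class above; the specific values shown are
# hypothetical, not recommendations):
#
#     config = FinalizeCharacterizationConfig()
#     config.psf_determiner.name = 'piff'
#     config.source_selector['science'].signalToNoise.minimum = 50.0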


class FinalizeCharacterizationTask(pipeBase.PipelineTask):
    """Run final characterization on exposures."""
    ConfigClass = FinalizeCharacterizationConfig
    _DefaultName = 'finalize_characterization'

    def __init__(self, initInputs=None, **kwargs):
        super().__init__(initInputs=initInputs, **kwargs)

        self.schema_mapper, self.schema = self._make_output_schema_mapper(
            initInputs['src_schema'].schema
        )

        self.makeSubtask('reserve_selection')
        self.makeSubtask('source_selector')
        self.makeSubtask('make_psf_candidates')
        self.makeSubtask('psf_determiner')
        self.makeSubtask('measurement', schema=self.schema)
        self.makeSubtask('measure_ap_corr', schema=self.schema)
        self.makeSubtask('apply_ap_corr', schema=self.schema)

        # Only log warning and fatal errors from the source_selector
        self.source_selector.log.setLevel(self.source_selector.log.WARN)

    def runQuantum(self, butlerQC, inputRefs, outputRefs):
        input_handle_dict = butlerQC.get(inputRefs)

        band = butlerQC.quantum.dataId['band']
        visit = butlerQC.quantum.dataId['visit']

        src_dict_temp = {handle.dataId['detector']: handle
                         for handle in input_handle_dict['srcs']}
        calexp_dict_temp = {handle.dataId['detector']: handle
                            for handle in input_handle_dict['calexps']}
        isolated_star_cat_dict_temp = {handle.dataId['tract']: handle
                                       for handle in input_handle_dict['isolated_star_cats']}
        isolated_star_source_dict_temp = {handle.dataId['tract']: handle
                                          for handle in input_handle_dict['isolated_star_sources']}
        # TODO: Sort until DM-31701 is done and we have deterministic
        # dataset ordering.
        src_dict = {detector: src_dict_temp[detector] for
                    detector in sorted(src_dict_temp.keys())}
        calexp_dict = {detector: calexp_dict_temp[detector] for
                       detector in sorted(calexp_dict_temp.keys())}
        isolated_star_cat_dict = {tract: isolated_star_cat_dict_temp[tract] for
                                  tract in sorted(isolated_star_cat_dict_temp.keys())}
        isolated_star_source_dict = {tract: isolated_star_source_dict_temp[tract] for
                                     tract in sorted(isolated_star_source_dict_temp.keys())}

        struct = self.run(visit,
                          band,
                          isolated_star_cat_dict,
                          isolated_star_source_dict,
                          src_dict,
                          calexp_dict)

        butlerQC.put(struct.psf_ap_corr_cat,
                     outputRefs.finalized_psf_ap_corr_cat)
        butlerQC.put(pd.DataFrame(struct.output_table),
                     outputRefs.finalized_src_table)

    def run(self, visit, band, isolated_star_cat_dict, isolated_star_source_dict, src_dict, calexp_dict):
        """
        Run the FinalizeCharacterizationTask.

        Parameters
        ----------
        visit : `int`
            Visit number. Used in the output catalogs.
        band : `str`
            Band name. Used to select reserved stars.
        isolated_star_cat_dict : `dict`
            Per-tract dict of isolated star catalog handles.
        isolated_star_source_dict : `dict`
            Per-tract dict of isolated star source catalog handles.
        src_dict : `dict`
            Per-detector dict of src catalog handles.
        calexp_dict : `dict`
            Per-detector dict of calibrated exposure handles.

        Returns
        -------
        struct : `lsst.pipe.base.Struct`
            Struct with outputs for persistence.

        Raises
        ------
        NoWorkFound
            Raised if the selector returns no good sources.
        """
        # We do not need the isolated star table in this task.
        # However, it is used in tests to confirm consistency of indexes.
        _, isolated_source_table = self.concat_isolated_star_cats(
            band,
            isolated_star_cat_dict,
            isolated_star_source_dict
        )

        exposure_cat_schema = afwTable.ExposureTable.makeMinimalSchema()
        exposure_cat_schema.addField('visit', type='L', doc='Visit number')

        metadata = dafBase.PropertyList()
        metadata.add("COMMENT", "Catalog id is detector id, sorted.")
        metadata.add("COMMENT", "Only detectors with data have entries.")

        psf_ap_corr_cat = afwTable.ExposureCatalog(exposure_cat_schema)
        psf_ap_corr_cat.setMetadata(metadata)

        measured_src_tables = []
        measured_src_table = None

        for detector in src_dict:
            src = src_dict[detector].get()
            exposure = calexp_dict[detector].get()

            psf, ap_corr_map, measured_src = self.compute_psf_and_ap_corr_map(
                visit,
                detector,
                exposure,
                src,
                isolated_source_table
            )

            # And now we package it together...
            if measured_src is not None:
                record = psf_ap_corr_cat.addNew()
                record['id'] = int(detector)
                record['visit'] = visit
                if psf is not None:
                    record.setPsf(psf)
                if ap_corr_map is not None:
                    record.setApCorrMap(ap_corr_map)

                measured_src['visit'][:] = visit
                measured_src['detector'][:] = detector

                measured_src_tables.append(measured_src.asAstropy().as_array())

        if len(measured_src_tables) > 0:
            measured_src_table = np.concatenate(measured_src_tables)

        if measured_src_table is None:
            raise pipeBase.NoWorkFound(f'No good sources found for any detectors in visit {visit}')

        return pipeBase.Struct(psf_ap_corr_cat=psf_ap_corr_cat,
                               output_table=measured_src_table)

    def _make_output_schema_mapper(self, input_schema):
        """Make the schema mapper from the input schema to the output schema.

        Parameters
        ----------
        input_schema : `lsst.afw.table.Schema`
            Input schema.

        Returns
        -------
        mapper : `lsst.afw.table.SchemaMapper`
            Schema mapper.
        output_schema : `lsst.afw.table.Schema`
            Output schema (with alias map).
        """
        mapper = afwTable.SchemaMapper(input_schema)
        mapper.addMinimalSchema(afwTable.SourceTable.makeMinimalSchema())
        mapper.addMapping(input_schema['slot_Centroid_x'].asKey())
        mapper.addMapping(input_schema['slot_Centroid_y'].asKey())

        # The aperture fields may be used by the psf determiner.
        aper_fields = input_schema.extract('base_CircularApertureFlux_*')
        for field, item in aper_fields.items():
            mapper.addMapping(item.key)

        # The following two may be redundant, but then the mapping is a no-op.
        apflux_fields = input_schema.extract('slot_ApFlux_*')
        for field, item in apflux_fields.items():
            mapper.addMapping(item.key)

        calibflux_fields = input_schema.extract('slot_CalibFlux_*')
        for field, item in calibflux_fields.items():
            mapper.addMapping(item.key)

        mapper.addMapping(
            input_schema[self.config.source_selector.active.signalToNoise.fluxField].asKey(),
            'calib_psf_selection_flux')
        mapper.addMapping(
            input_schema[self.config.source_selector.active.signalToNoise.errField].asKey(),
            'calib_psf_selection_flux_err')

        output_schema = mapper.getOutputSchema()

        output_schema.addField(
            'calib_psf_candidate',
            type='Flag',
            doc=('set if the source was a candidate for PSF determination, '
                 'as determined from FinalizeCharacterizationTask.'),
        )
        output_schema.addField(
            'calib_psf_reserved',
            type='Flag',
            doc=('set if source was reserved from PSF determination by '
                 'FinalizeCharacterizationTask.'),
        )
        output_schema.addField(
            'calib_psf_used',
            type='Flag',
            doc=('set if source was used in the PSF determination by '
                 'FinalizeCharacterizationTask.'),
        )
        output_schema.addField(
            'visit',
            type=np.int64,
            doc='Visit number for the sources.',
        )
        output_schema.addField(
            'detector',
            type=np.int32,
            doc='Detector number for the sources.',
        )

        alias_map = input_schema.getAliasMap()
        alias_map_output = afwTable.AliasMap()
        alias_map_output.set('slot_Centroid', alias_map.get('slot_Centroid'))
        alias_map_output.set('slot_ApFlux', alias_map.get('slot_ApFlux'))
        alias_map_output.set('slot_CalibFlux', alias_map.get('slot_CalibFlux'))

        output_schema.setAliasMap(alias_map_output)

        return mapper, output_schema
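
    # Note (descriptive): the mapper and schema returned by
    # ``_make_output_schema_mapper`` are built once in ``__init__`` from the
    # ``src_schema`` init input, and are later consumed in
    # ``compute_psf_and_ap_corr_map`` via
    # ``measured_src.extend(selected_src, mapper=self.schema_mapper)``.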

    def _make_selection_schema_mapper(self, input_schema):
        """Make the schema mapper from the input schema to the selection schema.

        Parameters
        ----------
        input_schema : `lsst.afw.table.Schema`
            Input schema.

        Returns
        -------
        mapper : `lsst.afw.table.SchemaMapper`
            Schema mapper.
        selection_schema : `lsst.afw.table.Schema`
            Selection schema (with alias map).
        """
        mapper = afwTable.SchemaMapper(input_schema)
        mapper.addMinimalSchema(input_schema)

        selection_schema = mapper.getOutputSchema()

        selection_schema.setAliasMap(input_schema.getAliasMap())

        return mapper, selection_schema

    def concat_isolated_star_cats(self, band, isolated_star_cat_dict, isolated_star_source_dict):
        """
        Concatenate isolated star catalogs and make reserve selection.

        Parameters
        ----------
        band : `str`
            Band name. Used to select reserved stars.
        isolated_star_cat_dict : `dict`
            Per-tract dict of isolated star catalog handles.
        isolated_star_source_dict : `dict`
            Per-tract dict of isolated star source catalog handles.

        Returns
        -------
        isolated_table : `np.ndarray` (N,)
            Table of isolated stars, with indexes to isolated sources.
        isolated_source_table : `np.ndarray` (M,)
            Table of isolated sources, with indexes to isolated stars.
        """
        isolated_tables = []
        isolated_sources = []
        merge_cat_counter = 0
        merge_source_counter = 0

        for tract in isolated_star_cat_dict:
            df_cat = isolated_star_cat_dict[tract].get()
            table_cat = df_cat.to_records()

            df_source = isolated_star_source_dict[tract].get(
                parameters={'columns': [self.config.id_column,
                                        'obj_index']}
            )
            table_source = df_source.to_records()

            # Cut isolated star table to those observed in this band, and adjust indexes
            (use_band,) = (table_cat[f'nsource_{band}'] > 0).nonzero()

            if len(use_band) == 0:
                # There are no sources in this band in this tract.
                self.log.info("No sources found in %s band in tract %d.", band, tract)
                continue

            # With the following matching:
            #   table_source[b] <-> table_cat[use_band[a]]
            obj_index = table_source['obj_index'][:]
            a, b = esutil.numpy_util.match(use_band, obj_index)

            # Update indexes and cut to band-selected stars/sources
            table_source['obj_index'][b] = a
            _, index_new = np.unique(a, return_index=True)
            table_cat[f'source_cat_index_{band}'][use_band] = index_new

            # After the following cuts, the catalogs have the following properties:
            # - table_cat only contains isolated stars that have at least one source
            #   in ``band``.
            # - table_source only contains ``band`` sources.
            # - The slice [table_cat["source_cat_index_{band}"]:
            #              table_cat["source_cat_index_{band}"] + table_cat["nsource_{band}"]]
            #   applied to table_source will give all the sources associated with the star.
            # - For each source, table_source["obj_index"] points to the index of the
            #   associated isolated star.
            table_source = table_source[b]
            table_cat = table_cat[use_band]

            # Add reserved flag column to tables
            table_cat = np.lib.recfunctions.append_fields(
                table_cat,
                'reserved',
                np.zeros(table_cat.size, dtype=bool),
                usemask=False
            )
            table_source = np.lib.recfunctions.append_fields(
                table_source,
                'reserved',
                np.zeros(table_source.size, dtype=bool),
                usemask=False
            )

            # Get reserve star flags
            table_cat['reserved'][:] = self.reserve_selection.run(
                len(table_cat),
                extra=f'{band}_{tract}',
            )
            table_source['reserved'][:] = table_cat['reserved'][table_source['obj_index']]

            # Offset indexes to account for tract merging
            table_cat[f'source_cat_index_{band}'] += merge_source_counter
            table_source['obj_index'] += merge_cat_counter

            isolated_tables.append(table_cat)
            isolated_sources.append(table_source)

            merge_cat_counter += len(table_cat)
            merge_source_counter += len(table_source)

        isolated_table = np.concatenate(isolated_tables)
        isolated_source_table = np.concatenate(isolated_sources)
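
        # Illustrative example of the index relationship at this point
        # (using the names above): the sources for isolated star ``i``
        # observed in ``band`` are
        #
        #     start = isolated_table[f'source_cat_index_{band}'][i]
        #     stop = start + isolated_table[f'nsource_{band}'][i]
        #     star_sources = isolated_source_table[start:stop]
        #
        # and each of those sources has ``obj_index`` pointing back to ``i``.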

        return isolated_table, isolated_source_table

    def compute_psf_and_ap_corr_map(self, visit, detector, exposure, src, isolated_source_table):
        """Compute psf model and aperture correction map for a single exposure.

        Parameters
        ----------
        visit : `int`
            Visit number (for logging).
        detector : `int`
            Detector number (for logging).
        exposure : `lsst.afw.image.ExposureF`
            Exposure on which to determine the psf model and aperture corrections.
        src : `lsst.afw.table.SourceCatalog`
            Source catalog for the exposure.
        isolated_source_table : `np.ndarray`
            Table of isolated sources (with reserved flags) used to select
            psf candidates.

        Returns
        -------
        psf : `lsst.meas.algorithms.ImagePsf`
            PSF model.
        ap_corr_map : `lsst.afw.image.ApCorrMap`
            Aperture correction map.
        measured_src : `lsst.afw.table.SourceCatalog`
            Updated source catalog with measurements, flags and aperture corrections.
        """
        # Apply source selector (s/n, flags, etc.)
        good_src = self.source_selector.selectSources(src)
        if sum(good_src.selected) == 0:
            self.log.warning('No good sources remain after cuts for visit %d, detector %d',
                             visit, detector)
            return None, None, None

        # Cut down input src to the selected sources
        # We use a separate schema/mapper here than for the output/measurement catalog because of
        # clashes between fields that were previously run and those that need to be rerun with
        # the new psf model. This may be slightly inefficient but keeps input
        # and output values cleanly separated.
        selection_mapper, selection_schema = self._make_selection_schema_mapper(src.schema)

        selected_src = afwTable.SourceCatalog(selection_schema)
        selected_src.reserve(good_src.selected.sum())
        selected_src.extend(src[good_src.selected], mapper=selection_mapper)

        # The calib flags have been copied from the input table,
        # and we reset them here just to ensure they aren't propagated.
        selected_src['calib_psf_candidate'] = np.zeros(len(selected_src), dtype=bool)
        selected_src['calib_psf_used'] = np.zeros(len(selected_src), dtype=bool)
        selected_src['calib_psf_reserved'] = np.zeros(len(selected_src), dtype=bool)

        # Find the isolated sources and set flags
        matched_src, matched_iso = esutil.numpy_util.match(
            selected_src['id'],
            isolated_source_table[self.config.id_column]
        )

        matched_arr = np.zeros(len(selected_src), dtype=bool)
        matched_arr[matched_src] = True
        selected_src['calib_psf_candidate'] = matched_arr

        reserved_arr = np.zeros(len(selected_src), dtype=bool)
        reserved_arr[matched_src] = isolated_source_table['reserved'][matched_iso]
        selected_src['calib_psf_reserved'] = reserved_arr

        selected_src = selected_src[selected_src['calib_psf_candidate']].copy(deep=True)

        # Make the measured source catalog as well, based on the selected catalog.
        measured_src = afwTable.SourceCatalog(self.schema)
        measured_src.reserve(len(selected_src))
        measured_src.extend(selected_src, mapper=self.schema_mapper)

        # We need to copy over the calib_psf flags because they were not in the mapper
        measured_src['calib_psf_candidate'] = selected_src['calib_psf_candidate']
        measured_src['calib_psf_reserved'] = selected_src['calib_psf_reserved']

        # Select the psf candidates from the selection catalog
        try:
            psf_selection_result = self.make_psf_candidates.run(selected_src, exposure=exposure)
        except Exception as e:
            self.log.warning('Failed to make psf candidates for visit %d, detector %d: %s',
                             visit, detector, e)
            return None, None, measured_src

        psf_cand_cat = psf_selection_result.goodStarCat

        # Make list of psf candidates to send to the determiner
        # (omitting those marked as reserved)
        psf_determiner_list = [cand for cand, use
                               in zip(psf_selection_result.psfCandidates,
                                      ~psf_cand_cat['calib_psf_reserved']) if use]
        flag_key = psf_cand_cat.schema['calib_psf_used'].asKey()
        try:
            psf, cell_set = self.psf_determiner.determinePsf(exposure,
                                                             psf_determiner_list,
                                                             self.metadata,
                                                             flagKey=flag_key)
        except Exception as e:
            self.log.warning('Failed to determine psf for visit %d, detector %d: %s',
                             visit, detector, e)
            return None, None, measured_src

        # Set the psf in the exposure for measurement/aperture corrections.
        exposure.setPsf(psf)

        # At this point, we need to transfer the psf used flag from the selection
        # catalog to the measurement catalog.
        matched_selected, matched_measured = esutil.numpy_util.match(
            selected_src['id'],
            measured_src['id']
        )
        measured_used = np.zeros(len(measured_src), dtype=bool)
        measured_used[matched_measured] = selected_src['calib_psf_used'][matched_selected]
        measured_src['calib_psf_used'] = measured_used

        # Next, we do the measurement on all the psf candidate, used, and reserved stars.
        try:
            self.measurement.run(measCat=measured_src, exposure=exposure)
        except Exception as e:
            self.log.warning('Failed to make measurements for visit %d, detector %d: %s',
                             visit, detector, e)
            return psf, None, measured_src

        # And finally the ap corr map.
        try:
            ap_corr_map = self.measure_ap_corr.run(exposure=exposure,
                                                   catalog=measured_src).apCorrMap
        except Exception as e:
            self.log.warning('Failed to compute aperture corrections for visit %d, detector %d: %s',
                             visit, detector, e)
            return psf, None, measured_src

        self.apply_ap_corr.run(catalog=measured_src, apCorrMap=ap_corr_map)

        return psf, ap_corr_map, measured_src