lsst.pipe.tasks gb50b65b01f+d769758449
Loading...
Searching...
No Matches
finalizeCharacterization.py
Go to the documentation of this file.
1# This file is part of pipe_tasks.
2#
3# Developed for the LSST Data Management System.
4# This product includes software developed by the LSST Project
5# (https://www.lsst.org).
6# See the COPYRIGHT file at the top-level directory of this distribution
7# for details of code ownership.
8#
9# This program is free software: you can redistribute it and/or modify
10# it under the terms of the GNU General Public License as published by
11# the Free Software Foundation, either version 3 of the License, or
12# (at your option) any later version.
13#
14# This program is distributed in the hope that it will be useful,
15# but WITHOUT ANY WARRANTY; without even the implied warranty of
16# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17# GNU General Public License for more details.
18#
19# You should have received a copy of the GNU General Public License
20# along with this program. If not, see <https://www.gnu.org/licenses/>.
21
22"""Task to run a finalized image characterization, using additional data.
23"""
24
25__all__ = ['FinalizeCharacterizationConnections',
26 'FinalizeCharacterizationConfig',
27 'FinalizeCharacterizationTask']
28
29import numpy as np
30import esutil
31import pandas as pd
32
33import lsst.pex.config as pexConfig
34import lsst.pipe.base as pipeBase
35import lsst.daf.base as dafBase
36import lsst.afw.table as afwTable
37import lsst.meas.algorithms as measAlg
38import lsst.meas.extensions.piff.piffPsfDeterminer # noqa: F401
39from lsst.meas.algorithms import MeasureApCorrTask
40from lsst.meas.base import SingleFrameMeasurementTask, ApplyApCorrTask
41from lsst.meas.algorithms.sourceSelector import sourceSelectorRegistry
42
43from .reserveIsolatedStars import ReserveIsolatedStarsTask
44
45
class FinalizeCharacterizationConnections(pipeBase.PipelineTaskConnections,
                                          dimensions=('instrument', 'visit',),
                                          defaultTemplates={}):
    """Connections for FinalizeCharacterizationTask.

    Inputs are per-detector src catalogs and calexps for one visit, plus
    per-tract isolated-star catalogs/sources; outputs are a per-visit
    exposure catalog of finalized PSF models + aperture corrections and a
    per-visit source measurement table.
    """
    src_schema = pipeBase.connectionTypes.InitInput(
        doc='Input schema used for src catalogs.',
        name='src_schema',
        storageClass='SourceCatalog',
    )
    srcs = pipeBase.connectionTypes.Input(
        doc='Source catalogs for the visit',
        name='src',
        storageClass='SourceCatalog',
        dimensions=('instrument', 'visit', 'detector'),
        deferLoad=True,
        multiple=True,
    )
    calexps = pipeBase.connectionTypes.Input(
        doc='Calexps for the visit',
        name='calexp',
        storageClass='ExposureF',
        dimensions=('instrument', 'visit', 'detector'),
        deferLoad=True,
        multiple=True,
    )
    isolated_star_cats = pipeBase.connectionTypes.Input(
        doc=('Catalog of isolated stars with average positions, number of associated '
             'sources, and indexes to the isolated_star_sources catalogs.'),
        name='isolated_star_cat',
        storageClass='DataFrame',
        dimensions=('instrument', 'tract', 'skymap'),
        deferLoad=True,
        multiple=True,
    )
    isolated_star_sources = pipeBase.connectionTypes.Input(
        doc=('Catalog of isolated star sources with sourceIds, and indexes to the '
             'isolated_star_cats catalogs.'),
        name='isolated_star_sources',
        storageClass='DataFrame',
        dimensions=('instrument', 'tract', 'skymap'),
        deferLoad=True,
        multiple=True,
    )
    finalized_psf_ap_corr_cat = pipeBase.connectionTypes.Output(
        doc=('Per-visit finalized psf models and aperture corrections. This '
             'catalog uses detector id for the id and are sorted for fast '
             'lookups of a detector.'),
        name='finalized_psf_ap_corr_catalog',
        storageClass='ExposureCatalog',
        dimensions=('instrument', 'visit'),
    )
    finalized_src_table = pipeBase.connectionTypes.Output(
        doc=('Per-visit catalog of measurements for psf/flag/etc.'),
        name='finalized_src_table',
        storageClass='DataFrame',
        dimensions=('instrument', 'visit'),
    )
102
103
class FinalizeCharacterizationConfig(pipeBase.PipelineTaskConfig,
                                     pipelineConnections=FinalizeCharacterizationConnections):
    """Configuration for FinalizeCharacterizationTask."""
    source_selector = sourceSelectorRegistry.makeField(
        doc="How to select sources",
        default="science"
    )
    id_column = pexConfig.Field(
        doc='Name of column in isolated_star_sources with source id.',
        dtype=str,
        default='sourceId',
    )
    reserve_selection = pexConfig.ConfigurableField(
        target=ReserveIsolatedStarsTask,
        doc='Task to select reserved stars',
    )
    make_psf_candidates = pexConfig.ConfigurableField(
        target=measAlg.MakePsfCandidatesTask,
        doc='Task to make psf candidates from selected stars.',
    )
    psf_determiner = measAlg.psfDeterminerRegistry.makeField(
        'PSF Determination algorithm',
        default='piff'
    )
    measurement = pexConfig.ConfigurableField(
        target=SingleFrameMeasurementTask,
        doc='Measure sources for aperture corrections'
    )
    measure_ap_corr = pexConfig.ConfigurableField(
        target=MeasureApCorrTask,
        doc="Subtask to measure aperture corrections"
    )
    apply_ap_corr = pexConfig.ConfigurableField(
        target=ApplyApCorrTask,
        doc="Subtask to apply aperture corrections"
    )

    def setDefaults(self):
        super().setDefaults()

        source_selector = self.source_selector['science']
        source_selector.setDefaults()

        # We use the source selector only to select out flagged objects
        # and signal-to-noise. Isolated, unresolved sources are handled
        # by the isolated star catalog.

        source_selector.doFlags = True
        source_selector.doSignalToNoise = True
        source_selector.doFluxLimit = False
        source_selector.doUnresolved = False
        source_selector.doIsolated = False

        source_selector.signalToNoise.minimum = 20.0
        source_selector.signalToNoise.maximum = 1000.0

        source_selector.signalToNoise.fluxField = 'base_GaussianFlux_instFlux'
        source_selector.signalToNoise.errField = 'base_GaussianFlux_instFluxErr'

        source_selector.flags.bad = ['base_PixelFlags_flag_edge',
                                     'base_PixelFlags_flag_interpolatedCenter',
                                     'base_PixelFlags_flag_saturatedCenter',
                                     'base_PixelFlags_flag_crCenter',
                                     'base_PixelFlags_flag_bad',
                                     'base_PixelFlags_flag_interpolated',
                                     'base_PixelFlags_flag_saturated',
                                     'slot_Centroid_flag',
                                     'base_GaussianFlux_flag']

        # Configure aperture correction to select only high s/n sources (that
        # were used in the psf modeling) to avoid background problems when
        # computing the aperture correction map.
        self.measure_ap_corr.sourceSelector = 'science'

        ap_selector = self.measure_ap_corr.sourceSelector['science']
        # We do not need to filter flags or unresolved because we have used
        # the filtered isolated stars as an input
        ap_selector.doFlags = False
        ap_selector.doUnresolved = False

        # Local imports register the measurement plugins used below.
        import lsst.meas.modelfit  # noqa: F401
        import lsst.meas.extensions.photometryKron  # noqa: F401
        import lsst.meas.extensions.convolved  # noqa: F401
        import lsst.meas.extensions.gaap  # noqa: F401
        import lsst.meas.extensions.shapeHSM  # noqa: F401

        # Set up measurement defaults
        self.measurement.plugins.names = [
            'base_PsfFlux',
            'base_GaussianFlux',
            'modelfit_DoubleShapeletPsfApprox',
            'modelfit_CModel',
            'ext_photometryKron_KronFlux',
            'ext_convolved_ConvolvedFlux',
            'ext_gaap_GaapFlux',
            'ext_shapeHSM_HsmShapeRegauss',
            'ext_shapeHSM_HsmSourceMoments',
            'ext_shapeHSM_HsmPsfMoments',
            'ext_shapeHSM_HsmSourceMomentsRound',
        ]
        self.measurement.slots.modelFlux = 'modelfit_CModel'
        self.measurement.plugins['ext_convolved_ConvolvedFlux'].seeing.append(8.0)
        self.measurement.plugins['ext_gaap_GaapFlux'].sigmas = [
            0.5,
            0.7,
            1.0,
            1.5,
            2.5,
            3.0
        ]
        self.measurement.plugins['ext_gaap_GaapFlux'].doPsfPhotometry = True
        self.measurement.slots.shape = 'ext_shapeHSM_HsmSourceMoments'
        self.measurement.slots.psfShape = 'ext_shapeHSM_HsmPsfMoments'
        self.measurement.plugins['ext_shapeHSM_HsmShapeRegauss'].deblendNChild = ""

        # Keep track of which footprints contain streaks
        self.measurement.plugins['base_PixelFlags'].masksFpAnywhere = ['STREAK']
        self.measurement.plugins['base_PixelFlags'].masksFpCenter = ['STREAK']

        # Turn off slot setting for measurement for centroid and shape
        # (for which we use the input src catalog measurements)
        self.measurement.slots.centroid = None
        self.measurement.slots.apFlux = None
        self.measurement.slots.calibFlux = None

        # Allow the convolved and GAaP flux measurements to fail the
        # aperture-correction fit without aborting the task.
        names = self.measurement.plugins['ext_convolved_ConvolvedFlux'].getAllResultNames()
        self.measure_ap_corr.allowFailure += names
        names = self.measurement.plugins["ext_gaap_GaapFlux"].getAllGaapResultNames()
        self.measure_ap_corr.allowFailure += names
234
class FinalizeCharacterizationTask(pipeBase.PipelineTask):
    """Run final characterization on exposures."""
    ConfigClass = FinalizeCharacterizationConfig
    _DefaultName = 'finalize_characterization'

    def __init__(self, initInputs=None, **kwargs):
        super().__init__(initInputs=initInputs, **kwargs)

        # Build the output schema/mapper from the input src schema.
        # NOTE(review): the left-hand side of this assignment was lost in the
        # rendered source (a line is missing between the super().__init__ call
        # and `initInputs['src_schema'].schema`); it is reconstructed here from
        # the attributes used in compute_psf_and_ap_corr_map
        # (self.schema_mapper, self.schema) — confirm against upstream.
        self.schema_mapper, self.schema = self._make_output_schema_mapper(
            initInputs['src_schema'].schema
        )

        self.makeSubtask('reserve_selection')
        self.makeSubtask('source_selector')
        self.makeSubtask('make_psf_candidates')
        self.makeSubtask('psf_determiner')
        self.makeSubtask('measurement', schema=self.schema)
        self.makeSubtask('measure_ap_corr', schema=self.schema)
        self.makeSubtask('apply_ap_corr', schema=self.schema)

        # Only log warning and fatal errors from the source_selector
        self.source_selector.log.setLevel(self.source_selector.log.WARN)

    def runQuantum(self, butlerQC, inputRefs, outputRefs):
        input_handle_dict = butlerQC.get(inputRefs)

        band = butlerQC.quantum.dataId['band']
        visit = butlerQC.quantum.dataId['visit']

        src_dict_temp = {handle.dataId['detector']: handle
                         for handle in input_handle_dict['srcs']}
        calexp_dict_temp = {handle.dataId['detector']: handle
                            for handle in input_handle_dict['calexps']}
        isolated_star_cat_dict_temp = {handle.dataId['tract']: handle
                                       for handle in input_handle_dict['isolated_star_cats']}
        isolated_star_source_dict_temp = {handle.dataId['tract']: handle
                                          for handle in input_handle_dict['isolated_star_sources']}
        # TODO: Sort until DM-31701 is done and we have deterministic
        # dataset ordering.
        src_dict = {detector: src_dict_temp[detector] for
                    detector in sorted(src_dict_temp.keys())}
        calexp_dict = {detector: calexp_dict_temp[detector] for
                       detector in sorted(calexp_dict_temp.keys())}
        isolated_star_cat_dict = {tract: isolated_star_cat_dict_temp[tract] for
                                  tract in sorted(isolated_star_cat_dict_temp.keys())}
        isolated_star_source_dict = {tract: isolated_star_source_dict_temp[tract] for
                                     tract in sorted(isolated_star_source_dict_temp.keys())}

        struct = self.run(visit,
                          band,
                          isolated_star_cat_dict,
                          isolated_star_source_dict,
                          src_dict,
                          calexp_dict)

        butlerQC.put(struct.psf_ap_corr_cat,
                     outputRefs.finalized_psf_ap_corr_cat)
        butlerQC.put(pd.DataFrame(struct.output_table),
                     outputRefs.finalized_src_table)

    def run(self, visit, band, isolated_star_cat_dict, isolated_star_source_dict, src_dict, calexp_dict):
        """
        Run the FinalizeCharacterizationTask.

        Parameters
        ----------
        visit : `int`
            Visit number.  Used in the output catalogs.
        band : `str`
            Band name.  Used to select reserved stars.
        isolated_star_cat_dict : `dict`
            Per-tract dict of isolated star catalog handles.
        isolated_star_source_dict : `dict`
            Per-tract dict of isolated star source catalog handles.
        src_dict : `dict`
            Per-detector dict of src catalog handles.
        calexp_dict : `dict`
            Per-detector dict of calibrated exposure handles.

        Returns
        -------
        struct : `lsst.pipe.base.struct`
            Struct with outputs for persistence.
        """
        # We do not need the isolated star table in this task.
        # However, it is used in tests to confirm consistency of indexes.
        _, isolated_source_table = self.concat_isolated_star_cats(
            band,
            isolated_star_cat_dict,
            isolated_star_source_dict
        )

        exposure_cat_schema = afwTable.ExposureTable.makeMinimalSchema()
        exposure_cat_schema.addField('visit', type='L', doc='Visit number')

        metadata = dafBase.PropertyList()
        metadata.add("COMMENT", "Catalog id is detector id, sorted.")
        metadata.add("COMMENT", "Only detectors with data have entries.")

        psf_ap_corr_cat = afwTable.ExposureCatalog(exposure_cat_schema)
        psf_ap_corr_cat.setMetadata(metadata)

        measured_src_tables = []

        for detector in src_dict:
            src = src_dict[detector].get()
            exposure = calexp_dict[detector].get()

            psf, ap_corr_map, measured_src = self.compute_psf_and_ap_corr_map(
                visit,
                detector,
                exposure,
                src,
                isolated_source_table
            )

            # And now we package it together...
            record = psf_ap_corr_cat.addNew()
            record['id'] = int(detector)
            record['visit'] = visit
            if psf is not None:
                record.setPsf(psf)
            if ap_corr_map is not None:
                record.setApCorrMap(ap_corr_map)

            measured_src['visit'][:] = visit
            measured_src['detector'][:] = detector

            measured_src_tables.append(measured_src.asAstropy().as_array())

        measured_src_table = np.concatenate(measured_src_tables)

        return pipeBase.Struct(psf_ap_corr_cat=psf_ap_corr_cat,
                               output_table=measured_src_table)

    def _make_output_schema_mapper(self, input_schema):
        """Make the schema mapper from the input schema to the output schema.

        Parameters
        ----------
        input_schema : `lsst.afw.table.Schema`
            Input schema.

        Returns
        -------
        mapper : `lsst.afw.table.SchemaMapper`
            Schema mapper
        output_schema : `lsst.afw.table.Schema`
            Output schema (with alias map)
        """
        mapper = afwTable.SchemaMapper(input_schema)
        mapper.addMinimalSchema(afwTable.SourceTable.makeMinimalSchema())
        mapper.addMapping(input_schema['slot_Centroid_x'].asKey())
        mapper.addMapping(input_schema['slot_Centroid_y'].asKey())

        # The aperture fields may be used by the psf determiner.
        aper_fields = input_schema.extract('base_CircularApertureFlux_*')
        for field, item in aper_fields.items():
            mapper.addMapping(item.key)

        # The following two may be redundant, but then the mapping is a no-op.
        apflux_fields = input_schema.extract('slot_ApFlux_*')
        for field, item in apflux_fields.items():
            mapper.addMapping(item.key)

        calibflux_fields = input_schema.extract('slot_CalibFlux_*')
        for field, item in calibflux_fields.items():
            mapper.addMapping(item.key)

        # Record the flux/err used for psf selection under stable names.
        mapper.addMapping(
            input_schema[self.config.source_selector.active.signalToNoise.fluxField].asKey(),
            'calib_psf_selection_flux')
        mapper.addMapping(
            input_schema[self.config.source_selector.active.signalToNoise.errField].asKey(),
            'calib_psf_selection_flux_err')

        output_schema = mapper.getOutputSchema()

        output_schema.addField(
            'calib_psf_candidate',
            type='Flag',
            doc=('set if the source was a candidate for PSF determination, '
                 'as determined from FinalizeCharacterizationTask.'),
        )
        output_schema.addField(
            'calib_psf_reserved',
            type='Flag',
            doc=('set if source was reserved from PSF determination by '
                 'FinalizeCharacterizationTask.'),
        )
        output_schema.addField(
            'calib_psf_used',
            type='Flag',
            doc=('set if source was used in the PSF determination by '
                 'FinalizeCharacterizationTask.'),
        )
        output_schema.addField(
            'visit',
            type=np.int64,
            doc='Visit number for the sources.',
        )
        output_schema.addField(
            'detector',
            type=np.int32,
            doc='Detector number for the sources.',
        )

        # Carry over only the slot aliases the output schema needs.
        alias_map = input_schema.getAliasMap()
        alias_map_output = afwTable.AliasMap()
        alias_map_output.set('slot_Centroid', alias_map.get('slot_Centroid'))
        alias_map_output.set('slot_ApFlux', alias_map.get('slot_ApFlux'))
        alias_map_output.set('slot_CalibFlux', alias_map.get('slot_CalibFlux'))

        output_schema.setAliasMap(alias_map_output)

        return mapper, output_schema

    def _make_selection_schema_mapper(self, input_schema):
        """Make the schema mapper from the input schema to the selection schema.

        Parameters
        ----------
        input_schema : `lsst.afw.table.Schema`
            Input schema.

        Returns
        -------
        mapper : `lsst.afw.table.SchemaMapper`
            Schema mapper
        selection_schema : `lsst.afw.table.Schema`
            Selection schema (with alias map)
        """
        mapper = afwTable.SchemaMapper(input_schema)
        mapper.addMinimalSchema(input_schema)

        selection_schema = mapper.getOutputSchema()

        selection_schema.setAliasMap(input_schema.getAliasMap())

        return mapper, selection_schema

    def concat_isolated_star_cats(self, band, isolated_star_cat_dict, isolated_star_source_dict):
        """
        Concatenate isolated star catalogs and make reserve selection.

        Parameters
        ----------
        band : `str`
            Band name.  Used to select reserved stars.
        isolated_star_cat_dict : `dict`
            Per-tract dict of isolated star catalog handles.
        isolated_star_source_dict : `dict`
            Per-tract dict of isolated star source catalog handles.

        Returns
        -------
        isolated_table : `np.ndarray` (N,)
            Table of isolated stars, with indexes to isolated sources.
        isolated_source_table : `np.ndarray` (M,)
            Table of isolated sources, with indexes to isolated stars.
        """
        isolated_tables = []
        isolated_sources = []
        merge_cat_counter = 0
        merge_source_counter = 0

        for tract in isolated_star_cat_dict:
            df_cat = isolated_star_cat_dict[tract].get()
            table_cat = df_cat.to_records()

            df_source = isolated_star_source_dict[tract].get(
                parameters={'columns': [self.config.id_column,
                                        'obj_index']}
            )
            table_source = df_source.to_records()

            # Cut isolated star table to those observed in this band, and adjust indexes
            (use_band,) = (table_cat[f'nsource_{band}'] > 0).nonzero()

            if len(use_band) == 0:
                # There are no sources in this band in this tract.
                self.log.info("No sources found in %s band in tract %d.", band, tract)
                continue

            # With the following matching:
            #   table_source[b] <-> table_cat[use_band[a]]
            obj_index = table_source['obj_index'][:]
            a, b = esutil.numpy_util.match(use_band, obj_index)

            # Update indexes and cut to band-selected stars/sources
            table_source['obj_index'][b] = a
            _, index_new = np.unique(a, return_index=True)
            table_cat[f'source_cat_index_{band}'][use_band] = index_new

            # After the following cuts, the catalogs have the following properties:
            # - table_cat only contains isolated stars that have at least one source
            #   in ``band``.
            # - table_source only contains ``band`` sources.
            # - The slice table_cat["source_cat_index_{band}"]: table_cat["source_cat_index_{band}"]
            #   + table_cat["nsource_{band}]
            #   applied to table_source will give all the sources associated with the star.
            # - For each source, table_source["obj_index"] points to the index of the associated
            #   isolated star.
            table_source = table_source[b]
            table_cat = table_cat[use_band]

            # Add reserved flag column to tables
            table_cat = np.lib.recfunctions.append_fields(
                table_cat,
                'reserved',
                np.zeros(table_cat.size, dtype=bool),
                usemask=False
            )
            table_source = np.lib.recfunctions.append_fields(
                table_source,
                'reserved',
                np.zeros(table_source.size, dtype=bool),
                usemask=False
            )

            # Get reserve star flags
            table_cat['reserved'][:] = self.reserve_selection.run(
                len(table_cat),
                extra=f'{band}_{tract}',
            )
            table_source['reserved'][:] = table_cat['reserved'][table_source['obj_index']]

            # Offset indexes to account for tract merging
            table_cat[f'source_cat_index_{band}'] += merge_source_counter
            table_source['obj_index'] += merge_cat_counter

            isolated_tables.append(table_cat)
            isolated_sources.append(table_source)

            merge_cat_counter += len(table_cat)
            merge_source_counter += len(table_source)

        isolated_table = np.concatenate(isolated_tables)
        isolated_source_table = np.concatenate(isolated_sources)

        return isolated_table, isolated_source_table

    def compute_psf_and_ap_corr_map(self, visit, detector, exposure, src, isolated_source_table):
        """Compute psf model and aperture correction map for a single exposure.

        Parameters
        ----------
        visit : `int`
            Visit number (for logging).
        detector : `int`
            Detector number (for logging).
        exposure : `lsst.afw.image.ExposureF`
        src : `lsst.afw.table.SourceCatalog`
        isolated_source_table : `np.ndarray`

        Returns
        -------
        psf : `lsst.meas.algorithms.ImagePsf`
            PSF Model
        ap_corr_map : `lsst.afw.image.ApCorrMap`
            Aperture correction map.
        measured_src : `lsst.afw.table.SourceCatalog`
            Updated source catalog with measurements, flags and aperture corrections.
        """
        # Apply source selector (s/n, flags, etc.)
        good_src = self.source_selector.selectSources(src)

        # Cut down input src to the selected sources
        # We use a separate schema/mapper here than for the output/measurement catalog because of
        # clashes between fields that were previously run and those that need to be rerun with
        # the new psf model.  This may be slightly inefficient but keeps input
        # and output values cleanly separated.
        selection_mapper, selection_schema = self._make_selection_schema_mapper(src.schema)

        selected_src = afwTable.SourceCatalog(selection_schema)
        selected_src.reserve(good_src.selected.sum())
        selected_src.extend(src[good_src.selected], mapper=selection_mapper)

        # The calib flags have been copied from the input table,
        # and we reset them here just to ensure they aren't propagated.
        selected_src['calib_psf_candidate'] = np.zeros(len(selected_src), dtype=bool)
        selected_src['calib_psf_used'] = np.zeros(len(selected_src), dtype=bool)
        selected_src['calib_psf_reserved'] = np.zeros(len(selected_src), dtype=bool)

        # Find the isolated sources and set flags
        matched_src, matched_iso = esutil.numpy_util.match(
            selected_src['id'],
            isolated_source_table[self.config.id_column]
        )

        matched_arr = np.zeros(len(selected_src), dtype=bool)
        matched_arr[matched_src] = True
        selected_src['calib_psf_candidate'] = matched_arr

        reserved_arr = np.zeros(len(selected_src), dtype=bool)
        reserved_arr[matched_src] = isolated_source_table['reserved'][matched_iso]
        selected_src['calib_psf_reserved'] = reserved_arr

        selected_src = selected_src[selected_src['calib_psf_candidate']].copy(deep=True)

        # Make the measured source catalog as well, based on the selected catalog.
        measured_src = afwTable.SourceCatalog(self.schema)
        measured_src.reserve(len(selected_src))
        measured_src.extend(selected_src, mapper=self.schema_mapper)

        # We need to copy over the calib_psf flags because they were not in the mapper
        measured_src['calib_psf_candidate'] = selected_src['calib_psf_candidate']
        measured_src['calib_psf_reserved'] = selected_src['calib_psf_reserved']

        # Select the psf candidates from the selection catalog
        try:
            psf_selection_result = self.make_psf_candidates.run(selected_src, exposure=exposure)
        except Exception as e:
            self.log.warning('Failed to make psf candidates for visit %d, detector %d: %s',
                             visit, detector, e)
            return None, None, measured_src

        psf_cand_cat = psf_selection_result.goodStarCat

        # Make list of psf candidates to send to the determiner
        # (omitting those marked as reserved)
        psf_determiner_list = [cand for cand, use
                               in zip(psf_selection_result.psfCandidates,
                                      ~psf_cand_cat['calib_psf_reserved']) if use]
        flag_key = psf_cand_cat.schema['calib_psf_used'].asKey()
        try:
            # NOTE(review): the middle positional argument here was lost in the
            # rendered source (a line is missing between psf_determiner_list
            # and flagKey=); self.metadata matches the standard
            # determinePsf(exposure, candidates, metadata, flagKey=...)
            # signature — confirm against upstream.
            psf, cell_set = self.psf_determiner.determinePsf(exposure,
                                                             psf_determiner_list,
                                                             self.metadata,
                                                             flagKey=flag_key)
        except Exception as e:
            self.log.warning('Failed to determine psf for visit %d, detector %d: %s',
                             visit, detector, e)
            return None, None, measured_src

        # Set the psf in the exposure for measurement/aperture corrections.
        exposure.setPsf(psf)

        # At this point, we need to transfer the psf used flag from the selection
        # catalog to the measurement catalog.
        matched_selected, matched_measured = esutil.numpy_util.match(
            selected_src['id'],
            measured_src['id']
        )
        measured_used = np.zeros(len(measured_src), dtype=bool)
        measured_used[matched_measured] = selected_src['calib_psf_used'][matched_selected]
        measured_src['calib_psf_used'] = measured_used

        # Next, we do the measurement on all the psf candidate, used, and reserved stars.
        try:
            self.measurement.run(measCat=measured_src, exposure=exposure)
        except Exception as e:
            self.log.warning('Failed to make measurements for visit %d, detector %d: %s',
                             visit, detector, e)
            return psf, None, measured_src

        # And finally the ap corr map.
        try:
            ap_corr_map = self.measure_ap_corr.run(exposure=exposure,
                                                   catalog=measured_src).apCorrMap
        except Exception as e:
            self.log.warning('Failed to compute aperture corrections for visit %d, detector %d: %s',
                             visit, detector, e)
            return psf, None, measured_src

        self.apply_ap_corr.run(catalog=measured_src, apCorrMap=ap_corr_map)

        return psf, ap_corr_map, measured_src