Coverage for python/lsst/ap/association/diaPipe.py: 28%
135 statements
#
# LSST Data Management System
# Copyright 2008-2016 AURA/LSST.
#
# This product includes software developed by the
# LSST Project (http://www.lsst.org/).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the LSST License Statement and
# the GNU General Public License along with this program. If not,
# see <https://www.lsstcorp.org/LegalNotices/>.
#

"""PipelineTask for associating DiaSources with previous DiaObjects.

Additionally performs forced photometry on the calibrated and difference
images at the updated locations of DiaObjects.

Currently loads directly from the Apdb rather than pre-loading.
"""

import pandas as pd

import lsst.dax.apdb as daxApdb
from lsst.meas.base import DiaObjectCalculationTask
import lsst.pex.config as pexConfig
import lsst.pipe.base as pipeBase
import lsst.pipe.base.connectionTypes as connTypes
from lsst.utils.timer import timeMethod

from lsst.ap.association import (
    AssociationTask,
    DiaForcedSourceTask,
    LoadDiaCatalogsTask,
    PackageAlertsTask)
from lsst.ap.association.ssoAssociation import SolarSystemAssociationTask

__all__ = ("DiaPipelineConfig",
           "DiaPipelineTask",
           "DiaPipelineConnections")


class DiaPipelineConnections(
        pipeBase.PipelineTaskConnections,
        dimensions=("instrument", "visit", "detector"),
        defaultTemplates={"coaddName": "deep", "fakesType": ""}):
    """Butler connections for DiaPipelineTask.
    """
    diaSourceTable = connTypes.Input(
        doc="Catalog of calibrated DiaSources.",
        name="{fakesType}{coaddName}Diff_diaSrcTable",
        storageClass="DataFrame",
        dimensions=("instrument", "visit", "detector"),
    )
    solarSystemObjectTable = connTypes.Input(
        doc="Catalog of SolarSystem objects expected to be observable in "
            "this detectorVisit.",
        name="visitSsObjects",
        storageClass="DataFrame",
        dimensions=("instrument", "visit"),
    )
    diffIm = connTypes.Input(
        doc="Difference image on which the DiaSources were detected.",
        name="{fakesType}{coaddName}Diff_differenceExp",
        storageClass="ExposureF",
        dimensions=("instrument", "visit", "detector"),
    )
    exposure = connTypes.Input(
        doc="Calibrated exposure differenced with a template image during "
            "image differencing.",
        name="{fakesType}calexp",
        storageClass="ExposureF",
        dimensions=("instrument", "visit", "detector"),
    )
    template = connTypes.Input(
        doc="Warped template used to create ``diffIm``. Not PSF matched.",
        dimensions=("instrument", "visit", "detector"),
        storageClass="ExposureF",
        name="{fakesType}{coaddName}Diff_templateExp",
    )
    apdbMarker = connTypes.Output(
        doc="Marker dataset storing the configuration of the Apdb for each "
            "visit/detector. Used to signal the completion of the pipeline.",
        name="apdb_marker",
        storageClass="Config",
        dimensions=("instrument", "visit", "detector"),
    )
    associatedDiaSources = connTypes.Output(
        doc="Optional output storing the DiaSource catalog after matching, "
            "calibration, and standardization for insertion into the Apdb.",
        name="{fakesType}{coaddName}Diff_assocDiaSrc",
        storageClass="DataFrame",
        dimensions=("instrument", "visit", "detector"),
    )
    diaForcedSources = connTypes.Output(
        doc="Optional output storing the forced sources computed at the diaObject positions.",
        name="{fakesType}{coaddName}Diff_diaForcedSrc",
        storageClass="DataFrame",
        dimensions=("instrument", "visit", "detector"),
    )
    diaObjects = connTypes.Output(
        doc="Optional output storing the updated diaObjects associated to these sources.",
        name="{fakesType}{coaddName}Diff_diaObject",
        storageClass="DataFrame",
        dimensions=("instrument", "visit", "detector"),
    )

    def __init__(self, *, config=None):
        super().__init__(config=config)
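
        # Prune optional connections that the configuration turns off: the
        # associated-catalog outputs are only produced when
        # doWriteAssociatedSources is set, and the solarSystemObjectTable
        # input is only requested when doSolarSystemAssociation is set.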
        if not config.doWriteAssociatedSources:
            self.outputs.remove("associatedDiaSources")
            self.outputs.remove("diaForcedSources")
            self.outputs.remove("diaObjects")
        if not config.doSolarSystemAssociation:
            self.inputs.remove("solarSystemObjectTable")

    def adjustQuantum(self, inputs, outputs, label, dataId):
        """Override to make adjustments to `lsst.daf.butler.DatasetRef` objects
        in the `lsst.daf.butler.core.Quantum` during the graph generation stage
        of the activator.

        This implementation checks to make sure that the filters in the dataset
        are compatible with AP processing as set by the Apdb/DPDD schema.

        Parameters
        ----------
        inputs : `dict`
            Dictionary whose keys are an input (regular or prerequisite)
            connection name and whose values are a tuple of the connection
            instance and a collection of associated `DatasetRef` objects.
            The exact type of the nested collections is unspecified; it can be
            assumed to be multi-pass iterable and support `len` and ``in``, but
            it should not be mutated in place. In contrast, the outer
            dictionaries are guaranteed to be temporary copies that are true
            `dict` instances, and hence may be modified and even returned; this
            is especially useful for delegating to `super` (see notes below).
        outputs : `dict`
            Dict of output datasets, with the same structure as ``inputs``.
        label : `str`
            Label for this task in the pipeline (should be used in all
            diagnostic messages).
        dataId : `lsst.daf.butler.DataCoordinate`
            Data ID for this quantum in the pipeline (should be used in all
            diagnostic messages).

        Returns
        -------
        adjusted_inputs : `dict`
            Dict of the same form as ``inputs`` with updated containers of
            input `DatasetRef` objects. Connections that are not changed
            should not be returned at all. Datasets may only be removed, not
            added. Nested collections may be of any multi-pass iterable type,
            and the order of iteration will set the order of iteration within
            `PipelineTask.runQuantum`.
        adjusted_outputs : `dict`
            Dict of updated output datasets, with the same structure and
            interpretation as ``adjusted_inputs``.

        Raises
        ------
        ScalarError
            Raised if any `Input` or `PrerequisiteInput` connection has
            ``multiple`` set to `False`, but multiple datasets.
        NoWorkFound
            Raised to indicate that this quantum should not be run; not enough
            datasets were found for a regular `Input` connection, and the
            quantum should be pruned or skipped.
        FileNotFoundError
            Raised to cause QuantumGraph generation to fail (with the message
            included in this exception); not enough datasets were found for a
            `PrerequisiteInput` connection.
        """
        _, refs = inputs["diffIm"]
        for ref in refs:
            if ref.dataId["band"] not in self.config.validBands:
                raise ValueError(
                    f"Requested '{ref.dataId['band']}' not in "
                    "DiaPipelineConfig.validBands. To process bands not in "
                    "the standard Rubin set (ugrizy) you must add the band to "
                    "the validBands list in DiaPipelineConfig and add the "
                    "appropriate columns to the Apdb schema.")
        return super().adjustQuantum(inputs, outputs, label, dataId)


class DiaPipelineConfig(pipeBase.PipelineTaskConfig,
                        pipelineConnections=DiaPipelineConnections):
    """Config for DiaPipelineTask.
    """
    coaddName = pexConfig.Field(
        doc="coadd name: typically one of deep, goodSeeing, or dcr",
        dtype=str,
        default="deep",
    )
    apdb = daxApdb.ApdbSql.makeField(
        doc="Database connection for storing associated DiaSources and "
            "DiaObjects. Must already be initialized.",
    )
    validBands = pexConfig.ListField(
        dtype=str,
        default=["u", "g", "r", "i", "z", "y"],
        doc="List of bands that are valid for AP processing. To process a "
            "band not on this list, the appropriate band specific columns "
            "must be added to the Apdb schema in dax_apdb.",
    )
    diaCatalogLoader = pexConfig.ConfigurableField(
        target=LoadDiaCatalogsTask,
        doc="Task to load DiaObjects and DiaSources from the Apdb.",
    )
    associator = pexConfig.ConfigurableField(
        target=AssociationTask,
        doc="Task used to associate DiaSources with DiaObjects.",
    )
    doSolarSystemAssociation = pexConfig.Field(
        dtype=bool,
        default=False,
        doc="Process SolarSystem objects through the pipeline.",
    )
    solarSystemAssociator = pexConfig.ConfigurableField(
        target=SolarSystemAssociationTask,
        doc="Task used to associate DiaSources with SolarSystemObjects.",
    )
    diaCalculation = pexConfig.ConfigurableField(
        target=DiaObjectCalculationTask,
        doc="Task to compute summary statistics for DiaObjects.",
    )
    diaForcedSource = pexConfig.ConfigurableField(
        target=DiaForcedSourceTask,
        doc="Task used to force photometer DiaObject locations in direct and "
            "difference images.",
    )
    alertPackager = pexConfig.ConfigurableField(
        target=PackageAlertsTask,
        doc="Subtask for packaging Ap data into alerts.",
    )
    doPackageAlerts = pexConfig.Field(
        dtype=bool,
        default=False,
        doc="Package Dia-data into serialized alerts for distribution and "
            "write them to disk.",
    )
    doWriteAssociatedSources = pexConfig.Field(
        dtype=bool,
        default=False,
        doc="Write out associated DiaSources, DiaForcedSources, and DiaObjects, "
            "formatted following the Science Data Model.",
    )

    def setDefaults(self):
        self.apdb.dia_object_index = "baseline"
        self.apdb.dia_object_columns = []
        self.diaCalculation.plugins = ["ap_meanPosition",
                                       "ap_nDiaSources",
                                       "ap_diaObjectFlag",
                                       "ap_meanFlux",
                                       "ap_percentileFlux",
                                       "ap_sigmaFlux",
                                       "ap_chi2Flux",
                                       "ap_madFlux",
                                       "ap_skewFlux",
                                       "ap_minMaxFlux",
                                       "ap_maxSlopeFlux",
                                       "ap_meanErrFlux",
                                       "ap_linearFit",
                                       "ap_stetsonJ",
                                       "ap_meanTotFlux",
                                       "ap_sigmaTotFlux"]


class DiaPipelineTask(pipeBase.PipelineTask):
    """Task for loading, associating and storing Difference Image Analysis
    (DIA) Objects and Sources.
    """
    ConfigClass = DiaPipelineConfig
    _DefaultName = "diaPipe"

    def __init__(self, initInputs=None, **kwargs):
        super().__init__(**kwargs)
        self.apdb = self.config.apdb.apply()
        self.makeSubtask("diaCatalogLoader")
        self.makeSubtask("associator")
        self.makeSubtask("diaCalculation")
        self.makeSubtask("diaForcedSource")
        if self.config.doPackageAlerts:
            self.makeSubtask("alertPackager")
        if self.config.doSolarSystemAssociation:
            self.makeSubtask("solarSystemAssociator")

    def runQuantum(self, butlerQC, inputRefs, outputRefs):
        inputs = butlerQC.get(inputRefs)
        expId, expBits = butlerQC.quantum.dataId.pack("visit_detector",
                                                      returnMaxBits=True)
        inputs["ccdExposureIdBits"] = expBits
        inputs["band"] = butlerQC.quantum.dataId["band"]
        if not self.config.doSolarSystemAssociation:
            inputs["solarSystemObjectTable"] = None

        outputs = self.run(**inputs)

        butlerQC.put(outputs, outputRefs)

    @timeMethod
    def run(self,
            diaSourceTable,
            solarSystemObjectTable,
            diffIm,
            exposure,
            template,
            ccdExposureIdBits,
            band):
        """Process DiaSources and DiaObjects.

        Load previous DiaObjects and their DiaSource history. Calibrate the
        values in ``diaSourceTable``. Associate new DiaSources with previous
        DiaObjects. Run forced photometry at the updated DiaObject locations.
        Store the results in the Alert Production Database (Apdb).

        Parameters
        ----------
        diaSourceTable : `pandas.DataFrame`
            Newly detected DiaSources.
        solarSystemObjectTable : `pandas.DataFrame` or `None`
            Catalog of SolarSystem objects expected to be observable in this
            detectorVisit, or `None` if solar system association is disabled.
        diffIm : `lsst.afw.image.ExposureF`
            Difference image exposure in which the sources in
            ``diaSourceTable`` were detected.
        exposure : `lsst.afw.image.ExposureF`
            Calibrated exposure differenced with a template to create
            ``diffIm``.
        template : `lsst.afw.image.ExposureF`
            Template exposure used to create ``diffIm``.
        ccdExposureIdBits : `int`
            Number of bits used for a unique ``ccdVisitId``.
        band : `str`
            The band in which the new DiaSources were detected.

        Returns
        -------
        results : `lsst.pipe.base.Struct`
            Results struct with components.

            - ``apdbMarker`` : Marker dataset to store in the Butler
              indicating that this ccdVisit has completed successfully.
              (`lsst.dax.apdb.ApdbConfig`)
            - ``associatedDiaSources`` : Catalog of newly associated
              DiaSources. (`pandas.DataFrame`)
            - ``diaForcedSources`` : Catalog of forced sources measured at the
              new and updated DiaObject locations. (`pandas.DataFrame`)
            - ``diaObjects`` : Catalog of new and previously loaded DiaObjects.
              (`pandas.DataFrame`)
        """
        # Load the DiaObjects and DiaSource history.
        loaderResult = self.diaCatalogLoader.run(diffIm, self.apdb)

        # Associate new DiaSources with existing DiaObjects.
        assocResults = self.associator.run(diaSourceTable,
                                           loaderResult.diaObjects)
        if self.config.doSolarSystemAssociation:
            ssoAssocResult = self.solarSystemAssociator.run(
                assocResults.unAssocDiaSources,
                solarSystemObjectTable,
                diffIm)
            createResults = self.createNewDiaObjects(
                ssoAssocResult.unAssocDiaSources)
            associatedDiaSources = pd.concat(
                [assocResults.matchedDiaSources,
                 ssoAssocResult.ssoAssocDiaSources,
                 createResults.diaSources])
            nTotalSsObjects = ssoAssocResult.nTotalSsObjects
            nAssociatedSsObjects = ssoAssocResult.nAssociatedSsObjects
        else:
            createResults = self.createNewDiaObjects(
                assocResults.unAssocDiaSources)
            associatedDiaSources = pd.concat(
                [assocResults.matchedDiaSources,
                 createResults.diaSources])
            nTotalSsObjects = 0
            nAssociatedSsObjects = 0

        # Record the association statistics in the task metadata.
        self._add_association_meta_data(assocResults.nUpdatedDiaObjects,
                                        assocResults.nUnassociatedDiaObjects,
                                        createResults.nNewDiaObjects,
                                        nTotalSsObjects,
                                        nAssociatedSsObjects)
        # Index the DiaSource catalog for this visit after all associations
        # have been made.
        updatedDiaObjectIds = associatedDiaSources["diaObjectId"][
            associatedDiaSources["diaObjectId"] != 0].to_numpy()
        associatedDiaSources.set_index(["diaObjectId",
                                        "filterName",
                                        "diaSourceId"],
                                       drop=False,
                                       inplace=True)

        # Append new DiaObjects and DiaSources to their previous history.
        diaObjects = pd.concat(
            [loaderResult.diaObjects,
             createResults.newDiaObjects.set_index("diaObjectId", drop=False)],
            sort=True)
        if self.testDataFrameIndex(diaObjects):
            raise RuntimeError(
                "Duplicate DiaObjects created after association. This is "
                "likely due to re-running data with an already populated "
                "Apdb. If this was not the case then there was an unexpected "
                "failure in Association while matching and creating new "
                "DiaObjects and should be reported. Exiting.")
        mergedDiaSourceHistory = pd.concat(
            [loaderResult.diaSources, associatedDiaSources],
            sort=True)
        # Test for DiaSource duplication first. If duplicates are found,
        # this likely means this is duplicate data being processed and sent
        # to the Apdb.
        if self.testDataFrameIndex(mergedDiaSourceHistory):
            raise RuntimeError(
                "Duplicate DiaSources found after association and merging "
                "with history. This is likely due to re-running data with an "
                "already populated Apdb. If this was not the case then there "
                "was an unexpected failure in Association while matching "
                "sources to objects, and should be reported. Exiting.")

        # Compute DiaObject Summary statistics from their full DiaSource
        # history.
        diaCalResult = self.diaCalculation.run(
            diaObjects,
            mergedDiaSourceHistory,
            updatedDiaObjectIds,
            [band])
        # Test for duplication in the updated DiaObjects.
        if self.testDataFrameIndex(diaCalResult.diaObjectCat):
            raise RuntimeError(
                "Duplicate DiaObjects (loaded + updated) created after "
                "DiaCalculation. This is unexpected behavior and should be "
                "reported. Exiting.")
        if self.testDataFrameIndex(diaCalResult.updatedDiaObjects):
            raise RuntimeError(
                "Duplicate DiaObjects (updated) created after "
                "DiaCalculation. This is unexpected behavior and should be "
                "reported. Exiting.")

        # Force photometer on the Difference and Calibrated exposures using
        # the new and updated DiaObject locations.
        diaForcedSources = self.diaForcedSource.run(
            diaCalResult.diaObjectCat,
            diaCalResult.updatedDiaObjects.loc[:, "diaObjectId"].to_numpy(),
            ccdExposureIdBits,
            exposure,
            diffIm)

        # Store DiaSources, updated DiaObjects, and DiaForcedSources in the
        # Apdb.
        self.apdb.store(
            exposure.getInfo().getVisitInfo().getDate(),
            diaCalResult.updatedDiaObjects,
            associatedDiaSources,
            diaForcedSources)

        if self.config.doPackageAlerts:
            if len(loaderResult.diaForcedSources) > 1:
                diaForcedSources = pd.concat(
                    [diaForcedSources, loaderResult.diaForcedSources],
                    sort=True)
                if self.testDataFrameIndex(diaForcedSources):
                    self.log.warning(
                        "Duplicate DiaForcedSources created after merge with "
                        "history and new sources. This may cause downstream "
                        "problems. Dropping duplicates.")
                    # Drop duplicates via index and keep the first appearance.
                    # Reset due to the index shape being slightly different
                    # from expected.
                    diaForcedSources = diaForcedSources.groupby(
                        diaForcedSources.index).first()
                    diaForcedSources.reset_index(drop=True, inplace=True)
                    diaForcedSources.set_index(
                        ["diaObjectId", "diaForcedSourceId"],
                        drop=False,
                        inplace=True)
            self.alertPackager.run(associatedDiaSources,
                                   diaCalResult.diaObjectCat,
                                   loaderResult.diaSources,
                                   diaForcedSources,
                                   diffIm,
                                   template,
                                   ccdExposureIdBits)

        return pipeBase.Struct(apdbMarker=self.config.apdb.value,
                               associatedDiaSources=associatedDiaSources,
                               diaForcedSources=diaForcedSources,
                               diaObjects=diaObjects,
                               )

    def createNewDiaObjects(self, unAssocDiaSources):
        """Loop through the set of DiaSources and create new DiaObjects
        for unassociated DiaSources.

        Parameters
        ----------
        unAssocDiaSources : `pandas.DataFrame`
            Set of DiaSources to create new DiaObjects from.

        Returns
        -------
        results : `lsst.pipe.base.Struct`
            Results struct containing:

            - ``diaSources`` : DiaSource catalog with updated DiaObject ids.
              (`pandas.DataFrame`)
            - ``newDiaObjects`` : Newly created DiaObjects from the
              unassociated DiaSources. (`pandas.DataFrame`)
            - ``nNewDiaObjects`` : Number of newly created diaObjects. (`int`)
        """
        if len(unAssocDiaSources) == 0:
            tmpObj = self._initialize_dia_object(0)
            newDiaObjects = pd.DataFrame(data=[],
                                         columns=tmpObj.keys())
        else:
            newDiaObjects = unAssocDiaSources["diaSourceId"].apply(
                self._initialize_dia_object)
            unAssocDiaSources["diaObjectId"] = unAssocDiaSources["diaSourceId"]
        return pipeBase.Struct(diaSources=unAssocDiaSources,
                               newDiaObjects=newDiaObjects,
                               nNewDiaObjects=len(newDiaObjects))

    def _initialize_dia_object(self, objId):
        """Create a new DiaObject with values required to be initialized by
        the Apdb.

        Parameters
        ----------
        objId : `int`
            ``diaObjectId`` value for the new DiaObject.

        Returns
        -------
        diaObject : `pandas.Series`
            Newly created DiaObject with keys:

            ``diaObjectId``
                Unique DiaObjectId (`int`).
            ``pmParallaxNdata``
                Number of data points used for parallax calculation (`int`).
            ``nearbyObj1``
                Id of a nearby Object in the Object table (`int`).
            ``nearbyObj2``
                Id of a nearby Object in the Object table (`int`).
            ``nearbyObj3``
                Id of a nearby Object in the Object table (`int`).
            ``?PSFluxNdata``
                Number of data points used to calculate point source flux
                summary statistics in each bandpass (`int`).
        """
        new_dia_object = {"diaObjectId": objId,
                          "pmParallaxNdata": 0,
                          "nearbyObj1": 0,
                          "nearbyObj2": 0,
                          "nearbyObj3": 0,
                          "flags": 0}
        for f in ["u", "g", "r", "i", "z", "y"]:
            new_dia_object["%sPSFluxNdata" % f] = 0
        return pd.Series(data=new_dia_object)

    def testDataFrameIndex(self, df):
        """Test the sorted DataFrame index for duplicates.

        Wrapped as a separate function to allow for mocking of this task in
        unit testing. The default mock return value for this test is `True`.

        Parameters
        ----------
        df : `pandas.DataFrame`
            DataFrame to test.

        Returns
        -------
        `bool`
            `True` if the DataFrame contains duplicate rows.
        """
        return df.index.has_duplicates

    def _add_association_meta_data(self,
                                   nUpdatedDiaObjects,
                                   nUnassociatedDiaObjects,
                                   nNewDiaObjects,
                                   nTotalSsObjects,
                                   nAssociatedSsObjects):
        """Store summaries of the association step in the task metadata.

        Parameters
        ----------
        nUpdatedDiaObjects : `int`
            Number of previous DiaObjects associated and updated in this
            ccdVisit.
        nUnassociatedDiaObjects : `int`
            Number of previous DiaObjects that were not associated or updated
            in this ccdVisit.
        nNewDiaObjects : `int`
            Number of newly created DiaObjects for this ccdVisit.
        nTotalSsObjects : `int`
            Number of SolarSystemObjects within the observable detector
            area.
        nAssociatedSsObjects : `int`
            Number of successfully associated SolarSystemObjects.
        """
        self.metadata.add('numUpdatedDiaObjects', nUpdatedDiaObjects)
        self.metadata.add('numUnassociatedDiaObjects', nUnassociatedDiaObjects)
        self.metadata.add('numNewDiaObjects', nNewDiaObjects)
        self.metadata.add('numTotalSolarSystemObjects', nTotalSsObjects)
        self.metadata.add('numAssociatedSsObjects', nAssociatedSsObjects)
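

# A minimal, illustrative sketch of driving the task interactively, assuming a
# Butler repo that holds the required datasets and an Apdb already created
# with the matching schema; the repo path, collection, data ID, and Apdb URL
# below are placeholders:
#
#     from lsst.daf.butler import Butler
#     butler = Butler("/repo/main", collections=["ap_verify-output"])
#     dataId = {"instrument": "DECam", "visit": 123456, "detector": 42}
#     config = DiaPipelineConfig()
#     config.apdb.db_url = "sqlite:////path/to/apdb.db"
#     task = DiaPipelineTask(config=config)
#     result = task.run(
#         diaSourceTable=butler.get("deepDiff_diaSrcTable", dataId),
#         solarSystemObjectTable=None,
#         diffIm=butler.get("deepDiff_differenceExp", dataId),
#         exposure=butler.get("calexp", dataId),
#         template=butler.get("deepDiff_templateExp", dataId),
#         ccdExposureIdBits=32,
#         band="g")
#     result.associatedDiaSources  # newly associated DiaSources (DataFrame)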