# Source listing: python/lsst/ap/association/diaCalculation.py (25% line coverage).
# This file is part of ap_association.
#
# Developed for the LSST Data Management System.
# This product includes software developed by the LSST Project
# (https://www.lsst.org).
# See the COPYRIGHT file at the top-level directory of this distribution
# for details of code ownership.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.

from collections import namedtuple

import numpy as np
import pandas as pd

from lsst.meas.base import (
    BasePlugin,
    CatalogCalculationPluginConfig,
    CatalogCalculationPlugin,
    CatalogCalculationConfig,
    CatalogCalculationTask,
    PluginRegistry,
    PluginMap)
from lsst.meas.base.catalogCalculation import CCContext
import lsst.daf.base
import lsst.pipe.base

# Enforce an error for unsafe column/array value setting in pandas.
pd.options.mode.chained_assignment = 'raise'

__all__ = ("DiaObjectCalculationPlugin", "DiaObjectCalculationPluginConfig",
           "DiaObjectCalculationTask", "DiaObjectCalculationConfig")

"""Default configuration class for DIA catalog calculation plugins. """
"""Base class for DIA catalog calculation plugins.
Task follows CatalogCalculationPlugin with modifications for use in AP.
Parameters ---------- config : `DiaObjectCalculationPlugin.ConfigClass` Plugin configuration. name : `str` The string the plugin was registered with. metadata : `lsst.daf.base.PropertySet` Plugin metadata that will be attached to the output catalog """
"""List of available plugins (`lsst.meas.base.PluginRegistry`). """
"""Add order after flux means and stds are calculated. """
"""Does the plugin operate on a single source or the whole catalog (`str`)? If the plugin operates on a single source at a time, this should be set to ``"single"``; if it expects the whoe catalog, to ``"multi"``. If the plugin is of type ``"multi"``, the `fail` method must be implemented to accept the whole catalog. If the plugin is of type ``"single"``, `fail` should accept a single source record. """
"""DiaObject column names required by the plugin in order to run and complete its calculation. DiaCalculationTask should raise an error is a plugin is instantiated without the needed column available. Input columns should be defined in the DPDD/cat/Ppdb schema. Filter dependent columns should be specified without the filter name perpended to them. eg ``PSFluxMean`` instead of ``uPSFluxMean``. """ """DiaObject column names output by the plugin. DiaCalculationTask should raise an error if another pluging is run output to the same column. Output columns should be defined in the DPDD/cat/Ppdb schema. Filter dependent columns should be specified without the filter name perpended to them. eg ``PSFluxMean`` instead of ``uPSFluxMean``. """
    def __init__(self, config, name, metadata):
        BasePlugin.__init__(self, config, name)

    def calculate(self,
                  diaObject,
                  diaSources,
                  filterDiaFluxes=None,
                  filterName=None,
                  **kwargs):
        """Perform the calculation specified by this plugin.

        This method can either be used to operate on a single catalog record
        or a whole catalog, populating it with the output defined by this
        plugin.

        Note that results may be added to catalog records as new columns, or
        may result in changes to existing values.

        Parameters
        ----------
        diaObject : `dict`
            Summary object to store values in.
        diaSources : `pandas.DataFrame`
            DataFrame representing all diaSources associated with this
            diaObject.
        filterDiaFluxes : `pandas.DataFrame`
            DataFrame representing diaSources associated with this diaObject
            that are observed in the band pass ``filterName``.
        filterName : `str`
            Simple name of the filter for the flux being calculated.
        **kwargs
            Any additional keyword arguments that may be passed to the plugin.
        """
        raise NotImplementedError()

    def fail(self, diaObject, columns, error=None):
        """Set diaObject values to nan.

        Parameters
        ----------
        diaObject : `dict`
            Summary object to store values in.
        columns : `list` of `str`
            List of string names of columns to write the failed value to.
        error : `BaseException` or `None`
            Error to pass. Kept for consistency with
            CatalogCalculationPlugin. Unused.
        """
        for colName in columns:
            diaObject[colName] = np.nan
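

# Illustrative sketch (not part of the original module): a minimal concrete
# plugin might look like the class below. The class name
# ``_ExampleMeanFluxSketch`` and the ``psFlux`` source column are hypothetical
# and used for illustration only; real plugins are additionally registered
# with the plugin registry so they can be named in
# DiaObjectCalculationConfig.plugins (e.g. "ap_meanFlux").
class _ExampleMeanFluxSketch(DiaObjectCalculationPlugin):
    """Toy plugin computing the mean point-source flux in the current filter.
    """

    ConfigClass = DiaObjectCalculationPluginConfig
    # Declared without the filter prefix, per the outputCols convention above.
    outputCols = ["PSFluxMean"]

    @classmethod
    def getExecutionOrder(cls):
        # Run at the default catalogCalculation level.
        return cls.DEFAULT_CATALOGCALCULATION

    def calculate(self, diaObject, diaSources,
                  filterDiaSources=None, filterName=None, **kwargs):
        """Compute the mean of the hypothetical ``psFlux`` column."""
        column = "%sPSFluxMean" % filterName
        # DiaObjectCalculationTask.callCompute passes the per-filter
        # DiaSources via the ``filterDiaSources`` keyword.
        if filterDiaSources is None or len(filterDiaSources) == 0:
            self.fail(diaObject, [column])
        else:
            diaObject[column] = filterDiaSources["psFlux"].mean()
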
"""Config class for the catalog calculation driver task.
Specifies which plugins will execute when the `CatalogCalculationTask` associated with this configuration is run. """
multi=True, default=["ap_meanPosition", "ap_meanFlux"], doc="Plugins to be run and their configuration")
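
# Illustrative sketch (not part of the original module): which registered
# plugins run is selected through the ``plugins`` registry field. The helper
# name ``_exampleConfigureCalculation`` is hypothetical; the plugin name is
# one of the defaults declared above, and ``.names`` is the usual
# lsst.pex.config idiom (assumed here) for a multi-select registry field.
def _exampleConfigureCalculation():
    config = DiaObjectCalculationConfig()
    # Restrict the run to the mean-position plugin only.
    config.plugins.names = ["ap_meanPosition"]
    return config
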
"""Run plugins which operate on a catalog of DIA sources.
This task facilitates running plugins which will operate on a source catalog. These plugins may do things such as classifying an object based on source record entries inserted during a measurement task.
This task differs from CatalogCaculationTask in the following ways:
-No multi mode is available for plugins. All plugins are assumed to run in single mode.
-Input and output catalog types are assumed to be `pandas.DataFrames` with columns following those used in the Ppdb.
-No schema argument is passed to the plugins. Each plugin specifies output columns and required inputs.
Parameters ---------- plugMetaData : `lsst.daf.base.PropertyList` or `None` Will be modified in-place to contain metadata about the plugins being run. If `None`, an empty `~lsst.daf.base.PropertyList` will be created. **kwargs Additional arguments passed to the superclass constructor.
Notes ----- Plugins may either take an entire catalog to work on at a time, or work on individual records. """
lsst.pipe.base.Task.__init__(self, **kwargs) if plugMetadata is None: plugMetadata = lsst.daf.base.PropertyList() self.plugMetadata = plugMetadata self.plugins = PluginMap() self.outputCols = []
self.initializePlugins()
"""Initialize the plugins according to the configuration. """
pluginType = namedtuple('pluginType', 'single') self.executionDict = {} # Read the properties for each plugin. Allocate a dictionary entry for # each run level. Verify that the plugins are above the minimum run # level for an catalogCalculation plugin. For each run level, the # plugins are sorted into either single record, or multi record groups # to later be run appropriately for executionOrder, name, config, PluginClass in sorted(self.config.plugins.apply()): if executionOrder not in self.executionDict: self.executionDict[executionOrder] = pluginType(single=[]) if PluginClass.getExecutionOrder() >= BasePlugin.DEFAULT_CATALOGCALCULATION: plug = PluginClass(config, name, metadata=self.plugMetadata)
self._validatePluginCols(plug)
self.plugins[name] = plug if plug.plugType == 'single': self.executionDict[executionOrder].single.append(plug) elif plug.plugType == 'multi': errorTuple = (PluginClass,) raise ValueError( "{} requested `multi` for execution type. `multi` is " "not supported by DiaObjectCalculationTask. Please " "use `single`.".format(*errorTuple)) else: errorTuple = (PluginClass, PluginClass.getExecutionOrder(), BasePlugin.DEFAULT_CATALOGCALCULATION) raise ValueError("{} has an execution order less than the minimum for an catalogCalculation " "plugin. Value {} : Minimum {}".format(*errorTuple))
"""Assert that output columns are not duplicated and input columns exist for dependent plugins.
Parameters ---------- plug : `lsst.ap.association.DiaCalculationPlugin` Plugin to test for output collisions and input needs. """ for inputName in plug.inputCols: if inputName not in self.outputCols: errorTuple = (plug.name, plug.getExecutionOrder(), inputName) raise ValueError( "Plugin, {} with execution order {} requires DiaObject " "column {} to exist. Check the execution order of the " "plugin and make sure it runs after a plugin creating " "the column is run.".format(*errorTuple)) for outputName in plug.outputCols: if outputName in self.outputCols: errorTuple = (plug.name, plug.getExecutionOrder(), outputName) raise ValueError( "Plugin, {} with execution order {} is attempting to " "output a column {}, however the column is already being " "produced by another plugin. Check other plugins for " "collisions with this one.".format(*errorTuple)) else: self.outputCols.append(outputName)
    def run(self,
            diaObjectCat,
            diaSourceCat,
            updatedDiaObjectIds,
            filterName):
        """The entry point for the DIA catalog calculation task.

        This method both updates the values in ``diaObjectCat`` and appends
        newly created DiaObjects to the catalog. For catalog column names see
        the lsst.cat schema definitions for the DiaObject and DiaSource tables
        (http://github.com/lsst/cat).

        Parameters
        ----------
        diaObjectCat : `pandas.DataFrame`
            DiaObjects to update values of and append new objects to.
            DataFrame should be indexed on "diaObjectId".
        diaSourceCat : `pandas.DataFrame`
            DiaSources associated with the DiaObjects in diaObjectCat.
            DataFrame should be indexed on
            ``["diaObjectId", "filterName", "diaSourceId"]``.
        updatedDiaObjectIds : `numpy.ndarray`
            Integer ids of the DiaObjects to update and create.
        filterName : `str`
            String name of the filter being processed.

        Returns
        -------
        returnStruct : `lsst.pipe.base.Struct`
            Struct containing:

            ``diaObjectCat``
                Full set of DiaObjects including both un-updated and
                updated/new DiaObjects (`pandas.DataFrame`).
            ``updatedDiaObjects``
                Catalog of DiaObjects that were updated or created by this
                task (`pandas.DataFrame`).
        """
        return self.callCompute(diaObjectCat,
                                diaSourceCat,
                                updatedDiaObjectIds,
                                filterName)
diaObjectCat, diaSourceCat, updatedDiaObjectIds, filterName): """Run each of the plugins on the catalog.
For catalog column names see the lsst.cat schema definitions for the DiaObject and DiaSource tables (http://github.com/lsst/cat).
Parameters ---------- diaObjectCat : `pandas.DataFrame` DiaObjects to update values of and append new objects to. DataFrame should be indexed on "diaObjectId" diaSourceCat : `pandas.DataFrame` DiaSources associated with the DiaObjects in diaObjectCat. DataFrame must be indexed on ["diaObjectId", "filterName", "diaSourceId"]` updatedDiaObjectIds : `numpy.ndarray` Integer ids of the DiaObjects to update and create. filterName : `str` String name of the filter being processed.
Returns ------- returnStruct : `lsst.pipe.base.Struct` Struct containing:
``diaObjectCat`` Full set of DiaObjects including both un-updated and updated/new DiaObjects (`pandas.DataFrame`). ``updatedDiaObjects`` Catalog of DiaObjects that were updated or created by this task (`pandas.DataFrame`).
Raises ------ KeyError Raises if `pandas.DataFrame` indexing is not properly set. """
        diaObjectUsed = pd.DataFrame(False,
                                     index=diaObjectCat.index,
                                     columns=["used"])

        updatedDiaObjects = []

        for objId in updatedDiaObjectIds:
            try:
                updatedDiaObjDF = diaObjectCat.loc[objId]
                updatedDiaObject = updatedDiaObjDF.to_dict()
                updatedDiaObject["diaObjectId"] = objId
                diaObjectUsed.loc[objId] = True
            except KeyError:
                updatedDiaObject = self._initialize_dia_object(objId)

            # Sub-select diaSources associated with this diaObject.
            objDiaSources = diaSourceCat.loc[objId]
            # Currently needed as DataFrames loaded from sql do not currently
            # map Null to NaN for custom queries. This can either stay here
            # or move to dax_ppdb or ap_association.
            objDiaSources = objDiaSources.replace(to_replace=[None],
                                                  value=np.nan)

            # Sub-select on diaSources observed in the current filter.
            filterObjDiaSources = objDiaSources.loc[filterName]

            for runlevel in sorted(self.executionDict):
                for plug in self.executionDict[runlevel].single:
                    with CCContext(plug, updatedDiaObject, self.log):
                        plug.calculate(diaObject=updatedDiaObject,
                                       diaSources=objDiaSources,
                                       filterDiaSources=filterObjDiaSources,
                                       filterName=filterName)

            updatedDiaObjects.append(updatedDiaObject)

        updatedDiaObjects = pd.DataFrame(data=updatedDiaObjects)

        return lsst.pipe.base.Struct(
            diaObjectCat=diaObjectCat[~diaObjectUsed["used"]].append(
                updatedDiaObjects.set_index("diaObjectId"), sort=False),
            updatedDiaObjects=updatedDiaObjects)
"""Create a new DiaObject with values required to be initialized by the Ppdb.
Parameters ---------- objid : `int` ``diaObjectId`` value for the of the new DiaObject.
Returns ------- diaObject : `dict` Newly created DiaObject with keys:
``diaObjectId`` Unique DiaObjectId (`int`). ``pmParallaxNdata`` Number of data points used for parallax calculation (`int`). ``nearbyObj1`` Id of the a nearbyObject in the Object table (`int`). ``nearbyObj2`` Id of the a nearbyObject in the Object table (`int`). ``nearbyObj3`` Id of the a nearbyObject in the Object table (`int`). ``?PSFluxData`` Number of data points used to calculate point source flux summary statistics in each bandpass (`int`). """ new_dia_object = {"diaObjectId": objId, "pmParallaxNdata": 0, "nearbyObj1": 0, "nearbyObj2": 0, "nearbyObj3": 0} for f in ["u", "g", "r", "i", "z", "y"]: new_dia_object["%sPSFluxNdata" % f] = 0 return new_dia_object |
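

# Illustrative sketch (not part of the original module): how callers are
# expected to index the inputs to ``DiaObjectCalculationTask.run``. The helper
# name ``_exampleRunSketch`` and the non-index column names ("ra", "decl",
# "psFlux") are assumptions for illustration; the real schema comes from the
# Ppdb/DPDD, and running this requires the LSST stack with the default
# plugins registered.
def _exampleRunSketch():
    # One DiaObject, indexed on "diaObjectId" as documented in ``run``.
    diaObjectCat = pd.DataFrame({"diaObjectId": [1111],
                                 "ra": [45.0],
                                 "decl": [-30.0]}).set_index("diaObjectId")
    # Two associated DiaSources, indexed on
    # ["diaObjectId", "filterName", "diaSourceId"].
    diaSourceCat = pd.DataFrame({"diaObjectId": [1111, 1111],
                                 "filterName": ["g", "g"],
                                 "diaSourceId": [1, 2],
                                 "psFlux": [10.0, 12.0]}).set_index(
        ["diaObjectId", "filterName", "diaSourceId"])

    task = DiaObjectCalculationTask()
    result = task.run(diaObjectCat,
                      diaSourceCat,
                      updatedDiaObjectIds=np.array([1111]),
                      filterName="g")
    # ``result.updatedDiaObjects`` holds only the touched DiaObjects;
    # ``result.diaObjectCat`` is the full, merged catalog.
    return result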