import yaml
import re

import pandas as pd
import numpy as np
import astropy.units as u

from lsst.daf.persistence import doImport
from .parquetTable import MultilevelParquetTable
def init_fromDict(initDict, basePath='lsst.pipe.tasks.functors',
                  typeKey='functor', name=None):
    """Initialize an object defined in a dictionary.

    The object needs to be importable as
    '{0}.{1}'.format(basePath, initDict[typeKey]).
    The positional and keyword arguments (if any) are contained in
    "args" and "kwargs" entries in the dictionary, respectively.
    This is used in `functors.CompositeFunctor.from_yaml` to initialize
    a composite functor from a specification in a YAML file.

    Parameters
    ----------
    initDict : dictionary
        Dictionary describing object's initialization.  Must contain an
        entry keyed by ``typeKey`` that is the name of the object, relative
        to ``basePath``.
    basePath : str
        Path relative to module in which ``initDict[typeKey]`` is defined.
    typeKey : str
        Key of ``initDict`` that is the name of the object (relative to
        ``basePath``).
    name : str, optional
        Name of the functor; used only in error messages.
    """
    initDict = initDict.copy()
    pythonType = doImport('{0}.{1}'.format(basePath, initDict.pop(typeKey)))
    args = []
    if 'args' in initDict:
        args = initDict.pop('args')
        if isinstance(args, str):
            args = [args]
    try:
        element = pythonType(*args, **initDict)
    except Exception as e:
        message = (f'Error in constructing functor "{name}" of type '
                   f'{pythonType.__name__} with args: {args}')
        raise type(e)(message, e.args)
    return element
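# Illustrative usage of `init_fromDict` (a sketch; the 'functor'/'args' keys
# follow the YAML conventions used by `CompositeFunctor.from_yaml`, and `Mag`
# is defined later in this module):
#
#     spec = {'functor': 'Mag', 'args': 'modelfit_CModel', 'filt': 'HSC-G'}
#     magFunctor = init_fromDict(spec, name='cmodel_mag')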
52 """Define and execute a calculation on a ParquetTable 54 The `__call__` method accepts a `ParquetTable` object, and returns the 55 result of the calculation as a single column. Each functor defines what 56 columns are needed for the calculation, and only these columns are read 57 from the `ParquetTable`. 59 The action of `__call__` consists of two steps: first, loading the 60 necessary columns from disk into memory as a `pandas.DataFrame` object; 61 and second, performing the computation on this dataframe and returning the 65 To define a new `Functor`, a subclass must define a `_func` method, 66 that takes a `pandas.DataFrame` and returns result in a `pandas.Series`. 67 In addition, it must define the following attributes 69 * `_columns`: The columns necessary to perform the calculation 70 * `name`: A name appropriate for a figure axis label 71 * `shortname`: A name appropriate for use as a dictionary key 73 On initialization, a `Functor` should declare what filter (`filt` kwarg) 74 and dataset (e.g. `'ref'`, `'meas'`, `'forced_src'`) it is intended to be 75 applied to. This enables the `_get_cols` method to extract the proper 76 columns from the parquet file. If not specified, the dataset will fall back 77 on the `_defaultDataset`attribute. If filter is not specified and `dataset` 78 is anything other than `'ref'`, then an error will be raised when trying to 79 perform the calculation. 81 As currently implemented, `Functor` is only set up to expect a 82 `ParquetTable` of the format of the `deepCoadd_obj` dataset; that is, a 83 `MultilevelParquetTable` with the levels of the column index being `filter`, 84 `dataset`, and `column`. This is defined in the `_columnLevels` attribute, 85 as well as being implicit in the role of the `filt` and `dataset` attributes 86 defined at initialization. In addition, the `_get_cols` method that reads 87 the dataframe from the `ParquetTable` will return a dataframe with column 88 index levels defined by the `_dfLevels` attribute; by default, this is 91 The `_columnLevels` and `_dfLevels` attributes should generally not need to 92 be changed, unless `_func` needs columns from multiple filters or datasets 93 to do the calculation. 94 An example of this is the `lsst.pipe.tasks.functors.Color` functor, for 95 which `_dfLevels = ('filter', 'column')`, and `_func` expects the dataframe 96 it gets to have those levels in the column index. 101 Filter upon which to do the calculation 104 Dataset upon which to do the calculation 105 (e.g., 'ref', 'meas', 'forced_src'). 109 _defaultDataset =
'ref' 110 _columnLevels = (
'filter',
'dataset',
'column')
111 _dfLevels = (
'column',)
112 _defaultNoDup =
False 114 def __init__(self, filt=None, dataset=None, noDup=None):
121 if self.
_noDup is not None:
128 """Columns required to perform calculation 130 if not hasattr(self,
'_columns'):
131 raise NotImplementedError(
'Must define columns property or _columns attribute')
    def multilevelColumns(self, parq):
        if not set(parq.columnLevels) == set(self._columnLevels):
            raise ValueError('ParquetTable does not have the expected column levels. '
                             'Got {0}; expected {1}.'.format(parq.columnLevels,
                                                             self._columnLevels))

        columnDict = {'column': self.columns,
                      'dataset': self.dataset}
        if self.filt is None:
            if 'filter' in parq.columnLevels:
                if self.dataset == 'ref':
                    columnDict['filter'] = parq.columnLevelNames['filter'][0]
                else:
                    raise ValueError("'filt' not set for functor {} (dataset {}) "
                                     "and ParquetTable contains multiple filters "
                                     "in column index. Set 'filt' or set "
                                     "'dataset' to 'ref'.".format(self.name, self.dataset))
        else:
            columnDict['filter'] = self.filt

        return parq._colsFromDict(columnDict)
    def _func(self, df, dropna=True):
        raise NotImplementedError('Must define calculation on dataframe')

    def _get_cols(self, parq):
        """Retrieve dataframe necessary for calculation.

        Returns dataframe upon which `self._func` can act.
        """
        if isinstance(parq, MultilevelParquetTable):
            columns = self.multilevelColumns(parq)
            df = parq.toDataFrame(columns=columns, droplevels=False)
            df = self._setLevels(df)
        else:
            columns = self.columns
            df = parq.toDataFrame(columns=columns)

        return df
    def _setLevels(self, df):
        levelsToDrop = [n for n in df.columns.names if n not in self._dfLevels]
        df.columns = df.columns.droplevel(levelsToDrop)
        return df

    def _dropna(self, vals):
        return vals.dropna()

    def __call__(self, parq, dropna=False):
        df = self._get_cols(parq)
        vals = self._func(df)
        if dropna:
            vals = self._dropna(vals)

        return vals

    def fail(self, df):
        return pd.Series(np.full(len(df), np.nan), index=df.index)

    @property
    def name(self):
        """Full name of functor (suitable for figure labels)
        """
        raise NotImplementedError

    @property
    def shortname(self):
        """Short name of functor (suitable for column name/dict key)
        """
        return self.name
204 """Short name of functor (suitable for column name/dict key) 210 """Perform multiple calculations at once on a catalog 212 The role of a `CompositeFunctor` is to group together computations from 213 multiple functors. Instead of returning `pandas.Series` a 214 `CompositeFunctor` returns a `pandas.Dataframe`, with the column names 215 being the keys of `funcDict`. 217 The `columns` attribute of a `CompositeFunctor` is the union of all columns 218 in all the component functors. 220 A `CompositeFunctor` does not use a `_func` method itself; rather, 221 when a `CompositeFunctor` is called, all its columns are loaded 222 at once, and the resulting dataframe is passed to the `_func` method of each component 223 functor. This has the advantage of only doing I/O (reading from parquet file) once, 224 and works because each individual `_func` method of each component functor does not 225 care if there are *extra* columns in the dataframe being passed; only that it must contain 226 *at least* the `columns` it expects. 228 An important and useful class method is `from_yaml`, which takes as argument the path to a YAML 229 file specifying a collection of functors. 233 funcs : `dict` or `list` 234 Dictionary or list of functors. If a list, then it will be converted 235 into a dictonary according to the `.shortname` attribute of each functor. 242 if type(funcs) == dict:
245 self.
funcDict = {f.shortname: f
for f
in funcs}
    def update(self, new):
        if isinstance(new, dict):
            self.funcDict.update(new)
        elif isinstance(new, CompositeFunctor):
            self.funcDict.update(new.funcDict)
        else:
            raise TypeError('Can only update with dictionary or CompositeFunctor.')

        # Make sure new functors have the same 'filt' set
        if self.filt is not None:
            self.filt = self.filt
    @property
    def columns(self):
        return list(set([x for y in [f.columns for f in self.funcDict.values()]
                         for x in y]))

    def multilevelColumns(self, parq):
        return list(set([x for y in [f.multilevelColumns(parq)
                                     for f in self.funcDict.values()]
                         for x in y]))
    def __call__(self, parq, **kwargs):
        if isinstance(parq, MultilevelParquetTable):
            columns = self.multilevelColumns(parq)
            df = parq.toDataFrame(columns=columns, droplevels=False)
            valDict = {}
            for k, f in self.funcDict.items():
                try:
                    subdf = f._setLevels(df[f.multilevelColumns(parq)])
                    valDict[k] = f._func(subdf)
                except Exception:
                    valDict[k] = f.fail(subdf)
        else:
            columns = self.columns
            df = parq.toDataFrame(columns=columns)
            valDict = {k: f._func(df) for k, f in self.funcDict.items()}

        try:
            valDf = pd.concat(valDict, axis=1)
        except TypeError:
            print([(k, type(v)) for k, v in valDict.items()])
            raise

        if kwargs.get('dropna', False):
            valDf = valDf.dropna(how='any')

        return valDf
    @classmethod
    def renameCol(cls, col, renameRules):
        if renameRules is None:
            return col
        for old, new in renameRules:
            if col.startswith(old):
                col = col.replace(old, new)
        return col

    @classmethod
    def from_file(cls, filename, **kwargs):
        with open(filename) as f:
            translationDefinition = yaml.safe_load(f)

        return cls.from_yaml(translationDefinition, **kwargs)
    @classmethod
    def from_yaml(cls, translationDefinition, **kwargs):
        funcs = {}
        for func, val in translationDefinition['funcs'].items():
            funcs[func] = init_fromDict(val, name=func)

        if 'flag_rename_rules' in translationDefinition:
            renameRules = translationDefinition['flag_rename_rules']
        else:
            renameRules = None

        if 'refFlags' in translationDefinition:
            for flag in translationDefinition['refFlags']:
                funcs[cls.renameCol(flag, renameRules)] = Column(flag, dataset='ref')

        if 'flags' in translationDefinition:
            for flag in translationDefinition['flags']:
                funcs[cls.renameCol(flag, renameRules)] = Column(flag, dataset='meas')

        return cls(funcs, **kwargs)
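# Illustrative YAML specification consumed by `from_file`/`from_yaml` (a
# sketch; the functor names and column names are examples, not a canonical
# configuration):
#
#     funcs:
#         cmodel_mag:
#             functor: Mag
#             args: modelfit_CModel
#         gr_color:
#             functor: Color
#             args: [modelfit_CModel, HSC-G, HSC-R]
#     flags:
#         - base_PixelFlags_flag_saturated
#     refFlags:
#         - merge_measurement_i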
348 """Evaluate an expression on a DataFrame, knowing what the 'mag' function means 350 Builds on `pandas.DataFrame.eval`, which parses and executes math on dataframes. 354 df : pandas.DataFrame 355 Dataframe on which to evaluate expression. 361 expr_new = re.sub(
r'mag\((\w+)\)',
r'-2.5*log(\g<1>)/log(10)', expr)
362 val = df.eval(expr_new, truediv=
True)
364 expr_new = re.sub(
r'mag\((\w+)\)',
r'-2.5*log(\g<1>_instFlux)/log(10)', expr)
365 val = df.eval(expr_new, truediv=
True)
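# For example, the expression 'mag(modelfit_CModel) - mag(base_PsfFlux)' is
# rewritten to '-2.5*log(modelfit_CModel)/log(10) - -2.5*log(base_PsfFlux)/log(10)'
# before being handed to `pandas.DataFrame.eval`; if that fails (e.g. because
# the dataframe stores the fluxes in `*_instFlux` columns), the `_instFlux`
# suffix is appended and the evaluation is retried.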
370 """Arbitrary computation on a catalog 372 Column names (and thus the columns to be loaded from catalog) are found 373 by finding all words and trying to ignore all "math-y" words. 378 Expression to evaluate, to be parsed and executed by `mag_aware_eval`. 380 _ignore_words = (
'mag',
'sin',
'cos',
'exp',
'log',
'sqrt')
392 flux_cols = re.findall(
r'mag\(\s*(\w+)\s*\)', self.
expr)
394 cols = [c
for c
in re.findall(
r'[a-zA-Z_]+', self.
expr)
if c
not in self.
_ignore_words]
397 if not re.search(
'_instFlux$', c):
398 cols.append(
'{}_instFlux'.format(c))
403 return list(set([c
for c
in cols
if c
not in not_a_col]))
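# Illustrative usage (a sketch): CustomFunctor('mag(modelfit_CModel) - mag(base_PsfFlux)')
# would load `modelfit_CModel_instFlux` and `base_PsfFlux_instFlux` and return
# the corresponding magnitude difference via `mag_aware_eval`.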
410 """Get column with specified name 430 """Return the value of the index for each object 433 columns = [
'coord_ra']
434 _defaultDataset =
'ref' 438 return pd.Series(df.index, index=df.index)
443 _allow_difference =
False 447 return pd.Series(df.index, index=df.index)
451 col =
'base_Footprint_nPix' 455 """Base class for coordinate column, in degrees 464 output = df[self.
col] * 180 / np.pi
if self.
_radians else df[self.
col]
469 """Right Ascension, in degrees 475 super().
__init__(
'coord_ra', **kwargs)
478 return super().
__call__(catalog, **kwargs)
482 """Declination, in degrees 488 super().
__init__(
'coord_dec', **kwargs)
491 return super().
__call__(catalog, **kwargs)
def fluxName(col):
    if not col.endswith('_instFlux'):
        col += '_instFlux'
    return col


def fluxErrName(col):
    if not col.endswith('_instFluxErr'):
        col += '_instFluxErr'
    return col


class Mag(Functor):
    """Compute calibrated magnitude

    Takes a `calib` argument, which returns the flux at mag=0 as
    `calib.getFluxMag0()`.  If not provided, then the default `fluxMag0` is
    63095734448.0194, which is the default for HSC.
    This default should be removed in DM-21955.

    This calculation hides warnings about invalid values and dividing by
    zero.

    As for all functors, a `dataset` and `filt` kwarg should be provided
    upon initialization.  Unlike the default `Functor`, however, the default
    dataset for a `Mag` is `'meas'`, rather than `'ref'`.

    Parameters
    ----------
    col : `str`
        Name of flux column from which to compute magnitude.  Can be parsed
        by the `lsst.pipe.tasks.functors.fluxName` function---that is, you
        can pass `'modelfit_CModel'` instead of `'modelfit_CModel_instFlux'`,
        and it will understand.
    calib : `lsst.afw.image.calib.Calib` (optional)
        Object that knows zero point.
    """
    _defaultDataset = 'meas'

    def __init__(self, col, calib=None, **kwargs):
        self.col = fluxName(col)
        self.calib = calib
        if calib is not None:
            self.fluxMag0 = calib.getFluxMag0()[0]
        else:
            # TO DO: DM-21955 Replace hard coded photometric calibration values
            self.fluxMag0 = 63095734448.0194

        super().__init__(**kwargs)

    @property
    def columns(self):
        return [self.col]

    def _func(self, df):
        with np.warnings.catch_warnings():
            np.warnings.filterwarnings('ignore', r'invalid value encountered')
            np.warnings.filterwarnings('ignore', r'divide by zero')
            return -2.5*np.log10(df[self.col] / self.fluxMag0)

    @property
    def name(self):
        return 'mag_{0}'.format(self.col)
559 """Compute calibrated magnitude uncertainty 561 Takes the same `calib` object as `lsst.pipe.tasks.functors.Mag`. 566 calib : `lsst.afw.image.calib.Calib` (optional) 567 Object that knows zero point. 572 if self.
calib is not None:
579 return [self.
col, self.
col +
'Err']
582 with np.warnings.catch_warnings():
583 np.warnings.filterwarnings(
'ignore',
r'invalid value encountered')
584 np.warnings.filterwarnings(
'ignore',
r'divide by zero')
585 fluxCol, fluxErrCol = self.
columns 586 x = df[fluxErrCol] / df[fluxCol]
588 magErr = (2.5 / np.log(10.)) * np.sqrt(x*x + y*y)
593 return super().name +
'_err' 605 _defaultDataset =
    def _func(self, df):
        with np.warnings.catch_warnings():
            np.warnings.filterwarnings('ignore', r'invalid value encountered')
            np.warnings.filterwarnings('ignore', r'divide by zero')
            return -2.5*np.log10(df[self.col1]/df[self.col2])

    @property
    def name(self):
        return '(mag_{0} - mag_{1})'.format(self.col1, self.col2)

    @property
    def shortname(self):
        return 'magDiff_{0}_{1}'.format(self.col1, self.col2)
634 """Compute the color between two filters 636 Computes color by initializing two different `Mag` 637 functors based on the `col` and filters provided, and 638 then returning the difference. 640 This is enabled by the `_func` expecting a dataframe with a 641 multilevel column index, with both `'filter'` and `'column'`, 642 instead of just `'column'`, which is the `Functor` default. 643 This is controlled by the `_dfLevels` attribute. 645 Also of note, the default dataset for `Color` is `forced_src'`, 646 whereas for `Mag` it is `'meas'`. 651 Name of flux column from which to compute; same as would be passed to 652 `lsst.pipe.tasks.functors.Mag`. 655 Filters from which to compute magnitude difference. 656 Color computed is `Mag(filt2) - Mag(filt1)`. 658 _defaultDataset =
'forced_src' 659 _dfLevels = (
'filter',
'column')
665 raise RuntimeError(
"Cannot compute Color for %s: %s - %s " % (col, filt2, filt1))
683 mag2 = self.mag2._func(df[self.filt2])
684 mag1 = self.mag1._func(df[self.filt1])
689 return [self.
mag1.col, self.
mag2.col]
697 return '{0} - {1} ({2})'.format(self.
filt2, self.
filt1, self.
col)
701 return '{0}_{1}m{2}'.format(self.
col, self.
filt2.replace(
'-',
''),
702 self.
filt1.replace(
'-',
''))
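# Illustrative usage (a sketch): Color('modelfit_CModel', 'HSC-G', 'HSC-R')
# computes the g-r color from forced CModel photometry, i.e.
# mag_CModel(HSC-G) - mag_CModel(HSC-R).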
706 """Main function of this subclass is to override the dropna=True 709 _allow_difference =
False 714 return super().
__call__(parq, dropna=
False, **kwargs)
class StarGalaxyLabeller(Labeller):
    _columns = ["base_ClassificationExtendedness_value"]
    _column = "base_ClassificationExtendedness_value"

    def _func(self, df):
        x = df[self._columns][self._column]
        mask = x.isnull()
        test = (x < 0.5).astype(int)
        test = test.mask(mask, 2)

        categories = ['galaxy', 'star', self._null_label]
        label = pd.Series(pd.Categorical.from_codes(test, categories=categories),
                          index=x.index, name='label')
        if self._force_str:
            label = label.astype(str)
        return label
class NumStarLabeller(Labeller):
    _columns = ['numStarFlags']
    labels = {"star": 0, "maybe": 1, "notStar": 2}

    def _func(self, df):
        x = df[self._columns][self._columns[0]]

        # Number of filters
        n = len(x.unique()) - 1

        labels = ['noStar', 'maybe', 'star']
        label = pd.Series(pd.cut(x, [-1, 0, n-1, n], labels=labels),
                          index=x.index, name='label')

        if self._force_str:
            label = label.astype(str)

        return label
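# The pd.cut binning above maps numStarFlags == 0 to 'noStar', values in
# (0, n-1] to 'maybe', and exactly n (flagged as a star in every filter) to
# 'star', where n is the number of filters.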
class DeconvolvedMoments(Functor):
    name = 'Deconvolved Moments'
    shortname = 'deconvolvedMoments'
    _columns = ("ext_shapeHSM_HsmSourceMoments_xx",
                "ext_shapeHSM_HsmSourceMoments_yy",
                "base_SdssShape_xx", "base_SdssShape_yy",
                "ext_shapeHSM_HsmPsfMoments_xx",
                "ext_shapeHSM_HsmPsfMoments_yy")

    def _func(self, df):
        """Calculate deconvolved moments"""
        if "ext_shapeHSM_HsmSourceMoments_xx" in df.columns:
            hsm = df["ext_shapeHSM_HsmSourceMoments_xx"] + df["ext_shapeHSM_HsmSourceMoments_yy"]
        else:
            hsm = np.ones(len(df))*np.nan
        sdss = df["base_SdssShape_xx"] + df["base_SdssShape_yy"]
        if "ext_shapeHSM_HsmPsfMoments_xx" in df.columns:
            psf = df["ext_shapeHSM_HsmPsfMoments_xx"] + df["ext_shapeHSM_HsmPsfMoments_yy"]
        else:
            raise RuntimeError('No psf shape parameter found in catalog')

        return hsm.where(np.isfinite(hsm), sdss) - psf
785 """Functor to calculate SDSS trace radius size for sources""" 786 name =
"SDSS Trace Size" 787 shortname =
'sdssTrace' 788 _columns = (
"base_SdssShape_xx",
"base_SdssShape_yy")
791 srcSize = np.sqrt(0.5*(df[
"base_SdssShape_xx"] + df[
"base_SdssShape_yy"]))
796 """Functor to calculate SDSS trace radius size difference (%) between object and psf model""" 797 name =
"PSF - SDSS Trace Size" 798 shortname =
'psf_sdssTrace' 799 _columns = (
"base_SdssShape_xx",
"base_SdssShape_yy",
800 "base_SdssShape_psf_xx",
"base_SdssShape_psf_yy")
803 srcSize = np.sqrt(0.5*(df[
"base_SdssShape_xx"] + df[
"base_SdssShape_yy"]))
804 psfSize = np.sqrt(0.5*(df[
"base_SdssShape_psf_xx"] + df[
"base_SdssShape_psf_yy"]))
805 sizeDiff = 100*(srcSize - psfSize)/(0.5*(srcSize + psfSize))
810 """Functor to calculate HSM trace radius size for sources""" 811 name =
'HSM Trace Size' 812 shortname =
'hsmTrace' 813 _columns = (
"ext_shapeHSM_HsmSourceMoments_xx",
814 "ext_shapeHSM_HsmSourceMoments_yy")
817 srcSize = np.sqrt(0.5*(df[
"ext_shapeHSM_HsmSourceMoments_xx"] +
818 df[
"ext_shapeHSM_HsmSourceMoments_yy"]))
823 """Functor to calculate HSM trace radius size difference (%) between object and psf model""" 824 name =
'PSF - HSM Trace Size' 825 shortname =
'psf_HsmTrace' 826 _columns = (
"ext_shapeHSM_HsmSourceMoments_xx",
827 "ext_shapeHSM_HsmSourceMoments_yy",
828 "ext_shapeHSM_HsmPsfMoments_xx",
829 "ext_shapeHSM_HsmPsfMoments_yy")
832 srcSize = np.sqrt(0.5*(df[
"ext_shapeHSM_HsmSourceMoments_xx"] +
833 df[
"ext_shapeHSM_HsmSourceMoments_yy"]))
834 psfSize = np.sqrt(0.5*(df[
"ext_shapeHSM_HsmPsfMoments_xx"] +
835 df[
"ext_shapeHSM_HsmPsfMoments_yy"]))
836 sizeDiff = 100*(srcSize - psfSize)/(0.5*(srcSize + psfSize))
class HsmFwhm(Functor):
    name = 'HSM Psf FWHM'
    _columns = ('ext_shapeHSM_HsmPsfMoments_xx', 'ext_shapeHSM_HsmPsfMoments_yy')
    # TODO: DM-21403 pixel scale should be computed from the CD matrix
    pixelScale = 0.168
    SIGMA2FWHM = 2*np.sqrt(2*np.log(2))

    def _func(self, df):
        return self.pixelScale*self.SIGMA2FWHM*np.sqrt(
            0.5*(df['ext_shapeHSM_HsmPsfMoments_xx'] + df['ext_shapeHSM_HsmPsfMoments_yy']))
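# Note: sqrt(0.5*(Ixx + Iyy)) is the trace radius (in pixels) of the adaptive
# moments, and SIGMA2FWHM = 2*sqrt(2*ln(2)) ~= 2.3548 is the usual Gaussian
# sigma-to-FWHM conversion factor, so the result is a FWHM in arcseconds.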
class E1(Functor):
    name = "Distortion Ellipticity (e1)"
    shortname = "Distortion"

    def __init__(self, colXX, colXY, colYY, **kwargs):
        self.colXX = colXX
        self.colXY = colXY
        self.colYY = colYY
        self._columns = [self.colXX, self.colXY, self.colYY]
        super().__init__(**kwargs)

    @property
    def columns(self):
        return [self.colXX, self.colXY, self.colYY]

    def _func(self, df):
        return (df[self.colXX] - df[self.colYY]) / (df[self.colXX] + df[self.colYY])


class E2(Functor):
    name = "Ellipticity e2"

    def __init__(self, colXX, colXY, colYY, **kwargs):
        self.colXX = colXX
        self.colXY = colXY
        self.colYY = colYY
        super().__init__(**kwargs)

    @property
    def columns(self):
        return [self.colXX, self.colXY, self.colYY]

    def _func(self, df):
        return 2*df[self.colXY] / (df[self.colXX] + df[self.colYY])


class ComputePixelScale(Functor):
    """Compute the local pixel scale from the stored CDMatrix.
    """
    def __init__(self, colCD_1_1, colCD_1_2, colCD_2_1, colCD_2_2, **kwargs):
        self.colCD_1_1 = colCD_1_1
        self.colCD_1_2 = colCD_1_2
        self.colCD_2_1 = colCD_2_1
        self.colCD_2_2 = colCD_2_2
        super().__init__(**kwargs)

    @property
    def columns(self):
        return [self.colCD_1_1, self.colCD_1_2, self.colCD_2_1, self.colCD_2_2]

    def pixelScale(self, cd11, cd12, cd21, cd22):
        """Compute the local pixel scale conversion.

        Parameters
        ----------
        cd11 : `pandas.Series`
            [1, 1] element of the local CD matrices.
        cd12 : `pandas.Series`
            [1, 2] element of the local CD matrices.
        cd21 : `pandas.Series`
            [2, 1] element of the local CD matrices.
        cd22 : `pandas.Series`
            [2, 2] element of the local CD matrices.

        Returns
        -------
        pixScale : `pandas.Series`
            Arcseconds per pixel at the location of the local WCS.
        """
        return 3600 * np.sqrt(np.fabs(cd11 * cd22 - cd12 * cd21))

    def _func(self, df):
        return self.pixelScale(df[self.colCD_1_1], df[self.colCD_1_2],
                               df[self.colCD_2_1], df[self.colCD_2_2])
953 """Convert a value in units pixels to units arcseconds. 955 name =
"Pixel scale converter" 962 colCD_2_2, **kwargs):
964 super().
__init__(colCD_1_1, colCD_1_2, colCD_2_1, colCD_2_2, **kwargs)
968 return f
"{self.col}_asArcseconds" 982 name =
'Reference Band' 983 shortname =
'refBand' 987 return [
"merge_measurement_i",
988 "merge_measurement_r",
989 "merge_measurement_z",
990 "merge_measurement_y",
991 "merge_measurement_g"]
994 def getFilterAliasName(row):
996 colName = row.idxmax()
997 return colName.replace(
'merge_measurement_',
'')
999 return df[self.
columns].apply(getFilterAliasName, axis=1)
class Photometry(Functor):
    AB_FLUX_SCALE = (0 * u.ABmag).to_value(u.nJy)
    LOG_AB_FLUX_SCALE = 12.56
    FIVE_OVER_2LOG10 = 1.085736204758129569
    # TO DO: DM-21955 Replace hard coded photometric calibration values
    COADD_ZP = 27

    def __init__(self, colFlux, colFluxErr=None, calib=None, **kwargs):
        self.vhypot = np.vectorize(self.hypot)
        self.col = colFlux
        self.colFluxErr = colFluxErr

        self.calib = calib
        if calib is not None:
            self.fluxMag0, self.fluxMag0Err = calib.getFluxMag0()
        else:
            self.fluxMag0 = 1./np.power(10, -0.4*self.COADD_ZP)
            self.fluxMag0Err = 0.

        super().__init__(**kwargs)

    @property
    def columns(self):
        return [self.col]

    @property
    def name(self):
        return 'mag_{0}'.format(self.col)

    @classmethod
    def hypot(cls, a, b):
        if np.abs(a) < np.abs(b):
            a, b = b, a
        if a == 0.:
            return 0.
        q = b/a
        return np.abs(a) * np.sqrt(1. + q*q)

    def dn2flux(self, dn, fluxMag0):
        return self.AB_FLUX_SCALE * dn / fluxMag0

    def dn2mag(self, dn, fluxMag0):
        with np.warnings.catch_warnings():
            np.warnings.filterwarnings('ignore', r'invalid value encountered')
            np.warnings.filterwarnings('ignore', r'divide by zero')
            return -2.5 * np.log10(dn/fluxMag0)

    def dn2fluxErr(self, dn, dnErr, fluxMag0, fluxMag0Err):
        retVal = self.vhypot(dn * fluxMag0Err, dnErr * fluxMag0)
        retVal *= self.AB_FLUX_SCALE / fluxMag0 / fluxMag0
        return retVal

    def dn2MagErr(self, dn, dnErr, fluxMag0, fluxMag0Err):
        retVal = self.dn2fluxErr(dn, dnErr, fluxMag0, fluxMag0Err) / self.dn2flux(dn, fluxMag0)
        return self.FIVE_OVER_2LOG10 * retVal
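# Sanity checks on the constants above: AB_FLUX_SCALE is the flux of a mag-0
# AB source in nanojansky (3631 Jy = 3.631e12 nJy, whose log10 is
# LOG_AB_FLUX_SCALE = 12.56), and FIVE_OVER_2LOG10 = 2.5/ln(10), the factor
# that converts a relative flux error sigma_f/f into a magnitude error.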
class NanoJansky(Photometry):
    def _func(self, df):
        return self.dn2flux(df[self.col], self.fluxMag0)


class NanoJanskyErr(Photometry):
    @property
    def columns(self):
        return [self.col, self.colFluxErr]

    def _func(self, df):
        retArr = self.dn2fluxErr(df[self.col], df[self.colFluxErr],
                                 self.fluxMag0, self.fluxMag0Err)
        return pd.Series(retArr, index=df.index)


class Magnitude(Photometry):
    def _func(self, df):
        return self.dn2mag(df[self.col], self.fluxMag0)


class MagnitudeErr(Photometry):
    @property
    def columns(self):
        return [self.col, self.colFluxErr]

    def _func(self, df):
        retArr = self.dn2MagErr(df[self.col], df[self.colFluxErr],
                                self.fluxMag0, self.fluxMag0Err)
        return pd.Series(retArr, index=df.index)
1091 """Base class for calibrating the specified instrument flux column using 1092 the local photometric calibration. 1097 Name of the instrument flux column. 1098 instFluxErrCol : `str` 1099 Name of the assocated error columns for ``instFluxCol``. 1100 photoCalibCol : `str` 1101 Name of local calibration column. 1102 photoCalibErrCol : `str` 1103 Error associated with ``photoCalibCol`` 1113 logNJanskyToAB = (1 * u.nJy).to_value(u.ABmag)
1128 """Convert instrument flux to nanojanskys. 1132 instFlux : `numpy.ndarray` or `pandas.Series` 1133 Array of instrument flux measurements 1134 localCalib : `numpy.ndarray` or `pandas.Series` 1135 Array of local photometric calibration estimates. 1139 calibFlux : `numpy.ndarray` or `pandas.Series` 1140 Array of calibrated flux measurements. 1142 return instFlux * localCalib
1145 """Convert instrument flux to nanojanskys. 1149 instFlux : `numpy.ndarray` or `pandas.Series` 1150 Array of instrument flux measurements 1151 instFluxErr : `numpy.ndarray` or `pandas.Series` 1152 Errors on associated ``instFlux`` values 1153 localCalib : `numpy.ndarray` or `pandas.Series` 1154 Array of local photometric calibration estimates. 1155 localCalibErr : `numpy.ndarray` or `pandas.Series` 1156 Errors on associated ``localCalib`` values 1160 calibFluxErr : `numpy.ndarray` or `pandas.Series` 1161 Errors on calibrated flux measurements. 1163 return np.hypot(instFluxErr * localCalib, instFlux * localCalibErr)
1166 """Convert instrument flux to nanojanskys. 1170 instFlux : `numpy.ndarray` or `pandas.Series` 1171 Array of instrument flux measurements 1172 localCalib : `numpy.ndarray` or `pandas.Series` 1173 Array of local photometric calibration estimates. 1177 calibMag : `numpy.ndarray` or `pandas.Series` 1178 Array of calibrated AB magnitudes. 1183 """Convert instrument flux err to nanojanskys. 1187 instFlux : `numpy.ndarray` or `pandas.Series` 1188 Array of instrument flux measurements 1189 instFluxErr : `numpy.ndarray` or `pandas.Series` 1190 Errors on associated ``instFlux`` values 1191 localCalib : `numpy.ndarray` or `pandas.Series` 1192 Array of local photometric calibration estimates. 1193 localCalibErr : `numpy.ndarray` or `pandas.Series` 1194 Errors on associated ``localCalib`` values 1198 calibMagErr: `numpy.ndarray` or `pandas.Series` 1199 Error on calibrated AB magnitudes. 1206 """Compute calibrated fluxes using the local calibration value. 1222 return f
class LocalNanojansky(LocalPhotometry):
    """Compute calibrated fluxes using the local calibration value.
    """
    @property
    def columns(self):
        return [self.instFluxCol, self.photoCalibCol]

    @property
    def name(self):
        return f'flux_{self.instFluxCol}'

    def _func(self, df):
        return self.instFluxToNanojansky(df[self.instFluxCol], df[self.photoCalibCol])


class LocalNanojanskyErr(LocalPhotometry):
    """Compute calibrated flux errors using the local calibration value.
    """
    @property
    def columns(self):
        return [self.instFluxCol, self.instFluxErrCol,
                self.photoCalibCol, self.photoCalibErrCol]

    @property
    def name(self):
        return f'fluxErr_{self.instFluxCol}'

    def _func(self, df):
        return self.instFluxErrToNanojanskyErr(df[self.instFluxCol], df[self.instFluxErrCol],
                                               df[self.photoCalibCol], df[self.photoCalibErrCol])


class LocalMagnitude(LocalPhotometry):
    """Compute calibrated AB magnitudes using the local calibration value.
    """
    @property
    def columns(self):
        return [self.instFluxCol, self.photoCalibCol]

    @property
    def name(self):
        return f'mag_{self.instFluxCol}'

    def _func(self, df):
        return self.instFluxToMagnitude(df[self.instFluxCol], df[self.photoCalibCol])


class LocalMagnitudeErr(LocalPhotometry):
    """Compute calibrated AB magnitude errors using the local calibration value.
    """
    @property
    def columns(self):
        return [self.instFluxCol, self.instFluxErrCol,
                self.photoCalibCol, self.photoCalibErrCol]

    @property
    def name(self):
        return f'magErr_{self.instFluxCol}'

    def _func(self, df):
        return self.instFluxErrToMagnitudeErr(df[self.instFluxCol], df[self.instFluxErrCol],
                                              df[self.photoCalibCol], df[self.photoCalibErrCol])