import yaml
import re

import pandas as pd
import numpy as np
import astropy.units as u

from lsst.daf.persistence import doImport
from .parquetTable import MultilevelParquetTable
def init_fromDict(initDict, basePath='lsst.pipe.tasks.functors', typeKey='functor'):
    """Initialize an object defined in a dictionary.

    The object needs to be importable as
    ``'{0}.{1}'.format(basePath, initDict[typeKey])``.
    The positional and keyword arguments (if any) are contained in
    "args" and "kwargs" entries in the dictionary, respectively.
    This is used in `functors.CompositeFunctor.from_yaml` to initialize
    a composite functor from a specification in a YAML file.

    Parameters
    ----------
    initDict : `dict`
        Dictionary describing object's initialization.  Must contain
        an entry keyed by ``typeKey`` that is the name of the object,
        relative to ``basePath``.
    basePath : `str`
        Path relative to module in which ``initDict[typeKey]`` is defined.
    typeKey : `str`
        Key of ``initDict`` that is the name of the object
        (relative to ``basePath``).
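
    Examples
    --------
    A minimal sketch of a specification (hypothetical column name; assumes
    the `Mag` functor defined in this module)::

        spec = {'functor': 'Mag', 'args': 'base_PsfFlux', 'filt': 'HSC-G'}
        psfMag = init_fromDict(spec)  # same as Mag('base_PsfFlux', filt='HSC-G')
    """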
    initDict = initDict.copy()
    pythonType = doImport('{0}.{1}'.format(basePath, initDict.pop(typeKey)))
    args = []
    if 'args' in initDict:
        args = initDict.pop('args')
        if isinstance(args, str):
            args = [args]

    return pythonType(*args, **initDict)
47 """Define and execute a calculation on a ParquetTable
49 The `__call__` method accepts a `ParquetTable` object, and returns the
50 result of the calculation as a single column. Each functor defines what
51 columns are needed for the calculation, and only these columns are read
52 from the `ParquetTable`.
54 The action of `__call__` consists of two steps: first, loading the
55 necessary columns from disk into memory as a `pandas.DataFrame` object;
56 and second, performing the computation on this dataframe and returning the
60 To define a new `Functor`, a subclass must define a `_func` method,
61 that takes a `pandas.DataFrame` and returns result in a `pandas.Series`.
62 In addition, it must define the following attributes
64 * `_columns`: The columns necessary to perform the calculation
65 * `name`: A name appropriate for a figure axis label
66 * `shortname`: A name appropriate for use as a dictionary key
68 On initialization, a `Functor` should declare what filter (`filt` kwarg)
69 and dataset (e.g. `'ref'`, `'meas'`, `'forced_src'`) it is intended to be
70 applied to. This enables the `_get_cols` method to extract the proper
71 columns from the parquet file. If not specified, the dataset will fall back
72 on the `_defaultDataset`attribute. If filter is not specified and `dataset`
73 is anything other than `'ref'`, then an error will be raised when trying to
74 perform the calculation.
76 As currently implemented, `Functor` is only set up to expect a
77 `ParquetTable` of the format of the `deepCoadd_obj` dataset; that is, a
78 `MultilevelParquetTable` with the levels of the column index being `filter`,
79 `dataset`, and `column`. This is defined in the `_columnLevels` attribute,
80 as well as being implicit in the role of the `filt` and `dataset` attributes
81 defined at initialization. In addition, the `_get_cols` method that reads
82 the dataframe from the `ParquetTable` will return a dataframe with column
83 index levels defined by the `_dfLevels` attribute; by default, this is
86 The `_columnLevels` and `_dfLevels` attributes should generally not need to
87 be changed, unless `_func` needs columns from multiple filters or datasets
88 to do the calculation.
89 An example of this is the `lsst.pipe.tasks.functors.Color` functor, for
90 which `_dfLevels = ('filter', 'column')`, and `_func` expects the dataframe
91 it gets to have those levels in the column index.

    Parameters
    ----------
    filt : `str`
        Filter upon which to do the calculation.

    dataset : `str`
        Dataset upon which to do the calculation
        (e.g., `'ref'`, `'meas'`, `'forced_src'`).
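
    Examples
    --------
    A minimal sketch of a subclass (hypothetical column names)::

        class FwhmPixels(Functor):
            name = 'PSF FWHM (pixels)'
            shortname = 'fwhmPix'
            _columns = ('slot_PsfShape_xx', 'slot_PsfShape_yy')

            def _func(self, df):
                sigma = np.sqrt(0.5*(df['slot_PsfShape_xx']
                                     + df['slot_PsfShape_yy']))
                return 2*np.sqrt(2*np.log(2))*sigma

        fwhmPix = FwhmPixels(filt='HSC-I', dataset='meas')
        series = fwhmPix(parq)  # parq: a MultilevelParquetTable, e.g. deepCoadd_obj
    """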
    _defaultDataset = 'ref'
    _columnLevels = ('filter', 'dataset', 'column')
    _dfLevels = ('column',)
    _defaultNoDup = False
    def __init__(self, filt=None, dataset=None, noDup=None):
        self.filt = filt
        self.dataset = dataset if dataset is not None else self._defaultDataset
        self._noDup = noDup

    @property
    def noDup(self):
        if self._noDup is not None:
            return self._noDup
        else:
            return self._defaultNoDup
123 """Columns required to perform calculation
125 if not hasattr(self,
'_columns'):
126 raise NotImplementedError(
'Must define columns property or _columns attribute')
    def multilevelColumns(self, parq):
        if not set(parq.columnLevels) == set(self._columnLevels):
            raise ValueError('ParquetTable does not have the expected column levels. ' +
                             'Got {0}; expected {1}.'.format(parq.columnLevels,
                                                             self._columnLevels))

        columnDict = {'column': self.columns,
                      'dataset': self.dataset}
        if self.filt is None:
            if 'filter' in parq.columnLevels:
                if self.dataset == 'ref':
                    columnDict['filter'] = parq.columnLevelNames['filter'][0]
                else:
                    raise ValueError("'filt' not set for functor {} ".format(self.name) +
                                     "(dataset {}) ".format(self.dataset) +
                                     "and ParquetTable " +
                                     "contains multiple filters in column index. " +
                                     "Set 'filt' or set 'dataset' to 'ref'.")
        else:
            columnDict['filter'] = self.filt

        return parq._colsFromDict(columnDict)
    def _func(self, df, dropna=True):
        raise NotImplementedError('Must define calculation on dataframe')
    def _get_cols(self, parq):
        """Retrieve dataframe necessary for calculation.

        Returns dataframe upon which `self._func` can act.
        """
        if isinstance(parq, MultilevelParquetTable):
            columns = self.multilevelColumns(parq)
            df = parq.toDataFrame(columns=columns, droplevels=False)
            df = self._setLevels(df)
        else:
            columns = self.columns
            df = parq.toDataFrame(columns=columns)

        return df
    def _setLevels(self, df):
        levelsToDrop = [n for n in df.columns.names if n not in self._dfLevels]
        df.columns = df.columns.droplevel(levelsToDrop)
        return df
    def _dropna(self, vals):
        return vals.dropna()

    def __call__(self, parq, dropna=False):
        df = self._get_cols(parq)
        try:
            vals = self._func(df)
        except Exception:
            vals = self.fail(df)
        if dropna:
            vals = self._dropna(vals)

        return vals

    def fail(self, df):
        """Return a NaN-filled Series when the calculation fails.
        """
        return pd.Series(np.full(len(df), np.nan), index=df.index)
193 """Full name of functor (suitable for figure labels)
195 return NotImplementedError
199 """Short name of functor (suitable for column name/dict key)
205 """Perform multiple calculations at once on a catalog
207 The role of a `CompositeFunctor` is to group together computations from
208 multiple functors. Instead of returning `pandas.Series` a
209 `CompositeFunctor` returns a `pandas.Dataframe`, with the column names
210 being the keys of `funcDict`.
212 The `columns` attribute of a `CompositeFunctor` is the union of all columns
213 in all the component functors.
215 A `CompositeFunctor` does not use a `_func` method itself; rather,
216 when a `CompositeFunctor` is called, all its columns are loaded
217 at once, and the resulting dataframe is passed to the `_func` method of each component
218 functor. This has the advantage of only doing I/O (reading from parquet file) once,
219 and works because each individual `_func` method of each component functor does not
220 care if there are *extra* columns in the dataframe being passed; only that it must contain
221 *at least* the `columns` it expects.
223 An important and useful class method is `from_yaml`, which takes as argument the path to a YAML
224 file specifying a collection of functors.

    Parameters
    ----------
    funcs : `dict` or `list`
        Dictionary or list of functors.  If a list, then it will be
        converted into a dictionary according to the `.shortname`
        attribute of each functor.
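
    Examples
    --------
    A sketch of direct construction (hypothetical column names; assumes the
    `Mag` and `Column` functors defined in this module)::

        funcs = CompositeFunctor({'psfMag': Mag('base_PsfFlux', filt='HSC-G'),
                                  'ra': Column('coord_ra', dataset='ref')})
        df = funcs(parq)  # DataFrame with columns 'psfMag' and 'ra'
    """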
    def __init__(self, funcs, **kwargs):
        if isinstance(funcs, dict):
            self.funcDict = funcs
        else:
            self.funcDict = {f.shortname: f for f in funcs}

        super().__init__(**kwargs)
    def update(self, new):
        if isinstance(new, dict):
            self.funcDict.update(new)
        elif isinstance(new, CompositeFunctor):
            self.funcDict.update(new.funcDict)
        else:
            raise TypeError('Can only update with dictionary or CompositeFunctor.')
        # Make sure any newly added functors share this functor's 'filt'.
        if self.filt is not None:
            for f in self.funcDict.values():
                f.filt = self.filt
    @property
    def columns(self):
        return list(set([x for y in [f.columns for f in self.funcDict.values()]
                         for x in y]))
    def multilevelColumns(self, parq):
        return list(set([x for y in [f.multilevelColumns(parq)
                                     for f in self.funcDict.values()]
                         for x in y]))
    def __call__(self, parq, **kwargs):
        if isinstance(parq, MultilevelParquetTable):
            columns = self.multilevelColumns(parq)
            df = parq.toDataFrame(columns=columns, droplevels=False)

            valDict = {}
            for k, f in self.funcDict.items():
                try:
                    subdf = f._setLevels(df[f.multilevelColumns(parq)])
                    valDict[k] = f._func(subdf)
                except Exception:
                    valDict[k] = f.fail(subdf)
        else:
            columns = self.columns
            df = parq.toDataFrame(columns=columns)
            valDict = {k: f._func(df) for k, f in self.funcDict.items()}

        try:
            valDf = pd.concat(valDict, axis=1)
        except TypeError:
            print([(k, type(v)) for k, v in valDict.items()])
            raise
        if kwargs.get('dropna', False):
            valDf = valDf.dropna(how='any')

        return valDf
    @classmethod
    def renameCol(cls, col, renameRules):
        if renameRules is None:
            return col
        for old, new in renameRules:
            if col.startswith(old):
                col = col.replace(old, new)
        return col
    @classmethod
    def from_file(cls, filename, **kwargs):
        with open(filename) as f:
            translationDefinition = yaml.safe_load(f)

        return cls.from_yaml(translationDefinition, **kwargs)
    @classmethod
    def from_yaml(cls, translationDefinition, **kwargs):
        funcs = {}
        for func, val in translationDefinition['funcs'].items():
            funcs[func] = init_fromDict(val)

        if 'flag_rename_rules' in translationDefinition:
            renameRules = translationDefinition['flag_rename_rules']
        else:
            renameRules = None

        if 'refFlags' in translationDefinition:
            for flag in translationDefinition['refFlags']:
                funcs[cls.renameCol(flag, renameRules)] = Column(flag, dataset='ref')

        if 'flags' in translationDefinition:
            for flag in translationDefinition['flags']:
                funcs[cls.renameCol(flag, renameRules)] = Column(flag, dataset='meas')

        return cls(funcs, **kwargs)
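
# A sketch of the YAML layout consumed by `from_file`/`from_yaml`
# (hypothetical functor and column names):
#
#     funcs:
#         psfMag:
#             functor: Mag
#             args: base_PsfFlux
#             filt: HSC-G
#     flag_rename_rules:
#         # list of [old, new] prefix pairs applied by `renameCol`
#         - ['base_PixelFlags_flag', 'pixelFlags']
#     refFlags:
#         - detect_isPrimary
#     flags:
#         - base_PixelFlags_flag_saturated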
343 """Evaluate an expression on a DataFrame, knowing what the 'mag' function means
345 Builds on `pandas.DataFrame.eval`, which parses and executes math on dataframes.
349 df : pandas.DataFrame
350 Dataframe on which to evaluate expression.
356 expr_new = re.sub(
r'mag\((\w+)\)',
r'-2.5*log(\g<1>)/log(10)', expr)
357 val = df.eval(expr_new, truediv=
True)
359 expr_new = re.sub(
r'mag\((\w+)\)',
r'-2.5*log(\g<1>_instFlux)/log(10)', expr)
360 val = df.eval(expr_new, truediv=
True)
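
# For example (hypothetical column names), the expression
#     'mag(modelfit_CModel) - mag(base_PsfFlux)'
# is rewritten to
#     '-2.5*log(modelfit_CModel)/log(10) - -2.5*log(base_PsfFlux)/log(10)'
# and evaluated with `df.eval`; if the bare column names are absent, the
# '_instFlux'-suffixed names are tried instead.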
365 """Arbitrary computation on a catalog
367 Column names (and thus the columns to be loaded from catalog) are found
368 by finding all words and trying to ignore all "math-y" words.
373 Expression to evaluate, to be parsed and executed by `mag_aware_eval`.
375 _ignore_words = (
'mag',
'sin',
'cos',
'exp',
'log',
'sqrt')
    @property
    def columns(self):
        flux_cols = re.findall(r'mag\(\s*(\w+)\s*\)', self.expr)

        cols = [c for c in re.findall(r'[a-zA-Z_]+', self.expr)
                if c not in self._ignore_words]
        not_a_col = []
        for c in flux_cols:
            if not re.search('_instFlux$', c):
                # Bare names inside mag() stand for their '_instFlux' columns.
                cols.append('{}_instFlux'.format(c))
                not_a_col.append(c)

        return list(set([c for c in cols if c not in not_a_col]))

    def _func(self, df):
        return mag_aware_eval(df, self.expr)
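
    # A sketch of usage (hypothetical column names; the constructor is
    # assumed to take the expression as its argument):
    #   CustomFunctor('mag(modelfit_CModel) - mag(base_PsfFlux)', filt='HSC-G')
    # loads modelfit_CModel_instFlux and base_PsfFlux_instFlux and evaluates
    # the magnitude difference via `mag_aware_eval`.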
405 """Get column with specified name
425 """Return the value of the index for each object
428 columns = [
'coord_ra']
429 _defaultDataset =
'ref'
433 return pd.Series(df.index, index=df.index)

class IDColumn(Column):
    _allow_difference = False

    def _func(self, df):
        return pd.Series(df.index, index=df.index)

class FootprintNPix(Column):
    col = 'base_Footprint_nPix'
450 """Base class for coordinate column, in degrees
465 """Right Ascension, in degrees
471 super().
__init__(
'coord_ra', **kwargs)
474 return super().
__call__(catalog, **kwargs)
478 """Declination, in degrees
484 super().
__init__(
'coord_dec', **kwargs)
487 return super().
__call__(catalog, **kwargs)

def fluxName(col):
    if not col.endswith('_instFlux'):
        col += '_instFlux'
    return col


def fluxErrName(col):
    if not col.endswith('_instFluxErr'):
        col += '_instFluxErr'
    return col
503 """Compute calibrated magnitude
505 Takes a `calib` argument, which returns the flux at mag=0
506 as `calib.getFluxMag0()`. If not provided, then the default
507 `fluxMag0` is 63095734448.0194, which is default for HSC.
508 This default should be removed in DM-21955
510 This calculation hides warnings about invalid values and dividing by zero.
512 As for all functors, a `dataset` and `filt` kwarg should be provided upon
513 initialization. Unlike the default `Functor`, however, the default dataset
514 for a `Mag` is `'meas'`, rather than `'ref'`.

    Parameters
    ----------
    col : `str`
        Name of flux column from which to compute magnitude.  Can be parsed
        by the `lsst.pipe.tasks.functors.fluxName` function; that is, you
        can pass `'modelfit_CModel'` instead of `'modelfit_CModel_instFlux'`,
        and it will understand.
    calib : `lsst.afw.image.calib.Calib`, optional
        Object that knows zero point.
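
    Examples
    --------
    A sketch of usage (hypothetical column name)::

        psfMag = Mag('base_PsfFlux', filt='HSC-G')
        mags = psfMag(parq)
    """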
    _defaultDataset = 'meas'
    def __init__(self, col, calib=None, **kwargs):
        self.col = fluxName(col)
        self.calib = calib
        if calib is not None:
            self.fluxMag0 = calib.getFluxMag0()[0]
        else:
            # TO DO: DM-21955 replace this hard-coded HSC default.
            self.fluxMag0 = 63095734448.0194

        super().__init__(**kwargs)
    def _func(self, df):
        with np.warnings.catch_warnings():
            np.warnings.filterwarnings('ignore', r'invalid value encountered')
            np.warnings.filterwarnings('ignore', r'divide by zero')
            return -2.5*np.log10(df[self.col] / self.fluxMag0)
    @property
    def name(self):
        return 'mag_{0}'.format(self.col)
555 """Compute calibrated magnitude uncertainty
557 Takes the same `calib` object as `lsst.pipe.tasks.functors.Mag`.
562 calib : `lsst.afw.image.calib.Calib` (optional)
563 Object that knows zero point.
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if self.calib is not None:
            self.fluxMag0Err = self.calib.getFluxMag0()[1]
        else:
            self.fluxMag0Err = 0.
    @property
    def columns(self):
        return [self.col, self.col + 'Err']
    def _func(self, df):
        with np.warnings.catch_warnings():
            np.warnings.filterwarnings('ignore', r'invalid value encountered')
            np.warnings.filterwarnings('ignore', r'divide by zero')
            fluxCol, fluxErrCol = self.columns
            # Relative errors of the flux and the zero point, in quadrature.
            x = df[fluxErrCol] / df[fluxCol]
            y = self.fluxMag0Err / self.fluxMag0
            magErr = (2.5 / np.log(10.)) * np.sqrt(x*x + y*y)
            return magErr
    @property
    def name(self):
        return super().name + '_err'

class MagDiff(Functor):
    """Functor to calculate magnitude difference."""
    _defaultDataset = 'meas'
    def _func(self, df):
        with np.warnings.catch_warnings():
            np.warnings.filterwarnings('ignore', r'invalid value encountered')
            np.warnings.filterwarnings('ignore', r'divide by zero')
            return -2.5*np.log10(df[self.col1]/df[self.col2])
    @property
    def name(self):
        return '(mag_{0} - mag_{1})'.format(self.col1, self.col2)
    @property
    def shortname(self):
        return 'magDiff_{0}_{1}'.format(self.col1, self.col2)
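
    # A sketch of usage (hypothetical flux columns; the constructor is
    # assumed to take the two flux column names):
    #   MagDiff('base_PsfFlux', 'modelfit_CModel', filt='HSC-G')
    # yields mag_base_PsfFlux - mag_modelfit_CModel for each row.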
630 """Compute the color between two filters
632 Computes color by initializing two different `Mag`
633 functors based on the `col` and filters provided, and
634 then returning the difference.
636 This is enabled by the `_func` expecting a dataframe with a
637 multilevel column index, with both `'filter'` and `'column'`,
638 instead of just `'column'`, which is the `Functor` default.
639 This is controlled by the `_dfLevels` attribute.
641 Also of note, the default dataset for `Color` is `forced_src'`,
642 whereas for `Mag` it is `'meas'`.

    Parameters
    ----------
    col : `str`
        Name of flux column from which to compute; same as would be passed
        to `lsst.pipe.tasks.functors.Mag`.

    filt2, filt1 : `str`
        Filters from which to compute magnitude difference.
        Color computed is `Mag(filt2) - Mag(filt1)`.
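
    Examples
    --------
    A sketch of usage (hypothetical filters and column)::

        gr = Color('modelfit_CModel', 'HSC-G', 'HSC-R')
        series = gr(parq)  # Mag('HSC-G') - Mag('HSC-R')
    """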
    _defaultDataset = 'forced_src'
    _dfLevels = ('filter', 'column')
    def __init__(self, col, filt2, filt1, **kwargs):
        self.col = fluxName(col)
        if filt2 == filt1:
            # A color requires two distinct filters.
            raise RuntimeError("Cannot compute Color for %s: %s - %s " % (col, filt2, filt1))
        self.filt2 = filt2
        self.filt1 = filt1
        self.mag2 = Mag(col, filt=filt2, **kwargs)
        self.mag1 = Mag(col, filt=filt1, **kwargs)
        super().__init__(**kwargs)
    def _func(self, df):
        mag2 = self.mag2._func(df[self.filt2])
        mag1 = self.mag1._func(df[self.filt1])
        return mag2 - mag1
    @property
    def columns(self):
        return [self.mag1.col, self.mag2.col]
    @property
    def name(self):
        return '{0} - {1} ({2})'.format(self.filt2, self.filt1, self.col)
    @property
    def shortname(self):
        return '{0}_{1}m{2}'.format(self.col, self.filt2.replace('-', ''),
                                    self.filt1.replace('-', ''))
702 """Main function of this subclass is to override the dropna=True
705 _allow_difference =
False
710 return super().
__call__(parq, dropna=
False, **kwargs)

class StarGalaxyLabeller(Labeller):
    _columns = ["base_ClassificationExtendedness_value"]
    _column = "base_ClassificationExtendedness_value"

    def _func(self, df):
        x = df[self._columns][self._column]
        mask = x.isnull()
        # extendedness < 0.5 -> star (code 1); >= 0.5 -> galaxy (code 0);
        # null -> code 2.
        test = (x < 0.5).astype(int)
        test = test.mask(mask, 2)
        categories = ['galaxy', 'star', self._null_label]
        label = pd.Series(pd.Categorical.from_codes(test, categories=categories),
                          index=x.index, name='label')
        if self._force_str:
            label = label.astype(str)
        return label

class NumStarLabeller(Labeller):
    _columns = ['numStarFlags']
    labels = {"star": 0, "maybe": 1, "notStar": 2}

    def _func(self, df):
        x = df[self._columns[0]]

        # Number of filters
        n = len(x.unique()) - 1

        labels = ['noStar', 'maybe', 'star']
        label = pd.Series(pd.cut(x, [-1, 0, n-1, n], labels=labels),
                          index=x.index, name='label')

        if self._force_str:
            label = label.astype(str)

        return label

class DeconvolvedMoments(Functor):
    name = 'Deconvolved Moments'
    shortname = 'deconvolvedMoments'
    _columns = ("ext_shapeHSM_HsmSourceMoments_xx",
                "ext_shapeHSM_HsmSourceMoments_yy",
                "base_SdssShape_xx", "base_SdssShape_yy",
                "ext_shapeHSM_HsmPsfMoments_xx",
                "ext_shapeHSM_HsmPsfMoments_yy")
763 """Calculate deconvolved moments"""
764 if "ext_shapeHSM_HsmSourceMoments_xx" in df.columns:
765 hsm = df[
"ext_shapeHSM_HsmSourceMoments_xx"] + df[
"ext_shapeHSM_HsmSourceMoments_yy"]
767 hsm = np.ones(len(df))*np.nan
768 sdss = df[
"base_SdssShape_xx"] + df[
"base_SdssShape_yy"]
769 if "ext_shapeHSM_HsmPsfMoments_xx" in df.columns:
770 psf = df[
"ext_shapeHSM_HsmPsfMoments_xx"] + df[
"ext_shapeHSM_HsmPsfMoments_yy"]
775 raise RuntimeError(
'No psf shape parameter found in catalog')
777 return hsm.where(np.isfinite(hsm), sdss) - psf
781 """Functor to calculate SDSS trace radius size for sources"""
782 name =
"SDSS Trace Size"
783 shortname =
'sdssTrace'
784 _columns = (
"base_SdssShape_xx",
"base_SdssShape_yy")
787 srcSize = np.sqrt(0.5*(df[
"base_SdssShape_xx"] + df[
"base_SdssShape_yy"]))
792 """Functor to calculate SDSS trace radius size difference (%) between object and psf model"""
793 name =
"PSF - SDSS Trace Size"
794 shortname =
'psf_sdssTrace'
795 _columns = (
"base_SdssShape_xx",
"base_SdssShape_yy",
796 "base_SdssShape_psf_xx",
"base_SdssShape_psf_yy")
799 srcSize = np.sqrt(0.5*(df[
"base_SdssShape_xx"] + df[
"base_SdssShape_yy"]))
800 psfSize = np.sqrt(0.5*(df[
"base_SdssShape_psf_xx"] + df[
"base_SdssShape_psf_yy"]))
801 sizeDiff = 100*(srcSize - psfSize)/(0.5*(srcSize + psfSize))
806 """Functor to calculate HSM trace radius size for sources"""
807 name =
'HSM Trace Size'
808 shortname =
'hsmTrace'
809 _columns = (
"ext_shapeHSM_HsmSourceMoments_xx",
810 "ext_shapeHSM_HsmSourceMoments_yy")
813 srcSize = np.sqrt(0.5*(df[
"ext_shapeHSM_HsmSourceMoments_xx"] +
814 df[
"ext_shapeHSM_HsmSourceMoments_yy"]))
819 """Functor to calculate HSM trace radius size difference (%) between object and psf model"""
820 name =
'PSF - HSM Trace Size'
821 shortname =
'psf_HsmTrace'
822 _columns = (
"ext_shapeHSM_HsmSourceMoments_xx",
823 "ext_shapeHSM_HsmSourceMoments_yy",
824 "ext_shapeHSM_HsmPsfMoments_xx",
825 "ext_shapeHSM_HsmPsfMoments_yy")
828 srcSize = np.sqrt(0.5*(df[
"ext_shapeHSM_HsmSourceMoments_xx"] +
829 df[
"ext_shapeHSM_HsmSourceMoments_yy"]))
830 psfSize = np.sqrt(0.5*(df[
"ext_shapeHSM_HsmPsfMoments_xx"] +
831 df[
"ext_shapeHSM_HsmPsfMoments_yy"]))
832 sizeDiff = 100*(srcSize - psfSize)/(0.5*(srcSize + psfSize))

class HsmFwhm(Functor):
    name = 'HSM Psf FWHM'
    _columns = ('ext_shapeHSM_HsmPsfMoments_xx', 'ext_shapeHSM_HsmPsfMoments_yy')
    # Hard-coded HSC pixel scale assumed here; should come from the WCS.
    pixelScale = 0.168
    SIGMA2FWHM = 2*np.sqrt(2*np.log(2))

    def _func(self, df):
        return self.pixelScale*self.SIGMA2FWHM*np.sqrt(
            0.5*(df['ext_shapeHSM_HsmPsfMoments_xx'] + df['ext_shapeHSM_HsmPsfMoments_yy']))

class E1(Functor):
    name = "Distortion Ellipticity (e1)"
    shortname = "Distortion"

class E2(Functor):
    name = "Ellipticity e2"
901 """Compute the local pixel scale from the stored CDMatrix.
923 """Compute the local pixel scale conversion.
927 cd11 : `pandas.Series`
928 [1, 1] element of the local CDMatricies.
929 cd12 : `pandas.Series`
930 [1, 2] element of the local CDMatricies.
931 cd21 : `pandas.Series`
932 [2, 1] element of the local CDMatricies.
933 cd2 : `pandas.Series`
934 [2, 2] element of the local CDMatricies.
938 pixScale : `pandas.Series`
939 Arcseconds per pixel at the location of the local WC
941 return 3600 * np.sqrt(np.fabs(cd11 * cd22 - cd12 * cd21))
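
    # For example (hypothetical diagonal CD matrix): cd11 = cd22 = 4.67e-5
    # deg/pixel with cd12 = cd21 = 0 gives
    # 3600 * sqrt(|4.67e-5 * 4.67e-5 - 0|) ≈ 0.168 arcsec/pixel.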
949 """Convert a value in units pixels to units arcseconds.
951 name =
"Pixel scale converter"
958 colCD_2_2, **kwargs):
960 super().
__init__(colCD_1_1, colCD_1_2, colCD_2_1, colCD_2_2, **kwargs)
964 return f
"{self.col}_asArcseconds"

class ReferenceBand(Functor):
    name = 'Reference Band'
    shortname = 'refBand'

    @property
    def columns(self):
        return ["merge_measurement_i",
                "merge_measurement_r",
                "merge_measurement_z",
                "merge_measurement_y",
                "merge_measurement_g"]
    def _func(self, df):
        def getFilterAliasName(row):
            # Get the name of the first column with a True value (columns
            # are ordered by priority: i, r, z, y, g).
            colName = row.idxmax()
            return colName.replace('merge_measurement_', '')

        return df[self.columns].apply(getFilterAliasName, axis=1)

class Photometry(Functor):
    # Flux of a zero-magnitude AB source in nanojanskys
    # (3631 Jy = 3.631e12 nJy); LOG_AB_FLUX_SCALE is its log10.
    AB_FLUX_SCALE = (0 * u.ABmag).to_value(u.nJy)
    LOG_AB_FLUX_SCALE = 12.56
    FIVE_OVER_2LOG10 = 1.085736204758129569
    def __init__(self, colFlux, colFluxErr=None, calib=None, **kwargs):
        self.vhypot = np.vectorize(self.hypot)
        self.col = colFlux
        self.colFluxErr = colFluxErr

        self.calib = calib
        if calib is not None:
            self.fluxMag0, self.fluxMag0Err = calib.getFluxMag0()
        else:
            # TO DO: DM-21955 replace this hard-coded HSC default.
            self.fluxMag0 = 63095734448.0194
            self.fluxMag0Err = 0.

        super().__init__(**kwargs)

    @property
    def name(self):
        return 'mag_{0}'.format(self.col)
    def hypot(self, a, b):
        """Hypotenuse that is robust to overflow."""
        if np.abs(a) < np.abs(b):
            a, b = b, a
        if a == 0.:
            return 0.
        q = b / a
        return np.abs(a) * np.sqrt(1. + q*q)
    def dn2flux(self, dn, fluxMag0):
        return self.AB_FLUX_SCALE * dn / fluxMag0

    def dn2mag(self, dn, fluxMag0):
        with np.warnings.catch_warnings():
            np.warnings.filterwarnings('ignore', r'invalid value encountered')
            np.warnings.filterwarnings('ignore', r'divide by zero')
            return -2.5 * np.log10(dn/fluxMag0)
    def dn2fluxErr(self, dn, dnErr, fluxMag0, fluxMag0Err):
        retVal = self.vhypot(dn * fluxMag0Err, dnErr * fluxMag0)
        retVal *= self.AB_FLUX_SCALE / fluxMag0 / fluxMag0
        return retVal
    def dn2MagErr(self, dn, dnErr, fluxMag0, fluxMag0Err):
        retVal = self.dn2fluxErr(dn, dnErr, fluxMag0, fluxMag0Err) / self.dn2flux(dn, fluxMag0)
        return self.FIVE_OVER_2LOG10 * retVal
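
    # Sanity check on the conventions above: dn2mag(fluxMag0, fluxMag0)
    # = -2.5*log10(1) = 0, i.e. a source with dn equal to fluxMag0 sits at
    # magnitude zero by construction, and dn2MagErr multiplies the relative
    # flux error by 2.5/ln(10) ≈ 1.0857 (FIVE_OVER_2LOG10).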

class NanoJansky(Photometry):
    def _func(self, df):
        return self.dn2flux(df[self.col], self.fluxMag0)


class NanoJanskyErr(Photometry):
    @property
    def columns(self):
        return [self.col, self.colFluxErr]

    def _func(self, df):
        retArr = self.dn2fluxErr(df[self.col], df[self.colFluxErr],
                                 self.fluxMag0, self.fluxMag0Err)
        return pd.Series(retArr, index=df.index)


class Magnitude(Photometry):
    def _func(self, df):
        return self.dn2mag(df[self.col], self.fluxMag0)


class MagnitudeErr(Photometry):
    @property
    def columns(self):
        return [self.col, self.colFluxErr]

    def _func(self, df):
        retArr = self.dn2MagErr(df[self.col], df[self.colFluxErr],
                                self.fluxMag0, self.fluxMag0Err)
        return pd.Series(retArr, index=df.index)
1087 """Base class for calibrating the specified instrument flux column using
1088 the local photometric calibration.
1093 Name of the instrument flux column.
1094 instFluxErrCol : `str`
1095 Name of the assocated error columns for ``instFluxCol``.
1096 photoCalibCol : `str`
1097 Name of local calibration column.
1098 photoCalibErrCol : `str`
1099 Error associated with ``photoCalibCol``
    # AB magnitude of a 1 nJy source (approximately 31.4).
    logNJanskyToAB = (1 * u.nJy).to_value(u.ABmag)
1124 """Convert instrument flux to nanojanskys.
1128 instFlux : `numpy.ndarray` or `pandas.Series`
1129 Array of instrument flux measurements
1130 localCalib : `numpy.ndarray` or `pandas.Series`
1131 Array of local photometric calibration estimates.
1135 calibFlux : `numpy.ndarray` or `pandas.Series`
1136 Array of calibrated flux measurements.
1138 return instFlux * localCalib
1141 """Convert instrument flux to nanojanskys.
1145 instFlux : `numpy.ndarray` or `pandas.Series`
1146 Array of instrument flux measurements
1147 instFluxErr : `numpy.ndarray` or `pandas.Series`
1148 Errors on associated ``instFlux`` values
1149 localCalib : `numpy.ndarray` or `pandas.Series`
1150 Array of local photometric calibration estimates.
1151 localCalibErr : `numpy.ndarray` or `pandas.Series`
1152 Errors on associated ``localCalib`` values
1156 calibFluxErr : `numpy.ndarray` or `pandas.Series`
1157 Errors on calibrated flux measurements.
1159 return np.hypot(instFluxErr * localCalib, instFlux * localCalibErr)
1162 """Convert instrument flux to nanojanskys.
1166 instFlux : `numpy.ndarray` or `pandas.Series`
1167 Array of instrument flux measurements
1168 localCalib : `numpy.ndarray` or `pandas.Series`
1169 Array of local photometric calibration estimates.
1173 calibMag : `numpy.ndarray` or `pandas.Series`
1174 Array of calibrated AB magnitudes.
1179 """Convert instrument flux err to nanojanskys.
1183 instFlux : `numpy.ndarray` or `pandas.Series`
1184 Array of instrument flux measurements
1185 instFluxErr : `numpy.ndarray` or `pandas.Series`
1186 Errors on associated ``instFlux`` values
1187 localCalib : `numpy.ndarray` or `pandas.Series`
1188 Array of local photometric calibration estimates.
1189 localCalibErr : `numpy.ndarray` or `pandas.Series`
1190 Errors on associated ``localCalib`` values
1194 calibMagErr: `numpy.ndarray` or `pandas.Series`
1195 Error on calibrated AB magnitudes.
1202 """Compute calibrated fluxes using the local calibration value.
1218 return f
'flux_{self.instFluxCol}'
1220 def _func(self, df):
1225 """Compute calibrated flux errors using the local calibration value.
1242 return f
'fluxErr_{self.instFluxCol}'
1244 def _func(self, df):
1250 """Compute calibrated AB magnitudes using the local calibration value.
1266 return f
'mag_{self.instFluxCol}'
1268 def _func(self, df):
1274 """Compute calibrated AB magnitude errors using the local calibration value.
1291 return f
'magErr_{self.instFluxCol}'
1293 def _func(self, df):