lsst.pipe.tasks  21.0.0-142-gef555c1e+bcbe3dafff
functors.py
Go to the documentation of this file.
1 # This file is part of pipe_tasks.
2 #
3 # LSST Data Management System
4 # This product includes software developed by the
5 # LSST Project (http://www.lsst.org/).
6 # See COPYRIGHT file at the top of the source tree.
7 #
8 # This program is free software: you can redistribute it and/or modify
9 # it under the terms of the GNU General Public License as published by
10 # the Free Software Foundation, either version 3 of the License, or
11 # (at your option) any later version.
12 #
13 # This program is distributed in the hope that it will be useful,
14 # but WITHOUT ANY WARRANTY; without even the implied warranty of
15 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 # GNU General Public License for more details.
17 #
18 # You should have received a copy of the LSST License Statement and
19 # the GNU General Public License along with this program. If not,
20 # see <https://www.lsstcorp.org/LegalNotices/>.
21 #
import os.path
import re
import warnings
from itertools import product

import astropy.units as u
import numpy as np
import pandas as pd
import yaml

from lsst.daf.butler import DeferredDatasetHandle
from lsst.daf.persistence import doImport
import lsst.geom as geom
import lsst.sphgeom as sphgeom

from .parquetTable import ParquetTable, MultilevelParquetTable
37 
38 
def init_fromDict(initDict, basePath='lsst.pipe.tasks.functors',
                  typeKey='functor', name=None):
    """Initialize an object defined in a dictionary

    The object needs to be importable as
        f'{basePath}.{initDict[typeKey]}'
    The positional and keyword arguments (if any) are contained in
    "args" and "kwargs" entries in the dictionary, respectively.
    This is used in `functors.CompositeFunctor.from_yaml` to initialize
    a composite functor from a specification in a YAML file.

    Parameters
    ----------
    initDict : dictionary
        Dictionary describing object's initialization.  Must contain
        an entry keyed by ``typeKey`` that is the name of the object,
        relative to ``basePath``.
    basePath : str
        Path relative to module in which ``initDict[typeKey]`` is defined.
    typeKey : str
        Key of ``initDict`` that is the name of the object
        (relative to `basePath`).
    name : str, optional
        Label for this functor, used only in the error message if
        construction fails.
    """
    initDict = initDict.copy()
    # TO DO: DM-21956 We should be able to define functors outside this module
    pythonType = doImport(f'{basePath}.{initDict.pop(typeKey)}')
    args = []
    if 'args' in initDict:
        args = initDict.pop('args')
        if isinstance(args, str):
            args = [args]
    try:
        element = pythonType(*args, **initDict)
    except Exception as e:
        message = f'Error in constructing functor "{name}" of type {pythonType.__name__} with args: {args}'
        # Chain the original exception so the root cause is not lost.
        raise type(e)(message, e.args) from e
    return element
76 
77 
class Functor(object):
    """Define and execute a calculation on a ParquetTable

    The `__call__` method accepts either a `ParquetTable` object or a
    `DeferredDatasetHandle`, and returns the
    result of the calculation as a single column.  Each functor defines what
    columns are needed for the calculation, and only these columns are read
    from the `ParquetTable`.

    The action of `__call__` consists of two steps: first, loading the
    necessary columns from disk into memory as a `pandas.DataFrame` object;
    and second, performing the computation on this dataframe and returning the
    result.

    To define a new `Functor`, a subclass must define a `_func` method,
    that takes a `pandas.DataFrame` and returns result in a `pandas.Series`.
    In addition, it must define the following attributes

    * `_columns`: The columns necessary to perform the calculation
    * `name`: A name appropriate for a figure axis label
    * `shortname`: A name appropriate for use as a dictionary key

    On initialization, a `Functor` should declare what band (`filt` kwarg)
    and dataset (e.g. `'ref'`, `'meas'`, `'forced_src'`) it is intended to be
    applied to.  This enables the `_get_data` method to extract the proper
    columns from the parquet file.  If not specified, the dataset will fall
    back on the `_defaultDataset` attribute.  If band is not specified and
    `dataset` is anything other than `'ref'`, then an error will be raised
    when trying to perform the calculation.

    Originally, `Functor` was set up to expect
    datasets formatted like the `deepCoadd_obj` dataset; that is, a
    dataframe with a multi-level column index, with the levels of the
    column index being `band`, `dataset`, and `column`.
    It has since been generalized to apply to dataframes without multi-level
    indices and multi-level indices with just `dataset` and `column` levels.
    In addition, the `_get_data` method that reads
    the dataframe from the `ParquetTable` will return a dataframe with column
    index levels defined by the `_dfLevels` attribute; by default, this is
    `column`.

    The `_dfLevels` attributes should generally not need to
    be changed, unless `_func` needs columns from multiple filters or datasets
    to do the calculation.
    An example of this is the `lsst.pipe.tasks.functors.Color` functor, for
    which `_dfLevels = ('band', 'column')`, and `_func` expects the dataframe
    it gets to have those levels in the column index.

    Parameters
    ----------
    filt : str
        Filter upon which to do the calculation

    dataset : str
        Dataset upon which to do the calculation
        (e.g., 'ref', 'meas', 'forced_src').
    """

    _defaultDataset = 'ref'
    _dfLevels = ('column',)
    _defaultNoDup = False

    def __init__(self, filt=None, dataset=None, noDup=None):
        self.filt = filt
        self.dataset = dataset if dataset is not None else self._defaultDataset
        self._noDup = noDup

    @property
    def noDup(self):
        # An explicit per-instance setting wins over the class default.
        if self._noDup is not None:
            return self._noDup
        else:
            return self._defaultNoDup

    @property
    def columns(self):
        """Columns required to perform calculation
        """
        if not hasattr(self, '_columns'):
            raise NotImplementedError('Must define columns property or _columns attribute')
        return self._columns

    def _get_data_columnLevels(self, data, columnIndex=None):
        """Gets the names of the column index levels

        This should only be called in the context of a multilevel table.
        The logic here is to enable this to work both with the gen2
        `MultilevelParquetTable` and with the gen3 `DeferredDatasetHandle`.

        Parameters
        ----------
        data : `MultilevelParquetTable` or `DeferredDatasetHandle`

        columnIndex (optional): pandas `Index` object
            if not passed, then it is read from the `DeferredDatasetHandle`
        """
        if isinstance(data, DeferredDatasetHandle):
            if columnIndex is None:
                columnIndex = data.get(component="columns")
        if columnIndex is not None:
            return columnIndex.names
        if isinstance(data, MultilevelParquetTable):
            return data.columnLevels
        else:
            raise TypeError(f"Unknown type for data: {type(data)}!")

    def _get_data_columnLevelNames(self, data, columnIndex=None):
        """Gets the content of each of the column levels for a multilevel table

        Similar to `_get_data_columnLevels`, this enables backward
        compatibility with gen2.

        Mirrors original gen2 implementation within
        `pipe.tasks.parquetTable.MultilevelParquetTable`
        """
        if isinstance(data, DeferredDatasetHandle):
            if columnIndex is None:
                columnIndex = data.get(component="columns")
        if columnIndex is not None:
            columnLevels = columnIndex.names
            columnLevelNames = {
                level: list(np.unique(np.array([c for c in columnIndex])[:, i]))
                for i, level in enumerate(columnLevels)
            }
            return columnLevelNames
        if isinstance(data, MultilevelParquetTable):
            return data.columnLevelNames
        else:
            raise TypeError(f"Unknown type for data: {type(data)}!")

    def _colsFromDict(self, colDict, columnIndex=None):
        """Converts dictionary column specification to a list of columns

        This mirrors the original gen2 implementation within
        `pipe.tasks.parquetTable.MultilevelParquetTable`
        """
        new_colDict = {}
        columnLevels = self._get_data_columnLevels(None, columnIndex=columnIndex)

        for i, lev in enumerate(columnLevels):
            if lev in colDict:
                if isinstance(colDict[lev], str):
                    new_colDict[lev] = [colDict[lev]]
                else:
                    new_colDict[lev] = colDict[lev]
            else:
                # Level not constrained by the caller: take every value.
                new_colDict[lev] = columnIndex.levels[i]

        levelCols = [new_colDict[lev] for lev in columnLevels]
        cols = product(*levelCols)
        return list(cols)

    def multilevelColumns(self, data, columnIndex=None, returnTuple=False):
        """Returns columns needed by functor from multilevel dataset

        To access tables with multilevel column structure, the
        `MultilevelParquetTable` or `DeferredDatasetHandle` need to be passed
        either a list of tuples or a dictionary.

        Parameters
        ----------
        data : `MultilevelParquetTable` or `DeferredDatasetHandle`

        columnIndex (optional): pandas `Index` object
            either passed or read in from `DeferredDatasetHandle`.

        `returnTuple` : bool
            If true, then return a list of tuples rather than the column
            dictionary specification.  This is set to `True` by
            `CompositeFunctor` in order to be able to combine columns from the
            various component functors.
        """
        if isinstance(data, DeferredDatasetHandle) and columnIndex is None:
            columnIndex = data.get(component="columns")

        # Confirm that the dataset has the column levels the functor is
        # expecting it to have.
        columnLevels = self._get_data_columnLevels(data, columnIndex)

        columnDict = {'column': self.columns,
                      'dataset': self.dataset}
        if self.filt is None:
            columnLevelNames = self._get_data_columnLevelNames(data, columnIndex)
            if "band" in columnLevels:
                if self.dataset == "ref":
                    # 'ref' values are band-independent; any band will do.
                    columnDict["band"] = columnLevelNames["band"][0]
                else:
                    raise ValueError(f"'filt' not set for functor {self.name} "
                                     f"(dataset {self.dataset}) "
                                     "and ParquetTable "
                                     "contains multiple filters in column index. "
                                     "Set 'filt' or set 'dataset' to 'ref'.")
        else:
            columnDict['band'] = self.filt

        if isinstance(data, MultilevelParquetTable):
            return data._colsFromDict(columnDict)
        elif isinstance(data, DeferredDatasetHandle):
            if returnTuple:
                return self._colsFromDict(columnDict, columnIndex=columnIndex)
            else:
                return columnDict

    def _func(self, df, dropna=True):
        raise NotImplementedError('Must define calculation on dataframe')

    def _get_columnIndex(self, data):
        """Return columnIndex
        """
        if isinstance(data, DeferredDatasetHandle):
            return data.get(component="columns")
        else:
            return None

    def _get_data(self, data):
        """Retrieve dataframe necessary for calculation.

        The data argument can be a DataFrame, a ParquetTable instance, or a
        gen3 DeferredDatasetHandle

        Returns dataframe upon which `self._func` can act.

        N.B. while passing a raw pandas `DataFrame` *should* work here, it has
        not been tested.
        """
        if isinstance(data, pd.DataFrame):
            return data

        # First thing to do: check to see if the data source has a multilevel
        # column index or not.
        columnIndex = self._get_columnIndex(data)
        is_multiLevel = isinstance(data, MultilevelParquetTable) or isinstance(columnIndex, pd.MultiIndex)

        # Simple single-level parquet table, gen2
        if isinstance(data, ParquetTable) and not is_multiLevel:
            columns = self.columns
            df = data.toDataFrame(columns=columns)
            return df

        # Get proper columns specification for this functor
        if is_multiLevel:
            columns = self.multilevelColumns(data, columnIndex=columnIndex)
        else:
            columns = self.columns

        if isinstance(data, MultilevelParquetTable):
            # Load in-memory dataframe with appropriate columns the gen2 way
            df = data.toDataFrame(columns=columns, droplevels=False)
        elif isinstance(data, DeferredDatasetHandle):
            # Load in-memory dataframe with appropriate columns the gen3 way
            df = data.get(parameters={"columns": columns})

        # Drop unnecessary column levels
        if is_multiLevel:
            df = self._setLevels(df)

        return df

    def _setLevels(self, df):
        # Keep only the column-index levels named in `_dfLevels`.
        levelsToDrop = [n for n in df.columns.names if n not in self._dfLevels]
        df.columns = df.columns.droplevel(levelsToDrop)
        return df

    def _dropna(self, vals):
        return vals.dropna()

    def __call__(self, data, dropna=False):
        # Load the data outside the try block: if loading itself fails we want
        # that exception to propagate rather than masking it with an
        # UnboundLocalError from `self.fail(df)` on an unbound `df`.
        df = self._get_data(data)
        try:
            vals = self._func(df)
        except Exception:
            # Computation failed: return a column of NaNs of the right length.
            vals = self.fail(df)
        if dropna:
            vals = self._dropna(vals)

        return vals

    def difference(self, data1, data2, **kwargs):
        """Computes difference between functor called on two different
        ParquetTable objects
        """
        return self(data1, **kwargs) - self(data2, **kwargs)

    def fail(self, df):
        # All-NaN fallback column matching the input's index.
        return pd.Series(np.full(len(df), np.nan), index=df.index)

    @property
    def name(self):
        """Full name of functor (suitable for figure labels)
        """
        # NOTE(review): this *returns* the exception class rather than raising
        # it, so `shortname` on a nameless subclass yields the class object.
        # Preserved for backward compatibility; looks like a latent bug.
        return NotImplementedError

    @property
    def shortname(self):
        """Short name of functor (suitable for column name/dict key)
        """
        return self.name
370 
class CompositeFunctor(Functor):
    """Perform multiple calculations at once on a catalog

    The role of a `CompositeFunctor` is to group together computations from
    multiple functors.  Instead of returning `pandas.Series` a
    `CompositeFunctor` returns a `pandas.Dataframe`, with the column names
    being the keys of `funcDict`.

    The `columns` attribute of a `CompositeFunctor` is the union of all columns
    in all the component functors.

    A `CompositeFunctor` does not use a `_func` method itself; rather,
    when a `CompositeFunctor` is called, all its columns are loaded
    at once, and the resulting dataframe is passed to the `_func` method of
    each component functor.  This has the advantage of only doing I/O (reading
    from parquet file) once, and works because each individual `_func` method
    of each component functor does not care if there are *extra* columns in
    the dataframe being passed; only that it must contain *at least* the
    `columns` it expects.

    An important and useful class method is `from_yaml`, which takes as
    argument the path to a YAML file specifying a collection of functors.

    Parameters
    ----------
    funcs : `dict` or `list`
        Dictionary or list of functors.  If a list, then it will be converted
        into a dictonary according to the `.shortname` attribute of each
        functor.
    """
    dataset = None

    def __init__(self, funcs, **kwargs):
        if isinstance(funcs, dict):
            self.funcDict = funcs
        else:
            self.funcDict = {f.shortname: f for f in funcs}

        self._filt = None

        super().__init__(**kwargs)

    @property
    def filt(self):
        return self._filt

    @filt.setter
    def filt(self, filt):
        # Propagate the filter setting down to every component functor.
        if filt is not None:
            for _, f in self.funcDict.items():
                f.filt = filt
        self._filt = filt

    def update(self, new):
        if isinstance(new, dict):
            self.funcDict.update(new)
        elif isinstance(new, CompositeFunctor):
            self.funcDict.update(new.funcDict)
        else:
            raise TypeError('Can only update with dictionary or CompositeFunctor.')

        # Make sure new functors have the same 'filt' set
        if self.filt is not None:
            self.filt = self.filt

    @property
    def columns(self):
        # Union of the columns needed by all component functors.
        return list(set([x for y in [f.columns for f in self.funcDict.values()] for x in y]))

    def multilevelColumns(self, data, **kwargs):
        # Get the union of columns for all component functors.
        # Note the need to have `returnTuple=True` here.
        return list(
            set(
                [
                    x
                    for y in [
                        f.multilevelColumns(data, returnTuple=True, **kwargs)
                        for f in self.funcDict.values()
                    ]
                    for x in y
                ]
            )
        )

    def __call__(self, data, **kwargs):
        """Apply the functor to the data table

        Parameters
        ----------
        data : `lsst.daf.butler.DeferredDatasetHandle`,
               `lsst.pipe.tasks.parquetTable.MultilevelParquetTable`,
               `lsst.pipe.tasks.parquetTable.ParquetTable`,
               or `pandas.DataFrame`.
            The table or a pointer to a table on disk from which columns can
            be accessed
        """
        columnIndex = self._get_columnIndex(data)

        # First, determine whether data has a multilevel index (either gen2
        # or gen3)
        is_multiLevel = isinstance(data, MultilevelParquetTable) or isinstance(columnIndex, pd.MultiIndex)

        # Multilevel index, gen2 or gen3
        if is_multiLevel:
            columns = self.multilevelColumns(data, columnIndex=columnIndex)

            if isinstance(data, MultilevelParquetTable):
                # Read data into memory the gen2 way
                df = data.toDataFrame(columns=columns, droplevels=False)
            elif isinstance(data, DeferredDatasetHandle):
                # Read data into memory the gen3 way
                df = data.get(parameters={"columns": columns})

            valDict = {}
            for k, f in self.funcDict.items():
                try:
                    # Slice out just this functor's columns before computing.
                    subdf = f._setLevels(
                        df[f.multilevelColumns(data, returnTuple=True, columnIndex=columnIndex)]
                    )
                    valDict[k] = f._func(subdf)
                except Exception:
                    valDict[k] = f.fail(subdf)

        else:
            if isinstance(data, DeferredDatasetHandle):
                # input if Gen3 deferLoad=True
                df = data.get(parameters={"columns": self.columns})
            elif isinstance(data, pd.DataFrame):
                # input if Gen3 deferLoad=False
                df = data
            else:
                # Original Gen2 input is type ParquetTable and the fallback
                df = data.toDataFrame(columns=self.columns)

            valDict = {k: f._func(df) for k, f in self.funcDict.items()}

        try:
            valDf = pd.concat(valDict, axis=1)
        except TypeError:
            print([(k, type(v)) for k, v in valDict.items()])
            raise

        if kwargs.get('dropna', False):
            valDf = valDf.dropna(how='any')

        return valDf

    @classmethod
    def renameCol(cls, col, renameRules):
        # Apply the first-matching prefix rename rule(s) to a column name.
        if renameRules is None:
            return col
        for old, new in renameRules:
            if col.startswith(old):
                col = col.replace(old, new)
        return col

    @classmethod
    def from_file(cls, filename, **kwargs):
        # Allow environment variables in the filename.
        filename = os.path.expandvars(filename)
        with open(filename) as f:
            translationDefinition = yaml.safe_load(f)

        return cls.from_yaml(translationDefinition, **kwargs)

    @classmethod
    def from_yaml(cls, translationDefinition, **kwargs):
        funcs = {}
        for func, val in translationDefinition['funcs'].items():
            funcs[func] = init_fromDict(val, name=func)

        if 'flag_rename_rules' in translationDefinition:
            renameRules = translationDefinition['flag_rename_rules']
        else:
            renameRules = None

        if 'calexpFlags' in translationDefinition:
            for flag in translationDefinition['calexpFlags']:
                funcs[cls.renameCol(flag, renameRules)] = Column(flag, dataset='calexp')

        if 'refFlags' in translationDefinition:
            for flag in translationDefinition['refFlags']:
                funcs[cls.renameCol(flag, renameRules)] = Column(flag, dataset='ref')

        if 'forcedFlags' in translationDefinition:
            for flag in translationDefinition['forcedFlags']:
                funcs[cls.renameCol(flag, renameRules)] = Column(flag, dataset='forced_src')

        if 'flags' in translationDefinition:
            for flag in translationDefinition['flags']:
                funcs[cls.renameCol(flag, renameRules)] = Column(flag, dataset='meas')

        return cls(funcs, **kwargs)
563 
564 
def mag_aware_eval(df, expr):
    """Evaluate an expression on a DataFrame, knowing what the 'mag' function means

    Builds on `pandas.DataFrame.eval`, which parses and executes math on
    dataframes.

    Parameters
    ----------
    df : pandas.DataFrame
        Dataframe on which to evaluate expression.

    expr : str
        Expression.
    """
    # `truediv=True` was the default and the keyword has been removed in
    # pandas 2.x, so it is simply omitted here.
    try:
        # Rewrite mag(x) -> -2.5*log10(x), assuming x is already a flux column.
        expr_new = re.sub(r'mag\((\w+)\)', r'-2.5*log(\g<1>)/log(10)', expr)
        val = df.eval(expr_new)
    except Exception:  # Should check what actually gets raised
        # Fall back to treating the mag() argument as a flux-column stem.
        expr_new = re.sub(r'mag\((\w+)\)', r'-2.5*log(\g<1>_instFlux)/log(10)', expr)
        val = df.eval(expr_new)
    return val
585 
586 
588  """Arbitrary computation on a catalog
589 
590  Column names (and thus the columns to be loaded from catalog) are found
591  by finding all words and trying to ignore all "math-y" words.
592 
593  Parameters
594  ----------
595  expr : str
596  Expression to evaluate, to be parsed and executed by `mag_aware_eval`.
597  """
598  _ignore_words = ('mag', 'sin', 'cos', 'exp', 'log', 'sqrt')
599 
600  def __init__(self, expr, **kwargs):
601  self.exprexpr = expr
602  super().__init__(**kwargs)
603 
604  @property
605  def name(self):
606  return self.exprexpr
607 
608  @property
609  def columns(self):
610  flux_cols = re.findall(r'mag\‍(\s*(\w+)\s*\‍)', self.exprexpr)
611 
612  cols = [c for c in re.findall(r'[a-zA-Z_]+', self.exprexpr) if c not in self._ignore_words_ignore_words]
613  not_a_col = []
614  for c in flux_cols:
615  if not re.search('_instFlux$', c):
616  cols.append(f'{c}_instFlux')
617  not_a_col.append(c)
618  else:
619  cols.append(c)
620 
621  return list(set([c for c in cols if c not in not_a_col]))
622 
623  def _func(self, df):
624  return mag_aware_eval(df, self.exprexpr)
625 
626 
class Column(Functor):
    """Get column with specified name
    """

    def __init__(self, col, **kwargs):
        self.col = col
        super().__init__(**kwargs)

    @property
    def name(self):
        return self.col

    @property
    def columns(self):
        return [self.col]

    def _func(self, df):
        return df[self.col]
645 
646 
class Index(Functor):
    """Return the value of the index for each object
    """

    columns = ['coord_ra']  # just a dummy; something has to be here
    _defaultDataset = 'ref'
    _defaultNoDup = True

    def _func(self, df):
        # Echo the dataframe's own index back as a Series aligned to it.
        idx = df.index
        return pd.Series(idx, index=idx)
657 
658 
class IDColumn(Column):
    """Return the object ID, taken from the dataframe index.
    """
    # NOTE(review): class name reconstructed; the `class` line was lost in
    # extraction.  `col` is a class attribute here, unlike `Column` where it
    # is set in `__init__`.
    col = 'id'
    _allow_difference = False
    _defaultNoDup = True

    def _func(self, df):
        return pd.Series(df.index, index=df.index)
666 
667 
class FootprintNPix(Column):
    """Return the footprint pixel count column.
    """
    # NOTE(review): class name reconstructed; the `class` line was lost in
    # extraction.
    col = 'base_Footprint_nPix'
670 
671 
class CoordColumn(Column):
    """Base class for coordinate column, in degrees
    """
    # When True, the stored column is in radians and is converted on read.
    _radians = True

    def __init__(self, col, **kwargs):
        super().__init__(col, **kwargs)

    def _func(self, df):
        # Must not modify original column in case that column is used by
        # another functor
        output = df[self.col] * 180 / np.pi if self._radians else df[self.col]
        return output
684 
685 
class RAColumn(CoordColumn):
    """Right Ascension, in degrees
    """
    name = 'RA'
    _defaultNoDup = True

    def __init__(self, **kwargs):
        super().__init__('coord_ra', **kwargs)

    def __call__(self, catalog, **kwargs):
        return super().__call__(catalog, **kwargs)
697 
698 
class DecColumn(CoordColumn):
    """Declination, in degrees
    """
    name = 'Dec'
    _defaultNoDup = True

    def __init__(self, **kwargs):
        super().__init__('coord_dec', **kwargs)

    def __call__(self, catalog, **kwargs):
        return super().__call__(catalog, **kwargs)
710 
711 
class HtmIndex20(Functor):
    """Compute the level 20 HtmIndex for the catalog.
    """
    name = "Htm20"
    htmLevel = 20
    # Input coordinate columns are assumed to be in radians when True.
    _radians = True

    def __init__(self, ra, decl, **kwargs):
        self.pixelator = sphgeom.HtmPixelization(self.htmLevel)
        self.ra = ra
        self.decl = decl
        self._columns = [self.ra, self.decl]
        super().__init__(**kwargs)

    def _func(self, df):

        def computePixel(row):
            if self._radians:
                sphPoint = geom.SpherePoint(row[self.ra],
                                            row[self.decl],
                                            geom.radians)
            else:
                sphPoint = geom.SpherePoint(row[self.ra],
                                            row[self.decl],
                                            geom.degrees)
            return self.pixelator.index(sphPoint.getVector())

        # Row-wise apply: builds one sphere point per object and returns its
        # HTM pixel index.
        return df.apply(computePixel, axis=1)
740 
741 
def fluxName(col):
    """Return *col* with the '_instFlux' suffix appended if absent."""
    suffix = '_instFlux'
    return col if col.endswith(suffix) else col + suffix
746 
747 
def fluxErrName(col):
    """Return *col* with the '_instFluxErr' suffix appended if absent."""
    suffix = '_instFluxErr'
    return col if col.endswith(suffix) else col + suffix
752 
753 
class Mag(Functor):
    """Compute calibrated magnitude

    Takes a `calib` argument, which returns the flux at mag=0
    as `calib.getFluxMag0()`.  If not provided, then the default
    `fluxMag0` is 63095734448.0194, which is default for HSC.
    This default should be removed in DM-21955

    This calculation hides warnings about invalid values and dividing by zero.

    As for all functors, a `dataset` and `filt` kwarg should be provided upon
    initialization.  Unlike the default `Functor`, however, the default
    dataset for a `Mag` is `'meas'`, rather than `'ref'`.

    Parameters
    ----------
    col : `str`
        Name of flux column from which to compute magnitude.  Can be parseable
        by `lsst.pipe.tasks.functors.fluxName` function---that is, you can
        pass `'modelfit_CModel'` instead of `'modelfit_CModel_instFlux'`) and
        it will understand.
    calib : `lsst.afw.image.calib.Calib` (optional)
        Object that knows zero point.
    """
    _defaultDataset = 'meas'

    def __init__(self, col, calib=None, **kwargs):
        self.col = fluxName(col)
        self.calib = calib
        if calib is not None:
            self.fluxMag0 = calib.getFluxMag0()[0]
        else:
            # TO DO: DM-21955 Replace hard coded photometic calibration values
            self.fluxMag0 = 63095734448.0194

        super().__init__(**kwargs)

    @property
    def columns(self):
        return [self.col]

    def _func(self, df):
        # Use the stdlib `warnings` module: `np.warnings` was an accidental
        # alias removed in NumPy 1.25.
        with warnings.catch_warnings():
            warnings.filterwarnings('ignore', r'invalid value encountered')
            warnings.filterwarnings('ignore', r'divide by zero')
            return -2.5*np.log10(df[self.col] / self.fluxMag0)

    @property
    def name(self):
        return f'mag_{self.col}'
804 
805 
class MagErr(Mag):
    """Compute calibrated magnitude uncertainty

    Takes the same `calib` object as `lsst.pipe.tasks.functors.Mag`.

    Parameters
    ----------
    col : `str`
        Name of flux column
    calib : `lsst.afw.image.calib.Calib` (optional)
        Object that knows zero point.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if self.calib is not None:
            self.fluxMag0Err = self.calib.getFluxMag0()[1]
        else:
            self.fluxMag0Err = 0.

    @property
    def columns(self):
        # Both the flux column and its error column are needed.
        return [self.col, self.col + 'Err']

    def _func(self, df):
        # Use the stdlib `warnings` module: `np.warnings` was an accidental
        # alias removed in NumPy 1.25.
        with warnings.catch_warnings():
            warnings.filterwarnings('ignore', r'invalid value encountered')
            warnings.filterwarnings('ignore', r'divide by zero')
            fluxCol, fluxErrCol = self.columns
            # Standard propagation: sigma_mag = 2.5/ln(10) * sigma_flux/flux,
            # combined in quadrature with the zero-point uncertainty.
            x = df[fluxErrCol] / df[fluxCol]
            y = self.fluxMag0Err / self.fluxMag0
            magErr = (2.5 / np.log(10.)) * np.sqrt(x*x + y*y)
            return magErr

    @property
    def name(self):
        return super().name + '_err'
842 
843 
class NanoMaggy(Mag):
    """Convert instrumental flux to a calibrated relative flux, scaled by 1e9.
    """
    # NOTE(review): class name reconstructed; the `class` line was lost in
    # extraction.  The body divides by the mag-0 flux and scales by 1e9,
    # i.e. flux in units of 1e-9 of the zero-point flux (nanomaggies) —
    # confirm name against upstream source.

    def _func(self, df):
        return (df[self.col] / self.fluxMag0) * 1e9
850 
851 
class MagDiff(Functor):
    """Functor to calculate magnitude difference"""
    # Docstring moved above the attribute so it is a real class docstring;
    # in the original it was a bare string statement after `_defaultDataset`.

    _defaultDataset = 'meas'

    def __init__(self, col1, col2, **kwargs):
        self.col1 = fluxName(col1)
        self.col2 = fluxName(col2)
        super().__init__(**kwargs)

    @property
    def columns(self):
        return [self.col1, self.col2]

    def _func(self, df):
        # Use the stdlib `warnings` module: `np.warnings` was an accidental
        # alias removed in NumPy 1.25.
        with warnings.catch_warnings():
            warnings.filterwarnings('ignore', r'invalid value encountered')
            warnings.filterwarnings('ignore', r'divide by zero')
            # Magnitude difference equals -2.5 log10 of the flux ratio.
            return -2.5*np.log10(df[self.col1]/df[self.col2])

    @property
    def name(self):
        return f'(mag_{self.col1} - mag_{self.col2})'

    @property
    def shortname(self):
        return f'magDiff_{self.col1}_{self.col2}'
879 
880 
class Color(Functor):
    """Compute the color between two filters

    Computes color by initializing two different `Mag`
    functors based on the `col` and filters provided, and
    then returning the difference.

    This is enabled by the `_func` expecting a dataframe with a
    multilevel column index, with both `'band'` and `'column'`,
    instead of just `'column'`, which is the `Functor` default.
    This is controlled by the `_dfLevels` attribute.

    Also of note, the default dataset for `Color` is `forced_src'`,
    whereas for `Mag` it is `'meas'`.

    Parameters
    ----------
    col : str
        Name of flux column from which to compute; same as would be passed to
        `lsst.pipe.tasks.functors.Mag`.

    filt2, filt1 : str
        Filters from which to compute magnitude difference.
        Color computed is `Mag(filt2) - Mag(filt1)`.
    """
    _defaultDataset = 'forced_src'
    _dfLevels = ('band', 'column')
    _defaultNoDup = True

    def __init__(self, col, filt2, filt1, **kwargs):
        self.col = fluxName(col)
        if filt2 == filt1:
            raise RuntimeError("Cannot compute Color for %s: %s - %s " % (col, filt2, filt1))
        self.filt2 = filt2
        self.filt1 = filt1

        # Delegate the per-band magnitude computation to two Mag functors.
        self.mag2 = Mag(col, filt=filt2, **kwargs)
        self.mag1 = Mag(col, filt=filt1, **kwargs)

        super().__init__(**kwargs)

    @property
    def filt(self):
        # A color spans two bands, so a single 'filt' is meaningless here.
        return None

    @filt.setter
    def filt(self, filt):
        pass

    def _func(self, df):
        mag2 = self.mag2._func(df[self.filt2])
        mag1 = self.mag1._func(df[self.filt1])
        return mag2 - mag1

    @property
    def columns(self):
        return [self.mag1.col, self.mag2.col]

    def multilevelColumns(self, parq, **kwargs):
        return [(self.dataset, self.filt1, self.col), (self.dataset, self.filt2, self.col)]

    @property
    def name(self):
        return f'{self.filt2} - {self.filt1} ({self.col})'

    @property
    def shortname(self):
        return f"{self.col}_{self.filt2.replace('-', '')}m{self.filt1.replace('-', '')}"
949 
950 
class Labeller(Functor):
    """Main function of this subclass is to override the dropna=True
    """
    # NOTE(review): class name reconstructed; the `class` line was lost in
    # extraction.
    _null_label = 'null'
    _allow_difference = False
    name = 'label'
    _force_str = False

    def __call__(self, parq, dropna=False, **kwargs):
        # Force dropna=False regardless of what the caller passed: label
        # columns must keep one entry per row.
        return super().__call__(parq, dropna=False, **kwargs)
961 
962 
class StarGalaxyLabeller(Labeller):
    """Label each object 'star', 'galaxy', or 'null' from the extendedness
    measurement.
    """
    # NOTE(review): class name reconstructed; the `class` line was lost in
    # extraction.
    _columns = ["base_ClassificationExtendedness_value"]
    _column = "base_ClassificationExtendedness_value"

    def _func(self, df):
        x = df[self._columns][self._column]
        mask = x.isnull()
        # Code 1 for extendedness < 0.5, 0 otherwise; 2 where the value is
        # missing.
        test = (x < 0.5).astype(int)
        test = test.mask(mask, 2)

        # TODO: DM-21954 Look into veracity of inline comment below
        # are these backwards?
        categories = ['galaxy', 'star', self._null_label]
        label = pd.Series(pd.Categorical.from_codes(test, categories=categories),
                          index=x.index, name='label')
        if self._force_str:
            label = label.astype(str)
        return label
981 
982 
    _columns = ['numStarFlags']
    labels = {"star": 0, "maybe": 1, "notStar": 2}

    def _func(self, df):
        # NOTE(review): doubled names (_columns_columns, _force_str_force_str)
        # appear to be doc-extraction artifacts of the single-name attributes.
        x = df[self._columns_columns][self._columns_columns[0]]

        # Number of filters
        n = len(x.unique()) - 1

        # Bin the star-flag count: 0 -> noStar, 1..n-1 -> maybe, n -> star.
        labels = ['noStar', 'maybe', 'star']
        label = pd.Series(pd.cut(x, [-1, 0, n-1, n], labels=labels),
                          index=x.index, name='label')

        if self._force_str_force_str:
            label = label.astype(str)

        return label
1001 
1002 
    name = 'Deconvolved Moments'
    shortname = 'deconvolvedMoments'
    _columns = ("ext_shapeHSM_HsmSourceMoments_xx",
                "ext_shapeHSM_HsmSourceMoments_yy",
                "base_SdssShape_xx", "base_SdssShape_yy",
                "ext_shapeHSM_HsmPsfMoments_xx",
                "ext_shapeHSM_HsmPsfMoments_yy")

    def _func(self, df):
        """Calculate deconvolved moments: source trace (HSM preferred, SDSS
        fallback where HSM is non-finite) minus the PSF trace.

        Raises
        ------
        RuntimeError
            If no PSF shape columns are present in ``df``.
        """
        if "ext_shapeHSM_HsmSourceMoments_xx" in df.columns:  # _xx added by tdm
            hsm = df["ext_shapeHSM_HsmSourceMoments_xx"] + df["ext_shapeHSM_HsmSourceMoments_yy"]
        else:
            hsm = np.ones(len(df))*np.nan
        sdss = df["base_SdssShape_xx"] + df["base_SdssShape_yy"]
        if "ext_shapeHSM_HsmPsfMoments_xx" in df.columns:
            psf = df["ext_shapeHSM_HsmPsfMoments_xx"] + df["ext_shapeHSM_HsmPsfMoments_yy"]
        else:
            # LSST does not have shape.sdss.psf. Could instead add base_PsfShape to catalog using
            # exposure.getPsf().computeShape(s.getCentroid()).getIxx()
            # raise TaskError("No psf shape parameter found in catalog")
            raise RuntimeError('No psf shape parameter found in catalog')

        # Prefer HSM source moments; fall back to SDSS where HSM is NaN/inf.
        return hsm.where(np.isfinite(hsm), sdss) - psf
1028 
1029 
1031  """Functor to calculate SDSS trace radius size for sources"""
1032  name = "SDSS Trace Size"
1033  shortname = 'sdssTrace'
1034  _columns = ("base_SdssShape_xx", "base_SdssShape_yy")
1035 
1036  def _func(self, df):
1037  srcSize = np.sqrt(0.5*(df["base_SdssShape_xx"] + df["base_SdssShape_yy"]))
1038  return srcSize
1039 
1040 
1042  """Functor to calculate SDSS trace radius size difference (%) between object and psf model"""
1043  name = "PSF - SDSS Trace Size"
1044  shortname = 'psf_sdssTrace'
1045  _columns = ("base_SdssShape_xx", "base_SdssShape_yy",
1046  "base_SdssShape_psf_xx", "base_SdssShape_psf_yy")
1047 
1048  def _func(self, df):
1049  srcSize = np.sqrt(0.5*(df["base_SdssShape_xx"] + df["base_SdssShape_yy"]))
1050  psfSize = np.sqrt(0.5*(df["base_SdssShape_psf_xx"] + df["base_SdssShape_psf_yy"]))
1051  sizeDiff = 100*(srcSize - psfSize)/(0.5*(srcSize + psfSize))
1052  return sizeDiff
1053 
1054 
1056  """Functor to calculate HSM trace radius size for sources"""
1057  name = 'HSM Trace Size'
1058  shortname = 'hsmTrace'
1059  _columns = ("ext_shapeHSM_HsmSourceMoments_xx",
1060  "ext_shapeHSM_HsmSourceMoments_yy")
1061 
1062  def _func(self, df):
1063  srcSize = np.sqrt(0.5*(df["ext_shapeHSM_HsmSourceMoments_xx"]
1064  + df["ext_shapeHSM_HsmSourceMoments_yy"]))
1065  return srcSize
1066 
1067 
1069  """Functor to calculate HSM trace radius size difference (%) between object and psf model"""
1070  name = 'PSF - HSM Trace Size'
1071  shortname = 'psf_HsmTrace'
1072  _columns = ("ext_shapeHSM_HsmSourceMoments_xx",
1073  "ext_shapeHSM_HsmSourceMoments_yy",
1074  "ext_shapeHSM_HsmPsfMoments_xx",
1075  "ext_shapeHSM_HsmPsfMoments_yy")
1076 
1077  def _func(self, df):
1078  srcSize = np.sqrt(0.5*(df["ext_shapeHSM_HsmSourceMoments_xx"]
1079  + df["ext_shapeHSM_HsmSourceMoments_yy"]))
1080  psfSize = np.sqrt(0.5*(df["ext_shapeHSM_HsmPsfMoments_xx"]
1081  + df["ext_shapeHSM_HsmPsfMoments_yy"]))
1082  sizeDiff = 100*(srcSize - psfSize)/(0.5*(srcSize + psfSize))
1083  return sizeDiff
1084 
1085 
    name = 'HSM Psf FWHM'
    _columns = ('ext_shapeHSM_HsmPsfMoments_xx', 'ext_shapeHSM_HsmPsfMoments_yy')
    # TODO: DM-21403 pixel scale should be computed from the CD matrix or transform matrix
    pixelScale = 0.168                     # arcsec/pixel, hard-coded; see TODO above
    SIGMA2FWHM = 2*np.sqrt(2*np.log(2))    # Gaussian sigma -> FWHM conversion factor

    def _func(self, df):
        # PSF FWHM in arcseconds from the PSF trace radius.
        # NOTE(review): doubled names (pixelScalepixelScale, ...) look like a
        # doc-extraction artifact of the class attributes defined above.
        return self.pixelScalepixelScale*self.SIGMA2FWHMSIGMA2FWHM*np.sqrt(
            0.5*(df['ext_shapeHSM_HsmPsfMoments_xx'] + df['ext_shapeHSM_HsmPsfMoments_yy']))
1096 
1097 
class E1(Functor):
    """Distortion ellipticity component e1 = (Ixx - Iyy) / (Ixx + Iyy).

    Parameters
    ----------
    colXX, colXY, colYY : `str`
        Names of the second-moment columns.
    """
    name = "Distortion Ellipticity (e1)"
    shortname = "Distortion"

    def __init__(self, colXX, colXY, colYY, **kwargs):
        # Restore single attribute names; the doubled colXXcolXX etc. were a
        # doc-extraction artifact.
        self.colXX = colXX
        self.colXY = colXY
        self.colYY = colYY
        self._columns = [self.colXX, self.colXY, self.colYY]
        super().__init__(**kwargs)

    @property
    def columns(self):
        return [self.colXX, self.colXY, self.colYY]

    def _func(self, df):
        # Parenthesize the numerator: e1 = (Ixx - Iyy) / (Ixx + Iyy).
        # The previous expression computed Ixx - Iyy/(Ixx + Iyy) due to
        # operator precedence, contradicting the declared quantity.
        return (df[self.colXX] - df[self.colYY]) / (df[self.colXX] + df[self.colYY])
1115 
1116 
class E2(Functor):
    """Distortion ellipticity component e2 = 2*Ixy / (Ixx + Iyy).

    Parameters
    ----------
    colXX, colXY, colYY : `str`
        Names of the second-moment columns.
    """
    name = "Ellipticity e2"

    def __init__(self, colXX, colXY, colYY, **kwargs):
        # Restore single attribute names; the doubled colXXcolXX etc. were a
        # doc-extraction artifact.
        self.colXX = colXX
        self.colXY = colXY
        self.colYY = colYY
        super().__init__(**kwargs)

    @property
    def columns(self):
        return [self.colXX, self.colXY, self.colYY]

    def _func(self, df):
        return 2*df[self.colXY] / (df[self.colXX] + df[self.colYY])
1132 
1133 
1135 
    """Size from quadrupole moments: (Ixx*Iyy - Ixy**2) ** 0.25."""

    def __init__(self, colXX, colXY, colYY, **kwargs):
        # NOTE(review): doubled attribute names look like a doc-extraction
        # artifact of self.colXX etc.
        self.colXXcolXX = colXX
        self.colXYcolXY = colXY
        self.colYYcolYY = colYY
        super().__init__(**kwargs)

    @property
    def columns(self):
        return [self.colXXcolXX, self.colXYcolXY, self.colYYcolYY]

    def _func(self, df):
        # Quarter root of the quadrupole determinant.
        return (df[self.colXXcolXX]*df[self.colYYcolYY] - df[self.colXYcolXY]**2)**0.25
1148 
1149 
1151  """Computations using the stored localWcs.
1152  """
1153  name = "LocalWcsOperations"
1154 
1155  def __init__(self,
1156  colCD_1_1,
1157  colCD_1_2,
1158  colCD_2_1,
1159  colCD_2_2,
1160  **kwargs):
1161  self.colCD_1_1colCD_1_1 = colCD_1_1
1162  self.colCD_1_2colCD_1_2 = colCD_1_2
1163  self.colCD_2_1colCD_2_1 = colCD_2_1
1164  self.colCD_2_2colCD_2_2 = colCD_2_2
1165  super().__init__(**kwargs)
1166 
    def computeDeltaRaDec(self, x, y, cd11, cd12, cd21, cd22):
        """Convert a pixel offset (x, y) to an (RA, Dec) offset using the
        local linear WCS (CD-matrix) approximation.

        Parameters
        ----------
        x : `pandas.Series`
            X pixel coordinate.
        y : `pandas.Series`
            Y pixel coordinate.
        cd11 : `pandas.Series`
            [1, 1] element of the local Wcs affine transform.
        cd12 : `pandas.Series`
            [1, 2] element of the local Wcs affine transform.
        cd21 : `pandas.Series`
            [2, 1] element of the local Wcs affine transform.
        cd22 : `pandas.Series`
            [2, 2] element of the local Wcs affine transform.

        Returns
        -------
        raDecTuple : tuple
            RA and dec conversion of x and y given the local Wcs. Returned
            units are in radians.

        """
        # Linear transform: (dRA, dDec) = CD @ (x, y).
        return (x * cd11 + y * cd12, x * cd21 + y * cd22)
1195 
    def computeSkySeperation(self, ra1, dec1, ra2, dec2):
        """Compute the great-circle distance between two sky coordinates
        using the haversine formula.

        Parameters
        ----------
        ra1 : `pandas.Series`
            Ra of the first coordinate in radians.
        dec1 : `pandas.Series`
            Dec of the first coordinate in radians.
        ra2 : `pandas.Series`
            Ra of the second coordinate in radians.
        dec2 : `pandas.Series`
            Dec of the second coordinate in radians.

        Returns
        -------
        dist : `pandas.Series`
            Distance on the sphere in radians.
        """
        deltaDec = dec2 - dec1
        deltaRa = ra2 - ra1
        # Haversine formula; numerically stable for small separations.
        return 2 * np.arcsin(
            np.sqrt(
                np.sin(deltaDec / 2) ** 2
                + np.cos(dec2) * np.cos(dec1) * np.sin(deltaRa / 2) ** 2))
1221 
    def getSkySeperationFromPixel(self, x1, y1, x2, y2, cd11, cd12, cd21, cd22):
        """Compute the on-sky distance between two pixel positions
        (x1, y1) and (x2, y2) using the local WCS approximation.

        Parameters
        ----------
        x1 : `pandas.Series`
            X pixel coordinate.
        y1 : `pandas.Series`
            Y pixel coordinate.
        x2 : `pandas.Series`
            X pixel coordinate.
        y2 : `pandas.Series`
            Y pixel coordinate.
        cd11 : `pandas.Series`
            [1, 1] element of the local Wcs affine transform.
        cd12 : `pandas.Series`
            [1, 2] element of the local Wcs affine transform.
        cd21 : `pandas.Series`
            [2, 1] element of the local Wcs affine transform.
        cd22 : `pandas.Series`
            [2, 2] element of the local Wcs affine transform.

        Returns
        -------
        dist : `pandas.Series`
            Distance on the sphere in radians (see computeSkySeperation).
        """
        # NOTE(review): the doubled method names below look like a
        # doc-extraction artifact of computeDeltaRaDec / computeSkySeperation.
        ra1, dec1 = self.computeDeltaRaDeccomputeDeltaRaDec(x1, y1, cd11, cd12, cd21, cd22)
        ra2, dec2 = self.computeDeltaRaDeccomputeDeltaRaDec(x2, y2, cd11, cd12, cd21, cd22)
        # Great circle distance for small separations.
        return self.computeSkySeperationcomputeSkySeperation(ra1, dec1, ra2, dec2)
1255 
1256 
1258  """Compute the local pixel scale from the stored CDMatrix.
1259  """
1260  name = "PixelScale"
1261 
1262  @property
1263  def columns(self):
1264  return [self.colCD_1_1colCD_1_1,
1265  self.colCD_1_2colCD_1_2,
1266  self.colCD_2_1colCD_2_1,
1267  self.colCD_2_2colCD_2_2]
1268 
    def pixelScaleArcseconds(self, cd11, cd12, cd21, cd22):
        """Compute the local pixel to scale conversion in arcseconds.

        Parameters
        ----------
        cd11 : `pandas.Series`
            [1, 1] element of the local Wcs affine transform in radians.
        cd12 : `pandas.Series`
            [1, 2] element of the local Wcs affine transform in radians.
        cd21 : `pandas.Series`
            [2, 1] element of the local Wcs affine transform in radians.
        cd22 : `pandas.Series`
            [2, 2] element of the local Wcs affine transform in radians.

        Returns
        -------
        pixScale : `pandas.Series`
            Arcseconds per pixel at the location of the local WC
        """
        # Pixel scale = sqrt(|det CD|), converted from radians to arcseconds.
        return 3600 * np.degrees(np.sqrt(np.fabs(cd11 * cd22 - cd12 * cd21)))

    def _func(self, df):
        # Evaluate the pixel scale from the four CD-matrix columns.
        return self.pixelScaleArcsecondspixelScaleArcseconds(df[self.colCD_1_1colCD_1_1],
                                                             df[self.colCD_1_2colCD_1_2],
                                                             df[self.colCD_2_1colCD_2_1],
                                                             df[self.colCD_2_2colCD_2_2])
1297 
1298 
1300  """Convert a value in units pixels squared to units arcseconds squared.
1301  """
1302 
1303  def __init__(self,
1304  col,
1305  colCD_1_1,
1306  colCD_1_2,
1307  colCD_2_1,
1308  colCD_2_2,
1309  **kwargs):
1310  self.colcol = col
1311  super().__init__(colCD_1_1,
1312  colCD_1_2,
1313  colCD_2_1,
1314  colCD_2_2,
1315  **kwargs)
1316 
1317  @property
1318  def name(self):
1319  return f"{self.col}_asArcseconds"
1320 
1321  @property
1322  def columns(self):
1323  return [self.colcol,
1324  self.colCD_1_1colCD_1_1,
1325  self.colCD_1_2colCD_1_2,
1326  self.colCD_2_1colCD_2_1,
1327  self.colCD_2_2colCD_2_2]
1328 
1329  def _func(self, df):
1330  return df[self.colcol] * self.pixelScaleArcsecondspixelScaleArcseconds(df[self.colCD_1_1colCD_1_1],
1331  df[self.colCD_1_2colCD_1_2],
1332  df[self.colCD_2_1colCD_2_1],
1333  df[self.colCD_2_2colCD_2_2])
1334 
1335 
1337  """Convert a value in units pixels to units arcseconds.
1338  """
1339 
1340  def __init__(self,
1341  col,
1342  colCD_1_1,
1343  colCD_1_2,
1344  colCD_2_1,
1345  colCD_2_2,
1346  **kwargs):
1347  self.colcol = col
1348  super().__init__(colCD_1_1,
1349  colCD_1_2,
1350  colCD_2_1,
1351  colCD_2_2,
1352  **kwargs)
1353 
1354  @property
1355  def name(self):
1356  return f"{self.col}_asArcsecondsSq"
1357 
1358  @property
1359  def columns(self):
1360  return [self.colcol,
1361  self.colCD_1_1colCD_1_1,
1362  self.colCD_1_2colCD_1_2,
1363  self.colCD_2_1colCD_2_1,
1364  self.colCD_2_2colCD_2_2]
1365 
1366  def _func(self, df):
1367  pixScale = self.pixelScaleArcsecondspixelScaleArcseconds(df[self.colCD_1_1colCD_1_1],
1368  df[self.colCD_1_2colCD_1_2],
1369  df[self.colCD_2_1colCD_2_1],
1370  df[self.colCD_2_2colCD_2_2])
1371  return df[self.colcol] * pixScale * pixScale
1372 
1373 
    name = 'Reference Band'
    shortname = 'refBand'

    @property
    def columns(self):
        # Boolean flag columns; True marks the band used as the reference.
        return ["merge_measurement_i",
                "merge_measurement_r",
                "merge_measurement_z",
                "merge_measurement_y",
                "merge_measurement_g"]

    def _func(self, df):
        def getFilterAliasName(row):
            # get column name with the max value (True > False)
            colName = row.idxmax()
            return colName.replace('merge_measurement_', '')

        # NOTE(review): `columnscolumnscolumns` looks like a doc-extraction
        # artifact of `self.columns`.
        return df[self.columnscolumnscolumns].apply(getFilterAliasName, axis=1)
1393 
1394 
    # AB to NanoJansky (3631 Jansky)
    AB_FLUX_SCALE = (0 * u.ABmag).to_value(u.nJy)
    LOG_AB_FLUX_SCALE = 12.56
    FIVE_OVER_2LOG10 = 1.085736204758129569  # 2.5 / ln(10)
    # TO DO: DM-21955 Replace hard coded photometic calibration values
    COADD_ZP = 27

    def __init__(self, colFlux, colFluxErr=None, calib=None, **kwargs):
        """Photometric conversion base.

        Parameters
        ----------
        colFlux : `str`
            Name of the flux column to convert.
        colFluxErr : `str`, optional
            Name of the associated flux-error column.
        calib : optional
            Calibration object providing ``getFluxMag0()``; when None the
            hard-coded ``COADD_ZP`` zero point is used instead.
        """
        # NOTE(review): the doubled attribute names below appear to be a
        # doc-extraction artifact (vhypot, col, colFluxErr, calib, fluxMag0,
        # fluxMag0Err).
        self.vhypotvhypot = np.vectorize(self.hypothypot)
        self.colcol = colFlux
        self.colFluxErrcolFluxErr = colFluxErr

        self.calibcalib = calib
        if calib is not None:
            self.fluxMag0fluxMag0, self.fluxMag0ErrfluxMag0Err = calib.getFluxMag0()
        else:
            # Fall back to the hard-coded coadd zero point.
            self.fluxMag0fluxMag0 = 1./np.power(10, -0.4*self.COADD_ZPCOADD_ZP)
            self.fluxMag0ErrfluxMag0Err = 0.

        super().__init__(**kwargs)
1416 
    @property
    def columns(self):
        return [self.colcol]

    @property
    def name(self):
        return f'mag_{self.col}'

    @classmethod
    def hypot(cls, a, b):
        """Numerically stable sqrt(a**2 + b**2), avoiding overflow by
        factoring out the larger magnitude."""
        if np.abs(a) < np.abs(b):
            a, b = b, a
        if a == 0.:
            return 0.
        q = b/a
        return np.abs(a) * np.sqrt(1. + q*q)
1433 
    def dn2flux(self, dn, fluxMag0):
        # Counts -> flux in nanojanskys, scaled by the AB flux scale.
        return self.AB_FLUX_SCALEAB_FLUX_SCALE * dn / fluxMag0
1436 
1437  def dn2mag(self, dn, fluxMag0):
1438  with np.warnings.catch_warnings():
1439  np.warnings.filterwarnings('ignore', r'invalid value encountered')
1440  np.warnings.filterwarnings('ignore', r'divide by zero')
1441  return -2.5 * np.log10(dn/fluxMag0)
1442 
    def dn2fluxErr(self, dn, dnErr, fluxMag0, fluxMag0Err):
        # Propagate count and zero-point errors to a flux error (nJy):
        # hypot(dn*fluxMag0Err, dnErr*fluxMag0) * AB_FLUX_SCALE / fluxMag0^2.
        retVal = self.vhypotvhypot(dn * fluxMag0Err, dnErr * fluxMag0)
        retVal *= self.AB_FLUX_SCALEAB_FLUX_SCALE / fluxMag0 / fluxMag0
        return retVal

    def dn2MagErr(self, dn, dnErr, fluxMag0, fluxMag0Err):
        # Magnitude error = (2.5/ln 10) * (fluxErr / flux).
        retVal = self.dn2fluxErrdn2fluxErr(dn, dnErr, fluxMag0, fluxMag0Err) / self.dn2fluxdn2flux(dn, fluxMag0)
        return self.FIVE_OVER_2LOG10FIVE_OVER_2LOG10 * retVal
1451 
1452 
    def _func(self, df):
        # Calibrated flux in nanojanskys.
        return self.dn2fluxdn2flux(df[self.colcol], self.fluxMag0fluxMag0)
1456 
1457 
    @property
    def columns(self):
        return [self.colcol, self.colFluxErrcolFluxErr]

    def _func(self, df):
        # Calibrated flux error (nJy) as a Series aligned to df's index.
        retArr = self.dn2fluxErrdn2fluxErr(df[self.colcol], df[self.colFluxErrcolFluxErr], self.fluxMag0fluxMag0, self.fluxMag0ErrfluxMag0Err)
        return pd.Series(retArr, index=df.index)
1466 
1467 
    def _func(self, df):
        # AB magnitude from the calibrated flux column.
        return self.dn2magdn2mag(df[self.colcol], self.fluxMag0fluxMag0)
1471 
1472 
    @property
    def columns(self):
        return [self.colcol, self.colFluxErrcolFluxErr]

    def _func(self, df):
        # Magnitude error as a Series aligned to df's index.
        retArr = self.dn2MagErrdn2MagErr(df[self.colcol], df[self.colFluxErrcolFluxErr], self.fluxMag0fluxMag0, self.fluxMag0ErrfluxMag0Err)
        return pd.Series(retArr, index=df.index)
1481 
1482 
1484  """Base class for calibrating the specified instrument flux column using
1485  the local photometric calibration.
1486 
1487  Parameters
1488  ----------
1489  instFluxCol : `str`
1490  Name of the instrument flux column.
1491  instFluxErrCol : `str`
1492  Name of the assocated error columns for ``instFluxCol``.
1493  photoCalibCol : `str`
1494  Name of local calibration column.
1495  photoCalibErrCol : `str`
1496  Error associated with ``photoCalibCol``
1497 
1498  See also
1499  --------
1500  LocalPhotometry
1501  LocalNanojansky
1502  LocalNanojanskyErr
1503  LocalMagnitude
1504  LocalMagnitudeErr
1505  """
1506  logNJanskyToAB = (1 * u.nJy).to_value(u.ABmag)
1507 
1508  def __init__(self,
1509  instFluxCol,
1510  instFluxErrCol,
1511  photoCalibCol,
1512  photoCalibErrCol,
1513  **kwargs):
1514  self.instFluxColinstFluxCol = instFluxCol
1515  self.instFluxErrColinstFluxErrCol = instFluxErrCol
1516  self.photoCalibColphotoCalibCol = photoCalibCol
1517  self.photoCalibErrColphotoCalibErrCol = photoCalibErrCol
1518  super().__init__(**kwargs)
1519 
1520  def instFluxToNanojansky(self, instFlux, localCalib):
1521  """Convert instrument flux to nanojanskys.
1522 
1523  Parameters
1524  ----------
1525  instFlux : `numpy.ndarray` or `pandas.Series`
1526  Array of instrument flux measurements
1527  localCalib : `numpy.ndarray` or `pandas.Series`
1528  Array of local photometric calibration estimates.
1529 
1530  Returns
1531  -------
1532  calibFlux : `numpy.ndarray` or `pandas.Series`
1533  Array of calibrated flux measurements.
1534  """
1535  return instFlux * localCalib
1536 
1537  def instFluxErrToNanojanskyErr(self, instFlux, instFluxErr, localCalib, localCalibErr):
1538  """Convert instrument flux to nanojanskys.
1539 
1540  Parameters
1541  ----------
1542  instFlux : `numpy.ndarray` or `pandas.Series`
1543  Array of instrument flux measurements
1544  instFluxErr : `numpy.ndarray` or `pandas.Series`
1545  Errors on associated ``instFlux`` values
1546  localCalib : `numpy.ndarray` or `pandas.Series`
1547  Array of local photometric calibration estimates.
1548  localCalibErr : `numpy.ndarray` or `pandas.Series`
1549  Errors on associated ``localCalib`` values
1550 
1551  Returns
1552  -------
1553  calibFluxErr : `numpy.ndarray` or `pandas.Series`
1554  Errors on calibrated flux measurements.
1555  """
1556  return np.hypot(instFluxErr * localCalib, instFlux * localCalibErr)
1557 
    def instFluxToMagnitude(self, instFlux, localCalib):
        """Convert instrument flux to an AB magnitude.

        Parameters
        ----------
        instFlux : `numpy.ndarray` or `pandas.Series`
            Array of instrument flux measurements
        localCalib : `numpy.ndarray` or `pandas.Series`
            Array of local photometric calibration estimates.

        Returns
        -------
        calibMag : `numpy.ndarray` or `pandas.Series`
            Array of calibrated AB magnitudes.
        """
        # -2.5*log10(flux_nJy) + AB offset for 1 nJy.
        return -2.5 * np.log10(self.instFluxToNanojanskyinstFluxToNanojansky(instFlux, localCalib)) + self.logNJanskyToABlogNJanskyToAB
1574 
    def instFluxErrToMagnitudeErr(self, instFlux, instFluxErr, localCalib, localCalibErr):
        """Convert instrument flux error to an AB magnitude error.

        Parameters
        ----------
        instFlux : `numpy.ndarray` or `pandas.Series`
            Array of instrument flux measurements
        instFluxErr : `numpy.ndarray` or `pandas.Series`
            Errors on associated ``instFlux`` values
        localCalib : `numpy.ndarray` or `pandas.Series`
            Array of local photometric calibration estimates.
        localCalibErr : `numpy.ndarray` or `pandas.Series`
            Errors on associated ``localCalib`` values

        Returns
        -------
        calibMagErr: `numpy.ndarray` or `pandas.Series`
            Error on calibrated AB magnitudes.
        """
        err = self.instFluxErrToNanojanskyErrinstFluxErrToNanojanskyErr(instFlux, instFluxErr, localCalib, localCalibErr)
        # NOTE(review): instFluxToNanojansky expects (instFlux, localCalib),
        # but instFluxErr is passed as the second argument here — looks like
        # a bug; confirm against upstream before changing.
        return 2.5 / np.log(10) * err / self.instFluxToNanojanskyinstFluxToNanojansky(instFlux, instFluxErr)
1596 
1597 
1599  """Compute calibrated fluxes using the local calibration value.
1600 
1601  See also
1602  --------
1603  LocalNanojansky
1604  LocalNanojanskyErr
1605  LocalMagnitude
1606  LocalMagnitudeErr
1607  """
1608 
1609  @property
1610  def columns(self):
1611  return [self.instFluxColinstFluxCol, self.photoCalibColphotoCalibCol]
1612 
1613  @property
1614  def name(self):
1615  return f'flux_{self.instFluxCol}'
1616 
1617  def _func(self, df):
1618  return self.instFluxToNanojanskyinstFluxToNanojansky(df[self.instFluxColinstFluxCol], df[self.photoCalibColphotoCalibCol])
1619 
1620 
1622  """Compute calibrated flux errors using the local calibration value.
1623 
1624  See also
1625  --------
1626  LocalNanojansky
1627  LocalNanojanskyErr
1628  LocalMagnitude
1629  LocalMagnitudeErr
1630  """
1631 
1632  @property
1633  def columns(self):
1634  return [self.instFluxColinstFluxCol, self.instFluxErrColinstFluxErrCol,
1635  self.photoCalibColphotoCalibCol, self.photoCalibErrColphotoCalibErrCol]
1636 
1637  @property
1638  def name(self):
1639  return f'fluxErr_{self.instFluxCol}'
1640 
1641  def _func(self, df):
1642  return self.instFluxErrToNanojanskyErrinstFluxErrToNanojanskyErr(df[self.instFluxColinstFluxCol], df[self.instFluxErrColinstFluxErrCol],
1643  df[self.photoCalibColphotoCalibCol], df[self.photoCalibErrColphotoCalibErrCol])
1644 
1645 
1647  """Compute calibrated AB magnitudes using the local calibration value.
1648 
1649  See also
1650  --------
1651  LocalNanojansky
1652  LocalNanojanskyErr
1653  LocalMagnitude
1654  LocalMagnitudeErr
1655  """
1656 
1657  @property
1658  def columns(self):
1659  return [self.instFluxColinstFluxCol, self.photoCalibColphotoCalibCol]
1660 
1661  @property
1662  def name(self):
1663  return f'mag_{self.instFluxCol}'
1664 
1665  def _func(self, df):
1666  return self.instFluxToMagnitudeinstFluxToMagnitude(df[self.instFluxColinstFluxCol],
1667  df[self.photoCalibColphotoCalibCol])
1668 
1669 
1671  """Compute calibrated AB magnitude errors using the local calibration value.
1672 
1673  See also
1674  --------
1675  LocalNanojansky
1676  LocalNanojanskyErr
1677  LocalMagnitude
1678  LocalMagnitudeErr
1679  """
1680 
1681  @property
1682  def columns(self):
1683  return [self.instFluxColinstFluxCol, self.instFluxErrColinstFluxErrCol,
1684  self.photoCalibColphotoCalibCol, self.photoCalibErrColphotoCalibErrCol]
1685 
1686  @property
1687  def name(self):
1688  return f'magErr_{self.instFluxCol}'
1689 
1690  def _func(self, df):
1691  return self.instFluxErrToMagnitudeErrinstFluxErrToMagnitudeErr(df[self.instFluxColinstFluxCol],
1692  df[self.instFluxErrColinstFluxErrCol],
1693  df[self.photoCalibColphotoCalibCol],
1694  df[self.photoCalibErrColphotoCalibErrCol])
1695 
1696 
1698  """Compute absolute mean of dipole fluxes.
1699 
1700  See also
1701  --------
1702  LocalNanojansky
1703  LocalNanojanskyErr
1704  LocalMagnitude
1705  LocalMagnitudeErr
1706  LocalDipoleMeanFlux
1707  LocalDipoleMeanFluxErr
1708  LocalDipoleDiffFlux
1709  LocalDipoleDiffFluxErr
1710  """
1711  def __init__(self,
1712  instFluxPosCol,
1713  instFluxNegCol,
1714  instFluxPosErrCol,
1715  instFluxNegErrCol,
1716  photoCalibCol,
1717  photoCalibErrCol,
1718  **kwargs):
1719  self.instFluxNegColinstFluxNegCol = instFluxNegCol
1720  self.instFluxPosColinstFluxPosCol = instFluxPosCol
1721  self.instFluxNegErrColinstFluxNegErrCol = instFluxNegErrCol
1722  self.instFluxPosErrColinstFluxPosErrCol = instFluxPosErrCol
1723  self.photoCalibColphotoCalibColphotoCalibCol = photoCalibCol
1724  self.photoCalibErrColphotoCalibErrColphotoCalibErrCol = photoCalibErrCol
1725  super().__init__(instFluxNegCol,
1726  instFluxNegErrCol,
1727  photoCalibCol,
1728  photoCalibErrCol,
1729  **kwargs)
1730 
1731  @property
1732  def columns(self):
1733  return [self.instFluxPosColinstFluxPosCol,
1734  self.instFluxNegColinstFluxNegCol,
1735  self.photoCalibColphotoCalibColphotoCalibCol]
1736 
1737  @property
1738  def name(self):
1739  return f'dipMeanFlux_{self.instFluxPosCol}_{self.instFluxNegCol}'
1740 
1741  def _func(self, df):
1742  return 0.5*(np.fabs(self.instFluxToNanojanskyinstFluxToNanojansky(df[self.instFluxNegColinstFluxNegCol], df[self.photoCalibColphotoCalibColphotoCalibCol]))
1743  + np.fabs(self.instFluxToNanojanskyinstFluxToNanojansky(df[self.instFluxPosColinstFluxPosCol], df[self.photoCalibColphotoCalibColphotoCalibCol])))
1744 
1745 
1747  """Compute the error on the absolute mean of dipole fluxes.
1748 
1749  See also
1750  --------
1751  LocalNanojansky
1752  LocalNanojanskyErr
1753  LocalMagnitude
1754  LocalMagnitudeErr
1755  LocalDipoleMeanFlux
1756  LocalDipoleMeanFluxErr
1757  LocalDipoleDiffFlux
1758  LocalDipoleDiffFluxErr
1759  """
1760 
1761  @property
1762  def columns(self):
1763  return [self.instFluxPosColinstFluxPosCol,
1764  self.instFluxNegColinstFluxNegCol,
1765  self.instFluxPosErrColinstFluxPosErrCol,
1766  self.instFluxNegErrColinstFluxNegErrCol,
1767  self.photoCalibColphotoCalibColphotoCalibCol,
1768  self.photoCalibErrColphotoCalibErrColphotoCalibErrCol]
1769 
1770  @property
1771  def name(self):
1772  return f'dipMeanFluxErr_{self.instFluxPosCol}_{self.instFluxNegCol}'
1773 
1774  def _func(self, df):
1775  return 0.5*np.sqrt(
1776  (np.fabs(df[self.instFluxNegColinstFluxNegCol]) + np.fabs(df[self.instFluxPosColinstFluxPosCol])
1777  * df[self.photoCalibErrColphotoCalibErrColphotoCalibErrCol])**2
1778  + (df[self.instFluxNegErrColinstFluxNegErrCol]**2 + df[self.instFluxPosErrColinstFluxPosErrCol]**2)
1779  * df[self.photoCalibColphotoCalibColphotoCalibCol]**2)
1780 
1781 
1783  """Compute the absolute difference of dipole fluxes.
1784 
1785  Value is (abs(pos) - abs(neg))
1786 
1787  See also
1788  --------
1789  LocalNanojansky
1790  LocalNanojanskyErr
1791  LocalMagnitude
1792  LocalMagnitudeErr
1793  LocalDipoleMeanFlux
1794  LocalDipoleMeanFluxErr
1795  LocalDipoleDiffFlux
1796  LocalDipoleDiffFluxErr
1797  """
1798 
1799  @property
1800  def columns(self):
1801  return [self.instFluxPosColinstFluxPosCol,
1802  self.instFluxNegColinstFluxNegCol,
1803  self.photoCalibColphotoCalibColphotoCalibCol]
1804 
1805  @property
1806  def name(self):
1807  return f'dipDiffFlux_{self.instFluxPosCol}_{self.instFluxNegCol}'
1808 
1809  def _func(self, df):
1810  return (np.fabs(self.instFluxToNanojanskyinstFluxToNanojansky(df[self.instFluxPosColinstFluxPosCol], df[self.photoCalibColphotoCalibColphotoCalibCol]))
1811  - np.fabs(self.instFluxToNanojanskyinstFluxToNanojansky(df[self.instFluxNegColinstFluxNegCol], df[self.photoCalibColphotoCalibColphotoCalibCol])))
1812 
1813 
1815  """Compute the error on the absolute difference of dipole fluxes.
1816 
1817  See also
1818  --------
1819  LocalNanojansky
1820  LocalNanojanskyErr
1821  LocalMagnitude
1822  LocalMagnitudeErr
1823  LocalDipoleMeanFlux
1824  LocalDipoleMeanFluxErr
1825  LocalDipoleDiffFlux
1826  LocalDipoleDiffFluxErr
1827  """
1828 
1829  @property
1830  def columns(self):
1831  return [self.instFluxPosColinstFluxPosCol,
1832  self.instFluxNegColinstFluxNegCol,
1833  self.instFluxPosErrColinstFluxPosErrCol,
1834  self.instFluxNegErrColinstFluxNegErrCol,
1835  self.photoCalibColphotoCalibColphotoCalibCol,
1836  self.photoCalibErrColphotoCalibErrColphotoCalibErrCol]
1837 
1838  @property
1839  def name(self):
1840  return f'dipDiffFluxErr_{self.instFluxPosCol}_{self.instFluxNegCol}'
1841 
1842  def _func(self, df):
1843  return np.sqrt(
1844  ((np.fabs(df[self.instFluxPosColinstFluxPosCol]) - np.fabs(df[self.instFluxNegColinstFluxNegCol]))
1845  * df[self.photoCalibErrColphotoCalibErrColphotoCalibErrCol])**2
1846  + (df[self.instFluxPosErrColinstFluxPosErrCol]**2 + df[self.instFluxNegErrColinstFluxNegErrCol]**2)
1847  * df[self.photoCalibColphotoCalibColphotoCalibCol]**2)
1848 
1849 
1851  """Base class for returning the ratio of 2 columns.
1852 
1853  Can be used to compute a Signal to Noise ratio for any input flux.
1854 
1855  Parameters
1856  ----------
1857  numerator : `str`
1858  Name of the column to use at the numerator in the ratio
1859  denominator : `str`
1860  Name of the column to use as the denominator in the ratio.
1861  """
1862  def __init__(self,
1863  numerator,
1864  denominator,
1865  **kwargs):
1866  self.numeratornumerator = numerator
1867  self.denominatordenominator = denominator
1868  super().__init__(**kwargs)
1869 
1870  @property
1871  def columns(self):
1872  return [self.numeratornumerator, self.denominatordenominator]
1873 
1874  @property
1875  def name(self):
1876  return f'ratio_{self.numerator}_{self.denominator}'
1877 
1878  def _func(self, df):
1879  with np.warnings.catch_warnings():
1880  np.warnings.filterwarnings('ignore', r'invalid value encountered')
1881  np.warnings.filterwarnings('ignore', r'divide by zero')
1882  return df[self.numeratornumerator] / df[self.denominatordenominator]
def multilevelColumns(self, parq, **kwargs)
Definition: functors.py:939
def __init__(self, col, filt2, filt1, **kwargs)
Definition: functors.py:910
def __init__(self, col, **kwargs)
Definition: functors.py:631
def __init__(self, funcs, **kwargs)
Definition: functors.py:403
def __call__(self, data, **kwargs)
Definition: functors.py:455
def from_file(cls, filename, **kwargs)
Definition: functors.py:527
def from_yaml(cls, translationDefinition, **kwargs)
Definition: functors.py:536
def renameCol(cls, col, renameRules)
Definition: functors.py:518
def multilevelColumns(self, data, **kwargs)
Definition: functors.py:441
def pixelScaleArcseconds(self, cd11, cd12, cd21, cd22)
Definition: functors.py:1269
def __init__(self, col, colCD_1_1, colCD_1_2, colCD_2_1, colCD_2_2, **kwargs)
Definition: functors.py:1346
def __init__(self, col, colCD_1_1, colCD_1_2, colCD_2_1, colCD_2_2, **kwargs)
Definition: functors.py:1309
def __init__(self, col, **kwargs)
Definition: functors.py:677
def __init__(self, expr, **kwargs)
Definition: functors.py:600
def __init__(self, **kwargs)
Definition: functors.py:705
def __call__(self, catalog, **kwargs)
Definition: functors.py:708
def __init__(self, colXX, colXY, colYY, **kwargs)
Definition: functors.py:1102
def __init__(self, colXX, colXY, colYY, **kwargs)
Definition: functors.py:1120
def __call__(self, data, dropna=False)
Definition: functors.py:340
def _func(self, df, dropna=True)
Definition: functors.py:279
def multilevelColumns(self, data, columnIndex=None, returnTuple=False)
Definition: functors.py:229
def _get_data_columnLevelNames(self, data, columnIndex=None)
Definition: functors.py:186
def difference(self, data1, data2, **kwargs)
Definition: functors.py:351
def __init__(self, filt=None, dataset=None, noDup=None)
Definition: functors.py:142
def _get_columnIndex(self, data)
Definition: functors.py:282
def _colsFromDict(self, colDict, columnIndex=None)
Definition: functors.py:208
def _get_data_columnLevels(self, data, columnIndex=None)
Definition: functors.py:162
def __init__(self, ra, decl, **kwargs)
Definition: functors.py:719
def __call__(self, parq, dropna=False, **kwargs)
Definition: functors.py:959
def __init__(self, instFluxPosCol, instFluxNegCol, instFluxPosErrCol, instFluxNegErrCol, photoCalibCol, photoCalibErrCol, **kwargs)
Definition: functors.py:1718
def instFluxToNanojansky(self, instFlux, localCalib)
Definition: functors.py:1520
def instFluxErrToMagnitudeErr(self, instFlux, instFluxErr, localCalib, localCalibErr)
Definition: functors.py:1575
def __init__(self, instFluxCol, instFluxErrCol, photoCalibCol, photoCalibErrCol, **kwargs)
Definition: functors.py:1513
def instFluxErrToNanojanskyErr(self, instFlux, instFluxErr, localCalib, localCalibErr)
Definition: functors.py:1537
def instFluxToMagnitude(self, instFlux, localCalib)
Definition: functors.py:1558
def __init__(self, colCD_1_1, colCD_1_2, colCD_2_1, colCD_2_2, **kwargs)
Definition: functors.py:1160
def computeDeltaRaDec(self, x, y, cd11, cd12, cd21, cd22)
Definition: functors.py:1167
def computeSkySeperation(self, ra1, dec1, ra2, dec2)
Definition: functors.py:1196
def getSkySeperationFromPixel(self, x1, y1, x2, y2, cd11, cd12, cd21, cd22)
Definition: functors.py:1222
def __init__(self, col1, col2, **kwargs)
Definition: functors.py:857
def __init__(self, *args, **kwargs)
Definition: functors.py:818
def __init__(self, col, calib=None, **kwargs)
Definition: functors.py:780
def dn2mag(self, dn, fluxMag0)
Definition: functors.py:1437
def dn2flux(self, dn, fluxMag0)
Definition: functors.py:1434
def dn2fluxErr(self, dn, dnErr, fluxMag0, fluxMag0Err)
Definition: functors.py:1443
def dn2MagErr(self, dn, dnErr, fluxMag0, fluxMag0Err)
Definition: functors.py:1448
def __init__(self, colFlux, colFluxErr=None, calib=None, **kwargs)
Definition: functors.py:1403
def __call__(self, catalog, **kwargs)
Definition: functors.py:695
def __init__(self, **kwargs)
Definition: functors.py:692
def __init__(self, colXX, colXY, colYY, **kwargs)
Definition: functors.py:1136
def __init__(self, numerator, denominator, **kwargs)
Definition: functors.py:1865
def mag_aware_eval(df, expr)
Definition: functors.py:565
def init_fromDict(initDict, basePath='lsst.pipe.tasks.functors', typeKey='functor', name=None)
Definition: functors.py:40