Coverage for python / lsst / pipe / tasks / functors.py: 38%

940 statements  

« prev     ^ index     » next       coverage.py v7.13.5, created at 2026-04-21 10:40 +0000

1# This file is part of pipe_tasks. 

2# 

3# Developed for the LSST Data Management System. 

4# This product includes software developed by the LSST Project 

5# (https://www.lsst.org). 

6# See the COPYRIGHT file at the top-level directory of this distribution 

7# for details of code ownership. 

8# 

9# This program is free software: you can redistribute it and/or modify 

10# it under the terms of the GNU General Public License as published by 

11# the Free Software Foundation, either version 3 of the License, or 

12# (at your option) any later version. 

13# 

14# This program is distributed in the hope that it will be useful, 

15# but WITHOUT ANY WARRANTY; without even the implied warranty of 

16# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 

17# GNU General Public License for more details. 

18# 

19# You should have received a copy of the GNU General Public License 

20# along with this program. If not, see <https://www.gnu.org/licenses/>. 

21 

# Public API of this module, as exposed by `from ... import *`.
# NOTE(review): several classes defined in this file (RAErrColumn,
# DecErrColumn, RADecCovColumn, MultibandColumn,
# MultibandSinglePrecisionFloatColumn) are not listed here — confirm
# whether that omission is intentional before relying on `import *`.
__all__ = ["init_fromDict", "Functor", "CompositeFunctor", "mag_aware_eval",
           "CustomFunctor", "Column", "Index", "CoordColumn", "RAColumn",
           "DecColumn", "SinglePrecisionFloatColumn", "HtmIndex20", "fluxName", "fluxErrName", "Mag",
           "MagErr", "MagDiff", "Color", "DeconvolvedMoments", "SdssTraceSize",
           "PsfSdssTraceSizeDiff", "HsmTraceSize", "PsfHsmTraceSizeDiff",
           "HsmFwhm", "E1", "E2", "RadiusFromQuadrupole", "LocalWcs",
           "ComputePixelScale", "ConvertPixelToArcseconds",
           "ConvertPixelSqToArcsecondsSq",
           "ConvertDetectorAngleToPositionAngle",
           "ReferenceBand", "Photometry",
           "NanoJansky", "NanoJanskyErr", "LocalPhotometry", "LocalNanojansky",
           "LocalNanojanskyErr", "LocalDipoleMeanFlux",
           "LocalDipoleMeanFluxErr", "LocalDipoleDiffFlux",
           "LocalDipoleDiffFluxErr", "Ebv",
           "MomentsIuuSky", "MomentsIvvSky", "MomentsIuvSky",
           "CorrelationIuuSky", "CorrelationIvvSky", "CorrelationIuvSky",
           "PositionAngleFromMoments", "PositionAngleFromCorrelation",
           "SemimajorAxisFromMoments", "SemimajorAxisFromCorrelation",
           "SemiminorAxisFromMoments", "SemiminorAxisFromCorrelation",
           ]

42 

43import logging 

44import os 

45import os.path 

46import re 

47import warnings 

48from contextlib import redirect_stdout 

49from itertools import product 

50 

51import astropy.units as u 

52import lsst.geom as geom 

53import lsst.sphgeom as sphgeom 

54import numpy as np 

55import pandas as pd 

56import yaml 

57from astropy.coordinates import SkyCoord 

58from lsst.daf.butler import DeferredDatasetHandle 

59from lsst.pipe.base import InMemoryDatasetHandle 

60from lsst.utils import doImport 

61from lsst.utils.introspection import get_full_type_name 

62 

63 

def init_fromDict(initDict, basePath='lsst.pipe.tasks.functors',
                  typeKey='functor', name=None):
    """Initialize an object defined in a dictionary.

    The object needs to be importable as f'{basePath}.{initDict[typeKey]}'.
    The positional and keyword arguments (if any) are contained in "args" and
    "kwargs" entries in the dictionary, respectively.
    This is used in `~lsst.pipe.tasks.functors.CompositeFunctor.from_yaml` to
    initialize a composite functor from a specification in a YAML file.

    Parameters
    ----------
    initDict : dictionary
        Dictionary describing object's initialization.
        Must contain an entry keyed by ``typeKey`` that is the name of the
        object, relative to ``basePath``.
    basePath : str
        Path relative to module in which ``initDict[typeKey]`` is defined.
    typeKey : str
        Key of ``initDict`` that is the name of the object (relative to
        ``basePath``).
    name : str, optional
        Name used only to produce a readable error message on failure.

    Returns
    -------
    element
        The constructed functor instance.

    Raises
    ------
    Exception
        Re-raised (same type as the original) with a message identifying
        the failing functor, chained to the original exception.
    """
    # Copy so that popping keys does not mutate the caller's dictionary.
    initDict = initDict.copy()
    # TO DO: DM-21956 We should be able to define functors outside this module
    pythonType = doImport(f'{basePath}.{initDict.pop(typeKey)}')
    args = []
    if 'args' in initDict:
        args = initDict.pop('args')
        # A lone string is treated as a single positional argument.
        if isinstance(args, str):
            args = [args]
    try:
        element = pythonType(*args, **initDict)
    except Exception as e:
        message = f'Error in constructing functor "{name}" of type {pythonType.__name__} with args: {args}'
        # Chain the original exception so its traceback is preserved
        # instead of being discarded by the reconstructed instance.
        raise type(e)(message, e.args) from e
    return element

100 

101 

class Functor(object):
    """Define and execute a calculation on a DataFrame or Handle holding a
    DataFrame.

    The `__call__` method accepts either a `~pandas.DataFrame` object or a
    `~lsst.daf.butler.DeferredDatasetHandle` or
    `~lsst.pipe.base.InMemoryDatasetHandle`, and returns the
    result of the calculation as a single column.
    Each functor defines what columns are needed for the calculation, and only
    these columns are read from the dataset handle.

    The action of `__call__` consists of two steps: first, loading the
    necessary columns from disk into memory as a `~pandas.DataFrame` object;
    and second, performing the computation on this DataFrame and returning the
    result.

    To define a new `Functor`, a subclass must define a `_func` method,
    that takes a `~pandas.DataFrame` and returns result in a `~pandas.Series`.
    In addition, it must define the following attributes:

    * `_columns`: The columns necessary to perform the calculation
    * `name`: A name appropriate for a figure axis label
    * `shortname`: A name appropriate for use as a dictionary key

    On initialization, a `Functor` should declare what band (``filt`` kwarg)
    and dataset (e.g. ``'ref'``, ``'meas'``, ``'forced_src'``) it is intended
    to be applied to.
    This enables the `_get_data` method to extract the proper columns from the
    underlying data.
    If not specified, the dataset will fall back on the `_defaultDataset`
    attribute.
    If band is not specified and ``dataset`` is anything other than ``'ref'``,
    then an error will be raised when trying to perform the calculation.

    Originally, `Functor` was set up to expect datasets formatted like the
    ``deepCoadd_obj`` dataset; that is, a DataFrame with a multi-level column
    index, with the levels of the column index being ``band``, ``dataset``, and
    ``column``.
    It has since been generalized to apply to DataFrames without multi-level
    indices and multi-level indices with just ``dataset`` and ``column``
    levels.
    In addition, the `_get_data` method that reads the columns from the
    underlying data will return a DataFrame with column index levels defined by
    the `_dfLevels` attribute; by default, this is ``column``.

    The `_dfLevels` attributes should generally not need to be changed, unless
    `_func` needs columns from multiple filters or datasets to do the
    calculation.
    An example of this is the `~lsst.pipe.tasks.functors.Color` functor, for
    which `_dfLevels = ('band', 'column')`, and `_func` expects the DataFrame
    it gets to have those levels in the column index.

    Parameters
    ----------
    filt : str
        Band upon which to do the calculation.

    dataset : str
        Dataset upon which to do the calculation (e.g., 'ref', 'meas',
        'forced_src').
    """

    # Dataset used when the caller passes dataset=None.
    _defaultDataset = 'ref'
    # Column-index levels retained in the DataFrame handed to `_func`.
    _dfLevels = ('column',)
    # Fallback value for the `noDup` property when noDup=None.
    _defaultNoDup = False

    def __init__(self, filt=None, dataset=None, noDup=None):
        self.filt = filt
        self.dataset = dataset if dataset is not None else self._defaultDataset
        self._noDup = noDup
        # One logger per concrete functor class, named after the class.
        self.log = logging.getLogger(type(self).__name__)

    @property
    def noDup(self):
        """Do not explode by band if used on object table."""
        # Explicit constructor argument wins over the class default.
        if self._noDup is not None:
            return self._noDup
        else:
            return self._defaultNoDup

    @property
    def columns(self):
        """Columns required to perform calculation."""
        # Subclasses either set `_columns` or override this property.
        if not hasattr(self, '_columns'):
            raise NotImplementedError('Must define columns property or _columns attribute')
        return self._columns

    def _get_data_columnLevels(self, data, columnIndex=None):
        """Gets the names of the column index levels.

        This should only be called in the context of a multilevel table.

        Parameters
        ----------
        data : various
            The data to be read, can be a
            `~lsst.daf.butler.DeferredDatasetHandle` or
            `~lsst.pipe.base.InMemoryDatasetHandle`.
        columnIndex (optional): pandas `~pandas.Index` object
            If not passed, then it is read from the
            `~lsst.daf.butler.DeferredDatasetHandle`
            for `~lsst.pipe.base.InMemoryDatasetHandle`.
        """
        # Only hit the handle when the caller did not supply the index;
        # `data` may legitimately be None when columnIndex is given.
        if columnIndex is None:
            columnIndex = data.get(component="columns")
        return columnIndex.names

    def _get_data_columnLevelNames(self, data, columnIndex=None):
        """Gets the content of each of the column levels for a multilevel
        table.

        Returns a dict mapping each level name to the sorted unique values
        found at that level of the column index.
        """
        if columnIndex is None:
            columnIndex = data.get(component="columns")

        columnLevels = columnIndex.names
        # Each MultiIndex entry is a tuple; column i of the stacked array
        # holds the values of level i.
        columnLevelNames = {
            level: list(np.unique(np.array([c for c in columnIndex])[:, i]))
            for i, level in enumerate(columnLevels)
        }
        return columnLevelNames

    def _colsFromDict(self, colDict, columnIndex=None):
        """Converts a dictionary column specification to a list of columns
        (tuples) actually present in ``columnIndex``."""
        new_colDict = {}
        # `data` is unused by _get_data_columnLevels when columnIndex is
        # provided, hence the None.
        columnLevels = self._get_data_columnLevels(None, columnIndex=columnIndex)

        for i, lev in enumerate(columnLevels):
            if lev in colDict:
                # Normalize a bare string to a one-element list.
                if isinstance(colDict[lev], str):
                    new_colDict[lev] = [colDict[lev]]
                else:
                    new_colDict[lev] = colDict[lev]
            else:
                # Level not constrained: take every value it can have.
                new_colDict[lev] = columnIndex.levels[i]

        levelCols = [new_colDict[lev] for lev in columnLevels]
        # Cartesian product over levels, then keep only combinations that
        # actually exist in the table.
        cols = list(product(*levelCols))
        colsAvailable = [col for col in cols if col in columnIndex]
        return colsAvailable

    def multilevelColumns(self, data, columnIndex=None, returnTuple=False):
        """Returns columns needed by functor from multilevel dataset.

        To access tables with multilevel column structure, the
        `~lsst.daf.butler.DeferredDatasetHandle` or
        `~lsst.pipe.base.InMemoryDatasetHandle` needs to be passed
        either a list of tuples or a dictionary.

        Parameters
        ----------
        data : various
            The data as either `~lsst.daf.butler.DeferredDatasetHandle`, or
            `~lsst.pipe.base.InMemoryDatasetHandle`.
        columnIndex (optional): pandas `~pandas.Index` object
            Either passed or read in from
            `~lsst.daf.butler.DeferredDatasetHandle`.
        `returnTuple` : `bool`
            If true, then return a list of tuples rather than the column
            dictionary specification.
            This is set to `True` by `CompositeFunctor` in order to be able to
            combine columns from the various component functors.

        """
        if not isinstance(data, (DeferredDatasetHandle, InMemoryDatasetHandle)):
            raise RuntimeError(f"Unexpected data type. Got {get_full_type_name(data)}.")

        if columnIndex is None:
            columnIndex = data.get(component="columns")

        # Confirm that the dataset has the column levels the functor is
        # expecting it to have.
        columnLevels = self._get_data_columnLevels(data, columnIndex)

        columnDict = {'column': self.columns,
                      'dataset': self.dataset}
        if self.filt is None:
            columnLevelNames = self._get_data_columnLevelNames(data, columnIndex)
            if "band" in columnLevels:
                # The 'ref' dataset is band-independent, so any band is
                # acceptable; arbitrarily use the first one present.
                if self.dataset == "ref":
                    columnDict["band"] = columnLevelNames["band"][0]
                else:
                    raise ValueError(f"'filt' not set for functor {self.name}"
                                     f"(dataset {self.dataset}) "
                                     "and DataFrame "
                                     "contains multiple filters in column index. "
                                     "Set 'filt' or set 'dataset' to 'ref'.")
        else:
            columnDict['band'] = self.filt

        if returnTuple:
            return self._colsFromDict(columnDict, columnIndex=columnIndex)
        else:
            return columnDict

    def _func(self, df, dropna=True):
        # Subclasses must override. NOTE(review): the `dropna` parameter is
        # never used here and overrides define `_func(self, df)`; dropping
        # of NaNs is handled by `__call__` instead.
        raise NotImplementedError('Must define calculation on DataFrame')

    def _get_columnIndex(self, data):
        """Return columnIndex."""

        # Plain DataFrames return None; handles report their column index
        # without loading the full table.
        if isinstance(data, (DeferredDatasetHandle, InMemoryDatasetHandle)):
            return data.get(component="columns")
        else:
            return None

    def _get_data(self, data):
        """Retrieve DataFrame necessary for calculation.

        The data argument can be a `~pandas.DataFrame`, a
        `~lsst.daf.butler.DeferredDatasetHandle`, or
        an `~lsst.pipe.base.InMemoryDatasetHandle`.

        Returns a DataFrame upon which `self._func` can act.
        """
        # We wrap a DataFrame in a handle here to take advantage of the
        # DataFrame delegate DataFrame column wrangling abilities.
        if isinstance(data, pd.DataFrame):
            _data = InMemoryDatasetHandle(data, storageClass="DataFrame")
        elif isinstance(data, (DeferredDatasetHandle, InMemoryDatasetHandle)):
            _data = data
        else:
            raise RuntimeError(f"Unexpected type provided for data. Got {get_full_type_name(data)}.")

        # First thing to do: check to see if the data source has a multilevel
        # column index or not.
        columnIndex = self._get_columnIndex(_data)
        is_multiLevel = isinstance(columnIndex, pd.MultiIndex)

        # Get proper columns specification for this functor.
        if is_multiLevel:
            columns = self.multilevelColumns(_data, columnIndex=columnIndex)
        else:
            columns = self.columns

        # Load in-memory DataFrame with appropriate columns the gen3 way.
        df = _data.get(parameters={"columns": columns})

        # Drop unnecessary column levels.
        if is_multiLevel:
            df = self._setLevels(df)

        return df

    def _setLevels(self, df):
        # Keep only the levels listed in `_dfLevels`; everything else is
        # redundant once a single band/dataset has been selected.
        levelsToDrop = [n for n in df.columns.names if n not in self._dfLevels]
        df.columns = df.columns.droplevel(levelsToDrop)
        return df

    def _dropna(self, vals):
        return vals.dropna()

    def __call__(self, data, dropna=False):
        # Load only the needed columns, then run the calculation. Any
        # exception from `_func` is logged and replaced by the `fail`
        # result (a NaN column), so a single bad functor cannot abort a
        # larger pipeline step.
        df = self._get_data(data)
        try:
            vals = self._func(df)
        except Exception as e:
            self.log.error("Exception in %s call: %s: %s", self.name, type(e).__name__, e)
            vals = self.fail(df)
        if dropna:
            vals = self._dropna(vals)

        return vals

    def difference(self, data1, data2, **kwargs):
        """Computes difference between functor called on two different
        DataFrame/Handle objects.
        """
        return self(data1, **kwargs) - self(data2, **kwargs)

    def fail(self, df):
        # Fallback result: a NaN-filled Series aligned to the input index.
        return pd.Series(np.full(len(df), np.nan), index=df.index)

    @property
    def name(self):
        """Full name of functor (suitable for figure labels)."""
        # NOTE(review): this returns the NotImplementedError class object
        # rather than raising it. Error-path logging in `__call__` formats
        # `self.name` with %s, so raising here would break that path —
        # confirm intent before changing.
        return NotImplementedError

    @property
    def shortname(self):
        """Short name of functor (suitable for column name/dict key)."""
        return self.name

383 

384 

class CompositeFunctor(Functor):
    """Perform multiple calculations at once on a catalog.

    The role of a `CompositeFunctor` is to group together computations from
    multiple functors.
    Instead of returning `~pandas.Series` a `CompositeFunctor` returns a
    `~pandas.DataFrame`, with the column names being the keys of ``funcDict``.

    The `columns` attribute of a `CompositeFunctor` is the union of all columns
    in all the component functors.

    A `CompositeFunctor` does not use a `_func` method itself; rather, when a
    `CompositeFunctor` is called, all its columns are loaded at once, and the
    resulting DataFrame is passed to the `_func` method of each component
    functor.
    This has the advantage of only doing I/O (reading from parquet file) once,
    and works because each individual `_func` method of each component functor
    does not care if there are *extra* columns in the DataFrame being passed;
    only that it must contain *at least* the `columns` it expects.

    An important and useful class method is `from_yaml`, which takes as an
    argument the path to a YAML file specifying a collection of functors.

    Parameters
    ----------
    funcs : `dict` or `list`
        Dictionary or list of functors.
        If a list, then it will be converted into a dictonary according to the
        `.shortname` attribute of each functor.
    """
    dataset = None
    name = "CompositeFunctor"

    def __init__(self, funcs, **kwargs):

        # NOTE(review): exact-type check (`type(...) is dict`) excludes
        # dict subclasses, which would be treated as iterables of functors
        # instead — confirm whether isinstance() was intended.
        if type(funcs) is dict:
            self.funcDict = funcs
        else:
            self.funcDict = {f.shortname: f for f in funcs}

        self._filt = None

        super().__init__(**kwargs)

    @property
    def filt(self):
        return self._filt

    @filt.setter
    def filt(self, filt):
        # Setting the band on the composite propagates it to every
        # component functor.
        if filt is not None:
            for _, f in self.funcDict.items():
                f.filt = filt
        self._filt = filt

    def update(self, new):
        """Update the functor with new functors.

        Parameters
        ----------
        new : `dict` or `CompositeFunctor`
            Functors to merge into ``funcDict``.

        Raises
        ------
        TypeError
            If ``new`` is neither a dict nor a `CompositeFunctor`.
        """
        if isinstance(new, dict):
            self.funcDict.update(new)
        elif isinstance(new, CompositeFunctor):
            self.funcDict.update(new.funcDict)
        else:
            raise TypeError('Can only update with dictionary or CompositeFunctor.')

        # Make sure new functors have the same 'filt' set.
        # (Self-assignment deliberately re-runs the property setter above,
        # which pushes the band down to the newly added functors.)
        if self.filt is not None:
            self.filt = self.filt

    @property
    def columns(self):
        # Union (deduplicated) of all component functors' columns.
        return list(set([x for y in [f.columns for f in self.funcDict.values()] for x in y]))

    def multilevelColumns(self, data, **kwargs):
        # Get the union of columns for all component functors.
        # Note the need to have `returnTuple=True` here.
        return list(
            set(
                [
                    x
                    for y in [
                        f.multilevelColumns(data, returnTuple=True, **kwargs) for f in self.funcDict.values()
                    ]
                    for x in y
                ]
            )
        )

    def __call__(self, data, **kwargs):
        """Apply the functor to the data table.

        Parameters
        ----------
        data : various
            The data represented as `~lsst.daf.butler.DeferredDatasetHandle`,
            `~lsst.pipe.base.InMemoryDatasetHandle`, or `~pandas.DataFrame`.
            The table or a pointer to a table on disk from which columns can
            be accessed.
        """
        # Wrap a plain DataFrame in a handle so column selection works the
        # same way for all input types.
        if isinstance(data, pd.DataFrame):
            _data = InMemoryDatasetHandle(data, storageClass="DataFrame")
        elif isinstance(data, (DeferredDatasetHandle, InMemoryDatasetHandle)):
            _data = data
        else:
            raise RuntimeError(f"Unexpected type provided for data. Got {get_full_type_name(data)}.")

        columnIndex = self._get_columnIndex(_data)

        if isinstance(columnIndex, pd.MultiIndex):
            # Single I/O pass: load the union of all needed columns once,
            # then slice out each functor's sub-frame.
            columns = self.multilevelColumns(_data, columnIndex=columnIndex)
            df = _data.get(parameters={"columns": columns})

            valDict = {}
            for k, f in self.funcDict.items():
                try:
                    subdf = f._setLevels(
                        df[f.multilevelColumns(_data, returnTuple=True, columnIndex=columnIndex)]
                    )
                    valDict[k] = f._func(subdf)
                except Exception as e:
                    self.log.exception(
                        "Exception in %s (funcs: %s) call: %s",
                        self.name,
                        str(list(self.funcDict.keys())),
                        type(e).__name__,
                    )
                    # `subdf` is unbound if the failure happened before its
                    # assignment above; in that case `f.fail(subdf)` raises
                    # NameError and we re-raise the original exception.
                    try:
                        valDict[k] = f.fail(subdf)
                    except NameError:
                        raise e

        else:
            df = _data.get(parameters={"columns": self.columns})

            valDict = {k: f._func(df) for k, f in self.funcDict.items()}

        # Check that output columns are actually columns.
        for name, colVal in valDict.items():
            if len(colVal.shape) != 1:
                raise RuntimeError("Transformed column '%s' is not the shape of a column. "
                                   "It is shaped %s and type %s." % (name, colVal.shape, type(colVal)))

        try:
            valDf = pd.concat(valDict, axis=1)
        except TypeError:
            # NOTE(review): debugging aid printed to stdout before
            # re-raising — consider routing through self.log instead.
            print([(k, type(v)) for k, v in valDict.items()])
            raise

        if kwargs.get('dropna', False):
            valDf = valDf.dropna(how='any')

        return valDf

    @classmethod
    def renameCol(cls, col, renameRules):
        # Apply every matching prefix rule in order; rules may chain if an
        # earlier replacement still matches a later prefix.
        if renameRules is None:
            return col
        for old, new in renameRules:
            if col.startswith(old):
                col = col.replace(old, new)
        return col

    @classmethod
    def from_file(cls, filename, **kwargs):
        """Construct a `CompositeFunctor` from a YAML file on disk."""
        # Allow environment variables in the filename.
        filename = os.path.expandvars(filename)
        with open(filename) as f:
            translationDefinition = yaml.safe_load(f)

        return cls.from_yaml(translationDefinition, **kwargs)

    @classmethod
    def from_yaml(cls, translationDefinition, **kwargs):
        """Construct a `CompositeFunctor` from a parsed YAML specification.

        The 'funcs' section defines arbitrary functors; the optional
        '*Flags' sections add plain `Column` functors bound to the
        corresponding dataset, with names optionally rewritten by
        'flag_rename_rules'.
        """
        funcs = {}
        for func, val in translationDefinition['funcs'].items():
            funcs[func] = init_fromDict(val, name=func)

        if 'flag_rename_rules' in translationDefinition:
            renameRules = translationDefinition['flag_rename_rules']
        else:
            renameRules = None

        if 'calexpFlags' in translationDefinition:
            for flag in translationDefinition['calexpFlags']:
                funcs[cls.renameCol(flag, renameRules)] = Column(flag, dataset='calexp')

        if 'refFlags' in translationDefinition:
            for flag in translationDefinition['refFlags']:
                funcs[cls.renameCol(flag, renameRules)] = Column(flag, dataset='ref')

        if 'forcedFlags' in translationDefinition:
            for flag in translationDefinition['forcedFlags']:
                funcs[cls.renameCol(flag, renameRules)] = Column(flag, dataset='forced_src')

        if 'flags' in translationDefinition:
            for flag in translationDefinition['flags']:
                funcs[cls.renameCol(flag, renameRules)] = Column(flag, dataset='meas')

        return cls(funcs, **kwargs)

583 

584 

def mag_aware_eval(df, expr, log):
    """Evaluate an expression on a DataFrame, knowing what the 'mag' function
    means.

    Builds on `pandas.DataFrame.eval`, which parses and executes math on
    DataFrames.

    Parameters
    ----------
    df : ~pandas.DataFrame
        DataFrame on which to evaluate expression.

    expr : str
        Expression.

    log : `logging.Logger`
        Logger used to report a failed first evaluation attempt.
    """
    # First attempt: treat mag(x) as an instrumental magnitude of column x.
    try:
        rewritten = re.sub(r'mag\((\w+)\)', r'-2.5*log(\g<1>)/log(10)', expr)
        result = df.eval(rewritten)
    except Exception as e:  # Should check what actually gets raised
        log.error("Exception in mag_aware_eval: %s: %s", type(e).__name__, e)
        # Retry assuming mag() was given a bare name lacking the
        # '_instFlux' suffix.
        rewritten = re.sub(r'mag\((\w+)\)', r'-2.5*log(\g<1>_instFlux)/log(10)', expr)
        result = df.eval(rewritten)
    return result

608 

609 

class CustomFunctor(Functor):
    """Arbitrary computation on a catalog.

    Column names (and thus the columns to be loaded from catalog) are found by
    finding all words and trying to ignore all "math-y" words.

    Parameters
    ----------
    expr : str
        Expression to evaluate, to be parsed and executed by
        `~lsst.pipe.tasks.functors.mag_aware_eval`.
    """
    # Function names that must not be mistaken for column names.
    _ignore_words = ('mag', 'sin', 'cos', 'exp', 'log', 'sqrt')

    def __init__(self, expr, **kwargs):
        self.expr = expr
        super().__init__(**kwargs)

    @property
    def name(self):
        return self.expr

    @property
    def columns(self):
        # Arguments of mag(...) may need an '_instFlux' suffix appended.
        mag_args = re.findall(r'mag\(\s*(\w+)\s*\)', self.expr)

        # Every identifier in the expression, minus known math functions.
        needed = [w for w in re.findall(r'[a-zA-Z_]+', self.expr) if w not in self._ignore_words]
        excluded = []
        for arg in mag_args:
            if re.search('_instFlux$', arg):
                # Already a flux column name; use it as-is.
                needed.append(arg)
            else:
                # Bare name: the real column carries the suffix, and the
                # bare name itself must not be requested.
                needed.append(f'{arg}_instFlux')
                excluded.append(arg)

        return list({c for c in needed if c not in excluded})

    def _func(self, df):
        return mag_aware_eval(df, self.expr, self.log)

649 

650 

class Column(Functor):
    """Return a single named column from the catalog, unchanged.

    Parameters
    ----------
    col : `str`
        Name of the column to extract.
    """

    def __init__(self, col, **kwargs):
        self.col = col
        super().__init__(**kwargs)

    @property
    def columns(self):
        # The only column this functor needs is the one it returns.
        return [self.col]

    @property
    def name(self):
        return self.col

    def _func(self, df):
        return df[self.col]

668 

669 

class Index(Functor):
    """Return the value of the index for each object."""

    # Just a dummy; the loader requires at least one column to be named.
    columns = ['coord_ra']
    _defaultDataset = 'ref'
    _defaultNoDup = True

    def _func(self, df):
        # Mirror the row index back as the values of the returned Series.
        return pd.Series(df.index, index=df.index)

679 

680 

class CoordColumn(Column):
    """Base class for coordinate column, in degrees."""
    # Stored values are radians and must be converted on read.
    _radians = True

    def __init__(self, col, **kwargs):
        super().__init__(col, **kwargs)

    def _func(self, df):
        # Build a new Series rather than converting in place, since the
        # original column may also be read by another functor.
        if self._radians:
            return df[self.col] * 180 / np.pi
        return df[self.col]

693 

694 

class RAColumn(CoordColumn):
    """Right Ascension, in degrees."""
    name = 'RA'
    _defaultNoDup = True

    def __init__(self, **kwargs):
        # Fixed source column; callers only choose band/dataset.
        super().__init__('coord_ra', **kwargs)

    def __call__(self, catalog, **kwargs):
        # Pure pass-through to the base implementation.
        return super().__call__(catalog, **kwargs)

705 

706 

class DecColumn(CoordColumn):
    """Declination, in degrees."""
    name = 'Dec'
    _defaultNoDup = True

    def __init__(self, **kwargs):
        # Fixed source column; callers only choose band/dataset.
        super().__init__('coord_dec', **kwargs)

    def __call__(self, catalog, **kwargs):
        # Pure pass-through to the base implementation.
        return super().__call__(catalog, **kwargs)

717 

718 

class RAErrColumn(CoordColumn):
    """Uncertainty in Right Ascension, in degrees."""
    name = 'RAErr'
    _defaultNoDup = True

    def __init__(self, **kwargs):
        # Fixed source column holding the RA uncertainty.
        super().__init__('coord_raErr', **kwargs)

726 

727 

class DecErrColumn(CoordColumn):
    """Uncertainty in declination, in degrees."""
    name = 'DecErr'
    _defaultNoDup = True

    def __init__(self, **kwargs):
        # Fixed source column holding the Dec uncertainty.
        super().__init__('coord_decErr', **kwargs)

735 

736 

class RADecCovColumn(Column):
    """Coordinate covariance column, in degrees."""
    # Stored covariance is in radians^2, so the conversion factor is
    # squared relative to a plain coordinate column.
    _radians = True
    name = 'RADecCov'
    _defaultNoDup = True

    def __init__(self, **kwargs):
        super().__init__('coord_ra_dec_Cov', **kwargs)

    def _func(self, df):
        # Build a new Series rather than converting in place, since the
        # original column may also be read by another functor.
        if self._radians:
            return df[self.col]*(180/np.pi)**2
        return df[self.col]

751 

752 

class MultibandColumn(Column):
    """A column with a band in a multiband table.

    Parameters
    ----------
    col : `str`
        Name of the column to extract.
    band_to_check : `str`
        Band this column is expected to belong to.
    """

    def __init__(self, col, band_to_check, **kwargs):
        self._band_to_check = band_to_check
        super().__init__(col=col, **kwargs)

    @property
    def band_to_check(self):
        # Read-only view of the band recorded at construction.
        return self._band_to_check

762 

763 

class MultibandSinglePrecisionFloatColumn(MultibandColumn):
    """A float32 MultibandColumn"""

    def _func(self, df):
        # Extract via the parent, then narrow to single precision.
        values = super()._func(df)
        return values.astype(np.float32)

768 

769 

class SinglePrecisionFloatColumn(Column):
    """Return a column cast to a single-precision float."""

    def _func(self, df):
        # Narrow the stored values to float32 on the way out.
        values = df[self.col]
        return values.astype(np.float32)

775 

776 

class HtmIndex20(Functor):
    """Compute the level 20 HtmIndex for the catalog.

    Notes
    -----
    This functor was implemented to satisfy requirements of old APDB interface
    which required the ``pixelId`` column in DiaObject with HTM20 index.
    The APDB interface had migrated to not need that information, but we keep
    this class in case it may be useful for something else.
    """
    name = "Htm20"
    htmLevel = 20
    # Input coordinate columns are assumed to be in radians.
    _radians = True

    def __init__(self, ra, dec, **kwargs):
        self.pixelator = sphgeom.HtmPixelization(self.htmLevel)
        self.ra = ra
        self.dec = dec
        self._columns = [self.ra, self.dec]
        super().__init__(**kwargs)

    def _func(self, df):
        # The angular unit is fixed for the whole table, so resolve it
        # once instead of per row.
        units = geom.radians if self._radians else geom.degrees

        def computePixel(row):
            sphPoint = geom.SpherePoint(row[self.ra], row[self.dec], units)
            return self.pixelator.index(sphPoint.getVector())

        return df.apply(computePixel, axis=1, result_type='reduce').astype('int64')

812 

813 

def fluxName(col):
    """Append _instFlux to the column name if it doesn't have it already."""
    suffix = '_instFlux'
    return col if col.endswith(suffix) else col + suffix

819 

820 

def fluxErrName(col):
    """Append _instFluxErr to the column name if it doesn't have it already."""
    suffix = '_instFluxErr'
    return col if col.endswith(suffix) else col + suffix

826 

827 

class Mag(Functor):
    """Compute calibrated magnitude.

    Returns the flux at mag=0.
    The default ``fluxMag0`` is 63095734448.0194, which is default for HSC.
    TO DO: This default should be made configurable in DM-21955.

    This calculation hides warnings about invalid values and dividing by zero.

    As with all functors, a ``dataset`` and ``filt`` kwarg should be provided
    upon initialization.
    Unlike the default `Functor`, however, the default dataset for a `Mag` is
    ``'meas'``, rather than ``'ref'``.

    Parameters
    ----------
    col : `str`
        Name of flux column from which to compute magnitude.
        Can be parseable by the `~lsst.pipe.tasks.functors.fluxName` function;
        that is, you can pass ``'modelfit_CModel'`` instead of
        ``'modelfit_CModel_instFlux'``, and it will understand.
    """
    _defaultDataset = 'meas'

    def __init__(self, col, **kwargs):
        # Normalize to the canonical '<name>_instFlux' column name.
        self.col = fluxName(col)
        # TO DO: DM-21955 Replace hard coded photometic calibration values.
        self.fluxMag0 = 63095734448.0194

        super().__init__(**kwargs)

    @property
    def columns(self):
        return [self.col]

    def _func(self, df):
        # Zero or negative fluxes trigger divide/invalid warnings in
        # log10; suppress them and let NaN/inf propagate.
        with warnings.catch_warnings():
            warnings.filterwarnings('ignore', r'invalid value encountered')
            warnings.filterwarnings('ignore', r'divide by zero')
            normalized = df[self.col] / self.fluxMag0
            return -2.5*np.log10(normalized)

    @property
    def name(self):
        return f'mag_{self.col}'

872 

873 

class MagErr(Mag):
    """Compute calibrated magnitude uncertainty.

    Parameters
    ----------
    col : `str`
        Name of the flux column.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # TO DO: DM-21955 Replace hard coded photometic calibration values.
        self.fluxMag0Err = 0.

    @property
    def columns(self):
        # The flux column and its matching error column.
        return [self.col, self.col + 'Err']

    def _func(self, df):
        with warnings.catch_warnings():
            warnings.filterwarnings('ignore', r'invalid value encountered')
            warnings.filterwarnings('ignore', r'divide by zero')
            fluxCol, fluxErrCol = self.columns
            # Relative errors of the flux and of the zero point combine
            # in quadrature; 2.5/ln(10) converts to magnitudes.
            relFluxErr = df[fluxErrCol] / df[fluxCol]
            relZpErr = self.fluxMag0Err / self.fluxMag0
            return (2.5 / np.log(10.)) * np.sqrt(relFluxErr*relFluxErr + relZpErr*relZpErr)

    @property
    def name(self):
        return super().name + '_err'

905 

906 

class MagDiff(Functor):
    """Functor to calculate magnitude difference.

    Parameters
    ----------
    col1, col2 : `str`
        Flux column names; the result is ``mag(col1) - mag(col2)``.
    """
    _defaultDataset = 'meas'

    def __init__(self, col1, col2, **kwargs):
        # Normalize both inputs to canonical '_instFlux' column names.
        self.col1 = fluxName(col1)
        self.col2 = fluxName(col2)
        super().__init__(**kwargs)

    @property
    def columns(self):
        return [self.col1, self.col2]

    def _func(self, df):
        # The zero point cancels in the difference, so the flux ratio is
        # all that is needed. Suppress warnings for non-positive fluxes.
        with warnings.catch_warnings():
            warnings.filterwarnings('ignore', r'invalid value encountered')
            warnings.filterwarnings('ignore', r'divide by zero')
            ratio = df[self.col1]/df[self.col2]
            return -2.5*np.log10(ratio)

    @property
    def name(self):
        return f'(mag_{self.col1} - mag_{self.col2})'

    @property
    def shortname(self):
        return f'magDiff_{self.col1}_{self.col2}'

934 

class Color(Functor):
    """Compute the color between two filters.

    Computes color by initializing two different `Mag` functors based on the
    ``col`` and filters provided, and then returning the difference.

    This is enabled by the `_func` method expecting a DataFrame with a
    multilevel column index, with both ``'band'`` and ``'column'``, instead of
    just ``'column'``, which is the `Functor` default.
    This is controlled by the `_dfLevels` attribute.

    Also of note, the default dataset for `Color` is ``'forced_src'``, whereas
    for `Mag` it is ``'meas'``.

    Parameters
    ----------
    col : str
        Name of the flux column from which to compute; same as would be passed
        to `~lsst.pipe.tasks.functors.Mag`.

    filt2, filt1 : str
        Filters from which to compute magnitude difference.
        Color computed is ``Mag(filt2) - Mag(filt1)``.

    Raises
    ------
    RuntimeError
        Raised if ``filt2`` and ``filt1`` are the same band (a color needs
        two distinct bands).
    """
    _defaultDataset = 'forced_src'
    # Two-level column index (band, column) instead of the single-level
    # Functor default.
    _dfLevels = ('band', 'column')
    _defaultNoDup = True

    def __init__(self, col, filt2, filt1, **kwargs):
        self.col = fluxName(col)
        if filt2 == filt1:
            raise RuntimeError("Cannot compute Color for %s: %s - %s " % (col, filt2, filt1))
        self.filt2 = filt2
        self.filt1 = filt1

        # Delegate the per-band magnitude computation to two Mag functors.
        self.mag2 = Mag(col, filt=filt2, **kwargs)
        self.mag1 = Mag(col, filt=filt1, **kwargs)

        super().__init__(**kwargs)

    @property
    def filt(self):
        # A color spans two bands, so the single-band ``filt`` attribute is
        # deliberately inert: reads return None and writes (e.g. from the
        # base-class __init__) are silently ignored via the setter below.
        return None

    @filt.setter
    def filt(self, filt):
        pass

    def _func(self, df):
        # Slice the multilevel frame by band; each Mag functor then sees a
        # single-band sub-frame with plain column names.
        mag2 = self.mag2._func(df[self.filt2])
        mag1 = self.mag1._func(df[self.filt1])
        return mag2 - mag1

    @property
    def columns(self):
        return [self.mag1.col, self.mag2.col]

    def multilevelColumns(self, parq, **kwargs):
        # (dataset, band, column) triples, one per band.
        return [(self.dataset, self.filt1, self.col), (self.dataset, self.filt2, self.col)]

    @property
    def name(self):
        return f'{self.filt2} - {self.filt1} ({self.col})'

    @property
    def shortname(self):
        return f"{self.col}_{self.filt2.replace('-', '')}m{self.filt1.replace('-', '')}"

1002 

1003 

class DeconvolvedMoments(Functor):
    """This functor subtracts the trace of the PSF second moments from the
    trace of the second moments of the source.

    HsmShapeAlgorithm source measurements are preferred where finite;
    SdssShapeAlgorithm measurements fill in the remainder. PSF moments must
    come from HsmShapeAlgorithm.
    """
    name = 'Deconvolved Moments'
    shortname = 'deconvolvedMoments'
    _columns = ("ext_shapeHSM_HsmSourceMoments_xx",
                "ext_shapeHSM_HsmSourceMoments_yy",
                "base_SdssShape_xx", "base_SdssShape_yy",
                "ext_shapeHSM_HsmPsfMoments_xx",
                "ext_shapeHSM_HsmPsfMoments_yy")

    def _func(self, df):
        """Calculate deconvolved moments."""
        sdssTrace = df["base_SdssShape_xx"] + df["base_SdssShape_yy"]
        if "ext_shapeHSM_HsmSourceMoments_xx" in df.columns:
            hsmTrace = df["ext_shapeHSM_HsmSourceMoments_xx"] + df["ext_shapeHSM_HsmSourceMoments_yy"]
        else:
            # No HSM source moments at all: an all-NaN series so the SDSS
            # fallback below is used everywhere.
            hsmTrace = np.ones(len(df))*np.nan
        if "ext_shapeHSM_HsmPsfMoments_xx" not in df.columns:
            # LSST does not have shape.sdss.psf.
            # We could instead add base_PsfShape to the catalog using
            # exposure.getPsf().computeShape(s.getCentroid()).getIxx().
            raise RuntimeError('No psf shape parameter found in catalog')
        psfTrace = df["ext_shapeHSM_HsmPsfMoments_xx"] + df["ext_shapeHSM_HsmPsfMoments_yy"]

        # Prefer the HSM trace where finite, fall back to SDSS elsewhere.
        return hsmTrace.where(np.isfinite(hsmTrace), sdssTrace) - psfTrace

1036 

1037 

class SdssTraceSize(Functor):
    """Functor computing the SDSS trace radius size for sources.

    Defined as ``sqrt((Ixx + Iyy) / 2)`` using the SdssShapeAlgorithm
    second moments, in units of pixels.
    """
    name = "SDSS Trace Size"
    shortname = 'sdssTrace'
    _columns = ("base_SdssShape_xx", "base_SdssShape_yy")

    def _func(self, df):
        # Square root of half the trace of the second-moments tensor.
        return np.sqrt(0.5*(df["base_SdssShape_xx"] + df["base_SdssShape_yy"]))

1053 

1054 

class PsfSdssTraceSizeDiff(Functor):
    """Functor for the percentage difference between the SDSS trace radius
    of the object and that of the PSF model.

    See Also
    --------
    SdssTraceSize
    """
    name = "PSF - SDSS Trace Size"
    shortname = 'psf_sdssTrace'
    _columns = ("base_SdssShape_xx", "base_SdssShape_yy",
                "base_SdssShape_psf_xx", "base_SdssShape_psf_yy")

    def _func(self, df):
        objSize = np.sqrt(0.5*(df["base_SdssShape_xx"] + df["base_SdssShape_yy"]))
        modelSize = np.sqrt(0.5*(df["base_SdssShape_psf_xx"] + df["base_SdssShape_psf_yy"]))
        # Percent difference, normalized by the mean of the two sizes.
        return 100*(objSize - modelSize)/(0.5*(objSize + modelSize))

1073 

1074 

class HsmTraceSize(Functor):
    """Functor computing the HSM trace radius size for sources.

    Defined as ``sqrt((Ixx + Iyy) / 2)`` using the HsmShapeAlgorithm
    second moments, in units of pixels.
    """
    name = 'HSM Trace Size'
    shortname = 'hsmTrace'
    _columns = ("ext_shapeHSM_HsmSourceMoments_xx",
                "ext_shapeHSM_HsmSourceMoments_yy")

    def _func(self, df):
        # Square root of half the trace of the second-moments tensor.
        trace = (df["ext_shapeHSM_HsmSourceMoments_xx"]
                 + df["ext_shapeHSM_HsmSourceMoments_yy"])
        return np.sqrt(0.5*trace)

1092 

1093 

class PsfHsmTraceSizeDiff(Functor):
    """Functor for the percentage difference between the HSM trace radius
    of the object and that of the PSF model.

    See Also
    --------
    HsmTraceSize
    """
    name = 'PSF - HSM Trace Size'
    shortname = 'psf_HsmTrace'
    _columns = ("ext_shapeHSM_HsmSourceMoments_xx",
                "ext_shapeHSM_HsmSourceMoments_yy",
                "ext_shapeHSM_HsmPsfMoments_xx",
                "ext_shapeHSM_HsmPsfMoments_yy")

    def _func(self, df):
        objSize = np.sqrt(0.5*(df["ext_shapeHSM_HsmSourceMoments_xx"]
                               + df["ext_shapeHSM_HsmSourceMoments_yy"]))
        modelSize = np.sqrt(0.5*(df["ext_shapeHSM_HsmPsfMoments_xx"]
                                 + df["ext_shapeHSM_HsmPsfMoments_yy"]))
        # Percent difference, normalized by the mean of the two sizes.
        return 100*(objSize - modelSize)/(0.5*(objSize + modelSize))

1116 

1117 

class HsmFwhm(Functor):
    """Functor for the PSF FWHM computed from HsmShapeAlgorithm PSF second
    moments.

    Result is in arcseconds, assuming the hsc_rings_v1 skymap pixel scale
    of 0.168 arcseconds/pixel.

    Notes
    -----
    This conversion assumes the PSF is Gaussian, which is not always the case.
    """
    name = 'HSM Psf FWHM'
    _columns = ('ext_shapeHSM_HsmPsfMoments_xx', 'ext_shapeHSM_HsmPsfMoments_yy')
    # TODO: DM-21403 pixel scale should be computed from the CD matrix or transform matrix
    pixelScale = 0.168
    SIGMA2FWHM = 2*np.sqrt(2*np.log(2))

    def _func(self, df):
        # Gaussian sigma in pixels from half the trace of the PSF moments.
        sigma = np.sqrt(
            0.5*(df['ext_shapeHSM_HsmPsfMoments_xx']
                 + df['ext_shapeHSM_HsmPsfMoments_yy']))
        return (self.pixelScale*self.SIGMA2FWHM*sigma).astype(np.float32)

1139 

1140 

class E1(Functor):
    r"""Calculate :math:`e_1` ellipticity component for sources, defined as:

    .. math::
        e_1 &= (I_{xx}-I_{yy})/(I_{xx}+I_{yy})

    Parameters
    ----------
    colXX, colXY, colYY : `str`
        Names of the second-moment columns. ``colXY`` does not enter the
        formula but is still listed in `columns` so all three moments are
        loaded together.

    See Also
    --------
    E2
    """
    name = "Distortion Ellipticity (e1)"
    shortname = "Distortion"

    def __init__(self, colXX, colXY, colYY, **kwargs):
        self.colXX = colXX
        self.colXY = colXY
        self.colYY = colYY
        # NOTE(review): this assignment duplicates the `columns` property
        # below (compare E2, which omits it); retained as-is in case
        # external code reads `_columns` directly — confirm before removing.
        self._columns = [self.colXX, self.colXY, self.colYY]
        super().__init__(**kwargs)

    @property
    def columns(self):
        return [self.colXX, self.colXY, self.colYY]

    def _func(self, df):
        # e1 = (Ixx - Iyy) / (Ixx + Iyy), downcast to float32.
        return ((df[self.colXX] - df[self.colYY]) / (
            df[self.colXX] + df[self.colYY])).astype(np.float32)

1168 

1169 

class E2(Functor):
    r"""Calculate the :math:`e_2` ellipticity component for sources:

    .. math::
        e_2 &= 2I_{xy}/(I_{xx}+I_{yy})

    See Also
    --------
    E1
    """
    name = "Ellipticity e2"

    def __init__(self, colXX, colXY, colYY, **kwargs):
        self.colXX = colXX
        self.colXY = colXY
        self.colYY = colYY
        super().__init__(**kwargs)

    @property
    def columns(self):
        return [self.colXX, self.colXY, self.colYY]

    def _func(self, df):
        # e2 = 2*Ixy / (Ixx + Iyy), downcast to float32.
        trace = df[self.colXX] + df[self.colYY]
        return (2*df[self.colXY] / trace).astype(np.float32)

1194 

1195 

class RadiusFromQuadrupole(Functor):
    """Calculate the radius from the quadrupole moments.

    This is the fourth root of the determinant of the second-moments
    tensor, in units of pixels.

    See Also
    --------
    SdssTraceSize
    HsmTraceSize
    """

    def __init__(self, colXX, colXY, colYY, **kwargs):
        self.colXX = colXX
        self.colXY = colXY
        self.colYY = colYY
        super().__init__(**kwargs)

    @property
    def columns(self):
        return [self.colXX, self.colXY, self.colYY]

    def _func(self, df):
        # det(Q)**(1/4) = (Ixx*Iyy - Ixy**2)**0.25, downcast to float32.
        determinant = df[self.colXX]*df[self.colYY] - df[self.colXY]**2
        return (determinant**0.25).astype(np.float32)

1220 

1221 

class LocalWcs(Functor):
    """Computations using the stored localWcs.

    Parameters
    ----------
    colCD_1_1, colCD_1_2, colCD_2_1, colCD_2_2 : `str`
        Names of the columns holding the elements of the local WCS affine
        (CD matrix) transform.
    """
    name = "LocalWcsOperations"

    def __init__(self,
                 colCD_1_1,
                 colCD_1_2,
                 colCD_2_1,
                 colCD_2_2,
                 **kwargs):
        self.colCD_1_1 = colCD_1_1
        self.colCD_1_2 = colCD_1_2
        self.colCD_2_1 = colCD_2_1
        self.colCD_2_2 = colCD_2_2
        super().__init__(**kwargs)

    def computeDeltaRaDec(self, x, y, cd11, cd12, cd21, cd22):
        """Compute the dRA, dDec from dx, dy.

        Parameters
        ----------
        x : `~pandas.Series`
            X pixel coordinate.
        y : `~pandas.Series`
            Y pixel coordinate.
        cd11 : `~pandas.Series`
            [1, 1] element of the local Wcs affine transform.
        cd12 : `~pandas.Series`
            [1, 2] element of the local Wcs affine transform.
        cd21 : `~pandas.Series`
            [2, 1] element of the local Wcs affine transform.
        cd22 : `~pandas.Series`
            [2, 2] element of the local Wcs affine transform.

        Returns
        -------
        raDecTuple : tuple
            RA and Dec conversion of x and y given the local Wcs.
            Returned units are in radians.

        Notes
        -----
        If x and y are with respect to the CRVAL1, CRVAL2
        then this will return the RA, Dec for that WCS.
        """
        # Plain affine transform: (dRA, dDec) = CD . (x, y).
        return (x * cd11 + y * cd12, x * cd21 + y * cd22)

    def computeSkySeparation(self, ra1, dec1, ra2, dec2):
        """Compute the great-circle separation between two sky coordinates.

        Parameters
        ----------
        ra1 : `~pandas.Series`
            Ra of the first coordinate in radians.
        dec1 : `~pandas.Series`
            Dec of the first coordinate in radians.
        ra2 : `~pandas.Series`
            Ra of the second coordinate in radians.
        dec2 : `~pandas.Series`
            Dec of the second coordinate in radians.

        Returns
        -------
        dist : `~pandas.Series`
            Distance on the sphere in radians.
        """
        # Haversine formula: numerically stable for small separations.
        deltaDec = dec2 - dec1
        deltaRa = ra2 - ra1
        return 2 * np.arcsin(
            np.sqrt(
                np.sin(deltaDec / 2) ** 2
                + np.cos(dec2) * np.cos(dec1) * np.sin(deltaRa / 2) ** 2))

    def getSkySeparationFromPixel(self, x1, y1, x2, y2, cd11, cd12, cd21, cd22):
        """Compute the distance on the sphere from (x1, y1) to (x2, y2).

        Parameters
        ----------
        x1 : `~pandas.Series`
            X pixel coordinate.
        y1 : `~pandas.Series`
            Y pixel coordinate.
        x2 : `~pandas.Series`
            X pixel coordinate.
        y2 : `~pandas.Series`
            Y pixel coordinate.
        cd11 : `~pandas.Series`
            [1, 1] element of the local Wcs affine transform.
        cd12 : `~pandas.Series`
            [1, 2] element of the local Wcs affine transform.
        cd21 : `~pandas.Series`
            [2, 1] element of the local Wcs affine transform.
        cd22 : `~pandas.Series`
            [2, 2] element of the local Wcs affine transform.

        Returns
        -------
        Distance : `~pandas.Series`
            Distance on the sphere in radians.
        """
        ra1, dec1 = self.computeDeltaRaDec(x1, y1, cd11, cd12, cd21, cd22)
        ra2, dec2 = self.computeDeltaRaDec(x2, y2, cd11, cd12, cd21, cd22)
        # Great circle distance for small separations.
        return self.computeSkySeparation(ra1, dec1, ra2, dec2)

    def computePositionAngle(self, ra1, dec1, ra2, dec2):
        """Compute position angle (E of N) from (ra1, dec1) to (ra2, dec2).

        Parameters
        ----------
        ra1 : iterable [`float`]
            RA of the first coordinate [radian].
        dec1 : iterable [`float`]
            Dec of the first coordinate [radian].
        ra2 : iterable [`float`]
            RA of the second coordinate [radian].
        dec2 : iterable [`float`]
            Dec of the second coordinate [radian].

        Returns
        -------
        Position Angle: `~pandas.Series`
            radians E of N

        Notes
        -----
        (ra1, dec1) -> (ra2, dec2) is interpreted as the shorter way around the sphere

        For a separation of 0.0001 rad, the position angle is good to 0.0009 rad
        all over the sphere.
        """
        # lsst.geom.SpherePoint has "bearingTo", which returns angle N of E
        # We instead want the astronomy convention of "Position Angle", which is angle E of N
        position_angle = np.zeros(len(ra1))
        for i, (r1, d1, r2, d2) in enumerate(zip(ra1, dec1, ra2, dec2)):
            point1 = geom.SpherePoint(r1, d1, geom.radians)
            point2 = geom.SpherePoint(r2, d2, geom.radians)
            bearing = point1.bearingTo(point2)
            pa_ref_angle = geom.Angle(np.pi/2, geom.radians)  # in bearing system
            pa = pa_ref_angle - bearing
            # Wrap around to get Delta_RA from -pi to +pi
            pa = pa.wrapCtr()
            position_angle[i] = pa.asRadians()

        return pd.Series(position_angle)

    def getPositionAngleFromDetectorAngle(self, theta, cd11, cd12, cd21, cd22):
        """Compute position angle (E of N) from detector angle (+y of +x).

        Parameters
        ----------
        theta : `float`
            detector angle [radian]
        cd11 : `float`
            [1, 1] element of the local Wcs affine transform.
        cd12 : `float`
            [1, 2] element of the local Wcs affine transform.
        cd21 : `float`
            [2, 1] element of the local Wcs affine transform.
        cd22 : `float`
            [2, 2] element of the local Wcs affine transform.

        Returns
        -------
        Position Angle: `~pandas.Series`
            Degrees E of N.
        """
        # Create a unit vector in (x, y) along theta
        dx = np.cos(theta)
        dy = np.sin(theta)
        ra1, dec1 = self.computeDeltaRaDec(0, 0, cd11, cd12, cd21, cd22)
        ra2, dec2 = self.computeDeltaRaDec(dx, dy, cd11, cd12, cd21, cd22)
        # Position angle of vector from (RA1, Dec1) to (RA2, Dec2)
        return np.rad2deg(self.computePositionAngle(ra1, dec1, ra2, dec2))

1396 

1397 

class ComputePixelScale(LocalWcs):
    """Compute the local pixel scale from the stored CDMatrix.
    """
    name = "PixelScale"

    @property
    def columns(self):
        # The four CD-matrix element columns declared on LocalWcs.
        return [self.colCD_1_1,
                self.colCD_1_2,
                self.colCD_2_1,
                self.colCD_2_2]

    def pixelScaleArcseconds(self, cd11, cd12, cd21, cd22):
        """Compute the local pixel to scale conversion in arcseconds.

        Parameters
        ----------
        cd11 : `~pandas.Series`
            [1, 1] element of the local Wcs affine transform in radians.
        cd12 : `~pandas.Series`
            [1, 2] element of the local Wcs affine transform in radians.
        cd21 : `~pandas.Series`
            [2, 1] element of the local Wcs affine transform in radians.
        cd22 : `~pandas.Series`
            [2, 2] element of the local Wcs affine transform in radians.

        Returns
        -------
        pixScale : `~pandas.Series`
            Arcseconds per pixel at the location of the local WC.
        """
        # sqrt(|det(CD)|) is the geometric-mean pixel scale in radians;
        # convert radians -> degrees -> arcseconds.
        return 3600 * np.degrees(np.sqrt(np.fabs(cd11 * cd22 - cd12 * cd21)))

    def _func(self, df):
        return self.pixelScaleArcseconds(df[self.colCD_1_1],
                                         df[self.colCD_1_2],
                                         df[self.colCD_2_1],
                                         df[self.colCD_2_2])

1438 

1439 

class ConvertPixelToArcseconds(ComputePixelScale):
    """Convert a pixel-valued column to arcseconds by multiplying it by the
    local pixel scale.
    """

    def __init__(self,
                 col,
                 colCD_1_1,
                 colCD_1_2,
                 colCD_2_1,
                 colCD_2_2,
                 **kwargs):
        self.col = col
        super().__init__(colCD_1_1, colCD_1_2, colCD_2_1, colCD_2_2, **kwargs)

    @property
    def name(self):
        return f"{self.col}_asArcseconds"

    @property
    def columns(self):
        return [self.col,
                self.colCD_1_1,
                self.colCD_1_2,
                self.colCD_2_1,
                self.colCD_2_2]

    def _func(self, df):
        # Per-row pixel scale from the local CD matrix.
        scale = self.pixelScaleArcseconds(df[self.colCD_1_1],
                                          df[self.colCD_1_2],
                                          df[self.colCD_2_1],
                                          df[self.colCD_2_2])
        return df[self.col] * scale

1474 

1475 

class ConvertPixelSqToArcsecondsSq(ComputePixelScale):
    """Convert a column in units of pixels squared to arcseconds squared by
    multiplying it by the square of the local pixel scale.
    """

    def __init__(self,
                 col,
                 colCD_1_1,
                 colCD_1_2,
                 colCD_2_1,
                 colCD_2_2,
                 **kwargs):
        self.col = col
        super().__init__(colCD_1_1, colCD_1_2, colCD_2_1, colCD_2_2, **kwargs)

    @property
    def name(self):
        return f"{self.col}_asArcsecondsSq"

    @property
    def columns(self):
        return [self.col,
                self.colCD_1_1,
                self.colCD_1_2,
                self.colCD_2_1,
                self.colCD_2_2]

    def _func(self, df):
        # Per-row pixel scale from the local CD matrix; applied twice for
        # the squared quantity.
        scale = self.pixelScaleArcseconds(df[self.colCD_1_1],
                                          df[self.colCD_1_2],
                                          df[self.colCD_2_1],
                                          df[self.colCD_2_2])
        return df[self.col] * scale * scale

1513 

1514 

class ConvertDetectorAngleToPositionAngle(LocalWcs):
    """Convert an angle measured on the detector (+y of +x) into an on-sky
    position angle using the stored CDMatrix.

    Returns
    -------
    position angle : degrees
    """

    name = "PositionAngle"

    def __init__(self, theta_col, colCD_1_1, colCD_1_2, colCD_2_1, colCD_2_2, **kwargs):
        self.theta_col = theta_col
        super().__init__(colCD_1_1, colCD_1_2, colCD_2_1, colCD_2_2, **kwargs)

    @property
    def columns(self):
        return [self.theta_col,
                self.colCD_1_1,
                self.colCD_1_2,
                self.colCD_2_1,
                self.colCD_2_2]

    def _func(self, df):
        # Delegate the geometry to the LocalWcs helper.
        return self.getPositionAngleFromDetectorAngle(
            df[self.theta_col],
            df[self.colCD_1_1],
            df[self.colCD_1_2],
            df[self.colCD_2_1],
            df[self.colCD_2_2])

1555 

1556 

class ReferenceBand(Functor):
    """Return the band used to seed multiband forced photometry.

    This functor is to be used on Object tables.
    It converts the boolean merge_measurements_{band} columns into a single
    string representing the first band for which merge_measurements_{band}
    is True.

    Assumes the default priority order of i, r, z, y, g, u.

    Parameters
    ----------
    bands : `tuple` [`str`] or `list` [`str`], optional
        Bands that may be present in the table; defaults to the full
        ``band_order`` priority list when `None`.
    """
    name = 'Reference Band'
    shortname = 'refBand'

    # Priority order used when more than one merge_measurement flag is set.
    band_order = ("i", "r", "z", "y", "g", "u")

    @property
    def columns(self):
        # Build the actual input column list, not hardcoded ugrizy
        bands = [band for band in self.band_order if band in self.bands]
        # In the unlikely scenario that users attempt to add non-ugrizy bands
        bands += [band for band in self.bands if band not in self.band_order]
        return [f"merge_measurement_{band}" for band in bands]

    def _func(self, df: pd.DataFrame) -> pd.Series:
        def getFilterAliasName(row):
            # Get column name with the max value (True > False).
            # Columns are in priority order, so idxmax picks the first True.
            colName = row.idxmax()
            return colName.replace('merge_measurement_', '')

        # Skip columns that are unavailable, because this functor requests the
        # superset of bands that could be included in the object table.
        columns = [col for col in self.columns if col in df.columns]
        # Makes a Series of dtype object if df is empty.
        return df[columns].apply(getFilterAliasName, axis=1,
                                 result_type='reduce').astype('object')

    def __init__(self, bands: tuple[str, ...] | list[str] | None = None, **kwargs):
        super().__init__(**kwargs)
        self.bands = self.band_order if bands is None else tuple(bands)

1596 

1597 

class Photometry(Functor):
    """Base class for Object table calibrated fluxes and magnitudes.

    Parameters
    ----------
    colFlux : `str`
        Name of the instrumental flux column.
    colFluxErr : `str`, optional
        Name of the associated instrumental flux error column.
    """
    # AB to NanoJansky (3631 Jansky).
    AB_FLUX_SCALE = (0 * u.ABmag).to_value(u.nJy)
    LOG_AB_FLUX_SCALE = 12.56
    # Precomputed 2.5 / ln(10), used in magnitude-error propagation.
    FIVE_OVER_2LOG10 = 1.085736204758129569
    # TO DO: DM-21955 Replace hard coded photometic calibration values.
    COADD_ZP = 27

    def __init__(self, colFlux, colFluxErr=None, **kwargs):
        # Elementwise version of the overflow-safe `hypot` classmethod.
        self.vhypot = np.vectorize(self.hypot)
        self.col = colFlux
        self.colFluxErr = colFluxErr

        # fluxMag0 = 10**(0.4 * COADD_ZP): flux corresponding to magnitude 0
        # under the hard-coded coadd zeropoint.
        self.fluxMag0 = 1./np.power(10, -0.4*self.COADD_ZP)
        self.fluxMag0Err = 0.

        super().__init__(**kwargs)

    @property
    def columns(self):
        return [self.col]

    @property
    def name(self):
        return f'mag_{self.col}'

    @classmethod
    def hypot(cls, a, b):
        """Compute sqrt(a^2 + b^2) without under/overflow."""
        # Scale by the larger magnitude so q <= 1 and q*q cannot overflow.
        if np.abs(a) < np.abs(b):
            a, b = b, a
        if a == 0.:
            return 0.
        q = b/a
        return np.abs(a) * np.sqrt(1. + q*q)

    def dn2flux(self, dn, fluxMag0):
        """Convert instrumental flux to nanojanskys."""
        return (self.AB_FLUX_SCALE * dn / fluxMag0).astype(np.float32)

    def dn2mag(self, dn, fluxMag0):
        """Convert instrumental flux to AB magnitude."""
        # Non-positive fluxes yield NaN/inf; suppress the numpy warnings.
        with warnings.catch_warnings():
            warnings.filterwarnings('ignore', r'invalid value encountered')
            warnings.filterwarnings('ignore', r'divide by zero')
            return (-2.5 * np.log10(dn/fluxMag0)).astype(np.float32)

    def dn2fluxErr(self, dn, dnErr, fluxMag0, fluxMag0Err):
        """Convert instrumental flux error to nanojanskys."""
        # Quadrature sum of the flux-error and zeropoint-error terms.
        retVal = self.vhypot(dn * fluxMag0Err, dnErr * fluxMag0)
        retVal *= self.AB_FLUX_SCALE / fluxMag0 / fluxMag0
        return retVal.astype(np.float32)

    def dn2MagErr(self, dn, dnErr, fluxMag0, fluxMag0Err):
        """Convert instrumental flux error to AB magnitude error."""
        # magErr = (2.5/ln 10) * fluxErr / flux.
        retVal = self.dn2fluxErr(dn, dnErr, fluxMag0, fluxMag0Err) / self.dn2flux(dn, fluxMag0)
        return (self.FIVE_OVER_2LOG10 * retVal).astype(np.float32)

1656 

1657 

class NanoJansky(Photometry):
    """Calibrate an instrumental flux column to nanojanskys."""

    def _func(self, df):
        # Delegate to the base-class converter with the fixed coadd zeropoint.
        instFlux = df[self.col]
        return self.dn2flux(instFlux, self.fluxMag0)

1662 

1663 

class NanoJanskyErr(Photometry):
    """Calibrate an instrumental flux error column to nanojanskys."""

    @property
    def columns(self):
        # Both the flux and its error column are required.
        return [self.col, self.colFluxErr]

    def _func(self, df):
        errors = self.dn2fluxErr(df[self.col], df[self.colFluxErr],
                                 self.fluxMag0, self.fluxMag0Err)
        # Re-attach the DataFrame index to the raw ndarray result.
        return pd.Series(errors, index=df.index)

1673 

1674 

class LocalPhotometry(Functor):
    """Base class for calibrating the specified instrument flux column using
    the local photometric calibration.

    Parameters
    ----------
    instFluxCol : `str`
        Name of the instrument flux column.
    instFluxErrCol : `str`
        Name of the assocated error columns for ``instFluxCol``.
    photoCalibCol : `str`
        Name of local calibration column.
    photoCalibErrCol : `str`, optional
        Error associated with ``photoCalibCol``. Ignored and deprecated; will
        be removed after v29.

    See Also
    --------
    LocalNanojansky
    LocalNanojanskyErr
    """
    # AB magnitude of a 1 nJy source; offset applied when converting
    # log10(flux [nJy]) to an AB magnitude.
    logNJanskyToAB = (1 * u.nJy).to_value(u.ABmag)

    def __init__(self,
                 instFluxCol,
                 instFluxErrCol,
                 photoCalibCol,
                 photoCalibErrCol=None,
                 **kwargs):
        self.instFluxCol = instFluxCol
        self.instFluxErrCol = instFluxErrCol
        self.photoCalibCol = photoCalibCol
        # TODO[DM-49400]: remove this check and the argument it corresponds to.
        if photoCalibErrCol is not None:
            warnings.warn("The photoCalibErrCol argument is deprecated and will be removed after v29.",
                          category=FutureWarning)
        super().__init__(**kwargs)

    def instFluxToNanojansky(self, instFlux, localCalib):
        """Convert instrument flux to nanojanskys.

        Parameters
        ----------
        instFlux : `~numpy.ndarray` or `~pandas.Series`
            Array of instrument flux measurements.
        localCalib : `~numpy.ndarray` or `~pandas.Series`
            Array of local photometric calibration estimates.

        Returns
        -------
        calibFlux : `~numpy.ndarray` or `~pandas.Series`
            Array of calibrated flux measurements.
        """
        return instFlux * localCalib

    def instFluxErrToNanojanskyErr(self, instFlux, instFluxErr, localCalib, localCalibErr=None):
        """Convert instrument flux error to nanojanskys.

        Parameters
        ----------
        instFlux : `~numpy.ndarray` or `~pandas.Series`
            Array of instrument flux measurements. Ignored (accepted for
            backwards compatibility and consistency with magnitude-error
            calculation methods).
        instFluxErr : `~numpy.ndarray` or `~pandas.Series`
            Errors on associated ``instFlux`` values.
        localCalib : `~numpy.ndarray` or `~pandas.Series`
            Array of local photometric calibration estimates.
        localCalibErr : `~numpy.ndarray` or `~pandas.Series`, optional
            Errors on associated ``localCalib`` values. Ignored and deprecated;
            will be removed after v29.

        Returns
        -------
        calibFluxErr : `~numpy.ndarray` or `~pandas.Series`
            Errors on calibrated flux measurements.
        """
        # TODO[DM-49400]: remove this check and the argument it corresponds to.
        if localCalibErr is not None:
            warnings.warn("The localCalibErr argument is deprecated and will be removed after v29.",
                          category=FutureWarning)
        return instFluxErr * localCalib

    def instFluxToMagnitude(self, instFlux, localCalib):
        """Convert instrument flux to an AB magnitude.

        Parameters
        ----------
        instFlux : `~numpy.ndarray` or `~pandas.Series`
            Array of instrument flux measurements.
        localCalib : `~numpy.ndarray` or `~pandas.Series`
            Array of local photometric calibration estimates.

        Returns
        -------
        calibMag : `~numpy.ndarray` or `~pandas.Series`
            Array of calibrated AB magnitudes.
        """
        return -2.5 * np.log10(self.instFluxToNanojansky(instFlux, localCalib)) + self.logNJanskyToAB

    def instFluxErrToMagnitudeErr(self, instFlux, instFluxErr, localCalib, localCalibErr=None):
        """Convert instrument flux error to an AB magnitude error.

        Parameters
        ----------
        instFlux : `~numpy.ndarray` or `~pandas.Series`
            Array of instrument flux measurements.
        instFluxErr : `~numpy.ndarray` or `~pandas.Series`
            Errors on associated ``instFlux`` values.
        localCalib : `~numpy.ndarray` or `~pandas.Series`
            Array of local photometric calibration estimates.
        localCalibErr : `~numpy.ndarray` or `~pandas.Series`, optional
            Errors on associated ``localCalib`` values. Ignored and deprecated;
            will be removed after v29.

        Returns
        -------
        calibMagErr: `~numpy.ndarray` or `~pandas.Series`
            Error on calibrated AB magnitudes.
        """
        # TODO[DM-49400]: remove this check and the argument it corresponds to.
        if localCalibErr is not None:
            warnings.warn("The localCalibErr argument is deprecated and will be removed after v29.",
                          category=FutureWarning)
        err = self.instFluxErrToNanojanskyErr(instFlux, instFluxErr, localCalib)
        # magErr = (2.5/ln 10) * fluxErr / flux. The denominator must be the
        # calibrated flux (instFlux * localCalib); previously this passed
        # instFluxErr as the calibration, yielding localCalib/instFlux
        # instead of the relative flux error.
        return 2.5 / np.log(10) * err / self.instFluxToNanojansky(instFlux, localCalib)

1801 

1802 

class LocalNanojansky(LocalPhotometry):
    """Calibrated flux using the local calibration value, in nanojanskys."""

    @property
    def columns(self):
        return [self.instFluxCol, self.photoCalibCol]

    @property
    def name(self):
        return f'flux_{self.instFluxCol}'

    def _func(self, df):
        calibrated = self.instFluxToNanojansky(df[self.instFluxCol],
                                               df[self.photoCalibCol])
        # Downcast to float32 for storage in the output table.
        return calibrated.astype(np.float32)

1820 

1821 

class LocalNanojanskyErr(LocalPhotometry):
    """Calibrated flux error using the local calibration value, in
    nanojanskys.
    """

    @property
    def columns(self):
        return [self.instFluxCol, self.instFluxErrCol, self.photoCalibCol]

    @property
    def name(self):
        return f'fluxErr_{self.instFluxCol}'

    def _func(self, df):
        calibratedErr = self.instFluxErrToNanojanskyErr(df[self.instFluxCol],
                                                        df[self.instFluxErrCol],
                                                        df[self.photoCalibCol])
        # Downcast to float32 for storage in the output table.
        return calibratedErr.astype(np.float32)

1839 

1840 

class LocalDipoleMeanFlux(LocalPhotometry):
    """Mean of the absolute calibrated fluxes of the two dipole lobes.

    See Also
    --------
    LocalNanojansky
    LocalNanojanskyErr
    LocalDipoleMeanFluxErr
    LocalDipoleDiffFlux
    LocalDipoleDiffFluxErr
    """
    def __init__(self,
                 instFluxPosCol,
                 instFluxNegCol,
                 instFluxPosErrCol,
                 instFluxNegErrCol,
                 photoCalibCol,
                 # TODO[DM-49400]: remove this option; it's already deprecated (in super).
                 photoCalibErrCol=None,
                 **kwargs):
        self.instFluxNegCol = instFluxNegCol
        self.instFluxPosCol = instFluxPosCol
        self.instFluxNegErrCol = instFluxNegErrCol
        self.instFluxPosErrCol = instFluxPosErrCol
        self.photoCalibCol = photoCalibCol
        # The negative-lobe columns stand in for the base class's single
        # flux/error column pair.
        super().__init__(instFluxNegCol, instFluxNegErrCol, photoCalibCol,
                         photoCalibErrCol, **kwargs)

    @property
    def columns(self):
        return [self.instFluxPosCol,
                self.instFluxNegCol,
                self.photoCalibCol]

    @property
    def name(self):
        return f'dipMeanFlux_{self.instFluxPosCol}_{self.instFluxNegCol}'

    def _func(self, df):
        negFlux = np.fabs(self.instFluxToNanojansky(df[self.instFluxNegCol], df[self.photoCalibCol]))
        posFlux = np.fabs(self.instFluxToNanojansky(df[self.instFluxPosCol], df[self.photoCalibCol]))
        return 0.5*(negFlux + posFlux)

1885 

1886 

class LocalDipoleMeanFluxErr(LocalDipoleMeanFlux):
    """Compute the error on the absolute mean of dipole fluxes.

    See Also
    --------
    LocalNanojansky
    LocalNanojanskyErr
    LocalDipoleMeanFlux
    LocalDipoleDiffFlux
    LocalDipoleDiffFluxErr
    """

    @property
    def columns(self):
        # Both lobes, both lobe errors, and the local calibration.
        return [self.instFluxPosCol,
                self.instFluxNegCol,
                self.instFluxPosErrCol,
                self.instFluxNegErrCol,
                self.photoCalibCol]

    @property
    def name(self):
        return f'dipMeanFluxErr_{self.instFluxPosCol}_{self.instFluxNegCol}'

    def _func(self, df):
        # Quadrature sum of the lobe errors, halved for the mean, scaled by
        # the local calibration to convert to nanojansky.
        quad = np.hypot(df[self.instFluxNegErrCol], df[self.instFluxPosErrCol])
        return 0.5*quad * df[self.photoCalibCol]

1913 

1914 

class LocalDipoleDiffFlux(LocalDipoleMeanFlux):
    """Compute the absolute difference of dipole fluxes.

    Calculated value is (abs(pos) - abs(neg)).

    See Also
    --------
    LocalNanojansky
    LocalNanojanskyErr
    LocalDipoleMeanFlux
    LocalDipoleMeanFluxErr
    LocalDipoleDiffFluxErr
    """

    @property
    def columns(self):
        return [self.instFluxPosCol,
                self.instFluxNegCol,
                self.photoCalibCol]

    @property
    def name(self):
        return f'dipDiffFlux_{self.instFluxPosCol}_{self.instFluxNegCol}'

    def _func(self, df):
        # abs(pos) - abs(neg), each lobe calibrated to nanojansky first.
        pos = np.fabs(self.instFluxToNanojansky(df[self.instFluxPosCol], df[self.photoCalibCol]))
        neg = np.fabs(self.instFluxToNanojansky(df[self.instFluxNegCol], df[self.photoCalibCol]))
        return pos - neg

1942 

1943 

class LocalDipoleDiffFluxErr(LocalDipoleMeanFlux):
    """Compute the error on the absolute difference of dipole fluxes.

    See Also
    --------
    LocalNanojansky
    LocalNanojanskyErr
    LocalDipoleMeanFlux
    LocalDipoleMeanFluxErr
    LocalDipoleDiffFlux
    """

    @property
    def columns(self):
        # Both lobes, both lobe errors, and the local calibration.
        return [self.instFluxPosCol,
                self.instFluxNegCol,
                self.instFluxPosErrCol,
                self.instFluxNegErrCol,
                self.photoCalibCol]

    @property
    def name(self):
        return f'dipDiffFluxErr_{self.instFluxPosCol}_{self.instFluxNegCol}'

    def _func(self, df):
        # Quadrature sum of the lobe errors, scaled by the local calibration.
        quad = np.hypot(df[self.instFluxPosErrCol], df[self.instFluxNegErrCol])
        return quad * df[self.photoCalibCol]

1970 

1971 

class Ebv(Functor):
    """Compute E(B-V) from dustmaps.sfd."""
    _defaultDataset = 'ref'
    name = "E(B-V)"
    shortname = "ebv"

    def __init__(self, **kwargs):
        # dustmaps is imported lazily because only this functor needs it.
        # Redirect stdout to suppress the unnecessary .dustmapsrc log
        # message emitted on import.
        with open(os.devnull, "w") as devnull, redirect_stdout(devnull):
            from dustmaps.sfd import SFDQuery
        self.sfd = SFDQuery()
        self._columns = ['coord_ra', 'coord_dec']
        super().__init__(**kwargs)

    def _func(self, df):
        # Coordinates are stored in radians; query the SFD dust map there.
        coords = SkyCoord(df['coord_ra'].values * u.rad, df['coord_dec'].values * u.rad)
        return pd.Series(self.sfd(coords), index=df.index).astype('float32')

1992 

1993 

class MomentsBase(Functor):
    """Base class for functors that use shape moments and localWCS.

    Attributes
    ----------
    is_covariance : bool
        Whether the shape columns are terms of a covariance matrix. If False,
        they will be assumed to be terms of a correlation matrix instead
        (sigma_x, sigma_y, rho).
    """

    is_covariance: bool = True

    def __init__(self,
                 shape_1_1,
                 shape_2_2,
                 shape_1_2,
                 colCD_1_1,
                 colCD_1_2,
                 colCD_2_1,
                 colCD_2_2,
                 **kwargs):
        # Shape (moments) columns.
        self.shape_1_1 = shape_1_1
        self.shape_2_2 = shape_2_2
        self.shape_1_2 = shape_1_2
        # Local WCS CD-matrix term columns (from the ref table).
        self.colCD_1_1 = colCD_1_1
        self.colCD_1_2 = colCD_1_2
        self.colCD_2_1 = colCD_2_1
        self.colCD_2_2 = colCD_2_2
        super().__init__(**kwargs)

    @property
    def columns(self):
        # Shape columns plus the CD-matrix columns from the ref table.
        return [
            self.shape_1_1,
            self.shape_2_2,
            self.shape_1_2,
        ] + self.columns_ref

    @property
    def columns_ref(self):
        """Return columns that are needed from the ref table."""
        return [
            self.colCD_1_1,
            self.colCD_1_2,
            self.colCD_2_1,
            self.colCD_2_2]

    def compute_ellipse_terms(self, df, sky: bool = True):
        r"""Return terms commonly used for ellipse parameterization conversions.

        Parameters
        ----------
        df
            The data frame.
        sky
            Whether to compute the terms in sky coordinates.
            If False, XX, YY and XY moments are used instead of
            UU, VV and UV.

        Returns
        -------
        xx_p_yy
            The sum of the diagonal terms of the covariance.
        xx_m_yy
            The difference of the diagonal terms of the covariance.
        t2
            A term similar to the discriminant of the quadratic formula.
        """
        xx = self.sky_uu(df) if sky else self.get_xx(df)
        yy = self.sky_vv(df) if sky else self.get_yy(df)
        xx_m_yy = xx - yy
        t2 = xx_m_yy**2 + 4.0*(self.sky_uv(df) if sky else self.get_xy(df))**2
        # TODO: Check alternative form that may be more stable for computing
        # the minor axis size (see gauss2d/src/ellipse.cc)
        # t2 = xx**2 + yy**2 - 2*(xx*yy - 2*xy**2)
        return xx + yy, xx_m_yy, t2

    def get_xx(self, df):
        # For correlation-style inputs, shape_1_1 holds sigma_x, so square it.
        xx = df[self.shape_1_1]
        return xx if self.is_covariance else xx**2

    def get_yy(self, df):
        # For correlation-style inputs, shape_2_2 holds sigma_y, so square it.
        yy = df[self.shape_2_2]
        return yy if self.is_covariance else yy**2

    def get_xy(self, df):
        # For correlation-style inputs, shape_1_2 holds rho, so the
        # off-diagonal covariance is rho*sigma_x*sigma_y.
        xy = df[self.shape_1_2]
        return xy if self.is_covariance else xy*df[self.shape_1_1]*df[self.shape_2_2]

    # Each of sky_uu, sky_vv, sky_uv evaluates one element of
    # CD_matrix * moments_matrix * CD_matrix.T
    def sky_uu(self, df):
        """Return the component of the moments tensor aligned with the RA axis, in radians."""
        i_xx = self.get_xx(df)
        i_yy = self.get_yy(df)
        i_xy = self.get_xy(df)
        CD_1_1 = df[self.colCD_1_1]
        CD_1_2 = df[self.colCD_1_2]
        return (CD_1_1*(i_xx*CD_1_1 + i_xy*CD_1_2)
                + CD_1_2*(i_xy*CD_1_1 + i_yy*CD_1_2))

    def sky_vv(self, df):
        """Return the component of the moments tensor aligned with the dec axis, in radians."""
        i_xx = self.get_xx(df)
        i_yy = self.get_yy(df)
        i_xy = self.get_xy(df)
        CD_2_1 = df[self.colCD_2_1]
        CD_2_2 = df[self.colCD_2_2]
        return (CD_2_1*(i_xx*CD_2_1 + i_xy*CD_2_2)
                + CD_2_2*(i_xy*CD_2_1 + i_yy*CD_2_2))

    def sky_uv(self, df):
        """Return the covariance of the moments tensor in ra, dec coordinates, in radians."""
        i_xx = self.get_xx(df)
        i_yy = self.get_yy(df)
        i_xy = self.get_xy(df)
        CD_1_1 = df[self.colCD_1_1]
        CD_1_2 = df[self.colCD_1_2]
        CD_2_1 = df[self.colCD_2_1]
        CD_2_2 = df[self.colCD_2_2]
        return ((CD_1_1 * i_xx + CD_1_2 * i_xy) * CD_2_1
                + (CD_1_1 * i_xy + CD_1_2 * i_yy) * CD_2_2)

    def _shear_terms(self, df):
        """Return the sky-frame moments and the shared G1/G2 denominator.

        Factored out of get_g1/get_g2, which previously duplicated this
        computation.

        Returns
        -------
        sky_uu, sky_vv, sky_uv
            Sky-frame moments tensor terms (see sky_uu/sky_vv/sky_uv).
        denom
            The common denominator of the G1/G2 reduced-shear parameters.
        """
        # TODO: Replace this with functionality from afwGeom, DM-54015
        sky_uu = self.sky_uu(df)
        sky_vv = self.sky_vv(df)
        sky_uv = self.sky_uv(df)
        denom = sky_uu + sky_vv + 2 * np.sqrt(sky_uu*sky_vv - sky_uv**2)
        return sky_uu, sky_vv, sky_uv, denom

    def get_g1(self, df):
        """
        Calculate shear-type ellipticity parameter G1.
        """
        sky_uu, sky_vv, _, denom = self._shear_terms(df)
        return ((sky_uu - sky_vv) / denom).astype(np.float32)

    def get_g2(self, df):
        """
        Calculate shear-type ellipticity parameter G2.

        This has the opposite sign as sky_uv in order to maintain consistency with the HSM moments
        sign convention.
        """
        _, _, sky_uv, denom = self._shear_terms(df)
        return (-2*sky_uv / denom).astype(np.float32)

    def get_trace(self, df):
        # Trace radius sqrt((uu + vv)/2), in radians, as single precision.
        sky_uu = self.sky_uu(df)
        sky_vv = self.sky_vv(df)
        return np.sqrt(0.5*(sky_uu + sky_vv)).astype(np.float32)

2146 

2147 

class MomentsG1Sky(MomentsBase):
    """Reduced-shear G1 component of the pixel moments Ixx,Iyy,Ixy after
    rotation into the RA/dec frame."""
    _defaultDataset = 'meas'
    name = "moments_g1"
    shortname = "moments_g1"

    def _func(self, df):
        # get_g1 performs the frame rotation and the G1 parameterization.
        return pd.Series(self.get_g1(df).astype(np.float32), index=df.index)

2159 

2160 

class MomentsG2Sky(MomentsBase):
    """Reduced-shear G2 component of the pixel moments Ixx,Iyy,Ixy after
    rotation into the RA/dec frame."""
    _defaultDataset = 'meas'
    name = "moments_g2"
    shortname = "moments_g2"

    def _func(self, df):
        # get_g2 performs the frame rotation and the G2 parameterization.
        return pd.Series(self.get_g2(df).astype(np.float32), index=df.index)

2172 

2173 

class MomentsTraceSky(MomentsBase):
    """Trace radius size in arcseconds from pixel moments Ixx,Iyy,Ixy.

    The trace radius is a size measure equal to the square root of half
    of the trace of the second-moments tensor.
    """
    _defaultDataset = 'meas'
    name = "moments_trace"
    shortname = "moments_trace"

    def _func(self, df):
        # get_trace returns radians; convert to arcseconds.
        trace_arcsec = self.get_trace(df)*(180/np.pi)*3600
        return pd.Series(trace_arcsec.astype(np.float32), index=df.index)

2188 

2189 

class MomentsIuuSky(MomentsBase):
    """UU component of the pixel moments Ixx,Iyy,Ixy rotated into the
    ra,dec frame, in arcseconds squared."""
    _defaultDataset = 'meas'
    name = "moments_uu"
    shortname = "moments_uu"

    def _func(self, df):
        # sky_uu is in radians^2; scale by (arcsec per radian)^2.
        uu_arcsec_sq = self.sky_uu(df)*((180/np.pi)*3600)**2
        return pd.Series(uu_arcsec_sq.astype(np.float32), index=df.index)

2200 

2201 

class CorrelationIuuSky(MomentsIuuSky):
    """MomentsIuuSky but from sigma_x, sigma_y, rho correlation terms."""
    # Tell MomentsBase to interpret the shape columns as correlation terms
    # (sigma_x, sigma_y, rho) rather than covariance-matrix entries.
    is_covariance = False

2205 

2206 

class MomentsIvvSky(MomentsBase):
    """VV component of the pixel moments Ixx,Iyy,Ixy rotated into the
    ra,dec frame, in arcseconds squared."""
    _defaultDataset = 'meas'
    name = "moments_vv"
    shortname = "moments_vv"

    def _func(self, df):
        # sky_vv is in radians^2; scale by (arcsec per radian)^2.
        vv_arcsec_sq = self.sky_vv(df)*((180/np.pi)*3600)**2
        return pd.Series(vv_arcsec_sq.astype(np.float32), index=df.index)

2217 

2218 

class CorrelationIvvSky(MomentsIvvSky):
    """MomentsIvvSky but from sigma_x, sigma_y, rho correlation terms."""
    # Tell MomentsBase to interpret the shape columns as correlation terms
    # (sigma_x, sigma_y, rho) rather than covariance-matrix entries.
    is_covariance = False

2222 

2223 

class MomentsIuvSky(MomentsBase):
    """UV component of the pixel moments Ixx,Iyy,Ixy rotated into the
    ra,dec frame, in arcseconds squared."""
    _defaultDataset = 'meas'
    name = "moments_uv"
    shortname = "moments_uv"

    def _func(self, df):
        # sky_uv is in radians^2; scale by (arcsec per radian)^2.
        uv_arcsec_sq = self.sky_uv(df)*((180/np.pi)*3600)**2
        return pd.Series(uv_arcsec_sq.astype(np.float32), index=df.index)

2234 

2235 

class CorrelationIuvSky(MomentsIuvSky):
    """MomentsIuvSky but from sigma_x, sigma_y, rho correlation terms."""
    # Tell MomentsBase to interpret the shape columns as correlation terms
    # (sigma_x, sigma_y, rho) rather than covariance-matrix entries.
    is_covariance = False

2239 

2240 

class PositionAngleFromMoments(MomentsBase):
    """Compute position angle relative to ra,dec frame, in degrees, from Ixx,Iyy,Ixy pixel moments."""
    _defaultDataset = 'meas'
    name = "moments_theta"
    shortname = "moments_theta"

    def _func(self, df):
        uu = self.sky_uu(df)
        vv = self.sky_vv(df)
        uv = self.sky_uv(df)
        # Standard ellipse orientation relation: tan(2*theta) = 2*uv/(uu - vv).
        theta_rad = 0.5*np.arctan2(2*uv, uu - vv)
        return pd.Series(np.degrees(np.array(theta_rad)).astype(np.float32), index=df.index)

2254 

2255 

class PositionAngleFromCorrelation(PositionAngleFromMoments):
    """PositionAngleFromMoments but from sigma_x, sigma_y, rho correlation terms."""
    # Tell MomentsBase to interpret the shape columns as correlation terms
    # (sigma_x, sigma_y, rho) rather than covariance-matrix entries.
    is_covariance = False

2259 

2260 

class SemimajorAxisFromMoments(MomentsBase):
    """Compute the semimajor axis length in arcseconds, from Ixx,Iyy,Ixy pixel moments."""
    _defaultDataset = 'meas'
    name = "moments_a"
    shortname = "moments_a"

    def _func(self, df):
        sum_diag, _, disc = self.compute_ellipse_terms(df)
        # Same (unvectorized) formula as used in afw.geom.ellipse.
        semimajor_rad = np.sqrt(0.5 * (sum_diag + np.sqrt(disc)))
        semimajor_arcsec = np.degrees(semimajor_rad)*3600
        return pd.Series(semimajor_arcsec.astype(np.float32), index=df.index)

2273 

2274 

class SemimajorAxisFromCorrelation(SemimajorAxisFromMoments):
    """SemimajorAxisFromMoments but from sigma_x, sigma_y, rho correlation terms."""
    # Tell MomentsBase to interpret the shape columns as correlation terms
    # (sigma_x, sigma_y, rho) rather than covariance-matrix entries.
    is_covariance = False

2278 

2279 

class SemiminorAxisFromMoments(MomentsBase):
    """Compute the semiminor axis length in arcseconds, from Ixx,Iyy,Ixy pixel moments."""
    _defaultDataset = 'meas'
    name = "moments_b"
    shortname = "moments_b"

    def _func(self, df):
        sum_diag, _, disc = self.compute_ellipse_terms(df)
        # Same (unvectorized) formula as used in afw.geom.ellipse.
        semiminor_rad = np.sqrt(0.5 * (sum_diag - np.sqrt(disc)))
        semiminor_arcsec = np.degrees(semiminor_rad)*3600
        return pd.Series(semiminor_arcsec.astype(np.float32), index=df.index)

2292 

2293 

class SemiminorAxisFromCorrelation(SemiminorAxisFromMoments):
    """SemiminorAxisFromMoments but from sigma_x, sigma_y, rho correlation terms."""
    # Tell MomentsBase to interpret the shape columns as correlation terms
    # (sigma_x, sigma_y, rho) rather than covariance-matrix entries.
    is_covariance = False