# This file is part of pipe_tasks.
#
# Developed for the LSST Data Management System.
# This product includes software developed by the LSST Project
# (http://www.lsst.org).
# See the COPYRIGHT file at the top-level directory of this distribution
# for details of code ownership.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

import astropy.units as u
import copy
import functools
import numpy as np
import os
import pandas as pd
import unittest
import tempfile
import shutil

import lsst.daf.base as dafBase
import lsst.afw.geom as afwGeom
import lsst.geom as geom
from lsst.sphgeom import HtmPixelization
import lsst.meas.base as measBase
import lsst.utils.tests
from lsst.pipe.tasks.parquetTable import MultilevelParquetTable, ParquetTable
from lsst.daf.butler import Butler, DatasetType
from lsst.pipe.tasks.functors import (CompositeFunctor, CustomFunctor, Column, RAColumn,
                                      DecColumn, Mag, MagDiff, Color, StarGalaxyLabeller,
                                      DeconvolvedMoments, SdssTraceSize, PsfSdssTraceSizeDiff,
                                      HsmTraceSize, PsfHsmTraceSizeDiff, HsmFwhm,
                                      LocalPhotometry, LocalNanojansky, LocalNanojanskyErr,
                                      LocalMagnitude, LocalMagnitudeErr,
                                      LocalWcs, ComputePixelScale, ConvertPixelToArcseconds,
                                      ConvertPixelSqToArcsecondsSq, Ratio)

ROOT = os.path.abspath(os.path.dirname(__file__))


class FunctorTestCase(unittest.TestCase):

    def simulateMultiParquet(self, dataDict):
        """Create a simple test MultilevelParquetTable
        """
        simpleDF = pd.DataFrame(dataDict)
        dfFilterDSCombos = []
        for ds in self.datasets:
            for band in self.bands:
                df = copy.copy(simpleDF)
                df = df.reindex(sorted(df.columns), axis=1)
                df['dataset'] = ds
                df['band'] = band
                df.columns = pd.MultiIndex.from_tuples(
                    [(ds, band, c) for c in df.columns],
                    names=('dataset', 'band', 'column'))
                dfFilterDSCombos.append(df)

        df = functools.reduce(lambda d1, d2: d1.join(d2), dfFilterDSCombos)

        return MultilevelParquetTable(dataFrame=df)

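    # simulateMultiParquet builds a table whose columns form a three-level
    # MultiIndex named ('dataset', 'band', 'column'), e.g.
    # ('meas', 'g', 'coord_ra'), with one copy of the input columns for each
    # dataset/band combination in self.datasets x self.bands.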

    def simulateParquet(self, dataDict):
        df = pd.DataFrame(dataDict)
        return ParquetTable(dataFrame=df)

    def getDatasetHandle(self, parq):
        df = parq._df
        lo, hi = HtmPixelization(7).universe().ranges()[0]
        value = np.random.randint(lo, hi)
        ref = self.butler.put(df, self.datasetType, dataId={'htm7': value})
        return self.butler.getDeferred(ref)

    def setUp(self):
        np.random.seed(12345)
        self.datasets = ['forced_src', 'meas', 'ref']
        self.bands = ['g', 'r']
        self.columns = ['coord_ra', 'coord_dec']
        self.nRecords = 5
        self.dataDict = {
            "coord_ra": [3.77654137, 3.77643059, 3.77621148, 3.77611944, 3.77610396],
            "coord_dec": [0.01127624, 0.01127787, 0.01127543, 0.01127543, 0.01127543]}

        # Set up butler
        self.root = tempfile.mkdtemp(dir=ROOT)
        Butler.makeRepo(self.root)
        self.butler = Butler(self.root, run="test_run")
        self.datasetType = DatasetType("data", dimensions=('htm7',), storageClass="DataFrame",
                                       universe=self.butler.registry.dimensions)
        self.butler.registry.registerDatasetType(self.datasetType)

    def tearDown(self):
        if os.path.exists(self.root):
            shutil.rmtree(self.root, ignore_errors=True)

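    # Each helper below evaluates a functor twice: once on the in-memory
    # (Multilevel)ParquetTable and once on a DeferredDatasetHandle retrieved
    # from the temporary butler repository set up above, and checks that the
    # two code paths give identical results.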

    def _funcVal(self, functor, parq):
        self.assertIsInstance(functor.name, str)
        self.assertIsInstance(functor.shortname, str)

        handle = self.getDatasetHandle(parq)

        val = functor(parq)
        val2 = functor(handle)
        self.assertTrue((val == val2).all())
        self.assertIsInstance(val, pd.Series)

        val = functor(parq, dropna=True)
        val2 = functor(handle, dropna=True)
        self.assertTrue((val == val2).all())
        self.assertEqual(val.isnull().sum(), 0)

        return val

    def _differenceVal(self, functor, parq1, parq2):
        self.assertIsInstance(functor.name, str)
        self.assertIsInstance(functor.shortname, str)

        handle1 = self.getDatasetHandle(parq1)
        handle2 = self.getDatasetHandle(parq2)

        val = functor.difference(parq1, parq2)
        val2 = functor.difference(handle1, handle2)
        self.assertTrue(val.equals(val2))
        self.assertIsInstance(val, pd.Series)

        val = functor.difference(parq1, parq2, dropna=True)
        val2 = functor.difference(handle1, handle2, dropna=True)
        self.assertTrue(val.equals(val2))
        self.assertEqual(val.isnull().sum(), 0)

        val1 = self._funcVal(functor, parq1)
        val2 = self._funcVal(functor, parq2)

        self.assertTrue(np.allclose(val, val1 - val2))

        return val

    def testColumn(self):
        self.columns.append("base_FootprintArea_value")
        self.dataDict["base_FootprintArea_value"] = \
            np.full(self.nRecords, 1)
        parq = self.simulateMultiParquet(self.dataDict)
        func = Column('base_FootprintArea_value', filt='g')
        self._funcVal(func, parq)

        parq = self.simulateParquet(self.dataDict)
        func = Column('base_FootprintArea_value')
        self._funcVal(func, parq)

    def testCustom(self):
        self.columns.append("base_FootprintArea_value")
        self.dataDict["base_FootprintArea_value"] = \
            np.random.rand(self.nRecords)
        parq = self.simulateMultiParquet(self.dataDict)
        func = CustomFunctor('2*base_FootprintArea_value', filt='g')
        val = self._funcVal(func, parq)

        func2 = Column('base_FootprintArea_value', filt='g')

        self.assertTrue(np.allclose(val.values, 2*func2(parq).values, atol=1e-13, rtol=0))

        parq = self.simulateParquet(self.dataDict)
        func = CustomFunctor('2 * base_FootprintArea_value')
        val = self._funcVal(func, parq)
        func2 = Column('base_FootprintArea_value')

        self.assertTrue(np.allclose(val.values, 2*func2(parq).values, atol=1e-13, rtol=0))

    def testCoords(self):
        parq = self.simulateMultiParquet(self.dataDict)
        ra = self._funcVal(RAColumn(), parq)
        dec = self._funcVal(DecColumn(), parq)

        columnDict = {'dataset': 'ref', 'band': 'g',
                      'column': ['coord_ra', 'coord_dec']}

        coords = parq.toDataFrame(columns=columnDict, droplevels=False) / np.pi * 180.

        self.assertTrue(np.allclose(ra, coords[('ref', 'g', 'coord_ra')], atol=1e-13, rtol=0))
        self.assertTrue(np.allclose(dec, coords[('ref', 'g', 'coord_dec')], atol=1e-13, rtol=0))

        # single-level column index table
        parq = self.simulateParquet(self.dataDict)
        ra = self._funcVal(RAColumn(), parq)
        dec = self._funcVal(DecColumn(), parq)

        coords = parq.toDataFrame(columns=['coord_ra', 'coord_dec']) / np.pi * 180.

        self.assertTrue(np.allclose(ra, coords['coord_ra'], atol=1e-13, rtol=0))
        self.assertTrue(np.allclose(dec, coords['coord_dec'], atol=1e-13, rtol=0))

    def testMag(self):
        self.columns.extend(["base_PsfFlux_instFlux", "base_PsfFlux_instFluxErr"])
        self.dataDict["base_PsfFlux_instFlux"] = np.full(self.nRecords, 1000)
        self.dataDict["base_PsfFlux_instFluxErr"] = np.full(self.nRecords, 10)
        parq = self.simulateMultiParquet(self.dataDict)
        # Change the value for one dataset/band combination.
        parq._df[("meas", "g", "base_PsfFlux_instFlux")] -= 1

        fluxName = 'base_PsfFlux'

        # Check that things work when you provide the dataset explicitly.
        for dataset in ['forced_src', 'meas']:
            psfMag_G = self._funcVal(Mag(fluxName, dataset=dataset,
                                         filt='g'),
                                     parq)
            psfMag_R = self._funcVal(Mag(fluxName, dataset=dataset,
                                         filt='r'),
                                     parq)

            psfColor_GR = self._funcVal(Color(fluxName, 'g', 'r',
                                              dataset=dataset),
                                        parq)

            self.assertTrue(np.allclose((psfMag_G - psfMag_R).dropna(), psfColor_GR, rtol=0, atol=1e-13))

        # Check that the behavior is as expected when no dataset is provided;
        # that is, the color comes from forced_src and the default Mag dataset is meas.
        psfMag_G = self._funcVal(Mag(fluxName, filt='g'), parq)
        psfMag_R = self._funcVal(Mag(fluxName, filt='r'), parq)

        psfColor_GR = self._funcVal(Color(fluxName, 'g', 'r'), parq)

        # These should *not* be equal.
        self.assertFalse(np.allclose((psfMag_G - psfMag_R).dropna(), psfColor_GR))

    def testMagDiff(self):
        self.columns.extend(["base_PsfFlux_instFlux", "base_PsfFlux_instFluxErr",
                             "modelfit_CModel_instFlux", "modelfit_CModel_instFluxErr"])
        self.dataDict["base_PsfFlux_instFlux"] = np.full(self.nRecords, 1000)
        self.dataDict["base_PsfFlux_instFluxErr"] = np.full(self.nRecords, 10)
        self.dataDict["modelfit_CModel_instFlux"] = np.full(self.nRecords, 1000)
        self.dataDict["modelfit_CModel_instFluxErr"] = np.full(self.nRecords, 10)
        parq = self.simulateMultiParquet(self.dataDict)

        for filt in self.bands:
            val = self._funcVal(MagDiff('base_PsfFlux', 'modelfit_CModel', filt=filt), parq)

            mag1 = self._funcVal(Mag('modelfit_CModel', filt=filt), parq)
            mag2 = self._funcVal(Mag('base_PsfFlux', filt=filt), parq)
            self.assertTrue(np.allclose((mag2 - mag1).dropna(), val, rtol=0, atol=1e-13))

    def testDifference(self):
        """Test the .difference method using MagDiff as the example.
        """
        self.columns.extend(["base_PsfFlux_instFlux", "base_PsfFlux_instFluxErr",
                             "modelfit_CModel_instFlux", "modelfit_CModel_instFluxErr"])

        self.dataDict["base_PsfFlux_instFlux"] = np.full(self.nRecords, 1000)
        self.dataDict["modelfit_CModel_instFlux"] = np.full(self.nRecords, 1000)
        parq1 = self.simulateMultiParquet(self.dataDict)

        self.dataDict["base_PsfFlux_instFlux"] = np.full(self.nRecords, 999)
        self.dataDict["modelfit_CModel_instFlux"] = np.full(self.nRecords, 999)
        parq2 = self.simulateMultiParquet(self.dataDict)

        magDiff = MagDiff('base_PsfFlux', 'modelfit_CModel', filt='g')

        # Assert that the differences are computed properly.
        self._differenceVal(magDiff, parq1, parq2)

    def testLabeller(self):
        # Covering the code is better than nothing
        self.columns.append("base_ClassificationExtendedness_value")
        self.dataDict["base_ClassificationExtendedness_value"] = np.full(self.nRecords, 1)
        parq = self.simulateMultiParquet(self.dataDict)
        labels = self._funcVal(StarGalaxyLabeller(), parq)  # noqa

    def testPixelScale(self):
        # The pixel scale and pixel -> arcsec calculations are exercised in
        # testConvertPixelToArcseconds below.
        pass

    def testOther(self):
        self.columns.extend(["ext_shapeHSM_HsmSourceMoments_xx", "ext_shapeHSM_HsmSourceMoments_yy",
                             "base_SdssShape_xx", "base_SdssShape_yy",
                             "ext_shapeHSM_HsmPsfMoments_xx", "ext_shapeHSM_HsmPsfMoments_yy",
                             "base_SdssShape_psf_xx", "base_SdssShape_psf_yy"])
        self.dataDict["ext_shapeHSM_HsmSourceMoments_xx"] = np.full(self.nRecords, 1 / np.sqrt(2))
        self.dataDict["ext_shapeHSM_HsmSourceMoments_yy"] = np.full(self.nRecords, 1 / np.sqrt(2))
        self.dataDict["base_SdssShape_xx"] = np.full(self.nRecords, 1 / np.sqrt(2))
        self.dataDict["base_SdssShape_yy"] = np.full(self.nRecords, 1 / np.sqrt(2))
        self.dataDict["ext_shapeHSM_HsmPsfMoments_xx"] = np.full(self.nRecords, 1 / np.sqrt(2))
        self.dataDict["ext_shapeHSM_HsmPsfMoments_yy"] = np.full(self.nRecords, 1 / np.sqrt(2))
        self.dataDict["base_SdssShape_psf_xx"] = np.full(self.nRecords, 1)
        self.dataDict["base_SdssShape_psf_yy"] = np.full(self.nRecords, 1)
        parq = self.simulateMultiParquet(self.dataDict)
        # Covering the code is better than nothing
        for filt in self.bands:
            for Func in [DeconvolvedMoments,
                         SdssTraceSize,
                         PsfSdssTraceSizeDiff,
                         HsmTraceSize, PsfHsmTraceSizeDiff, HsmFwhm]:
                val = self._funcVal(Func(filt=filt), parq)  # noqa

    def _compositeFuncVal(self, functor, parq):
        self.assertIsInstance(functor, CompositeFunctor)

        handle = self.getDatasetHandle(parq)

        df = functor(parq)
        df2 = functor(handle)
        self.assertTrue(df.equals(df2))

        self.assertIsInstance(df, pd.DataFrame)
        self.assertTrue(np.all([k in df.columns for k in functor.funcDict.keys()]))

        df = functor(parq, dropna=True)
        df2 = functor(handle, dropna=True)
        self.assertTrue(df.equals(df2))

        # Check that there are no nulls
        self.assertFalse(df.isnull().any(axis=None))

        return df

    def _compositeDifferenceVal(self, functor, parq1, parq2):
        self.assertIsInstance(functor, CompositeFunctor)

        handle1 = self.getDatasetHandle(parq1)
        handle2 = self.getDatasetHandle(parq2)

        df = functor.difference(parq1, parq2)
        df2 = functor.difference(handle1, handle2)
        self.assertTrue(df.equals(df2))

        self.assertIsInstance(df, pd.DataFrame)
        self.assertTrue(np.all([k in df.columns for k in functor.funcDict.keys()]))

        df = functor.difference(parq1, parq2, dropna=True)
        df2 = functor.difference(handle1, handle2, dropna=True)
        self.assertTrue(df.equals(df2))

        # Check that there are no nulls
        self.assertFalse(df.isnull().any(axis=None))

        df1 = functor(parq1)
        df2 = functor(parq2)

        self.assertTrue(np.allclose(df.values, df1.values - df2.values))

        return df

    def testComposite(self):
        self.columns.extend(["modelfit_CModel_instFlux", "base_PsfFlux_instFlux"])
        self.dataDict["modelfit_CModel_instFlux"] = np.full(self.nRecords, 1)
        self.dataDict["base_PsfFlux_instFlux"] = np.full(self.nRecords, 1)

        parq = self.simulateMultiParquet(self.dataDict)
        # Modify the r band value slightly.
        parq._df[("meas", "r", "base_PsfFlux_instFlux")] -= 0.1

        filt = 'g'
        funcDict = {'psfMag_ref': Mag('base_PsfFlux', dataset='ref'),
                    'ra': RAColumn(),
                    'dec': DecColumn(),
                    'psfMag': Mag('base_PsfFlux', filt=filt),
                    'cmodel_magDiff': MagDiff('base_PsfFlux',
                                              'modelfit_CModel', filt=filt)}
        func = CompositeFunctor(funcDict)
        df = self._compositeFuncVal(func, parq)

        # Repeat the same, but define the filter globally instead of individually.
        funcDict2 = {'psfMag_ref': Mag('base_PsfFlux', dataset='ref'),
                     'ra': RAColumn(),
                     'dec': DecColumn(),
                     'psfMag': Mag('base_PsfFlux'),
                     'cmodel_magDiff': MagDiff('base_PsfFlux',
                                               'modelfit_CModel')}

        func2 = CompositeFunctor(funcDict2, filt=filt)
        df2 = self._compositeFuncVal(func2, parq)
        self.assertTrue(df.equals(df2))

        func2.filt = 'r'
        df3 = self._compositeFuncVal(func2, parq)
        # Because the r band values were modified, df3 should differ from df2.
        self.assertFalse(df2.equals(df3))

        # Make sure things work when passing a list instead of a dict.
        funcs = [Mag('base_PsfFlux', dataset='ref'),
                 RAColumn(),
                 DecColumn(),
                 Mag('base_PsfFlux', filt=filt),
                 MagDiff('base_PsfFlux', 'modelfit_CModel', filt=filt)]

        df = self._compositeFuncVal(CompositeFunctor(funcs), parq)

    def testCompositeSimple(self):
        """Test single-level composite functor for functionality
        """
        self.columns.extend(["modelfit_CModel_instFlux", "base_PsfFlux_instFlux"])
        self.dataDict["modelfit_CModel_instFlux"] = np.full(self.nRecords, 1)
        self.dataDict["base_PsfFlux_instFlux"] = np.full(self.nRecords, 1)

        parq = self.simulateParquet(self.dataDict)
        funcDict = {'ra': RAColumn(),
                    'dec': DecColumn(),
                    'psfMag': Mag('base_PsfFlux'),
                    'cmodel_magDiff': MagDiff('base_PsfFlux',
                                              'modelfit_CModel')}
        func = CompositeFunctor(funcDict)
        df = self._compositeFuncVal(func, parq)  # noqa

    def testCompositeColor(self):
        self.dataDict["base_PsfFlux_instFlux"] = np.full(self.nRecords, 1000)
        self.dataDict["base_PsfFlux_instFluxErr"] = np.full(self.nRecords, 10)
        parq = self.simulateMultiParquet(self.dataDict)
        funcDict = {'a': Mag('base_PsfFlux', dataset='meas', filt='g'),
                    'b': Mag('base_PsfFlux', dataset='forced_src', filt='g'),
                    'c': Color('base_PsfFlux', 'g', 'r')}
        # Covering the code is better than nothing
        df = self._compositeFuncVal(CompositeFunctor(funcDict), parq)  # noqa

    def testCompositeDifference(self):
        self.dataDict["base_PsfFlux_instFlux"] = np.full(self.nRecords, 1000)
        self.dataDict["base_PsfFlux_instFluxErr"] = np.full(self.nRecords, 10)
        parq1 = self.simulateMultiParquet(self.dataDict)

        self.dataDict["base_PsfFlux_instFlux"] = np.full(self.nRecords, 999)
        self.dataDict["base_PsfFlux_instFluxErr"] = np.full(self.nRecords, 9)
        parq2 = self.simulateMultiParquet(self.dataDict)

        funcDict = {'a': Mag('base_PsfFlux', dataset='meas', filt='g'),
                    'b': Mag('base_PsfFlux', dataset='forced_src', filt='g'),
                    'c': Color('base_PsfFlux', 'g', 'r')}
        # Covering the code is better than nothing
        df = self._compositeDifferenceVal(CompositeFunctor(funcDict), parq1, parq2)  # noqa

    def testCompositeFail(self):
        """Test a composite functor in which one of the component functors is junk.
        """
        self.dataDict["base_PsfFlux_instFlux"] = np.full(self.nRecords, 1000)
        parq = self.simulateMultiParquet(self.dataDict)

        funcDict = {'good': Column("base_PsfFlux_instFlux"),
                    'bad': Column('not_a_column')}

        df = self._compositeFuncVal(CompositeFunctor(funcDict), parq)  # noqa

    def testLocalPhotometry(self):
        """Test the local photometry functors.
        """
        flux = 1000
        fluxErr = 10
        calib = 10
        calibErr = 1
        self.dataDict["base_PsfFlux_instFlux"] = np.full(self.nRecords, flux)
        self.dataDict["base_PsfFlux_instFluxErr"] = np.full(self.nRecords,
                                                            fluxErr)
        self.dataDict["base_LocalPhotoCalib"] = np.full(self.nRecords, calib)
        self.dataDict["base_LocalPhotoCalibErr"] = np.full(self.nRecords,
                                                           calibErr)
        parq = self.simulateMultiParquet(self.dataDict)
        func = LocalPhotometry("base_PsfFlux_instFlux",
                               "base_PsfFlux_instFluxErr",
                               "base_LocalPhotoCalib",
                               "base_LocalPhotoCalibErr")
        df = parq.toDataFrame(columns={"dataset": "meas",
                                       "band": "g",
                                       "columns": ["base_PsfFlux_instFlux",
                                                   "base_PsfFlux_instFluxErr",
                                                   "base_LocalPhotoCalib",
                                                   "base_LocalPhotoCalibErr"]})
        nanoJansky = func.instFluxToNanojansky(
            df[("meas", "g", "base_PsfFlux_instFlux")],
            df[("meas", "g", "base_LocalPhotoCalib")])
        mag = func.instFluxToMagnitude(
            df[("meas", "g", "base_PsfFlux_instFlux")],
            df[("meas", "g", "base_LocalPhotoCalib")])
        nanoJanskyErr = func.instFluxErrToNanojanskyErr(
            df[("meas", "g", "base_PsfFlux_instFlux")],
            df[("meas", "g", "base_PsfFlux_instFluxErr")],
            df[("meas", "g", "base_LocalPhotoCalib")],
            df[("meas", "g", "base_LocalPhotoCalibErr")])
        magErr = func.instFluxErrToMagnitudeErr(
            df[("meas", "g", "base_PsfFlux_instFlux")],
            df[("meas", "g", "base_PsfFlux_instFluxErr")],
            df[("meas", "g", "base_LocalPhotoCalib")],
            df[("meas", "g", "base_LocalPhotoCalibErr")])

        self.assertTrue(np.allclose(nanoJansky.values,
                                    flux * calib,
                                    atol=1e-13,
                                    rtol=0))
        self.assertTrue(np.allclose(mag.values,
                                    (flux * calib * u.nJy).to_value(u.ABmag),
                                    atol=1e-13,
                                    rtol=0))
        self.assertTrue(np.allclose(nanoJanskyErr.values,
                                    np.hypot(fluxErr * calib, flux * calibErr),
                                    atol=1e-13,
                                    rtol=0))
        self.assertTrue(np.allclose(
            magErr.values,
            2.5 / np.log(10) * nanoJanskyErr.values / nanoJansky.values,
            atol=1e-13,
            rtol=0))

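        # The expected values above encode the standard relations checked by
        # these assertions: nJy = instFlux * calib, the flux error propagates
        # as hypot(fluxErr * calib, flux * calibErr), and the magnitude error
        # is 2.5 / ln(10) times the relative flux error.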

        # Test functors against the values computed above.
        self._testLocalPhotometryFunctors(LocalNanojansky,
                                          parq,
                                          nanoJansky)
        self._testLocalPhotometryFunctors(LocalNanojanskyErr,
                                          parq,
                                          nanoJanskyErr)
        self._testLocalPhotometryFunctors(LocalMagnitude,
                                          parq,
                                          mag)
        self._testLocalPhotometryFunctors(LocalMagnitudeErr,
                                          parq,
                                          magErr)

    def _testLocalPhotometryFunctors(self, functor, parq, testValues):
        func = functor("base_PsfFlux_instFlux",
                       "base_PsfFlux_instFluxErr",
                       "base_LocalPhotoCalib",
                       "base_LocalPhotoCalibErr")
        val = self._funcVal(func, parq)
        self.assertTrue(np.allclose(testValues.values,
                                    val.values,
                                    atol=1e-13,
                                    rtol=0))

    def testConvertPixelToArcseconds(self):
        """Test calculations of the pixel scale and conversions of pixel to
        arcseconds.
        """
        dipoleSep = 10
        ixx = 10
        testPixelDeltas = np.random.uniform(-100, 100, size=(self.nRecords, 2))
        import lsst.afw.table as afwTable
        localWcsPlugin = measBase.EvaluateLocalWcsPlugin(
            None,
            "base_LocalWcs",
            afwTable.SourceTable.makeMinimalSchema(),
            None)
        for dec in np.linspace(-90, 90, 10):
            for x, y in zip(np.random.uniform(2 * 1109.99981456774, size=10),
                            np.random.uniform(2 * 560.018167811613, size=10)):
                center = geom.Point2D(x, y)
                wcs = self._makeWcs(dec)
                skyOrigin = wcs.pixelToSky(center)

                linAffMatrix = localWcsPlugin.makeLocalTransformMatrix(wcs,
                                                                       center)
                self.dataDict["dipoleSep"] = np.full(self.nRecords, dipoleSep)
                self.dataDict["ixx"] = np.full(self.nRecords, ixx)
                self.dataDict["slot_Centroid_x"] = np.full(self.nRecords, x)
                self.dataDict["slot_Centroid_y"] = np.full(self.nRecords, y)
                self.dataDict["someCentroid_x"] = x + testPixelDeltas[:, 0]
                self.dataDict["someCentroid_y"] = y + testPixelDeltas[:, 1]
                self.dataDict["base_LocalWcs_CDMatrix_1_1"] = np.full(self.nRecords,
                                                                      linAffMatrix[0, 0])
                self.dataDict["base_LocalWcs_CDMatrix_1_2"] = np.full(self.nRecords,
                                                                      linAffMatrix[0, 1])
                self.dataDict["base_LocalWcs_CDMatrix_2_1"] = np.full(self.nRecords,
                                                                      linAffMatrix[1, 0])
                self.dataDict["base_LocalWcs_CDMatrix_2_2"] = np.full(self.nRecords,
                                                                      linAffMatrix[1, 1])
                parq = self.simulateMultiParquet(self.dataDict)
                func = LocalWcs("base_LocalWcs_CDMatrix_1_1",
                                "base_LocalWcs_CDMatrix_1_2",
                                "base_LocalWcs_CDMatrix_2_1",
                                "base_LocalWcs_CDMatrix_2_2")
                df = parq.toDataFrame(columns={"dataset": "meas",
                                               "band": "g",
                                               "columns": ["dipoleSep",
                                                           "slot_Centroid_x",
                                                           "slot_Centroid_y",
                                                           "someCentroid_x",
                                                           "someCentroid_y",
                                                           "base_LocalWcs_CDMatrix_1_1",
                                                           "base_LocalWcs_CDMatrix_1_2",
                                                           "base_LocalWcs_CDMatrix_2_1",
                                                           "base_LocalWcs_CDMatrix_2_2"]})

                # Exercise the full set of functions in LocalWcs.
                sepRadians = func.getSkySeperationFromPixel(
                    df[("meas", "g", "someCentroid_x")] - df[("meas", "g", "slot_Centroid_x")],
                    df[("meas", "g", "someCentroid_y")] - df[("meas", "g", "slot_Centroid_y")],
                    0.0,
                    0.0,
                    df[("meas", "g", "base_LocalWcs_CDMatrix_1_1")],
                    df[("meas", "g", "base_LocalWcs_CDMatrix_1_2")],
                    df[("meas", "g", "base_LocalWcs_CDMatrix_2_1")],
                    df[("meas", "g", "base_LocalWcs_CDMatrix_2_2")])

                # Test functor values against afw SkyWcs computations.
                for centX, centY, sep in zip(testPixelDeltas[:, 0],
                                             testPixelDeltas[:, 1],
                                             sepRadians.values):
                    afwSepRadians = skyOrigin.separation(
                        wcs.pixelToSky(x + centX, y + centY)).asRadians()
                    self.assertAlmostEqual(1 - sep / afwSepRadians, 0, places=6)

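                # The remaining checks compare the local-WCS based functors
                # against the full afw SkyWcs: the pixel scale at ``center``,
                # and the pixel (and pixel^2) to arcsecond (arcsecond^2)
                # conversions derived from that scale.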

                # Test the pixel scale computation.
                func = ComputePixelScale("base_LocalWcs_CDMatrix_1_1",
                                         "base_LocalWcs_CDMatrix_1_2",
                                         "base_LocalWcs_CDMatrix_2_1",
                                         "base_LocalWcs_CDMatrix_2_2")
                pixelScale = self._funcVal(func, parq)
                self.assertTrue(np.allclose(
                    wcs.getPixelScale(center).asArcseconds(),
                    pixelScale.values,
                    rtol=1e-8,
                    atol=0))

                # Test pixel -> arcsec conversion.
                func = ConvertPixelToArcseconds("dipoleSep",
                                                "base_LocalWcs_CDMatrix_1_1",
                                                "base_LocalWcs_CDMatrix_1_2",
                                                "base_LocalWcs_CDMatrix_2_1",
                                                "base_LocalWcs_CDMatrix_2_2")
                val = self._funcVal(func, parq)
                self.assertTrue(np.allclose(pixelScale.values * dipoleSep,
                                            val.values,
                                            atol=1e-16,
                                            rtol=1e-16))

                # Test pixel^2 -> arcsec^2 conversion.
                func = ConvertPixelSqToArcsecondsSq("ixx",
                                                    "base_LocalWcs_CDMatrix_1_1",
                                                    "base_LocalWcs_CDMatrix_1_2",
                                                    "base_LocalWcs_CDMatrix_2_1",
                                                    "base_LocalWcs_CDMatrix_2_2")
                val = self._funcVal(func, parq)
                self.assertTrue(np.allclose(pixelScale.values ** 2 * ixx,
                                            val.values,
                                            atol=1e-16,
                                            rtol=1e-16))

    def _makeWcs(self, dec=53.1595451514076):
        """Create a wcs from real CFHT values.

        Returns
        -------
        wcs : `lsst.afw.geom.SkyWcs`
            Created wcs.
        """
        metadata = dafBase.PropertySet()

        metadata.set("SIMPLE", "T")
        metadata.set("BITPIX", -32)
        metadata.set("NAXIS", 2)
        metadata.set("NAXIS1", 1024)
        metadata.set("NAXIS2", 1153)
        metadata.set("RADECSYS", 'FK5')
        metadata.set("EQUINOX", 2000.)

        metadata.setDouble("CRVAL1", 215.604025685476)
        metadata.setDouble("CRVAL2", dec)
        metadata.setDouble("CRPIX1", 1109.99981456774)
        metadata.setDouble("CRPIX2", 560.018167811613)
        metadata.set("CTYPE1", 'RA---SIN')
        metadata.set("CTYPE2", 'DEC--SIN')

        metadata.setDouble("CD1_1", 5.10808596133527E-05)
        metadata.setDouble("CD1_2", 1.85579539217196E-07)
        metadata.setDouble("CD2_2", -5.10281493481982E-05)
        metadata.setDouble("CD2_1", -8.27440751733828E-07)

        return afwGeom.makeSkyWcs(metadata)

    def testRatio(self):
        """Test the Ratio functor.
        """
        self.dataDict["base_PsfFlux_instFlux"] = np.full(self.nRecords, 1000)
        self.dataDict["base_PsfFlux_instFluxErr"] = np.full(self.nRecords, 100)
        parq = self.simulateMultiParquet(self.dataDict)

        func = Ratio("base_PsfFlux_instFlux", "base_PsfFlux_instFluxErr")

        val = self._funcVal(func, parq)
        self.assertTrue(np.allclose(np.full(self.nRecords, 10),
                                    val.values,
                                    atol=1e-16,
                                    rtol=1e-16))


class MyMemoryTestCase(lsst.utils.tests.MemoryTestCase):
    pass


def setup_module(module):
    lsst.utils.tests.init()


if __name__ == "__main__":
    lsst.utils.tests.init()
    unittest.main()