Coverage for tests/test_functors.py: 14%

# This file is part of pipe_tasks.
#
# Developed for the LSST Data Management System.
# This product includes software developed by the LSST Project
# (http://www.lsst.org).
# See the COPYRIGHT file at the top-level directory of this distribution
# for details of code ownership.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

import astropy.units as u
import copy
import functools
import numpy as np
import os
import pandas as pd
import unittest
import tempfile
import shutil

import lsst.daf.base as dafBase
import lsst.afw.geom as afwGeom
import lsst.geom as geom
from lsst.sphgeom import HtmPixelization
import lsst.meas.base as measBase
import lsst.utils.tests
from lsst.pipe.tasks.parquetTable import MultilevelParquetTable, ParquetTable
from lsst.daf.butler import Butler, DatasetType
from lsst.pipe.tasks.functors import (CompositeFunctor, CustomFunctor, Column, RAColumn,
                                      DecColumn, Mag, MagDiff, Color, StarGalaxyLabeller,
                                      DeconvolvedMoments, SdssTraceSize, PsfSdssTraceSizeDiff,
                                      HsmTraceSize, PsfHsmTraceSizeDiff, HsmFwhm,
                                      LocalPhotometry, LocalNanojansky, LocalNanojanskyErr,
                                      LocalMagnitude, LocalMagnitudeErr,
                                      LocalWcs, ComputePixelScale, ConvertPixelToArcseconds)

ROOT = os.path.abspath(os.path.dirname(__file__))


class FunctorTestCase(unittest.TestCase):

    def simulateMultiParquet(self, dataDict):
        """Create a simple test MultilevelParquetTable
        """
        simpleDF = pd.DataFrame(dataDict)
        dfFilterDSCombos = []
        for ds in self.datasets:
            for band in self.bands:
                df = copy.copy(simpleDF)
                df = df.reindex(sorted(df.columns), axis=1)  # reindex returns a copy; keep it
                df['dataset'] = ds
                df['band'] = band
                df.columns = pd.MultiIndex.from_tuples(
                    [(ds, band, c) for c in df.columns],
                    names=('dataset', 'band', 'column'))
                dfFilterDSCombos.append(df)

        df = functools.reduce(lambda d1, d2: d1.join(d2), dfFilterDSCombos)

        return MultilevelParquetTable(dataFrame=df)

    def simulateParquet(self, dataDict):
        df = pd.DataFrame(dataDict)
        return ParquetTable(dataFrame=df)

    def getDatasetHandle(self, parq):
        """Put the underlying DataFrame into the test butler and return a
        deferred dataset handle for it.
        """
        df = parq._df
        lo, hi = HtmPixelization(7).universe().ranges()[0]
        value = np.random.randint(lo, hi)
        ref = self.butler.put(df, self.datasetType, dataId={'htm7': value})
        return self.butler.getDeferred(ref)

    def setUp(self):
        np.random.seed(1234)
        self.datasets = ['forced_src', 'meas', 'ref']
        self.bands = ['g', 'r']
        self.columns = ['coord_ra', 'coord_dec']
        self.nRecords = 5
        self.dataDict = {
            "coord_ra": [3.77654137, 3.77643059, 3.77621148, 3.77611944, 3.77610396],
            "coord_dec": [0.01127624, 0.01127787, 0.01127543, 0.01127543, 0.01127543]}
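        # The coordinate values above are in radians, as they would be in an
        # afw source table; the RA/Dec functors are expected to return degrees
        # (see testCoords below).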

        # Set up butler
        self.root = tempfile.mkdtemp(dir=ROOT)
        Butler.makeRepo(self.root)
        self.butler = Butler(self.root, run="test_run")
        self.datasetType = DatasetType("data", dimensions=('htm7',), storageClass="DataFrame",
                                       universe=self.butler.registry.dimensions)
        self.butler.registry.registerDatasetType(self.datasetType)

    def tearDown(self):
        if os.path.exists(self.root):
            shutil.rmtree(self.root, ignore_errors=True)

    def _funcVal(self, functor, parq):
        """Evaluate the functor on both the parquet table and a deferred
        butler handle and check that the two results agree.
        """
        self.assertIsInstance(functor.name, str)
        self.assertIsInstance(functor.shortname, str)

        handle = self.getDatasetHandle(parq)

        val = functor(parq)
        val2 = functor(handle)
        self.assertTrue((val == val2).all())
        self.assertIsInstance(val, pd.Series)

        val = functor(parq, dropna=True)
        val2 = functor(handle, dropna=True)
        self.assertTrue((val == val2).all())
        self.assertEqual(val.isnull().sum(), 0)

        return val

    def _differenceVal(self, functor, parq1, parq2):
        self.assertIsInstance(functor.name, str)
        self.assertIsInstance(functor.shortname, str)

        handle1 = self.getDatasetHandle(parq1)
        handle2 = self.getDatasetHandle(parq2)

        val = functor.difference(parq1, parq2)
        val2 = functor.difference(handle1, handle2)
        self.assertTrue(val.equals(val2))
        self.assertIsInstance(val, pd.Series)

        val = functor.difference(parq1, parq2, dropna=True)
        val2 = functor.difference(handle1, handle2, dropna=True)
        self.assertTrue(val.equals(val2))
        self.assertEqual(val.isnull().sum(), 0)

        val1 = self._funcVal(functor, parq1)
        val2 = self._funcVal(functor, parq2)

        self.assertTrue(np.allclose(val, val1 - val2))

        return val

    def testColumn(self):
        self.columns.append("base_FootprintArea_value")
        self.dataDict["base_FootprintArea_value"] = np.full(self.nRecords, 1)
        parq = self.simulateMultiParquet(self.dataDict)
        func = Column('base_FootprintArea_value', filt='g')
        self._funcVal(func, parq)

        parq = self.simulateParquet(self.dataDict)
        func = Column('base_FootprintArea_value')
        self._funcVal(func, parq)

    def testCustom(self):
        self.columns.append("base_FootprintArea_value")
        self.dataDict["base_FootprintArea_value"] = np.random.rand(self.nRecords)
        parq = self.simulateMultiParquet(self.dataDict)
        func = CustomFunctor('2*base_FootprintArea_value', filt='g')
        val = self._funcVal(func, parq)

        func2 = Column('base_FootprintArea_value', filt='g')

        self.assertTrue(np.allclose(val.values, 2*func2(parq).values, atol=1e-13, rtol=0))

        parq = self.simulateParquet(self.dataDict)
        func = CustomFunctor('2 * base_FootprintArea_value')
        val = self._funcVal(func, parq)
        func2 = Column('base_FootprintArea_value')

        self.assertTrue(np.allclose(val.values, 2*func2(parq).values, atol=1e-13, rtol=0))

    def testCoords(self):
        parq = self.simulateMultiParquet(self.dataDict)
        ra = self._funcVal(RAColumn(), parq)
        dec = self._funcVal(DecColumn(), parq)

        columnDict = {'dataset': 'ref', 'band': 'g',
                      'column': ['coord_ra', 'coord_dec']}
        # Keep the full column MultiIndex so the (dataset, band, column) keys below resolve.
        coords = parq.toDataFrame(columns=columnDict, droplevels=False) / np.pi * 180.

        self.assertTrue(np.allclose(ra, coords[('ref', 'g', 'coord_ra')], atol=1e-13, rtol=0))
        self.assertTrue(np.allclose(dec, coords[('ref', 'g', 'coord_dec')], atol=1e-13, rtol=0))

        # single-level column index table
        parq = self.simulateParquet(self.dataDict)
        ra = self._funcVal(RAColumn(), parq)
        dec = self._funcVal(DecColumn(), parq)

        coords = parq.toDataFrame(columns=['coord_ra', 'coord_dec']) / np.pi * 180.

        self.assertTrue(np.allclose(ra, coords['coord_ra'], atol=1e-13, rtol=0))
        self.assertTrue(np.allclose(dec, coords['coord_dec'], atol=1e-13, rtol=0))

    def testMag(self):
        self.columns.extend(["base_PsfFlux_instFlux", "base_PsfFlux_instFluxErr"])
        self.dataDict["base_PsfFlux_instFlux"] = np.full(self.nRecords, 1000)
        self.dataDict["base_PsfFlux_instFluxErr"] = np.full(self.nRecords, 10)
        parq = self.simulateMultiParquet(self.dataDict)
        # Change the value of one dataset/filter combination.
        parq._df[("meas", "g", "base_PsfFlux_instFlux")] -= 1

        fluxName = 'base_PsfFlux'

        # Check that things work when the dataset is provided explicitly.
        for dataset in ['forced_src', 'meas']:
            psfMag_G = self._funcVal(Mag(fluxName, dataset=dataset,
                                         filt='g'),
                                     parq)
            psfMag_R = self._funcVal(Mag(fluxName, dataset=dataset,
                                         filt='r'),
                                     parq)

            psfColor_GR = self._funcVal(Color(fluxName, 'g', 'r',
                                              dataset=dataset),
                                        parq)

            self.assertTrue(np.allclose((psfMag_G - psfMag_R).dropna(), psfColor_GR, rtol=0, atol=1e-13))

        # Check the behavior when the dataset is not provided: the color
        # defaults to forced_src while Mag defaults to meas, so the two
        # should disagree here.
        psfMag_G = self._funcVal(Mag(fluxName, filt='g'), parq)
        psfMag_R = self._funcVal(Mag(fluxName, filt='r'), parq)

        psfColor_GR = self._funcVal(Color(fluxName, 'g', 'r'), parq)

        # These should *not* be equal.
        self.assertFalse(np.allclose((psfMag_G - psfMag_R).dropna(), psfColor_GR))

    def testMagDiff(self):
        self.columns.extend(["base_PsfFlux_instFlux", "base_PsfFlux_instFluxErr",
                             "modelfit_CModel_instFlux", "modelfit_CModel_instFluxErr"])
        self.dataDict["base_PsfFlux_instFlux"] = np.full(self.nRecords, 1000)
        self.dataDict["base_PsfFlux_instFluxErr"] = np.full(self.nRecords, 10)
        self.dataDict["modelfit_CModel_instFlux"] = np.full(self.nRecords, 1000)
        self.dataDict["modelfit_CModel_instFluxErr"] = np.full(self.nRecords, 10)
        parq = self.simulateMultiParquet(self.dataDict)

        for filt in self.bands:
            val = self._funcVal(MagDiff('base_PsfFlux', 'modelfit_CModel', filt=filt), parq)

            mag1 = self._funcVal(Mag('modelfit_CModel', filt=filt), parq)
            mag2 = self._funcVal(Mag('base_PsfFlux', filt=filt), parq)
            self.assertTrue(np.allclose((mag2 - mag1).dropna(), val, rtol=0, atol=1e-13))

    def testDifference(self):
        """Test the .difference method using MagDiff as the example.
        """
        self.columns.extend(["base_PsfFlux_instFlux", "base_PsfFlux_instFluxErr",
                             "modelfit_CModel_instFlux", "modelfit_CModel_instFluxErr"])

        self.dataDict["base_PsfFlux_instFlux"] = np.full(self.nRecords, 1000)
        self.dataDict["modelfit_CModel_instFlux"] = np.full(self.nRecords, 1000)
        parq1 = self.simulateMultiParquet(self.dataDict)

        self.dataDict["base_PsfFlux_instFlux"] = np.full(self.nRecords, 999)
        self.dataDict["modelfit_CModel_instFlux"] = np.full(self.nRecords, 999)
        parq2 = self.simulateMultiParquet(self.dataDict)

        magDiff = MagDiff('base_PsfFlux', 'modelfit_CModel', filt='g')

        # Assert that differences are computed properly.
        self._differenceVal(magDiff, parq1, parq2)

    def testLabeller(self):
        # Covering the code is better than nothing
        self.columns.append("base_ClassificationExtendedness_value")
        self.dataDict["base_ClassificationExtendedness_value"] = np.full(self.nRecords, 1)
        parq = self.simulateMultiParquet(self.dataDict)
        labels = self._funcVal(StarGalaxyLabeller(), parq)  # noqa

    def testPixelScale(self):
        # Test that the pixel scale and pix->arcsec calculations perform as
        # expected.
        pass

    def testOther(self):
        self.columns.extend(["ext_shapeHSM_HsmSourceMoments_xx", "ext_shapeHSM_HsmSourceMoments_yy",
                             "base_SdssShape_xx", "base_SdssShape_yy",
                             "ext_shapeHSM_HsmPsfMoments_xx", "ext_shapeHSM_HsmPsfMoments_yy",
                             "base_SdssShape_psf_xx", "base_SdssShape_psf_yy"])
        self.dataDict["ext_shapeHSM_HsmSourceMoments_xx"] = np.full(self.nRecords, 1 / np.sqrt(2))
        self.dataDict["ext_shapeHSM_HsmSourceMoments_yy"] = np.full(self.nRecords, 1 / np.sqrt(2))
        self.dataDict["base_SdssShape_xx"] = np.full(self.nRecords, 1 / np.sqrt(2))
        self.dataDict["base_SdssShape_yy"] = np.full(self.nRecords, 1 / np.sqrt(2))
        self.dataDict["ext_shapeHSM_HsmPsfMoments_xx"] = np.full(self.nRecords, 1 / np.sqrt(2))
        self.dataDict["ext_shapeHSM_HsmPsfMoments_yy"] = np.full(self.nRecords, 1 / np.sqrt(2))
        self.dataDict["base_SdssShape_psf_xx"] = np.full(self.nRecords, 1)
        self.dataDict["base_SdssShape_psf_yy"] = np.full(self.nRecords, 1)
        parq = self.simulateMultiParquet(self.dataDict)
        # Covering the code is better than nothing
        for filt in self.bands:
            for Func in [DeconvolvedMoments,
                         SdssTraceSize,
                         PsfSdssTraceSizeDiff,
                         HsmTraceSize, PsfHsmTraceSizeDiff, HsmFwhm]:
                val = self._funcVal(Func(filt=filt), parq)  # noqa

    def _compositeFuncVal(self, functor, parq):
        self.assertIsInstance(functor, CompositeFunctor)

        handle = self.getDatasetHandle(parq)

        df = functor(parq)
        df2 = functor(handle)
        self.assertTrue(df.equals(df2))

        self.assertIsInstance(df, pd.DataFrame)
        self.assertTrue(np.all([k in df.columns for k in functor.funcDict.keys()]))

        df = functor(parq, dropna=True)
        df2 = functor(handle, dropna=True)
        self.assertTrue(df.equals(df2))

        # Check that there are no nulls
        self.assertFalse(df.isnull().any(axis=None))

        return df

    def _compositeDifferenceVal(self, functor, parq1, parq2):
        self.assertIsInstance(functor, CompositeFunctor)

        handle1 = self.getDatasetHandle(parq1)
        handle2 = self.getDatasetHandle(parq2)

        df = functor.difference(parq1, parq2)
        df2 = functor.difference(handle1, handle2)
        self.assertTrue(df.equals(df2))

        self.assertIsInstance(df, pd.DataFrame)
        self.assertTrue(np.all([k in df.columns for k in functor.funcDict.keys()]))

        df = functor.difference(parq1, parq2, dropna=True)
        df2 = functor.difference(handle1, handle2, dropna=True)
        self.assertTrue(df.equals(df2))

        # Check that there are no nulls
        self.assertFalse(df.isnull().any(axis=None))

        df1 = functor(parq1)
        df2 = functor(parq2)

        self.assertTrue(np.allclose(df.values, df1.values - df2.values))

        return df

    def testComposite(self):
        self.columns.extend(["modelfit_CModel_instFlux", "base_PsfFlux_instFlux"])
        self.dataDict["modelfit_CModel_instFlux"] = np.full(self.nRecords, 1)
        self.dataDict["base_PsfFlux_instFlux"] = np.full(self.nRecords, 1)

        parq = self.simulateMultiParquet(self.dataDict)
        # Modify the r-band value slightly.
        parq._df[("meas", "r", "base_PsfFlux_instFlux")] -= 0.1

        filt = 'g'
        funcDict = {'psfMag_ref': Mag('base_PsfFlux', dataset='ref'),
                    'ra': RAColumn(),
                    'dec': DecColumn(),
                    'psfMag': Mag('base_PsfFlux', filt=filt),
                    'cmodel_magDiff': MagDiff('base_PsfFlux',
                                              'modelfit_CModel', filt=filt)}
        func = CompositeFunctor(funcDict)
        df = self._compositeFuncVal(func, parq)

        # Repeat the same test, but define the filter globally instead of per functor.
        funcDict2 = {'psfMag_ref': Mag('base_PsfFlux', dataset='ref'),
                     'ra': RAColumn(),
                     'dec': DecColumn(),
                     'psfMag': Mag('base_PsfFlux'),
                     'cmodel_magDiff': MagDiff('base_PsfFlux',
                                               'modelfit_CModel')}

        func2 = CompositeFunctor(funcDict2, filt=filt)
        df2 = self._compositeFuncVal(func2, parq)
        self.assertTrue(df.equals(df2))

        func2.filt = 'r'
        df3 = self._compositeFuncVal(func2, parq)
        # Because we modified the r-band values above, this should differ from the g-band result.
        self.assertFalse(df2.equals(df3))

        # Make sure things work when passing a list instead of a dict.
        funcs = [Mag('base_PsfFlux', dataset='ref'),
                 RAColumn(),
                 DecColumn(),
                 Mag('base_PsfFlux', filt=filt),
                 MagDiff('base_PsfFlux', 'modelfit_CModel', filt=filt)]

        df = self._compositeFuncVal(CompositeFunctor(funcs), parq)

    def testCompositeSimple(self):
        """Test the single-level composite functor for basic functionality.
        """
        self.columns.extend(["modelfit_CModel_instFlux", "base_PsfFlux_instFlux"])
        self.dataDict["modelfit_CModel_instFlux"] = np.full(self.nRecords, 1)
        self.dataDict["base_PsfFlux_instFlux"] = np.full(self.nRecords, 1)

        parq = self.simulateParquet(self.dataDict)
        funcDict = {'ra': RAColumn(),
                    'dec': DecColumn(),
                    'psfMag': Mag('base_PsfFlux'),
                    'cmodel_magDiff': MagDiff('base_PsfFlux',
                                              'modelfit_CModel')}
        func = CompositeFunctor(funcDict)
        df = self._compositeFuncVal(func, parq)  # noqa

    def testCompositeColor(self):
        self.dataDict["base_PsfFlux_instFlux"] = np.full(self.nRecords, 1000)
        self.dataDict["base_PsfFlux_instFluxErr"] = np.full(self.nRecords, 10)
        parq = self.simulateMultiParquet(self.dataDict)
        funcDict = {'a': Mag('base_PsfFlux', dataset='meas', filt='g'),
                    'b': Mag('base_PsfFlux', dataset='forced_src', filt='g'),
                    'c': Color('base_PsfFlux', 'g', 'r')}
        # Covering the code is better than nothing
        df = self._compositeFuncVal(CompositeFunctor(funcDict), parq)  # noqa

    def testCompositeDifference(self):
        self.dataDict["base_PsfFlux_instFlux"] = np.full(self.nRecords, 1000)
        self.dataDict["base_PsfFlux_instFluxErr"] = np.full(self.nRecords, 10)
        parq1 = self.simulateMultiParquet(self.dataDict)

        self.dataDict["base_PsfFlux_instFlux"] = np.full(self.nRecords, 999)
        self.dataDict["base_PsfFlux_instFluxErr"] = np.full(self.nRecords, 9)
        parq2 = self.simulateMultiParquet(self.dataDict)

        funcDict = {'a': Mag('base_PsfFlux', dataset='meas', filt='g'),
                    'b': Mag('base_PsfFlux', dataset='forced_src', filt='g'),
                    'c': Color('base_PsfFlux', 'g', 'r')}
        # Covering the code is better than nothing
        df = self._compositeDifferenceVal(CompositeFunctor(funcDict), parq1, parq2)  # noqa

    def testCompositeFail(self):
        """Test a composite functor where one of the component functors
        references a nonexistent column.
        """
        self.dataDict["base_PsfFlux_instFlux"] = np.full(self.nRecords, 1000)
        parq = self.simulateMultiParquet(self.dataDict)

        funcDict = {'good': Column("base_PsfFlux_instFlux"),
                    'bad': Column('not_a_column')}

        df = self._compositeFuncVal(CompositeFunctor(funcDict), parq)  # noqa

    def testLocalPhotometry(self):
        """Test the local photometry functors.
        """
        flux = 1000
        fluxErr = 10
        calib = 10
        calibErr = 1
        self.dataDict["base_PsfFlux_instFlux"] = np.full(self.nRecords, flux)
        self.dataDict["base_PsfFlux_instFluxErr"] = np.full(self.nRecords, fluxErr)
        self.dataDict["base_LocalPhotoCalib"] = np.full(self.nRecords, calib)
        self.dataDict["base_LocalPhotoCalibErr"] = np.full(self.nRecords, calibErr)
        parq = self.simulateMultiParquet(self.dataDict)
        func = LocalPhotometry("base_PsfFlux_instFlux",
                               "base_PsfFlux_instFluxErr",
                               "base_LocalPhotoCalib",
                               "base_LocalPhotoCalibErr")
        df = parq.toDataFrame(columns={"dataset": "meas",
                                       "band": "g",
                                       "columns": ["base_PsfFlux_instFlux",
                                                   "base_PsfFlux_instFluxErr",
                                                   "base_LocalPhotoCalib",
                                                   "base_LocalPhotoCalibErr"]})
        nanoJansky = func.instFluxToNanojansky(
            df[("meas", "g", "base_PsfFlux_instFlux")],
            df[("meas", "g", "base_LocalPhotoCalib")])
        mag = func.instFluxToMagnitude(
            df[("meas", "g", "base_PsfFlux_instFlux")],
            df[("meas", "g", "base_LocalPhotoCalib")])
        nanoJanskyErr = func.instFluxErrToNanojanskyErr(
            df[("meas", "g", "base_PsfFlux_instFlux")],
            df[("meas", "g", "base_PsfFlux_instFluxErr")],
            df[("meas", "g", "base_LocalPhotoCalib")],
            df[("meas", "g", "base_LocalPhotoCalibErr")])
        magErr = func.instFluxErrToMagnitudeErr(
            df[("meas", "g", "base_PsfFlux_instFlux")],
            df[("meas", "g", "base_PsfFlux_instFluxErr")],
            df[("meas", "g", "base_LocalPhotoCalib")],
            df[("meas", "g", "base_LocalPhotoCalibErr")])
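
        # The checks below assume the local calibration acts as a simple scale
        # factor, flux_nJy = instFlux * localPhotoCalib, with the error
        # propagated in quadrature:
        # sigma_nJy = hypot(instFluxErr * calib, instFlux * calibErr).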
        self.assertTrue(np.allclose(nanoJansky.values,
                                    flux * calib,
                                    atol=1e-13,
                                    rtol=0))
        self.assertTrue(np.allclose(mag.values,
                                    (flux * calib * u.nJy).to_value(u.ABmag),
                                    atol=1e-13,
                                    rtol=0))
        self.assertTrue(np.allclose(nanoJanskyErr.values,
                                    np.hypot(fluxErr * calib, flux * calibErr),
                                    atol=1e-13,
                                    rtol=0))
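
        # Propagating the flux error through m = -2.5*log10(F) gives
        # sigma_m = (2.5 / ln 10) * sigma_F / F, which is what the magnitude
        # error is checked against here.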
        self.assertTrue(np.allclose(
            magErr.values,
            2.5 / np.log(10) * nanoJanskyErr.values / nanoJansky.values,
            atol=1e-13,
            rtol=0))

        # Test the functors against the values computed above.
        self._testLocalPhotometryFunctors(LocalNanojansky,
                                          parq,
                                          nanoJansky)
        self._testLocalPhotometryFunctors(LocalNanojanskyErr,
                                          parq,
                                          nanoJanskyErr)
        self._testLocalPhotometryFunctors(LocalMagnitude,
                                          parq,
                                          mag)
        self._testLocalPhotometryFunctors(LocalMagnitudeErr,
                                          parq,
                                          magErr)

    def _testLocalPhotometryFunctors(self, functor, parq, testValues):
        func = functor("base_PsfFlux_instFlux",
                       "base_PsfFlux_instFluxErr",
                       "base_LocalPhotoCalib",
                       "base_LocalPhotoCalibErr")
        val = self._funcVal(func, parq)
        self.assertTrue(np.allclose(testValues.values,
                                    val.values,
                                    atol=1e-13,
                                    rtol=0))

    def testConvertPixelToArcseconds(self):
        """Test calculations of the pixel scale and conversions of pixel to
        arcseconds.
        """
        dipoleSep = 10
        np.random.seed(1234)
        testPixelDeltas = np.random.uniform(-100, 100, size=(self.nRecords, 2))
        import lsst.afw.table as afwTable
        localWcsPlugin = measBase.EvaluateLocalWcsPlugin(
            None,
            "base_LocalWcs",
            afwTable.SourceTable.makeMinimalSchema(),
            None)
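
        # Sample declinations over the full range and pixel positions spanning
        # roughly twice the reference pixel (CRPIX) of the WCS built in
        # _makeWcs below.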
        for dec in np.linspace(-90, 90, 10):
            for x, y in zip(np.random.uniform(2 * 1109.99981456774, size=10),
                            np.random.uniform(2 * 560.018167811613, size=10)):

                center = geom.Point2D(x, y)
                wcs = self._makeWcs(dec)
                skyOrigin = wcs.pixelToSky(center)

                linAffMatrix = localWcsPlugin.makeLocalTransformMatrix(wcs,
                                                                       center)
                self.dataDict["dipoleSep"] = np.full(self.nRecords, dipoleSep)
                self.dataDict["slot_Centroid_x"] = np.full(self.nRecords, x)
                self.dataDict["slot_Centroid_y"] = np.full(self.nRecords, y)
                self.dataDict["someCentroid_x"] = x + testPixelDeltas[:, 0]
                self.dataDict["someCentroid_y"] = y + testPixelDeltas[:, 1]
                self.dataDict["base_LocalWcs_CDMatrix_1_1"] = np.full(self.nRecords,
                                                                      linAffMatrix[0, 0])
                self.dataDict["base_LocalWcs_CDMatrix_1_2"] = np.full(self.nRecords,
                                                                      linAffMatrix[0, 1])
                self.dataDict["base_LocalWcs_CDMatrix_2_1"] = np.full(self.nRecords,
                                                                      linAffMatrix[1, 0])
                self.dataDict["base_LocalWcs_CDMatrix_2_2"] = np.full(self.nRecords,
                                                                      linAffMatrix[1, 1])
                parq = self.simulateMultiParquet(self.dataDict)
                func = LocalWcs("base_LocalWcs_CDMatrix_1_1",
                                "base_LocalWcs_CDMatrix_1_2",
                                "base_LocalWcs_CDMatrix_2_1",
                                "base_LocalWcs_CDMatrix_2_2")
                df = parq.toDataFrame(columns={"dataset": "meas",
                                               "band": "g",
                                               "columns": ["dipoleSep",
                                                           "slot_Centroid_x",
                                                           "slot_Centroid_y",
                                                           "someCentroid_x",
                                                           "someCentroid_y",
                                                           "base_LocalWcs_CDMatrix_1_1",
                                                           "base_LocalWcs_CDMatrix_1_2",
                                                           "base_LocalWcs_CDMatrix_2_1",
                                                           "base_LocalWcs_CDMatrix_2_2"]})

                # Exercise the full set of functions in LocalWcs.
                sepRadians = func.getSkySeperationFromPixel(
                    df[("meas", "g", "someCentroid_x")] - df[("meas", "g", "slot_Centroid_x")],
                    df[("meas", "g", "someCentroid_y")] - df[("meas", "g", "slot_Centroid_y")],
                    0.0,
                    0.0,
                    df[("meas", "g", "base_LocalWcs_CDMatrix_1_1")],
                    df[("meas", "g", "base_LocalWcs_CDMatrix_1_2")],
                    df[("meas", "g", "base_LocalWcs_CDMatrix_2_1")],
                    df[("meas", "g", "base_LocalWcs_CDMatrix_2_2")])

                # Test the functor values against afw SkyWcs computations.
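                # The local CD matrix is a linear approximation of the
                # pixel-to-sky mapping around the centroid, so for these small
                # offsets the separations should agree with the full SkyWcs to
                # roughly a part in 1e6.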
                for centX, centY, sep in zip(testPixelDeltas[:, 0],
                                             testPixelDeltas[:, 1],
                                             sepRadians.values):
                    afwSepRadians = skyOrigin.separation(
                        wcs.pixelToSky(x + centX, y + centY)).asRadians()
                    self.assertAlmostEqual(1 - sep / afwSepRadians, 0, places=6)

                # Test the pixel scale computation.
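                # (The functor derives the scale from the local CD matrix
                # columns; here we only check it against the afw SkyWcs value.)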
                func = ComputePixelScale("base_LocalWcs_CDMatrix_1_1",
                                         "base_LocalWcs_CDMatrix_1_2",
                                         "base_LocalWcs_CDMatrix_2_1",
                                         "base_LocalWcs_CDMatrix_2_2")
                pixelScale = self._funcVal(func, parq)
                self.assertTrue(np.allclose(
                    wcs.getPixelScale(center).asArcseconds(),
                    pixelScale.values,
                    rtol=1e-8,
                    atol=0))

                func = ConvertPixelToArcseconds("dipoleSep",
                                                "base_LocalWcs_CDMatrix_1_1",
                                                "base_LocalWcs_CDMatrix_1_2",
                                                "base_LocalWcs_CDMatrix_2_1",
                                                "base_LocalWcs_CDMatrix_2_2")
                val = self._funcVal(func, parq)
                self.assertTrue(np.allclose(pixelScale.values * dipoleSep,
                                            val.values,
                                            atol=1e-16,
                                            rtol=1e-16))

    def _makeWcs(self, dec=53.1595451514076):
        """Create a wcs from real CFHT values.

        Parameters
        ----------
        dec : `float`, optional
            Declination to use for CRVAL2, in degrees.

        Returns
        -------
        wcs : `lsst.afw.geom.SkyWcs`
            Created wcs.
        """
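        # A SIN-projected FK5 WCS; the CD matrix below corresponds to a pixel
        # scale of roughly 0.18 arcsec.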
        metadata = dafBase.PropertySet()

        metadata.set("SIMPLE", "T")
        metadata.set("BITPIX", -32)
        metadata.set("NAXIS", 2)
        metadata.set("NAXIS1", 1024)
        metadata.set("NAXIS2", 1153)
        metadata.set("RADECSYS", 'FK5')
        metadata.set("EQUINOX", 2000.)

        metadata.setDouble("CRVAL1", 215.604025685476)
        metadata.setDouble("CRVAL2", dec)
        metadata.setDouble("CRPIX1", 1109.99981456774)
        metadata.setDouble("CRPIX2", 560.018167811613)
        metadata.set("CTYPE1", 'RA---SIN')
        metadata.set("CTYPE2", 'DEC--SIN')

        metadata.setDouble("CD1_1", 5.10808596133527E-05)
        metadata.setDouble("CD1_2", 1.85579539217196E-07)
        metadata.setDouble("CD2_2", -5.10281493481982E-05)
        metadata.setDouble("CD2_1", -8.27440751733828E-07)

        return afwGeom.makeSkyWcs(metadata)


class MyMemoryTestCase(lsst.utils.tests.MemoryTestCase):
    pass


def setup_module(module):
    lsst.utils.tests.init()


if __name__ == "__main__":
    lsst.utils.tests.init()
    unittest.main()