Coverage for tests/test_htmIndex.py : 14%

#
# LSST Data Management System
#
# Copyright 2008-2016 AURA/LSST.
#
# This product includes software developed by the
# LSST Project (http://www.lsst.org/).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the LSST License Statement and
# the GNU General Public License along with this program.  If not,
# see <https://www.lsstcorp.org/LegalNotices/>.
#
import os
import unittest
from collections import Counter

import astropy.time
import astropy.units
import numpy as np

import lsst.geom
import lsst.afw.table as afwTable
import lsst.afw.geom as afwGeom
import lsst.daf.persistence as dafPersist
from lsst.meas.algorithms import (IngestIndexedReferenceTask, LoadIndexedReferenceObjectsTask,
                                  LoadIndexedReferenceObjectsConfig, getRefFluxField)
from lsst.meas.algorithms.loadReferenceObjects import hasNanojanskyFluxUnits
import lsst.utils

from ingestIndexTestBase import (makeIngestIndexConfig, IngestIndexCatalogTestBase,
                                 make_coord)
REGENERATE_COMPARISON = False  # Regenerate comparison data?
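# To regenerate the comparison catalog used by testAgainstPersisted, set this to True and run
# that test: it rewrites the file at self.testCatPath and then fails on purpose as a reminder
# to switch the flag back to False.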
class IngestIndexTaskValidateTestCase(lsst.utils.tests.TestCase):
    """Test validation of IngestIndexReferenceConfig."""
    def testValidateRaDecMag(self):
        config = makeIngestIndexConfig()
        config.validate()

        for name in ("ra_name", "dec_name", "mag_column_list"):
            with self.subTest(name=name):
                config = makeIngestIndexConfig()
                setattr(config, name, None)
                with self.assertRaises(ValueError):
                    config.validate()

    def testValidateRaDecErr(self):
        # check that a basic config validates
        config = makeIngestIndexConfig(withRaDecErr=True)
        config.validate()

        # check that a config with any of these fields missing does not validate
        for name in ("ra_err_name", "dec_err_name", "coord_err_unit"):
            with self.subTest(name=name):
                config = makeIngestIndexConfig(withRaDecErr=True)
                setattr(config, name, None)
                with self.assertRaises(ValueError):
                    config.validate()

        # check that coord_err_unit must be an astropy unit
        config = makeIngestIndexConfig(withRaDecErr=True)
        config.coord_err_unit = "nonsense unit"
        with self.assertRaisesRegex(ValueError, "is not a valid astropy unit string"):
            config.validate()

    def testValidateMagErr(self):
        config = makeIngestIndexConfig(withMagErr=True)
        config.validate()

        # test for missing names
        for name in config.mag_column_list:
            with self.subTest(name=name):
                config = makeIngestIndexConfig(withMagErr=True)
                del config.mag_err_column_map[name]
                with self.assertRaises(ValueError):
                    config.validate()

        # test for incorrect names
        for name in config.mag_column_list:
            with self.subTest(name=name):
                config = makeIngestIndexConfig(withMagErr=True)
                config.mag_err_column_map["badName"] = config.mag_err_column_map[name]
                del config.mag_err_column_map[name]
                with self.assertRaises(ValueError):
                    config.validate()

    def testValidatePm(self):
        basicNames = ["pm_ra_name", "pm_dec_name", "epoch_name", "epoch_format", "epoch_scale"]

        for withPmErr in (False, True):
            config = makeIngestIndexConfig(withPm=True, withPmErr=withPmErr)
            config.validate()
            del config

            if withPmErr:
                names = basicNames + ["pm_ra_err_name", "pm_dec_err_name"]
            else:
                names = basicNames
            for name in names:
                with self.subTest(name=name, withPmErr=withPmErr):
                    config = makeIngestIndexConfig(withPm=True, withPmErr=withPmErr)
                    setattr(config, name, None)
                    with self.assertRaises(ValueError):
                        config.validate()

    def testValidateParallax(self):
        """Validation should fail if any parallax-related fields are missing.
        """
        names = ["parallax_name", "epoch_name", "epoch_format", "epoch_scale", "parallax_err_name"]

        config = makeIngestIndexConfig(withParallax=True)
        config.validate()
        del config

        for name in names:
            with self.subTest(name=name):
                config = makeIngestIndexConfig(withParallax=True)
                setattr(config, name, None)
                with self.assertRaises(ValueError, msg=name):
                    config.validate()
class IngestIndexReferenceTaskTestCase(IngestIndexCatalogTestBase, lsst.utils.tests.TestCase):
    """Tests of ingesting and validating an HTM Indexed Reference Catalog.
    """
    def testSanity(self):
        """Sanity-check that compCats contains some entries with sources."""
        numWithSources = 0
        for idList in self.compCats.values():
            if len(idList) > 0:
                numWithSources += 1
        self.assertGreater(numWithSources, 0)

    def testAgainstPersisted(self):
        """Test that we can get a specific shard from a pre-persisted refcat.
        """
        shardId = 2222
        dataset_name = IngestIndexedReferenceTask.ConfigClass().dataset_config.ref_dataset_name
        dataId = self.indexer.makeDataId(shardId, dataset_name)
        self.assertTrue(self.testButler.datasetExists('ref_cat', dataId))
        refCat = self.testButler.get('ref_cat', dataId)
        if REGENERATE_COMPARISON:
            if os.path.exists(self.testCatPath):
                os.unlink(self.testCatPath)
            refCat.writeFits(self.testCatPath)
            self.fail("New comparison data written; unset REGENERATE_COMPARISON in order to proceed")

        ex1 = refCat.extract('*')
        testCat = afwTable.SimpleCatalog.readFits(self.testCatPath)

        ex2 = testCat.extract('*')
        self.assertEqual(set(ex1.keys()), set(ex2.keys()))
        for key in ex1:
            np.testing.assert_array_almost_equal(ex1[key], ex2[key], err_msg=f"{key} values not equal")

    def testIngestSetsVersion(self):
        """Test that newly ingested catalogs get the correct version number set.
        """
        # Test with multiple files and standard config
        config = makeIngestIndexConfig(withRaDecErr=True, withMagErr=True, withPm=True, withPmErr=True)
        # don't use the default depth, to avoid taking the time to create thousands of file locks
        config.dataset_config.indexer.active.depth = self.depth
        IngestIndexedReferenceTask.parseAndRun(
            args=[self.input_dir, "--output", self.outPath + "/output_setsVersion",
                  self.skyCatalogFile],
            config=config)
        # A newly-ingested refcat should be marked format_version=1.
        loader = LoadIndexedReferenceObjectsTask(butler=dafPersist.Butler(
            self.outPath + "/output_setsVersion"))
        self.assertEqual(loader.dataset_config.format_version, 1)
    def testIngestConfigOverrides(self):
        """Test IngestIndexedReferenceTask with different configs."""
        config2 = makeIngestIndexConfig(withRaDecErr=True, withMagErr=True, withPm=True, withPmErr=True,
                                        withParallax=True)
        config2.ra_name = "ra"
        config2.dec_name = "dec"
        config2.dataset_config.ref_dataset_name = 'myrefcat'
        # Change the indexing depth to prove we can.
        # Smaller is better than larger because it makes fewer files.
        config2.dataset_config.indexer.active.depth = self.depth - 1
        config2.is_photometric_name = 'is_phot'
        config2.is_resolved_name = 'is_res'
        config2.is_variable_name = 'is_var'
        config2.id_name = 'id'
        config2.extra_col_names = ['val1', 'val2', 'val3']
        config2.file_reader.header_lines = 1
        config2.file_reader.colnames = [
            'id', 'ra', 'dec', 'ra_err', 'dec_err', 'a', 'a_err', 'b', 'b_err', 'is_phot',
            'is_res', 'is_var', 'val1', 'val2', 'val3', 'pm_ra', 'pm_dec', 'pm_ra_err',
            'pm_dec_err', 'parallax', 'parallax_err', 'unixtime',
        ]
        config2.file_reader.delimiter = '|'
        # this also tests changing the delimiter
        IngestIndexedReferenceTask.parseAndRun(
            args=[self.input_dir, "--output", self.outPath+"/output_override",
                  self.skyCatalogFileDelim], config=config2)

        # Test if we can get back the catalog with a non-standard dataset name
        butler = dafPersist.Butler(self.outPath+"/output_override")
        loaderConfig = LoadIndexedReferenceObjectsConfig()
        loaderConfig.ref_dataset_name = "myrefcat"
        loader = LoadIndexedReferenceObjectsTask(butler=butler, config=loaderConfig)
        self.checkAllRowsInRefcat(loader, self.skyCatalog, config2)

        # test that a catalog can be loaded even with a name not used for ingestion
        butler = dafPersist.Butler(self.testRepoPath)
        loaderConfig2 = LoadIndexedReferenceObjectsConfig()
        loaderConfig2.ref_dataset_name = self.testDatasetName
        loader = LoadIndexedReferenceObjectsTask(butler=butler, config=loaderConfig2)
        self.checkAllRowsInRefcat(loader, self.skyCatalog, config2)
    def testLoadIndexedReferenceConfig(self):
        """Make sure LoadIndexedReferenceConfig has the needed fields, including
        at least one inherited from the base class LoadReferenceObjectsConfig.
        """
        config = LoadIndexedReferenceObjectsConfig()
        self.assertEqual(config.ref_dataset_name, "cal_ref_cat")
        self.assertEqual(config.defaultFilter, "")
    def testLoadSkyCircle(self):
        """Test LoadIndexedReferenceObjectsTask.loadSkyCircle with default config."""
        loader = LoadIndexedReferenceObjectsTask(butler=self.testButler)
        for tupl, idList in self.compCats.items():
            cent = make_coord(*tupl)
            lcat = loader.loadSkyCircle(cent, self.searchRadius, filterName='a')
            self.assertTrue(lcat.refCat.isContiguous())
            self.assertFalse("camFlux" in lcat.refCat.schema)
            self.assertEqual(Counter(lcat.refCat['id']), Counter(idList))
            if len(lcat.refCat) > 0:
                # make sure there are no duplicate ids
                self.assertEqual(len(set(Counter(lcat.refCat['id']).values())), 1)
                self.assertEqual(len(set(Counter(idList).values())), 1)
                # A default-loaded sky circle should not have centroids
                self.assertNotIn("centroid_x", lcat.refCat.schema)
                self.assertNotIn("centroid_y", lcat.refCat.schema)
                self.assertNotIn("hasCentroid", lcat.refCat.schema)
            else:
                self.assertEqual(len(idList), 0)

    def testLoadPixelBox(self):
        """Test LoadIndexedReferenceObjectsTask.loadPixelBox with default config."""
        loader = LoadIndexedReferenceObjectsTask(butler=self.testButler)
        numFound = 0
        for tupl, idList in self.compCats.items():
            cent = make_coord(*tupl)
            bbox = lsst.geom.Box2I(lsst.geom.Point2I(30, -5), lsst.geom.Extent2I(1000, 1004))  # arbitrary
            ctr_pix = bbox.getCenter()
            # catalog is sparse, so set pixel scale such that bbox encloses region
            # used to generate compCats
            pixel_scale = 2*self.searchRadius/max(bbox.getHeight(), bbox.getWidth())
            cdMatrix = afwGeom.makeCdMatrix(scale=pixel_scale)
            wcs = afwGeom.makeSkyWcs(crval=cent, crpix=ctr_pix, cdMatrix=cdMatrix)
            result = loader.loadPixelBox(bbox=bbox, wcs=wcs, filterName="a")
            self.assertFalse("camFlux" in result.refCat.schema)
            self.assertGreaterEqual(len(result.refCat), len(idList))
            numFound += len(result.refCat)
        self.assertGreater(numFound, 0)
    def testDefaultFilterAndFilterMap(self):
        """Test defaultFilter and filterMap parameters of LoadIndexedReferenceObjectsConfig."""
        config = LoadIndexedReferenceObjectsConfig()
        config.defaultFilter = "b"
        config.filterMap = {"aprime": "a"}
        loader = LoadIndexedReferenceObjectsTask(butler=self.testButler, config=config)
        for tupl, idList in self.compCats.items():
            cent = make_coord(*tupl)
            lcat = loader.loadSkyCircle(cent, self.searchRadius)
            self.assertEqual(lcat.fluxField, "camFlux")
            if len(idList) > 0:
                defFluxFieldName = getRefFluxField(lcat.refCat.schema, None)
                self.assertTrue(defFluxFieldName in lcat.refCat.schema)
                aprimeFluxFieldName = getRefFluxField(lcat.refCat.schema, "aprime")
                self.assertTrue(aprimeFluxFieldName in lcat.refCat.schema)
                break  # just need one test
    def testProperMotion(self):
        """Test proper motion correction."""
        center = make_coord(93.0, -90.0)
        loader = LoadIndexedReferenceObjectsTask(butler=self.testButler)
        references = loader.loadSkyCircle(center, self.searchRadius, filterName='a').refCat
        original = references.copy(True)

        # Zero epoch change --> no proper motion correction (except minor numerical effects)
        loader.applyProperMotions(references, self.epoch)
        self.assertFloatsAlmostEqual(references["coord_ra"], original["coord_ra"], rtol=1.0e-14)
        self.assertFloatsAlmostEqual(references["coord_dec"], original["coord_dec"], rtol=1.0e-14)
        self.assertFloatsEqual(references["coord_raErr"], original["coord_raErr"])
        self.assertFloatsEqual(references["coord_decErr"], original["coord_decErr"])

        # One year difference
        loader.applyProperMotions(references, self.epoch + 1.0*astropy.units.yr)
        self.assertFloatsEqual(references["pm_raErr"], original["pm_raErr"])
        self.assertFloatsEqual(references["pm_decErr"], original["pm_decErr"])
        for orig, ref in zip(original, references):
            self.assertAnglesAlmostEqual(orig.getCoord().separation(ref.getCoord()),
                                         self.properMotionAmt, maxDiff=1.0e-6*lsst.geom.arcseconds)
            self.assertAnglesAlmostEqual(orig.getCoord().bearingTo(ref.getCoord()),
                                         self.properMotionDir, maxDiff=1.0e-4*lsst.geom.arcseconds)
        # after the shift, the coordinate errors should grow by the proper motion errors, added in quadrature
        predictedRaErr = np.hypot(original["coord_raErr"], original["pm_raErr"])
        predictedDecErr = np.hypot(original["coord_decErr"], original["pm_decErr"])
        self.assertFloatsAlmostEqual(references["coord_raErr"], predictedRaErr)
        self.assertFloatsAlmostEqual(references["coord_decErr"], predictedDecErr)
    def testLoadVersion0(self):
        """Test reading a pre-written format_version=0 (Jy flux) catalog.
        It should be converted to have nJy fluxes.
        """
        path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data/version0')
        loader = LoadIndexedReferenceObjectsTask(butler=dafPersist.Butler(path))
        self.assertEqual(loader.dataset_config.format_version, 0)
        result = loader.loadSkyCircle(make_coord(10, 20),
                                      5*lsst.geom.degrees, filterName='a')
        self.assertTrue(hasNanojanskyFluxUnits(result.refCat.schema))
        catalog = afwTable.SimpleCatalog.readFits(os.path.join(path, 'ref_cats/cal_ref_cat/4022.fits'))
        # the on-disk fluxes are in Jy, so the loaded values should be scaled by 1e9 (1 Jy = 1e9 nJy)
        self.assertFloatsEqual(catalog['a_flux']*1e9, result.refCat['a_flux'])
        self.assertFloatsEqual(catalog['a_fluxSigma']*1e9, result.refCat['a_fluxErr'])
        self.assertFloatsEqual(catalog['b_flux']*1e9, result.refCat['b_flux'])
        self.assertFloatsEqual(catalog['b_fluxSigma']*1e9, result.refCat['b_fluxErr'])
    def testLoadVersion1(self):
        """Test reading a format_version=1 catalog (fluxes unchanged)."""
        path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data/version1')
        loader = LoadIndexedReferenceObjectsTask(butler=dafPersist.Butler(path))
        self.assertEqual(loader.dataset_config.format_version, 1)
        result = loader.loadSkyCircle(make_coord(10, 20),
                                      5*lsst.geom.degrees, filterName='a')
        self.assertTrue(hasNanojanskyFluxUnits(result.refCat.schema))
        catalog = afwTable.SimpleCatalog.readFits(os.path.join(path, 'ref_cats/cal_ref_cat/4022.fits'))
        self.assertFloatsEqual(catalog['a_flux'], result.refCat['a_flux'])
        self.assertFloatsEqual(catalog['a_fluxErr'], result.refCat['a_fluxErr'])
        self.assertFloatsEqual(catalog['b_flux'], result.refCat['b_flux'])
        self.assertFloatsEqual(catalog['b_fluxErr'], result.refCat['b_fluxErr'])
class TestMemory(lsst.utils.tests.MemoryTestCase):
    pass


def setup_module(module):
    lsst.utils.tests.init()
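# Usage note: running this module directly goes through the __main__ block below, while pytest
# calls setup_module() above; both paths call lsst.utils.tests.init() before the tests run.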
if __name__ == "__main__":
    lsst.utils.tests.init()
    unittest.main()