#
# LSST Data Management System
#
# Copyright 2008-2016 AURA/LSST.
#
# This product includes software developed by the
# LSST Project (http://www.lsst.org/).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the LSST License Statement and
# the GNU General Public License along with this program. If not,
# see <https://www.lsstcorp.org/LegalNotices/>.
#
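"""Tests of ingesting and loading HTM-indexed reference catalogs
(IngestIndexedReferenceTask and LoadIndexedReferenceObjectsTask).
"""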

import os
import unittest
from collections import Counter

import astropy.time
import astropy.units
import numpy as np

import lsst.geom
import lsst.afw.table as afwTable
import lsst.afw.geom as afwGeom
import lsst.daf.persistence as dafPersist
from lsst.meas.algorithms import (IngestIndexedReferenceTask, LoadIndexedReferenceObjectsTask,
                                  LoadIndexedReferenceObjectsConfig, getRefFluxField)
from lsst.meas.algorithms.loadReferenceObjects import hasNanojanskyFluxUnits
import lsst.utils

from ingestIndexTestBase import (makeIngestIndexConfig, IngestIndexCatalogTestBase,
                                 make_coord)

REGENERATE_COMPARISON = False  # Regenerate comparison data?


class IngestIndexTaskValidateTestCase(lsst.utils.tests.TestCase):
    """Test validation of IngestIndexedReferenceConfig."""

    def testValidateRaDecMag(self):
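        # A basic config should validate; a config missing any of
        # ra_name, dec_name, or mag_column_list should not.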
        config = makeIngestIndexConfig()
        config.validate()

        for name in ("ra_name", "dec_name", "mag_column_list"):
            with self.subTest(name=name):
                config = makeIngestIndexConfig()
                setattr(config, name, None)
                with self.assertRaises(ValueError):
                    config.validate()

    def testValidateRaDecErr(self):
        # check that a basic config validates
        config = makeIngestIndexConfig(withRaDecErr=True)
        config.validate()

        # check that a config with any of these fields missing does not validate
        for name in ("ra_err_name", "dec_err_name", "coord_err_unit"):
            with self.subTest(name=name):
                config = makeIngestIndexConfig(withRaDecErr=True)
                setattr(config, name, None)
                with self.assertRaises(ValueError):
                    config.validate()

        # check that coord_err_unit must be an astropy unit
        config = makeIngestIndexConfig(withRaDecErr=True)
        config.coord_err_unit = "nonsense unit"
        with self.assertRaisesRegex(ValueError, "is not a valid astropy unit string"):
            config.validate()

    def testValidateMagErr(self):
        config = makeIngestIndexConfig(withMagErr=True)
        config.validate()

        # test for missing names
        for name in config.mag_column_list:
            with self.subTest(name=name):
                config = makeIngestIndexConfig(withMagErr=True)
                del config.mag_err_column_map[name]
                with self.assertRaises(ValueError):
                    config.validate()

        # test for incorrect names
        for name in config.mag_column_list:
            with self.subTest(name=name):
                config = makeIngestIndexConfig(withMagErr=True)
                config.mag_err_column_map["badName"] = config.mag_err_column_map[name]
                del config.mag_err_column_map[name]
                with self.assertRaises(ValueError):
                    config.validate()

    def testValidatePm(self):
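        # These fields are required for proper-motion ingestion; the error
        # fields are additionally required when withPmErr is set.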
        basicNames = ["pm_ra_name", "pm_dec_name", "epoch_name", "epoch_format", "epoch_scale"]

        for withPmErr in (False, True):
            config = makeIngestIndexConfig(withPm=True, withPmErr=withPmErr)
            config.validate()
            del config

            if withPmErr:
                names = basicNames + ["pm_ra_err_name", "pm_dec_err_name"]
            else:
                names = basicNames
            for name in names:
                with self.subTest(name=name, withPmErr=withPmErr):
                    config = makeIngestIndexConfig(withPm=True, withPmErr=withPmErr)
                    setattr(config, name, None)
                    with self.assertRaises(ValueError):
                        config.validate()

    def testValidateParallax(self):
        """Validation should fail if any parallax-related fields are missing.
        """
        names = ["parallax_name", "epoch_name", "epoch_format", "epoch_scale", "parallax_err_name"]

        config = makeIngestIndexConfig(withParallax=True)
        config.validate()
        del config

        for name in names:
            with self.subTest(name=name):
                config = makeIngestIndexConfig(withParallax=True)
                setattr(config, name, None)
                with self.assertRaises(ValueError, msg=name):
                    config.validate()


class IngestIndexReferenceTaskTestCase(IngestIndexCatalogTestBase, lsst.utils.tests.TestCase):
    """Tests of ingesting and validating an HTM Indexed Reference Catalog.
    """

    def testSanity(self):
        """Sanity-check that compCats contains some entries with sources."""
        numWithSources = 0
        for idList in self.compCats.values():
            if len(idList) > 0:
                numWithSources += 1
        self.assertGreater(numWithSources, 0)

    def testAgainstPersisted(self):
        """Test that we can get a specific shard from a pre-persisted refcat.
        """
        shardId = 2222
        dataset_name = IngestIndexedReferenceTask.ConfigClass().dataset_config.ref_dataset_name
        dataId = self.indexer.makeDataId(shardId, dataset_name)
        self.assertTrue(self.testButler.datasetExists('ref_cat', dataId))
        refCat = self.testButler.get('ref_cat', dataId)
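        # If requested, write out new comparison data and fail loudly so the
        # regenerated file is reviewed before being trusted.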
        if REGENERATE_COMPARISON:
            if os.path.exists(self.testCatPath):
                os.unlink(self.testCatPath)
            refCat.writeFits(self.testCatPath)
            self.fail("New comparison data written; unset REGENERATE_COMPARISON in order to proceed")

        ex1 = refCat.extract('*')
        testCat = afwTable.SimpleCatalog.readFits(self.testCatPath)

        ex2 = testCat.extract('*')
        self.assertEqual(set(ex1.keys()), set(ex2.keys()))
        for key in ex1:
            np.testing.assert_array_almost_equal(ex1[key], ex2[key], err_msg=f"{key} values not equal")

    def testIngestSetsVersion(self):
        """Test that newly ingested catalogs get the correct version number set.
        """
        def runTest(withRaDecErr):
            outputPath = os.path.join(self.outPath, "output_setsVersion"
                                      + ("_withRaDecErr" if withRaDecErr else ""))
            # Test with multiple files and standard config
            config = makeIngestIndexConfig(withRaDecErr=withRaDecErr, withMagErr=True,
                                           withPm=True, withPmErr=True)
            # don't use the default depth, to avoid taking the time to create thousands of file locks
            config.dataset_config.indexer.active.depth = self.depth
            IngestIndexedReferenceTask.parseAndRun(
                args=[self.input_dir, "--output", outputPath, self.skyCatalogFile],
                config=config)
            # A newly-ingested refcat should be marked format_version=1.
            loader = LoadIndexedReferenceObjectsTask(butler=dafPersist.Butler(outputPath))
            self.assertEqual(loader.dataset_config.format_version, 1)

        runTest(withRaDecErr=True)
        runTest(withRaDecErr=False)

    def testIngestConfigOverrides(self):
        """Test IngestIndexedReferenceTask with different configs.
        """
        config2 = makeIngestIndexConfig(withRaDecErr=True, withMagErr=True, withPm=True, withPmErr=True,
                                        withParallax=True)
        config2.ra_name = "ra"
        config2.dec_name = "dec"
        config2.dataset_config.ref_dataset_name = 'myrefcat'
        # Change the indexing depth to prove we can.
        # Smaller is better than larger because it makes fewer files.
        config2.dataset_config.indexer.active.depth = self.depth - 1
        config2.is_photometric_name = 'is_phot'
        config2.is_resolved_name = 'is_res'
        config2.is_variable_name = 'is_var'
        config2.id_name = 'id'
        config2.extra_col_names = ['val1', 'val2', 'val3']
        config2.file_reader.header_lines = 1
        config2.file_reader.colnames = [
            'id', 'ra', 'dec', 'ra_err', 'dec_err', 'a', 'a_err', 'b', 'b_err', 'is_phot',
            'is_res', 'is_var', 'val1', 'val2', 'val3', 'pm_ra', 'pm_dec', 'pm_ra_err',
            'pm_dec_err', 'parallax', 'parallax_err', 'unixtime',
        ]
        config2.file_reader.delimiter = '|'
        # this also tests changing the delimiter
        IngestIndexedReferenceTask.parseAndRun(
            args=[self.input_dir, "--output", self.outPath+"/output_override",
                  self.skyCatalogFileDelim], config=config2)

        # Test if we can get back the catalog with a non-standard dataset name
        butler = dafPersist.Butler(self.outPath+"/output_override")
        loaderConfig = LoadIndexedReferenceObjectsConfig()
        loaderConfig.ref_dataset_name = "myrefcat"
        loader = LoadIndexedReferenceObjectsTask(butler=butler, config=loaderConfig)
        self.checkAllRowsInRefcat(loader, self.skyCatalog, config2)

        # test that a catalog can be loaded even with a name not used for ingestion
        butler = dafPersist.Butler(self.testRepoPath)
        loaderConfig2 = LoadIndexedReferenceObjectsConfig()
        loaderConfig2.ref_dataset_name = self.testDatasetName
        loader = LoadIndexedReferenceObjectsTask(butler=butler, config=loaderConfig2)
        self.checkAllRowsInRefcat(loader, self.skyCatalog, config2)

    def testLoadIndexedReferenceConfig(self):
        """Make sure LoadIndexedReferenceConfig has the needed fields,
        including at least one from the base class LoadReferenceObjectsConfig.
        """
        config = LoadIndexedReferenceObjectsConfig()
        self.assertEqual(config.ref_dataset_name, "cal_ref_cat")
        self.assertEqual(config.defaultFilter, "")

    def testLoadSkyCircle(self):
        """Test LoadIndexedReferenceObjectsTask.loadSkyCircle with default config."""
        loader = LoadIndexedReferenceObjectsTask(butler=self.testButler)
        for tupl, idList in self.compCats.items():
            cent = make_coord(*tupl)
            lcat = loader.loadSkyCircle(cent, self.searchRadius, filterName='a')
            self.assertTrue(lcat.refCat.isContiguous())
            self.assertFalse("camFlux" in lcat.refCat.schema)
            self.assertEqual(Counter(lcat.refCat['id']), Counter(idList))
            if len(lcat.refCat) > 0:
                # make sure there are no duplicate ids
                self.assertEqual(len(set(Counter(lcat.refCat['id']).values())), 1)
                self.assertEqual(len(set(Counter(idList).values())), 1)
                # A default-loaded sky circle should not have centroids
                self.assertNotIn("centroid_x", lcat.refCat.schema)
                self.assertNotIn("centroid_y", lcat.refCat.schema)
                self.assertNotIn("hasCentroid", lcat.refCat.schema)
            else:
                self.assertEqual(len(idList), 0)

    def testLoadPixelBox(self):
        """Test LoadIndexedReferenceObjectsTask.loadPixelBox with default config."""
        loader = LoadIndexedReferenceObjectsTask(butler=self.testButler)
        numFound = 0
        for tupl, idList in self.compCats.items():
            cent = make_coord(*tupl)
            bbox = lsst.geom.Box2I(lsst.geom.Point2I(30, -5), lsst.geom.Extent2I(1000, 1004))  # arbitrary
            ctr_pix = bbox.getCenter()
            # catalog is sparse, so set pixel scale such that bbox encloses region
            # used to generate compCats
            pixel_scale = 2*self.searchRadius/max(bbox.getHeight(), bbox.getWidth())
            cdMatrix = afwGeom.makeCdMatrix(scale=pixel_scale)
            wcs = afwGeom.makeSkyWcs(crval=cent, crpix=ctr_pix, cdMatrix=cdMatrix)
            result = loader.loadPixelBox(bbox=bbox, wcs=wcs, filterName="a")
            self.assertFalse("camFlux" in result.refCat.schema)
            self.assertGreaterEqual(len(result.refCat), len(idList))
            numFound += len(result.refCat)
        self.assertGreater(numFound, 0)

    def testDefaultFilterAndFilterMap(self):
        """Test defaultFilter and filterMap parameters of LoadIndexedReferenceObjectsConfig."""
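        # Default to filter "b" and map camera filter "aprime" onto refcat filter "a".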
        config = LoadIndexedReferenceObjectsConfig()
        config.defaultFilter = "b"
        config.filterMap = {"aprime": "a"}
        loader = LoadIndexedReferenceObjectsTask(butler=self.testButler, config=config)
        for tupl, idList in self.compCats.items():
            cent = make_coord(*tupl)
            lcat = loader.loadSkyCircle(cent, self.searchRadius)
            self.assertEqual(lcat.fluxField, "camFlux")
            if len(idList) > 0:
                defFluxFieldName = getRefFluxField(lcat.refCat.schema, None)
                self.assertTrue(defFluxFieldName in lcat.refCat.schema)
                aprimeFluxFieldName = getRefFluxField(lcat.refCat.schema, "aprime")
                self.assertTrue(aprimeFluxFieldName in lcat.refCat.schema)
                break  # just need one test

    def testProperMotion(self):
        """Test proper motion correction"""
        center = make_coord(93.0, -90.0)
        loader = LoadIndexedReferenceObjectsTask(butler=self.testButler)
        references = loader.loadSkyCircle(center, self.searchRadius, filterName='a').refCat
        original = references.copy(True)

        # Zero epoch change --> no proper motion correction (except minor numerical effects)
        loader.applyProperMotions(references, self.epoch)
        self.assertFloatsAlmostEqual(references["coord_ra"], original["coord_ra"], rtol=1.0e-14)
        self.assertFloatsAlmostEqual(references["coord_dec"], original["coord_dec"], rtol=1.0e-14)
        self.assertFloatsEqual(references["coord_raErr"], original["coord_raErr"])
        self.assertFloatsEqual(references["coord_decErr"], original["coord_decErr"])

        # One year difference
        loader.applyProperMotions(references, self.epoch + 1.0*astropy.units.yr)
        self.assertFloatsEqual(references["pm_raErr"], original["pm_raErr"])
        self.assertFloatsEqual(references["pm_decErr"], original["pm_decErr"])
        for orig, ref in zip(original, references):
            self.assertAnglesAlmostEqual(orig.getCoord().separation(ref.getCoord()),
                                         self.properMotionAmt, maxDiff=1.0e-6*lsst.geom.arcseconds)
            self.assertAnglesAlmostEqual(orig.getCoord().bearingTo(ref.getCoord()),
                                         self.properMotionDir, maxDiff=1.0e-4*lsst.geom.arcseconds)
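        # After the one-year epoch shift, the coordinate errors should grow by
        # the proper-motion errors, added in quadrature.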
        predictedRaErr = np.hypot(original["coord_raErr"], original["pm_raErr"])
        predictedDecErr = np.hypot(original["coord_decErr"], original["pm_decErr"])
        self.assertFloatsAlmostEqual(references["coord_raErr"], predictedRaErr)
        self.assertFloatsAlmostEqual(references["coord_decErr"], predictedDecErr)

    def testLoadVersion0(self):
        """Test reading a pre-written format_version=0 (Jy flux) catalog.
        It should be converted to have nJy fluxes.
        """
        path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data/version0')
        loader = LoadIndexedReferenceObjectsTask(butler=dafPersist.Butler(path))
        self.assertEqual(loader.dataset_config.format_version, 0)
        result = loader.loadSkyCircle(make_coord(10, 20),
                                      5*lsst.geom.degrees, filterName='a')
        self.assertTrue(hasNanojanskyFluxUnits(result.refCat.schema))
        catalog = afwTable.SimpleCatalog.readFits(os.path.join(path, 'ref_cats/cal_ref_cat/4022.fits'))
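        # The on-disk version 0 fluxes are in Jy; the loader should have scaled
        # them to nJy (1 Jy = 1e9 nJy) and renamed fluxSigma to fluxErr.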
        self.assertFloatsEqual(catalog['a_flux']*1e9, result.refCat['a_flux'])
        self.assertFloatsEqual(catalog['a_fluxSigma']*1e9, result.refCat['a_fluxErr'])
        self.assertFloatsEqual(catalog['b_flux']*1e9, result.refCat['b_flux'])
        self.assertFloatsEqual(catalog['b_fluxSigma']*1e9, result.refCat['b_fluxErr'])

    def testLoadVersion1(self):
        """Test reading a format_version=1 catalog (fluxes unchanged)."""
        path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data/version1')
        loader = LoadIndexedReferenceObjectsTask(butler=dafPersist.Butler(path))
        self.assertEqual(loader.dataset_config.format_version, 1)
        result = loader.loadSkyCircle(make_coord(10, 20),
                                      5*lsst.geom.degrees, filterName='a')
        self.assertTrue(hasNanojanskyFluxUnits(result.refCat.schema))
        catalog = afwTable.SimpleCatalog.readFits(os.path.join(path, 'ref_cats/cal_ref_cat/4022.fits'))
        self.assertFloatsEqual(catalog['a_flux'], result.refCat['a_flux'])
        self.assertFloatsEqual(catalog['a_fluxErr'], result.refCat['a_fluxErr'])
        self.assertFloatsEqual(catalog['b_flux'], result.refCat['b_flux'])
        self.assertFloatsEqual(catalog['b_fluxErr'], result.refCat['b_fluxErr'])


class TestMemory(lsst.utils.tests.MemoryTestCase):
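    """Check for resource/memory leaks (see lsst.utils.tests.MemoryTestCase)."""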
    pass

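# Standard pytest hook: initialize the LSST test framework before running this module's tests.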
def setup_module(module):
    lsst.utils.tests.init()


if __name__ == "__main__":
    lsst.utils.tests.init()
    unittest.main()