#
# LSST Data Management System
# Copyright 2008, 2009, 2010 LSST Corporation.
#
# This product includes software developed by the
# LSST Project (http://www.lsst.org/).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the LSST License Statement and
# the GNU General Public License along with this program. If not,
# see <http://www.lsstcorp.org/LegalNotices/>.
#
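"""Tests for merging Footprints and their peaks from multiple detection
catalogs with lsst.afw.detection.FootprintMergeList.
"""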

import unittest

import numpy as np

import lsst.utils.tests
import lsst.pex.exceptions
import lsst.geom
import lsst.afw.image as afwImage
import lsst.afw.detection as afwDetect
import lsst.afw.table as afwTable


def insertPsf(pos, im, psf, kernelSize, flux):
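    """Add a PSF image, scaled by ``flux``, into ``im`` around each (x, y) position in ``pos``."""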

    for x, y in pos:
        x0 = x - kernelSize//2
        y0 = y - kernelSize//2
        tmpbox = lsst.geom.Box2I(lsst.geom.Point2I(x0, y0),
                                 lsst.geom.Extent2I(kernelSize, kernelSize))
        tmp = psf.computeImage(lsst.geom.Point2D(x0, y0))
        tmp *= flux
        im.image[tmpbox, afwImage.LOCAL] += tmp


def mergeCatalogs(catList, names, peakDist, idFactory, indivNames=[], samePeakDist=-1.):
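    """Merge the SourceCatalogs in ``catList`` with FootprintMergeList.

    Returns the merged source catalog together with the number of merged
    objects and the total number of peaks across all of their footprints.
    """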

    schema = afwTable.SourceTable.makeMinimalSchema()
    merged = afwDetect.FootprintMergeList(schema, names)

    if not indivNames:
        indivNames = names

    # Merge the catalogs, then count the objects and peaks in the result
    mergedList = merged.getMergedSourceCatalog(catList, indivNames, peakDist,
                                               schema, idFactory, samePeakDist)
    nob = len(mergedList)
    npeaks = sum([len(ob.getFootprint().getPeaks()) for ob in mergedList])

    return mergedList, nob, npeaks


def isPeakInCatalog(peak, catalog):
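    """Return True if any peak in ``catalog`` has the same ``getI()`` value as ``peak``."""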

    for record in catalog:
        for p in record.getFootprint().getPeaks():
            if p.getI() == peak.getI():
                return True
    return False


class FootprintMergeCatalogTestCase(lsst.utils.tests.TestCase):

    def setUp(self):
        """Build up three different sets of objects that are to be merged."""
        pos1 = [(40, 40), (220, 35), (40, 48), (220, 50),
                (67, 67), (150, 50), (40, 90), (70, 160),
                (35, 255), (70, 180), (250, 200), (120, 120),
                (170, 180), (100, 210), (20, 210),
                ]
        pos2 = [(43, 45), (215, 31), (171, 258), (211, 117),
                (48, 99), (70, 160), (125, 45), (251, 33),
                (37, 170), (134, 191), (79, 223), (258, 182)
                ]
        pos3 = [(70, 170), (219, 41), (253, 173), (253, 192)]

        box = lsst.geom.Box2I(lsst.geom.Point2I(0, 0), lsst.geom.Point2I(300, 300))
        psfsig = 1.
        kernelSize = 41
        flux = 1000

        # Create a different-sized PSF for each image and insert it at the
        # desired positions
        im1 = afwImage.MaskedImageD(box)
        psf1 = afwDetect.GaussianPsf(kernelSize, kernelSize, psfsig)

        im2 = afwImage.MaskedImageD(box)
        psf2 = afwDetect.GaussianPsf(kernelSize, kernelSize, 2*psfsig)

        im3 = afwImage.MaskedImageD(box)
        psf3 = afwDetect.GaussianPsf(kernelSize, kernelSize, 1.3*psfsig)

        insertPsf(pos1, im1, psf1, kernelSize, flux)
        insertPsf(pos2, im2, psf2, kernelSize, flux)
        insertPsf(pos3, im3, psf3, kernelSize, flux)

        schema = afwTable.SourceTable.makeMinimalSchema()

        # Create SourceCatalogs from these objects
        fp1 = afwDetect.FootprintSet(
            im1, afwDetect.Threshold(0.001), "DETECTED")
        # Use a different ID factory for each footprint so that the peak IDs
        # overlap. This allows us to test ID collisions.
        table = afwTable.SourceTable.make(schema, afwTable.IdFactory.makeSimple())
        self.catalog1 = afwTable.SourceCatalog(table)
        fp1.makeSources(self.catalog1)

        table = afwTable.SourceTable.make(schema, afwTable.IdFactory.makeSimple())
        fp2 = afwDetect.FootprintSet(
            im2, afwDetect.Threshold(0.001), "DETECTED")
        self.catalog2 = afwTable.SourceCatalog(table)
        fp2.makeSources(self.catalog2)

        table = afwTable.SourceTable.make(schema, afwTable.IdFactory.makeSimple())
        fp3 = afwDetect.FootprintSet(
            im3, afwDetect.Threshold(0.001), "DETECTED")
        self.catalog3 = afwTable.SourceCatalog(table)
        fp3.makeSources(self.catalog3)

    def tearDown(self):
        del self.catalog1
        del self.catalog2
        del self.catalog3

    def assertUniqueIds(self, catalog):
        """Ensure that all of the peak IDs in a single parent are unique."""
        for src in catalog:
            # The peak catalog may be non-contiguous,
            # so add each peak individually
            peaks = [peak.getId() for peak in src.getFootprint().peaks]
            self.assertEqual(sorted(peaks), sorted(set(peaks)))

    def testMerge1(self):
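        """Test merging one, two, and three input catalogs.

        Covers invalid-argument handling as well as a range of peakDist and
        samePeakDist settings, checking footprint areas, peak counts, and the
        merge flags on footprints and peaks.
        """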

        idFactory = afwTable.IdFactory.makeSimple()
        # Add the first catalog only
        merge, nob, npeak = mergeCatalogs([self.catalog1], ["1"], [-1], idFactory)
        self.assertEqual(nob, 14)
        self.assertEqual(npeak, 15)

        for record in merge:
            self.assertTrue(record.get("merge_footprint_1"))
            for peak in record.getFootprint().getPeaks():
                self.assertTrue(peak.get("merge_peak_1"))

        # area for each object
        pixArea = np.empty(14)
        pixArea.fill(69)
        pixArea[1] = 135
        measArea = [i.getFootprint().getArea() for i in merge]
        np.testing.assert_array_equal(pixArea, measArea)

        # Add the first catalog and second catalog with the wrong name, which
        # should result in an exception being raised
        with self.assertRaises(lsst.pex.exceptions.LogicError):
            mergeCatalogs([self.catalog1, self.catalog2], ["1", "2"], [0, 0], idFactory, ["1", "3"])

        # Add the first catalog and second catalog with the wrong number of
        # peakDist elements, which should raise an exception
        with self.assertRaises(ValueError):
            mergeCatalogs([self.catalog1, self.catalog2], ["1", "2"], [0], idFactory, ["1", "3"])

        # Add the first catalog and second catalog with the wrong number of
        # filters, which should raise an exception
        with self.assertRaises(ValueError):
            mergeCatalogs([self.catalog1, self.catalog2], ["1"], [0], idFactory, ["1", "3"])

        # Add the first catalog and second catalog with minPeak < 1 so it will
        # not add new peaks
        merge, nob, npeak = mergeCatalogs([self.catalog1, self.catalog2], ["1", "2"], [0, -1], idFactory)
        self.assertEqual(nob, 22)
        self.assertEqual(npeak, 23)
        self.assertUniqueIds(merge)
        # area for each object
        pixArea = np.ones(22)
        pixArea[0] = 275
        pixArea[1] = 270
        pixArea[2:5].fill(69)
        pixArea[5] = 323
        pixArea[6] = 69
        pixArea[7] = 261
        pixArea[8:14].fill(69)
        pixArea[14:22].fill(261)
        measArea = [i.getFootprint().getArea() for i in merge]
        np.testing.assert_array_equal(pixArea, measArea)

        for record in merge:
            for peak in record.getFootprint().getPeaks():
                # Should only get peaks from catalog2 if catalog1 didn't
                # contribute to the footprint
                if record.get("merge_footprint_1"):
                    self.assertTrue(peak.get("merge_peak_1"))
                    self.assertFalse(peak.get("merge_peak_2"))
                else:
                    self.assertFalse(peak.get("merge_peak_1"))
                    self.assertTrue(peak.get("merge_peak_2"))

        # Same as previous, but with a third catalog added
        merge, nob, npeak = mergeCatalogs([self.catalog1, self.catalog2, self.catalog3],
                                          ["1", "2", "3"], [0, -1, -1],
                                          idFactory)
        self.assertEqual(nob, 19)
        self.assertEqual(npeak, 20)
        self.assertUniqueIds(merge)
        pixArea = np.ones(19)
        pixArea[0] = 416
        pixArea[1] = 270
        pixArea[2:4].fill(69)
        pixArea[4] = 323
        pixArea[5] = 69
        pixArea[6] = 406
        pixArea[7] = 69
        pixArea[8] = 493
        pixArea[9:13].fill(69)
        pixArea[12:19].fill(261)
        measArea = [i.getFootprint().getArea() for i in merge]
        np.testing.assert_array_equal(pixArea, measArea)

        for record in merge:
            for peak in record.getFootprint().getPeaks():
                # Should only get peaks from catalog2 if catalog1 didn't
                # contribute to the footprint
                if record.get("merge_footprint_1"):
                    self.assertTrue(peak.get("merge_peak_1"))
                    self.assertFalse(peak.get("merge_peak_2"))
                else:
                    self.assertFalse(peak.get("merge_peak_1"))
                    self.assertTrue(peak.get("merge_peak_2"))

        # Add all the catalogs with minPeak = 0 so all peaks will be added
        merge, nob, npeak = mergeCatalogs([self.catalog1, self.catalog2, self.catalog3],
                                          ["1", "2", "3"], [0, 0, 0],
                                          idFactory)
        self.assertEqual(nob, 19)
        self.assertEqual(npeak, 30)
        self.assertUniqueIds(merge)
        measArea = [i.getFootprint().getArea() for i in merge]
        np.testing.assert_array_equal(pixArea, measArea)

        # Add all the catalogs with minPeak = 10 so only some peaks will be
        # added to the footprint
        merge, nob, npeak = mergeCatalogs([self.catalog1, self.catalog2, self.catalog3],
                                          ["1", "2", "3"], 10, idFactory)
        self.assertEqual(nob, 19)
        self.assertEqual(npeak, 25)
        self.assertUniqueIds(merge)
        measArea = [i.getFootprint().getArea() for i in merge]
        np.testing.assert_array_equal(pixArea, measArea)

        for record in merge:
            for peak in record.getFootprint().getPeaks():
                if peak.get("merge_peak_1"):
                    self.assertTrue(record.get("merge_footprint_1"))
                    self.assertTrue(isPeakInCatalog(peak, self.catalog1))
                elif peak.get("merge_peak_2"):
                    self.assertTrue(record.get("merge_footprint_2"))
                    self.assertTrue(isPeakInCatalog(peak, self.catalog2))
                elif peak.get("merge_peak_3"):
                    self.assertTrue(record.get("merge_footprint_3"))
                    self.assertTrue(isPeakInCatalog(peak, self.catalog3))
                else:
                    self.fail("At least one merge_peak flag must be set")

        # Add all the catalogs with minPeak = 100 so no new peaks will be added
        merge, nob, npeak = mergeCatalogs([self.catalog1, self.catalog2, self.catalog3],
                                          ["1", "2", "3"], 100, idFactory)
        self.assertEqual(nob, 19)
        self.assertEqual(npeak, 20)
        self.assertUniqueIds(merge)
        measArea = [i.getFootprint().getArea() for i in merge]
        np.testing.assert_array_equal(pixArea, measArea)

        for record in merge:
            for peak in record.getFootprint().getPeaks():
                if peak.get("merge_peak_1"):
                    self.assertTrue(record.get("merge_footprint_1"))
                    self.assertTrue(isPeakInCatalog(peak, self.catalog1))
                elif peak.get("merge_peak_2"):
                    self.assertTrue(record.get("merge_footprint_2"))
                    self.assertTrue(isPeakInCatalog(peak, self.catalog2))
                elif peak.get("merge_peak_3"):
                    self.assertTrue(record.get("merge_footprint_3"))
                    self.assertTrue(isPeakInCatalog(peak, self.catalog3))
                else:
                    self.fail("At least one merge_peak flag must be set")

        # Merge with a large samePeakDist so that any peak matched across
        # catalogs is flagged in every catalog that contributed it
        merge, nob, npeak = mergeCatalogs([self.catalog1, self.catalog2, self.catalog3],
                                          ["1", "2", "3"], 100, idFactory, samePeakDist=40)

        self.assertUniqueIds(merge)
        # Indices of peaks detected in more than one catalog
        multiPeakIndex = [0, 2, 5, 7, 9]
        peakIndex = 0
        for record in merge:
            for peak in record.getFootprint().getPeaks():
                numPeak = np.sum([peak.get("merge_peak_1"), peak.get("merge_peak_2"),
                                  peak.get("merge_peak_3")])
                if peakIndex in multiPeakIndex:
                    self.assertGreater(numPeak, 1)
                else:
                    self.assertEqual(numPeak, 1)
                peakIndex += 1

    def testDM17431(self):
        """Test that priority order is respected.

        Specifically, test the case where lower-priority footprints overlap
        two previously disconnected higher-priority footprints.
        """

        box = lsst.geom.Box2I(lsst.geom.Point2I(0, 0), lsst.geom.Point2I(100, 100))
        psfsig = 1.
        kernelSize = 41
        flux = 1000
        cat1 = {}
        cat2 = {}
        peakDist = 10
        samePeakDist = 3

        # cat2 connects cat1's two separated footprints.
        # The cat2 peak at x=65 is too close to the cat1 peak at x=70 to be
        # added as a new peak, but not close enough to be matched as the same
        # peak.
        # cat1 has higher priority, so its peak at 70, not 65, should end up
        # in the merged catalog.
        cat1['pos'] = [(50, 50), (70, 50)]
        cat2['pos'] = [(55, 50), (65, 50)]
        schema = afwTable.SourceTable.makeMinimalSchema()
        idFactory = afwTable.IdFactory.makeSimple()
        table = afwTable.SourceTable.make(schema, idFactory)

        # Build a masked image, PSF, footprint set, and source catalog for
        # each input position list
        for (cat, psfFactor) in zip([cat1, cat2], [1, 2]):
            cat['mi'] = afwImage.MaskedImageD(box)
            cat['psf'] = afwDetect.GaussianPsf(kernelSize, kernelSize, psfFactor*psfsig)
            insertPsf(cat['pos'], cat['mi'], cat['psf'], kernelSize, flux)
            fp = afwDetect.FootprintSet(cat['mi'], afwDetect.Threshold(0.001), "DETECTED")
            cat['catalog'] = afwTable.SourceCatalog(table)
            fp.makeSources(cat['catalog'])

        merge, nob, npeak = mergeCatalogs([cat1['catalog'], cat2['catalog']], ["1", "2"],
                                          peakDist, idFactory, samePeakDist=samePeakDist)

        # Check that both higher-priority cat1 records survive peak merging
        for record in cat1['catalog']:
            for peak in record.getFootprint().getPeaks():
                self.assertTrue(isPeakInCatalog(peak, merge))


class MemoryTester(lsst.utils.tests.MemoryTestCase):
    pass


def setup_module(module):
    lsst.utils.tests.init()


if __name__ == "__main__":
    lsst.utils.tests.init()
    unittest.main()