Coverage for tests/testCadenceMetrics.py: 11%

Hot-keys on this page:
  r, m, x, p : toggle line displays
  j, k      : next / previous highlighted chunk
  0 (zero)  : top of page
  1 (one)   : first highlighted chunk
from __future__ import print_function

from builtins import zip

import matplotlib
matplotlib.use("Agg")

import math
import unittest

import numpy as np

import lsst.sims.maf.metrics as metrics
import lsst.utils.tests
class TestCadenceMetrics(unittest.TestCase):
    """Unit tests for the cadence metrics in lsst.sims.maf.metrics."""

    def testPhaseGapMetric(self):
        """
        Test the phase gap metric.
        """
        data = np.zeros(10, dtype=list(zip(['observationStartMJD'], [float])))
        # Observations spaced 0.25 days apart.
        data['observationStartMJD'] += np.arange(10)*.25

        # Single period of 0.5 days: every phase gap should be half a period.
        pgm = metrics.PhaseGapMetric(nPeriods=1, periodMin=0.5, periodMax=0.5)
        metricVal = pgm.run(data)

        meanGap = pgm.reduceMeanGap(metricVal)
        medianGap = pgm.reduceMedianGap(metricVal)
        worstPeriod = pgm.reduceWorstPeriod(metricVal)
        largestGap = pgm.reduceLargestGap(metricVal)

        self.assertEqual(meanGap, 0.5)
        self.assertEqual(medianGap, 0.5)
        self.assertEqual(worstPeriod, 0.5)
        self.assertEqual(largestGap, 0.5)

        # Two periods (0.25 and 0.5 days) probe different phase coverage.
        pgm = metrics.PhaseGapMetric(nPeriods=2, periodMin=0.25, periodMax=0.5)
        metricVal = pgm.run(data)

        meanGap = pgm.reduceMeanGap(metricVal)
        medianGap = pgm.reduceMedianGap(metricVal)
        worstPeriod = pgm.reduceWorstPeriod(metricVal)
        largestGap = pgm.reduceLargestGap(metricVal)

        self.assertEqual(meanGap, 0.75)
        self.assertEqual(medianGap, 0.75)
        self.assertEqual(worstPeriod, 0.25)
        self.assertEqual(largestGap, 1.)

    def testTemplateExists(self):
        """
        Test the TemplateExistsMetric.
        """
        names = ['finSeeing', 'observationStartMJD']
        types = [float, float]
        data = np.zeros(10, dtype=list(zip(names, types)))
        data['finSeeing'] = [2., 2., 3., 1., 1., 1., 0.5, 1., 0.4, 1.]
        data['observationStartMJD'] = np.arange(10)
        slicePoint = {'sid': 0}
        # So here we have 4 images w/o good previous templates.
        metric = metrics.TemplateExistsMetric(seeingCol='finSeeing')
        result = metric.run(data, slicePoint)
        self.assertEqual(result, 6./10.)

    def testUniformityMetric(self):
        """Test the UniformityMetric for degenerate and uniform distributions."""
        names = ['observationStartMJD']
        types = [float]
        data = np.zeros(100, dtype=list(zip(names, types)))
        metric = metrics.UniformityMetric()
        result1 = metric.run(data)
        # If all the observations are on the 1st day, should be 1.
        self.assertEqual(result1, 1)
        data['observationStartMJD'] = data['observationStartMJD']+365.25*10
        slicePoint = {'sid': 0}
        result2 = metric.run(data, slicePoint)
        # All on last day should also be 1.
        self.assertEqual(result2, 1)
        # Make a perfectly uniform dist.
        data['observationStartMJD'] = np.arange(0., 365.25*10, 365.25*10/100)
        result3 = metric.run(data, slicePoint)
        # Result should be zero for uniform.
        np.testing.assert_almost_equal(result3, 0.)
        # A single observation should give a result of 1.
        data = np.zeros(1, dtype=list(zip(names, types)))
        result4 = metric.run(data, slicePoint)
        self.assertEqual(result4, 1)

    def testTGapMetric(self):
        """Test the TgapsMetric (histogram of gaps between observation times)."""
        names = ['observationStartMJD']
        types = [float]
        data = np.zeros(100, dtype=list(zip(names, types)))
        # All 1-day gaps.
        data['observationStartMJD'] = np.arange(100)

        metric = metrics.TgapsMetric(bins=np.arange(1, 100, 1))
        result1 = metric.run(data)
        # By default, should all be in first bin.
        self.assertEqual(result1[0], data.size-1)
        self.assertEqual(np.sum(result1), data.size-1)
        data['observationStartMJD'] = np.arange(0, 200, 2)
        result2 = metric.run(data)
        self.assertEqual(result2[1], data.size-1)
        self.assertEqual(np.sum(result2), data.size-1)

        data = np.zeros(4, dtype=list(zip(names, types)))
        data['observationStartMJD'] = [10, 20, 30, 40]
        metric = metrics.TgapsMetric(allGaps=True, bins=np.arange(1, 100, 10))
        result3 = metric.run(data)
        self.assertEqual(result3[1], 2)
        # With allGaps, 4 visits produce 3! = 6 pair gaps in the histogram.
        # (np.math.factorial was removed in NumPy 1.25; use math.factorial.)
        Ngaps = math.factorial(data.size-1)
        self.assertEqual(np.sum(result3), Ngaps)

    def testNightGapMetric(self):
        """Test the NightgapsMetric (histogram of gaps between nights)."""
        names = ['night']
        types = [float]
        data = np.zeros(100, dtype=list(zip(names, types)))
        # All 1-day gaps.
        data['night'] = np.arange(100)

        metric = metrics.NightgapsMetric(bins=np.arange(1, 100, 1))
        result1 = metric.run(data)
        # By default, should all be in first bin.
        self.assertEqual(result1[0], data.size-1)
        self.assertEqual(np.sum(result1), data.size-1)
        data['night'] = np.arange(0, 200, 2)
        result2 = metric.run(data)
        self.assertEqual(result2[1], data.size-1)
        self.assertEqual(np.sum(result2), data.size-1)

        data = np.zeros(4, dtype=list(zip(names, types)))
        data['night'] = [10, 20, 30, 40]
        metric = metrics.NightgapsMetric(allGaps=True, bins=np.arange(1, 100, 10))
        result3 = metric.run(data)
        self.assertEqual(result3[1], 2)
        # With allGaps, 4 visits produce 3! = 6 pair gaps in the histogram.
        # (np.math.factorial was removed in NumPy 1.25; use math.factorial.)
        Ngaps = math.factorial(data.size-1)
        self.assertEqual(np.sum(result3), Ngaps)

        data = np.zeros(6, dtype=list(zip(names, types)))
        data['night'] = [1, 1, 2, 3, 3, 5]
        metric = metrics.NightgapsMetric(bins=np.arange(0, 5, 1))
        result4 = metric.run(data)
        self.assertEqual(result4[0], 0)
        self.assertEqual(result4[1], 2)
        self.assertEqual(result4[2], 1)

    def testNVisitsPerNightMetric(self):
        """Test the NVisitsPerNightMetric (histogram of visits per night)."""
        names = ['night']
        types = [float]
        data = np.zeros(100, dtype=list(zip(names, types)))
        # One visit per night.
        data['night'] = np.arange(100)

        bins = np.arange(0, 5, 1)
        metric = metrics.NVisitsPerNightMetric(bins=bins)
        result = metric.run(data)
        # All nights have one visit.
        expected_result = np.zeros(len(bins) - 1, dtype=int)
        expected_result[1] = len(data)
        np.testing.assert_array_equal(result, expected_result)

        # Two visits per night instead.
        data['night'] = np.floor(np.arange(0, 100) / 2)
        result = metric.run(data)
        expected_result = np.zeros(len(bins) - 1, dtype=int)
        expected_result[2] = len(data) / 2
        np.testing.assert_array_equal(result, expected_result)

    def testRapidRevisitUniformityMetric(self):
        """Test the RapidRevisitUniformityMetric."""
        data = np.zeros(100, dtype=list(zip(['observationStartMJD'], [float])))
        # Uniformly distribute time _differences_ between 0 and 100.
        dtimes = np.arange(100)
        data['observationStartMJD'] = dtimes.cumsum()
        # Set up "rapid revisit" metric to look for visits with dt between 5 and 55.
        metric = metrics.RapidRevisitUniformityMetric(dTmin=5, dTmax=55, minNvisits=50)
        result = metric.run(data)
        # This should be uniform.
        self.assertLess(result, 0.1)
        self.assertGreaterEqual(result, 0)
        # Set up non-uniform distribution of time differences.
        dtimes = np.zeros(100) + 5
        data['observationStartMJD'] = dtimes.cumsum()
        result = metric.run(data)
        self.assertGreaterEqual(result, 0.5)
        dtimes = np.zeros(100) + 15
        data['observationStartMJD'] = dtimes.cumsum()
        result = metric.run(data)
        self.assertGreaterEqual(result, 0.5)
        """
        # Let's see how much dmax/result can vary
        resmin = 1
        resmax = 0
        rng = np.random.RandomState(88123100)
        for i in range(10000):
            dtimes = rng.rand(100)
            data['observationStartMJD'] = dtimes.cumsum()
            metric = metrics.RapidRevisitUniformityMetric(dTmin=0.1, dTmax=0.8, minNvisits=50)
            result = metric.run(data)
            resmin = np.min([resmin, result])
            resmax = np.max([resmax, result])
        print("RapidRevisitUniformity .. range", resmin, resmax)
        """

    def testRapidRevisitMetric(self):
        """Test the RapidRevisitMetric pass/fail behavior."""
        data = np.zeros(100, dtype=list(zip(['observationStartMJD'], [float])))
        dtimes = np.arange(100)/24./60.
        data['observationStartMJD'] = dtimes.cumsum()
        # Set metric parameters to the actual N1/N2 values for these dtimes.
        metric = metrics.RapidRevisitMetric(dTmin=40./60./60./24., dTpairs=20./60./24.,
                                            dTmax=30./60./24., minN1=19, minN2=29)
        result = metric.run(data)
        self.assertEqual(result, 1)
        # Set metric parameters to > N1/N2 values, to see it return 0.
        metric = metrics.RapidRevisitMetric(dTmin=40.0/60.0/60.0/24., dTpairs=20./60./24.,
                                            dTmax=30./60./24., minN1=30, minN2=50)
        result = metric.run(data)
        self.assertEqual(result, 0)
        # Test with single value data.
        data = np.zeros(1, dtype=list(zip(['observationStartMJD'], [float])))
        result = metric.run(data)
        self.assertEqual(result, 0)

    def testNRevisitsMetric(self):
        """Test the NRevisitsMetric, raw and normalized."""
        data = np.zeros(100, dtype=list(zip(['observationStartMJD'], [float])))
        dtimes = np.arange(100)/24./60.
        data['observationStartMJD'] = dtimes.cumsum()
        metric = metrics.NRevisitsMetric(dT=50.)
        result = metric.run(data)
        self.assertEqual(result, 50)
        metric = metrics.NRevisitsMetric(dT=50., normed=True)
        result = metric.run(data)
        self.assertEqual(result, 0.5)

    def testTransientMetric(self):
        """Test the TransientMetric under various detection requirements."""
        names = ['observationStartMJD', 'fiveSigmaDepth', 'filter']
        types = [float, float, '<U1']

        ndata = 100
        dataSlice = np.zeros(ndata, dtype=list(zip(names, types)))
        dataSlice['observationStartMJD'] = np.arange(ndata)
        dataSlice['fiveSigmaDepth'] = 25
        dataSlice['filter'] = 'g'

        metric = metrics.TransientMetric(surveyDuration=ndata/365.25)

        # Should detect everything.
        self.assertEqual(metric.run(dataSlice), 1.)

        # Double the survey duration, should now only detect half.
        metric = metrics.TransientMetric(surveyDuration=ndata/365.25*2)
        self.assertEqual(metric.run(dataSlice), 0.5)

        # Set half of the m5 of the observations very bright, so kill another half.
        dataSlice['fiveSigmaDepth'][0:50] = 20
        self.assertEqual(metric.run(dataSlice), 0.25)

        dataSlice['fiveSigmaDepth'] = 25
        # Demand lots of early observations.
        metric = metrics.TransientMetric(peakTime=.5, nPrePeak=3, surveyDuration=ndata/365.25)
        self.assertEqual(metric.run(dataSlice), 0.)

        # Demand a reasonable number of early observations.
        metric = metrics.TransientMetric(peakTime=2, nPrePeak=2, surveyDuration=ndata/365.25)
        self.assertEqual(metric.run(dataSlice), 1.)

        # Demand multiple filters.
        metric = metrics.TransientMetric(nFilters=2, surveyDuration=ndata/365.25)
        self.assertEqual(metric.run(dataSlice), 0.)

        dataSlice['filter'] = ['r', 'g']*50
        self.assertEqual(metric.run(dataSlice), 1.)

        # Demand too many observations per light curve.
        metric = metrics.TransientMetric(nPerLC=20, surveyDuration=ndata/365.25)
        self.assertEqual(metric.run(dataSlice), 0.)

        # Test both filter and number of LC samples.
        metric = metrics.TransientMetric(nFilters=2, nPerLC=3, surveyDuration=ndata/365.25)
        self.assertEqual(metric.run(dataSlice), 1.)

    def testSeasonLengthMetric(self):
        """Test the SeasonLengthMetric with different visit spacings."""
        times = np.arange(0, 3650, 10)
        data = np.zeros(len(times),
                        dtype=list(zip(['observationStartMJD', 'visitExposureTime'], [float, float])))
        data['observationStartMJD'] = times
        data['visitExposureTime'] = 30.0
        metric = metrics.SeasonLengthMetric(reduceFunc=np.median)
        slicePoint = {'ra': 0}
        result = metric.run(data, slicePoint)
        self.assertEqual(result, 350)
        # Pairs of visits 10 days apart, once per year: median season length 10.
        times = np.arange(0, 3650, 365)
        data = np.zeros(len(times)*2,
                        dtype=list(zip(['observationStartMJD', 'visitExposureTime'], [float, float])))
        data['observationStartMJD'][0:len(times)] = times
        data['observationStartMJD'][len(times):] = times + 10
        data['observationStartMJD'] = np.sort(data['observationStartMJD'])
        data['visitExposureTime'] = 30.0
        metric = metrics.SeasonLengthMetric(reduceFunc=np.median)
        slicePoint = {'ra': 0}
        result = metric.run(data, slicePoint)
        self.assertEqual(result, 10)
        # One fewer year of visits: expect 9 seasons.
        times = np.arange(0, 3650-365, 365)
        data = np.zeros(len(times)*2,
                        dtype=list(zip(['observationStartMJD', 'visitExposureTime'], [float, float])))
        data['observationStartMJD'][0:len(times)] = times
        data['observationStartMJD'][len(times):] = times + 10
        data['observationStartMJD'] = np.sort(data['observationStartMJD'])
        data['visitExposureTime'] = 30.0
        metric = metrics.SeasonLengthMetric(reduceFunc=np.size)
        slicePoint = {'ra': 0}
        result = metric.run(data, slicePoint)
        self.assertEqual(result, 9)
class TestMemory(lsst.utils.tests.MemoryTestCase):
    """Standard LSST memory/file-descriptor leak check (behavior inherited
    from MemoryTestCase; no additional tests needed here)."""
    pass
def setup_module(module):
    """Pytest hook: initialize the LSST test framework before running
    this module's tests.

    Parameters
    ----------
    module : module
        The test module being set up (unused; required by pytest's hook API).
    """
    lsst.utils.tests.init()
if __name__ == "__main__":
    # Initialize the LSST test framework, then run all tests in this module.
    lsst.utils.tests.init()
    unittest.main()