Hide keyboard shortcuts

Hot-keys on this page

r m x p   toggle line displays

j k   next/prev highlighted chunk

0   (zero) top of page

1   (one) first highlighted chunk

1

2

3

4

5

6

7

8

9

10

11

12

13

14

15

16

17

18

19

20

21

22

23

24

25

26

27

28

29

30

31

32

33

34

35

36

37

38

39

40

41

42

43

44

45

46

47

48

49

50

51

52

53

54

55

56

57

58

59

60

61

62

63

64

65

66

67

68

69

70

71

72

73

74

75

76

77

78

79

80

81

82

83

84

85

86

87

88

89

90

91

92

93

94

95

96

97

98

99

100

101

102

103

104

105

106

107

108

109

110

111

112

113

114

115

116

117

118

119

120

121

122

123

124

125

126

127

128

129

130

131

132

133

134

135

136

137

138

139

140

141

142

143

144

145

146

147

148

149

150

151

152

153

154

155

156

157

158

159

160

161

162

163

164

165

166

167

168

169

170

171

172

173

174

175

176

177

178

179

180

181

182

183

184

185

186

187

188

189

190

191

192

193

194

195

196

197

198

199

200

201

202

203

204

205

206

207

208

209

210

211

212

213

214

215

216

217

218

219

220

221

222

223

224

225

226

227

228

229

230

231

232

233

234

235

236

237

238

239

240

241

242

243

244

245

246

247

248

249

250

251

252

253

254

255

256

257

258

259

260

261

import numpy as np 

import warnings 

 

from .moMetrics import BaseMoMetric 

 

__all__ = ['integrateOverH', 'ValueAtHMetric', 'MeanValueAtHMetric', 

'MoCompletenessMetric', 'MoCompletenessAtTimeMetric'] 

 

 

def integrateOverH(Mvalues, Hvalues, Hindex=0.33):
    """Function to calculate a metric value integrated over an Hrange, assuming a power-law distribution.

    Parameters
    ----------
    Mvalues : numpy.ndarray
        The metric values at each H value.
    Hvalues : numpy.ndarray
        The H values corresponding to each Mvalue (must be the same length).
    Hindex : float, opt
        The power-law index expected for the H value distribution.
        Default is 0.33 (dN/dH = 10^(Hindex * H) ).

    Returns
    -------
    numpy.ndarray
        The integrated or cumulative metric values.
    """
    # Set the expected H distribution.
    # dndh = differential size distribution (relative number in each H bin),
    # normalized so the brightest H bin carries weight 1.
    dndh = np.power(10., Hindex * (Hvalues - Hvalues.min()))
    # Weighted running average: the cumulative metric value over this bin
    # and all brighter (smaller H) bins.
    intVals = np.cumsum(Mvalues * dndh) / np.cumsum(dndh)
    return intVals

 

 

class ValueAtHMetric(BaseMoMetric):
    """Return the metric value at a given H value.

    Requires the metric values to be one-dimensional (typically, completeness values).

    Parameters
    ----------
    Hmark : float, opt
        The H value at which to look up the metric value. Default = 22.
    """
    def __init__(self, Hmark=22, **kwargs):
        name = 'Value At H=%.1f' % Hmark
        super(ValueAtHMetric, self).__init__(metricName=name, **kwargs)
        self.Hmark = Hmark

    def run(self, metricVals, Hvals):
        # The lookup is undefined if Hmark falls outside the sampled H range.
        if not (Hvals.min() <= self.Hmark <= Hvals.max()):
            warnings.warn('Desired H value of metric outside range of provided H values.')
            return None
        # Only a single row of metric values (1-d data) makes sense here.
        if metricVals.shape[0] != 1:
            warnings.warn('This is not an appropriate summary statistic for this data - need 1d values.')
            return None
        # Interpolate the metric values to the requested H.
        return np.interp(self.Hmark, Hvals, metricVals[0])

 

 

class MeanValueAtHMetric(BaseMoMetric):
    """Return the mean value of a metric at a given H.

    Allows the metric values to be multi-dimensional (i.e. use a cloned H distribution).

    Parameters
    ----------
    Hmark : float, opt
        The H value at which to look up the metric value. Default = 22.
    """
    def __init__(self, Hmark=22, reduceFunc=np.mean, metricName=None, **kwargs):
        if metricName is None:
            metricName = 'Mean Value At H=%.1f' % Hmark
        super(MeanValueAtHMetric, self).__init__(metricName=metricName, **kwargs)
        self.Hmark = Hmark
        self.reduceFunc = reduceFunc

    def run(self, metricVals, Hvals):
        # The lookup is undefined if Hmark falls outside the sampled H range.
        if not (Hvals.min() <= self.Hmark <= Hvals.max()):
            warnings.warn('Desired H value of metric outside range of provided H values.')
            return None
        # Collapse the per-object axis with reduceFunc, then interpolate to Hmark.
        reduced = self.reduceFunc(metricVals.swapaxes(0, 1), axis=1)
        return np.interp(self.Hmark, Hvals, reduced)

 

 

class MoCompletenessMetric(BaseMoMetric):
    """Calculate the fraction of the population that meets `threshold` value or higher.
    This is equivalent to calculating the completeness (relative to the entire population) given
    the output of a Discovery_N_Chances metric, or the fraction of the population that meets a given cutoff
    value for Color determination metrics.

    Any moving object metric that outputs a float value can thus have the 'fraction of the population'
    with greater than X value calculated here, as a summary statistic.

    Parameters
    ----------
    threshold : int, opt
        Count the fraction of the population that exceeds this value. Default = 1.
    nbins : int, opt
        If the H values for the metric are not a cloned distribution, then split up H into this many bins.
        Default 20.
    minHrange : float, opt
        If the H values for the metric are not a cloned distribution, then split up H into at least this
        range (otherwise just use the min/max of the H values). Default 1.0
    cumulative : bool, opt
        If False, simply report the differential fractional value (or differential completeness).
        If True, integrate over the H distribution (using IntegrateOverH) to report a cumulative fraction.
        Default False.
    Hindex : float, opt
        Use Hindex as the power law to integrate over H, if cumulative is True. Default 0.3.
    """
    def __init__(self, threshold=1, nbins=20, minHrange=1.0, cumulative=False, Hindex=0.33, **kwargs):
        # If a metricName was passed in, infer cumulative/differential from its prefix
        # so the name and the behavior/units stay consistent.
        if 'metricName' in kwargs:
            metricName = kwargs.pop('metricName')
            if metricName.startswith('Cumulative'):
                self.cumulative = True
                units = '<= H'
            else:
                self.cumulative = False
                units = '@ H'
        else:
            self.cumulative = cumulative
            if self.cumulative:
                metricName = 'CumulativeCompleteness'
                units = '<= H'
            else:
                metricName = 'DifferentialCompleteness'
                units = '@ H'
        super(MoCompletenessMetric, self).__init__(metricName=metricName, units=units, **kwargs)
        self.threshold = threshold
        # If H is not a cloned distribution, then we need to specify how to bin these values.
        self.nbins = nbins
        self.minHrange = minHrange
        self.Hindex = Hindex

    def run(self, metricValues, Hvals):
        nSsos = metricValues.shape[0]
        nHval = len(Hvals)
        metricValH = metricValues.swapaxes(0, 1)
        if nHval == metricValues.shape[1]:
            # Hvals array is probably the same as the cloned H array:
            # at each H, count the objects that meet the threshold.
            # NOTE(review): .filled(0) assumes metricValues is a masked array — TODO confirm.
            completeness = np.zeros(len(Hvals), float)
            for i, H in enumerate(Hvals):
                completeness[i] = np.where(metricValH[i].filled(0) >= self.threshold)[0].size
            completeness = completeness / float(nSsos)
        else:
            # The Hvals are spread more randomly among the objects (we probably used one per object),
            # so bin the objects into H bins and compute the found fraction per bin.
            hrange = Hvals.max() - Hvals.min()
            minH = Hvals.min()
            if hrange < self.minHrange:
                # Expand to at least minHrange, centered on the existing values.
                hrange = self.minHrange
                minH = Hvals.min() - hrange / 2.0
            stepsize = hrange / float(self.nbins)
            bins = np.arange(minH, minH + hrange + stepsize / 2.0, stepsize)
            Hvals = bins[:-1]
            n_all, b = np.histogram(metricValH[0], bins)
            # Bug fix: the original referenced self.requiredChances, which is never
            # defined on this class (AttributeError); the cutoff is self.threshold.
            condition = np.where(metricValH[0] >= self.threshold)[0]
            n_found, b = np.histogram(metricValH[0][condition], bins)
            completeness = n_found.astype(float) / n_all.astype(float)
            # Empty bins produce 0/0 above; report 0 completeness there.
            completeness = np.where(n_all == 0, 0, completeness)
        if self.cumulative:
            completenessInt = integrateOverH(completeness, Hvals, self.Hindex)
            summaryVal = np.empty(len(completenessInt), dtype=[('name', np.str_, 20), ('value', float)])
            summaryVal['value'] = completenessInt
            for i, Hval in enumerate(Hvals):
                summaryVal['name'][i] = 'H <= %f' % (Hval)
        else:
            summaryVal = np.empty(len(completeness), dtype=[('name', np.str_, 20), ('value', float)])
            summaryVal['value'] = completeness
            for i, Hval in enumerate(Hvals):
                summaryVal['name'][i] = 'H = %f' % (Hval)
        return summaryVal

 

class MoCompletenessAtTimeMetric(BaseMoMetric):
    """Calculate the completeness (relative to the entire population) <= a given H as a function of time,
    given the times of each discovery.

    Input values of the discovery times can come from the Discovery_Time (child) metric or the
    KnownObjects metric.

    Parameters
    ----------
    times : numpy.ndarray like
        The bins to distribute the discovery times into. Same units as the discovery time (typically MJD).
    Hval : float, opt
        The value of H to count completeness at (or cumulative completeness to).
        Default None, in which case a value halfway through Hvals (the slicer H range) will be chosen.
    cumulative : bool, opt
        If True, calculate the cumulative completeness (completeness <= H).
        If False, calculate the differential completeness (completeness @ H).
        Default True.
    Hindex : float, opt
        Use Hindex as the power law to integrate over H, if cumulative is True. Default 0.3.
    """

    def __init__(self, times, Hval=None, cumulative=True, Hindex=0.33, **kwargs):
        self.Hval = Hval
        self.times = times
        self.Hindex = Hindex
        # If a metricName was passed in, infer cumulative/differential from its prefix.
        if 'metricName' in kwargs:
            metricName = kwargs.pop('metricName')
            if metricName.startswith('Differential'):
                self.cumulative = False
            else:
                self.cumulative = True
            self.metricName = metricName
        else:
            self.cumulative = cumulative
            prefix = 'CumulativeCompleteness' if self.cumulative else 'DifferentialCompleteness'
            # Bug fix: the original always formatted '@H=%.2f' % self.Hval, which raised
            # a TypeError when Hval was left at its default of None (run() picks the H
            # value later in that case). Only include H in the name when it is known.
            if self.Hval is None:
                self.metricName = prefix + '@Time'
            else:
                self.metricName = prefix + '@Time@H=%.2f' % self.Hval
        self._setLabels()
        super(MoCompletenessAtTimeMetric, self).__init__(metricName=self.metricName, units=self.units,
                                                         **kwargs)

    def _setLabels(self):
        # Units string describes which H the completeness is reported at/to.
        if self.Hval is not None:
            if self.cumulative:
                self.units = 'H <=%.1f' % (self.Hval)
            else:
                self.units = 'H = %.1f' % (self.Hval)
        else:
            self.units = 'H'

    def run(self, discoveryTimes, Hvals):
        if len(Hvals) != discoveryTimes.shape[1]:
            warnings.warn("This summary metric expects cloned H distribution. Cannot calculate summary.")
            return
        nSsos = discoveryTimes.shape[0]
        timesinH = discoveryTimes.swapaxes(0, 1)
        completenessH = np.empty([len(Hvals), len(self.times)], float)
        for i, H in enumerate(Hvals):
            # Count discoveries per time bin, then accumulate over time.
            n, b = np.histogram(timesinH[i].compressed(), bins=self.times)
            completenessH[i][0] = 0
            completenessH[i][1:] = n.cumsum()
        completenessH = completenessH / float(nSsos)
        completeness = completenessH.swapaxes(0, 1)
        if self.cumulative:
            # Bug fix: the original omitted self.Hindex here, silently using the
            # integrateOverH default instead of the documented Hindex parameter.
            for i, t in enumerate(self.times):
                completeness[i] = integrateOverH(completeness[i], Hvals, self.Hindex)
        # To save the summary statistic, we must pick out a given H value.
        if self.Hval is None:
            # Default to the midpoint of the slicer's H range.
            Hidx = len(Hvals) // 2
            self.Hval = Hvals[Hidx]
            self._setLabels()
        else:
            # Use the H value closest to the requested one.
            Hidx = np.where(np.abs(Hvals - self.Hval) == np.abs(Hvals - self.Hval).min())[0][0]
            self.Hval = Hvals[Hidx]
            self._setLabels()
        summaryVal = np.empty(len(self.times), dtype=[('name', np.str_, 20), ('value', float)])
        summaryVal['value'] = completeness[:, Hidx]
        for i, time in enumerate(self.times):
            summaryVal['name'][i] = '%s @ %.2f' % (self.units, time)
        return summaryVal