import numpy as np
import healpy as hp
from .baseMetric import BaseMetric

# A collection of metrics which are primarily intended to be used as summary statistics.

__all__ = ['fOArea', 'fONv', 'TableFractionMetric', 'IdentityMetric',
           'NormalizeMetric', 'ZeropointMetric', 'TotalPowerMetric']

 

 

class fONv(BaseMetric):
    """
    Metrics based on a specified area, but returning NVISITS related to area:
    given Asky, what is the minimum and median number of visits obtained over that much area?
    (choose the portion of the sky with the highest number of visits first).

    Parameters
    ----------
    col : str or list of strs, opt
        Name of the column in the numpy recarray passed to the summary metric.
    Asky : float, opt
        Area of the sky to base the evaluation of number of visits over.
        Default 18,000 sq deg.
    nside : int, opt
        Nside parameter from healpix slicer, used to set the physical relationship between on-sky area
        and number of healpixels. Default 128.
    Nvisit : int, opt
        Number of visits to use as the benchmark value, if choosing to return a normalized Nvisit value.
    norm : boolean, opt
        Normalize the returned "nvisit" (min / median) values by Nvisit, if true.
        Default False.
    metricName : str, opt
        Name of the summary metric. Default fONv.
    """
    def __init__(self, col='metricdata', Asky=18000., nside=128, Nvisit=825,
                 norm=False, metricName='fONv', **kwargs):
        """Asky = square degrees"""
        super().__init__(col=col, metricName=metricName, **kwargs)
        self.Nvisit = Nvisit
        self.nside = nside
        # Determine how many healpixels are included in Asky sq deg.
        self.Asky = Asky
        self.scale = hp.nside2pixarea(self.nside, degrees=True)
        self.npix_Asky = int(np.ceil(self.Asky / self.scale))
        self.norm = norm

    def run(self, dataSlice, slicePoint=None):
        if len(dataSlice) < self.npix_Asky:
            return self.badval
        name = dataSlice.dtype.names[0]
        nvis_sorted = np.sort(dataSlice[name])
        # Find the Asky's worth of healpixels with the largest # of visits.
        nvis_Asky = nvis_sorted[-self.npix_Asky:]
        result = np.empty(2, dtype=[('name', np.str_, 20), ('value', float)])
        result['name'][0] = "MedianNvis"
        result['value'][0] = np.median(nvis_Asky)
        result['name'][1] = "MinNvis"
        result['value'][1] = np.min(nvis_Asky)
        if self.norm:
            result['value'] /= float(self.Nvisit)
        return result
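

# Illustrative usage sketch (not part of this module's API): applying fONv by hand to a
# healpix-resolution array of visit counts. The 'metricdata' column name matches the
# default above; the Poisson visit counts are an assumption for illustration only.
def _example_fONv_usage():
    nside = 128
    npix = hp.nside2npix(nside)
    nvisits = np.zeros(npix, dtype=[('metricdata', float)])
    # Fake per-healpixel visit counts, roughly centered on the 825-visit benchmark.
    nvisits['metricdata'] = np.random.poisson(lam=900, size=npix)
    metric = fONv(Asky=18000., nside=nside, Nvisit=825, norm=False)
    # Returns a 2-element structured array holding 'MedianNvis' and 'MinNvis' over the
    # best-covered 18,000 sq deg.
    return metric.run(nvisits)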

 

 

class fOArea(BaseMetric):
    """
    Metrics based on a specified number of visits, but returning AREA related to Nvisits:
    given Nvisit, what amount of sky is covered with at least that many visits?

    Parameters
    ----------
    col : str or list of strs, opt
        Name of the column in the numpy recarray passed to the summary metric.
    Nvisit : int, opt
        Number of visits to use as the minimum required -- the metric calculates the area that has
        at least this many visits. Default 825.
    Asky : float, opt
        Area to use as the benchmark value, if choosing to return a normalized Area value.
        Default 18,000 sq deg.
    nside : int, opt
        Nside parameter from healpix slicer, used to set the physical relationship between on-sky area
        and number of healpixels. Default 128.
    norm : boolean, opt
        Normalize the returned "area" (area with minimum Nvisit visits) value by Asky, if true.
        Default False.
    metricName : str, opt
        Name of the summary metric. Default fOArea.
    """
    def __init__(self, col='metricdata', Nvisit=825, Asky=18000.0, nside=128,
                 norm=False, metricName='fOArea', **kwargs):
        """Asky = square degrees"""
        super().__init__(col=col, metricName=metricName, **kwargs)
        self.Nvisit = Nvisit
        self.nside = nside
        self.Asky = Asky
        self.scale = hp.nside2pixarea(self.nside, degrees=True)
        self.norm = norm

    def run(self, dataSlice, slicePoint=None):
        name = dataSlice.dtype.names[0]
        nvis_sorted = np.sort(dataSlice[name])
        # Identify the healpixels with at least Nvisit visits.
        nvis_min = nvis_sorted[np.where(nvis_sorted >= self.Nvisit)]
        if len(nvis_min) == 0:
            result = self.badval
        else:
            result = nvis_min.size * self.scale
            if self.norm:
                result /= float(self.Asky)
        return result
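

# Illustrative usage sketch (not part of this module's API): fOArea on the same kind of
# synthetic visit-count map, cross-checked against a direct pixel count. The 'metricdata'
# column name and Poisson visit counts are assumptions for illustration only.
def _example_fOArea_usage():
    nside = 128
    pixarea = hp.nside2pixarea(nside, degrees=True)
    nvisits = np.zeros(hp.nside2npix(nside), dtype=[('metricdata', float)])
    nvisits['metricdata'] = np.random.poisson(lam=900, size=nvisits.size)
    metric = fOArea(Nvisit=825, nside=nside)
    area = metric.run(nvisits)
    # Equivalent direct computation: count healpixels at/above the benchmark, scale by pixel area.
    assert np.isclose(area, np.sum(nvisits['metricdata'] >= 825) * pixarea)
    return area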

 

 

class TableFractionMetric(BaseMetric):
    """
    Count the completeness (for many fields) and summarize how many fields have given completeness levels
    (within a series of bins). Works with completenessMetric only.

    This metric is meant to be used as a summary statistic on something like the completeness metric.
    The output is DIFFERENT FROM SSTAR and is:
        element   matching values
        0         0 == P
        1         0 < P < .1
        2         .1 <= P < .2
        3         .2 <= P < .3
        ...
        10        .9 <= P < 1
        11        1 == P
        12        1 < P
    Note that the first and last elements do NOT obey the numpy histogram conventions.
    """
    def __init__(self, col='metricdata', nbins=10):
        """
        colname = the column name in the metric data (i.e. 'metricdata' usually).
        nbins = number of bins between 0 and 1. Should divide evenly into 100.
        """
        super(TableFractionMetric, self).__init__(col=col, metricDtype='float')
        self.nbins = nbins
        # Set this so runSliceMetric knows masked values should be set to zero and passed.
        self.maskVal = 0.

    def run(self, dataSlice, slicePoint=None):
        # Calculate histogram of completeness values that fall between 0-1.
        goodVals = np.where((dataSlice[self.colname] > 0) & (dataSlice[self.colname] < 1))
        bins = np.arange(self.nbins + 1.) / self.nbins
        hist, b = np.histogram(dataSlice[self.colname][goodVals], bins=bins)
        # Fill in values for exact 0, exact 1 and >1.
        zero = np.size(np.where(dataSlice[self.colname] == 0)[0])
        one = np.size(np.where(dataSlice[self.colname] == 1)[0])
        overone = np.size(np.where(dataSlice[self.colname] > 1)[0])
        hist = np.concatenate((np.array([zero]), hist, np.array([one]), np.array([overone])))
        # Create labels for each value.
        binNames = ['0 == P']
        binNames.append('0 < P < 0.1')
        for i in np.arange(1, self.nbins):
            binNames.append('%.2g <= P < %.2g' % (b[i], b[i + 1]))
        binNames.append('1 == P')
        binNames.append('1 < P')
        # Package the names and values up.
        result = np.empty(hist.size, dtype=[('name', np.str_, 20), ('value', float)])
        result['name'] = binNames
        result['value'] = hist
        return result
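

# Illustrative usage sketch (not part of this module's API): TableFractionMetric applied to a
# hand-made completeness array. The 'metricdata' column name matches the default above; the
# completeness values are an assumption for illustration only.
def _example_tablefraction_usage():
    completeness = np.zeros(6, dtype=[('metricdata', float)])
    completeness['metricdata'] = [0.0, 0.05, 0.55, 0.95, 1.0, 1.2]
    metric = TableFractionMetric()
    # Returns 13 labelled counts: one field at P == 0, one with 0 < P < 0.1, one in the
    # 0.5-0.6 bin, one in the 0.9-1 bin, one at P == 1, and one with P > 1.
    return metric.run(completeness)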

 

 

class IdentityMetric(BaseMetric):
    """
    Return the metric value itself. This is primarily useful as a summary statistic for UniSlicer metrics.
    """
    def run(self, dataSlice, slicePoint=None):
        if len(dataSlice[self.colname]) == 1:
            result = dataSlice[self.colname][0]
        else:
            result = dataSlice[self.colname]
        return result

 

 

class NormalizeMetric(BaseMetric):
    """
    Return metric values divided by 'normVal'. Useful for turning summary statistics into fractions.
    """
    def __init__(self, col='metricdata', normVal=1, **kwargs):
        super(NormalizeMetric, self).__init__(col=col, **kwargs)
        self.normVal = float(normVal)

    def run(self, dataSlice, slicePoint=None):
        result = dataSlice[self.colname] / self.normVal
        if len(result) == 1:
            return result[0]
        else:
            return result

 

class ZeropointMetric(BaseMetric):
    """
    Return metric values with the addition of 'zp'. Useful for altering the zeropoint for summary statistics.
    """
    def __init__(self, col='metricdata', zp=0, **kwargs):
        super(ZeropointMetric, self).__init__(col=col, **kwargs)
        self.zp = zp

    def run(self, dataSlice, slicePoint=None):
        result = dataSlice[self.colname] + self.zp
        if len(result) == 1:
            return result[0]
        else:
            return result
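

# Illustrative usage sketch (not part of this module's API): the simple summary helpers.
# NormalizeMetric turns a value into a fraction of 'normVal'; ZeropointMetric shifts it by 'zp'.
# The single-element structured array below is an assumption for illustration only.
def _example_normalize_zeropoint_usage():
    value = np.zeros(1, dtype=[('metricdata', float)])
    value['metricdata'] = 412.5
    fraction = NormalizeMetric(normVal=825.).run(value)  # -> 0.5
    shifted = ZeropointMetric(zp=-400.).run(value)       # -> 12.5
    return fraction, shifted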

 

class TotalPowerMetric(BaseMetric):
    """
    Calculate the total power in the angular power spectrum between lmin/lmax.
    """
    def __init__(self, col='metricdata', lmin=100., lmax=300., removeDipole=True, **kwargs):
        self.lmin = lmin
        self.lmax = lmax
        self.removeDipole = removeDipole
        super(TotalPowerMetric, self).__init__(col=col, **kwargs)
        self.maskVal = hp.UNSEEN

    def run(self, dataSlice, slicePoint=None):
        # Calculate the power spectrum.
        if self.removeDipole:
            cl = hp.anafast(hp.remove_dipole(dataSlice[self.colname], verbose=False))
        else:
            cl = hp.anafast(dataSlice[self.colname])
        ell = np.arange(np.size(cl))
        condition = np.where((ell <= self.lmax) & (ell >= self.lmin))[0]
        totalpower = np.sum(cl[condition] * (2 * ell[condition] + 1))
        return totalpower
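

# Illustrative usage sketch (not part of this module's API): TotalPowerMetric on a synthetic
# healpix map. The 'metricdata' column name, the Gaussian sky, nside, and the lmin/lmax choices
# are assumptions; the metric sums (2*ell + 1) * C_ell over the requested ell range.
def _example_totalpower_usage():
    nside = 64
    sky = np.zeros(hp.nside2npix(nside), dtype=[('metricdata', float)])
    sky['metricdata'] = np.random.normal(loc=0.0, scale=1.0, size=sky.size)
    # removeDipole=False: a flat random map has no dipole worth removing in this sketch.
    metric = TotalPowerMetric(lmin=100., lmax=150., removeDipole=False)
    return metric.run(sky)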