

import numpy as np
import healpy as hp
from .baseMetric import BaseMetric

# A collection of metrics which are primarily intended to be used as summary statistics.

__all__ = ['fOArea', 'fONv', 'TableFractionMetric', 'IdentityMetric',
           'NormalizeMetric', 'ZeropointMetric', 'TotalPowerMetric']

 

class fOArea(BaseMetric):
    """
    Metric to calculate the fO Area value. Given a benchmark sky area Asky (square degrees),
    report the largest number of visits Nv such that at least Asky square degrees receive Nv
    or more visits; if norm is True, the result is divided by the benchmark Nvisit.
    """
    def __init__(self, col='metricdata', Asky=18000., Nvisit=825,
                 metricName='fOArea', nside=128, norm=True, **kwargs):
        """Asky = sky area in square degrees."""
        super(fOArea, self).__init__(col=col, metricName=metricName, **kwargs)
        self.Asky = Asky
        self.Nvisit = Nvisit
        self.nside = nside
        self.norm = norm

    def run(self, dataSlice, slicePoint=None):
        dataSlice.sort()
        name = dataSlice.dtype.names[0]
        scale = hp.nside2pixarea(self.nside, degrees=True)
        # Cumulative sky area covered by pixels with at least this many visits (sorted ascending).
        cumulativeArea = np.arange(1, dataSlice.size + 1)[::-1] * scale
        good = np.where(cumulativeArea >= self.Asky)[0]
        if good.size > 0:
            nv = np.max(dataSlice[name][good])
            if self.norm:
                nv = nv / float(self.Nvisit)
            return nv
        else:
            return self.badval

 

 

class fONv(BaseMetric):
    """
    Metric to calculate the fO Nv value. Given a benchmark number of visits Nvisit, report the
    sky area (square degrees) that receives at least Nvisit visits; if norm is True, the result
    is divided by the benchmark area Asky.
    """
    def __init__(self, col='metricdata', Asky=18000., metricName='fONv', Nvisit=825,
                 nside=128, norm=True, **kwargs):
        """Asky = sky area in square degrees."""
        super(fONv, self).__init__(col=col, metricName=metricName, **kwargs)
        self.Asky = Asky
        self.Nvisit = Nvisit
        self.nside = nside
        self.norm = norm

    def run(self, dataSlice, slicePoint=None):
        dataSlice.sort()
        name = dataSlice.dtype.names[0]
        scale = hp.nside2pixarea(self.nside, degrees=True)
        # Cumulative sky area covered by pixels with at least this many visits (sorted ascending).
        cumulativeArea = np.arange(1, dataSlice.size + 1)[::-1] * scale
        good = np.where(dataSlice[name] >= self.Nvisit)[0]
        if good.size > 0:
            area = np.max(cumulativeArea[good])
            if self.norm:
                area = area / float(self.Asky)
            return area
        else:
            return self.badval
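
# Illustrative note (derived from the formulas above, not from external documentation): at
# nside=128 each healpix pixel covers roughly 0.21 square degrees, so the default Asky=18000
# sq deg corresponds to the best ~86,000 pixels. fOArea and fONv simply read the visit count
# and the covered area off the sorted, cumulative-area curve at those two benchmark thresholds.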

 

 

class TableFractionMetric(BaseMetric):
    """
    Count the completeness (for many fields) and summarize how many fields have a given
    completeness level (within a series of bins). Works with the completeness metric only;
    this metric is meant to be used as a summary statistic on its output.

    The output is DIFFERENT FROM SSTAR and is:
        element    matching values
        0          0 == P
        1          0 < P < 0.1
        2          0.1 <= P < 0.2
        3          0.2 <= P < 0.3
        ...
        10         0.9 <= P < 1
        11         1 == P
        12         1 < P
    Note that the first and last elements do NOT obey the numpy histogram conventions.
    """
    def __init__(self, col='metricdata', nbins=10):
        """
        col = the column name in the metric data (i.e. 'metricdata' usually).
        nbins = number of bins between 0 and 1. Should divide evenly into 100.
        """
        super(TableFractionMetric, self).__init__(col=col, metricDtype='float')
        self.nbins = nbins
        # Set this so runSliceMetric knows masked values should be set to zero and passed.
        self.maskVal = 0.

    def run(self, dataSlice, slicePoint=None):
        # Histogram the completeness values that fall strictly between 0 and 1.
        goodVals = np.where((dataSlice[self.colname] > 0) & (dataSlice[self.colname] < 1))
        bins = np.arange(self.nbins + 1.) / self.nbins
        hist, b = np.histogram(dataSlice[self.colname][goodVals], bins=bins)
        # Fill in values for exact 0, exact 1 and > 1.
        zero = np.size(np.where(dataSlice[self.colname] == 0)[0])
        one = np.size(np.where(dataSlice[self.colname] == 1)[0])
        overone = np.size(np.where(dataSlice[self.colname] > 1)[0])
        hist = np.concatenate((np.array([zero]), hist, np.array([one]), np.array([overone])))
        # Create labels for each value.
        binNames = ['0 == P']
        binNames.append('0 < P < 0.1')
        for i in np.arange(1, self.nbins):
            binNames.append('%.2g <= P < %.2g' % (b[i], b[i + 1]))
        binNames.append('1 == P')
        binNames.append('1 < P')
        # Package the names and values together.
        result = np.empty(hist.size, dtype=[('name', np.str_, 20), ('value', float)])
        result['name'] = binNames
        result['value'] = hist
        return result

 

 

class IdentityMetric(BaseMetric):
    """
    Return the metric value itself. This is primarily useful as a summary statistic for UniSlicer metrics.
    """
    def run(self, dataSlice, slicePoint=None):
        if len(dataSlice[self.colname]) == 1:
            result = dataSlice[self.colname][0]
        else:
            result = dataSlice[self.colname]
        return result

 

 

class NormalizeMetric(BaseMetric):
    """
    Return metric values divided by 'normVal'. Useful for turning summary statistics into fractions.
    """
    def __init__(self, col='metricdata', normVal=1, **kwargs):
        super(NormalizeMetric, self).__init__(col=col, **kwargs)
        self.normVal = float(normVal)

    def run(self, dataSlice, slicePoint=None):
        result = dataSlice[self.colname] / self.normVal
        if len(result) == 1:
            return result[0]
        else:
            return result

 

class ZeropointMetric(BaseMetric):
    """
    Return metric values with the zeropoint 'zp' added. Useful for altering the zeropoint of summary statistics.
    """
    def __init__(self, col='metricdata', zp=0, **kwargs):
        super(ZeropointMetric, self).__init__(col=col, **kwargs)
        self.zp = zp

    def run(self, dataSlice, slicePoint=None):
        result = dataSlice[self.colname] + self.zp
        if len(result) == 1:
            return result[0]
        else:
            return result

 

class TotalPowerMetric(BaseMetric):
    """
    Calculate the total power in the angular power spectrum between lmin and lmax.
    """
    def __init__(self, col='metricdata', lmin=100., lmax=300., removeDipole=True, **kwargs):
        self.lmin = lmin
        self.lmax = lmax
        self.removeDipole = removeDipole
        super(TotalPowerMetric, self).__init__(col=col, **kwargs)
        self.maskVal = hp.UNSEEN

    def run(self, dataSlice, slicePoint=None):
        # Calculate the angular power spectrum, optionally removing the monopole/dipole first.
        if self.removeDipole:
            cl = hp.anafast(hp.remove_dipole(dataSlice[self.colname], verbose=False))
        else:
            cl = hp.anafast(dataSlice[self.colname])
        ell = np.arange(np.size(cl))
        condition = np.where((ell <= self.lmax) & (ell >= self.lmin))[0]
        # Sum C_ell over the requested range, weighting each multipole by its (2*ell + 1) m-modes.
        totalpower = np.sum(cl[condition] * (2 * ell[condition] + 1))
        return totalpower
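

# The block below is an illustrative smoke test, not part of the library API: it sketches how
# these summary metrics consume a structured array with a 'metricdata' field, as a healpix-based
# slicer would provide. It assumes BaseMetric sets self.colname/self.badval as in MAF and that
# this module lives in an importable package (run via `python -m <package>.<module>` so the
# relative import above resolves; the exact package path is an assumption).
if __name__ == '__main__':
    nside = 128
    npix = hp.nside2npix(nside)
    np.random.seed(42)

    # Synthetic "number of visits per pixel" map, packaged as a structured array.
    visits = np.zeros(npix, dtype=[('metricdata', float)])
    visits['metricdata'] = np.random.normal(loc=900., scale=100., size=npix)
    # run() sorts the array in place, so pass copies.
    print('fOArea (visits over best 18000 sq deg, normalized by 825):',
          fOArea(nside=nside).run(visits.copy()))
    print('fONv (area with >= 825 visits, normalized by 18000 sq deg):',
          fONv(nside=nside).run(visits.copy()))

    # Synthetic completeness values, including exact zeros and values above 1.
    completeness = np.zeros(1000, dtype=[('metricdata', float)])
    completeness['metricdata'] = np.random.uniform(low=-0.05, high=1.1, size=1000).clip(min=0.)
    table = TableFractionMetric().run(completeness)
    for row in table:
        print(row['name'], row['value'])

    # Total power of a synthetic (noise) healpix map; removeDipole=False avoids the
    # hp.remove_dipole(..., verbose=...) call, whose signature depends on the healpy version.
    skymap = np.zeros(npix, dtype=[('metricdata', float)])
    skymap['metricdata'] = np.random.normal(size=npix)
    print('Total power (100 <= ell <= 300):',
          TotalPowerMetric(removeDipole=False).run(skymap))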