# This file is part of daf_butler.
#
# Developed for the LSST Data Management System.
# This product includes software developed by the LSST Project
# (https://www.lsst.org).
# See the COPYRIGHT file at the top-level directory of this distribution
# for details of code ownership.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <https://www.gnu.org/licenses/>.

"""Simple unit test for expr_parser/parserLex module."""

import re
import unittest

from lsst.daf.butler.exprParser import ParserLex, ParserLexError
import lsst.utils.tests
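
# All tests below follow the same basic pattern: build a lexer with
# ParserLex.make_lexer(), feed it an expression string via lexer.input(),
# and pull tokens one at a time with lexer.token() until it returns None.
# A rough sketch of that flow (illustrative only; the token attributes
# .type, .value, .lineno, and .lexpos are the ones checked by _assertToken
# below):
#
#     lexer = ParserLex.make_lexer()
#     lexer.input("tract = 8766")
#     tok = lexer.token()   # IDENTIFIER "tract"
#     tok = lexer.token()   # EQ "="
#     tok = lexer.token()   # NUMERIC_LITERAL "8766"
#     assert lexer.token() is None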

 

 

class ParserLexTestCase(unittest.TestCase): 

"""A test case for ParserLex 

""" 

 

def _assertToken(self, token, type, value, lineno=None, lexpos=None): 

self.assertIsNotNone(token) 

self.assertEqual(token.type, type) 

self.assertEqual(token.value, value) 

if lineno is not None: 

self.assertEqual(token.lineno, lineno) 

if lexpos is not None: 

self.assertEqual(token.lexpos, lexpos) 

 

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def testInstantiate(self):
        """Tests for making ParserLex instances."""

        default_reflags = re.IGNORECASE | re.VERBOSE
        lexer = ParserLex.make_lexer()
        self.assertEqual(lexer.lexreflags, default_reflags)

        lexer = ParserLex.make_lexer(reflags=re.DOTALL)
        self.assertEqual(lexer.lexreflags, re.DOTALL | default_reflags)

    def testSimpleTokens(self):
        """Test for simple tokens"""
        lexer = ParserLex.make_lexer()

        # "=!=" lexes as EQ then NE; "<<=" as LT then LE; ">>=" as GT then GE
        lexer.input("=!= <<= >>= +-*/()")
        self._assertToken(lexer.token(), 'EQ', '=')
        self._assertToken(lexer.token(), 'NE', '!=')
        self._assertToken(lexer.token(), 'LT', '<')
        self._assertToken(lexer.token(), 'LE', '<=')
        self._assertToken(lexer.token(), 'GT', '>')
        self._assertToken(lexer.token(), 'GE', '>=')
        self._assertToken(lexer.token(), 'ADD', '+')
        self._assertToken(lexer.token(), 'SUB', '-')
        self._assertToken(lexer.token(), 'MUL', '*')
        self._assertToken(lexer.token(), 'DIV', '/')
        self._assertToken(lexer.token(), 'LPAREN', '(')
        self._assertToken(lexer.token(), 'RPAREN', ')')
        self.assertIsNone(lexer.token())

    def testReservedTokens(self):
        """Test for reserved words"""
        lexer = ParserLex.make_lexer()

        # tokens = "IS NOT IN NULL OR XOR AND BETWEEN LIKE ESCAPE REGEXP"
        tokens = "NOT IN OR XOR AND"
        lexer.input(tokens)
        for token in tokens.split():
            self._assertToken(lexer.token(), token, token)
        self.assertIsNone(lexer.token())

        # tokens = "is not in null or xor and between like escape regexp"
        tokens = "not in or xor and"
        lexer.input(tokens)
        for token in tokens.split():
            self._assertToken(lexer.token(), token.upper(), token)
        self.assertIsNone(lexer.token())

        # not reserved
        token = "ISNOTIN"
        lexer.input(token)
        self._assertToken(lexer.token(), "IDENTIFIER", token)
        self.assertIsNone(lexer.token())

    def testStringLiteral(self):
        """Test for string literals"""
        lexer = ParserLex.make_lexer()

        lexer.input("''")
        self._assertToken(lexer.token(), "STRING_LITERAL", "")
        self.assertIsNone(lexer.token())

        lexer.input("'string'")
        self._assertToken(lexer.token(), "STRING_LITERAL", "string")
        self.assertIsNone(lexer.token())

        lexer.input("'string' 'string'\n'string'")
        self._assertToken(lexer.token(), "STRING_LITERAL", "string")
        self._assertToken(lexer.token(), "STRING_LITERAL", "string")
        self._assertToken(lexer.token(), "STRING_LITERAL", "string")
        self.assertIsNone(lexer.token())

        # newline inside a string literal is an error
        lexer.input("'string\nstring'")
        with self.assertRaises(ParserLexError):
            lexer.token()

        # unterminated string literal is an error
        lexer.input("'string")
        with self.assertRaises(ParserLexError):
            lexer.token()

    def testNumericLiteral(self):
        """Test for numeric literals"""
        lexer = ParserLex.make_lexer()

        lexer.input("0 100 999. 100.1 1e10 1e-10 1.e+20 .2E5")
        self._assertToken(lexer.token(), "NUMERIC_LITERAL", "0")
        self._assertToken(lexer.token(), "NUMERIC_LITERAL", "100")
        self._assertToken(lexer.token(), "NUMERIC_LITERAL", "999.")
        self._assertToken(lexer.token(), "NUMERIC_LITERAL", "100.1")
        self._assertToken(lexer.token(), "NUMERIC_LITERAL", "1e10")
        self._assertToken(lexer.token(), "NUMERIC_LITERAL", "1e-10")
        self._assertToken(lexer.token(), "NUMERIC_LITERAL", "1.e+20")
        self._assertToken(lexer.token(), "NUMERIC_LITERAL", ".2E5")
        self.assertIsNone(lexer.token())

    def testIdentifier(self):
        """Test for identifiers"""
        lexer = ParserLex.make_lexer()

        lexer.input("ID id _012 a_b_C")
        self._assertToken(lexer.token(), "IDENTIFIER", "ID")
        self._assertToken(lexer.token(), "IDENTIFIER", "id")
        self._assertToken(lexer.token(), "IDENTIFIER", "_012")
        self._assertToken(lexer.token(), "IDENTIFIER", "a_b_C")
        self.assertIsNone(lexer.token())

        # dotted identifiers with a single dot are allowed
        lexer.input("a.b _._")
        self._assertToken(lexer.token(), "IDENTIFIER", "a.b")
        self._assertToken(lexer.token(), "IDENTIFIER", "_._")
        self.assertIsNone(lexer.token())

        # leading dot is an error
        lexer.input(".id")
        with self.assertRaises(ParserLexError):
            lexer.token()

        # trailing dot is an error
        lexer.input("id.")
        self._assertToken(lexer.token(), "IDENTIFIER", "id")
        with self.assertRaises(ParserLexError):
            lexer.token()

        # at most one dot is allowed in an identifier
        lexer.input("id.id.id")
        self._assertToken(lexer.token(), "IDENTIFIER", "id.id")
        with self.assertRaises(ParserLexError):
            lexer.token()

    def testExpression(self):
        """Test for more or less complete expression"""
        lexer = ParserLex.make_lexer()

        expr = ("((instrument='HSC' AND detector != 9) OR instrument='CFHT') "
                "AND tract=8766 AND patch.cell_x > 5 AND "
                "patch.cell_y < 4 AND abstract_filter='i'")
        tokens = (("LPAREN", "("),
                  ("LPAREN", "("),
                  ("IDENTIFIER", "instrument"),
                  ("EQ", "="),
                  ("STRING_LITERAL", "HSC"),
                  ("AND", "AND"),
                  ("IDENTIFIER", "detector"),
                  ("NE", "!="),
                  ("NUMERIC_LITERAL", "9"),
                  ("RPAREN", ")"),
                  ("OR", "OR"),
                  ("IDENTIFIER", "instrument"),
                  ("EQ", "="),
                  ("STRING_LITERAL", "CFHT"),
                  ("RPAREN", ")"),
                  ("AND", "AND"),
                  ("IDENTIFIER", "tract"),
                  ("EQ", "="),
                  ("NUMERIC_LITERAL", "8766"),
                  ("AND", "AND"),
                  ("IDENTIFIER", "patch.cell_x"),
                  ("GT", ">"),
                  ("NUMERIC_LITERAL", "5"),
                  ("AND", "AND"),
                  ("IDENTIFIER", "patch.cell_y"),
                  ("LT", "<"),
                  ("NUMERIC_LITERAL", "4"),
                  ("AND", "AND"),
                  ("IDENTIFIER", "abstract_filter"),
                  ("EQ", "="),
                  ("STRING_LITERAL", "i"))
        lexer.input(expr)
        for type, value in tokens:
            self._assertToken(lexer.token(), type, value)
        self.assertIsNone(lexer.token())

    def testExceptions(self):
        """Test for exception contents"""

        def _assertExc(exc, expr, remain, pos, lineno):
            """Check exception attribute values.

            The error carries the full expression, the unparsed remainder,
            the 0-based offset of the offending character in the expression,
            and its 1-based line number.
            """
            self.assertEqual(exc.expression, expr)
            self.assertEqual(exc.remain, remain)
            self.assertEqual(exc.pos, pos)
            self.assertEqual(exc.lineno, lineno)

        lexer = ParserLex.make_lexer()
        expr = "a.b.c"
        lexer.input(expr)
        self._assertToken(lexer.token(), "IDENTIFIER", "a.b")
        with self.assertRaises(ParserLexError) as catcher:
            lexer.token()
        _assertExc(catcher.exception, expr, ".c", 3, 1)

        lexer = ParserLex.make_lexer()
        expr = "a \n& b"
        lexer.input(expr)
        self._assertToken(lexer.token(), "IDENTIFIER", "a")
        with self.assertRaises(ParserLexError) as catcher:
            lexer.token()
        _assertExc(catcher.exception, expr, "& b", 3, 2)

        lexer = ParserLex.make_lexer()
        expr = "a\n=\n1e5.e2"
        lexer.input(expr)
        self._assertToken(lexer.token(), "IDENTIFIER", "a")
        self._assertToken(lexer.token(), "EQ", "=")
        self._assertToken(lexer.token(), "NUMERIC_LITERAL", "1e5")
        with self.assertRaises(ParserLexError) as catcher:
            lexer.token()
        _assertExc(catcher.exception, expr, ".e2", 7, 3)

 

class MyMemoryTestCase(lsst.utils.tests.MemoryTestCase):
    # Standard lsst.utils.tests check for file descriptor / resource leaks.
    pass


def setup_module(module):
    lsst.utils.tests.init()

if __name__ == "__main__":
    lsst.utils.tests.init()
    unittest.main()