# This file is part of daf_butler.
#
# Developed for the LSST Data Management System.
# This product includes software developed by the LSST Project
# (https://www.lsst.org).
# See the COPYRIGHT file at the top-level directory of this distribution
# for details of code ownership.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.

# type: ignore

"""Module which defines a PLY lexer for user expressions parsed by pre-flight.
"""

__all__ = ["ParserLex", "ParserLexError"]

# -------------------------------
# Imports of standard modules --
# -------------------------------
import re

# -----------------------------
# Imports for other modules --
# -----------------------------
from .ply import lex

# ----------------------------------
# Local non-exported definitions --
# ----------------------------------

_RE_RANGE = r"(?P<start>-?\d+)\s*\.\.\s*(?P<stop>-?\d+)(\s*:\s*(?P<stride>[1-9]\d*))?"
"""Regular expression to match a range literal in the form NUM..NUM[:NUM];
it is applied to ``t_RANGE_LITERAL`` via the ``lex.TOKEN`` decorator.
"""

# ------------------------
# Exported definitions --
# ------------------------


class ParserLexError(Exception):
    """Exception raised for lex-phase errors.

    Attributes
    ----------
    expression : str
        Full initial expression being parsed.
    remain : str
        Remaining non-parsed part of the expression.
    pos : int
        Current parsing position, offset from beginning of expression in
        characters.
    lineno : int
        Current line number in the expression.
    """

    def __init__(self, expression, remain, pos, lineno):
        Exception.__init__(self, "Unexpected character at position {}".format(pos))
        self.expression = expression
        self.remain = remain
        self.pos = pos
        self.lineno = lineno
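
# A minimal error-handling sketch for callers (hypothetical caller code; the
# attribute names match the class above):
#
#     try:
#         lexer.input(expression)
#         tokens = list(lexer)
#     except ParserLexError as exc:
#         print("syntax error at line", exc.lineno, "position", exc.pos)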


class ParserLex:
    """Class which defines the PLY lexer.
    """

    @classmethod
    def make_lexer(cls, reflags=0, **kwargs):
        """Factory for lexers.

        Returns
        -------
        lexer : `ply.lex.Lexer`
            New lexer instance.
        """
        # make sure that the flags that we need are there
        kw = dict(reflags=reflags | re.IGNORECASE | re.VERBOSE)
        kw.update(kwargs)
        return lex.lex(object=cls(), **kw)
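
    # A minimal usage sketch (standard PLY tokenization loop; the example
    # expression is made up):
    #
    #     lexer = ParserLex.make_lexer()
    #     lexer.input("visit = 100 AND detector IN (1..4)")
    #     for tok in lexer:
    #         print(tok.type, tok.value)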

    # literals = ""

    # Reserved words in a grammar. SQL has reserved words which we could
    # potentially make reserved in our grammar too; for now pretend we do
    # not care about SQL.
    reserved = dict(
        # IS="IS",
        IN="IN",
        # NULL="NULL",
        OR="OR",
        AND="AND",
        NOT="NOT",
        # BETWEEN="BETWEEN",
        # LIKE="LIKE",
        # ESCAPE="ESCAPE",
        # REGEXP="REGEXP"
    )

    # List of token names.
    tokens = (
        'NUMERIC_LITERAL',
        'TIME_LITERAL',
        'STRING_LITERAL',
        'RANGE_LITERAL',
        # 'DURATION_LITERAL',
        'IDENTIFIER',
        'LPAREN', 'RPAREN',
        'EQ', 'NE', 'LT', 'LE', 'GT', 'GE',
        'ADD', 'SUB', 'MUL', 'DIV', 'MOD',
        'COMMA'
    ) + tuple(reserved.values())

    # Regular expression rules for simple tokens
    t_LPAREN = r'\('
    t_RPAREN = r'\)'
    t_EQ = '='
    t_NE = '!='
    t_LT = '<'
    t_LE = '<='
    t_GT = '>'
    t_GE = '>='
    t_ADD = r'\+'
    t_SUB = '-'
    t_MUL = r'\*'
    t_DIV = '/'
    t_MOD = '%'
    t_COMMA = ','

    # A string containing ignored characters (spaces and tabs)
    t_ignore = ' \t'

    # Define a rule so we can track line numbers
    def t_newline(self, t):
        r'\n+'
        t.lexer.lineno += len(t.value)

    # quoted string prefixed with 'T'
    def t_TIME_LITERAL(self, t):
        r"T'.*?'"
        # strip the T prefix and quotes, e.g. T'2020-01-01' -> 2020-01-01
        t.value = t.value[2:-1]
        return t

    # quoted string
    def t_STRING_LITERAL(self, t):
        r"'.*?'"
        # strip quotes
        t.value = t.value[1:-1]
        return t

    # range literal in format N..M[:S], spaces allowed, see _RE_RANGE
    @lex.TOKEN(_RE_RANGE)
    def t_RANGE_LITERAL(self, t):
        match = re.match(_RE_RANGE, t.value)
        start = int(match.group("start"))
        stop = int(match.group("stop"))
        stride = match.group("stride")
        if stride is not None:
            stride = int(stride)
        t.value = (start, stop, stride)
        return t
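
    # For illustration, the token values produced above (assumed inputs):
    #
    #     "1..10"   -> (1, 10, None)
    #     "1..10:2" -> (1, 10, 2)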

    # Numbers are passed to the parser as strings; do not convert here.
    def t_NUMERIC_LITERAL(self, t):
        r"""\d+(\.\d*)?(e[-+]?\d+)? # 1, 1., 1.1, 1e10, 1.1e-10, etc.
        |
        \.\d+(e[-+]?\d+)? # .1, .1e10, .1e+10
        """
        return t

    # Identifiers may contain at most one dot; only ASCII is supported.
    def t_IDENTIFIER(self, t):
        r"[a-zA-Z_][a-zA-Z0-9_]*(\.[a-zA-Z_][a-zA-Z0-9_]*)?"
        # Check for reserved words
        t.type = self.reserved.get(t.value.upper(), 'IDENTIFIER')
        return t

    def t_error(self, t):
        """Error handling rule."""
        lexer = t.lexer
        raise ParserLexError(lexer.lexdata, t.value, lexer.lexpos, lexer.lineno)
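
# End-to-end sketch of the error path (hypothetical input; PLY passes the
# remaining unparsed text as t.value when it calls t_error):
#
#     lexer = ParserLex.make_lexer()
#     lexer.input("a @ b")
#     list(lexer)  # raises ParserLexError("Unexpected character at position 2")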