Coverage for python/lsst/daf/butler/registry/queries/expressions/parser/parserYacc.py: 21%
161 statements
coverage.py v7.3.2, created at 2023-10-27 09:44 +0000
# This file is part of daf_butler.
#
# Developed for the LSST Data Management System.
# This product includes software developed by the LSST Project
# (https://www.lsst.org).
# See the COPYRIGHT file at the top-level directory of this distribution
# for details of code ownership.
#
# This software is dual licensed under the GNU General Public License and also
# under a 3-clause BSD license. Recipients may choose which of these licenses
# to use; please see the files gpl-3.0.txt and/or bsd_license.txt,
# respectively. If you choose the GPL option then the following text applies
# (but note that there is still no warranty even if you opt for BSD instead):
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.

# type: ignore

"""Syntax definition for user expression parser."""

__all__ = ["ParserYacc", "ParserYaccError", "ParseError", "ParserEOFError"]

# -------------------------------
# Imports of standard modules --
# -------------------------------
import re
import warnings

# -----------------------------
# Imports for other modules --
# -----------------------------
import astropy.time
import astropy.utils.exceptions

# As of astropy 4.2, the erfa interface is shipped independently and
# ErfaWarning is no longer an AstropyWarning
try:
    import erfa
except ImportError:
    erfa = None

from .exprTree import (
    BinaryOp,
    Identifier,
    IsIn,
    NumericLiteral,
    Parens,
    RangeLiteral,
    StringLiteral,
    TimeLiteral,
    TupleNode,
    UnaryOp,
    function_call,
)
from .parserLex import ParserLex
from .ply import yacc

# ----------------------------------
# Local non-exported definitions --
# ----------------------------------

# The purpose of this regex is to guess the time format if it is not
# explicitly given in the string itself.
_re_time_str = re.compile(
    r"""
    ((?P<format>\w+)/)?    # optionally prefixed by "format/"
    (?P<value>
        (?P<number>-?(\d+(\.\d*)|(\.\d+)))    # floating point number
        |
        (?P<iso>\d+-\d+-\d+([ T]\d+:\d+(:\d+([.]\d*)?)?)?)    # iso(t)
        |
        (?P<fits>[+]\d+-\d+-\d+(T\d+:\d+:\d+([.]\d*)?)?)    # fits
        |
        (?P<yday>\d+:\d+(:\d+:\d+(:\d+([.]\d*)?)?)?)    # yday
    )
    (/(?P<scale>\w+))?    # optionally followed by "/scale"
    $
""",
    re.VERBOSE | re.IGNORECASE,
)
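
# Illustrative examples of strings this regex is intended to accept (assumed
# for documentation purposes, not taken from the test suite):
#
#     "51544.0"                 matches the "number" branch (later guessed as MJD)
#     "2020-03-30T12:20:33"     matches the "iso" branch (guessed as "isot")
#     "mjd/51544.0/tai"         explicit format prefix and scale suffix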


def _parseTimeString(time_str):
    """Try to convert a time string into `astropy.time.Time`.

    Parameters
    ----------
    time_str : `str`
        Input string.

    Returns
    -------
    time : `astropy.time.Time`

    Raises
    ------
    ValueError
        Raised if the input string has an unexpected format.
    """
    match = _re_time_str.match(time_str)
    if not match:
        raise ValueError(f'Time string "{time_str}" does not match known formats')

    value, fmt, scale = match.group("value", "format", "scale")
    if fmt is not None:
        fmt = fmt.lower()
        if fmt not in astropy.time.Time.FORMATS:
            raise ValueError(f'Time string "{time_str}" specifies unknown time format "{fmt}"')
    if scale is not None:
        scale = scale.lower()
        if scale not in astropy.time.Time.SCALES:
            raise ValueError(f'Time string "{time_str}" specifies unknown time scale "{scale}"')

    # convert number string to floating point
    if match.group("number") is not None:
        value = float(value)

    # guess format if not given
    if fmt is None:
        if match.group("number") is not None:
            fmt = "mjd"
        elif match.group("iso") is not None:
            if "T" in value or "t" in value:
                fmt = "isot"
            else:
                fmt = "iso"
        elif match.group("fits") is not None:
            fmt = "fits"
        elif match.group("yday") is not None:
            fmt = "yday"
        assert fmt is not None

    # guess scale if not given
    if scale is None:
        if fmt in ("iso", "isot", "fits", "yday", "unix"):
            scale = "utc"
        elif fmt == "cxcsec":
            scale = "tt"
        else:
            scale = "tai"

    try:
        # Hide warnings about future dates
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", category=astropy.utils.exceptions.AstropyWarning)
            if erfa is not None:
                warnings.simplefilter("ignore", category=erfa.ErfaWarning)
            value = astropy.time.Time(value, format=fmt, scale=scale)
    except ValueError:
        # astropy raises a very verbose exception that is not very useful in
        # many contexts; just say we do not like the input.
        raise ValueError(f'Time string "{time_str}" does not match format "{fmt}"') from None

    return value
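
# A minimal usage sketch for _parseTimeString (values are illustrative
# assumptions, not taken from any test):
#
#     t = _parseTimeString("mjd/51544.0/tai")    # Time(51544.0, format="mjd", scale="tai")
#     t = _parseTimeString("2020-03-30T12:20:33")    # format and scale guessed as "isot"/"utc"
#     _parseTimeString("not-a-time")    # raises ValueError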


# ------------------------
# Exported definitions --
# ------------------------


class ParserYaccError(Exception):
    """Base class for exceptions generated by parser."""

    pass


class ParseError(ParserYaccError):
    """Exception raised for parsing errors.

    Attributes
    ----------
    expression : str
        Full initial expression being parsed
    token : str
        Current token at parsing position
    pos : int
        Current parsing position, offset from beginning of expression in
        characters
    lineno : int
        Current line number in the expression
    posInLine : int
        Parsing position in current line, 0-based
    """

    def __init__(self, expression, token, pos, lineno):
        self.expression = expression
        self.token = token
        self.pos = pos
        self.lineno = lineno
        self.posInLine = self._posInLine()
        msg = "Syntax error at or near '{0}' (line: {1}, pos: {2})"
        msg = msg.format(token, lineno, self.posInLine + 1)
        ParserYaccError.__init__(self, msg)

    def _posInLine(self):
        """Return position in current line"""
        lines = self.expression.split("\n")
        pos = self.pos
        for line in lines[: self.lineno - 1]:
            # +1 for newline
            pos -= len(line) + 1
        return pos
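
    # Worked example of the offset arithmetic above (values chosen for
    # illustration): for expression "foo\nbar baz" with pos=8 and lineno=2,
    # the loop subtracts len("foo") + 1 == 4, giving posInLine == 4, which
    # points at the "b" of "baz" in the second line.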


class ParserEOFError(ParserYaccError):
    """Exception raised when end of input is reached while the parser still
    expects more input.
    """

    def __init__(self):
        Exception.__init__(self, "End of input reached while expecting further input")


class ParserYacc:
    """Class which defines the PLY grammar.

    Based on the MySQL grammar for expressions
    (https://dev.mysql.com/doc/refman/5.7/en/expressions.html).

    Parameters
    ----------
    idMap : `collections.abc.Mapping` [ `str`, `Node` ], optional
        Mapping that provides substitutions for identifiers in the expression.
        The key in the map is the identifier name, the value is the
        `exprTree.Node` instance that will replace the identifier in the full
        expression. If an identifier does not exist in the mapping then an
        `Identifier` node is inserted into the parse tree.
    **kwargs
        Optional keyword arguments that are passed to the `yacc.yacc`
        constructor.
    """

    def __init__(self, idMap=None, **kwargs):
        kw = dict(write_tables=0, debug=False)
        kw.update(kwargs)

        self.parser = yacc.yacc(module=self, **kw)
        self._idMap = idMap or {}

    def parse(self, input, lexer=None, debug=False, tracking=False):
        """Parse the input expression and return the parsed tree object.

        This is a trivial wrapper for the yacc.LRParser.parse method which
        provides a lexer if one is not given in the arguments.

        Parameters
        ----------
        input : str
            Expression to parse.
        lexer : object, optional
            Lexer instance; if not given then ParserLex.make_lexer() is
            called to create one.
        debug : bool, optional
            Set to True for debugging output.
        tracking : bool, optional
            Set to True for tracking line numbers in the parser.
        """
        # make lexer
        if lexer is None:
            lexer = ParserLex.make_lexer()
        tree = self.parser.parse(input=input, lexer=lexer, debug=debug, tracking=tracking)
        return tree
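
    # Usage sketch (illustrative; the identifier names below are made up and
    # assume the default lexer built by ParserLex.make_lexer()):
    #
    #     parser = ParserYacc()
    #     tree = parser.parse("visit > 100 AND band IN ('g', 'r')")
    #
    # ``tree`` is the root exprTree.Node of the parsed expression, or None
    # for empty input.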

    tokens = ParserLex.tokens[:]

    precedence = (
        ("left", "OR"),
        ("left", "AND"),
        ("nonassoc", "OVERLAPS"),  # Nonassociative operators
        ("nonassoc", "EQ", "NE"),  # Nonassociative operators
        ("nonassoc", "LT", "LE", "GT", "GE"),  # Nonassociative operators
        ("left", "ADD", "SUB"),
        ("left", "MUL", "DIV", "MOD"),
        ("right", "UPLUS", "UMINUS", "NOT"),  # unary plus, unary minus, and NOT
    )
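
    # With the precedence above, later entries bind tighter, so an expression
    # like "a = 1 OR b = 2 AND c = 3" groups as "(a = 1) OR ((b = 2) AND (c = 3))"
    # and "1 + 2 * 3" groups as "1 + (2 * 3)" (identifiers here are illustrative).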

    # this is the starting rule
    def p_input(self, p):
        """input : expr
        | empty
        """
        p[0] = p[1]

    def p_empty(self, p):
        """empty :"""
        p[0] = None

    def p_expr(self, p):
        """expr : expr OR expr
        | expr AND expr
        | NOT expr
        | bool_primary
        """
        if len(p) == 4:
            p[0] = BinaryOp(lhs=p[1], op=p[2].upper(), rhs=p[3])
        elif len(p) == 3:
            p[0] = UnaryOp(op=p[1].upper(), operand=p[2])
        else:
            p[0] = p[1]

    def p_bool_primary(self, p):
        """bool_primary : bool_primary EQ predicate
        | bool_primary NE predicate
        | bool_primary LT predicate
        | bool_primary LE predicate
        | bool_primary GE predicate
        | bool_primary GT predicate
        | bool_primary OVERLAPS predicate
        | predicate
        """
        if len(p) == 2:
            p[0] = p[1]
        else:
            p[0] = BinaryOp(lhs=p[1], op=p[2], rhs=p[3])

    def p_predicate(self, p):
        """predicate : bit_expr IN LPAREN literal_or_id_list RPAREN
        | bit_expr NOT IN LPAREN literal_or_id_list RPAREN
        | bit_expr
        """
        if len(p) == 6:
            p[0] = IsIn(lhs=p[1], values=p[4])
        elif len(p) == 7:
            p[0] = IsIn(lhs=p[1], values=p[5], not_in=True)
        else:
            p[0] = p[1]
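
    # Illustrative examples for the predicate rule (identifier and values are
    # made up): "visit IN (1, 2, 3)" produces IsIn with three literal values,
    # while "visit NOT IN (1, 2, 3)" produces the same node with not_in=True.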

    def p_identifier(self, p):
        """identifier : SIMPLE_IDENTIFIER
        | QUALIFIED_IDENTIFIER
        """
        node = self._idMap.get(p[1])
        if node is None:
            node = Identifier(p[1])
        p[0] = node

    def p_literal_or_id_list(self, p):
        """literal_or_id_list : literal_or_id_list COMMA literal
        | literal_or_id_list COMMA identifier
        | literal
        | identifier
        """
        if len(p) == 2:
            p[0] = [p[1]]
        else:
            p[0] = p[1] + [p[3]]

    def p_bit_expr(self, p):
        """bit_expr : bit_expr ADD bit_expr
        | bit_expr SUB bit_expr
        | bit_expr MUL bit_expr
        | bit_expr DIV bit_expr
        | bit_expr MOD bit_expr
        | simple_expr
        """
        if len(p) == 2:
            p[0] = p[1]
        else:
            p[0] = BinaryOp(lhs=p[1], op=p[2], rhs=p[3])

    def p_simple_expr_lit(self, p):
        """simple_expr : literal"""
        p[0] = p[1]

    def p_simple_expr_id(self, p):
        """simple_expr : identifier"""
        p[0] = p[1]

    def p_simple_expr_function_call(self, p):
        """simple_expr : function_call"""
        p[0] = p[1]

    def p_simple_expr_unary(self, p):
        """simple_expr : ADD simple_expr %prec UPLUS
        | SUB simple_expr %prec UMINUS
        """
        p[0] = UnaryOp(op=p[1], operand=p[2])

    def p_simple_expr_paren(self, p):
        """simple_expr : LPAREN expr RPAREN"""
        p[0] = Parens(p[2])

    def p_simple_expr_tuple(self, p):
        """simple_expr : LPAREN expr COMMA expr RPAREN"""
        # For now we only support tuples with two items;
        # these are used for time ranges.
        p[0] = TupleNode((p[2], p[4]))
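
    # Illustrative example (assuming the lexer's T'...' syntax for time
    # literals, which is defined in parserLex rather than here): an expression
    # like "(T'2020-01-01', T'2020-02-01')" becomes a TupleNode holding two
    # TimeLiteral nodes.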

    def p_literal_num(self, p):
        """literal : NUMERIC_LITERAL"""
        p[0] = NumericLiteral(p[1])

    def p_literal_num_signed(self, p):
        """literal : ADD NUMERIC_LITERAL %prec UPLUS
        | SUB NUMERIC_LITERAL %prec UMINUS
        """
        p[0] = NumericLiteral(p[1] + p[2])

    def p_literal_str(self, p):
        """literal : STRING_LITERAL"""
        p[0] = StringLiteral(p[1])

    def p_literal_time(self, p):
        """literal : TIME_LITERAL"""
        try:
            value = _parseTimeString(p[1])
        except ValueError as e:
            raise ParseError(p.lexer.lexdata, p[1], p.lexpos(1), p.lineno(1)) from e
        p[0] = TimeLiteral(value)

    def p_literal_range(self, p):
        """literal : RANGE_LITERAL"""
        # RANGE_LITERAL value is a tuple of three numbers
        start, stop, stride = p[1]
        p[0] = RangeLiteral(start, stop, stride)
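
    # Illustrative example (assuming the lexer's "start..stop:stride" range
    # syntax, which is defined in parserLex rather than here): a token produced
    # from "1..10:2" would arrive as the tuple (1, 10, 2) and become
    # RangeLiteral(1, 10, 2).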

    def p_function_call(self, p):
        """function_call : SIMPLE_IDENTIFIER LPAREN expr_list RPAREN"""
        p[0] = function_call(p[1], p[3])
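
    # Illustrative example (the function name "myfunc" is made up): text like
    # "myfunc(a, 1.5)" matches this rule, and the resulting node is built by
    # the imported exprTree.function_call helper.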

    def p_expr_list(self, p):
        """expr_list : expr_list COMMA expr
        | expr
        | empty
        """
        if len(p) == 2:
            if p[1] is None:
                p[0] = []
            else:
                p[0] = [p[1]]
        else:
            p[0] = p[1] + [p[3]]

    # ---------- end of all grammar rules ----------

    # Error rule for syntax errors
    def p_error(self, p):
        if p is None:
            raise ParserEOFError()
        else:
            raise ParseError(p.lexer.lexdata, p.value, p.lexpos, p.lineno)