From 2f34a9bb634a127b05cc4444e0d307d1bcf0bc25 Mon Sep 17 00:00:00 2001 From: Aryan Roy Date: Sat, 14 Sep 2024 15:23:57 +0530 Subject: [PATCH] fixing parsing tree bug and using lark standalone Signed-off-by: Aryan Roy --- src/formulate/__init__.py | 10 +- src/formulate/convert_ptree.py | 21 + src/formulate/matching_tree.py | 4 +- src/formulate/numexpr.py | 51 - src/formulate/numexpr_parser.py | 3252 ++++++++++++++++++++++++++ src/formulate/ttreeformula.py | 58 - src/formulate/ttreeformula_parser.py | 3249 +++++++++++++++++++++++++ tests/test_numexpr.py | 40 +- tests/test_root.py | 40 +- tests/test_ttreeformula.py | 50 +- 10 files changed, 6597 insertions(+), 178 deletions(-) create mode 100644 src/formulate/convert_ptree.py delete mode 100644 src/formulate/numexpr.py create mode 100644 src/formulate/numexpr_parser.py delete mode 100644 src/formulate/ttreeformula.py create mode 100644 src/formulate/ttreeformula_parser.py diff --git a/src/formulate/__init__.py b/src/formulate/__init__.py index e362536..34e947b 100644 --- a/src/formulate/__init__.py +++ b/src/formulate/__init__.py @@ -7,7 +7,9 @@ from __future__ import annotations -from . import ttreeformula, numexpr # noqa # noqa +from . import ttreeformula_parser, numexpr_parser # noqa # noqa + +from . import convert_ptree from . import AST @@ -20,11 +22,13 @@ def from_root(exp : str, **kwargs) -> AST : - parser = ttreeformula.Lark_StandAlone() + parser = ttreeformula_parser.Lark_StandAlone() ptree = parser.parse(exp) + convert_ptree.convert_ptree(ptree) return toast.toast(ptree, nxp=False) def from_numexpr(exp : str, **kwargs) -> AST : - parser = numexpr.Lark_StandAlone() + parser = numexpr_parser.Lark_StandAlone() ptree = parser.parse(exp) + convert_ptree.convert_ptree(ptree) return toast.toast(ptree, nxp=True) diff --git a/src/formulate/convert_ptree.py b/src/formulate/convert_ptree.py new file mode 100644 index 0000000..60135d3 --- /dev/null +++ b/src/formulate/convert_ptree.py @@ -0,0 +1,21 @@ +""" +Copyright (c) 2023 Aryan Roy. All rights reserved. + +formulate: Easy conversions between different styles of expressions +""" + +from . import matching_tree +from . import numexpr_parser +from . import ttreeformula_parser + +def convert_ptree(raw_ptree): + + if isinstance(raw_ptree, numexpr_parser.Token) or isinstance(raw_ptree, ttreeformula_parser.Token): + return + + raw_ptree.__class__ = matching_tree.ptnode + + for x in raw_ptree.children: + convert_ptree(x) + + return diff --git a/src/formulate/matching_tree.py b/src/formulate/matching_tree.py index 081d1a7..04f3a3b 100644 --- a/src/formulate/matching_tree.py +++ b/src/formulate/matching_tree.py @@ -6,7 +6,9 @@ import lark +from . import numexpr_parser +from . import ttreeformula_parser -class ptnode(lark.tree.Tree): +class ptnode(numexpr_parser.Tree, ttreeformula_parser.Tree): __match_args__ = ("data", "children") diff --git a/src/formulate/numexpr.py b/src/formulate/numexpr.py deleted file mode 100644 index dbdf05e..0000000 --- a/src/formulate/numexpr.py +++ /dev/null @@ -1,51 +0,0 @@ -""" -Copyright (c) 2023 Aryan Roy. All rights reserved. - -formulate: Easy conversions between different styles of expressions -""" -from __future__ import annotations - -import lark - -from . 
import matching_tree - -expression_grammar = r''' -start: comparison -comparison: bitwise_or | comparison ">" bitwise_or -> gt - | comparison ">=" bitwise_or -> gte - | comparison "<" bitwise_or -> lt - | comparison "<=" bitwise_or -> lte - | comparison ("!=" ) bitwise_or -> neq - | comparison "==" bitwise_or -> eq -bitwise_or: bitwise_xor | bitwise_or "|" bitwise_xor -> bor -bitwise_xor: bitwise_and | bitwise_xor "^" bitwise_and -> bxor -bitwise_and: bitwise_inversion - | bitwise_and "&" bitwise_inversion -> band -bitwise_inversion: shift_expr | "~" bitwise_inversion -> binv -shift_expr: sum | shift_expr "<<" sum -> lshift | shift_expr ">>" sum -> rshift -sum: term | term "+" sum -> add | term "-" sum -> sub -term: factor | factor "*" term -> mul - | factor "/" term -> div - | factor "%" term -> mod -factor: pow | "+" factor -> pos - | "-" factor -> neg -pow: atom | atom "**" factor -> pow -atom: "(" comparison ")" | CNAME -> symbol - | NUMBER -> literal - | func_name trailer -> func -func_name: CNAME -trailer: "(" [arglist] ")" -arglist: comparison ("," comparison)* [","] -%import common.CNAME -%import common.NUMBER -%import common.WS -%ignore WS -''' - - -def exp_to_ptree(exp: str): - parser = lark.Lark( - expression_grammar, parser="lalr", tree_class=matching_tree.ptnode - ) - print(parser.parse(exp).pretty()) - return parser.parse(exp) diff --git a/src/formulate/numexpr_parser.py b/src/formulate/numexpr_parser.py new file mode 100644 index 0000000..d0b7a11 --- /dev/null +++ b/src/formulate/numexpr_parser.py @@ -0,0 +1,3252 @@ +# The file was automatically generated by Lark v1.1.7 +__version__ = "1.1.7" + +# +# +# Lark Stand-alone Generator Tool +# ---------------------------------- +# Generates a stand-alone LALR(1) parser +# +# Git: https://github.com/erezsh/lark +# Author: Erez Shinan (erezshin@gmail.com) +# +# +# >>> LICENSE +# +# This tool and its generated code use a separate license from Lark, +# and are subject to the terms of the Mozilla Public License, v. 2.0. +# If a copy of the MPL was not distributed with this +# file, You can obtain one at https://mozilla.org/MPL/2.0/. +# +# If you wish to purchase a commercial license for this tool and its +# generated code, you may contact me via email or otherwise. +# +# If MPL2 is incompatible with your free or open-source project, +# contact me and we'll work it out. 
+# +# + +from abc import ABC, abstractmethod +from collections.abc import Sequence +from types import ModuleType +from typing import ( + TypeVar, Generic, Type, Tuple, List, Dict, Iterator, Collection, Callable, Optional, FrozenSet, Any, + Union, Iterable, IO, TYPE_CHECKING, overload, + Pattern as REPattern, ClassVar, Set, Mapping +) + + +class LarkError(Exception): + pass + + +class ConfigurationError(LarkError, ValueError): + pass + + +def assert_config(value, options: Collection, msg='Got %r, expected one of %s'): + if value not in options: + raise ConfigurationError(msg % (value, options)) + + +class GrammarError(LarkError): + pass + + +class ParseError(LarkError): + pass + + +class LexError(LarkError): + pass + +T = TypeVar('T') + +class UnexpectedInput(LarkError): + #-- + line: int + column: int + pos_in_stream = None + state: Any + _terminals_by_name = None + + def get_context(self, text: str, span: int=40) -> str: + #-- + assert self.pos_in_stream is not None, self + pos = self.pos_in_stream + start = max(pos - span, 0) + end = pos + span + if not isinstance(text, bytes): + before = text[start:pos].rsplit('\n', 1)[-1] + after = text[pos:end].split('\n', 1)[0] + return before + after + '\n' + ' ' * len(before.expandtabs()) + '^\n' + else: + before = text[start:pos].rsplit(b'\n', 1)[-1] + after = text[pos:end].split(b'\n', 1)[0] + return (before + after + b'\n' + b' ' * len(before.expandtabs()) + b'^\n').decode("ascii", "backslashreplace") + + def match_examples(self, parse_fn: 'Callable[[str], Tree]', + examples: Union[Mapping[T, Iterable[str]], Iterable[Tuple[T, Iterable[str]]]], + token_type_match_fallback: bool=False, + use_accepts: bool=True + ) -> Optional[T]: + #-- + assert self.state is not None, "Not supported for this exception" + + if isinstance(examples, Mapping): + examples = examples.items() + + candidate = (None, False) + for i, (label, example) in enumerate(examples): + assert not isinstance(example, str), "Expecting a list" + + for j, malformed in enumerate(example): + try: + parse_fn(malformed) + except UnexpectedInput as ut: + if ut.state == self.state: + if ( + use_accepts + and isinstance(self, UnexpectedToken) + and isinstance(ut, UnexpectedToken) + and ut.accepts != self.accepts + ): + logger.debug("Different accepts with same state[%d]: %s != %s at example [%s][%s]" % + (self.state, self.accepts, ut.accepts, i, j)) + continue + if ( + isinstance(self, (UnexpectedToken, UnexpectedEOF)) + and isinstance(ut, (UnexpectedToken, UnexpectedEOF)) + ): + if ut.token == self.token: ## + + logger.debug("Exact Match at example [%s][%s]" % (i, j)) + return label + + if token_type_match_fallback: + ## + + if (ut.token.type == self.token.type) and not candidate[-1]: + logger.debug("Token Type Fallback at example [%s][%s]" % (i, j)) + candidate = label, True + + if candidate[0] is None: + logger.debug("Same State match at example [%s][%s]" % (i, j)) + candidate = label, False + + return candidate[0] + + def _format_expected(self, expected): + if self._terminals_by_name: + d = self._terminals_by_name + expected = [d[t_name].user_repr() if t_name in d else t_name for t_name in expected] + return "Expected one of: \n\t* %s\n" % '\n\t* '.join(expected) + + +class UnexpectedEOF(ParseError, UnexpectedInput): + #-- + expected: 'List[Token]' + + def __init__(self, expected, state=None, terminals_by_name=None): + super(UnexpectedEOF, self).__init__() + + self.expected = expected + self.state = state + from .lexer import Token + self.token = Token("", "") ## + + self.pos_in_stream = -1 
+ self.line = -1 + self.column = -1 + self._terminals_by_name = terminals_by_name + + + def __str__(self): + message = "Unexpected end-of-input. " + message += self._format_expected(self.expected) + return message + + +class UnexpectedCharacters(LexError, UnexpectedInput): + #-- + + allowed: Set[str] + considered_tokens: Set[Any] + + def __init__(self, seq, lex_pos, line, column, allowed=None, considered_tokens=None, state=None, token_history=None, + terminals_by_name=None, considered_rules=None): + super(UnexpectedCharacters, self).__init__() + + ## + + self.line = line + self.column = column + self.pos_in_stream = lex_pos + self.state = state + self._terminals_by_name = terminals_by_name + + self.allowed = allowed + self.considered_tokens = considered_tokens + self.considered_rules = considered_rules + self.token_history = token_history + + if isinstance(seq, bytes): + self.char = seq[lex_pos:lex_pos + 1].decode("ascii", "backslashreplace") + else: + self.char = seq[lex_pos] + self._context = self.get_context(seq) + + + def __str__(self): + message = "No terminal matches '%s' in the current parser context, at line %d col %d" % (self.char, self.line, self.column) + message += '\n\n' + self._context + if self.allowed: + message += self._format_expected(self.allowed) + if self.token_history: + message += '\nPrevious tokens: %s\n' % ', '.join(repr(t) for t in self.token_history) + return message + + +class UnexpectedToken(ParseError, UnexpectedInput): + #-- + + expected: Set[str] + considered_rules: Set[str] + interactive_parser: 'InteractiveParser' + + def __init__(self, token, expected, considered_rules=None, state=None, interactive_parser=None, terminals_by_name=None, token_history=None): + super(UnexpectedToken, self).__init__() + + ## + + self.line = getattr(token, 'line', '?') + self.column = getattr(token, 'column', '?') + self.pos_in_stream = getattr(token, 'start_pos', None) + self.state = state + + self.token = token + self.expected = expected ## + + self._accepts = NO_VALUE + self.considered_rules = considered_rules + self.interactive_parser = interactive_parser + self._terminals_by_name = terminals_by_name + self.token_history = token_history + + + @property + def accepts(self) -> Set[str]: + if self._accepts is NO_VALUE: + self._accepts = self.interactive_parser and self.interactive_parser.accepts() + return self._accepts + + def __str__(self): + message = ("Unexpected token %r at line %s, column %s.\n%s" + % (self.token, self.line, self.column, self._format_expected(self.accepts or self.expected))) + if self.token_history: + message += "Previous tokens: %r\n" % self.token_history + + return message + + + +class VisitError(LarkError): + #-- + + obj: 'Union[Tree, Token]' + orig_exc: Exception + + def __init__(self, rule, obj, orig_exc): + message = 'Error trying to process rule "%s":\n\n%s' % (rule, orig_exc) + super(VisitError, self).__init__(message) + + self.rule = rule + self.obj = obj + self.orig_exc = orig_exc + + +class MissingVariableError(LarkError): + pass + + +import sys, re +import logging + +logger: logging.Logger = logging.getLogger("lark") +logger.addHandler(logging.StreamHandler()) +## + +## + +logger.setLevel(logging.CRITICAL) + + +NO_VALUE = object() + +T = TypeVar("T") + + +def classify(seq: Iterable, key: Optional[Callable] = None, value: Optional[Callable] = None) -> Dict: + d: Dict[Any, Any] = {} + for item in seq: + k = key(item) if (key is not None) else item + v = value(item) if (value is not None) else item + try: + d[k].append(v) + except KeyError: + 
d[k] = [v] + return d + + +def _deserialize(data: Any, namespace: Dict[str, Any], memo: Dict) -> Any: + if isinstance(data, dict): + if '__type__' in data: ## + + class_ = namespace[data['__type__']] + return class_.deserialize(data, memo) + elif '@' in data: + return memo[data['@']] + return {key:_deserialize(value, namespace, memo) for key, value in data.items()} + elif isinstance(data, list): + return [_deserialize(value, namespace, memo) for value in data] + return data + + +_T = TypeVar("_T", bound="Serialize") + +class Serialize: + #-- + + def memo_serialize(self, types_to_memoize: List) -> Any: + memo = SerializeMemoizer(types_to_memoize) + return self.serialize(memo), memo.serialize() + + def serialize(self, memo = None) -> Dict[str, Any]: + if memo and memo.in_types(self): + return {'@': memo.memoized.get(self)} + + fields = getattr(self, '__serialize_fields__') + res = {f: _serialize(getattr(self, f), memo) for f in fields} + res['__type__'] = type(self).__name__ + if hasattr(self, '_serialize'): + self._serialize(res, memo) ## + + return res + + @classmethod + def deserialize(cls: Type[_T], data: Dict[str, Any], memo: Dict[int, Any]) -> _T: + namespace = getattr(cls, '__serialize_namespace__', []) + namespace = {c.__name__:c for c in namespace} + + fields = getattr(cls, '__serialize_fields__') + + if '@' in data: + return memo[data['@']] + + inst = cls.__new__(cls) + for f in fields: + try: + setattr(inst, f, _deserialize(data[f], namespace, memo)) + except KeyError as e: + raise KeyError("Cannot find key for class", cls, e) + + if hasattr(inst, '_deserialize'): + inst._deserialize() ## + + + return inst + + +class SerializeMemoizer(Serialize): + #-- + + __serialize_fields__ = 'memoized', + + def __init__(self, types_to_memoize: List) -> None: + self.types_to_memoize = tuple(types_to_memoize) + self.memoized = Enumerator() + + def in_types(self, value: Serialize) -> bool: + return isinstance(value, self.types_to_memoize) + + def serialize(self) -> Dict[int, Any]: ## + + return _serialize(self.memoized.reversed(), None) + + @classmethod + def deserialize(cls, data: Dict[int, Any], namespace: Dict[str, Any], memo: Dict[Any, Any]) -> Dict[int, Any]: ## + + return _deserialize(data, namespace, memo) + + +try: + import regex + _has_regex = True +except ImportError: + _has_regex = False + +if sys.version_info >= (3, 11): + import re._parser as sre_parse + import re._constants as sre_constants +else: + import sre_parse + import sre_constants + +categ_pattern = re.compile(r'\\p{[A-Za-z_]+}') + +def get_regexp_width(expr: str) -> Union[Tuple[int, int], List[int]]: + if _has_regex: + ## + + ## + + ## + + regexp_final = re.sub(categ_pattern, 'A', expr) + else: + if re.search(categ_pattern, expr): + raise ImportError('`regex` module must be installed in order to use Unicode categories.', expr) + regexp_final = expr + try: + ## + + return [int(x) for x in sre_parse.parse(regexp_final).getwidth()] ## + + except sre_constants.error: + if not _has_regex: + raise ValueError(expr) + else: + ## + + ## + + c = regex.compile(regexp_final) + if c.match('') is None: + ## + + return 1, int(sre_constants.MAXREPEAT) + else: + return 0, int(sre_constants.MAXREPEAT) + + +from collections import OrderedDict + +class Meta: + + empty: bool + line: int + column: int + start_pos: int + end_line: int + end_column: int + end_pos: int + orig_expansion: 'List[TerminalDef]' + match_tree: bool + + def __init__(self): + self.empty = True + + +_Leaf_T = TypeVar("_Leaf_T") +Branch = Union[_Leaf_T, 'Tree[_Leaf_T]'] + 
+class numexprnode(): + __match_args__ = ("data", "children") + + +class Tree(Generic[_Leaf_T], numexprnode): + #-- + + data: str + children: 'List[Branch[_Leaf_T]]' + + def __init__(self, data: str, children: 'List[Branch[_Leaf_T]]', meta: Optional[Meta]=None) -> None: + self.data = data + self.children = children + self._meta = meta + + @property + def meta(self) -> Meta: + if self._meta is None: + self._meta = Meta() + return self._meta + + def __repr__(self): + return 'Tree(%r, %r)' % (self.data, self.children) + + def _pretty_label(self): + return self.data + + def _pretty(self, level, indent_str): + yield f'{indent_str*level}{self._pretty_label()}' + if len(self.children) == 1 and not isinstance(self.children[0], Tree): + yield f'\t{self.children[0]}\n' + else: + yield '\n' + for n in self.children: + if isinstance(n, Tree): + yield from n._pretty(level+1, indent_str) + else: + yield f'{indent_str*(level+1)}{n}\n' + + def pretty(self, indent_str: str=' ') -> str: + #-- + return ''.join(self._pretty(0, indent_str)) + + def __rich__(self, parent:Optional['rich.tree.Tree']=None) -> 'rich.tree.Tree': + #-- + return self._rich(parent) + + def _rich(self, parent): + if parent: + tree = parent.add(f'[bold]{self.data}[/bold]') + else: + import rich.tree + tree = rich.tree.Tree(self.data) + + for c in self.children: + if isinstance(c, Tree): + c._rich(tree) + else: + tree.add(f'[green]{c}[/green]') + + return tree + + def __eq__(self, other): + try: + return self.data == other.data and self.children == other.children + except AttributeError: + return False + + def __ne__(self, other): + return not (self == other) + + def __hash__(self) -> int: + return hash((self.data, tuple(self.children))) + + def iter_subtrees(self) -> 'Iterator[Tree[_Leaf_T]]': + #-- + queue = [self] + subtrees = OrderedDict() + for subtree in queue: + subtrees[id(subtree)] = subtree + ## + + queue += [c for c in reversed(subtree.children) ## + + if isinstance(c, Tree) and id(c) not in subtrees] + + del queue + return reversed(list(subtrees.values())) + + def iter_subtrees_topdown(self): + #-- + stack = [self] + stack_append = stack.append + stack_pop = stack.pop + while stack: + node = stack_pop() + if not isinstance(node, Tree): + continue + yield node + for child in reversed(node.children): + stack_append(child) + + def find_pred(self, pred: 'Callable[[Tree[_Leaf_T]], bool]') -> 'Iterator[Tree[_Leaf_T]]': + #-- + return filter(pred, self.iter_subtrees()) + + def find_data(self, data: str) -> 'Iterator[Tree[_Leaf_T]]': + #-- + return self.find_pred(lambda t: t.data == data) + + +from functools import wraps, update_wrapper +from inspect import getmembers, getmro + +_Return_T = TypeVar('_Return_T') +_Return_V = TypeVar('_Return_V') +_Leaf_T = TypeVar('_Leaf_T') +_Leaf_U = TypeVar('_Leaf_U') +_R = TypeVar('_R') +_FUNC = Callable[..., _Return_T] +_DECORATED = Union[_FUNC, type] + +class _DiscardType: + #-- + + def __repr__(self): + return "lark.visitors.Discard" + +Discard = _DiscardType() + +## + + +class _Decoratable: + #-- + + @classmethod + def _apply_v_args(cls, visit_wrapper): + mro = getmro(cls) + assert mro[0] is cls + libmembers = {name for _cls in mro[1:] for name, _ in getmembers(_cls)} + for name, value in getmembers(cls): + + ## + + if name.startswith('_') or (name in libmembers and name not in cls.__dict__): + continue + if not callable(value): + continue + + ## + + if isinstance(cls.__dict__[name], _VArgsWrapper): + continue + + setattr(cls, name, _VArgsWrapper(cls.__dict__[name], visit_wrapper)) + return cls 
+ + def __class_getitem__(cls, _): + return cls + + +class Transformer(_Decoratable, ABC, Generic[_Leaf_T, _Return_T]): + #-- + __visit_tokens__ = True ## + + + def __init__(self, visit_tokens: bool=True) -> None: + self.__visit_tokens__ = visit_tokens + + def _call_userfunc(self, tree, new_children=None): + ## + + children = new_children if new_children is not None else tree.children + try: + f = getattr(self, tree.data) + except AttributeError: + return self.__default__(tree.data, children, tree.meta) + else: + try: + wrapper = getattr(f, 'visit_wrapper', None) + if wrapper is not None: + return f.visit_wrapper(f, tree.data, children, tree.meta) + else: + return f(children) + except GrammarError: + raise + except Exception as e: + raise VisitError(tree.data, tree, e) + + def _call_userfunc_token(self, token): + try: + f = getattr(self, token.type) + except AttributeError: + return self.__default_token__(token) + else: + try: + return f(token) + except GrammarError: + raise + except Exception as e: + raise VisitError(token.type, token, e) + + def _transform_children(self, children): + for c in children: + if isinstance(c, Tree): + res = self._transform_tree(c) + elif self.__visit_tokens__ and isinstance(c, Token): + res = self._call_userfunc_token(c) + else: + res = c + + if res is not Discard: + yield res + + def _transform_tree(self, tree): + children = list(self._transform_children(tree.children)) + return self._call_userfunc(tree, children) + + def transform(self, tree: Tree[_Leaf_T]) -> _Return_T: + #-- + return self._transform_tree(tree) + + def __mul__( + self: 'Transformer[_Leaf_T, Tree[_Leaf_U]]', + other: 'Union[Transformer[_Leaf_U, _Return_V], TransformerChain[_Leaf_U, _Return_V,]]' + ) -> 'TransformerChain[_Leaf_T, _Return_V]': + #-- + return TransformerChain(self, other) + + def __default__(self, data, children, meta): + #-- + return Tree(data, children, meta) + + def __default_token__(self, token): + #-- + return token + + +def merge_transformers(base_transformer=None, **transformers_to_merge): + #-- + if base_transformer is None: + base_transformer = Transformer() + for prefix, transformer in transformers_to_merge.items(): + for method_name in dir(transformer): + method = getattr(transformer, method_name) + if not callable(method): + continue + if method_name.startswith("_") or method_name == "transform": + continue + prefixed_method = prefix + "__" + method_name + if hasattr(base_transformer, prefixed_method): + raise AttributeError("Cannot merge: method '%s' appears more than once" % prefixed_method) + + setattr(base_transformer, prefixed_method, method) + + return base_transformer + + +class InlineTransformer(Transformer): ## + + def _call_userfunc(self, tree, new_children=None): + ## + + children = new_children if new_children is not None else tree.children + try: + f = getattr(self, tree.data) + except AttributeError: + return self.__default__(tree.data, children, tree.meta) + else: + return f(*children) + + +class TransformerChain(Generic[_Leaf_T, _Return_T]): + + transformers: 'Tuple[Union[Transformer, TransformerChain], ...]' + + def __init__(self, *transformers: 'Union[Transformer, TransformerChain]') -> None: + self.transformers = transformers + + def transform(self, tree: Tree[_Leaf_T]) -> _Return_T: + for t in self.transformers: + tree = t.transform(tree) + return cast(_Return_T, tree) + + def __mul__( + self: 'TransformerChain[_Leaf_T, Tree[_Leaf_U]]', + other: 'Union[Transformer[_Leaf_U, _Return_V], TransformerChain[_Leaf_U, _Return_V]]' + ) -> 
'TransformerChain[_Leaf_T, _Return_V]': + return TransformerChain(*self.transformers + (other,)) + + +class Transformer_InPlace(Transformer): + #-- + def _transform_tree(self, tree): ## + + return self._call_userfunc(tree) + + def transform(self, tree: Tree[_Leaf_T]) -> _Return_T: + for subtree in tree.iter_subtrees(): + subtree.children = list(self._transform_children(subtree.children)) + + return self._transform_tree(tree) + + +class Transformer_NonRecursive(Transformer): + #-- + + def transform(self, tree: Tree[_Leaf_T]) -> _Return_T: + ## + + rev_postfix = [] + q: List[Branch[_Leaf_T]] = [tree] + while q: + t = q.pop() + rev_postfix.append(t) + if isinstance(t, Tree): + q += t.children + + ## + + stack: List = [] + for x in reversed(rev_postfix): + if isinstance(x, Tree): + size = len(x.children) + if size: + args = stack[-size:] + del stack[-size:] + else: + args = [] + + res = self._call_userfunc(x, args) + if res is not Discard: + stack.append(res) + + elif self.__visit_tokens__ and isinstance(x, Token): + res = self._call_userfunc_token(x) + if res is not Discard: + stack.append(res) + else: + stack.append(x) + + result, = stack ## + + ## + + ## + + ## + + return cast(_Return_T, result) + + +class Transformer_InPlaceRecursive(Transformer): + #-- + def _transform_tree(self, tree): + tree.children = list(self._transform_children(tree.children)) + return self._call_userfunc(tree) + + +## + + +class VisitorBase: + def _call_userfunc(self, tree): + return getattr(self, tree.data, self.__default__)(tree) + + def __default__(self, tree): + #-- + return tree + + def __class_getitem__(cls, _): + return cls + + +class Visitor(VisitorBase, ABC, Generic[_Leaf_T]): + #-- + + def visit(self, tree: Tree[_Leaf_T]) -> Tree[_Leaf_T]: + #-- + for subtree in tree.iter_subtrees(): + self._call_userfunc(subtree) + return tree + + def visit_topdown(self, tree: Tree[_Leaf_T]) -> Tree[_Leaf_T]: + #-- + for subtree in tree.iter_subtrees_topdown(): + self._call_userfunc(subtree) + return tree + + +class Visitor_Recursive(VisitorBase, Generic[_Leaf_T]): + #-- + + def visit(self, tree: Tree[_Leaf_T]) -> Tree[_Leaf_T]: + #-- + for child in tree.children: + if isinstance(child, Tree): + self.visit(child) + + self._call_userfunc(tree) + return tree + + def visit_topdown(self,tree: Tree[_Leaf_T]) -> Tree[_Leaf_T]: + #-- + self._call_userfunc(tree) + + for child in tree.children: + if isinstance(child, Tree): + self.visit_topdown(child) + + return tree + + +class Interpreter(_Decoratable, ABC, Generic[_Leaf_T, _Return_T]): + #-- + + def visit(self, tree: Tree[_Leaf_T]) -> _Return_T: + ## + + ## + + ## + + return self._visit_tree(tree) + + def _visit_tree(self, tree: Tree[_Leaf_T]): + f = getattr(self, tree.data) + wrapper = getattr(f, 'visit_wrapper', None) + if wrapper is not None: + return f.visit_wrapper(f, tree.data, tree.children, tree.meta) + else: + return f(tree) + + def visit_children(self, tree: Tree[_Leaf_T]) -> List: + return [self._visit_tree(child) if isinstance(child, Tree) else child + for child in tree.children] + + def __getattr__(self, name): + return self.__default__ + + def __default__(self, tree): + return self.visit_children(tree) + + +_InterMethod = Callable[[Type[Interpreter], _Return_T], _R] + +def visit_children_decor(func: _InterMethod) -> _InterMethod: + #-- + @wraps(func) + def inner(cls, tree): + values = cls.visit_children(tree) + return func(cls, values) + return inner + +## + + +def _apply_v_args(obj, visit_wrapper): + try: + _apply = obj._apply_v_args + except AttributeError: + 
return _VArgsWrapper(obj, visit_wrapper) + else: + return _apply(visit_wrapper) + + +class _VArgsWrapper: + #-- + base_func: Callable + + def __init__(self, func: Callable, visit_wrapper: Callable[[Callable, str, list, Any], Any]): + if isinstance(func, _VArgsWrapper): + func = func.base_func + ## + + self.base_func = func ## + + self.visit_wrapper = visit_wrapper + update_wrapper(self, func) + + def __call__(self, *args, **kwargs): + return self.base_func(*args, **kwargs) + + def __get__(self, instance, owner=None): + try: + ## + + ## + + g = type(self.base_func).__get__ + except AttributeError: + return self + else: + return _VArgsWrapper(g(self.base_func, instance, owner), self.visit_wrapper) + + def __set_name__(self, owner, name): + try: + f = type(self.base_func).__set_name__ + except AttributeError: + return + else: + f(self.base_func, owner, name) + + +def _vargs_inline(f, _data, children, _meta): + return f(*children) +def _vargs_meta_inline(f, _data, children, meta): + return f(meta, *children) +def _vargs_meta(f, _data, children, meta): + return f(meta, children) +def _vargs_tree(f, data, children, meta): + return f(Tree(data, children, meta)) + + +def v_args(inline: bool = False, meta: bool = False, tree: bool = False, wrapper: Optional[Callable] = None) -> Callable[[_DECORATED], _DECORATED]: + #-- + if tree and (meta or inline): + raise ValueError("Visitor functions cannot combine 'tree' with 'meta' or 'inline'.") + + func = None + if meta: + if inline: + func = _vargs_meta_inline + else: + func = _vargs_meta + elif inline: + func = _vargs_inline + elif tree: + func = _vargs_tree + + if wrapper is not None: + if func is not None: + raise ValueError("Cannot use 'wrapper' along with 'tree', 'meta' or 'inline'.") + func = wrapper + + def _visitor_args_dec(obj): + return _apply_v_args(obj, func) + return _visitor_args_dec + + + +TOKEN_DEFAULT_PRIORITY = 0 + + +class Symbol(Serialize): + __slots__ = ('name',) + + name: str + is_term: ClassVar[bool] = NotImplemented + + def __init__(self, name: str) -> None: + self.name = name + + def __eq__(self, other): + assert isinstance(other, Symbol), other + return self.is_term == other.is_term and self.name == other.name + + def __ne__(self, other): + return not (self == other) + + def __hash__(self): + return hash(self.name) + + def __repr__(self): + return '%s(%r)' % (type(self).__name__, self.name) + + fullrepr = property(__repr__) + + def renamed(self, f): + return type(self)(f(self.name)) + + +class Terminal(Symbol): + __serialize_fields__ = 'name', 'filter_out' + + is_term: ClassVar[bool] = True + + def __init__(self, name, filter_out=False): + self.name = name + self.filter_out = filter_out + + @property + def fullrepr(self): + return '%s(%r, %r)' % (type(self).__name__, self.name, self.filter_out) + + def renamed(self, f): + return type(self)(f(self.name), self.filter_out) + + +class NonTerminal(Symbol): + __serialize_fields__ = 'name', + + is_term: ClassVar[bool] = False + + +class RuleOptions(Serialize): + __serialize_fields__ = 'keep_all_tokens', 'expand1', 'priority', 'template_source', 'empty_indices' + + keep_all_tokens: bool + expand1: bool + priority: Optional[int] + template_source: Optional[str] + empty_indices: Tuple[bool, ...] 
+ + def __init__(self, keep_all_tokens: bool=False, expand1: bool=False, priority: Optional[int]=None, template_source: Optional[str]=None, empty_indices: Tuple[bool, ...]=()) -> None: + self.keep_all_tokens = keep_all_tokens + self.expand1 = expand1 + self.priority = priority + self.template_source = template_source + self.empty_indices = empty_indices + + def __repr__(self): + return 'RuleOptions(%r, %r, %r, %r)' % ( + self.keep_all_tokens, + self.expand1, + self.priority, + self.template_source + ) + + +class Rule(Serialize): + #-- + __slots__ = ('origin', 'expansion', 'alias', 'options', 'order', '_hash') + + __serialize_fields__ = 'origin', 'expansion', 'order', 'alias', 'options' + __serialize_namespace__ = Terminal, NonTerminal, RuleOptions + + def __init__(self, origin, expansion, order=0, alias=None, options=None): + self.origin = origin + self.expansion = expansion + self.alias = alias + self.order = order + self.options = options or RuleOptions() + self._hash = hash((self.origin, tuple(self.expansion))) + + def _deserialize(self): + self._hash = hash((self.origin, tuple(self.expansion))) + + def __str__(self): + return '<%s : %s>' % (self.origin.name, ' '.join(x.name for x in self.expansion)) + + def __repr__(self): + return 'Rule(%r, %r, %r, %r)' % (self.origin, self.expansion, self.alias, self.options) + + def __hash__(self): + return self._hash + + def __eq__(self, other): + if not isinstance(other, Rule): + return False + return self.origin == other.origin and self.expansion == other.expansion + + + +from copy import copy + +try: ## + + has_interegular = bool(interegular) +except NameError: + has_interegular = False + +class Pattern(Serialize, ABC): + + value: str + flags: Collection[str] + raw: Optional[str] + type: ClassVar[str] + + def __init__(self, value: str, flags: Collection[str] = (), raw: Optional[str] = None) -> None: + self.value = value + self.flags = frozenset(flags) + self.raw = raw + + def __repr__(self): + return repr(self.to_regexp()) + + ## + + def __hash__(self): + return hash((type(self), self.value, self.flags)) + + def __eq__(self, other): + return type(self) == type(other) and self.value == other.value and self.flags == other.flags + + @abstractmethod + def to_regexp(self) -> str: + raise NotImplementedError() + + @property + @abstractmethod + def min_width(self) -> int: + raise NotImplementedError() + + @property + @abstractmethod + def max_width(self) -> int: + raise NotImplementedError() + + def _get_flags(self, value): + for f in self.flags: + value = ('(?%s:%s)' % (f, value)) + return value + + +class PatternStr(Pattern): + __serialize_fields__ = 'value', 'flags', 'raw' + + type: ClassVar[str] = "str" + + def to_regexp(self) -> str: + return self._get_flags(re.escape(self.value)) + + @property + def min_width(self) -> int: + return len(self.value) + + @property + def max_width(self) -> int: + return len(self.value) + + +class PatternRE(Pattern): + __serialize_fields__ = 'value', 'flags', 'raw', '_width' + + type: ClassVar[str] = "re" + + def to_regexp(self) -> str: + return self._get_flags(self.value) + + _width = None + def _get_width(self): + if self._width is None: + self._width = get_regexp_width(self.to_regexp()) + return self._width + + @property + def min_width(self) -> int: + return self._get_width()[0] + + @property + def max_width(self) -> int: + return self._get_width()[1] + + +class TerminalDef(Serialize): + __serialize_fields__ = 'name', 'pattern', 'priority' + __serialize_namespace__ = PatternStr, PatternRE + + name: str + pattern: 
Pattern + priority: int + + def __init__(self, name: str, pattern: Pattern, priority: int = TOKEN_DEFAULT_PRIORITY) -> None: + assert isinstance(pattern, Pattern), pattern + self.name = name + self.pattern = pattern + self.priority = priority + + def __repr__(self): + return '%s(%r, %r)' % (type(self).__name__, self.name, self.pattern) + + def user_repr(self) -> str: + if self.name.startswith('__'): ## + + return self.pattern.raw or self.name + else: + return self.name + +_T = TypeVar('_T', bound="Token") + +class Token(str): + #-- + __slots__ = ('type', 'start_pos', 'value', 'line', 'column', 'end_line', 'end_column', 'end_pos') + + __match_args__ = ('type', 'value') + + type: str + start_pos: Optional[int] + value: Any + line: Optional[int] + column: Optional[int] + end_line: Optional[int] + end_column: Optional[int] + end_pos: Optional[int] + + + @overload + def __new__( + cls, + type: str, + value: Any, + start_pos: Optional[int] = None, + line: Optional[int] = None, + column: Optional[int] = None, + end_line: Optional[int] = None, + end_column: Optional[int] = None, + end_pos: Optional[int] = None + ) -> 'Token': + ... + + @overload + def __new__( + cls, + type_: str, + value: Any, + start_pos: Optional[int] = None, + line: Optional[int] = None, + column: Optional[int] = None, + end_line: Optional[int] = None, + end_column: Optional[int] = None, + end_pos: Optional[int] = None + ) -> 'Token': ... + + def __new__(cls, *args, **kwargs): + if "type_" in kwargs: + warnings.warn("`type_` is deprecated use `type` instead", DeprecationWarning) + + if "type" in kwargs: + raise TypeError("Error: using both 'type' and the deprecated 'type_' as arguments.") + kwargs["type"] = kwargs.pop("type_") + + return cls._future_new(*args, **kwargs) + + + @classmethod + def _future_new(cls, type, value, start_pos=None, line=None, column=None, end_line=None, end_column=None, end_pos=None): + inst = super(Token, cls).__new__(cls, value) + + inst.type = type + inst.start_pos = start_pos + inst.value = value + inst.line = line + inst.column = column + inst.end_line = end_line + inst.end_column = end_column + inst.end_pos = end_pos + return inst + + @overload + def update(self, type: Optional[str] = None, value: Optional[Any] = None) -> 'Token': + ... + + @overload + def update(self, type_: Optional[str] = None, value: Optional[Any] = None) -> 'Token': + ... 
+ + def update(self, *args, **kwargs): + if "type_" in kwargs: + warnings.warn("`type_` is deprecated use `type` instead", DeprecationWarning) + + if "type" in kwargs: + raise TypeError("Error: using both 'type' and the deprecated 'type_' as arguments.") + kwargs["type"] = kwargs.pop("type_") + + return self._future_update(*args, **kwargs) + + def _future_update(self, type: Optional[str] = None, value: Optional[Any] = None) -> 'Token': + return Token.new_borrow_pos( + type if type is not None else self.type, + value if value is not None else self.value, + self + ) + + @classmethod + def new_borrow_pos(cls: Type[_T], type_: str, value: Any, borrow_t: 'Token') -> _T: + return cls(type_, value, borrow_t.start_pos, borrow_t.line, borrow_t.column, borrow_t.end_line, borrow_t.end_column, borrow_t.end_pos) + + def __reduce__(self): + return (self.__class__, (self.type, self.value, self.start_pos, self.line, self.column)) + + def __repr__(self): + return 'Token(%r, %r)' % (self.type, self.value) + + def __deepcopy__(self, memo): + return Token(self.type, self.value, self.start_pos, self.line, self.column) + + def __eq__(self, other): + if isinstance(other, Token) and self.type != other.type: + return False + + return str.__eq__(self, other) + + __hash__ = str.__hash__ + + +class LineCounter: + __slots__ = 'char_pos', 'line', 'column', 'line_start_pos', 'newline_char' + + def __init__(self, newline_char): + self.newline_char = newline_char + self.char_pos = 0 + self.line = 1 + self.column = 1 + self.line_start_pos = 0 + + def __eq__(self, other): + if not isinstance(other, LineCounter): + return NotImplemented + + return self.char_pos == other.char_pos and self.newline_char == other.newline_char + + def feed(self, token: Token, test_newline=True): + #-- + if test_newline: + newlines = token.count(self.newline_char) + if newlines: + self.line += newlines + self.line_start_pos = self.char_pos + token.rindex(self.newline_char) + 1 + + self.char_pos += len(token) + self.column = self.char_pos - self.line_start_pos + 1 + + +class UnlessCallback: + def __init__(self, scanner): + self.scanner = scanner + + def __call__(self, t): + res = self.scanner.match(t.value, 0) + if res: + _value, t.type = res + return t + + +class CallChain: + def __init__(self, callback1, callback2, cond): + self.callback1 = callback1 + self.callback2 = callback2 + self.cond = cond + + def __call__(self, t): + t2 = self.callback1(t) + return self.callback2(t) if self.cond(t2) else t2 + + +def _get_match(re_, regexp, s, flags): + m = re_.match(regexp, s, flags) + if m: + return m.group(0) + +def _create_unless(terminals, g_regex_flags, re_, use_bytes): + tokens_by_type = classify(terminals, lambda t: type(t.pattern)) + assert len(tokens_by_type) <= 2, tokens_by_type.keys() + embedded_strs = set() + callback = {} + for retok in tokens_by_type.get(PatternRE, []): + unless = [] + for strtok in tokens_by_type.get(PatternStr, []): + if strtok.priority != retok.priority: + continue + s = strtok.pattern.value + if s == _get_match(re_, retok.pattern.to_regexp(), s, g_regex_flags): + unless.append(strtok) + if strtok.pattern.flags <= retok.pattern.flags: + embedded_strs.add(strtok) + if unless: + callback[retok.name] = UnlessCallback(Scanner(unless, g_regex_flags, re_, match_whole=True, use_bytes=use_bytes)) + + new_terminals = [t for t in terminals if t not in embedded_strs] + return new_terminals, callback + + +class Scanner: + def __init__(self, terminals, g_regex_flags, re_, use_bytes, match_whole=False): + self.terminals = terminals + 
self.g_regex_flags = g_regex_flags + self.re_ = re_ + self.use_bytes = use_bytes + self.match_whole = match_whole + + self.allowed_types = {t.name for t in self.terminals} + + self._mres = self._build_mres(terminals, len(terminals)) + + def _build_mres(self, terminals, max_size): + ## + + ## + + ## + + postfix = '$' if self.match_whole else '' + mres = [] + while terminals: + pattern = u'|'.join(u'(?P<%s>%s)' % (t.name, t.pattern.to_regexp() + postfix) for t in terminals[:max_size]) + if self.use_bytes: + pattern = pattern.encode('latin-1') + try: + mre = self.re_.compile(pattern, self.g_regex_flags) + except AssertionError: ## + + return self._build_mres(terminals, max_size // 2) + + mres.append(mre) + terminals = terminals[max_size:] + return mres + + def match(self, text, pos): + for mre in self._mres: + m = mre.match(text, pos) + if m: + return m.group(0), m.lastgroup + + +def _regexp_has_newline(r: str): + #-- + return '\n' in r or '\\n' in r or '\\s' in r or '[^' in r or ('(?s' in r and '.' in r) + + +class LexerState: + #-- + + __slots__ = 'text', 'line_ctr', 'last_token' + + text: str + line_ctr: LineCounter + last_token: Optional[Token] + + def __init__(self, text: str, line_ctr: Optional[LineCounter]=None, last_token: Optional[Token]=None): + self.text = text + self.line_ctr = line_ctr or LineCounter(b'\n' if isinstance(text, bytes) else '\n') + self.last_token = last_token + + def __eq__(self, other): + if not isinstance(other, LexerState): + return NotImplemented + + return self.text is other.text and self.line_ctr == other.line_ctr and self.last_token == other.last_token + + def __copy__(self): + return type(self)(self.text, copy(self.line_ctr), self.last_token) + + +class LexerThread: + #-- + + def __init__(self, lexer: 'Lexer', lexer_state: LexerState): + self.lexer = lexer + self.state = lexer_state + + @classmethod + def from_text(cls, lexer: 'Lexer', text: str): + return cls(lexer, LexerState(text)) + + def lex(self, parser_state): + return self.lexer.lex(self.state, parser_state) + + def __copy__(self): + return type(self)(self.lexer, copy(self.state)) + + _Token = Token + + +_Callback = Callable[[Token], Token] + +class Lexer(ABC): + #-- + @abstractmethod + def lex(self, lexer_state: LexerState, parser_state: Any) -> Iterator[Token]: + return NotImplemented + + def make_lexer_state(self, text): + #-- + return LexerState(text) + + +def _check_regex_collisions(terminal_to_regexp: Dict[TerminalDef, str], comparator, strict_mode, max_collisions_to_show=8): + if not comparator: + comparator = interegular.Comparator.from_regexes(terminal_to_regexp) + + ## + + ## + + max_time = 2 if strict_mode else 0.2 + + ## + + if comparator.count_marked_pairs() >= max_collisions_to_show: + return + for group in classify(terminal_to_regexp, lambda t: t.priority).values(): + for a, b in comparator.check(group, skip_marked=True): + assert a.priority == b.priority + ## + + comparator.mark(a, b) + + ## + + message = f"Collision between Terminals {a.name} and {b.name}. " + try: + example = comparator.get_example_overlap(a, b, max_time).format_multiline() + except ValueError: + ## + + example = "No example could be found fast enough. 
However, the collision does still exists" + if strict_mode: + raise LexError(f"{message}\n{example}") + logger.warning("%s The lexer will choose between them arbitrarily.\n%s", message, example) + if comparator.count_marked_pairs() >= max_collisions_to_show: + logger.warning("Found 8 regex collisions, will not check for more.") + return + + +class BasicLexer(Lexer): + terminals: Collection[TerminalDef] + ignore_types: FrozenSet[str] + newline_types: FrozenSet[str] + user_callbacks: Dict[str, _Callback] + callback: Dict[str, _Callback] + re: ModuleType + + def __init__(self, conf: 'LexerConf', comparator=None) -> None: + terminals = list(conf.terminals) + assert all(isinstance(t, TerminalDef) for t in terminals), terminals + + self.re = conf.re_module + + if not conf.skip_validation: + ## + + terminal_to_regexp = {} + for t in terminals: + regexp = t.pattern.to_regexp() + try: + self.re.compile(regexp, conf.g_regex_flags) + except self.re.error: + raise LexError("Cannot compile token %s: %s" % (t.name, t.pattern)) + + if t.pattern.min_width == 0: + raise LexError("Lexer does not allow zero-width terminals. (%s: %s)" % (t.name, t.pattern)) + if t.pattern.type == "re": + terminal_to_regexp[t] = regexp + + if not (set(conf.ignore) <= {t.name for t in terminals}): + raise LexError("Ignore terminals are not defined: %s" % (set(conf.ignore) - {t.name for t in terminals})) + + if has_interegular: + _check_regex_collisions(terminal_to_regexp, comparator, conf.strict) + elif conf.strict: + raise LexError("interegular must be installed for strict mode. Use `pip install 'lark[interegular]'`.") + + ## + + self.newline_types = frozenset(t.name for t in terminals if _regexp_has_newline(t.pattern.to_regexp())) + self.ignore_types = frozenset(conf.ignore) + + terminals.sort(key=lambda x: (-x.priority, -x.pattern.max_width, -len(x.pattern.value), x.name)) + self.terminals = terminals + self.user_callbacks = conf.callbacks + self.g_regex_flags = conf.g_regex_flags + self.use_bytes = conf.use_bytes + self.terminals_by_name = conf.terminals_by_name + + self._scanner = None + + def _build_scanner(self): + terminals, self.callback = _create_unless(self.terminals, self.g_regex_flags, self.re, self.use_bytes) + assert all(self.callback.values()) + + for type_, f in self.user_callbacks.items(): + if type_ in self.callback: + ## + + self.callback[type_] = CallChain(self.callback[type_], f, lambda t: t.type == type_) + else: + self.callback[type_] = f + + self._scanner = Scanner(terminals, self.g_regex_flags, self.re, self.use_bytes) + + @property + def scanner(self): + if self._scanner is None: + self._build_scanner() + return self._scanner + + def match(self, text, pos): + return self.scanner.match(text, pos) + + def lex(self, state: LexerState, parser_state: Any) -> Iterator[Token]: + with suppress(EOFError): + while True: + yield self.next_token(state, parser_state) + + def next_token(self, lex_state: LexerState, parser_state: Any = None) -> Token: + line_ctr = lex_state.line_ctr + while line_ctr.char_pos < len(lex_state.text): + res = self.match(lex_state.text, line_ctr.char_pos) + if not res: + allowed = self.scanner.allowed_types - self.ignore_types + if not allowed: + allowed = {""} + raise UnexpectedCharacters(lex_state.text, line_ctr.char_pos, line_ctr.line, line_ctr.column, + allowed=allowed, token_history=lex_state.last_token and [lex_state.last_token], + state=parser_state, terminals_by_name=self.terminals_by_name) + + value, type_ = res + + if type_ not in self.ignore_types: + t = Token(type_, value, 
line_ctr.char_pos, line_ctr.line, line_ctr.column) + line_ctr.feed(value, type_ in self.newline_types) + t.end_line = line_ctr.line + t.end_column = line_ctr.column + t.end_pos = line_ctr.char_pos + if t.type in self.callback: + t = self.callback[t.type](t) + if not isinstance(t, Token): + raise LexError("Callbacks must return a token (returned %r)" % t) + lex_state.last_token = t + return t + else: + if type_ in self.callback: + t2 = Token(type_, value, line_ctr.char_pos, line_ctr.line, line_ctr.column) + self.callback[type_](t2) + line_ctr.feed(value, type_ in self.newline_types) + + ## + + raise EOFError(self) + + +class ContextualLexer(Lexer): + + lexers: Dict[str, BasicLexer] + root_lexer: BasicLexer + + def __init__(self, conf: 'LexerConf', states: Dict[str, Collection[str]], always_accept: Collection[str]=()) -> None: + terminals = list(conf.terminals) + terminals_by_name = conf.terminals_by_name + + trad_conf = copy(conf) + trad_conf.terminals = terminals + + if has_interegular and not conf.skip_validation: + comparator = interegular.Comparator.from_regexes({t: t.pattern.to_regexp() for t in terminals}) + else: + comparator = None + lexer_by_tokens: Dict[FrozenSet[str], BasicLexer] = {} + self.lexers = {} + for state, accepts in states.items(): + key = frozenset(accepts) + try: + lexer = lexer_by_tokens[key] + except KeyError: + accepts = set(accepts) | set(conf.ignore) | set(always_accept) + lexer_conf = copy(trad_conf) + lexer_conf.terminals = [terminals_by_name[n] for n in accepts if n in terminals_by_name] + lexer = BasicLexer(lexer_conf, comparator) + lexer_by_tokens[key] = lexer + + self.lexers[state] = lexer + + assert trad_conf.terminals is terminals + trad_conf.skip_validation = True ## + + self.root_lexer = BasicLexer(trad_conf, comparator) + + def lex(self, lexer_state: LexerState, parser_state: Any) -> Iterator[Token]: + try: + while True: + lexer = self.lexers[parser_state.position] + yield lexer.next_token(lexer_state, parser_state) + except EOFError: + pass + except UnexpectedCharacters as e: + ## + + ## + + try: + last_token = lexer_state.last_token ## + + token = self.root_lexer.next_token(lexer_state, parser_state) + raise UnexpectedToken(token, e.allowed, state=parser_state, token_history=[last_token], terminals_by_name=self.root_lexer.terminals_by_name) + except UnexpectedCharacters: + raise e ## + + + + +_ParserArgType: 'TypeAlias' = 'Literal["earley", "lalr", "cyk", "auto"]' +_LexerArgType: 'TypeAlias' = 'Union[Literal["auto", "basic", "contextual", "dynamic", "dynamic_complete"], Type[Lexer]]' +_Callback = Callable[[Token], Token] + +class LexerConf(Serialize): + __serialize_fields__ = 'terminals', 'ignore', 'g_regex_flags', 'use_bytes', 'lexer_type' + __serialize_namespace__ = TerminalDef, + + terminals: Collection[TerminalDef] + re_module: ModuleType + ignore: Collection[str] + postlex: 'Optional[PostLex]' + callbacks: Dict[str, _Callback] + g_regex_flags: int + skip_validation: bool + use_bytes: bool + lexer_type: Optional[_LexerArgType] + strict: bool + + def __init__(self, terminals: Collection[TerminalDef], re_module: ModuleType, ignore: Collection[str]=(), postlex: 'Optional[PostLex]'=None, + callbacks: Optional[Dict[str, _Callback]]=None, g_regex_flags: int=0, skip_validation: bool=False, use_bytes: bool=False, strict: bool=False): + self.terminals = terminals + self.terminals_by_name = {t.name: t for t in self.terminals} + assert len(self.terminals) == len(self.terminals_by_name) + self.ignore = ignore + self.postlex = postlex + self.callbacks = 
callbacks or {} + self.g_regex_flags = g_regex_flags + self.re_module = re_module + self.skip_validation = skip_validation + self.use_bytes = use_bytes + self.strict = strict + self.lexer_type = None + + def _deserialize(self): + self.terminals_by_name = {t.name: t for t in self.terminals} + + def __deepcopy__(self, memo=None): + return type(self)( + deepcopy(self.terminals, memo), + self.re_module, + deepcopy(self.ignore, memo), + deepcopy(self.postlex, memo), + deepcopy(self.callbacks, memo), + deepcopy(self.g_regex_flags, memo), + deepcopy(self.skip_validation, memo), + deepcopy(self.use_bytes, memo), + ) + + +class ParserConf(Serialize): + __serialize_fields__ = 'rules', 'start', 'parser_type' + + def __init__(self, rules, callbacks, start): + assert isinstance(start, list) + self.rules = rules + self.callbacks = callbacks + self.start = start + + self.parser_type = None + + +from functools import partial, wraps +from itertools import product + + +class ExpandSingleChild: + def __init__(self, node_builder): + self.node_builder = node_builder + + def __call__(self, children): + if len(children) == 1: + return children[0] + else: + return self.node_builder(children) + + + +class PropagatePositions: + def __init__(self, node_builder, node_filter=None): + self.node_builder = node_builder + self.node_filter = node_filter + + def __call__(self, children): + res = self.node_builder(children) + + if isinstance(res, Tree): + ## + + ## + + ## + + ## + + + res_meta = res.meta + + first_meta = self._pp_get_meta(children) + if first_meta is not None: + if not hasattr(res_meta, 'line'): + ## + + res_meta.line = getattr(first_meta, 'container_line', first_meta.line) + res_meta.column = getattr(first_meta, 'container_column', first_meta.column) + res_meta.start_pos = getattr(first_meta, 'container_start_pos', first_meta.start_pos) + res_meta.empty = False + + res_meta.container_line = getattr(first_meta, 'container_line', first_meta.line) + res_meta.container_column = getattr(first_meta, 'container_column', first_meta.column) + res_meta.container_start_pos = getattr(first_meta, 'container_start_pos', first_meta.start_pos) + + last_meta = self._pp_get_meta(reversed(children)) + if last_meta is not None: + if not hasattr(res_meta, 'end_line'): + res_meta.end_line = getattr(last_meta, 'container_end_line', last_meta.end_line) + res_meta.end_column = getattr(last_meta, 'container_end_column', last_meta.end_column) + res_meta.end_pos = getattr(last_meta, 'container_end_pos', last_meta.end_pos) + res_meta.empty = False + + res_meta.container_end_line = getattr(last_meta, 'container_end_line', last_meta.end_line) + res_meta.container_end_column = getattr(last_meta, 'container_end_column', last_meta.end_column) + res_meta.container_end_pos = getattr(last_meta, 'container_end_pos', last_meta.end_pos) + + return res + + def _pp_get_meta(self, children): + for c in children: + if self.node_filter is not None and not self.node_filter(c): + continue + if isinstance(c, Tree): + if not c.meta.empty: + return c.meta + elif isinstance(c, Token): + return c + elif hasattr(c, '__lark_meta__'): + return c.__lark_meta__() + +def make_propagate_positions(option): + if callable(option): + return partial(PropagatePositions, node_filter=option) + elif option is True: + return PropagatePositions + elif option is False: + return None + + raise ConfigurationError('Invalid option for propagate_positions: %r' % option) + + +class ChildFilter: + def __init__(self, to_include, append_none, node_builder): + self.node_builder = 
node_builder + self.to_include = to_include + self.append_none = append_none + + def __call__(self, children): + filtered = [] + + for i, to_expand, add_none in self.to_include: + if add_none: + filtered += [None] * add_none + if to_expand: + filtered += children[i].children + else: + filtered.append(children[i]) + + if self.append_none: + filtered += [None] * self.append_none + + return self.node_builder(filtered) + + +class ChildFilterLALR(ChildFilter): + #-- + + def __call__(self, children): + filtered = [] + for i, to_expand, add_none in self.to_include: + if add_none: + filtered += [None] * add_none + if to_expand: + if filtered: + filtered += children[i].children + else: ## + + filtered = children[i].children + else: + filtered.append(children[i]) + + if self.append_none: + filtered += [None] * self.append_none + + return self.node_builder(filtered) + + +class ChildFilterLALR_NoPlaceholders(ChildFilter): + #-- + def __init__(self, to_include, node_builder): + self.node_builder = node_builder + self.to_include = to_include + + def __call__(self, children): + filtered = [] + for i, to_expand in self.to_include: + if to_expand: + if filtered: + filtered += children[i].children + else: ## + + filtered = children[i].children + else: + filtered.append(children[i]) + return self.node_builder(filtered) + + +def _should_expand(sym): + return not sym.is_term and sym.name.startswith('_') + + +def maybe_create_child_filter(expansion, keep_all_tokens, ambiguous, _empty_indices: List[bool]): + ## + + if _empty_indices: + assert _empty_indices.count(False) == len(expansion) + s = ''.join(str(int(b)) for b in _empty_indices) + empty_indices = [len(ones) for ones in s.split('0')] + assert len(empty_indices) == len(expansion)+1, (empty_indices, len(expansion)) + else: + empty_indices = [0] * (len(expansion)+1) + + to_include = [] + nones_to_add = 0 + for i, sym in enumerate(expansion): + nones_to_add += empty_indices[i] + if keep_all_tokens or not (sym.is_term and sym.filter_out): + to_include.append((i, _should_expand(sym), nones_to_add)) + nones_to_add = 0 + + nones_to_add += empty_indices[len(expansion)] + + if _empty_indices or len(to_include) < len(expansion) or any(to_expand for i, to_expand,_ in to_include): + if _empty_indices or ambiguous: + return partial(ChildFilter if ambiguous else ChildFilterLALR, to_include, nones_to_add) + else: + ## + + return partial(ChildFilterLALR_NoPlaceholders, [(i, x) for i,x,_ in to_include]) + + +class AmbiguousExpander: + #-- + def __init__(self, to_expand, tree_class, node_builder): + self.node_builder = node_builder + self.tree_class = tree_class + self.to_expand = to_expand + + def __call__(self, children): + def _is_ambig_tree(t): + return hasattr(t, 'data') and t.data == '_ambig' + + ## + + ## + + ## + + ## + + ambiguous = [] + for i, child in enumerate(children): + if _is_ambig_tree(child): + if i in self.to_expand: + ambiguous.append(i) + + child.expand_kids_by_data('_ambig') + + if not ambiguous: + return self.node_builder(children) + + expand = [child.children if i in ambiguous else (child,) for i, child in enumerate(children)] + return self.tree_class('_ambig', [self.node_builder(list(f)) for f in product(*expand)]) + + +def maybe_create_ambiguous_expander(tree_class, expansion, keep_all_tokens): + to_expand = [i for i, sym in enumerate(expansion) + if keep_all_tokens or ((not (sym.is_term and sym.filter_out)) and _should_expand(sym))] + if to_expand: + return partial(AmbiguousExpander, to_expand, tree_class) + + +class 
AmbiguousIntermediateExpander: + #-- + + def __init__(self, tree_class, node_builder): + self.node_builder = node_builder + self.tree_class = tree_class + + def __call__(self, children): + def _is_iambig_tree(child): + return hasattr(child, 'data') and child.data == '_iambig' + + def _collapse_iambig(children): + #-- + + ## + + ## + + if children and _is_iambig_tree(children[0]): + iambig_node = children[0] + result = [] + for grandchild in iambig_node.children: + collapsed = _collapse_iambig(grandchild.children) + if collapsed: + for child in collapsed: + child.children += children[1:] + result += collapsed + else: + new_tree = self.tree_class('_inter', grandchild.children + children[1:]) + result.append(new_tree) + return result + + collapsed = _collapse_iambig(children) + if collapsed: + processed_nodes = [self.node_builder(c.children) for c in collapsed] + return self.tree_class('_ambig', processed_nodes) + + return self.node_builder(children) + + + +def inplace_transformer(func): + @wraps(func) + def f(children): + ## + + tree = Tree(func.__name__, children) + return func(tree) + return f + + +def apply_visit_wrapper(func, name, wrapper): + if wrapper is _vargs_meta or wrapper is _vargs_meta_inline: + raise NotImplementedError("Meta args not supported for internal transformer") + + @wraps(func) + def f(children): + return wrapper(func, name, children, None) + return f + + +class ParseTreeBuilder: + def __init__(self, rules, tree_class, propagate_positions=False, ambiguous=False, maybe_placeholders=False): + self.tree_class = tree_class + self.propagate_positions = propagate_positions + self.ambiguous = ambiguous + self.maybe_placeholders = maybe_placeholders + + self.rule_builders = list(self._init_builders(rules)) + + def _init_builders(self, rules): + propagate_positions = make_propagate_positions(self.propagate_positions) + + for rule in rules: + options = rule.options + keep_all_tokens = options.keep_all_tokens + expand_single_child = options.expand1 + + wrapper_chain = list(filter(None, [ + (expand_single_child and not rule.alias) and ExpandSingleChild, + maybe_create_child_filter(rule.expansion, keep_all_tokens, self.ambiguous, options.empty_indices if self.maybe_placeholders else None), + propagate_positions, + self.ambiguous and maybe_create_ambiguous_expander(self.tree_class, rule.expansion, keep_all_tokens), + self.ambiguous and partial(AmbiguousIntermediateExpander, self.tree_class) + ])) + + yield rule, wrapper_chain + + def create_callback(self, transformer=None): + callbacks = {} + + default_handler = getattr(transformer, '__default__', None) + if default_handler: + def default_callback(data, children): + return default_handler(data, children, None) + else: + default_callback = self.tree_class + + for rule, wrapper_chain in self.rule_builders: + + user_callback_name = rule.alias or rule.options.template_source or rule.origin.name + try: + f = getattr(transformer, user_callback_name) + wrapper = getattr(f, 'visit_wrapper', None) + if wrapper is not None: + f = apply_visit_wrapper(f, user_callback_name, wrapper) + elif isinstance(transformer, Transformer_InPlace): + f = inplace_transformer(f) + except AttributeError: + f = partial(default_callback, user_callback_name) + + for w in wrapper_chain: + f = w(f) + + if rule in callbacks: + raise GrammarError("Rule '%s' already exists" % (rule,)) + + callbacks[rule] = f + + return callbacks + + + +class LALR_Parser(Serialize): + def __init__(self, parser_conf, debug=False, strict=False): + analysis = LALR_Analyzer(parser_conf, 
debug=debug, strict=strict) + analysis.compute_lalr() + callbacks = parser_conf.callbacks + + self._parse_table = analysis.parse_table + self.parser_conf = parser_conf + self.parser = _Parser(analysis.parse_table, callbacks, debug) + + @classmethod + def deserialize(cls, data, memo, callbacks, debug=False): + inst = cls.__new__(cls) + inst._parse_table = IntParseTable.deserialize(data, memo) + inst.parser = _Parser(inst._parse_table, callbacks, debug) + return inst + + def serialize(self, memo: Any = None) -> Dict[str, Any]: + return self._parse_table.serialize(memo) + + def parse_interactive(self, lexer, start): + return self.parser.parse(lexer, start, start_interactive=True) + + def parse(self, lexer, start, on_error=None): + try: + return self.parser.parse(lexer, start) + except UnexpectedInput as e: + if on_error is None: + raise + + while True: + if isinstance(e, UnexpectedCharacters): + s = e.interactive_parser.lexer_thread.state + p = s.line_ctr.char_pos + + if not on_error(e): + raise e + + if isinstance(e, UnexpectedCharacters): + ## + + if p == s.line_ctr.char_pos: + s.line_ctr.feed(s.text[p:p+1]) + + try: + return e.interactive_parser.resume_parse() + except UnexpectedToken as e2: + if (isinstance(e, UnexpectedToken) + and e.token.type == e2.token.type == '$END' + and e.interactive_parser == e2.interactive_parser): + ## + + raise e2 + e = e2 + except UnexpectedCharacters as e2: + e = e2 + + +class ParseConf: + __slots__ = 'parse_table', 'callbacks', 'start', 'start_state', 'end_state', 'states' + + def __init__(self, parse_table, callbacks, start): + self.parse_table = parse_table + + self.start_state = self.parse_table.start_states[start] + self.end_state = self.parse_table.end_states[start] + self.states = self.parse_table.states + + self.callbacks = callbacks + self.start = start + + +class ParserState: + __slots__ = 'parse_conf', 'lexer', 'state_stack', 'value_stack' + + def __init__(self, parse_conf, lexer, state_stack=None, value_stack=None): + self.parse_conf = parse_conf + self.lexer = lexer + self.state_stack = state_stack or [self.parse_conf.start_state] + self.value_stack = value_stack or [] + + @property + def position(self): + return self.state_stack[-1] + + ## + + def __eq__(self, other): + if not isinstance(other, ParserState): + return NotImplemented + return len(self.state_stack) == len(other.state_stack) and self.position == other.position + + def __copy__(self): + return type(self)( + self.parse_conf, + self.lexer, ## + + copy(self.state_stack), + deepcopy(self.value_stack), + ) + + def copy(self): + return copy(self) + + def feed_token(self, token, is_end=False): + state_stack = self.state_stack + value_stack = self.value_stack + states = self.parse_conf.states + end_state = self.parse_conf.end_state + callbacks = self.parse_conf.callbacks + + while True: + state = state_stack[-1] + try: + action, arg = states[state][token.type] + except KeyError: + expected = {s for s in states[state].keys() if s.isupper()} + raise UnexpectedToken(token, expected, state=self, interactive_parser=None) + + assert arg != end_state + + if action is Shift: + ## + + assert not is_end + state_stack.append(arg) + value_stack.append(token if token.type not in callbacks else callbacks[token.type](token)) + return + else: + ## + + rule = arg + size = len(rule.expansion) + if size: + s = value_stack[-size:] + del state_stack[-size:] + del value_stack[-size:] + else: + s = [] + + value = callbacks[rule](s) + + _action, new_state = states[state_stack[-1]][rule.origin.name] + assert 
_action is Shift + state_stack.append(new_state) + value_stack.append(value) + + if is_end and state_stack[-1] == end_state: + return value_stack[-1] + +class _Parser: + def __init__(self, parse_table, callbacks, debug=False): + self.parse_table = parse_table + self.callbacks = callbacks + self.debug = debug + + def parse(self, lexer, start, value_stack=None, state_stack=None, start_interactive=False): + parse_conf = ParseConf(self.parse_table, self.callbacks, start) + parser_state = ParserState(parse_conf, lexer, state_stack, value_stack) + if start_interactive: + return InteractiveParser(self, parser_state, parser_state.lexer) + return self.parse_from_state(parser_state) + + + def parse_from_state(self, state, last_token=None): + #-- + try: + token = last_token + for token in state.lexer.lex(state): + state.feed_token(token) + + end_token = Token.new_borrow_pos('$END', '', token) if token else Token('$END', '', 0, 1, 1) + return state.feed_token(end_token, True) + except UnexpectedInput as e: + try: + e.interactive_parser = InteractiveParser(self, state, state.lexer) + except NameError: + pass + raise e + except Exception as e: + if self.debug: + print("") + print("STATE STACK DUMP") + print("----------------") + for i, s in enumerate(state.state_stack): + print('%d)' % i , s) + print("") + + raise + + +class Action: + def __init__(self, name): + self.name = name + def __str__(self): + return self.name + def __repr__(self): + return str(self) + +Shift = Action('Shift') +Reduce = Action('Reduce') + + +class ParseTable: + def __init__(self, states, start_states, end_states): + self.states = states + self.start_states = start_states + self.end_states = end_states + + def serialize(self, memo): + tokens = Enumerator() + + states = { + state: {tokens.get(token): ((1, arg.serialize(memo)) if action is Reduce else (0, arg)) + for token, (action, arg) in actions.items()} + for state, actions in self.states.items() + } + + return { + 'tokens': tokens.reversed(), + 'states': states, + 'start_states': self.start_states, + 'end_states': self.end_states, + } + + @classmethod + def deserialize(cls, data, memo): + tokens = data['tokens'] + states = { + state: {tokens[token]: ((Reduce, Rule.deserialize(arg, memo)) if action==1 else (Shift, arg)) + for token, (action, arg) in actions.items()} + for state, actions in data['states'].items() + } + return cls(states, data['start_states'], data['end_states']) + + +class IntParseTable(ParseTable): + + @classmethod + def from_ParseTable(cls, parse_table): + enum = list(parse_table.states) + state_to_idx = {s:i for i,s in enumerate(enum)} + int_states = {} + + for s, la in parse_table.states.items(): + la = {k:(v[0], state_to_idx[v[1]]) if v[0] is Shift else v + for k,v in la.items()} + int_states[ state_to_idx[s] ] = la + + + start_states = {start:state_to_idx[s] for start, s in parse_table.start_states.items()} + end_states = {start:state_to_idx[s] for start, s in parse_table.end_states.items()} + return cls(int_states, start_states, end_states) + + + +def _wrap_lexer(lexer_class): + future_interface = getattr(lexer_class, '__future_interface__', False) + if future_interface: + return lexer_class + else: + class CustomLexerWrapper(Lexer): + def __init__(self, lexer_conf): + self.lexer = lexer_class(lexer_conf) + def lex(self, lexer_state, parser_state): + return self.lexer.lex(lexer_state.text) + return CustomLexerWrapper + + +def _deserialize_parsing_frontend(data, memo, lexer_conf, callbacks, options): + parser_conf = 
ParserConf.deserialize(data['parser_conf'], memo) + cls = (options and options._plugins.get('LALR_Parser')) or LALR_Parser + parser = cls.deserialize(data['parser'], memo, callbacks, options.debug) + parser_conf.callbacks = callbacks + return ParsingFrontend(lexer_conf, parser_conf, options, parser=parser) + + +_parser_creators: 'Dict[str, Callable[[LexerConf, Any, Any], Any]]' = {} + + +class ParsingFrontend(Serialize): + __serialize_fields__ = 'lexer_conf', 'parser_conf', 'parser' + + lexer_conf: LexerConf + parser_conf: ParserConf + options: Any + + def __init__(self, lexer_conf: LexerConf, parser_conf: ParserConf, options, parser=None): + self.parser_conf = parser_conf + self.lexer_conf = lexer_conf + self.options = options + + ## + + if parser: ## + + self.parser = parser + else: + create_parser = _parser_creators.get(parser_conf.parser_type) + assert create_parser is not None, "{} is not supported in standalone mode".format( + parser_conf.parser_type + ) + self.parser = create_parser(lexer_conf, parser_conf, options) + + ## + + lexer_type = lexer_conf.lexer_type + self.skip_lexer = False + if lexer_type in ('dynamic', 'dynamic_complete'): + assert lexer_conf.postlex is None + self.skip_lexer = True + return + + if isinstance(lexer_type, type): + assert issubclass(lexer_type, Lexer) + self.lexer = _wrap_lexer(lexer_type)(lexer_conf) + elif isinstance(lexer_type, str): + create_lexer = { + 'basic': create_basic_lexer, + 'contextual': create_contextual_lexer, + }[lexer_type] + self.lexer = create_lexer(lexer_conf, self.parser, lexer_conf.postlex, options) + else: + raise TypeError("Bad value for lexer_type: {lexer_type}") + + if lexer_conf.postlex: + self.lexer = PostLexConnector(self.lexer, lexer_conf.postlex) + + def _verify_start(self, start=None): + if start is None: + start_decls = self.parser_conf.start + if len(start_decls) > 1: + raise ConfigurationError("Lark initialized with more than 1 possible start rule. Must specify which start rule to parse", start_decls) + start ,= start_decls + elif start not in self.parser_conf.start: + raise ConfigurationError("Unknown start rule %s. 
Must be one of %r" % (start, self.parser_conf.start)) + return start + + def _make_lexer_thread(self, text: str): + cls = (self.options and self.options._plugins.get('LexerThread')) or LexerThread + return text if self.skip_lexer else cls.from_text(self.lexer, text) + + def parse(self, text: str, start=None, on_error=None): + chosen_start = self._verify_start(start) + kw = {} if on_error is None else {'on_error': on_error} + stream = self._make_lexer_thread(text) + return self.parser.parse(stream, chosen_start, **kw) + + def parse_interactive(self, text: Optional[str]=None, start=None): + ## + + ## + + chosen_start = self._verify_start(start) + if self.parser_conf.parser_type != 'lalr': + raise ConfigurationError("parse_interactive() currently only works with parser='lalr' ") + stream = self._make_lexer_thread(text) ## + + return self.parser.parse_interactive(stream, chosen_start) + + +def _validate_frontend_args(parser, lexer) -> None: + assert_config(parser, ('lalr', 'earley', 'cyk')) + if not isinstance(lexer, type): ## + + expected = { + 'lalr': ('basic', 'contextual'), + 'earley': ('basic', 'dynamic', 'dynamic_complete'), + 'cyk': ('basic', ), + }[parser] + assert_config(lexer, expected, 'Parser %r does not support lexer %%r, expected one of %%s' % parser) + + +def _get_lexer_callbacks(transformer, terminals): + result = {} + for terminal in terminals: + callback = getattr(transformer, terminal.name, None) + if callback is not None: + result[terminal.name] = callback + return result + +class PostLexConnector: + def __init__(self, lexer, postlexer): + self.lexer = lexer + self.postlexer = postlexer + + def lex(self, lexer_state, parser_state): + i = self.lexer.lex(lexer_state, parser_state) + return self.postlexer.process(i) + + + +def create_basic_lexer(lexer_conf, parser, postlex, options) -> BasicLexer: + cls = (options and options._plugins.get('BasicLexer')) or BasicLexer + return cls(lexer_conf) + +def create_contextual_lexer(lexer_conf: LexerConf, parser, postlex, options) -> ContextualLexer: + cls = (options and options._plugins.get('ContextualLexer')) or ContextualLexer + states: Dict[str, Collection[str]] = {idx:list(t.keys()) for idx, t in parser._parse_table.states.items()} + always_accept: Collection[str] = postlex.always_accept if postlex else () + return cls(lexer_conf, states, always_accept=always_accept) + +def create_lalr_parser(lexer_conf: LexerConf, parser_conf: ParserConf, options=None) -> LALR_Parser: + debug = options.debug if options else False + strict = options.strict if options else False + cls = (options and options._plugins.get('LALR_Parser')) or LALR_Parser + return cls(parser_conf, debug=debug, strict=strict) + +_parser_creators['lalr'] = create_lalr_parser + + + + +class PostLex(ABC): + @abstractmethod + def process(self, stream: Iterator[Token]) -> Iterator[Token]: + return stream + + always_accept: Iterable[str] = () + +class LarkOptions(Serialize): + #-- + + start: List[str] + debug: bool + strict: bool + transformer: 'Optional[Transformer]' + propagate_positions: Union[bool, str] + maybe_placeholders: bool + cache: Union[bool, str] + regex: bool + g_regex_flags: int + keep_all_tokens: bool + tree_class: Any + parser: _ParserArgType + lexer: _LexerArgType + ambiguity: 'Literal["auto", "resolve", "explicit", "forest"]' + postlex: Optional[PostLex] + priority: 'Optional[Literal["auto", "normal", "invert"]]' + lexer_callbacks: Dict[str, Callable[[Token], Token]] + use_bytes: bool + edit_terminals: Optional[Callable[[TerminalDef], TerminalDef]] + 
import_paths: 'List[Union[str, Callable[[Union[None, str, PackageResource], str], Tuple[str, str]]]]' + source_path: Optional[str] + + OPTIONS_DOC = """ + **=== General Options ===** + + start + The start symbol. Either a string, or a list of strings for multiple possible starts (Default: "start") + debug + Display debug information and extra warnings. Use only when debugging (Default: ``False``) + When used with Earley, it generates a forest graph as "sppf.png", if 'dot' is installed. + strict + Throw an exception on any potential ambiguity, including shift/reduce conflicts, and regex collisions. + transformer + Applies the transformer to every parse tree (equivalent to applying it after the parse, but faster) + propagate_positions + Propagates positional attributes into the 'meta' attribute of all tree branches. + Sets attributes: (line, column, end_line, end_column, start_pos, end_pos, + container_line, container_column, container_end_line, container_end_column) + Accepts ``False``, ``True``, or a callable, which will filter which nodes to ignore when propagating. + maybe_placeholders + When ``True``, the ``[]`` operator returns ``None`` when not matched. + When ``False``, ``[]`` behaves like the ``?`` operator, and returns no value at all. + (default= ``True``) + cache + Cache the results of the Lark grammar analysis, for x2 to x3 faster loading. LALR only for now. + + - When ``False``, does nothing (default) + - When ``True``, caches to a temporary file in the local directory + - When given a string, caches to the path pointed by the string + regex + When True, uses the ``regex`` module instead of the stdlib ``re``. + g_regex_flags + Flags that are applied to all terminals (both regex and strings) + keep_all_tokens + Prevent the tree builder from automagically removing "punctuation" tokens (Default: ``False``) + tree_class + Lark will produce trees comprised of instances of this class instead of the default ``lark.Tree``. + + **=== Algorithm Options ===** + + parser + Decides which parser engine to use. Accepts "earley" or "lalr". (Default: "earley"). + (there is also a "cyk" option for legacy) + lexer + Decides whether or not to use a lexer stage + + - "auto" (default): Choose for me based on the parser + - "basic": Use a basic lexer + - "contextual": Stronger lexer (only works with parser="lalr") + - "dynamic": Flexible and powerful (only with parser="earley") + - "dynamic_complete": Same as dynamic, but tries *every* variation of tokenizing possible. + ambiguity + Decides how to handle ambiguity in the parse. Only relevant if parser="earley" + + - "resolve": The parser will automatically choose the simplest derivation + (it chooses consistently: greedy for tokens, non-greedy for rules) + - "explicit": The parser will return all derivations wrapped in "_ambig" tree nodes (i.e. a forest). + - "forest": The parser will return the root of the shared packed parse forest. + + **=== Misc. / Domain Specific Options ===** + + postlex + Lexer post-processing (Default: ``None``) Only works with the basic and contextual lexers. + priority + How priorities should be evaluated - "auto", ``None``, "normal", "invert" (Default: "auto") + lexer_callbacks + Dictionary of callbacks for the lexer. May alter tokens during lexing. Use with caution. + use_bytes + Accept an input of type ``bytes`` instead of ``str``. + edit_terminals + A callback for editing the terminals before parse. 
+ import_paths + A List of either paths or loader functions to specify from where grammars are imported + source_path + Override the source of from where the grammar was loaded. Useful for relative imports and unconventional grammar loading + **=== End of Options ===** + """ + if __doc__: + __doc__ += OPTIONS_DOC + + + ## + + ## + + ## + + ## + + ## + + ## + + _defaults: Dict[str, Any] = { + 'debug': False, + 'strict': False, + 'keep_all_tokens': False, + 'tree_class': None, + 'cache': False, + 'postlex': None, + 'parser': 'earley', + 'lexer': 'auto', + 'transformer': None, + 'start': 'start', + 'priority': 'auto', + 'ambiguity': 'auto', + 'regex': False, + 'propagate_positions': False, + 'lexer_callbacks': {}, + 'maybe_placeholders': True, + 'edit_terminals': None, + 'g_regex_flags': 0, + 'use_bytes': False, + 'import_paths': [], + 'source_path': None, + '_plugins': {}, + } + + def __init__(self, options_dict: Dict[str, Any]) -> None: + o = dict(options_dict) + + options = {} + for name, default in self._defaults.items(): + if name in o: + value = o.pop(name) + if isinstance(default, bool) and name not in ('cache', 'use_bytes', 'propagate_positions'): + value = bool(value) + else: + value = default + + options[name] = value + + if isinstance(options['start'], str): + options['start'] = [options['start']] + + self.__dict__['options'] = options + + + assert_config(self.parser, ('earley', 'lalr', 'cyk', None)) + + if self.parser == 'earley' and self.transformer: + raise ConfigurationError('Cannot specify an embedded transformer when using the Earley algorithm. ' + 'Please use your transformer on the resulting parse tree, or use a different algorithm (i.e. LALR)') + + if o: + raise ConfigurationError("Unknown options: %s" % o.keys()) + + def __getattr__(self, name: str) -> Any: + try: + return self.__dict__['options'][name] + except KeyError as e: + raise AttributeError(e) + + def __setattr__(self, name: str, value: str) -> None: + assert_config(name, self.options.keys(), "%r isn't a valid option. 
Expected one of: %s") + self.options[name] = value + + def serialize(self, memo = None) -> Dict[str, Any]: + return self.options + + @classmethod + def deserialize(cls, data: Dict[str, Any], memo: Dict[int, Union[TerminalDef, Rule]]) -> "LarkOptions": + return cls(data) + + +## + +## + +_LOAD_ALLOWED_OPTIONS = {'postlex', 'transformer', 'lexer_callbacks', 'use_bytes', 'debug', 'g_regex_flags', 'regex', 'propagate_positions', 'tree_class', '_plugins'} + +_VALID_PRIORITY_OPTIONS = ('auto', 'normal', 'invert', None) +_VALID_AMBIGUITY_OPTIONS = ('auto', 'resolve', 'explicit', 'forest') + + +_T = TypeVar('_T', bound="Lark") + +class Lark(Serialize): + #-- + + source_path: str + source_grammar: str + grammar: 'Grammar' + options: LarkOptions + lexer: Lexer + parser: 'ParsingFrontend' + terminals: Collection[TerminalDef] + + def __init__(self, grammar: 'Union[Grammar, str, IO[str]]', **options) -> None: + self.options = LarkOptions(options) + re_module: types.ModuleType + + ## + + use_regex = self.options.regex + if use_regex: + if _has_regex: + re_module = regex + else: + raise ImportError('`regex` module must be installed if calling `Lark(regex=True)`.') + else: + re_module = re + + ## + + if self.options.source_path is None: + try: + self.source_path = grammar.name ## + + except AttributeError: + self.source_path = '' + else: + self.source_path = self.options.source_path + + ## + + try: + read = grammar.read ## + + except AttributeError: + pass + else: + grammar = read() + + cache_fn = None + cache_sha256 = None + if isinstance(grammar, str): + self.source_grammar = grammar + if self.options.use_bytes: + if not isascii(grammar): + raise ConfigurationError("Grammar must be ascii only, when use_bytes=True") + + if self.options.cache: + if self.options.parser != 'lalr': + raise ConfigurationError("cache only works with parser='lalr' for now") + + unhashable = ('transformer', 'postlex', 'lexer_callbacks', 'edit_terminals', '_plugins') + options_str = ''.join(k+str(v) for k, v in options.items() if k not in unhashable) + from . import __version__ + s = grammar + options_str + __version__ + str(sys.version_info[:2]) + cache_sha256 = sha256_digest(s) + + if isinstance(self.options.cache, str): + cache_fn = self.options.cache + else: + if self.options.cache is not True: + raise ConfigurationError("cache argument must be bool or str") + + try: + username = getpass.getuser() + except Exception: + ## + + ## + + ## + + username = "unknown" + + cache_fn = tempfile.gettempdir() + "/.lark_cache_%s_%s_%s_%s.tmp" % (username, cache_sha256, *sys.version_info[:2]) + + old_options = self.options + try: + with FS.open(cache_fn, 'rb') as f: + logger.debug('Loading grammar from cache: %s', cache_fn) + ## + + for name in (set(options) - _LOAD_ALLOWED_OPTIONS): + del options[name] + file_sha256 = f.readline().rstrip(b'\n') + cached_used_files = pickle.load(f) + if file_sha256 == cache_sha256.encode('utf8') and verify_used_files(cached_used_files): + cached_parser_data = pickle.load(f) + self._load(cached_parser_data, **options) + return + except FileNotFoundError: + ## + + pass + except Exception: ## + + logger.exception("Failed to load Lark from cache: %r. 
We will try to carry on.", cache_fn) + + ## + + ## + + self.options = old_options + + + ## + + self.grammar, used_files = load_grammar(grammar, self.source_path, self.options.import_paths, self.options.keep_all_tokens) + else: + assert isinstance(grammar, Grammar) + self.grammar = grammar + + + if self.options.lexer == 'auto': + if self.options.parser == 'lalr': + self.options.lexer = 'contextual' + elif self.options.parser == 'earley': + if self.options.postlex is not None: + logger.info("postlex can't be used with the dynamic lexer, so we use 'basic' instead. " + "Consider using lalr with contextual instead of earley") + self.options.lexer = 'basic' + else: + self.options.lexer = 'dynamic' + elif self.options.parser == 'cyk': + self.options.lexer = 'basic' + else: + assert False, self.options.parser + lexer = self.options.lexer + if isinstance(lexer, type): + assert issubclass(lexer, Lexer) ## + + else: + assert_config(lexer, ('basic', 'contextual', 'dynamic', 'dynamic_complete')) + if self.options.postlex is not None and 'dynamic' in lexer: + raise ConfigurationError("Can't use postlex with a dynamic lexer. Use basic or contextual instead") + + if self.options.ambiguity == 'auto': + if self.options.parser == 'earley': + self.options.ambiguity = 'resolve' + else: + assert_config(self.options.parser, ('earley', 'cyk'), "%r doesn't support disambiguation. Use one of these parsers instead: %s") + + if self.options.priority == 'auto': + self.options.priority = 'normal' + + if self.options.priority not in _VALID_PRIORITY_OPTIONS: + raise ConfigurationError("invalid priority option: %r. Must be one of %r" % (self.options.priority, _VALID_PRIORITY_OPTIONS)) + if self.options.ambiguity not in _VALID_AMBIGUITY_OPTIONS: + raise ConfigurationError("invalid ambiguity option: %r. 
Must be one of %r" % (self.options.ambiguity, _VALID_AMBIGUITY_OPTIONS)) + + if self.options.parser is None: + terminals_to_keep = '*' + elif self.options.postlex is not None: + terminals_to_keep = set(self.options.postlex.always_accept) + else: + terminals_to_keep = set() + + ## + + self.terminals, self.rules, self.ignore_tokens = self.grammar.compile(self.options.start, terminals_to_keep) + + if self.options.edit_terminals: + for t in self.terminals: + self.options.edit_terminals(t) + + self._terminals_dict = {t.name: t for t in self.terminals} + + ## + + if self.options.priority == 'invert': + for rule in self.rules: + if rule.options.priority is not None: + rule.options.priority = -rule.options.priority + for term in self.terminals: + term.priority = -term.priority + ## + + ## + + ## + + elif self.options.priority is None: + for rule in self.rules: + if rule.options.priority is not None: + rule.options.priority = None + for term in self.terminals: + term.priority = 0 + + ## + + self.lexer_conf = LexerConf( + self.terminals, re_module, self.ignore_tokens, self.options.postlex, + self.options.lexer_callbacks, self.options.g_regex_flags, use_bytes=self.options.use_bytes, strict=self.options.strict + ) + + if self.options.parser: + self.parser = self._build_parser() + elif lexer: + self.lexer = self._build_lexer() + + if cache_fn: + logger.debug('Saving grammar to cache: %s', cache_fn) + try: + with FS.open(cache_fn, 'wb') as f: + assert cache_sha256 is not None + f.write(cache_sha256.encode('utf8') + b'\n') + pickle.dump(used_files, f) + self.save(f, _LOAD_ALLOWED_OPTIONS) + except IOError as e: + logger.exception("Failed to save Lark to cache: %r.", cache_fn, e) + + if __doc__: + __doc__ += "\n\n" + LarkOptions.OPTIONS_DOC + + __serialize_fields__ = 'parser', 'rules', 'options' + + def _build_lexer(self, dont_ignore: bool=False) -> BasicLexer: + lexer_conf = self.lexer_conf + if dont_ignore: + from copy import copy + lexer_conf = copy(lexer_conf) + lexer_conf.ignore = () + return BasicLexer(lexer_conf) + + def _prepare_callbacks(self) -> None: + self._callbacks = {} + ## + + if self.options.ambiguity != 'forest': + self._parse_tree_builder = ParseTreeBuilder( + self.rules, + self.options.tree_class or Tree, + self.options.propagate_positions, + self.options.parser != 'lalr' and self.options.ambiguity == 'explicit', + self.options.maybe_placeholders + ) + self._callbacks = self._parse_tree_builder.create_callback(self.options.transformer) + self._callbacks.update(_get_lexer_callbacks(self.options.transformer, self.terminals)) + + def _build_parser(self) -> "ParsingFrontend": + self._prepare_callbacks() + _validate_frontend_args(self.options.parser, self.options.lexer) + parser_conf = ParserConf(self.rules, self._callbacks, self.options.start) + return _construct_parsing_frontend( + self.options.parser, + self.options.lexer, + self.lexer_conf, + parser_conf, + options=self.options + ) + + def save(self, f, exclude_options: Collection[str] = ()) -> None: + #-- + data, m = self.memo_serialize([TerminalDef, Rule]) + if exclude_options: + data["options"] = {n: v for n, v in data["options"].items() if n not in exclude_options} + pickle.dump({'data': data, 'memo': m}, f, protocol=pickle.HIGHEST_PROTOCOL) + + @classmethod + def load(cls: Type[_T], f) -> _T: + #-- + inst = cls.__new__(cls) + return inst._load(f) + + def _deserialize_lexer_conf(self, data: Dict[str, Any], memo: Dict[int, Union[TerminalDef, Rule]], options: LarkOptions) -> LexerConf: + lexer_conf = 
LexerConf.deserialize(data['lexer_conf'], memo) + lexer_conf.callbacks = options.lexer_callbacks or {} + lexer_conf.re_module = regex if options.regex else re + lexer_conf.use_bytes = options.use_bytes + lexer_conf.g_regex_flags = options.g_regex_flags + lexer_conf.skip_validation = True + lexer_conf.postlex = options.postlex + return lexer_conf + + def _load(self: _T, f: Any, **kwargs) -> _T: + if isinstance(f, dict): + d = f + else: + d = pickle.load(f) + memo_json = d['memo'] + data = d['data'] + + assert memo_json + memo = SerializeMemoizer.deserialize(memo_json, {'Rule': Rule, 'TerminalDef': TerminalDef}, {}) + options = dict(data['options']) + if (set(kwargs) - _LOAD_ALLOWED_OPTIONS) & set(LarkOptions._defaults): + raise ConfigurationError("Some options are not allowed when loading a Parser: {}" + .format(set(kwargs) - _LOAD_ALLOWED_OPTIONS)) + options.update(kwargs) + self.options = LarkOptions.deserialize(options, memo) + self.rules = [Rule.deserialize(r, memo) for r in data['rules']] + self.source_path = '' + _validate_frontend_args(self.options.parser, self.options.lexer) + self.lexer_conf = self._deserialize_lexer_conf(data['parser'], memo, self.options) + self.terminals = self.lexer_conf.terminals + self._prepare_callbacks() + self._terminals_dict = {t.name: t for t in self.terminals} + self.parser = _deserialize_parsing_frontend( + data['parser'], + memo, + self.lexer_conf, + self._callbacks, + self.options, ## + + ) + return self + + @classmethod + def _load_from_dict(cls, data, memo, **kwargs): + inst = cls.__new__(cls) + return inst._load({'data': data, 'memo': memo}, **kwargs) + + @classmethod + def open(cls: Type[_T], grammar_filename: str, rel_to: Optional[str]=None, **options) -> _T: + #-- + if rel_to: + basepath = os.path.dirname(rel_to) + grammar_filename = os.path.join(basepath, grammar_filename) + with open(grammar_filename, encoding='utf8') as f: + return cls(f, **options) + + @classmethod + def open_from_package(cls: Type[_T], package: str, grammar_path: str, search_paths: 'Sequence[str]'=[""], **options) -> _T: + #-- + package_loader = FromPackageLoader(package, search_paths) + full_path, text = package_loader(None, grammar_path) + options.setdefault('source_path', full_path) + options.setdefault('import_paths', []) + options['import_paths'].append(package_loader) + return cls(text, **options) + + def __repr__(self): + return 'Lark(open(%r), parser=%r, lexer=%r, ...)' % (self.source_path, self.options.parser, self.options.lexer) + + + def lex(self, text: str, dont_ignore: bool=False) -> Iterator[Token]: + #-- + lexer: Lexer + if not hasattr(self, 'lexer') or dont_ignore: + lexer = self._build_lexer(dont_ignore) + else: + lexer = self.lexer + lexer_thread = LexerThread.from_text(lexer, text) + stream = lexer_thread.lex(None) + if self.options.postlex: + return self.options.postlex.process(stream) + return stream + + def get_terminal(self, name: str) -> TerminalDef: + #-- + return self._terminals_dict[name] + + def parse_interactive(self, text: Optional[str]=None, start: Optional[str]=None) -> 'InteractiveParser': + #-- + return self.parser.parse_interactive(text, start=start) + + def parse(self, text: str, start: Optional[str]=None, on_error: 'Optional[Callable[[UnexpectedInput], bool]]'=None) -> 'ParseTree': + #-- + return self.parser.parse(text, start=start, on_error=on_error) + + + + +class DedentError(LarkError): + pass + +class Indenter(PostLex, ABC): + paren_level: int + indent_level: List[int] + + def __init__(self) -> None: + self.paren_level = 0 + 
self.indent_level = [0] + assert self.tab_len > 0 + + def handle_NL(self, token: Token) -> Iterator[Token]: + if self.paren_level > 0: + return + + yield token + + indent_str = token.rsplit('\n', 1)[1] ## + + indent = indent_str.count(' ') + indent_str.count('\t') * self.tab_len + + if indent > self.indent_level[-1]: + self.indent_level.append(indent) + yield Token.new_borrow_pos(self.INDENT_type, indent_str, token) + else: + while indent < self.indent_level[-1]: + self.indent_level.pop() + yield Token.new_borrow_pos(self.DEDENT_type, indent_str, token) + + if indent != self.indent_level[-1]: + raise DedentError('Unexpected dedent to column %s. Expected dedent to %s' % (indent, self.indent_level[-1])) + + def _process(self, stream): + for token in stream: + if token.type == self.NL_type: + yield from self.handle_NL(token) + else: + yield token + + if token.type in self.OPEN_PAREN_types: + self.paren_level += 1 + elif token.type in self.CLOSE_PAREN_types: + self.paren_level -= 1 + assert self.paren_level >= 0 + + while len(self.indent_level) > 1: + self.indent_level.pop() + yield Token(self.DEDENT_type, '') + + assert self.indent_level == [0], self.indent_level + + def process(self, stream): + self.paren_level = 0 + self.indent_level = [0] + return self._process(stream) + + ## + + @property + def always_accept(self): + return (self.NL_type,) + + @property + @abstractmethod + def NL_type(self) -> str: + raise NotImplementedError() + + @property + @abstractmethod + def OPEN_PAREN_types(self) -> List[str]: + raise NotImplementedError() + + @property + @abstractmethod + def CLOSE_PAREN_types(self) -> List[str]: + raise NotImplementedError() + + @property + @abstractmethod + def INDENT_type(self) -> str: + raise NotImplementedError() + + @property + @abstractmethod + def DEDENT_type(self) -> str: + raise NotImplementedError() + + @property + @abstractmethod + def tab_len(self) -> int: + raise NotImplementedError() + + +class PythonIndenter(Indenter): + NL_type = '_NEWLINE' + OPEN_PAREN_types = ['LPAR', 'LSQB', 'LBRACE'] + CLOSE_PAREN_types = ['RPAR', 'RSQB', 'RBRACE'] + INDENT_type = '_INDENT' + DEDENT_type = '_DEDENT' + tab_len = 8 + + +import pickle, zlib, base64 +DATA = ( +{'parser': {'lexer_conf': {'terminals': [{'@': 0}, {'@': 1}, {'@': 2}, {'@': 3}, {'@': 4}, {'@': 5}, {'@': 6}, {'@': 7}, {'@': 8}, {'@': 9}, {'@': 10}, {'@': 11}, {'@': 12}, {'@': 13}, {'@': 14}, {'@': 15}, {'@': 16}, {'@': 17}, {'@': 18}, {'@': 19}, {'@': 20}, {'@': 21}, {'@': 22}, {'@': 23}], 'ignore': ['WS'], 'g_regex_flags': 0, 'use_bytes': False, 'lexer_type': 'contextual', '__type__': 'LexerConf'}, 'parser_conf': {'rules': [{'@': 24}, {'@': 25}, {'@': 26}, {'@': 27}, {'@': 28}, {'@': 29}, {'@': 30}, {'@': 31}, {'@': 32}, {'@': 33}, {'@': 34}, {'@': 35}, {'@': 36}, {'@': 37}, {'@': 38}, {'@': 39}, {'@': 40}, {'@': 41}, {'@': 42}, {'@': 43}, {'@': 44}, {'@': 45}, {'@': 46}, {'@': 47}, {'@': 48}, {'@': 49}, {'@': 50}, {'@': 51}, {'@': 52}, {'@': 53}, {'@': 54}, {'@': 55}, {'@': 56}, {'@': 57}, {'@': 58}, {'@': 59}, {'@': 60}, {'@': 61}, {'@': 62}, {'@': 63}, {'@': 64}, {'@': 65}, {'@': 66}, {'@': 67}], 'start': ['start'], 'parser_type': 'lalr', '__type__': 'ParserConf'}, 'parser': {'tokens': {0: 'func_name', 1: 'NUMBER', 2: 'atom', 3: 'CNAME', 4: 'LPAR', 5: 'pow', 6: 'factor', 7: 'PLUS', 8: 'MINUS', 9: '__ANON_3', 10: '__ANON_0', 11: 'LESSTHAN', 12: 'MORETHAN', 13: '__ANON_1', 14: '__ANON_2', 15: 'COMMA', 16: 'RPAR', 17: 'term', 18: 'bitwise_inversion', 19: 'shift_expr', 20: 'TILDE', 21: 'sum', 22: 'STAR', 23: 'SLASH', 
24: 'VBAR', 25: '__ANON_4', 26: '__ANON_5', 27: 'CIRCUMFLEX', 28: '$END', 29: 'AMPERSAND', 30: 'PERCENT', 31: 'bitwise_or', 32: 'bitwise_and', 33: 'bitwise_xor', 34: '__arglist_star_0', 35: 'comparison', 36: 'start', 37: '__ANON_6', 38: 'trailer', 39: 'arglist'}, 'states': {0: {0: (0, 29), 1: (0, 38), 2: (0, 52), 3: (0, 63), 4: (0, 16), 5: (0, 30), 6: (0, 9), 7: (0, 27), 8: (0, 39)}, 1: {9: (0, 19), 10: (0, 53), 11: (0, 28), 12: (0, 7), 13: (0, 23), 14: (0, 40), 15: (1, {'@': 66}), 16: (1, {'@': 66})}, 2: {17: (0, 26), 2: (0, 52), 5: (0, 30), 18: (0, 10), 8: (0, 39), 19: (0, 66), 0: (0, 29), 6: (0, 56), 1: (0, 38), 3: (0, 63), 4: (0, 16), 7: (0, 27), 20: (0, 2), 21: (0, 45)}, 3: {7: (1, {'@': 51}), 12: (1, {'@': 51}), 9: (1, {'@': 51}), 22: (1, {'@': 51}), 23: (1, {'@': 51}), 24: (1, {'@': 51}), 25: (1, {'@': 51}), 8: (1, {'@': 51}), 15: (1, {'@': 51}), 14: (1, {'@': 51}), 10: (1, {'@': 51}), 16: (1, {'@': 51}), 11: (1, {'@': 51}), 26: (1, {'@': 51}), 27: (1, {'@': 51}), 28: (1, {'@': 51}), 29: (1, {'@': 51}), 13: (1, {'@': 51}), 30: (1, {'@': 51})}, 4: {7: (1, {'@': 49}), 12: (1, {'@': 49}), 9: (1, {'@': 49}), 24: (1, {'@': 49}), 25: (1, {'@': 49}), 8: (1, {'@': 49}), 15: (1, {'@': 49}), 14: (1, {'@': 49}), 10: (1, {'@': 49}), 16: (1, {'@': 49}), 11: (1, {'@': 49}), 26: (1, {'@': 49}), 27: (1, {'@': 49}), 28: (1, {'@': 49}), 29: (1, {'@': 49}), 13: (1, {'@': 49})}, 5: {27: (0, 31), 16: (1, {'@': 33}), 12: (1, {'@': 33}), 11: (1, {'@': 33}), 9: (1, {'@': 33}), 24: (1, {'@': 33}), 28: (1, {'@': 33}), 15: (1, {'@': 33}), 13: (1, {'@': 33}), 14: (1, {'@': 33}), 10: (1, {'@': 33})}, 6: {27: (0, 31), 16: (1, {'@': 32}), 12: (1, {'@': 32}), 11: (1, {'@': 32}), 9: (1, {'@': 32}), 24: (1, {'@': 32}), 28: (1, {'@': 32}), 15: (1, {'@': 32}), 13: (1, {'@': 32}), 14: (1, {'@': 32}), 10: (1, {'@': 32})}, 7: {31: (0, 35), 17: (0, 26), 2: (0, 52), 32: (0, 49), 5: (0, 30), 8: (0, 39), 19: (0, 66), 0: (0, 29), 6: (0, 56), 33: (0, 6), 1: (0, 38), 18: (0, 62), 3: (0, 63), 4: (0, 16), 7: (0, 27), 20: (0, 2), 21: (0, 45)}, 8: {16: (1, {'@': 45}), 12: (1, {'@': 45}), 11: (1, {'@': 45}), 29: (1, {'@': 45}), 9: (1, {'@': 45}), 26: (1, {'@': 45}), 25: (1, {'@': 45}), 24: (1, {'@': 45}), 27: (1, {'@': 45}), 28: (1, {'@': 45}), 15: (1, {'@': 45}), 13: (1, {'@': 45}), 14: (1, {'@': 45}), 10: (1, {'@': 45})}, 9: {7: (1, {'@': 54}), 12: (1, {'@': 54}), 9: (1, {'@': 54}), 22: (1, {'@': 54}), 23: (1, {'@': 54}), 24: (1, {'@': 54}), 25: (1, {'@': 54}), 8: (1, {'@': 54}), 15: (1, {'@': 54}), 14: (1, {'@': 54}), 10: (1, {'@': 54}), 16: (1, {'@': 54}), 11: (1, {'@': 54}), 26: (1, {'@': 54}), 27: (1, {'@': 54}), 28: (1, {'@': 54}), 29: (1, {'@': 54}), 13: (1, {'@': 54}), 30: (1, {'@': 54})}, 10: {12: (1, {'@': 39}), 9: (1, {'@': 39}), 24: (1, {'@': 39}), 15: (1, {'@': 39}), 14: (1, {'@': 39}), 10: (1, {'@': 39}), 16: (1, {'@': 39}), 11: (1, {'@': 39}), 27: (1, {'@': 39}), 28: (1, {'@': 39}), 29: (1, {'@': 39}), 13: (1, {'@': 39})}, 11: {34: (0, 36), 10: (0, 53), 15: (0, 24), 13: (0, 23), 14: (0, 40), 9: (0, 19), 12: (0, 7), 11: (0, 28), 16: (1, {'@': 65})}, 12: {24: (0, 41), 12: (1, {'@': 25}), 11: (1, {'@': 25}), 9: (1, {'@': 25}), 28: (1, {'@': 25}), 13: (1, {'@': 25}), 14: (1, {'@': 25}), 10: (1, {'@': 25}), 16: (1, {'@': 25}), 15: (1, {'@': 25})}, 13: {24: (0, 41), 12: (1, {'@': 31}), 11: (1, {'@': 31}), 9: (1, {'@': 31}), 28: (1, {'@': 31}), 13: (1, {'@': 31}), 14: (1, {'@': 31}), 10: (1, {'@': 31}), 16: (1, {'@': 31}), 15: (1, {'@': 31})}, 14: {16: (1, {'@': 37}), 12: (1, {'@': 37}), 11: (1, {'@': 37}), 9: (1, {'@': 
37}), 15: (1, {'@': 37}), 27: (1, {'@': 37}), 24: (1, {'@': 37}), 28: (1, {'@': 37}), 29: (1, {'@': 37}), 13: (1, {'@': 37}), 14: (1, {'@': 37}), 10: (1, {'@': 37})}, 15: {35: (0, 61), 17: (0, 26), 2: (0, 52), 19: (0, 66), 0: (0, 29), 6: (0, 56), 33: (0, 6), 1: (0, 38), 18: (0, 62), 3: (0, 63), 36: (0, 69), 4: (0, 16), 32: (0, 49), 20: (0, 2), 31: (0, 12), 5: (0, 30), 8: (0, 39), 7: (0, 27), 21: (0, 45)}, 16: {35: (0, 55), 17: (0, 26), 2: (0, 52), 19: (0, 66), 0: (0, 29), 6: (0, 56), 33: (0, 6), 1: (0, 38), 18: (0, 62), 3: (0, 63), 4: (0, 16), 32: (0, 49), 20: (0, 2), 31: (0, 12), 5: (0, 30), 8: (0, 39), 7: (0, 27), 21: (0, 45)}, 17: {17: (0, 26), 2: (0, 52), 5: (0, 30), 8: (0, 39), 0: (0, 29), 7: (0, 27), 6: (0, 56), 1: (0, 38), 3: (0, 63), 4: (0, 16), 21: (0, 60)}, 18: {24: (0, 41), 12: (1, {'@': 29}), 11: (1, {'@': 29}), 9: (1, {'@': 29}), 28: (1, {'@': 29}), 13: (1, {'@': 29}), 14: (1, {'@': 29}), 10: (1, {'@': 29}), 16: (1, {'@': 29}), 15: (1, {'@': 29})}, 19: {31: (0, 13), 17: (0, 26), 2: (0, 52), 32: (0, 49), 5: (0, 30), 8: (0, 39), 19: (0, 66), 0: (0, 29), 6: (0, 56), 33: (0, 6), 1: (0, 38), 18: (0, 62), 3: (0, 63), 4: (0, 16), 7: (0, 27), 20: (0, 2), 21: (0, 45)}, 20: {7: (1, {'@': 58}), 12: (1, {'@': 58}), 9: (1, {'@': 58}), 22: (1, {'@': 58}), 23: (1, {'@': 58}), 24: (1, {'@': 58}), 25: (1, {'@': 58}), 37: (1, {'@': 58}), 8: (1, {'@': 58}), 15: (1, {'@': 58}), 14: (1, {'@': 58}), 10: (1, {'@': 58}), 16: (1, {'@': 58}), 11: (1, {'@': 58}), 26: (1, {'@': 58}), 27: (1, {'@': 58}), 28: (1, {'@': 58}), 29: (1, {'@': 58}), 13: (1, {'@': 58}), 30: (1, {'@': 58})}, 21: {7: (1, {'@': 47}), 12: (1, {'@': 47}), 9: (1, {'@': 47}), 24: (1, {'@': 47}), 25: (1, {'@': 47}), 8: (1, {'@': 47}), 15: (1, {'@': 47}), 14: (1, {'@': 47}), 10: (1, {'@': 47}), 16: (1, {'@': 47}), 11: (1, {'@': 47}), 26: (1, {'@': 47}), 27: (1, {'@': 47}), 28: (1, {'@': 47}), 29: (1, {'@': 47}), 13: (1, {'@': 47})}, 22: {7: (1, {'@': 55}), 12: (1, {'@': 55}), 9: (1, {'@': 55}), 22: (1, {'@': 55}), 23: (1, {'@': 55}), 24: (1, {'@': 55}), 25: (1, {'@': 55}), 37: (1, {'@': 55}), 8: (1, {'@': 55}), 15: (1, {'@': 55}), 14: (1, {'@': 55}), 10: (1, {'@': 55}), 16: (1, {'@': 55}), 11: (1, {'@': 55}), 26: (1, {'@': 55}), 27: (1, {'@': 55}), 28: (1, {'@': 55}), 29: (1, {'@': 55}), 13: (1, {'@': 55}), 30: (1, {'@': 55})}, 23: {31: (0, 68), 17: (0, 26), 2: (0, 52), 32: (0, 49), 5: (0, 30), 8: (0, 39), 19: (0, 66), 0: (0, 29), 6: (0, 56), 33: (0, 6), 1: (0, 38), 18: (0, 62), 3: (0, 63), 4: (0, 16), 7: (0, 27), 20: (0, 2), 21: (0, 45)}, 24: {17: (0, 26), 2: (0, 52), 35: (0, 1), 19: (0, 66), 0: (0, 29), 6: (0, 56), 33: (0, 6), 1: (0, 38), 18: (0, 62), 3: (0, 63), 4: (0, 16), 32: (0, 49), 20: (0, 2), 31: (0, 12), 5: (0, 30), 8: (0, 39), 7: (0, 27), 21: (0, 45), 16: (1, {'@': 64})}, 25: {7: (1, {'@': 60}), 22: (1, {'@': 60}), 37: (1, {'@': 60}), 8: (1, {'@': 60}), 10: (1, {'@': 60}), 11: (1, {'@': 60}), 26: (1, {'@': 60}), 27: (1, {'@': 60}), 28: (1, {'@': 60}), 29: (1, {'@': 60}), 13: (1, {'@': 60}), 12: (1, {'@': 60}), 9: (1, {'@': 60}), 23: (1, {'@': 60}), 25: (1, {'@': 60}), 24: (1, {'@': 60}), 15: (1, {'@': 60}), 14: (1, {'@': 60}), 16: (1, {'@': 60}), 30: (1, {'@': 60})}, 26: {8: (0, 46), 7: (0, 17), 16: (1, {'@': 43}), 12: (1, {'@': 43}), 11: (1, {'@': 43}), 29: (1, {'@': 43}), 9: (1, {'@': 43}), 26: (1, {'@': 43}), 25: (1, {'@': 43}), 24: (1, {'@': 43}), 27: (1, {'@': 43}), 28: (1, {'@': 43}), 15: (1, {'@': 43}), 13: (1, {'@': 43}), 14: (1, {'@': 43}), 10: (1, {'@': 43})}, 27: {0: (0, 29), 1: (0, 38), 2: (0, 52), 3: (0, 63), 4: 
(0, 16), 6: (0, 3), 5: (0, 30), 7: (0, 27), 8: (0, 39)}, 28: {31: (0, 67), 17: (0, 26), 2: (0, 52), 32: (0, 49), 5: (0, 30), 8: (0, 39), 19: (0, 66), 0: (0, 29), 6: (0, 56), 33: (0, 6), 1: (0, 38), 18: (0, 62), 3: (0, 63), 4: (0, 16), 7: (0, 27), 20: (0, 2), 21: (0, 45)}, 29: {4: (0, 58), 38: (0, 20)}, 30: {7: (1, {'@': 50}), 12: (1, {'@': 50}), 9: (1, {'@': 50}), 22: (1, {'@': 50}), 23: (1, {'@': 50}), 24: (1, {'@': 50}), 25: (1, {'@': 50}), 8: (1, {'@': 50}), 15: (1, {'@': 50}), 14: (1, {'@': 50}), 10: (1, {'@': 50}), 16: (1, {'@': 50}), 11: (1, {'@': 50}), 26: (1, {'@': 50}), 27: (1, {'@': 50}), 28: (1, {'@': 50}), 29: (1, {'@': 50}), 13: (1, {'@': 50}), 30: (1, {'@': 50})}, 31: {17: (0, 26), 2: (0, 52), 5: (0, 30), 32: (0, 37), 8: (0, 39), 19: (0, 66), 0: (0, 29), 6: (0, 56), 1: (0, 38), 18: (0, 62), 3: (0, 63), 4: (0, 16), 7: (0, 27), 20: (0, 2), 21: (0, 45)}, 32: {16: (1, {'@': 41}), 12: (1, {'@': 41}), 11: (1, {'@': 41}), 29: (1, {'@': 41}), 9: (1, {'@': 41}), 26: (1, {'@': 41}), 25: (1, {'@': 41}), 24: (1, {'@': 41}), 27: (1, {'@': 41}), 28: (1, {'@': 41}), 15: (1, {'@': 41}), 13: (1, {'@': 41}), 14: (1, {'@': 41}), 10: (1, {'@': 41})}, 33: {7: (1, {'@': 61}), 22: (1, {'@': 61}), 37: (1, {'@': 61}), 8: (1, {'@': 61}), 10: (1, {'@': 61}), 11: (1, {'@': 61}), 26: (1, {'@': 61}), 27: (1, {'@': 61}), 28: (1, {'@': 61}), 29: (1, {'@': 61}), 13: (1, {'@': 61}), 12: (1, {'@': 61}), 9: (1, {'@': 61}), 23: (1, {'@': 61}), 25: (1, {'@': 61}), 24: (1, {'@': 61}), 15: (1, {'@': 61}), 14: (1, {'@': 61}), 16: (1, {'@': 61}), 30: (1, {'@': 61})}, 34: {7: (1, {'@': 48}), 12: (1, {'@': 48}), 9: (1, {'@': 48}), 24: (1, {'@': 48}), 25: (1, {'@': 48}), 8: (1, {'@': 48}), 15: (1, {'@': 48}), 14: (1, {'@': 48}), 10: (1, {'@': 48}), 16: (1, {'@': 48}), 11: (1, {'@': 48}), 26: (1, {'@': 48}), 27: (1, {'@': 48}), 28: (1, {'@': 48}), 29: (1, {'@': 48}), 13: (1, {'@': 48})}, 35: {24: (0, 41), 12: (1, {'@': 26}), 11: (1, {'@': 26}), 9: (1, {'@': 26}), 28: (1, {'@': 26}), 13: (1, {'@': 26}), 14: (1, {'@': 26}), 10: (1, {'@': 26}), 16: (1, {'@': 26}), 15: (1, {'@': 26})}, 36: {15: (0, 43), 16: (1, {'@': 63})}, 37: {29: (0, 57), 16: (1, {'@': 35}), 12: (1, {'@': 35}), 11: (1, {'@': 35}), 9: (1, {'@': 35}), 27: (1, {'@': 35}), 24: (1, {'@': 35}), 28: (1, {'@': 35}), 15: (1, {'@': 35}), 13: (1, {'@': 35}), 14: (1, {'@': 35}), 10: (1, {'@': 35})}, 38: {7: (1, {'@': 57}), 12: (1, {'@': 57}), 9: (1, {'@': 57}), 22: (1, {'@': 57}), 23: (1, {'@': 57}), 24: (1, {'@': 57}), 25: (1, {'@': 57}), 37: (1, {'@': 57}), 8: (1, {'@': 57}), 15: (1, {'@': 57}), 14: (1, {'@': 57}), 10: (1, {'@': 57}), 16: (1, {'@': 57}), 11: (1, {'@': 57}), 26: (1, {'@': 57}), 27: (1, {'@': 57}), 28: (1, {'@': 57}), 29: (1, {'@': 57}), 13: (1, {'@': 57}), 30: (1, {'@': 57})}, 39: {0: (0, 29), 1: (0, 38), 2: (0, 52), 3: (0, 63), 6: (0, 48), 4: (0, 16), 5: (0, 30), 7: (0, 27), 8: (0, 39)}, 40: {31: (0, 18), 17: (0, 26), 2: (0, 52), 32: (0, 49), 5: (0, 30), 8: (0, 39), 19: (0, 66), 0: (0, 29), 6: (0, 56), 33: (0, 6), 1: (0, 38), 18: (0, 62), 3: (0, 63), 4: (0, 16), 7: (0, 27), 20: (0, 2), 21: (0, 45)}, 41: {17: (0, 26), 2: (0, 52), 32: (0, 49), 5: (0, 30), 8: (0, 39), 19: (0, 66), 0: (0, 29), 6: (0, 56), 1: (0, 38), 18: (0, 62), 33: (0, 5), 3: (0, 63), 4: (0, 16), 7: (0, 27), 20: (0, 2), 21: (0, 45)}, 42: {0: (0, 29), 17: (0, 21), 6: (0, 56), 1: (0, 38), 2: (0, 52), 3: (0, 63), 4: (0, 16), 5: (0, 30), 7: (0, 27), 8: (0, 39)}, 43: {17: (0, 26), 2: (0, 52), 35: (0, 44), 19: (0, 66), 0: (0, 29), 6: (0, 56), 33: (0, 6), 1: (0, 38), 18: (0, 62), 3: (0, 
63), 4: (0, 16), 32: (0, 49), 20: (0, 2), 31: (0, 12), 5: (0, 30), 8: (0, 39), 7: (0, 27), 21: (0, 45), 16: (1, {'@': 62})}, 44: {9: (0, 19), 10: (0, 53), 12: (0, 7), 11: (0, 28), 13: (0, 23), 14: (0, 40), 15: (1, {'@': 67}), 16: (1, {'@': 67})}, 45: {16: (1, {'@': 40}), 12: (1, {'@': 40}), 11: (1, {'@': 40}), 29: (1, {'@': 40}), 9: (1, {'@': 40}), 26: (1, {'@': 40}), 25: (1, {'@': 40}), 24: (1, {'@': 40}), 27: (1, {'@': 40}), 28: (1, {'@': 40}), 15: (1, {'@': 40}), 13: (1, {'@': 40}), 14: (1, {'@': 40}), 10: (1, {'@': 40})}, 46: {17: (0, 26), 2: (0, 52), 5: (0, 30), 8: (0, 39), 0: (0, 29), 6: (0, 56), 1: (0, 38), 3: (0, 63), 4: (0, 16), 21: (0, 8), 7: (0, 27)}, 47: {0: (0, 29), 6: (0, 56), 1: (0, 38), 2: (0, 52), 3: (0, 63), 4: (0, 16), 17: (0, 34), 5: (0, 30), 7: (0, 27), 8: (0, 39)}, 48: {7: (1, {'@': 52}), 12: (1, {'@': 52}), 9: (1, {'@': 52}), 22: (1, {'@': 52}), 23: (1, {'@': 52}), 24: (1, {'@': 52}), 25: (1, {'@': 52}), 8: (1, {'@': 52}), 15: (1, {'@': 52}), 14: (1, {'@': 52}), 10: (1, {'@': 52}), 16: (1, {'@': 52}), 11: (1, {'@': 52}), 26: (1, {'@': 52}), 27: (1, {'@': 52}), 28: (1, {'@': 52}), 29: (1, {'@': 52}), 13: (1, {'@': 52}), 30: (1, {'@': 52})}, 49: {29: (0, 57), 16: (1, {'@': 34}), 12: (1, {'@': 34}), 11: (1, {'@': 34}), 9: (1, {'@': 34}), 27: (1, {'@': 34}), 24: (1, {'@': 34}), 28: (1, {'@': 34}), 15: (1, {'@': 34}), 13: (1, {'@': 34}), 14: (1, {'@': 34}), 10: (1, {'@': 34})}, 50: {17: (0, 26), 2: (0, 52), 5: (0, 30), 21: (0, 64), 8: (0, 39), 0: (0, 29), 6: (0, 56), 1: (0, 38), 3: (0, 63), 4: (0, 16), 7: (0, 27)}, 51: {24: (0, 41), 12: (1, {'@': 30}), 11: (1, {'@': 30}), 9: (1, {'@': 30}), 28: (1, {'@': 30}), 13: (1, {'@': 30}), 14: (1, {'@': 30}), 10: (1, {'@': 30}), 16: (1, {'@': 30}), 15: (1, {'@': 30})}, 52: {37: (0, 0), 7: (1, {'@': 53}), 12: (1, {'@': 53}), 9: (1, {'@': 53}), 22: (1, {'@': 53}), 23: (1, {'@': 53}), 24: (1, {'@': 53}), 25: (1, {'@': 53}), 8: (1, {'@': 53}), 15: (1, {'@': 53}), 14: (1, {'@': 53}), 10: (1, {'@': 53}), 16: (1, {'@': 53}), 11: (1, {'@': 53}), 26: (1, {'@': 53}), 27: (1, {'@': 53}), 28: (1, {'@': 53}), 29: (1, {'@': 53}), 13: (1, {'@': 53}), 30: (1, {'@': 53})}, 53: {31: (0, 51), 17: (0, 26), 2: (0, 52), 32: (0, 49), 5: (0, 30), 8: (0, 39), 19: (0, 66), 0: (0, 29), 6: (0, 56), 33: (0, 6), 1: (0, 38), 18: (0, 62), 3: (0, 63), 4: (0, 16), 7: (0, 27), 20: (0, 2), 21: (0, 45)}, 54: {17: (0, 26), 2: (0, 52), 5: (0, 30), 8: (0, 39), 21: (0, 32), 0: (0, 29), 6: (0, 56), 1: (0, 38), 3: (0, 63), 4: (0, 16), 7: (0, 27)}, 55: {9: (0, 19), 10: (0, 53), 12: (0, 7), 11: (0, 28), 16: (0, 22), 13: (0, 23), 14: (0, 40)}, 56: {30: (0, 59), 22: (0, 42), 23: (0, 47), 7: (1, {'@': 46}), 12: (1, {'@': 46}), 9: (1, {'@': 46}), 24: (1, {'@': 46}), 25: (1, {'@': 46}), 8: (1, {'@': 46}), 15: (1, {'@': 46}), 14: (1, {'@': 46}), 10: (1, {'@': 46}), 16: (1, {'@': 46}), 11: (1, {'@': 46}), 26: (1, {'@': 46}), 27: (1, {'@': 46}), 28: (1, {'@': 46}), 29: (1, {'@': 46}), 13: (1, {'@': 46})}, 57: {17: (0, 26), 2: (0, 52), 5: (0, 30), 18: (0, 14), 8: (0, 39), 19: (0, 66), 0: (0, 29), 6: (0, 56), 1: (0, 38), 3: (0, 63), 4: (0, 16), 7: (0, 27), 20: (0, 2), 21: (0, 45)}, 58: {16: (0, 33), 17: (0, 26), 2: (0, 52), 35: (0, 11), 19: (0, 66), 0: (0, 29), 6: (0, 56), 33: (0, 6), 1: (0, 38), 18: (0, 62), 3: (0, 63), 4: (0, 16), 32: (0, 49), 20: (0, 2), 31: (0, 12), 5: (0, 30), 8: (0, 39), 39: (0, 65), 7: (0, 27), 21: (0, 45)}, 59: {17: (0, 4), 0: (0, 29), 6: (0, 56), 1: (0, 38), 2: (0, 52), 3: (0, 63), 4: (0, 16), 5: (0, 30), 7: (0, 27), 8: (0, 39)}, 60: {16: (1, {'@': 44}), 12: 
(1, {'@': 44}), 11: (1, {'@': 44}), 29: (1, {'@': 44}), 9: (1, {'@': 44}), 26: (1, {'@': 44}), 25: (1, {'@': 44}), 24: (1, {'@': 44}), 27: (1, {'@': 44}), 28: (1, {'@': 44}), 15: (1, {'@': 44}), 13: (1, {'@': 44}), 14: (1, {'@': 44}), 10: (1, {'@': 44})}, 61: {9: (0, 19), 10: (0, 53), 11: (0, 28), 12: (0, 7), 13: (0, 23), 14: (0, 40), 28: (1, {'@': 24})}, 62: {16: (1, {'@': 36}), 12: (1, {'@': 36}), 11: (1, {'@': 36}), 9: (1, {'@': 36}), 15: (1, {'@': 36}), 27: (1, {'@': 36}), 24: (1, {'@': 36}), 28: (1, {'@': 36}), 29: (1, {'@': 36}), 13: (1, {'@': 36}), 14: (1, {'@': 36}), 10: (1, {'@': 36})}, 63: {4: (1, {'@': 59}), 7: (1, {'@': 56}), 12: (1, {'@': 56}), 9: (1, {'@': 56}), 22: (1, {'@': 56}), 23: (1, {'@': 56}), 24: (1, {'@': 56}), 25: (1, {'@': 56}), 37: (1, {'@': 56}), 8: (1, {'@': 56}), 15: (1, {'@': 56}), 14: (1, {'@': 56}), 10: (1, {'@': 56}), 16: (1, {'@': 56}), 11: (1, {'@': 56}), 26: (1, {'@': 56}), 27: (1, {'@': 56}), 28: (1, {'@': 56}), 29: (1, {'@': 56}), 13: (1, {'@': 56}), 30: (1, {'@': 56})}, 64: {16: (1, {'@': 42}), 12: (1, {'@': 42}), 11: (1, {'@': 42}), 29: (1, {'@': 42}), 9: (1, {'@': 42}), 26: (1, {'@': 42}), 25: (1, {'@': 42}), 24: (1, {'@': 42}), 27: (1, {'@': 42}), 28: (1, {'@': 42}), 15: (1, {'@': 42}), 13: (1, {'@': 42}), 14: (1, {'@': 42}), 10: (1, {'@': 42})}, 65: {16: (0, 25)}, 66: {25: (0, 54), 26: (0, 50), 12: (1, {'@': 38}), 9: (1, {'@': 38}), 24: (1, {'@': 38}), 15: (1, {'@': 38}), 14: (1, {'@': 38}), 10: (1, {'@': 38}), 16: (1, {'@': 38}), 11: (1, {'@': 38}), 27: (1, {'@': 38}), 28: (1, {'@': 38}), 29: (1, {'@': 38}), 13: (1, {'@': 38})}, 67: {24: (0, 41), 12: (1, {'@': 28}), 11: (1, {'@': 28}), 9: (1, {'@': 28}), 28: (1, {'@': 28}), 13: (1, {'@': 28}), 14: (1, {'@': 28}), 10: (1, {'@': 28}), 16: (1, {'@': 28}), 15: (1, {'@': 28})}, 68: {24: (0, 41), 12: (1, {'@': 27}), 11: (1, {'@': 27}), 9: (1, {'@': 27}), 28: (1, {'@': 27}), 13: (1, {'@': 27}), 14: (1, {'@': 27}), 10: (1, {'@': 27}), 16: (1, {'@': 27}), 15: (1, {'@': 27})}, 69: {}}, 'start_states': {'start': 15}, 'end_states': {'start': 69}}, '__type__': 'ParsingFrontend'}, 'rules': [{'@': 24}, {'@': 25}, {'@': 26}, {'@': 27}, {'@': 28}, {'@': 29}, {'@': 30}, {'@': 31}, {'@': 32}, {'@': 33}, {'@': 34}, {'@': 35}, {'@': 36}, {'@': 37}, {'@': 38}, {'@': 39}, {'@': 40}, {'@': 41}, {'@': 42}, {'@': 43}, {'@': 44}, {'@': 45}, {'@': 46}, {'@': 47}, {'@': 48}, {'@': 49}, {'@': 50}, {'@': 51}, {'@': 52}, {'@': 53}, {'@': 54}, {'@': 55}, {'@': 56}, {'@': 57}, {'@': 58}, {'@': 59}, {'@': 60}, {'@': 61}, {'@': 62}, {'@': 63}, {'@': 64}, {'@': 65}, {'@': 66}, {'@': 67}], 'options': {'debug': False, 'strict': False, 'keep_all_tokens': False, 'tree_class': None, 'cache': False, 'postlex': None, 'parser': 'lalr', 'lexer': 'contextual', 'transformer': None, 'start': ['start'], 'priority': 'normal', 'ambiguity': 'auto', 'regex': False, 'propagate_positions': False, 'lexer_callbacks': {}, 'maybe_placeholders': False, 'edit_terminals': None, 'g_regex_flags': 0, 'use_bytes': False, 'import_paths': [], 'source_path': None, '_plugins': {}}, '__type__': 'Lark'} +) +MEMO = ( +{0: {'name': 'NUMBER', 'pattern': {'value': '(?:(?:(?:[0-9])+(?:e|E)(?:(?:\\+|\\-))?(?:[0-9])+|(?:(?:[0-9])+\\.(?:(?:[0-9])+)?|\\.(?:[0-9])+)(?:(?:e|E)(?:(?:\\+|\\-))?(?:[0-9])+)?)|(?:[0-9])+)', 'flags': [], 'raw': None, '_width': [1, 4294967295], '__type__': 'PatternRE'}, 'priority': 0, '__type__': 'TerminalDef'}, 1: {'name': 'CNAME', 'pattern': {'value': '(?:(?:[A-Z]|[a-z])|_)(?:(?:(?:[A-Z]|[a-z])|[0-9]|_))*', 'flags': [], 'raw': None, '_width': [1, 
4294967295], '__type__': 'PatternRE'}, 'priority': 0, '__type__': 'TerminalDef'}, 2: {'name': 'WS', 'pattern': {'value': '(?:[ \t\x0c\r\n])+', 'flags': [], 'raw': None, '_width': [1, 4294967295], '__type__': 'PatternRE'}, 'priority': 0, '__type__': 'TerminalDef'}, 3: {'name': '__ANON_0', 'pattern': {'value': '!=', 'flags': [], 'raw': '"!="', '__type__': 'PatternStr'}, 'priority': 0, '__type__': 'TerminalDef'}, 4: {'name': 'MORETHAN', 'pattern': {'value': '>', 'flags': [], 'raw': '">"', '__type__': 'PatternStr'}, 'priority': 0, '__type__': 'TerminalDef'}, 5: {'name': '__ANON_1', 'pattern': {'value': '>=', 'flags': [], 'raw': '">="', '__type__': 'PatternStr'}, 'priority': 0, '__type__': 'TerminalDef'}, 6: {'name': 'LESSTHAN', 'pattern': {'value': '<', 'flags': [], 'raw': '"<"', '__type__': 'PatternStr'}, 'priority': 0, '__type__': 'TerminalDef'}, 7: {'name': '__ANON_2', 'pattern': {'value': '<=', 'flags': [], 'raw': '"<="', '__type__': 'PatternStr'}, 'priority': 0, '__type__': 'TerminalDef'}, 8: {'name': '__ANON_3', 'pattern': {'value': '==', 'flags': [], 'raw': '"=="', '__type__': 'PatternStr'}, 'priority': 0, '__type__': 'TerminalDef'}, 9: {'name': 'VBAR', 'pattern': {'value': '|', 'flags': [], 'raw': '"|"', '__type__': 'PatternStr'}, 'priority': 0, '__type__': 'TerminalDef'}, 10: {'name': 'CIRCUMFLEX', 'pattern': {'value': '^', 'flags': [], 'raw': '"^"', '__type__': 'PatternStr'}, 'priority': 0, '__type__': 'TerminalDef'}, 11: {'name': 'AMPERSAND', 'pattern': {'value': '&', 'flags': [], 'raw': '"&"', '__type__': 'PatternStr'}, 'priority': 0, '__type__': 'TerminalDef'}, 12: {'name': 'TILDE', 'pattern': {'value': '~', 'flags': [], 'raw': '"~"', '__type__': 'PatternStr'}, 'priority': 0, '__type__': 'TerminalDef'}, 13: {'name': '__ANON_4', 'pattern': {'value': '<<', 'flags': [], 'raw': '"<<"', '__type__': 'PatternStr'}, 'priority': 0, '__type__': 'TerminalDef'}, 14: {'name': '__ANON_5', 'pattern': {'value': '>>', 'flags': [], 'raw': '">>"', '__type__': 'PatternStr'}, 'priority': 0, '__type__': 'TerminalDef'}, 15: {'name': 'PLUS', 'pattern': {'value': '+', 'flags': [], 'raw': '"+"', '__type__': 'PatternStr'}, 'priority': 0, '__type__': 'TerminalDef'}, 16: {'name': 'MINUS', 'pattern': {'value': '-', 'flags': [], 'raw': '"-"', '__type__': 'PatternStr'}, 'priority': 0, '__type__': 'TerminalDef'}, 17: {'name': 'STAR', 'pattern': {'value': '*', 'flags': [], 'raw': '"*"', '__type__': 'PatternStr'}, 'priority': 0, '__type__': 'TerminalDef'}, 18: {'name': 'SLASH', 'pattern': {'value': '/', 'flags': [], 'raw': '"/"', '__type__': 'PatternStr'}, 'priority': 0, '__type__': 'TerminalDef'}, 19: {'name': 'PERCENT', 'pattern': {'value': '%', 'flags': [], 'raw': '"%"', '__type__': 'PatternStr'}, 'priority': 0, '__type__': 'TerminalDef'}, 20: {'name': '__ANON_6', 'pattern': {'value': '**', 'flags': [], 'raw': '"**"', '__type__': 'PatternStr'}, 'priority': 0, '__type__': 'TerminalDef'}, 21: {'name': 'LPAR', 'pattern': {'value': '(', 'flags': [], 'raw': '"("', '__type__': 'PatternStr'}, 'priority': 0, '__type__': 'TerminalDef'}, 22: {'name': 'RPAR', 'pattern': {'value': ')', 'flags': [], 'raw': '")"', '__type__': 'PatternStr'}, 'priority': 0, '__type__': 'TerminalDef'}, 23: {'name': 'COMMA', 'pattern': {'value': ',', 'flags': [], 'raw': '","', '__type__': 'PatternStr'}, 'priority': 0, '__type__': 'TerminalDef'}, 24: {'origin': {'name': Token('RULE', 'start'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'comparison', '__type__': 'NonTerminal'}], 'order': 0, 'alias': None, 'options': {'keep_all_tokens': 
False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 25: {'origin': {'name': Token('RULE', 'comparison'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'bitwise_or', '__type__': 'NonTerminal'}], 'order': 0, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 26: {'origin': {'name': Token('RULE', 'comparison'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'comparison', '__type__': 'NonTerminal'}, {'name': 'MORETHAN', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'bitwise_or', '__type__': 'NonTerminal'}], 'order': 1, 'alias': 'gt', 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 27: {'origin': {'name': Token('RULE', 'comparison'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'comparison', '__type__': 'NonTerminal'}, {'name': '__ANON_1', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'bitwise_or', '__type__': 'NonTerminal'}], 'order': 2, 'alias': 'gte', 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 28: {'origin': {'name': Token('RULE', 'comparison'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'comparison', '__type__': 'NonTerminal'}, {'name': 'LESSTHAN', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'bitwise_or', '__type__': 'NonTerminal'}], 'order': 3, 'alias': 'lt', 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 29: {'origin': {'name': Token('RULE', 'comparison'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'comparison', '__type__': 'NonTerminal'}, {'name': '__ANON_2', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'bitwise_or', '__type__': 'NonTerminal'}], 'order': 4, 'alias': 'lte', 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 30: {'origin': {'name': Token('RULE', 'comparison'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'comparison', '__type__': 'NonTerminal'}, {'name': '__ANON_0', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'bitwise_or', '__type__': 'NonTerminal'}], 'order': 5, 'alias': 'neq', 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 31: {'origin': {'name': Token('RULE', 'comparison'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'comparison', '__type__': 'NonTerminal'}, {'name': '__ANON_3', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'bitwise_or', '__type__': 'NonTerminal'}], 'order': 6, 'alias': 'eq', 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 32: {'origin': {'name': Token('RULE', 'bitwise_or'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'bitwise_xor', '__type__': 'NonTerminal'}], 'order': 0, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 
'RuleOptions'}, '__type__': 'Rule'}, 33: {'origin': {'name': Token('RULE', 'bitwise_or'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'bitwise_or', '__type__': 'NonTerminal'}, {'name': 'VBAR', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'bitwise_xor', '__type__': 'NonTerminal'}], 'order': 1, 'alias': 'bor', 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 34: {'origin': {'name': Token('RULE', 'bitwise_xor'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'bitwise_and', '__type__': 'NonTerminal'}], 'order': 0, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 35: {'origin': {'name': Token('RULE', 'bitwise_xor'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'bitwise_xor', '__type__': 'NonTerminal'}, {'name': 'CIRCUMFLEX', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'bitwise_and', '__type__': 'NonTerminal'}], 'order': 1, 'alias': 'bxor', 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 36: {'origin': {'name': Token('RULE', 'bitwise_and'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'bitwise_inversion', '__type__': 'NonTerminal'}], 'order': 0, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 37: {'origin': {'name': Token('RULE', 'bitwise_and'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'bitwise_and', '__type__': 'NonTerminal'}, {'name': 'AMPERSAND', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'bitwise_inversion', '__type__': 'NonTerminal'}], 'order': 1, 'alias': 'band', 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 38: {'origin': {'name': Token('RULE', 'bitwise_inversion'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'shift_expr', '__type__': 'NonTerminal'}], 'order': 0, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 39: {'origin': {'name': Token('RULE', 'bitwise_inversion'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'TILDE', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'bitwise_inversion', '__type__': 'NonTerminal'}], 'order': 1, 'alias': 'binv', 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 40: {'origin': {'name': Token('RULE', 'shift_expr'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'sum', '__type__': 'NonTerminal'}], 'order': 0, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 41: {'origin': {'name': Token('RULE', 'shift_expr'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'shift_expr', '__type__': 'NonTerminal'}, {'name': '__ANON_4', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'sum', '__type__': 'NonTerminal'}], 'order': 1, 'alias': 'lshift', 'options': 
{'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 42: {'origin': {'name': Token('RULE', 'shift_expr'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'shift_expr', '__type__': 'NonTerminal'}, {'name': '__ANON_5', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'sum', '__type__': 'NonTerminal'}], 'order': 2, 'alias': 'rshift', 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 43: {'origin': {'name': Token('RULE', 'sum'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'term', '__type__': 'NonTerminal'}], 'order': 0, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 44: {'origin': {'name': Token('RULE', 'sum'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'term', '__type__': 'NonTerminal'}, {'name': 'PLUS', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'sum', '__type__': 'NonTerminal'}], 'order': 1, 'alias': 'add', 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 45: {'origin': {'name': Token('RULE', 'sum'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'term', '__type__': 'NonTerminal'}, {'name': 'MINUS', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'sum', '__type__': 'NonTerminal'}], 'order': 2, 'alias': 'sub', 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 46: {'origin': {'name': Token('RULE', 'term'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'factor', '__type__': 'NonTerminal'}], 'order': 0, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 47: {'origin': {'name': Token('RULE', 'term'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'factor', '__type__': 'NonTerminal'}, {'name': 'STAR', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'term', '__type__': 'NonTerminal'}], 'order': 1, 'alias': 'mul', 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 48: {'origin': {'name': Token('RULE', 'term'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'factor', '__type__': 'NonTerminal'}, {'name': 'SLASH', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'term', '__type__': 'NonTerminal'}], 'order': 2, 'alias': 'div', 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 49: {'origin': {'name': Token('RULE', 'term'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'factor', '__type__': 'NonTerminal'}, {'name': 'PERCENT', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'term', '__type__': 'NonTerminal'}], 'order': 3, 'alias': 'mod', 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 50: {'origin': {'name': Token('RULE', 'factor'), '__type__': 
'NonTerminal'}, 'expansion': [{'name': 'pow', '__type__': 'NonTerminal'}], 'order': 0, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 51: {'origin': {'name': Token('RULE', 'factor'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'PLUS', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'factor', '__type__': 'NonTerminal'}], 'order': 1, 'alias': 'pos', 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 52: {'origin': {'name': Token('RULE', 'factor'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'MINUS', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'factor', '__type__': 'NonTerminal'}], 'order': 2, 'alias': 'neg', 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 53: {'origin': {'name': Token('RULE', 'pow'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'atom', '__type__': 'NonTerminal'}], 'order': 0, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 54: {'origin': {'name': Token('RULE', 'pow'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'atom', '__type__': 'NonTerminal'}, {'name': '__ANON_6', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'factor', '__type__': 'NonTerminal'}], 'order': 1, 'alias': 'pow', 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 55: {'origin': {'name': Token('RULE', 'atom'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'LPAR', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'comparison', '__type__': 'NonTerminal'}, {'name': 'RPAR', 'filter_out': True, '__type__': 'Terminal'}], 'order': 0, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 56: {'origin': {'name': Token('RULE', 'atom'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'CNAME', 'filter_out': False, '__type__': 'Terminal'}], 'order': 1, 'alias': 'symbol', 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 57: {'origin': {'name': Token('RULE', 'atom'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'NUMBER', 'filter_out': False, '__type__': 'Terminal'}], 'order': 2, 'alias': 'literal', 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 58: {'origin': {'name': Token('RULE', 'atom'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'func_name', '__type__': 'NonTerminal'}, {'name': 'trailer', '__type__': 'NonTerminal'}], 'order': 3, 'alias': 'func', 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 59: {'origin': {'name': Token('RULE', 'func_name'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'CNAME', 'filter_out': False, '__type__': 
'Terminal'}], 'order': 0, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 60: {'origin': {'name': Token('RULE', 'trailer'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'LPAR', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'arglist', '__type__': 'NonTerminal'}, {'name': 'RPAR', 'filter_out': True, '__type__': 'Terminal'}], 'order': 0, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 61: {'origin': {'name': Token('RULE', 'trailer'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'LPAR', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'RPAR', 'filter_out': True, '__type__': 'Terminal'}], 'order': 1, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': [False, True, False], '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 62: {'origin': {'name': Token('RULE', 'arglist'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'comparison', '__type__': 'NonTerminal'}, {'name': '__arglist_star_0', '__type__': 'NonTerminal'}, {'name': 'COMMA', 'filter_out': True, '__type__': 'Terminal'}], 'order': 0, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 63: {'origin': {'name': Token('RULE', 'arglist'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'comparison', '__type__': 'NonTerminal'}, {'name': '__arglist_star_0', '__type__': 'NonTerminal'}], 'order': 1, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 64: {'origin': {'name': Token('RULE', 'arglist'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'comparison', '__type__': 'NonTerminal'}, {'name': 'COMMA', 'filter_out': True, '__type__': 'Terminal'}], 'order': 2, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 65: {'origin': {'name': Token('RULE', 'arglist'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'comparison', '__type__': 'NonTerminal'}], 'order': 3, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 66: {'origin': {'name': '__arglist_star_0', '__type__': 'NonTerminal'}, 'expansion': [{'name': 'COMMA', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'comparison', '__type__': 'NonTerminal'}], 'order': 0, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 67: {'origin': {'name': '__arglist_star_0', '__type__': 'NonTerminal'}, 'expansion': [{'name': '__arglist_star_0', '__type__': 'NonTerminal'}, {'name': 'COMMA', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'comparison', '__type__': 'NonTerminal'}], 'order': 1, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}} 
+) +Shift = 0 +Reduce = 1 +def Lark_StandAlone(**kwargs): + return Lark._load_from_dict(DATA, MEMO, **kwargs) diff --git a/src/formulate/ttreeformula.py b/src/formulate/ttreeformula.py deleted file mode 100644 index 9b9522d..0000000 --- a/src/formulate/ttreeformula.py +++ /dev/null @@ -1,58 +0,0 @@ -""" -Copyright (c) 2023 Aryan Roy. All rights reserved. - -formulate: Easy conversions between different styles of expressions -""" -from __future__ import annotations - -import lark - -from . import matching_tree - -expression_grammar = r''' -start: expression -expression: disjunction | disjunction ":" expression -> multi_out -disjunction: conjunction | conjunction "||" conjunction -> lor -conjunction: inversion | inversion "&&" inversion -> land -inversion: comparison | "!" inversion -> linv -comparison: bitwise_or | comparison ">" bitwise_or -> gt - | comparison ">=" bitwise_or -> gte - | comparison "<" bitwise_or -> lt - | comparison "<=" bitwise_or -> lte - | comparison ("!=" ) bitwise_or -> neq - | comparison "==" bitwise_or -> eq -bitwise_or: bitwise_xor | bitwise_or "|" bitwise_xor -> bor -bitwise_xor: bitwise_and | bitwise_xor "^" bitwise_and -> bxor -bitwise_and: bitwise_inversion - | bitwise_and "&" bitwise_inversion -> band -bitwise_inversion: shift_expr | "~" bitwise_inversion -> binv -shift_expr: sum | shift_expr "<<" sum -> lshift | shift_expr ">>" sum -> rshift -sum: term | term "+" sum -> add | term "-" sum -> sub -term: factor | factor "*" term -> mul - | factor "/" term -> div - | factor "%" term -> mod -factor: pow | factor matpos+ -> matr - | "+" factor -> pos - | "-" factor -> neg -pow: atom | atom "**" factor -> pow -matpos: "[" [sum] "]" -atom: "(" expression ")" | var_name -> symbol - | NUMBER -> literal - | func_name trailer -> func -func_name: NAME | NAME "::" func_name -var_name: NAME | NAME "." var_name -trailer: "(" [arglist] ")" -arglist: expression ("," expression)* [","] -NAME: /[A-Za-z_][A-Za-z0-9_]*\$?/ -%import common.NUMBER -%import common.WS -%ignore WS -''' - - -def exp_to_ptree(exp: str): - parser = lark.Lark( - expression_grammar, parser="lalr", tree_class=matching_tree.ptnode - ) - print(parser.parse(exp).pretty()) - return parser.parse(exp) diff --git a/src/formulate/ttreeformula_parser.py b/src/formulate/ttreeformula_parser.py new file mode 100644 index 0000000..e51d15c --- /dev/null +++ b/src/formulate/ttreeformula_parser.py @@ -0,0 +1,3249 @@ +# The file was automatically generated by Lark v1.1.7 +__version__ = "1.1.7" + +# +# +# Lark Stand-alone Generator Tool +# ---------------------------------- +# Generates a stand-alone LALR(1) parser +# +# Git: https://github.com/erezsh/lark +# Author: Erez Shinan (erezshin@gmail.com) +# +# +# >>> LICENSE +# +# This tool and its generated code use a separate license from Lark, +# and are subject to the terms of the Mozilla Public License, v. 2.0. +# If a copy of the MPL was not distributed with this +# file, You can obtain one at https://mozilla.org/MPL/2.0/. +# +# If you wish to purchase a commercial license for this tool and its +# generated code, you may contact me via email or otherwise. +# +# If MPL2 is incompatible with your free or open-source project, +# contact me and we'll work it out. 
+#
+#
+
+from abc import ABC, abstractmethod
+from collections.abc import Sequence
+from types import ModuleType
+from typing import (
+    TypeVar, Generic, Type, Tuple, List, Dict, Iterator, Collection, Callable, Optional, FrozenSet, Any,
+    Union, Iterable, IO, TYPE_CHECKING, overload,
+    Pattern as REPattern, ClassVar, Set, Mapping
+)
+
+
+class LarkError(Exception):
+    pass
+
+
+class ConfigurationError(LarkError, ValueError):
+    pass
+
+
+def assert_config(value, options: Collection, msg='Got %r, expected one of %s'):
+    if value not in options:
+        raise ConfigurationError(msg % (value, options))
+
+
+class GrammarError(LarkError):
+    pass
+
+
+class ParseError(LarkError):
+    pass
+
+
+class LexError(LarkError):
+    pass
+
+T = TypeVar('T')
+
+class UnexpectedInput(LarkError):
+    #--
+    line: int
+    column: int
+    pos_in_stream = None
+    state: Any
+    _terminals_by_name = None
+
+    def get_context(self, text: str, span: int=40) -> str:
+        #--
+        assert self.pos_in_stream is not None, self
+        pos = self.pos_in_stream
+        start = max(pos - span, 0)
+        end = pos + span
+        if not isinstance(text, bytes):
+            before = text[start:pos].rsplit('\n', 1)[-1]
+            after = text[pos:end].split('\n', 1)[0]
+            return before + after + '\n' + ' ' * len(before.expandtabs()) + '^\n'
+        else:
+            before = text[start:pos].rsplit(b'\n', 1)[-1]
+            after = text[pos:end].split(b'\n', 1)[0]
+            return (before + after + b'\n' + b' ' * len(before.expandtabs()) + b'^\n').decode("ascii", "backslashreplace")
+
+    def match_examples(self, parse_fn: 'Callable[[str], Tree]',
+                       examples: Union[Mapping[T, Iterable[str]], Iterable[Tuple[T, Iterable[str]]]],
+                       token_type_match_fallback: bool=False,
+                       use_accepts: bool=True
+                       ) -> Optional[T]:
+        #--
+        assert self.state is not None, "Not supported for this exception"
+
+        if isinstance(examples, Mapping):
+            examples = examples.items()
+
+        candidate = (None, False)
+        for i, (label, example) in enumerate(examples):
+            assert not isinstance(example, str), "Expecting a list"
+
+            for j, malformed in enumerate(example):
+                try:
+                    parse_fn(malformed)
+                except UnexpectedInput as ut:
+                    if ut.state == self.state:
+                        if (
+                            use_accepts
+                            and isinstance(self, UnexpectedToken)
+                            and isinstance(ut, UnexpectedToken)
+                            and ut.accepts != self.accepts
+                        ):
+                            logger.debug("Different accepts with same state[%d]: %s != %s at example [%s][%s]" %
+                                         (self.state, self.accepts, ut.accepts, i, j))
+                            continue
+                        if (
+                            isinstance(self, (UnexpectedToken, UnexpectedEOF))
+                            and isinstance(ut, (UnexpectedToken, UnexpectedEOF))
+                        ):
+                            if ut.token == self.token: ##
+
+                                logger.debug("Exact Match at example [%s][%s]" % (i, j))
+                                return label
+
+                            if token_type_match_fallback:
+                                ##
+
+                                if (ut.token.type == self.token.type) and not candidate[-1]:
+                                    logger.debug("Token Type Fallback at example [%s][%s]" % (i, j))
+                                    candidate = label, True
+
+                        if candidate[0] is None:
+                            logger.debug("Same State match at example [%s][%s]" % (i, j))
+                            candidate = label, False
+
+        return candidate[0]
+
+    def _format_expected(self, expected):
+        if self._terminals_by_name:
+            d = self._terminals_by_name
+            expected = [d[t_name].user_repr() if t_name in d else t_name for t_name in expected]
+        return "Expected one of: \n\t* %s\n" % '\n\t* '.join(expected)
+
+
+class UnexpectedEOF(ParseError, UnexpectedInput):
+    #--
+    expected: 'List[Token]'
+
+    def __init__(self, expected, state=None, terminals_by_name=None):
+        super(UnexpectedEOF, self).__init__()
+
+        self.expected = expected
+        self.state = state
+        from .lexer import Token
+        self.token = Token("<EOF>", "") ##
+
+        self.pos_in_stream = -1
+ self.line = -1 + self.column = -1 + self._terminals_by_name = terminals_by_name + + + def __str__(self): + message = "Unexpected end-of-input. " + message += self._format_expected(self.expected) + return message + + +class UnexpectedCharacters(LexError, UnexpectedInput): + #-- + + allowed: Set[str] + considered_tokens: Set[Any] + + def __init__(self, seq, lex_pos, line, column, allowed=None, considered_tokens=None, state=None, token_history=None, + terminals_by_name=None, considered_rules=None): + super(UnexpectedCharacters, self).__init__() + + ## + + self.line = line + self.column = column + self.pos_in_stream = lex_pos + self.state = state + self._terminals_by_name = terminals_by_name + + self.allowed = allowed + self.considered_tokens = considered_tokens + self.considered_rules = considered_rules + self.token_history = token_history + + if isinstance(seq, bytes): + self.char = seq[lex_pos:lex_pos + 1].decode("ascii", "backslashreplace") + else: + self.char = seq[lex_pos] + self._context = self.get_context(seq) + + + def __str__(self): + message = "No terminal matches '%s' in the current parser context, at line %d col %d" % (self.char, self.line, self.column) + message += '\n\n' + self._context + if self.allowed: + message += self._format_expected(self.allowed) + if self.token_history: + message += '\nPrevious tokens: %s\n' % ', '.join(repr(t) for t in self.token_history) + return message + + +class UnexpectedToken(ParseError, UnexpectedInput): + #-- + + expected: Set[str] + considered_rules: Set[str] + interactive_parser: 'InteractiveParser' + + def __init__(self, token, expected, considered_rules=None, state=None, interactive_parser=None, terminals_by_name=None, token_history=None): + super(UnexpectedToken, self).__init__() + + ## + + self.line = getattr(token, 'line', '?') + self.column = getattr(token, 'column', '?') + self.pos_in_stream = getattr(token, 'start_pos', None) + self.state = state + + self.token = token + self.expected = expected ## + + self._accepts = NO_VALUE + self.considered_rules = considered_rules + self.interactive_parser = interactive_parser + self._terminals_by_name = terminals_by_name + self.token_history = token_history + + + @property + def accepts(self) -> Set[str]: + if self._accepts is NO_VALUE: + self._accepts = self.interactive_parser and self.interactive_parser.accepts() + return self._accepts + + def __str__(self): + message = ("Unexpected token %r at line %s, column %s.\n%s" + % (self.token, self.line, self.column, self._format_expected(self.accepts or self.expected))) + if self.token_history: + message += "Previous tokens: %r\n" % self.token_history + + return message + + + +class VisitError(LarkError): + #-- + + obj: 'Union[Tree, Token]' + orig_exc: Exception + + def __init__(self, rule, obj, orig_exc): + message = 'Error trying to process rule "%s":\n\n%s' % (rule, orig_exc) + super(VisitError, self).__init__(message) + + self.rule = rule + self.obj = obj + self.orig_exc = orig_exc + + +class MissingVariableError(LarkError): + pass + + +import sys, re +import logging + +logger: logging.Logger = logging.getLogger("lark") +logger.addHandler(logging.StreamHandler()) +## + +## + +logger.setLevel(logging.CRITICAL) + + +NO_VALUE = object() + +T = TypeVar("T") + + +def classify(seq: Iterable, key: Optional[Callable] = None, value: Optional[Callable] = None) -> Dict: + d: Dict[Any, Any] = {} + for item in seq: + k = key(item) if (key is not None) else item + v = value(item) if (value is not None) else item + try: + d[k].append(v) + except KeyError: + 
d[k] = [v] + return d + + +def _deserialize(data: Any, namespace: Dict[str, Any], memo: Dict) -> Any: + if isinstance(data, dict): + if '__type__' in data: ## + + class_ = namespace[data['__type__']] + return class_.deserialize(data, memo) + elif '@' in data: + return memo[data['@']] + return {key:_deserialize(value, namespace, memo) for key, value in data.items()} + elif isinstance(data, list): + return [_deserialize(value, namespace, memo) for value in data] + return data + + +_T = TypeVar("_T", bound="Serialize") + +class Serialize: + #-- + + def memo_serialize(self, types_to_memoize: List) -> Any: + memo = SerializeMemoizer(types_to_memoize) + return self.serialize(memo), memo.serialize() + + def serialize(self, memo = None) -> Dict[str, Any]: + if memo and memo.in_types(self): + return {'@': memo.memoized.get(self)} + + fields = getattr(self, '__serialize_fields__') + res = {f: _serialize(getattr(self, f), memo) for f in fields} + res['__type__'] = type(self).__name__ + if hasattr(self, '_serialize'): + self._serialize(res, memo) ## + + return res + + @classmethod + def deserialize(cls: Type[_T], data: Dict[str, Any], memo: Dict[int, Any]) -> _T: + namespace = getattr(cls, '__serialize_namespace__', []) + namespace = {c.__name__:c for c in namespace} + + fields = getattr(cls, '__serialize_fields__') + + if '@' in data: + return memo[data['@']] + + inst = cls.__new__(cls) + for f in fields: + try: + setattr(inst, f, _deserialize(data[f], namespace, memo)) + except KeyError as e: + raise KeyError("Cannot find key for class", cls, e) + + if hasattr(inst, '_deserialize'): + inst._deserialize() ## + + + return inst + + +class SerializeMemoizer(Serialize): + #-- + + __serialize_fields__ = 'memoized', + + def __init__(self, types_to_memoize: List) -> None: + self.types_to_memoize = tuple(types_to_memoize) + self.memoized = Enumerator() + + def in_types(self, value: Serialize) -> bool: + return isinstance(value, self.types_to_memoize) + + def serialize(self) -> Dict[int, Any]: ## + + return _serialize(self.memoized.reversed(), None) + + @classmethod + def deserialize(cls, data: Dict[int, Any], namespace: Dict[str, Any], memo: Dict[Any, Any]) -> Dict[int, Any]: ## + + return _deserialize(data, namespace, memo) + + +try: + import regex + _has_regex = True +except ImportError: + _has_regex = False + +if sys.version_info >= (3, 11): + import re._parser as sre_parse + import re._constants as sre_constants +else: + import sre_parse + import sre_constants + +categ_pattern = re.compile(r'\\p{[A-Za-z_]+}') + +def get_regexp_width(expr: str) -> Union[Tuple[int, int], List[int]]: + if _has_regex: + ## + + ## + + ## + + regexp_final = re.sub(categ_pattern, 'A', expr) + else: + if re.search(categ_pattern, expr): + raise ImportError('`regex` module must be installed in order to use Unicode categories.', expr) + regexp_final = expr + try: + ## + + return [int(x) for x in sre_parse.parse(regexp_final).getwidth()] ## + + except sre_constants.error: + if not _has_regex: + raise ValueError(expr) + else: + ## + + ## + + c = regex.compile(regexp_final) + if c.match('') is None: + ## + + return 1, int(sre_constants.MAXREPEAT) + else: + return 0, int(sre_constants.MAXREPEAT) + + +from collections import OrderedDict + +class Meta: + + empty: bool + line: int + column: int + start_pos: int + end_line: int + end_column: int + end_pos: int + orig_expansion: 'List[TerminalDef]' + match_tree: bool + + def __init__(self): + self.empty = True + + +_Leaf_T = TypeVar("_Leaf_T") +Branch = Union[_Leaf_T, 'Tree[_Leaf_T]'] + + 
+class Tree(Generic[_Leaf_T]): + #-- + + data: str + children: 'List[Branch[_Leaf_T]]' + + def __init__(self, data: str, children: 'List[Branch[_Leaf_T]]', meta: Optional[Meta]=None) -> None: + self.data = data + self.children = children + self._meta = meta + + @property + def meta(self) -> Meta: + if self._meta is None: + self._meta = Meta() + return self._meta + + def __repr__(self): + return 'Tree(%r, %r)' % (self.data, self.children) + + def _pretty_label(self): + return self.data + + def _pretty(self, level, indent_str): + yield f'{indent_str*level}{self._pretty_label()}' + if len(self.children) == 1 and not isinstance(self.children[0], Tree): + yield f'\t{self.children[0]}\n' + else: + yield '\n' + for n in self.children: + if isinstance(n, Tree): + yield from n._pretty(level+1, indent_str) + else: + yield f'{indent_str*(level+1)}{n}\n' + + def pretty(self, indent_str: str=' ') -> str: + #-- + return ''.join(self._pretty(0, indent_str)) + + def __rich__(self, parent:Optional['rich.tree.Tree']=None) -> 'rich.tree.Tree': + #-- + return self._rich(parent) + + def _rich(self, parent): + if parent: + tree = parent.add(f'[bold]{self.data}[/bold]') + else: + import rich.tree + tree = rich.tree.Tree(self.data) + + for c in self.children: + if isinstance(c, Tree): + c._rich(tree) + else: + tree.add(f'[green]{c}[/green]') + + return tree + + def __eq__(self, other): + try: + return self.data == other.data and self.children == other.children + except AttributeError: + return False + + def __ne__(self, other): + return not (self == other) + + def __hash__(self) -> int: + return hash((self.data, tuple(self.children))) + + def iter_subtrees(self) -> 'Iterator[Tree[_Leaf_T]]': + #-- + queue = [self] + subtrees = OrderedDict() + for subtree in queue: + subtrees[id(subtree)] = subtree + ## + + queue += [c for c in reversed(subtree.children) ## + + if isinstance(c, Tree) and id(c) not in subtrees] + + del queue + return reversed(list(subtrees.values())) + + def iter_subtrees_topdown(self): + #-- + stack = [self] + stack_append = stack.append + stack_pop = stack.pop + while stack: + node = stack_pop() + if not isinstance(node, Tree): + continue + yield node + for child in reversed(node.children): + stack_append(child) + + def find_pred(self, pred: 'Callable[[Tree[_Leaf_T]], bool]') -> 'Iterator[Tree[_Leaf_T]]': + #-- + return filter(pred, self.iter_subtrees()) + + def find_data(self, data: str) -> 'Iterator[Tree[_Leaf_T]]': + #-- + return self.find_pred(lambda t: t.data == data) + + +from functools import wraps, update_wrapper +from inspect import getmembers, getmro + +_Return_T = TypeVar('_Return_T') +_Return_V = TypeVar('_Return_V') +_Leaf_T = TypeVar('_Leaf_T') +_Leaf_U = TypeVar('_Leaf_U') +_R = TypeVar('_R') +_FUNC = Callable[..., _Return_T] +_DECORATED = Union[_FUNC, type] + +class _DiscardType: + #-- + + def __repr__(self): + return "lark.visitors.Discard" + +Discard = _DiscardType() + +## + + +class _Decoratable: + #-- + + @classmethod + def _apply_v_args(cls, visit_wrapper): + mro = getmro(cls) + assert mro[0] is cls + libmembers = {name for _cls in mro[1:] for name, _ in getmembers(_cls)} + for name, value in getmembers(cls): + + ## + + if name.startswith('_') or (name in libmembers and name not in cls.__dict__): + continue + if not callable(value): + continue + + ## + + if isinstance(cls.__dict__[name], _VArgsWrapper): + continue + + setattr(cls, name, _VArgsWrapper(cls.__dict__[name], visit_wrapper)) + return cls + + def __class_getitem__(cls, _): + return cls + + +class 
Transformer(_Decoratable, ABC, Generic[_Leaf_T, _Return_T]): + #-- + __visit_tokens__ = True ## + + + def __init__(self, visit_tokens: bool=True) -> None: + self.__visit_tokens__ = visit_tokens + + def _call_userfunc(self, tree, new_children=None): + ## + + children = new_children if new_children is not None else tree.children + try: + f = getattr(self, tree.data) + except AttributeError: + return self.__default__(tree.data, children, tree.meta) + else: + try: + wrapper = getattr(f, 'visit_wrapper', None) + if wrapper is not None: + return f.visit_wrapper(f, tree.data, children, tree.meta) + else: + return f(children) + except GrammarError: + raise + except Exception as e: + raise VisitError(tree.data, tree, e) + + def _call_userfunc_token(self, token): + try: + f = getattr(self, token.type) + except AttributeError: + return self.__default_token__(token) + else: + try: + return f(token) + except GrammarError: + raise + except Exception as e: + raise VisitError(token.type, token, e) + + def _transform_children(self, children): + for c in children: + if isinstance(c, Tree): + res = self._transform_tree(c) + elif self.__visit_tokens__ and isinstance(c, Token): + res = self._call_userfunc_token(c) + else: + res = c + + if res is not Discard: + yield res + + def _transform_tree(self, tree): + children = list(self._transform_children(tree.children)) + return self._call_userfunc(tree, children) + + def transform(self, tree: Tree[_Leaf_T]) -> _Return_T: + #-- + return self._transform_tree(tree) + + def __mul__( + self: 'Transformer[_Leaf_T, Tree[_Leaf_U]]', + other: 'Union[Transformer[_Leaf_U, _Return_V], TransformerChain[_Leaf_U, _Return_V,]]' + ) -> 'TransformerChain[_Leaf_T, _Return_V]': + #-- + return TransformerChain(self, other) + + def __default__(self, data, children, meta): + #-- + return Tree(data, children, meta) + + def __default_token__(self, token): + #-- + return token + + +def merge_transformers(base_transformer=None, **transformers_to_merge): + #-- + if base_transformer is None: + base_transformer = Transformer() + for prefix, transformer in transformers_to_merge.items(): + for method_name in dir(transformer): + method = getattr(transformer, method_name) + if not callable(method): + continue + if method_name.startswith("_") or method_name == "transform": + continue + prefixed_method = prefix + "__" + method_name + if hasattr(base_transformer, prefixed_method): + raise AttributeError("Cannot merge: method '%s' appears more than once" % prefixed_method) + + setattr(base_transformer, prefixed_method, method) + + return base_transformer + + +class InlineTransformer(Transformer): ## + + def _call_userfunc(self, tree, new_children=None): + ## + + children = new_children if new_children is not None else tree.children + try: + f = getattr(self, tree.data) + except AttributeError: + return self.__default__(tree.data, children, tree.meta) + else: + return f(*children) + + +class TransformerChain(Generic[_Leaf_T, _Return_T]): + + transformers: 'Tuple[Union[Transformer, TransformerChain], ...]' + + def __init__(self, *transformers: 'Union[Transformer, TransformerChain]') -> None: + self.transformers = transformers + + def transform(self, tree: Tree[_Leaf_T]) -> _Return_T: + for t in self.transformers: + tree = t.transform(tree) + return cast(_Return_T, tree) + + def __mul__( + self: 'TransformerChain[_Leaf_T, Tree[_Leaf_U]]', + other: 'Union[Transformer[_Leaf_U, _Return_V], TransformerChain[_Leaf_U, _Return_V]]' + ) -> 'TransformerChain[_Leaf_T, _Return_V]': + return 
TransformerChain(*self.transformers + (other,)) + + +class Transformer_InPlace(Transformer): + #-- + def _transform_tree(self, tree): ## + + return self._call_userfunc(tree) + + def transform(self, tree: Tree[_Leaf_T]) -> _Return_T: + for subtree in tree.iter_subtrees(): + subtree.children = list(self._transform_children(subtree.children)) + + return self._transform_tree(tree) + + +class Transformer_NonRecursive(Transformer): + #-- + + def transform(self, tree: Tree[_Leaf_T]) -> _Return_T: + ## + + rev_postfix = [] + q: List[Branch[_Leaf_T]] = [tree] + while q: + t = q.pop() + rev_postfix.append(t) + if isinstance(t, Tree): + q += t.children + + ## + + stack: List = [] + for x in reversed(rev_postfix): + if isinstance(x, Tree): + size = len(x.children) + if size: + args = stack[-size:] + del stack[-size:] + else: + args = [] + + res = self._call_userfunc(x, args) + if res is not Discard: + stack.append(res) + + elif self.__visit_tokens__ and isinstance(x, Token): + res = self._call_userfunc_token(x) + if res is not Discard: + stack.append(res) + else: + stack.append(x) + + result, = stack ## + + ## + + ## + + ## + + return cast(_Return_T, result) + + +class Transformer_InPlaceRecursive(Transformer): + #-- + def _transform_tree(self, tree): + tree.children = list(self._transform_children(tree.children)) + return self._call_userfunc(tree) + + +## + + +class VisitorBase: + def _call_userfunc(self, tree): + return getattr(self, tree.data, self.__default__)(tree) + + def __default__(self, tree): + #-- + return tree + + def __class_getitem__(cls, _): + return cls + + +class Visitor(VisitorBase, ABC, Generic[_Leaf_T]): + #-- + + def visit(self, tree: Tree[_Leaf_T]) -> Tree[_Leaf_T]: + #-- + for subtree in tree.iter_subtrees(): + self._call_userfunc(subtree) + return tree + + def visit_topdown(self, tree: Tree[_Leaf_T]) -> Tree[_Leaf_T]: + #-- + for subtree in tree.iter_subtrees_topdown(): + self._call_userfunc(subtree) + return tree + + +class Visitor_Recursive(VisitorBase, Generic[_Leaf_T]): + #-- + + def visit(self, tree: Tree[_Leaf_T]) -> Tree[_Leaf_T]: + #-- + for child in tree.children: + if isinstance(child, Tree): + self.visit(child) + + self._call_userfunc(tree) + return tree + + def visit_topdown(self,tree: Tree[_Leaf_T]) -> Tree[_Leaf_T]: + #-- + self._call_userfunc(tree) + + for child in tree.children: + if isinstance(child, Tree): + self.visit_topdown(child) + + return tree + + +class Interpreter(_Decoratable, ABC, Generic[_Leaf_T, _Return_T]): + #-- + + def visit(self, tree: Tree[_Leaf_T]) -> _Return_T: + ## + + ## + + ## + + return self._visit_tree(tree) + + def _visit_tree(self, tree: Tree[_Leaf_T]): + f = getattr(self, tree.data) + wrapper = getattr(f, 'visit_wrapper', None) + if wrapper is not None: + return f.visit_wrapper(f, tree.data, tree.children, tree.meta) + else: + return f(tree) + + def visit_children(self, tree: Tree[_Leaf_T]) -> List: + return [self._visit_tree(child) if isinstance(child, Tree) else child + for child in tree.children] + + def __getattr__(self, name): + return self.__default__ + + def __default__(self, tree): + return self.visit_children(tree) + + +_InterMethod = Callable[[Type[Interpreter], _Return_T], _R] + +def visit_children_decor(func: _InterMethod) -> _InterMethod: + #-- + @wraps(func) + def inner(cls, tree): + values = cls.visit_children(tree) + return func(cls, values) + return inner + +## + + +def _apply_v_args(obj, visit_wrapper): + try: + _apply = obj._apply_v_args + except AttributeError: + return _VArgsWrapper(obj, visit_wrapper) + else: + 
return _apply(visit_wrapper) + + +class _VArgsWrapper: + #-- + base_func: Callable + + def __init__(self, func: Callable, visit_wrapper: Callable[[Callable, str, list, Any], Any]): + if isinstance(func, _VArgsWrapper): + func = func.base_func + ## + + self.base_func = func ## + + self.visit_wrapper = visit_wrapper + update_wrapper(self, func) + + def __call__(self, *args, **kwargs): + return self.base_func(*args, **kwargs) + + def __get__(self, instance, owner=None): + try: + ## + + ## + + g = type(self.base_func).__get__ + except AttributeError: + return self + else: + return _VArgsWrapper(g(self.base_func, instance, owner), self.visit_wrapper) + + def __set_name__(self, owner, name): + try: + f = type(self.base_func).__set_name__ + except AttributeError: + return + else: + f(self.base_func, owner, name) + + +def _vargs_inline(f, _data, children, _meta): + return f(*children) +def _vargs_meta_inline(f, _data, children, meta): + return f(meta, *children) +def _vargs_meta(f, _data, children, meta): + return f(meta, children) +def _vargs_tree(f, data, children, meta): + return f(Tree(data, children, meta)) + + +def v_args(inline: bool = False, meta: bool = False, tree: bool = False, wrapper: Optional[Callable] = None) -> Callable[[_DECORATED], _DECORATED]: + #-- + if tree and (meta or inline): + raise ValueError("Visitor functions cannot combine 'tree' with 'meta' or 'inline'.") + + func = None + if meta: + if inline: + func = _vargs_meta_inline + else: + func = _vargs_meta + elif inline: + func = _vargs_inline + elif tree: + func = _vargs_tree + + if wrapper is not None: + if func is not None: + raise ValueError("Cannot use 'wrapper' along with 'tree', 'meta' or 'inline'.") + func = wrapper + + def _visitor_args_dec(obj): + return _apply_v_args(obj, func) + return _visitor_args_dec + + + +TOKEN_DEFAULT_PRIORITY = 0 + + +class Symbol(Serialize): + __slots__ = ('name',) + + name: str + is_term: ClassVar[bool] = NotImplemented + + def __init__(self, name: str) -> None: + self.name = name + + def __eq__(self, other): + assert isinstance(other, Symbol), other + return self.is_term == other.is_term and self.name == other.name + + def __ne__(self, other): + return not (self == other) + + def __hash__(self): + return hash(self.name) + + def __repr__(self): + return '%s(%r)' % (type(self).__name__, self.name) + + fullrepr = property(__repr__) + + def renamed(self, f): + return type(self)(f(self.name)) + + +class Terminal(Symbol): + __serialize_fields__ = 'name', 'filter_out' + + is_term: ClassVar[bool] = True + + def __init__(self, name, filter_out=False): + self.name = name + self.filter_out = filter_out + + @property + def fullrepr(self): + return '%s(%r, %r)' % (type(self).__name__, self.name, self.filter_out) + + def renamed(self, f): + return type(self)(f(self.name), self.filter_out) + + +class NonTerminal(Symbol): + __serialize_fields__ = 'name', + + is_term: ClassVar[bool] = False + + +class RuleOptions(Serialize): + __serialize_fields__ = 'keep_all_tokens', 'expand1', 'priority', 'template_source', 'empty_indices' + + keep_all_tokens: bool + expand1: bool + priority: Optional[int] + template_source: Optional[str] + empty_indices: Tuple[bool, ...] 
+ + def __init__(self, keep_all_tokens: bool=False, expand1: bool=False, priority: Optional[int]=None, template_source: Optional[str]=None, empty_indices: Tuple[bool, ...]=()) -> None: + self.keep_all_tokens = keep_all_tokens + self.expand1 = expand1 + self.priority = priority + self.template_source = template_source + self.empty_indices = empty_indices + + def __repr__(self): + return 'RuleOptions(%r, %r, %r, %r)' % ( + self.keep_all_tokens, + self.expand1, + self.priority, + self.template_source + ) + + +class Rule(Serialize): + #-- + __slots__ = ('origin', 'expansion', 'alias', 'options', 'order', '_hash') + + __serialize_fields__ = 'origin', 'expansion', 'order', 'alias', 'options' + __serialize_namespace__ = Terminal, NonTerminal, RuleOptions + + def __init__(self, origin, expansion, order=0, alias=None, options=None): + self.origin = origin + self.expansion = expansion + self.alias = alias + self.order = order + self.options = options or RuleOptions() + self._hash = hash((self.origin, tuple(self.expansion))) + + def _deserialize(self): + self._hash = hash((self.origin, tuple(self.expansion))) + + def __str__(self): + return '<%s : %s>' % (self.origin.name, ' '.join(x.name for x in self.expansion)) + + def __repr__(self): + return 'Rule(%r, %r, %r, %r)' % (self.origin, self.expansion, self.alias, self.options) + + def __hash__(self): + return self._hash + + def __eq__(self, other): + if not isinstance(other, Rule): + return False + return self.origin == other.origin and self.expansion == other.expansion + + + +from copy import copy + +try: ## + + has_interegular = bool(interegular) +except NameError: + has_interegular = False + +class Pattern(Serialize, ABC): + + value: str + flags: Collection[str] + raw: Optional[str] + type: ClassVar[str] + + def __init__(self, value: str, flags: Collection[str] = (), raw: Optional[str] = None) -> None: + self.value = value + self.flags = frozenset(flags) + self.raw = raw + + def __repr__(self): + return repr(self.to_regexp()) + + ## + + def __hash__(self): + return hash((type(self), self.value, self.flags)) + + def __eq__(self, other): + return type(self) == type(other) and self.value == other.value and self.flags == other.flags + + @abstractmethod + def to_regexp(self) -> str: + raise NotImplementedError() + + @property + @abstractmethod + def min_width(self) -> int: + raise NotImplementedError() + + @property + @abstractmethod + def max_width(self) -> int: + raise NotImplementedError() + + def _get_flags(self, value): + for f in self.flags: + value = ('(?%s:%s)' % (f, value)) + return value + + +class PatternStr(Pattern): + __serialize_fields__ = 'value', 'flags', 'raw' + + type: ClassVar[str] = "str" + + def to_regexp(self) -> str: + return self._get_flags(re.escape(self.value)) + + @property + def min_width(self) -> int: + return len(self.value) + + @property + def max_width(self) -> int: + return len(self.value) + + +class PatternRE(Pattern): + __serialize_fields__ = 'value', 'flags', 'raw', '_width' + + type: ClassVar[str] = "re" + + def to_regexp(self) -> str: + return self._get_flags(self.value) + + _width = None + def _get_width(self): + if self._width is None: + self._width = get_regexp_width(self.to_regexp()) + return self._width + + @property + def min_width(self) -> int: + return self._get_width()[0] + + @property + def max_width(self) -> int: + return self._get_width()[1] + + +class TerminalDef(Serialize): + __serialize_fields__ = 'name', 'pattern', 'priority' + __serialize_namespace__ = PatternStr, PatternRE + + name: str + pattern: 
Pattern + priority: int + + def __init__(self, name: str, pattern: Pattern, priority: int = TOKEN_DEFAULT_PRIORITY) -> None: + assert isinstance(pattern, Pattern), pattern + self.name = name + self.pattern = pattern + self.priority = priority + + def __repr__(self): + return '%s(%r, %r)' % (type(self).__name__, self.name, self.pattern) + + def user_repr(self) -> str: + if self.name.startswith('__'): ## + + return self.pattern.raw or self.name + else: + return self.name + +_T = TypeVar('_T', bound="Token") + +class Token(str): + #-- + __slots__ = ('type', 'start_pos', 'value', 'line', 'column', 'end_line', 'end_column', 'end_pos') + + __match_args__ = ('type', 'value') + + type: str + start_pos: Optional[int] + value: Any + line: Optional[int] + column: Optional[int] + end_line: Optional[int] + end_column: Optional[int] + end_pos: Optional[int] + + + @overload + def __new__( + cls, + type: str, + value: Any, + start_pos: Optional[int] = None, + line: Optional[int] = None, + column: Optional[int] = None, + end_line: Optional[int] = None, + end_column: Optional[int] = None, + end_pos: Optional[int] = None + ) -> 'Token': + ... + + @overload + def __new__( + cls, + type_: str, + value: Any, + start_pos: Optional[int] = None, + line: Optional[int] = None, + column: Optional[int] = None, + end_line: Optional[int] = None, + end_column: Optional[int] = None, + end_pos: Optional[int] = None + ) -> 'Token': ... + + def __new__(cls, *args, **kwargs): + if "type_" in kwargs: + warnings.warn("`type_` is deprecated use `type` instead", DeprecationWarning) + + if "type" in kwargs: + raise TypeError("Error: using both 'type' and the deprecated 'type_' as arguments.") + kwargs["type"] = kwargs.pop("type_") + + return cls._future_new(*args, **kwargs) + + + @classmethod + def _future_new(cls, type, value, start_pos=None, line=None, column=None, end_line=None, end_column=None, end_pos=None): + inst = super(Token, cls).__new__(cls, value) + + inst.type = type + inst.start_pos = start_pos + inst.value = value + inst.line = line + inst.column = column + inst.end_line = end_line + inst.end_column = end_column + inst.end_pos = end_pos + return inst + + @overload + def update(self, type: Optional[str] = None, value: Optional[Any] = None) -> 'Token': + ... + + @overload + def update(self, type_: Optional[str] = None, value: Optional[Any] = None) -> 'Token': + ... 
+ + def update(self, *args, **kwargs): + if "type_" in kwargs: + warnings.warn("`type_` is deprecated use `type` instead", DeprecationWarning) + + if "type" in kwargs: + raise TypeError("Error: using both 'type' and the deprecated 'type_' as arguments.") + kwargs["type"] = kwargs.pop("type_") + + return self._future_update(*args, **kwargs) + + def _future_update(self, type: Optional[str] = None, value: Optional[Any] = None) -> 'Token': + return Token.new_borrow_pos( + type if type is not None else self.type, + value if value is not None else self.value, + self + ) + + @classmethod + def new_borrow_pos(cls: Type[_T], type_: str, value: Any, borrow_t: 'Token') -> _T: + return cls(type_, value, borrow_t.start_pos, borrow_t.line, borrow_t.column, borrow_t.end_line, borrow_t.end_column, borrow_t.end_pos) + + def __reduce__(self): + return (self.__class__, (self.type, self.value, self.start_pos, self.line, self.column)) + + def __repr__(self): + return 'Token(%r, %r)' % (self.type, self.value) + + def __deepcopy__(self, memo): + return Token(self.type, self.value, self.start_pos, self.line, self.column) + + def __eq__(self, other): + if isinstance(other, Token) and self.type != other.type: + return False + + return str.__eq__(self, other) + + __hash__ = str.__hash__ + + +class LineCounter: + __slots__ = 'char_pos', 'line', 'column', 'line_start_pos', 'newline_char' + + def __init__(self, newline_char): + self.newline_char = newline_char + self.char_pos = 0 + self.line = 1 + self.column = 1 + self.line_start_pos = 0 + + def __eq__(self, other): + if not isinstance(other, LineCounter): + return NotImplemented + + return self.char_pos == other.char_pos and self.newline_char == other.newline_char + + def feed(self, token: Token, test_newline=True): + #-- + if test_newline: + newlines = token.count(self.newline_char) + if newlines: + self.line += newlines + self.line_start_pos = self.char_pos + token.rindex(self.newline_char) + 1 + + self.char_pos += len(token) + self.column = self.char_pos - self.line_start_pos + 1 + + +class UnlessCallback: + def __init__(self, scanner): + self.scanner = scanner + + def __call__(self, t): + res = self.scanner.match(t.value, 0) + if res: + _value, t.type = res + return t + + +class CallChain: + def __init__(self, callback1, callback2, cond): + self.callback1 = callback1 + self.callback2 = callback2 + self.cond = cond + + def __call__(self, t): + t2 = self.callback1(t) + return self.callback2(t) if self.cond(t2) else t2 + + +def _get_match(re_, regexp, s, flags): + m = re_.match(regexp, s, flags) + if m: + return m.group(0) + +def _create_unless(terminals, g_regex_flags, re_, use_bytes): + tokens_by_type = classify(terminals, lambda t: type(t.pattern)) + assert len(tokens_by_type) <= 2, tokens_by_type.keys() + embedded_strs = set() + callback = {} + for retok in tokens_by_type.get(PatternRE, []): + unless = [] + for strtok in tokens_by_type.get(PatternStr, []): + if strtok.priority != retok.priority: + continue + s = strtok.pattern.value + if s == _get_match(re_, retok.pattern.to_regexp(), s, g_regex_flags): + unless.append(strtok) + if strtok.pattern.flags <= retok.pattern.flags: + embedded_strs.add(strtok) + if unless: + callback[retok.name] = UnlessCallback(Scanner(unless, g_regex_flags, re_, match_whole=True, use_bytes=use_bytes)) + + new_terminals = [t for t in terminals if t not in embedded_strs] + return new_terminals, callback + + +class Scanner: + def __init__(self, terminals, g_regex_flags, re_, use_bytes, match_whole=False): + self.terminals = terminals + 
self.g_regex_flags = g_regex_flags + self.re_ = re_ + self.use_bytes = use_bytes + self.match_whole = match_whole + + self.allowed_types = {t.name for t in self.terminals} + + self._mres = self._build_mres(terminals, len(terminals)) + + def _build_mres(self, terminals, max_size): + ## + + ## + + ## + + postfix = '$' if self.match_whole else '' + mres = [] + while terminals: + pattern = u'|'.join(u'(?P<%s>%s)' % (t.name, t.pattern.to_regexp() + postfix) for t in terminals[:max_size]) + if self.use_bytes: + pattern = pattern.encode('latin-1') + try: + mre = self.re_.compile(pattern, self.g_regex_flags) + except AssertionError: ## + + return self._build_mres(terminals, max_size // 2) + + mres.append(mre) + terminals = terminals[max_size:] + return mres + + def match(self, text, pos): + for mre in self._mres: + m = mre.match(text, pos) + if m: + return m.group(0), m.lastgroup + + +def _regexp_has_newline(r: str): + #-- + return '\n' in r or '\\n' in r or '\\s' in r or '[^' in r or ('(?s' in r and '.' in r) + + +class LexerState: + #-- + + __slots__ = 'text', 'line_ctr', 'last_token' + + text: str + line_ctr: LineCounter + last_token: Optional[Token] + + def __init__(self, text: str, line_ctr: Optional[LineCounter]=None, last_token: Optional[Token]=None): + self.text = text + self.line_ctr = line_ctr or LineCounter(b'\n' if isinstance(text, bytes) else '\n') + self.last_token = last_token + + def __eq__(self, other): + if not isinstance(other, LexerState): + return NotImplemented + + return self.text is other.text and self.line_ctr == other.line_ctr and self.last_token == other.last_token + + def __copy__(self): + return type(self)(self.text, copy(self.line_ctr), self.last_token) + + +class LexerThread: + #-- + + def __init__(self, lexer: 'Lexer', lexer_state: LexerState): + self.lexer = lexer + self.state = lexer_state + + @classmethod + def from_text(cls, lexer: 'Lexer', text: str): + return cls(lexer, LexerState(text)) + + def lex(self, parser_state): + return self.lexer.lex(self.state, parser_state) + + def __copy__(self): + return type(self)(self.lexer, copy(self.state)) + + _Token = Token + + +_Callback = Callable[[Token], Token] + +class Lexer(ABC): + #-- + @abstractmethod + def lex(self, lexer_state: LexerState, parser_state: Any) -> Iterator[Token]: + return NotImplemented + + def make_lexer_state(self, text): + #-- + return LexerState(text) + + +def _check_regex_collisions(terminal_to_regexp: Dict[TerminalDef, str], comparator, strict_mode, max_collisions_to_show=8): + if not comparator: + comparator = interegular.Comparator.from_regexes(terminal_to_regexp) + + ## + + ## + + max_time = 2 if strict_mode else 0.2 + + ## + + if comparator.count_marked_pairs() >= max_collisions_to_show: + return + for group in classify(terminal_to_regexp, lambda t: t.priority).values(): + for a, b in comparator.check(group, skip_marked=True): + assert a.priority == b.priority + ## + + comparator.mark(a, b) + + ## + + message = f"Collision between Terminals {a.name} and {b.name}. " + try: + example = comparator.get_example_overlap(a, b, max_time).format_multiline() + except ValueError: + ## + + example = "No example could be found fast enough. 
However, the collision does still exists" + if strict_mode: + raise LexError(f"{message}\n{example}") + logger.warning("%s The lexer will choose between them arbitrarily.\n%s", message, example) + if comparator.count_marked_pairs() >= max_collisions_to_show: + logger.warning("Found 8 regex collisions, will not check for more.") + return + + +class BasicLexer(Lexer): + terminals: Collection[TerminalDef] + ignore_types: FrozenSet[str] + newline_types: FrozenSet[str] + user_callbacks: Dict[str, _Callback] + callback: Dict[str, _Callback] + re: ModuleType + + def __init__(self, conf: 'LexerConf', comparator=None) -> None: + terminals = list(conf.terminals) + assert all(isinstance(t, TerminalDef) for t in terminals), terminals + + self.re = conf.re_module + + if not conf.skip_validation: + ## + + terminal_to_regexp = {} + for t in terminals: + regexp = t.pattern.to_regexp() + try: + self.re.compile(regexp, conf.g_regex_flags) + except self.re.error: + raise LexError("Cannot compile token %s: %s" % (t.name, t.pattern)) + + if t.pattern.min_width == 0: + raise LexError("Lexer does not allow zero-width terminals. (%s: %s)" % (t.name, t.pattern)) + if t.pattern.type == "re": + terminal_to_regexp[t] = regexp + + if not (set(conf.ignore) <= {t.name for t in terminals}): + raise LexError("Ignore terminals are not defined: %s" % (set(conf.ignore) - {t.name for t in terminals})) + + if has_interegular: + _check_regex_collisions(terminal_to_regexp, comparator, conf.strict) + elif conf.strict: + raise LexError("interegular must be installed for strict mode. Use `pip install 'lark[interegular]'`.") + + ## + + self.newline_types = frozenset(t.name for t in terminals if _regexp_has_newline(t.pattern.to_regexp())) + self.ignore_types = frozenset(conf.ignore) + + terminals.sort(key=lambda x: (-x.priority, -x.pattern.max_width, -len(x.pattern.value), x.name)) + self.terminals = terminals + self.user_callbacks = conf.callbacks + self.g_regex_flags = conf.g_regex_flags + self.use_bytes = conf.use_bytes + self.terminals_by_name = conf.terminals_by_name + + self._scanner = None + + def _build_scanner(self): + terminals, self.callback = _create_unless(self.terminals, self.g_regex_flags, self.re, self.use_bytes) + assert all(self.callback.values()) + + for type_, f in self.user_callbacks.items(): + if type_ in self.callback: + ## + + self.callback[type_] = CallChain(self.callback[type_], f, lambda t: t.type == type_) + else: + self.callback[type_] = f + + self._scanner = Scanner(terminals, self.g_regex_flags, self.re, self.use_bytes) + + @property + def scanner(self): + if self._scanner is None: + self._build_scanner() + return self._scanner + + def match(self, text, pos): + return self.scanner.match(text, pos) + + def lex(self, state: LexerState, parser_state: Any) -> Iterator[Token]: + with suppress(EOFError): + while True: + yield self.next_token(state, parser_state) + + def next_token(self, lex_state: LexerState, parser_state: Any = None) -> Token: + line_ctr = lex_state.line_ctr + while line_ctr.char_pos < len(lex_state.text): + res = self.match(lex_state.text, line_ctr.char_pos) + if not res: + allowed = self.scanner.allowed_types - self.ignore_types + if not allowed: + allowed = {""} + raise UnexpectedCharacters(lex_state.text, line_ctr.char_pos, line_ctr.line, line_ctr.column, + allowed=allowed, token_history=lex_state.last_token and [lex_state.last_token], + state=parser_state, terminals_by_name=self.terminals_by_name) + + value, type_ = res + + if type_ not in self.ignore_types: + t = Token(type_, value, 
line_ctr.char_pos, line_ctr.line, line_ctr.column) + line_ctr.feed(value, type_ in self.newline_types) + t.end_line = line_ctr.line + t.end_column = line_ctr.column + t.end_pos = line_ctr.char_pos + if t.type in self.callback: + t = self.callback[t.type](t) + if not isinstance(t, Token): + raise LexError("Callbacks must return a token (returned %r)" % t) + lex_state.last_token = t + return t + else: + if type_ in self.callback: + t2 = Token(type_, value, line_ctr.char_pos, line_ctr.line, line_ctr.column) + self.callback[type_](t2) + line_ctr.feed(value, type_ in self.newline_types) + + ## + + raise EOFError(self) + + +class ContextualLexer(Lexer): + + lexers: Dict[str, BasicLexer] + root_lexer: BasicLexer + + def __init__(self, conf: 'LexerConf', states: Dict[str, Collection[str]], always_accept: Collection[str]=()) -> None: + terminals = list(conf.terminals) + terminals_by_name = conf.terminals_by_name + + trad_conf = copy(conf) + trad_conf.terminals = terminals + + if has_interegular and not conf.skip_validation: + comparator = interegular.Comparator.from_regexes({t: t.pattern.to_regexp() for t in terminals}) + else: + comparator = None + lexer_by_tokens: Dict[FrozenSet[str], BasicLexer] = {} + self.lexers = {} + for state, accepts in states.items(): + key = frozenset(accepts) + try: + lexer = lexer_by_tokens[key] + except KeyError: + accepts = set(accepts) | set(conf.ignore) | set(always_accept) + lexer_conf = copy(trad_conf) + lexer_conf.terminals = [terminals_by_name[n] for n in accepts if n in terminals_by_name] + lexer = BasicLexer(lexer_conf, comparator) + lexer_by_tokens[key] = lexer + + self.lexers[state] = lexer + + assert trad_conf.terminals is terminals + trad_conf.skip_validation = True ## + + self.root_lexer = BasicLexer(trad_conf, comparator) + + def lex(self, lexer_state: LexerState, parser_state: Any) -> Iterator[Token]: + try: + while True: + lexer = self.lexers[parser_state.position] + yield lexer.next_token(lexer_state, parser_state) + except EOFError: + pass + except UnexpectedCharacters as e: + ## + + ## + + try: + last_token = lexer_state.last_token ## + + token = self.root_lexer.next_token(lexer_state, parser_state) + raise UnexpectedToken(token, e.allowed, state=parser_state, token_history=[last_token], terminals_by_name=self.root_lexer.terminals_by_name) + except UnexpectedCharacters: + raise e ## + + + + +_ParserArgType: 'TypeAlias' = 'Literal["earley", "lalr", "cyk", "auto"]' +_LexerArgType: 'TypeAlias' = 'Union[Literal["auto", "basic", "contextual", "dynamic", "dynamic_complete"], Type[Lexer]]' +_Callback = Callable[[Token], Token] + +class LexerConf(Serialize): + __serialize_fields__ = 'terminals', 'ignore', 'g_regex_flags', 'use_bytes', 'lexer_type' + __serialize_namespace__ = TerminalDef, + + terminals: Collection[TerminalDef] + re_module: ModuleType + ignore: Collection[str] + postlex: 'Optional[PostLex]' + callbacks: Dict[str, _Callback] + g_regex_flags: int + skip_validation: bool + use_bytes: bool + lexer_type: Optional[_LexerArgType] + strict: bool + + def __init__(self, terminals: Collection[TerminalDef], re_module: ModuleType, ignore: Collection[str]=(), postlex: 'Optional[PostLex]'=None, + callbacks: Optional[Dict[str, _Callback]]=None, g_regex_flags: int=0, skip_validation: bool=False, use_bytes: bool=False, strict: bool=False): + self.terminals = terminals + self.terminals_by_name = {t.name: t for t in self.terminals} + assert len(self.terminals) == len(self.terminals_by_name) + self.ignore = ignore + self.postlex = postlex + self.callbacks = 
callbacks or {} + self.g_regex_flags = g_regex_flags + self.re_module = re_module + self.skip_validation = skip_validation + self.use_bytes = use_bytes + self.strict = strict + self.lexer_type = None + + def _deserialize(self): + self.terminals_by_name = {t.name: t for t in self.terminals} + + def __deepcopy__(self, memo=None): + return type(self)( + deepcopy(self.terminals, memo), + self.re_module, + deepcopy(self.ignore, memo), + deepcopy(self.postlex, memo), + deepcopy(self.callbacks, memo), + deepcopy(self.g_regex_flags, memo), + deepcopy(self.skip_validation, memo), + deepcopy(self.use_bytes, memo), + ) + + +class ParserConf(Serialize): + __serialize_fields__ = 'rules', 'start', 'parser_type' + + def __init__(self, rules, callbacks, start): + assert isinstance(start, list) + self.rules = rules + self.callbacks = callbacks + self.start = start + + self.parser_type = None + + +from functools import partial, wraps +from itertools import product + + +class ExpandSingleChild: + def __init__(self, node_builder): + self.node_builder = node_builder + + def __call__(self, children): + if len(children) == 1: + return children[0] + else: + return self.node_builder(children) + + + +class PropagatePositions: + def __init__(self, node_builder, node_filter=None): + self.node_builder = node_builder + self.node_filter = node_filter + + def __call__(self, children): + res = self.node_builder(children) + + if isinstance(res, Tree): + ## + + ## + + ## + + ## + + + res_meta = res.meta + + first_meta = self._pp_get_meta(children) + if first_meta is not None: + if not hasattr(res_meta, 'line'): + ## + + res_meta.line = getattr(first_meta, 'container_line', first_meta.line) + res_meta.column = getattr(first_meta, 'container_column', first_meta.column) + res_meta.start_pos = getattr(first_meta, 'container_start_pos', first_meta.start_pos) + res_meta.empty = False + + res_meta.container_line = getattr(first_meta, 'container_line', first_meta.line) + res_meta.container_column = getattr(first_meta, 'container_column', first_meta.column) + res_meta.container_start_pos = getattr(first_meta, 'container_start_pos', first_meta.start_pos) + + last_meta = self._pp_get_meta(reversed(children)) + if last_meta is not None: + if not hasattr(res_meta, 'end_line'): + res_meta.end_line = getattr(last_meta, 'container_end_line', last_meta.end_line) + res_meta.end_column = getattr(last_meta, 'container_end_column', last_meta.end_column) + res_meta.end_pos = getattr(last_meta, 'container_end_pos', last_meta.end_pos) + res_meta.empty = False + + res_meta.container_end_line = getattr(last_meta, 'container_end_line', last_meta.end_line) + res_meta.container_end_column = getattr(last_meta, 'container_end_column', last_meta.end_column) + res_meta.container_end_pos = getattr(last_meta, 'container_end_pos', last_meta.end_pos) + + return res + + def _pp_get_meta(self, children): + for c in children: + if self.node_filter is not None and not self.node_filter(c): + continue + if isinstance(c, Tree): + if not c.meta.empty: + return c.meta + elif isinstance(c, Token): + return c + elif hasattr(c, '__lark_meta__'): + return c.__lark_meta__() + +def make_propagate_positions(option): + if callable(option): + return partial(PropagatePositions, node_filter=option) + elif option is True: + return PropagatePositions + elif option is False: + return None + + raise ConfigurationError('Invalid option for propagate_positions: %r' % option) + + +class ChildFilter: + def __init__(self, to_include, append_none, node_builder): + self.node_builder = 
node_builder + self.to_include = to_include + self.append_none = append_none + + def __call__(self, children): + filtered = [] + + for i, to_expand, add_none in self.to_include: + if add_none: + filtered += [None] * add_none + if to_expand: + filtered += children[i].children + else: + filtered.append(children[i]) + + if self.append_none: + filtered += [None] * self.append_none + + return self.node_builder(filtered) + + +class ChildFilterLALR(ChildFilter): + #-- + + def __call__(self, children): + filtered = [] + for i, to_expand, add_none in self.to_include: + if add_none: + filtered += [None] * add_none + if to_expand: + if filtered: + filtered += children[i].children + else: ## + + filtered = children[i].children + else: + filtered.append(children[i]) + + if self.append_none: + filtered += [None] * self.append_none + + return self.node_builder(filtered) + + +class ChildFilterLALR_NoPlaceholders(ChildFilter): + #-- + def __init__(self, to_include, node_builder): + self.node_builder = node_builder + self.to_include = to_include + + def __call__(self, children): + filtered = [] + for i, to_expand in self.to_include: + if to_expand: + if filtered: + filtered += children[i].children + else: ## + + filtered = children[i].children + else: + filtered.append(children[i]) + return self.node_builder(filtered) + + +def _should_expand(sym): + return not sym.is_term and sym.name.startswith('_') + + +def maybe_create_child_filter(expansion, keep_all_tokens, ambiguous, _empty_indices: List[bool]): + ## + + if _empty_indices: + assert _empty_indices.count(False) == len(expansion) + s = ''.join(str(int(b)) for b in _empty_indices) + empty_indices = [len(ones) for ones in s.split('0')] + assert len(empty_indices) == len(expansion)+1, (empty_indices, len(expansion)) + else: + empty_indices = [0] * (len(expansion)+1) + + to_include = [] + nones_to_add = 0 + for i, sym in enumerate(expansion): + nones_to_add += empty_indices[i] + if keep_all_tokens or not (sym.is_term and sym.filter_out): + to_include.append((i, _should_expand(sym), nones_to_add)) + nones_to_add = 0 + + nones_to_add += empty_indices[len(expansion)] + + if _empty_indices or len(to_include) < len(expansion) or any(to_expand for i, to_expand,_ in to_include): + if _empty_indices or ambiguous: + return partial(ChildFilter if ambiguous else ChildFilterLALR, to_include, nones_to_add) + else: + ## + + return partial(ChildFilterLALR_NoPlaceholders, [(i, x) for i,x,_ in to_include]) + + +class AmbiguousExpander: + #-- + def __init__(self, to_expand, tree_class, node_builder): + self.node_builder = node_builder + self.tree_class = tree_class + self.to_expand = to_expand + + def __call__(self, children): + def _is_ambig_tree(t): + return hasattr(t, 'data') and t.data == '_ambig' + + ## + + ## + + ## + + ## + + ambiguous = [] + for i, child in enumerate(children): + if _is_ambig_tree(child): + if i in self.to_expand: + ambiguous.append(i) + + child.expand_kids_by_data('_ambig') + + if not ambiguous: + return self.node_builder(children) + + expand = [child.children if i in ambiguous else (child,) for i, child in enumerate(children)] + return self.tree_class('_ambig', [self.node_builder(list(f)) for f in product(*expand)]) + + +def maybe_create_ambiguous_expander(tree_class, expansion, keep_all_tokens): + to_expand = [i for i, sym in enumerate(expansion) + if keep_all_tokens or ((not (sym.is_term and sym.filter_out)) and _should_expand(sym))] + if to_expand: + return partial(AmbiguousExpander, to_expand, tree_class) + + +class 
AmbiguousIntermediateExpander: + #-- + + def __init__(self, tree_class, node_builder): + self.node_builder = node_builder + self.tree_class = tree_class + + def __call__(self, children): + def _is_iambig_tree(child): + return hasattr(child, 'data') and child.data == '_iambig' + + def _collapse_iambig(children): + #-- + + ## + + ## + + if children and _is_iambig_tree(children[0]): + iambig_node = children[0] + result = [] + for grandchild in iambig_node.children: + collapsed = _collapse_iambig(grandchild.children) + if collapsed: + for child in collapsed: + child.children += children[1:] + result += collapsed + else: + new_tree = self.tree_class('_inter', grandchild.children + children[1:]) + result.append(new_tree) + return result + + collapsed = _collapse_iambig(children) + if collapsed: + processed_nodes = [self.node_builder(c.children) for c in collapsed] + return self.tree_class('_ambig', processed_nodes) + + return self.node_builder(children) + + + +def inplace_transformer(func): + @wraps(func) + def f(children): + ## + + tree = Tree(func.__name__, children) + return func(tree) + return f + + +def apply_visit_wrapper(func, name, wrapper): + if wrapper is _vargs_meta or wrapper is _vargs_meta_inline: + raise NotImplementedError("Meta args not supported for internal transformer") + + @wraps(func) + def f(children): + return wrapper(func, name, children, None) + return f + + +class ParseTreeBuilder: + def __init__(self, rules, tree_class, propagate_positions=False, ambiguous=False, maybe_placeholders=False): + self.tree_class = tree_class + self.propagate_positions = propagate_positions + self.ambiguous = ambiguous + self.maybe_placeholders = maybe_placeholders + + self.rule_builders = list(self._init_builders(rules)) + + def _init_builders(self, rules): + propagate_positions = make_propagate_positions(self.propagate_positions) + + for rule in rules: + options = rule.options + keep_all_tokens = options.keep_all_tokens + expand_single_child = options.expand1 + + wrapper_chain = list(filter(None, [ + (expand_single_child and not rule.alias) and ExpandSingleChild, + maybe_create_child_filter(rule.expansion, keep_all_tokens, self.ambiguous, options.empty_indices if self.maybe_placeholders else None), + propagate_positions, + self.ambiguous and maybe_create_ambiguous_expander(self.tree_class, rule.expansion, keep_all_tokens), + self.ambiguous and partial(AmbiguousIntermediateExpander, self.tree_class) + ])) + + yield rule, wrapper_chain + + def create_callback(self, transformer=None): + callbacks = {} + + default_handler = getattr(transformer, '__default__', None) + if default_handler: + def default_callback(data, children): + return default_handler(data, children, None) + else: + default_callback = self.tree_class + + for rule, wrapper_chain in self.rule_builders: + + user_callback_name = rule.alias or rule.options.template_source or rule.origin.name + try: + f = getattr(transformer, user_callback_name) + wrapper = getattr(f, 'visit_wrapper', None) + if wrapper is not None: + f = apply_visit_wrapper(f, user_callback_name, wrapper) + elif isinstance(transformer, Transformer_InPlace): + f = inplace_transformer(f) + except AttributeError: + f = partial(default_callback, user_callback_name) + + for w in wrapper_chain: + f = w(f) + + if rule in callbacks: + raise GrammarError("Rule '%s' already exists" % (rule,)) + + callbacks[rule] = f + + return callbacks + + + +class LALR_Parser(Serialize): + def __init__(self, parser_conf, debug=False, strict=False): + analysis = LALR_Analyzer(parser_conf, 
debug=debug, strict=strict) + analysis.compute_lalr() + callbacks = parser_conf.callbacks + + self._parse_table = analysis.parse_table + self.parser_conf = parser_conf + self.parser = _Parser(analysis.parse_table, callbacks, debug) + + @classmethod + def deserialize(cls, data, memo, callbacks, debug=False): + inst = cls.__new__(cls) + inst._parse_table = IntParseTable.deserialize(data, memo) + inst.parser = _Parser(inst._parse_table, callbacks, debug) + return inst + + def serialize(self, memo: Any = None) -> Dict[str, Any]: + return self._parse_table.serialize(memo) + + def parse_interactive(self, lexer, start): + return self.parser.parse(lexer, start, start_interactive=True) + + def parse(self, lexer, start, on_error=None): + try: + return self.parser.parse(lexer, start) + except UnexpectedInput as e: + if on_error is None: + raise + + while True: + if isinstance(e, UnexpectedCharacters): + s = e.interactive_parser.lexer_thread.state + p = s.line_ctr.char_pos + + if not on_error(e): + raise e + + if isinstance(e, UnexpectedCharacters): + ## + + if p == s.line_ctr.char_pos: + s.line_ctr.feed(s.text[p:p+1]) + + try: + return e.interactive_parser.resume_parse() + except UnexpectedToken as e2: + if (isinstance(e, UnexpectedToken) + and e.token.type == e2.token.type == '$END' + and e.interactive_parser == e2.interactive_parser): + ## + + raise e2 + e = e2 + except UnexpectedCharacters as e2: + e = e2 + + +class ParseConf: + __slots__ = 'parse_table', 'callbacks', 'start', 'start_state', 'end_state', 'states' + + def __init__(self, parse_table, callbacks, start): + self.parse_table = parse_table + + self.start_state = self.parse_table.start_states[start] + self.end_state = self.parse_table.end_states[start] + self.states = self.parse_table.states + + self.callbacks = callbacks + self.start = start + + +class ParserState: + __slots__ = 'parse_conf', 'lexer', 'state_stack', 'value_stack' + + def __init__(self, parse_conf, lexer, state_stack=None, value_stack=None): + self.parse_conf = parse_conf + self.lexer = lexer + self.state_stack = state_stack or [self.parse_conf.start_state] + self.value_stack = value_stack or [] + + @property + def position(self): + return self.state_stack[-1] + + ## + + def __eq__(self, other): + if not isinstance(other, ParserState): + return NotImplemented + return len(self.state_stack) == len(other.state_stack) and self.position == other.position + + def __copy__(self): + return type(self)( + self.parse_conf, + self.lexer, ## + + copy(self.state_stack), + deepcopy(self.value_stack), + ) + + def copy(self): + return copy(self) + + def feed_token(self, token, is_end=False): + state_stack = self.state_stack + value_stack = self.value_stack + states = self.parse_conf.states + end_state = self.parse_conf.end_state + callbacks = self.parse_conf.callbacks + + while True: + state = state_stack[-1] + try: + action, arg = states[state][token.type] + except KeyError: + expected = {s for s in states[state].keys() if s.isupper()} + raise UnexpectedToken(token, expected, state=self, interactive_parser=None) + + assert arg != end_state + + if action is Shift: + ## + + assert not is_end + state_stack.append(arg) + value_stack.append(token if token.type not in callbacks else callbacks[token.type](token)) + return + else: + ## + + rule = arg + size = len(rule.expansion) + if size: + s = value_stack[-size:] + del state_stack[-size:] + del value_stack[-size:] + else: + s = [] + + value = callbacks[rule](s) + + _action, new_state = states[state_stack[-1]][rule.origin.name] + assert 
_action is Shift + state_stack.append(new_state) + value_stack.append(value) + + if is_end and state_stack[-1] == end_state: + return value_stack[-1] + +class _Parser: + def __init__(self, parse_table, callbacks, debug=False): + self.parse_table = parse_table + self.callbacks = callbacks + self.debug = debug + + def parse(self, lexer, start, value_stack=None, state_stack=None, start_interactive=False): + parse_conf = ParseConf(self.parse_table, self.callbacks, start) + parser_state = ParserState(parse_conf, lexer, state_stack, value_stack) + if start_interactive: + return InteractiveParser(self, parser_state, parser_state.lexer) + return self.parse_from_state(parser_state) + + + def parse_from_state(self, state, last_token=None): + #-- + try: + token = last_token + for token in state.lexer.lex(state): + state.feed_token(token) + + end_token = Token.new_borrow_pos('$END', '', token) if token else Token('$END', '', 0, 1, 1) + return state.feed_token(end_token, True) + except UnexpectedInput as e: + try: + e.interactive_parser = InteractiveParser(self, state, state.lexer) + except NameError: + pass + raise e + except Exception as e: + if self.debug: + print("") + print("STATE STACK DUMP") + print("----------------") + for i, s in enumerate(state.state_stack): + print('%d)' % i , s) + print("") + + raise + + +class Action: + def __init__(self, name): + self.name = name + def __str__(self): + return self.name + def __repr__(self): + return str(self) + +Shift = Action('Shift') +Reduce = Action('Reduce') + + +class ParseTable: + def __init__(self, states, start_states, end_states): + self.states = states + self.start_states = start_states + self.end_states = end_states + + def serialize(self, memo): + tokens = Enumerator() + + states = { + state: {tokens.get(token): ((1, arg.serialize(memo)) if action is Reduce else (0, arg)) + for token, (action, arg) in actions.items()} + for state, actions in self.states.items() + } + + return { + 'tokens': tokens.reversed(), + 'states': states, + 'start_states': self.start_states, + 'end_states': self.end_states, + } + + @classmethod + def deserialize(cls, data, memo): + tokens = data['tokens'] + states = { + state: {tokens[token]: ((Reduce, Rule.deserialize(arg, memo)) if action==1 else (Shift, arg)) + for token, (action, arg) in actions.items()} + for state, actions in data['states'].items() + } + return cls(states, data['start_states'], data['end_states']) + + +class IntParseTable(ParseTable): + + @classmethod + def from_ParseTable(cls, parse_table): + enum = list(parse_table.states) + state_to_idx = {s:i for i,s in enumerate(enum)} + int_states = {} + + for s, la in parse_table.states.items(): + la = {k:(v[0], state_to_idx[v[1]]) if v[0] is Shift else v + for k,v in la.items()} + int_states[ state_to_idx[s] ] = la + + + start_states = {start:state_to_idx[s] for start, s in parse_table.start_states.items()} + end_states = {start:state_to_idx[s] for start, s in parse_table.end_states.items()} + return cls(int_states, start_states, end_states) + + + +def _wrap_lexer(lexer_class): + future_interface = getattr(lexer_class, '__future_interface__', False) + if future_interface: + return lexer_class + else: + class CustomLexerWrapper(Lexer): + def __init__(self, lexer_conf): + self.lexer = lexer_class(lexer_conf) + def lex(self, lexer_state, parser_state): + return self.lexer.lex(lexer_state.text) + return CustomLexerWrapper + + +def _deserialize_parsing_frontend(data, memo, lexer_conf, callbacks, options): + parser_conf = 
ParserConf.deserialize(data['parser_conf'], memo) + cls = (options and options._plugins.get('LALR_Parser')) or LALR_Parser + parser = cls.deserialize(data['parser'], memo, callbacks, options.debug) + parser_conf.callbacks = callbacks + return ParsingFrontend(lexer_conf, parser_conf, options, parser=parser) + + +_parser_creators: 'Dict[str, Callable[[LexerConf, Any, Any], Any]]' = {} + + +class ParsingFrontend(Serialize): + __serialize_fields__ = 'lexer_conf', 'parser_conf', 'parser' + + lexer_conf: LexerConf + parser_conf: ParserConf + options: Any + + def __init__(self, lexer_conf: LexerConf, parser_conf: ParserConf, options, parser=None): + self.parser_conf = parser_conf + self.lexer_conf = lexer_conf + self.options = options + + ## + + if parser: ## + + self.parser = parser + else: + create_parser = _parser_creators.get(parser_conf.parser_type) + assert create_parser is not None, "{} is not supported in standalone mode".format( + parser_conf.parser_type + ) + self.parser = create_parser(lexer_conf, parser_conf, options) + + ## + + lexer_type = lexer_conf.lexer_type + self.skip_lexer = False + if lexer_type in ('dynamic', 'dynamic_complete'): + assert lexer_conf.postlex is None + self.skip_lexer = True + return + + if isinstance(lexer_type, type): + assert issubclass(lexer_type, Lexer) + self.lexer = _wrap_lexer(lexer_type)(lexer_conf) + elif isinstance(lexer_type, str): + create_lexer = { + 'basic': create_basic_lexer, + 'contextual': create_contextual_lexer, + }[lexer_type] + self.lexer = create_lexer(lexer_conf, self.parser, lexer_conf.postlex, options) + else: + raise TypeError("Bad value for lexer_type: {lexer_type}") + + if lexer_conf.postlex: + self.lexer = PostLexConnector(self.lexer, lexer_conf.postlex) + + def _verify_start(self, start=None): + if start is None: + start_decls = self.parser_conf.start + if len(start_decls) > 1: + raise ConfigurationError("Lark initialized with more than 1 possible start rule. Must specify which start rule to parse", start_decls) + start ,= start_decls + elif start not in self.parser_conf.start: + raise ConfigurationError("Unknown start rule %s. 
Must be one of %r" % (start, self.parser_conf.start)) + return start + + def _make_lexer_thread(self, text: str): + cls = (self.options and self.options._plugins.get('LexerThread')) or LexerThread + return text if self.skip_lexer else cls.from_text(self.lexer, text) + + def parse(self, text: str, start=None, on_error=None): + chosen_start = self._verify_start(start) + kw = {} if on_error is None else {'on_error': on_error} + stream = self._make_lexer_thread(text) + return self.parser.parse(stream, chosen_start, **kw) + + def parse_interactive(self, text: Optional[str]=None, start=None): + ## + + ## + + chosen_start = self._verify_start(start) + if self.parser_conf.parser_type != 'lalr': + raise ConfigurationError("parse_interactive() currently only works with parser='lalr' ") + stream = self._make_lexer_thread(text) ## + + return self.parser.parse_interactive(stream, chosen_start) + + +def _validate_frontend_args(parser, lexer) -> None: + assert_config(parser, ('lalr', 'earley', 'cyk')) + if not isinstance(lexer, type): ## + + expected = { + 'lalr': ('basic', 'contextual'), + 'earley': ('basic', 'dynamic', 'dynamic_complete'), + 'cyk': ('basic', ), + }[parser] + assert_config(lexer, expected, 'Parser %r does not support lexer %%r, expected one of %%s' % parser) + + +def _get_lexer_callbacks(transformer, terminals): + result = {} + for terminal in terminals: + callback = getattr(transformer, terminal.name, None) + if callback is not None: + result[terminal.name] = callback + return result + +class PostLexConnector: + def __init__(self, lexer, postlexer): + self.lexer = lexer + self.postlexer = postlexer + + def lex(self, lexer_state, parser_state): + i = self.lexer.lex(lexer_state, parser_state) + return self.postlexer.process(i) + + + +def create_basic_lexer(lexer_conf, parser, postlex, options) -> BasicLexer: + cls = (options and options._plugins.get('BasicLexer')) or BasicLexer + return cls(lexer_conf) + +def create_contextual_lexer(lexer_conf: LexerConf, parser, postlex, options) -> ContextualLexer: + cls = (options and options._plugins.get('ContextualLexer')) or ContextualLexer + states: Dict[str, Collection[str]] = {idx:list(t.keys()) for idx, t in parser._parse_table.states.items()} + always_accept: Collection[str] = postlex.always_accept if postlex else () + return cls(lexer_conf, states, always_accept=always_accept) + +def create_lalr_parser(lexer_conf: LexerConf, parser_conf: ParserConf, options=None) -> LALR_Parser: + debug = options.debug if options else False + strict = options.strict if options else False + cls = (options and options._plugins.get('LALR_Parser')) or LALR_Parser + return cls(parser_conf, debug=debug, strict=strict) + +_parser_creators['lalr'] = create_lalr_parser + + + + +class PostLex(ABC): + @abstractmethod + def process(self, stream: Iterator[Token]) -> Iterator[Token]: + return stream + + always_accept: Iterable[str] = () + +class LarkOptions(Serialize): + #-- + + start: List[str] + debug: bool + strict: bool + transformer: 'Optional[Transformer]' + propagate_positions: Union[bool, str] + maybe_placeholders: bool + cache: Union[bool, str] + regex: bool + g_regex_flags: int + keep_all_tokens: bool + tree_class: Any + parser: _ParserArgType + lexer: _LexerArgType + ambiguity: 'Literal["auto", "resolve", "explicit", "forest"]' + postlex: Optional[PostLex] + priority: 'Optional[Literal["auto", "normal", "invert"]]' + lexer_callbacks: Dict[str, Callable[[Token], Token]] + use_bytes: bool + edit_terminals: Optional[Callable[[TerminalDef], TerminalDef]] + 
import_paths: 'List[Union[str, Callable[[Union[None, str, PackageResource], str], Tuple[str, str]]]]' + source_path: Optional[str] + + OPTIONS_DOC = """ + **=== General Options ===** + + start + The start symbol. Either a string, or a list of strings for multiple possible starts (Default: "start") + debug + Display debug information and extra warnings. Use only when debugging (Default: ``False``) + When used with Earley, it generates a forest graph as "sppf.png", if 'dot' is installed. + strict + Throw an exception on any potential ambiguity, including shift/reduce conflicts, and regex collisions. + transformer + Applies the transformer to every parse tree (equivalent to applying it after the parse, but faster) + propagate_positions + Propagates positional attributes into the 'meta' attribute of all tree branches. + Sets attributes: (line, column, end_line, end_column, start_pos, end_pos, + container_line, container_column, container_end_line, container_end_column) + Accepts ``False``, ``True``, or a callable, which will filter which nodes to ignore when propagating. + maybe_placeholders + When ``True``, the ``[]`` operator returns ``None`` when not matched. + When ``False``, ``[]`` behaves like the ``?`` operator, and returns no value at all. + (default= ``True``) + cache + Cache the results of the Lark grammar analysis, for x2 to x3 faster loading. LALR only for now. + + - When ``False``, does nothing (default) + - When ``True``, caches to a temporary file in the local directory + - When given a string, caches to the path pointed by the string + regex + When True, uses the ``regex`` module instead of the stdlib ``re``. + g_regex_flags + Flags that are applied to all terminals (both regex and strings) + keep_all_tokens + Prevent the tree builder from automagically removing "punctuation" tokens (Default: ``False``) + tree_class + Lark will produce trees comprised of instances of this class instead of the default ``lark.Tree``. + + **=== Algorithm Options ===** + + parser + Decides which parser engine to use. Accepts "earley" or "lalr". (Default: "earley"). + (there is also a "cyk" option for legacy) + lexer + Decides whether or not to use a lexer stage + + - "auto" (default): Choose for me based on the parser + - "basic": Use a basic lexer + - "contextual": Stronger lexer (only works with parser="lalr") + - "dynamic": Flexible and powerful (only with parser="earley") + - "dynamic_complete": Same as dynamic, but tries *every* variation of tokenizing possible. + ambiguity + Decides how to handle ambiguity in the parse. Only relevant if parser="earley" + + - "resolve": The parser will automatically choose the simplest derivation + (it chooses consistently: greedy for tokens, non-greedy for rules) + - "explicit": The parser will return all derivations wrapped in "_ambig" tree nodes (i.e. a forest). + - "forest": The parser will return the root of the shared packed parse forest. + + **=== Misc. / Domain Specific Options ===** + + postlex + Lexer post-processing (Default: ``None``) Only works with the basic and contextual lexers. + priority + How priorities should be evaluated - "auto", ``None``, "normal", "invert" (Default: "auto") + lexer_callbacks + Dictionary of callbacks for the lexer. May alter tokens during lexing. Use with caution. + use_bytes + Accept an input of type ``bytes`` instead of ``str``. + edit_terminals + A callback for editing the terminals before parse. 
+ import_paths + A List of either paths or loader functions to specify from where grammars are imported + source_path + Override the source of from where the grammar was loaded. Useful for relative imports and unconventional grammar loading + **=== End of Options ===** + """ + if __doc__: + __doc__ += OPTIONS_DOC + + + ## + + ## + + ## + + ## + + ## + + ## + + _defaults: Dict[str, Any] = { + 'debug': False, + 'strict': False, + 'keep_all_tokens': False, + 'tree_class': None, + 'cache': False, + 'postlex': None, + 'parser': 'earley', + 'lexer': 'auto', + 'transformer': None, + 'start': 'start', + 'priority': 'auto', + 'ambiguity': 'auto', + 'regex': False, + 'propagate_positions': False, + 'lexer_callbacks': {}, + 'maybe_placeholders': True, + 'edit_terminals': None, + 'g_regex_flags': 0, + 'use_bytes': False, + 'import_paths': [], + 'source_path': None, + '_plugins': {}, + } + + def __init__(self, options_dict: Dict[str, Any]) -> None: + o = dict(options_dict) + + options = {} + for name, default in self._defaults.items(): + if name in o: + value = o.pop(name) + if isinstance(default, bool) and name not in ('cache', 'use_bytes', 'propagate_positions'): + value = bool(value) + else: + value = default + + options[name] = value + + if isinstance(options['start'], str): + options['start'] = [options['start']] + + self.__dict__['options'] = options + + + assert_config(self.parser, ('earley', 'lalr', 'cyk', None)) + + if self.parser == 'earley' and self.transformer: + raise ConfigurationError('Cannot specify an embedded transformer when using the Earley algorithm. ' + 'Please use your transformer on the resulting parse tree, or use a different algorithm (i.e. LALR)') + + if o: + raise ConfigurationError("Unknown options: %s" % o.keys()) + + def __getattr__(self, name: str) -> Any: + try: + return self.__dict__['options'][name] + except KeyError as e: + raise AttributeError(e) + + def __setattr__(self, name: str, value: str) -> None: + assert_config(name, self.options.keys(), "%r isn't a valid option. 
Expected one of: %s") + self.options[name] = value + + def serialize(self, memo = None) -> Dict[str, Any]: + return self.options + + @classmethod + def deserialize(cls, data: Dict[str, Any], memo: Dict[int, Union[TerminalDef, Rule]]) -> "LarkOptions": + return cls(data) + + +## + +## + +_LOAD_ALLOWED_OPTIONS = {'postlex', 'transformer', 'lexer_callbacks', 'use_bytes', 'debug', 'g_regex_flags', 'regex', 'propagate_positions', 'tree_class', '_plugins'} + +_VALID_PRIORITY_OPTIONS = ('auto', 'normal', 'invert', None) +_VALID_AMBIGUITY_OPTIONS = ('auto', 'resolve', 'explicit', 'forest') + + +_T = TypeVar('_T', bound="Lark") + +class Lark(Serialize): + #-- + + source_path: str + source_grammar: str + grammar: 'Grammar' + options: LarkOptions + lexer: Lexer + parser: 'ParsingFrontend' + terminals: Collection[TerminalDef] + + def __init__(self, grammar: 'Union[Grammar, str, IO[str]]', **options) -> None: + self.options = LarkOptions(options) + re_module: types.ModuleType + + ## + + use_regex = self.options.regex + if use_regex: + if _has_regex: + re_module = regex + else: + raise ImportError('`regex` module must be installed if calling `Lark(regex=True)`.') + else: + re_module = re + + ## + + if self.options.source_path is None: + try: + self.source_path = grammar.name ## + + except AttributeError: + self.source_path = '' + else: + self.source_path = self.options.source_path + + ## + + try: + read = grammar.read ## + + except AttributeError: + pass + else: + grammar = read() + + cache_fn = None + cache_sha256 = None + if isinstance(grammar, str): + self.source_grammar = grammar + if self.options.use_bytes: + if not isascii(grammar): + raise ConfigurationError("Grammar must be ascii only, when use_bytes=True") + + if self.options.cache: + if self.options.parser != 'lalr': + raise ConfigurationError("cache only works with parser='lalr' for now") + + unhashable = ('transformer', 'postlex', 'lexer_callbacks', 'edit_terminals', '_plugins') + options_str = ''.join(k+str(v) for k, v in options.items() if k not in unhashable) + from . import __version__ + s = grammar + options_str + __version__ + str(sys.version_info[:2]) + cache_sha256 = sha256_digest(s) + + if isinstance(self.options.cache, str): + cache_fn = self.options.cache + else: + if self.options.cache is not True: + raise ConfigurationError("cache argument must be bool or str") + + try: + username = getpass.getuser() + except Exception: + ## + + ## + + ## + + username = "unknown" + + cache_fn = tempfile.gettempdir() + "/.lark_cache_%s_%s_%s_%s.tmp" % (username, cache_sha256, *sys.version_info[:2]) + + old_options = self.options + try: + with FS.open(cache_fn, 'rb') as f: + logger.debug('Loading grammar from cache: %s', cache_fn) + ## + + for name in (set(options) - _LOAD_ALLOWED_OPTIONS): + del options[name] + file_sha256 = f.readline().rstrip(b'\n') + cached_used_files = pickle.load(f) + if file_sha256 == cache_sha256.encode('utf8') and verify_used_files(cached_used_files): + cached_parser_data = pickle.load(f) + self._load(cached_parser_data, **options) + return + except FileNotFoundError: + ## + + pass + except Exception: ## + + logger.exception("Failed to load Lark from cache: %r. 
We will try to carry on.", cache_fn) + + ## + + ## + + self.options = old_options + + + ## + + self.grammar, used_files = load_grammar(grammar, self.source_path, self.options.import_paths, self.options.keep_all_tokens) + else: + assert isinstance(grammar, Grammar) + self.grammar = grammar + + + if self.options.lexer == 'auto': + if self.options.parser == 'lalr': + self.options.lexer = 'contextual' + elif self.options.parser == 'earley': + if self.options.postlex is not None: + logger.info("postlex can't be used with the dynamic lexer, so we use 'basic' instead. " + "Consider using lalr with contextual instead of earley") + self.options.lexer = 'basic' + else: + self.options.lexer = 'dynamic' + elif self.options.parser == 'cyk': + self.options.lexer = 'basic' + else: + assert False, self.options.parser + lexer = self.options.lexer + if isinstance(lexer, type): + assert issubclass(lexer, Lexer) ## + + else: + assert_config(lexer, ('basic', 'contextual', 'dynamic', 'dynamic_complete')) + if self.options.postlex is not None and 'dynamic' in lexer: + raise ConfigurationError("Can't use postlex with a dynamic lexer. Use basic or contextual instead") + + if self.options.ambiguity == 'auto': + if self.options.parser == 'earley': + self.options.ambiguity = 'resolve' + else: + assert_config(self.options.parser, ('earley', 'cyk'), "%r doesn't support disambiguation. Use one of these parsers instead: %s") + + if self.options.priority == 'auto': + self.options.priority = 'normal' + + if self.options.priority not in _VALID_PRIORITY_OPTIONS: + raise ConfigurationError("invalid priority option: %r. Must be one of %r" % (self.options.priority, _VALID_PRIORITY_OPTIONS)) + if self.options.ambiguity not in _VALID_AMBIGUITY_OPTIONS: + raise ConfigurationError("invalid ambiguity option: %r. 
Must be one of %r" % (self.options.ambiguity, _VALID_AMBIGUITY_OPTIONS)) + + if self.options.parser is None: + terminals_to_keep = '*' + elif self.options.postlex is not None: + terminals_to_keep = set(self.options.postlex.always_accept) + else: + terminals_to_keep = set() + + ## + + self.terminals, self.rules, self.ignore_tokens = self.grammar.compile(self.options.start, terminals_to_keep) + + if self.options.edit_terminals: + for t in self.terminals: + self.options.edit_terminals(t) + + self._terminals_dict = {t.name: t for t in self.terminals} + + ## + + if self.options.priority == 'invert': + for rule in self.rules: + if rule.options.priority is not None: + rule.options.priority = -rule.options.priority + for term in self.terminals: + term.priority = -term.priority + ## + + ## + + ## + + elif self.options.priority is None: + for rule in self.rules: + if rule.options.priority is not None: + rule.options.priority = None + for term in self.terminals: + term.priority = 0 + + ## + + self.lexer_conf = LexerConf( + self.terminals, re_module, self.ignore_tokens, self.options.postlex, + self.options.lexer_callbacks, self.options.g_regex_flags, use_bytes=self.options.use_bytes, strict=self.options.strict + ) + + if self.options.parser: + self.parser = self._build_parser() + elif lexer: + self.lexer = self._build_lexer() + + if cache_fn: + logger.debug('Saving grammar to cache: %s', cache_fn) + try: + with FS.open(cache_fn, 'wb') as f: + assert cache_sha256 is not None + f.write(cache_sha256.encode('utf8') + b'\n') + pickle.dump(used_files, f) + self.save(f, _LOAD_ALLOWED_OPTIONS) + except IOError as e: + logger.exception("Failed to save Lark to cache: %r.", cache_fn, e) + + if __doc__: + __doc__ += "\n\n" + LarkOptions.OPTIONS_DOC + + __serialize_fields__ = 'parser', 'rules', 'options' + + def _build_lexer(self, dont_ignore: bool=False) -> BasicLexer: + lexer_conf = self.lexer_conf + if dont_ignore: + from copy import copy + lexer_conf = copy(lexer_conf) + lexer_conf.ignore = () + return BasicLexer(lexer_conf) + + def _prepare_callbacks(self) -> None: + self._callbacks = {} + ## + + if self.options.ambiguity != 'forest': + self._parse_tree_builder = ParseTreeBuilder( + self.rules, + self.options.tree_class or Tree, + self.options.propagate_positions, + self.options.parser != 'lalr' and self.options.ambiguity == 'explicit', + self.options.maybe_placeholders + ) + self._callbacks = self._parse_tree_builder.create_callback(self.options.transformer) + self._callbacks.update(_get_lexer_callbacks(self.options.transformer, self.terminals)) + + def _build_parser(self) -> "ParsingFrontend": + self._prepare_callbacks() + _validate_frontend_args(self.options.parser, self.options.lexer) + parser_conf = ParserConf(self.rules, self._callbacks, self.options.start) + return _construct_parsing_frontend( + self.options.parser, + self.options.lexer, + self.lexer_conf, + parser_conf, + options=self.options + ) + + def save(self, f, exclude_options: Collection[str] = ()) -> None: + #-- + data, m = self.memo_serialize([TerminalDef, Rule]) + if exclude_options: + data["options"] = {n: v for n, v in data["options"].items() if n not in exclude_options} + pickle.dump({'data': data, 'memo': m}, f, protocol=pickle.HIGHEST_PROTOCOL) + + @classmethod + def load(cls: Type[_T], f) -> _T: + #-- + inst = cls.__new__(cls) + return inst._load(f) + + def _deserialize_lexer_conf(self, data: Dict[str, Any], memo: Dict[int, Union[TerminalDef, Rule]], options: LarkOptions) -> LexerConf: + lexer_conf = 
LexerConf.deserialize(data['lexer_conf'], memo) + lexer_conf.callbacks = options.lexer_callbacks or {} + lexer_conf.re_module = regex if options.regex else re + lexer_conf.use_bytes = options.use_bytes + lexer_conf.g_regex_flags = options.g_regex_flags + lexer_conf.skip_validation = True + lexer_conf.postlex = options.postlex + return lexer_conf + + def _load(self: _T, f: Any, **kwargs) -> _T: + if isinstance(f, dict): + d = f + else: + d = pickle.load(f) + memo_json = d['memo'] + data = d['data'] + + assert memo_json + memo = SerializeMemoizer.deserialize(memo_json, {'Rule': Rule, 'TerminalDef': TerminalDef}, {}) + options = dict(data['options']) + if (set(kwargs) - _LOAD_ALLOWED_OPTIONS) & set(LarkOptions._defaults): + raise ConfigurationError("Some options are not allowed when loading a Parser: {}" + .format(set(kwargs) - _LOAD_ALLOWED_OPTIONS)) + options.update(kwargs) + self.options = LarkOptions.deserialize(options, memo) + self.rules = [Rule.deserialize(r, memo) for r in data['rules']] + self.source_path = '' + _validate_frontend_args(self.options.parser, self.options.lexer) + self.lexer_conf = self._deserialize_lexer_conf(data['parser'], memo, self.options) + self.terminals = self.lexer_conf.terminals + self._prepare_callbacks() + self._terminals_dict = {t.name: t for t in self.terminals} + self.parser = _deserialize_parsing_frontend( + data['parser'], + memo, + self.lexer_conf, + self._callbacks, + self.options, ## + + ) + return self + + @classmethod + def _load_from_dict(cls, data, memo, **kwargs): + inst = cls.__new__(cls) + return inst._load({'data': data, 'memo': memo}, **kwargs) + + @classmethod + def open(cls: Type[_T], grammar_filename: str, rel_to: Optional[str]=None, **options) -> _T: + #-- + if rel_to: + basepath = os.path.dirname(rel_to) + grammar_filename = os.path.join(basepath, grammar_filename) + with open(grammar_filename, encoding='utf8') as f: + return cls(f, **options) + + @classmethod + def open_from_package(cls: Type[_T], package: str, grammar_path: str, search_paths: 'Sequence[str]'=[""], **options) -> _T: + #-- + package_loader = FromPackageLoader(package, search_paths) + full_path, text = package_loader(None, grammar_path) + options.setdefault('source_path', full_path) + options.setdefault('import_paths', []) + options['import_paths'].append(package_loader) + return cls(text, **options) + + def __repr__(self): + return 'Lark(open(%r), parser=%r, lexer=%r, ...)' % (self.source_path, self.options.parser, self.options.lexer) + + + def lex(self, text: str, dont_ignore: bool=False) -> Iterator[Token]: + #-- + lexer: Lexer + if not hasattr(self, 'lexer') or dont_ignore: + lexer = self._build_lexer(dont_ignore) + else: + lexer = self.lexer + lexer_thread = LexerThread.from_text(lexer, text) + stream = lexer_thread.lex(None) + if self.options.postlex: + return self.options.postlex.process(stream) + return stream + + def get_terminal(self, name: str) -> TerminalDef: + #-- + return self._terminals_dict[name] + + def parse_interactive(self, text: Optional[str]=None, start: Optional[str]=None) -> 'InteractiveParser': + #-- + return self.parser.parse_interactive(text, start=start) + + def parse(self, text: str, start: Optional[str]=None, on_error: 'Optional[Callable[[UnexpectedInput], bool]]'=None) -> 'ParseTree': + #-- + return self.parser.parse(text, start=start, on_error=on_error) + + + + +class DedentError(LarkError): + pass + +class Indenter(PostLex, ABC): + paren_level: int + indent_level: List[int] + + def __init__(self) -> None: + self.paren_level = 0 + 
self.indent_level = [0] + assert self.tab_len > 0 + + def handle_NL(self, token: Token) -> Iterator[Token]: + if self.paren_level > 0: + return + + yield token + + indent_str = token.rsplit('\n', 1)[1] ## + + indent = indent_str.count(' ') + indent_str.count('\t') * self.tab_len + + if indent > self.indent_level[-1]: + self.indent_level.append(indent) + yield Token.new_borrow_pos(self.INDENT_type, indent_str, token) + else: + while indent < self.indent_level[-1]: + self.indent_level.pop() + yield Token.new_borrow_pos(self.DEDENT_type, indent_str, token) + + if indent != self.indent_level[-1]: + raise DedentError('Unexpected dedent to column %s. Expected dedent to %s' % (indent, self.indent_level[-1])) + + def _process(self, stream): + for token in stream: + if token.type == self.NL_type: + yield from self.handle_NL(token) + else: + yield token + + if token.type in self.OPEN_PAREN_types: + self.paren_level += 1 + elif token.type in self.CLOSE_PAREN_types: + self.paren_level -= 1 + assert self.paren_level >= 0 + + while len(self.indent_level) > 1: + self.indent_level.pop() + yield Token(self.DEDENT_type, '') + + assert self.indent_level == [0], self.indent_level + + def process(self, stream): + self.paren_level = 0 + self.indent_level = [0] + return self._process(stream) + + ## + + @property + def always_accept(self): + return (self.NL_type,) + + @property + @abstractmethod + def NL_type(self) -> str: + raise NotImplementedError() + + @property + @abstractmethod + def OPEN_PAREN_types(self) -> List[str]: + raise NotImplementedError() + + @property + @abstractmethod + def CLOSE_PAREN_types(self) -> List[str]: + raise NotImplementedError() + + @property + @abstractmethod + def INDENT_type(self) -> str: + raise NotImplementedError() + + @property + @abstractmethod + def DEDENT_type(self) -> str: + raise NotImplementedError() + + @property + @abstractmethod + def tab_len(self) -> int: + raise NotImplementedError() + + +class PythonIndenter(Indenter): + NL_type = '_NEWLINE' + OPEN_PAREN_types = ['LPAR', 'LSQB', 'LBRACE'] + CLOSE_PAREN_types = ['RPAR', 'RSQB', 'RBRACE'] + INDENT_type = '_INDENT' + DEDENT_type = '_DEDENT' + tab_len = 8 + + +import pickle, zlib, base64 +DATA = ( +{'parser': {'lexer_conf': {'terminals': [{'@': 0}, {'@': 1}, {'@': 2}, {'@': 3}, {'@': 4}, {'@': 5}, {'@': 6}, {'@': 7}, {'@': 8}, {'@': 9}, {'@': 10}, {'@': 11}, {'@': 12}, {'@': 13}, {'@': 14}, {'@': 15}, {'@': 16}, {'@': 17}, {'@': 18}, {'@': 19}, {'@': 20}, {'@': 21}, {'@': 22}, {'@': 23}, {'@': 24}, {'@': 25}, {'@': 26}, {'@': 27}, {'@': 28}, {'@': 29}, {'@': 30}, {'@': 31}], 'ignore': ['WS'], 'g_regex_flags': 0, 'use_bytes': False, 'lexer_type': 'contextual', '__type__': 'LexerConf'}, 'parser_conf': {'rules': [{'@': 32}, {'@': 33}, {'@': 34}, {'@': 35}, {'@': 36}, {'@': 37}, {'@': 38}, {'@': 39}, {'@': 40}, {'@': 41}, {'@': 42}, {'@': 43}, {'@': 44}, {'@': 45}, {'@': 46}, {'@': 47}, {'@': 48}, {'@': 49}, {'@': 50}, {'@': 51}, {'@': 52}, {'@': 53}, {'@': 54}, {'@': 55}, {'@': 56}, {'@': 57}, {'@': 58}, {'@': 59}, {'@': 60}, {'@': 61}, {'@': 62}, {'@': 63}, {'@': 64}, {'@': 65}, {'@': 66}, {'@': 67}, {'@': 68}, {'@': 69}, {'@': 70}, {'@': 71}, {'@': 72}, {'@': 73}, {'@': 74}, {'@': 75}, {'@': 76}, {'@': 77}, {'@': 78}, {'@': 79}, {'@': 80}, {'@': 81}, {'@': 82}, {'@': 83}, {'@': 84}, {'@': 85}, {'@': 86}, {'@': 87}, {'@': 88}, {'@': 89}, {'@': 90}, {'@': 91}], 'start': ['start'], 'parser_type': 'lalr', '__type__': 'ParserConf'}, 'parser': {'tokens': {0: '__ANON_3', 1: 'RPAR', 2: 'COMMA', 3: 'PLUS', 4: 'PERCENT', 5: 
'__ANON_7', 6: 'MINUS', 7: '__ANON_5', 8: 'COLON', 9: '$END', 10: 'VBAR', 11: '__ANON_4', 12: '__ANON_0', 13: 'AMPERSAND', 14: '__ANON_2', 15: '__ANON_6', 16: 'MORETHAN', 17: 'STAR', 18: 'SLASH', 19: 'CIRCUMFLEX', 20: 'RSQB', 21: 'LESSTHAN', 22: 'LSQB', 23: '__ANON_8', 24: '__ANON_1', 25: 'shift_expr', 26: 'TILDE', 27: 'NAME', 28: 'pow', 29: 'NUMBER', 30: 'factor', 31: 'bitwise_inversion', 32: 'sum', 33: 'var_name', 34: 'atom', 35: 'LPAR', 36: 'bitwise_xor', 37: 'bitwise_and', 38: 'term', 39: 'bitwise_or', 40: 'func_name', 41: '__arglist_star_1', 42: 'inversion', 43: 'conjunction', 44: 'comparison', 45: 'BANG', 46: 'disjunction', 47: 'expression', 48: 'DOT', 49: '__ANON_9', 50: 'start', 51: 'matpos', 52: '__factor_plus_0', 53: 'trailer', 54: 'arglist'}, 'states': {0: {0: (1, {'@': 82}), 1: (1, {'@': 82}), 2: (1, {'@': 82}), 3: (1, {'@': 82}), 4: (1, {'@': 82}), 5: (1, {'@': 82}), 6: (1, {'@': 82}), 7: (1, {'@': 82}), 8: (1, {'@': 82}), 9: (1, {'@': 82}), 10: (1, {'@': 82}), 11: (1, {'@': 82}), 12: (1, {'@': 82}), 13: (1, {'@': 82}), 14: (1, {'@': 82}), 15: (1, {'@': 82}), 16: (1, {'@': 82}), 17: (1, {'@': 82}), 18: (1, {'@': 82}), 19: (1, {'@': 82}), 20: (1, {'@': 82}), 21: (1, {'@': 82}), 22: (1, {'@': 82}), 23: (1, {'@': 82}), 24: (1, {'@': 82})}, 1: {0: (1, {'@': 88}), 1: (1, {'@': 88}), 2: (1, {'@': 88}), 3: (1, {'@': 88}), 4: (1, {'@': 88}), 5: (1, {'@': 88}), 6: (1, {'@': 88}), 7: (1, {'@': 88}), 8: (1, {'@': 88}), 9: (1, {'@': 88}), 10: (1, {'@': 88}), 11: (1, {'@': 88}), 12: (1, {'@': 88}), 13: (1, {'@': 88}), 14: (1, {'@': 88}), 15: (1, {'@': 88}), 16: (1, {'@': 88}), 17: (1, {'@': 88}), 18: (1, {'@': 88}), 19: (1, {'@': 88}), 20: (1, {'@': 88}), 21: (1, {'@': 88}), 22: (1, {'@': 88}), 24: (1, {'@': 88})}, 2: {25: (0, 84), 26: (0, 54), 27: (0, 24), 28: (0, 56), 6: (0, 71), 29: (0, 63), 30: (0, 27), 31: (0, 15), 32: (0, 30), 33: (0, 80), 34: (0, 58), 35: (0, 85), 36: (0, 48), 37: (0, 46), 3: (0, 43), 38: (0, 93), 39: (0, 31), 40: (0, 33)}, 3: {27: (0, 24), 30: (0, 27), 3: (0, 43), 38: (0, 93), 28: (0, 56), 6: (0, 71), 29: (0, 63), 40: (0, 33), 34: (0, 58), 32: (0, 78), 33: (0, 80), 35: (0, 85)}, 4: {41: (0, 44), 2: (0, 82), 1: (1, {'@': 87})}, 5: {39: (0, 7), 25: (0, 84), 26: (0, 54), 27: (0, 24), 28: (0, 56), 42: (0, 23), 6: (0, 71), 29: (0, 63), 30: (0, 27), 31: (0, 15), 32: (0, 30), 43: (0, 20), 33: (0, 80), 44: (0, 89), 34: (0, 58), 45: (0, 88), 35: (0, 85), 36: (0, 48), 37: (0, 46), 3: (0, 43), 38: (0, 93), 40: (0, 33)}, 6: {27: (0, 24), 40: (0, 33), 30: (0, 91), 34: (0, 58), 3: (0, 43), 28: (0, 56), 33: (0, 80), 6: (0, 71), 29: (0, 63), 35: (0, 85)}, 7: {10: (0, 12), 21: (1, {'@': 41}), 14: (1, {'@': 41}), 7: (1, {'@': 41}), 0: (1, {'@': 41}), 8: (1, {'@': 41}), 1: (1, {'@': 41}), 9: (1, {'@': 41}), 2: (1, {'@': 41}), 16: (1, {'@': 41}), 24: (1, {'@': 41}), 11: (1, {'@': 41}), 12: (1, {'@': 41})}, 8: {0: (1, {'@': 83}), 1: (1, {'@': 83}), 2: (1, {'@': 83}), 3: (1, {'@': 83}), 4: (1, {'@': 83}), 5: (1, {'@': 83}), 6: (1, {'@': 83}), 7: (1, {'@': 83}), 8: (1, {'@': 83}), 9: (1, {'@': 83}), 10: (1, {'@': 83}), 11: (1, {'@': 83}), 12: (1, {'@': 83}), 13: (1, {'@': 83}), 14: (1, {'@': 83}), 15: (1, {'@': 83}), 16: (1, {'@': 83}), 17: (1, {'@': 83}), 18: (1, {'@': 83}), 19: (1, {'@': 83}), 20: (1, {'@': 83}), 21: (1, {'@': 83}), 22: (1, {'@': 83}), 23: (1, {'@': 83}), 24: (1, {'@': 83})}, 9: {27: (0, 24), 30: (0, 27), 32: (0, 92), 3: (0, 43), 38: (0, 93), 28: (0, 56), 6: (0, 71), 29: (0, 63), 40: (0, 33), 34: (0, 58), 20: (0, 57), 33: (0, 80), 35: (0, 85)}, 10: {10: (0, 12), 21: 
(1, {'@': 46}), 14: (1, {'@': 46}), 7: (1, {'@': 46}), 0: (1, {'@': 46}), 8: (1, {'@': 46}), 1: (1, {'@': 46}), 9: (1, {'@': 46}), 2: (1, {'@': 46}), 16: (1, {'@': 46}), 24: (1, {'@': 46}), 11: (1, {'@': 46}), 12: (1, {'@': 46})}, 11: {39: (0, 7), 25: (0, 84), 43: (0, 87), 26: (0, 54), 27: (0, 24), 46: (0, 32), 28: (0, 56), 42: (0, 23), 6: (0, 71), 29: (0, 63), 30: (0, 27), 31: (0, 15), 32: (0, 30), 33: (0, 80), 44: (0, 89), 34: (0, 58), 45: (0, 88), 47: (0, 53), 35: (0, 85), 36: (0, 48), 37: (0, 46), 3: (0, 43), 38: (0, 93), 40: (0, 33)}, 12: {25: (0, 84), 27: (0, 24), 30: (0, 27), 36: (0, 21), 26: (0, 54), 37: (0, 46), 3: (0, 43), 38: (0, 93), 28: (0, 56), 6: (0, 71), 29: (0, 63), 31: (0, 15), 40: (0, 33), 32: (0, 30), 34: (0, 58), 33: (0, 80), 35: (0, 85)}, 13: {48: (0, 19), 0: (1, {'@': 80}), 1: (1, {'@': 80}), 2: (1, {'@': 80}), 3: (1, {'@': 80}), 4: (1, {'@': 80}), 5: (1, {'@': 80}), 6: (1, {'@': 80}), 7: (1, {'@': 80}), 8: (1, {'@': 80}), 9: (1, {'@': 80}), 10: (1, {'@': 80}), 11: (1, {'@': 80}), 12: (1, {'@': 80}), 13: (1, {'@': 80}), 14: (1, {'@': 80}), 15: (1, {'@': 80}), 16: (1, {'@': 80}), 17: (1, {'@': 80}), 18: (1, {'@': 80}), 19: (1, {'@': 80}), 20: (1, {'@': 80}), 21: (1, {'@': 80}), 22: (1, {'@': 80}), 23: (1, {'@': 80}), 24: (1, {'@': 80})}, 14: {10: (0, 12), 21: (1, {'@': 47}), 14: (1, {'@': 47}), 7: (1, {'@': 47}), 0: (1, {'@': 47}), 8: (1, {'@': 47}), 1: (1, {'@': 47}), 9: (1, {'@': 47}), 2: (1, {'@': 47}), 16: (1, {'@': 47}), 24: (1, {'@': 47}), 11: (1, {'@': 47}), 12: (1, {'@': 47})}, 15: {21: (1, {'@': 52}), 13: (1, {'@': 52}), 14: (1, {'@': 52}), 7: (1, {'@': 52}), 0: (1, {'@': 52}), 8: (1, {'@': 52}), 1: (1, {'@': 52}), 9: (1, {'@': 52}), 2: (1, {'@': 52}), 16: (1, {'@': 52}), 24: (1, {'@': 52}), 10: (1, {'@': 52}), 11: (1, {'@': 52}), 19: (1, {'@': 52}), 12: (1, {'@': 52})}, 16: {39: (0, 7), 25: (0, 84), 43: (0, 87), 26: (0, 54), 27: (0, 24), 46: (0, 32), 28: (0, 56), 42: (0, 23), 6: (0, 71), 29: (0, 63), 47: (0, 39), 30: (0, 27), 31: (0, 15), 32: (0, 30), 33: (0, 80), 44: (0, 89), 34: (0, 58), 45: (0, 88), 35: (0, 85), 36: (0, 48), 37: (0, 46), 3: (0, 43), 38: (0, 93), 40: (0, 33), 1: (1, {'@': 84})}, 17: {1: (0, 0)}, 18: {27: (0, 24), 30: (0, 27), 3: (0, 43), 28: (0, 56), 6: (0, 71), 29: (0, 63), 38: (0, 90), 40: (0, 33), 34: (0, 58), 33: (0, 80), 35: (0, 85)}, 19: {27: (0, 13), 33: (0, 55)}, 20: {8: (1, {'@': 36}), 1: (1, {'@': 36}), 9: (1, {'@': 36}), 2: (1, {'@': 36})}, 21: {19: (0, 22), 14: (1, {'@': 49}), 0: (1, {'@': 49}), 1: (1, {'@': 49}), 2: (1, {'@': 49}), 16: (1, {'@': 49}), 21: (1, {'@': 49}), 7: (1, {'@': 49}), 8: (1, {'@': 49}), 9: (1, {'@': 49}), 24: (1, {'@': 49}), 10: (1, {'@': 49}), 11: (1, {'@': 49}), 12: (1, {'@': 49})}, 22: {25: (0, 84), 27: (0, 24), 30: (0, 27), 26: (0, 54), 37: (0, 35), 3: (0, 43), 38: (0, 93), 28: (0, 56), 6: (0, 71), 29: (0, 63), 31: (0, 15), 40: (0, 33), 32: (0, 30), 34: (0, 58), 33: (0, 80), 35: (0, 85)}, 23: {24: (0, 94), 8: (1, {'@': 37}), 1: (1, {'@': 37}), 9: (1, {'@': 37}), 2: (1, {'@': 37}), 12: (1, {'@': 37})}, 24: {48: (0, 19), 49: (0, 49), 13: (1, {'@': 80}), 14: (1, {'@': 80}), 15: (1, {'@': 80}), 0: (1, {'@': 80}), 1: (1, {'@': 80}), 2: (1, {'@': 80}), 16: (1, {'@': 80}), 3: (1, {'@': 80}), 4: (1, {'@': 80}), 17: (1, {'@': 80}), 18: (1, {'@': 80}), 19: (1, {'@': 80}), 5: (1, {'@': 80}), 21: (1, {'@': 80}), 6: (1, {'@': 80}), 7: (1, {'@': 80}), 22: (1, {'@': 80}), 8: (1, {'@': 80}), 23: (1, {'@': 80}), 9: (1, {'@': 80}), 24: (1, {'@': 80}), 10: (1, {'@': 80}), 11: (1, {'@': 80}), 12: (1, {'@': 80}), 35: (1, 
{'@': 78}), 20: (1, {'@': 80})}, 25: {10: (0, 12), 21: (1, {'@': 45}), 14: (1, {'@': 45}), 7: (1, {'@': 45}), 0: (1, {'@': 45}), 8: (1, {'@': 45}), 1: (1, {'@': 45}), 9: (1, {'@': 45}), 2: (1, {'@': 45}), 16: (1, {'@': 45}), 24: (1, {'@': 45}), 11: (1, {'@': 45}), 12: (1, {'@': 45})}, 26: {39: (0, 7), 25: (0, 84), 43: (0, 87), 50: (0, 76), 26: (0, 54), 27: (0, 24), 46: (0, 32), 28: (0, 56), 42: (0, 23), 6: (0, 71), 29: (0, 63), 30: (0, 27), 31: (0, 15), 32: (0, 30), 33: (0, 80), 44: (0, 89), 34: (0, 58), 45: (0, 88), 35: (0, 85), 36: (0, 48), 37: (0, 46), 47: (0, 77), 3: (0, 43), 38: (0, 93), 40: (0, 33)}, 27: {22: (0, 9), 51: (0, 1), 18: (0, 18), 52: (0, 52), 17: (0, 50), 4: (0, 68), 13: (1, {'@': 62}), 14: (1, {'@': 62}), 15: (1, {'@': 62}), 0: (1, {'@': 62}), 1: (1, {'@': 62}), 2: (1, {'@': 62}), 16: (1, {'@': 62}), 3: (1, {'@': 62}), 19: (1, {'@': 62}), 5: (1, {'@': 62}), 21: (1, {'@': 62}), 6: (1, {'@': 62}), 7: (1, {'@': 62}), 8: (1, {'@': 62}), 9: (1, {'@': 62}), 24: (1, {'@': 62}), 10: (1, {'@': 62}), 11: (1, {'@': 62}), 12: (1, {'@': 62}), 20: (1, {'@': 62})}, 28: {10: (0, 12), 21: (1, {'@': 44}), 14: (1, {'@': 44}), 7: (1, {'@': 44}), 0: (1, {'@': 44}), 8: (1, {'@': 44}), 1: (1, {'@': 44}), 9: (1, {'@': 44}), 2: (1, {'@': 44}), 16: (1, {'@': 44}), 24: (1, {'@': 44}), 11: (1, {'@': 44}), 12: (1, {'@': 44})}, 29: {1: (1, {'@': 90}), 2: (1, {'@': 90})}, 30: {13: (1, {'@': 56}), 14: (1, {'@': 56}), 15: (1, {'@': 56}), 0: (1, {'@': 56}), 1: (1, {'@': 56}), 2: (1, {'@': 56}), 16: (1, {'@': 56}), 19: (1, {'@': 56}), 5: (1, {'@': 56}), 21: (1, {'@': 56}), 7: (1, {'@': 56}), 8: (1, {'@': 56}), 9: (1, {'@': 56}), 24: (1, {'@': 56}), 10: (1, {'@': 56}), 11: (1, {'@': 56}), 12: (1, {'@': 56})}, 31: {10: (0, 12), 21: (1, {'@': 42}), 14: (1, {'@': 42}), 7: (1, {'@': 42}), 0: (1, {'@': 42}), 8: (1, {'@': 42}), 1: (1, {'@': 42}), 9: (1, {'@': 42}), 2: (1, {'@': 42}), 16: (1, {'@': 42}), 24: (1, {'@': 42}), 11: (1, {'@': 42}), 12: (1, {'@': 42})}, 32: {8: (0, 11), 1: (1, {'@': 33}), 9: (1, {'@': 33}), 2: (1, {'@': 33})}, 33: {53: (0, 74), 35: (0, 38)}, 34: {0: (1, {'@': 89}), 1: (1, {'@': 89}), 2: (1, {'@': 89}), 3: (1, {'@': 89}), 4: (1, {'@': 89}), 5: (1, {'@': 89}), 6: (1, {'@': 89}), 7: (1, {'@': 89}), 8: (1, {'@': 89}), 9: (1, {'@': 89}), 10: (1, {'@': 89}), 11: (1, {'@': 89}), 12: (1, {'@': 89}), 13: (1, {'@': 89}), 14: (1, {'@': 89}), 15: (1, {'@': 89}), 16: (1, {'@': 89}), 17: (1, {'@': 89}), 18: (1, {'@': 89}), 19: (1, {'@': 89}), 20: (1, {'@': 89}), 21: (1, {'@': 89}), 22: (1, {'@': 89}), 24: (1, {'@': 89})}, 35: {13: (0, 40), 21: (1, {'@': 51}), 14: (1, {'@': 51}), 7: (1, {'@': 51}), 0: (1, {'@': 51}), 8: (1, {'@': 51}), 1: (1, {'@': 51}), 9: (1, {'@': 51}), 2: (1, {'@': 51}), 16: (1, {'@': 51}), 24: (1, {'@': 51}), 10: (1, {'@': 51}), 11: (1, {'@': 51}), 19: (1, {'@': 51}), 12: (1, {'@': 51})}, 36: {25: (0, 84), 26: (0, 54), 27: (0, 24), 28: (0, 56), 6: (0, 71), 29: (0, 63), 30: (0, 27), 31: (0, 15), 32: (0, 30), 33: (0, 80), 34: (0, 58), 35: (0, 85), 36: (0, 48), 37: (0, 46), 3: (0, 43), 38: (0, 93), 40: (0, 33), 39: (0, 10)}, 37: {8: (1, {'@': 38}), 1: (1, {'@': 38}), 9: (1, {'@': 38}), 2: (1, {'@': 38}), 12: (1, {'@': 38})}, 38: {39: (0, 7), 25: (0, 84), 43: (0, 87), 26: (0, 54), 27: (0, 24), 46: (0, 32), 28: (0, 56), 42: (0, 23), 6: (0, 71), 29: (0, 63), 30: (0, 27), 31: (0, 15), 32: (0, 30), 33: (0, 80), 47: (0, 4), 44: (0, 89), 34: (0, 58), 45: (0, 88), 1: (0, 8), 35: (0, 85), 36: (0, 48), 37: (0, 46), 3: (0, 43), 38: (0, 93), 54: (0, 17), 40: (0, 33)}, 39: {1: (1, {'@': 91}), 
2: (1, {'@': 91})}, 40: {25: (0, 84), 27: (0, 24), 30: (0, 27), 26: (0, 54), 3: (0, 43), 38: (0, 93), 28: (0, 56), 6: (0, 71), 29: (0, 63), 40: (0, 33), 32: (0, 30), 34: (0, 58), 33: (0, 80), 31: (0, 95), 35: (0, 85)}, 41: {25: (0, 84), 26: (0, 54), 27: (0, 24), 28: (0, 56), 6: (0, 71), 29: (0, 63), 30: (0, 27), 31: (0, 15), 32: (0, 30), 39: (0, 14), 33: (0, 80), 34: (0, 58), 35: (0, 85), 36: (0, 48), 37: (0, 46), 3: (0, 43), 38: (0, 93), 40: (0, 33)}, 42: {13: (1, {'@': 60}), 14: (1, {'@': 60}), 15: (1, {'@': 60}), 0: (1, {'@': 60}), 1: (1, {'@': 60}), 2: (1, {'@': 60}), 16: (1, {'@': 60}), 19: (1, {'@': 60}), 5: (1, {'@': 60}), 21: (1, {'@': 60}), 7: (1, {'@': 60}), 8: (1, {'@': 60}), 9: (1, {'@': 60}), 24: (1, {'@': 60}), 10: (1, {'@': 60}), 11: (1, {'@': 60}), 12: (1, {'@': 60}), 20: (1, {'@': 60})}, 43: {27: (0, 24), 40: (0, 33), 30: (0, 70), 34: (0, 58), 3: (0, 43), 28: (0, 56), 33: (0, 80), 6: (0, 71), 29: (0, 63), 35: (0, 85)}, 44: {2: (0, 16), 1: (1, {'@': 85})}, 45: {13: (1, {'@': 61}), 14: (1, {'@': 61}), 15: (1, {'@': 61}), 0: (1, {'@': 61}), 1: (1, {'@': 61}), 2: (1, {'@': 61}), 16: (1, {'@': 61}), 19: (1, {'@': 61}), 5: (1, {'@': 61}), 21: (1, {'@': 61}), 7: (1, {'@': 61}), 8: (1, {'@': 61}), 9: (1, {'@': 61}), 24: (1, {'@': 61}), 10: (1, {'@': 61}), 11: (1, {'@': 61}), 12: (1, {'@': 61}), 20: (1, {'@': 61})}, 46: {13: (0, 40), 21: (1, {'@': 50}), 14: (1, {'@': 50}), 7: (1, {'@': 50}), 0: (1, {'@': 50}), 8: (1, {'@': 50}), 1: (1, {'@': 50}), 9: (1, {'@': 50}), 2: (1, {'@': 50}), 16: (1, {'@': 50}), 24: (1, {'@': 50}), 10: (1, {'@': 50}), 11: (1, {'@': 50}), 19: (1, {'@': 50}), 12: (1, {'@': 50})}, 47: {35: (1, {'@': 79})}, 48: {19: (0, 22), 14: (1, {'@': 48}), 0: (1, {'@': 48}), 1: (1, {'@': 48}), 2: (1, {'@': 48}), 16: (1, {'@': 48}), 21: (1, {'@': 48}), 7: (1, {'@': 48}), 8: (1, {'@': 48}), 9: (1, {'@': 48}), 24: (1, {'@': 48}), 10: (1, {'@': 48}), 11: (1, {'@': 48}), 12: (1, {'@': 48})}, 49: {27: (0, 66), 40: (0, 47)}, 50: {27: (0, 24), 30: (0, 27), 3: (0, 43), 28: (0, 56), 6: (0, 71), 29: (0, 63), 40: (0, 33), 34: (0, 58), 38: (0, 79), 33: (0, 80), 35: (0, 85)}, 51: {27: (0, 24), 30: (0, 27), 3: (0, 43), 38: (0, 93), 28: (0, 56), 6: (0, 71), 29: (0, 63), 40: (0, 33), 32: (0, 59), 34: (0, 58), 33: (0, 80), 35: (0, 85)}, 52: {22: (0, 9), 51: (0, 34), 13: (1, {'@': 67}), 14: (1, {'@': 67}), 15: (1, {'@': 67}), 0: (1, {'@': 67}), 1: (1, {'@': 67}), 2: (1, {'@': 67}), 16: (1, {'@': 67}), 3: (1, {'@': 67}), 4: (1, {'@': 67}), 17: (1, {'@': 67}), 18: (1, {'@': 67}), 19: (1, {'@': 67}), 5: (1, {'@': 67}), 21: (1, {'@': 67}), 6: (1, {'@': 67}), 7: (1, {'@': 67}), 8: (1, {'@': 67}), 9: (1, {'@': 67}), 24: (1, {'@': 67}), 10: (1, {'@': 67}), 11: (1, {'@': 67}), 12: (1, {'@': 67}), 20: (1, {'@': 67})}, 53: {1: (1, {'@': 34}), 9: (1, {'@': 34}), 2: (1, {'@': 34})}, 54: {25: (0, 84), 27: (0, 24), 30: (0, 27), 26: (0, 54), 3: (0, 43), 38: (0, 93), 28: (0, 56), 6: (0, 71), 29: (0, 63), 31: (0, 64), 40: (0, 33), 32: (0, 30), 34: (0, 58), 33: (0, 80), 35: (0, 85)}, 55: {13: (1, {'@': 81}), 14: (1, {'@': 81}), 15: (1, {'@': 81}), 0: (1, {'@': 81}), 1: (1, {'@': 81}), 2: (1, {'@': 81}), 16: (1, {'@': 81}), 3: (1, {'@': 81}), 4: (1, {'@': 81}), 17: (1, {'@': 81}), 18: (1, {'@': 81}), 19: (1, {'@': 81}), 5: (1, {'@': 81}), 21: (1, {'@': 81}), 6: (1, {'@': 81}), 7: (1, {'@': 81}), 22: (1, {'@': 81}), 8: (1, {'@': 81}), 23: (1, {'@': 81}), 9: (1, {'@': 81}), 24: (1, {'@': 81}), 10: (1, {'@': 81}), 11: (1, {'@': 81}), 12: (1, {'@': 81}), 20: (1, {'@': 81})}, 56: {13: (1, {'@': 66}), 14: (1, {'@': 
66}), 15: (1, {'@': 66}), 0: (1, {'@': 66}), 1: (1, {'@': 66}), 2: (1, {'@': 66}), 16: (1, {'@': 66}), 3: (1, {'@': 66}), 4: (1, {'@': 66}), 17: (1, {'@': 66}), 18: (1, {'@': 66}), 19: (1, {'@': 66}), 5: (1, {'@': 66}), 21: (1, {'@': 66}), 6: (1, {'@': 66}), 7: (1, {'@': 66}), 22: (1, {'@': 66}), 8: (1, {'@': 66}), 9: (1, {'@': 66}), 24: (1, {'@': 66}), 10: (1, {'@': 66}), 11: (1, {'@': 66}), 12: (1, {'@': 66}), 20: (1, {'@': 66})}, 57: {13: (1, {'@': 73}), 14: (1, {'@': 73}), 0: (1, {'@': 73}), 15: (1, {'@': 73}), 1: (1, {'@': 73}), 2: (1, {'@': 73}), 16: (1, {'@': 73}), 3: (1, {'@': 73}), 4: (1, {'@': 73}), 17: (1, {'@': 73}), 18: (1, {'@': 73}), 19: (1, {'@': 73}), 20: (1, {'@': 73}), 5: (1, {'@': 73}), 21: (1, {'@': 73}), 6: (1, {'@': 73}), 7: (1, {'@': 73}), 22: (1, {'@': 73}), 8: (1, {'@': 73}), 9: (1, {'@': 73}), 24: (1, {'@': 73}), 10: (1, {'@': 73}), 11: (1, {'@': 73}), 12: (1, {'@': 73})}, 58: {23: (0, 6), 13: (1, {'@': 70}), 14: (1, {'@': 70}), 15: (1, {'@': 70}), 0: (1, {'@': 70}), 1: (1, {'@': 70}), 2: (1, {'@': 70}), 16: (1, {'@': 70}), 3: (1, {'@': 70}), 4: (1, {'@': 70}), 17: (1, {'@': 70}), 18: (1, {'@': 70}), 19: (1, {'@': 70}), 5: (1, {'@': 70}), 21: (1, {'@': 70}), 6: (1, {'@': 70}), 7: (1, {'@': 70}), 22: (1, {'@': 70}), 8: (1, {'@': 70}), 9: (1, {'@': 70}), 24: (1, {'@': 70}), 10: (1, {'@': 70}), 11: (1, {'@': 70}), 12: (1, {'@': 70}), 20: (1, {'@': 70})}, 59: {13: (1, {'@': 57}), 14: (1, {'@': 57}), 15: (1, {'@': 57}), 0: (1, {'@': 57}), 1: (1, {'@': 57}), 2: (1, {'@': 57}), 16: (1, {'@': 57}), 19: (1, {'@': 57}), 5: (1, {'@': 57}), 21: (1, {'@': 57}), 7: (1, {'@': 57}), 8: (1, {'@': 57}), 9: (1, {'@': 57}), 24: (1, {'@': 57}), 10: (1, {'@': 57}), 11: (1, {'@': 57}), 12: (1, {'@': 57})}, 60: {25: (0, 84), 26: (0, 54), 27: (0, 24), 28: (0, 56), 6: (0, 71), 29: (0, 63), 30: (0, 27), 31: (0, 15), 32: (0, 30), 33: (0, 80), 34: (0, 58), 35: (0, 85), 36: (0, 48), 37: (0, 46), 3: (0, 43), 38: (0, 93), 40: (0, 33), 39: (0, 25)}, 61: {25: (0, 84), 26: (0, 54), 27: (0, 24), 28: (0, 56), 6: (0, 71), 29: (0, 63), 30: (0, 27), 39: (0, 75), 31: (0, 15), 32: (0, 30), 33: (0, 80), 34: (0, 58), 35: (0, 85), 36: (0, 48), 37: (0, 46), 3: (0, 43), 38: (0, 93), 40: (0, 33)}, 62: {22: (0, 9), 52: (0, 52), 51: (0, 1), 13: (1, {'@': 69}), 14: (1, {'@': 69}), 15: (1, {'@': 69}), 0: (1, {'@': 69}), 1: (1, {'@': 69}), 2: (1, {'@': 69}), 16: (1, {'@': 69}), 3: (1, {'@': 69}), 4: (1, {'@': 69}), 17: (1, {'@': 69}), 18: (1, {'@': 69}), 19: (1, {'@': 69}), 5: (1, {'@': 69}), 21: (1, {'@': 69}), 6: (1, {'@': 69}), 7: (1, {'@': 69}), 8: (1, {'@': 69}), 9: (1, {'@': 69}), 24: (1, {'@': 69}), 10: (1, {'@': 69}), 11: (1, {'@': 69}), 12: (1, {'@': 69}), 20: (1, {'@': 69})}, 63: {13: (1, {'@': 76}), 14: (1, {'@': 76}), 15: (1, {'@': 76}), 0: (1, {'@': 76}), 1: (1, {'@': 76}), 2: (1, {'@': 76}), 16: (1, {'@': 76}), 3: (1, {'@': 76}), 4: (1, {'@': 76}), 17: (1, {'@': 76}), 18: (1, {'@': 76}), 19: (1, {'@': 76}), 5: (1, {'@': 76}), 21: (1, {'@': 76}), 6: (1, {'@': 76}), 7: (1, {'@': 76}), 22: (1, {'@': 76}), 8: (1, {'@': 76}), 23: (1, {'@': 76}), 9: (1, {'@': 76}), 24: (1, {'@': 76}), 10: (1, {'@': 76}), 11: (1, {'@': 76}), 12: (1, {'@': 76}), 20: (1, {'@': 76})}, 64: {21: (1, {'@': 55}), 13: (1, {'@': 55}), 14: (1, {'@': 55}), 7: (1, {'@': 55}), 0: (1, {'@': 55}), 8: (1, {'@': 55}), 1: (1, {'@': 55}), 9: (1, {'@': 55}), 2: (1, {'@': 55}), 16: (1, {'@': 55}), 24: (1, {'@': 55}), 10: (1, {'@': 55}), 11: (1, {'@': 55}), 19: (1, {'@': 55}), 12: (1, {'@': 55})}, 65: {13: (1, {'@': 74}), 14: (1, {'@': 74}), 15: 
(1, {'@': 74}), 0: (1, {'@': 74}), 1: (1, {'@': 74}), 2: (1, {'@': 74}), 16: (1, {'@': 74}), 3: (1, {'@': 74}), 4: (1, {'@': 74}), 17: (1, {'@': 74}), 18: (1, {'@': 74}), 19: (1, {'@': 74}), 5: (1, {'@': 74}), 21: (1, {'@': 74}), 6: (1, {'@': 74}), 7: (1, {'@': 74}), 22: (1, {'@': 74}), 8: (1, {'@': 74}), 23: (1, {'@': 74}), 9: (1, {'@': 74}), 24: (1, {'@': 74}), 10: (1, {'@': 74}), 11: (1, {'@': 74}), 12: (1, {'@': 74}), 20: (1, {'@': 74})}, 66: {49: (0, 49), 35: (1, {'@': 78})}, 67: {27: (0, 24), 30: (0, 27), 32: (0, 45), 3: (0, 43), 38: (0, 93), 28: (0, 56), 6: (0, 71), 29: (0, 63), 40: (0, 33), 34: (0, 58), 33: (0, 80), 35: (0, 85)}, 68: {27: (0, 24), 30: (0, 27), 3: (0, 43), 38: (0, 86), 28: (0, 56), 6: (0, 71), 29: (0, 63), 40: (0, 33), 34: (0, 58), 33: (0, 80), 35: (0, 85)}, 69: {1: (0, 65)}, 70: {22: (0, 9), 52: (0, 52), 51: (0, 1), 13: (1, {'@': 68}), 14: (1, {'@': 68}), 15: (1, {'@': 68}), 0: (1, {'@': 68}), 1: (1, {'@': 68}), 2: (1, {'@': 68}), 16: (1, {'@': 68}), 3: (1, {'@': 68}), 4: (1, {'@': 68}), 17: (1, {'@': 68}), 18: (1, {'@': 68}), 19: (1, {'@': 68}), 5: (1, {'@': 68}), 21: (1, {'@': 68}), 6: (1, {'@': 68}), 7: (1, {'@': 68}), 8: (1, {'@': 68}), 9: (1, {'@': 68}), 24: (1, {'@': 68}), 10: (1, {'@': 68}), 11: (1, {'@': 68}), 12: (1, {'@': 68}), 20: (1, {'@': 68})}, 71: {27: (0, 24), 40: (0, 33), 30: (0, 62), 34: (0, 58), 3: (0, 43), 28: (0, 56), 33: (0, 80), 6: (0, 71), 29: (0, 63), 35: (0, 85)}, 72: {8: (1, {'@': 40}), 1: (1, {'@': 40}), 9: (1, {'@': 40}), 2: (1, {'@': 40}), 24: (1, {'@': 40}), 12: (1, {'@': 40})}, 73: {27: (0, 24), 30: (0, 27), 32: (0, 42), 3: (0, 43), 38: (0, 93), 28: (0, 56), 6: (0, 71), 29: (0, 63), 40: (0, 33), 34: (0, 58), 33: (0, 80), 35: (0, 85)}, 74: {13: (1, {'@': 77}), 14: (1, {'@': 77}), 15: (1, {'@': 77}), 0: (1, {'@': 77}), 1: (1, {'@': 77}), 2: (1, {'@': 77}), 16: (1, {'@': 77}), 3: (1, {'@': 77}), 4: (1, {'@': 77}), 17: (1, {'@': 77}), 18: (1, {'@': 77}), 19: (1, {'@': 77}), 5: (1, {'@': 77}), 21: (1, {'@': 77}), 6: (1, {'@': 77}), 7: (1, {'@': 77}), 22: (1, {'@': 77}), 8: (1, {'@': 77}), 23: (1, {'@': 77}), 9: (1, {'@': 77}), 24: (1, {'@': 77}), 10: (1, {'@': 77}), 11: (1, {'@': 77}), 12: (1, {'@': 77}), 20: (1, {'@': 77})}, 75: {10: (0, 12), 21: (1, {'@': 43}), 14: (1, {'@': 43}), 7: (1, {'@': 43}), 0: (1, {'@': 43}), 8: (1, {'@': 43}), 1: (1, {'@': 43}), 9: (1, {'@': 43}), 2: (1, {'@': 43}), 16: (1, {'@': 43}), 24: (1, {'@': 43}), 11: (1, {'@': 43}), 12: (1, {'@': 43})}, 76: {}, 77: {9: (1, {'@': 32})}, 78: {13: (1, {'@': 58}), 14: (1, {'@': 58}), 15: (1, {'@': 58}), 0: (1, {'@': 58}), 1: (1, {'@': 58}), 2: (1, {'@': 58}), 16: (1, {'@': 58}), 19: (1, {'@': 58}), 5: (1, {'@': 58}), 21: (1, {'@': 58}), 7: (1, {'@': 58}), 8: (1, {'@': 58}), 9: (1, {'@': 58}), 24: (1, {'@': 58}), 10: (1, {'@': 58}), 11: (1, {'@': 58}), 12: (1, {'@': 58})}, 79: {13: (1, {'@': 63}), 14: (1, {'@': 63}), 15: (1, {'@': 63}), 0: (1, {'@': 63}), 1: (1, {'@': 63}), 2: (1, {'@': 63}), 16: (1, {'@': 63}), 3: (1, {'@': 63}), 19: (1, {'@': 63}), 5: (1, {'@': 63}), 21: (1, {'@': 63}), 6: (1, {'@': 63}), 7: (1, {'@': 63}), 8: (1, {'@': 63}), 9: (1, {'@': 63}), 24: (1, {'@': 63}), 10: (1, {'@': 63}), 11: (1, {'@': 63}), 12: (1, {'@': 63}), 20: (1, {'@': 63})}, 80: {13: (1, {'@': 75}), 14: (1, {'@': 75}), 15: (1, {'@': 75}), 0: (1, {'@': 75}), 1: (1, {'@': 75}), 2: (1, {'@': 75}), 16: (1, {'@': 75}), 3: (1, {'@': 75}), 4: (1, {'@': 75}), 17: (1, {'@': 75}), 18: (1, {'@': 75}), 19: (1, {'@': 75}), 5: (1, {'@': 75}), 21: (1, {'@': 75}), 6: (1, {'@': 75}), 7: (1, {'@': 75}), 
22: (1, {'@': 75}), 8: (1, {'@': 75}), 23: (1, {'@': 75}), 9: (1, {'@': 75}), 24: (1, {'@': 75}), 10: (1, {'@': 75}), 11: (1, {'@': 75}), 12: (1, {'@': 75}), 20: (1, {'@': 75})}, 81: {25: (0, 84), 39: (0, 28), 26: (0, 54), 27: (0, 24), 28: (0, 56), 6: (0, 71), 29: (0, 63), 30: (0, 27), 31: (0, 15), 32: (0, 30), 33: (0, 80), 34: (0, 58), 35: (0, 85), 36: (0, 48), 37: (0, 46), 3: (0, 43), 38: (0, 93), 40: (0, 33)}, 82: {39: (0, 7), 25: (0, 84), 43: (0, 87), 26: (0, 54), 27: (0, 24), 46: (0, 32), 28: (0, 56), 42: (0, 23), 6: (0, 71), 29: (0, 63), 30: (0, 27), 31: (0, 15), 32: (0, 30), 47: (0, 29), 33: (0, 80), 44: (0, 89), 34: (0, 58), 45: (0, 88), 35: (0, 85), 36: (0, 48), 37: (0, 46), 3: (0, 43), 38: (0, 93), 40: (0, 33), 1: (1, {'@': 86})}, 83: {13: (1, {'@': 72}), 14: (1, {'@': 72}), 0: (1, {'@': 72}), 15: (1, {'@': 72}), 1: (1, {'@': 72}), 2: (1, {'@': 72}), 16: (1, {'@': 72}), 3: (1, {'@': 72}), 4: (1, {'@': 72}), 17: (1, {'@': 72}), 18: (1, {'@': 72}), 19: (1, {'@': 72}), 20: (1, {'@': 72}), 5: (1, {'@': 72}), 21: (1, {'@': 72}), 6: (1, {'@': 72}), 7: (1, {'@': 72}), 22: (1, {'@': 72}), 8: (1, {'@': 72}), 9: (1, {'@': 72}), 24: (1, {'@': 72}), 10: (1, {'@': 72}), 11: (1, {'@': 72}), 12: (1, {'@': 72})}, 84: {5: (0, 3), 15: (0, 51), 21: (1, {'@': 54}), 13: (1, {'@': 54}), 14: (1, {'@': 54}), 7: (1, {'@': 54}), 0: (1, {'@': 54}), 8: (1, {'@': 54}), 1: (1, {'@': 54}), 9: (1, {'@': 54}), 2: (1, {'@': 54}), 16: (1, {'@': 54}), 24: (1, {'@': 54}), 10: (1, {'@': 54}), 11: (1, {'@': 54}), 19: (1, {'@': 54}), 12: (1, {'@': 54})}, 85: {39: (0, 7), 25: (0, 84), 43: (0, 87), 26: (0, 54), 27: (0, 24), 46: (0, 32), 47: (0, 69), 28: (0, 56), 42: (0, 23), 6: (0, 71), 29: (0, 63), 30: (0, 27), 31: (0, 15), 32: (0, 30), 33: (0, 80), 44: (0, 89), 34: (0, 58), 45: (0, 88), 35: (0, 85), 36: (0, 48), 37: (0, 46), 3: (0, 43), 38: (0, 93), 40: (0, 33)}, 86: {13: (1, {'@': 65}), 14: (1, {'@': 65}), 15: (1, {'@': 65}), 0: (1, {'@': 65}), 1: (1, {'@': 65}), 2: (1, {'@': 65}), 16: (1, {'@': 65}), 3: (1, {'@': 65}), 19: (1, {'@': 65}), 5: (1, {'@': 65}), 21: (1, {'@': 65}), 6: (1, {'@': 65}), 7: (1, {'@': 65}), 8: (1, {'@': 65}), 9: (1, {'@': 65}), 24: (1, {'@': 65}), 10: (1, {'@': 65}), 11: (1, {'@': 65}), 12: (1, {'@': 65}), 20: (1, {'@': 65})}, 87: {12: (0, 5), 8: (1, {'@': 35}), 1: (1, {'@': 35}), 9: (1, {'@': 35}), 2: (1, {'@': 35})}, 88: {39: (0, 7), 25: (0, 84), 26: (0, 54), 27: (0, 24), 28: (0, 56), 42: (0, 72), 6: (0, 71), 29: (0, 63), 30: (0, 27), 31: (0, 15), 32: (0, 30), 33: (0, 80), 44: (0, 89), 34: (0, 58), 45: (0, 88), 35: (0, 85), 36: (0, 48), 37: (0, 46), 3: (0, 43), 38: (0, 93), 40: (0, 33)}, 89: {0: (0, 61), 11: (0, 60), 14: (0, 36), 21: (0, 81), 16: (0, 2), 7: (0, 41), 8: (1, {'@': 39}), 1: (1, {'@': 39}), 9: (1, {'@': 39}), 2: (1, {'@': 39}), 24: (1, {'@': 39}), 12: (1, {'@': 39})}, 90: {13: (1, {'@': 64}), 14: (1, {'@': 64}), 15: (1, {'@': 64}), 0: (1, {'@': 64}), 1: (1, {'@': 64}), 2: (1, {'@': 64}), 16: (1, {'@': 64}), 3: (1, {'@': 64}), 19: (1, {'@': 64}), 5: (1, {'@': 64}), 21: (1, {'@': 64}), 6: (1, {'@': 64}), 7: (1, {'@': 64}), 8: (1, {'@': 64}), 9: (1, {'@': 64}), 24: (1, {'@': 64}), 10: (1, {'@': 64}), 11: (1, {'@': 64}), 12: (1, {'@': 64}), 20: (1, {'@': 64})}, 91: {22: (0, 9), 52: (0, 52), 51: (0, 1), 13: (1, {'@': 71}), 14: (1, {'@': 71}), 15: (1, {'@': 71}), 0: (1, {'@': 71}), 1: (1, {'@': 71}), 2: (1, {'@': 71}), 16: (1, {'@': 71}), 3: (1, {'@': 71}), 4: (1, {'@': 71}), 17: (1, {'@': 71}), 18: (1, {'@': 71}), 19: (1, {'@': 71}), 5: (1, {'@': 71}), 21: (1, {'@': 71}), 6: (1, 
{'@': 71}), 7: (1, {'@': 71}), 8: (1, {'@': 71}), 9: (1, {'@': 71}), 24: (1, {'@': 71}), 10: (1, {'@': 71}), 11: (1, {'@': 71}), 12: (1, {'@': 71}), 20: (1, {'@': 71})}, 92: {20: (0, 83)}, 93: {3: (0, 73), 6: (0, 67), 13: (1, {'@': 59}), 14: (1, {'@': 59}), 15: (1, {'@': 59}), 0: (1, {'@': 59}), 1: (1, {'@': 59}), 2: (1, {'@': 59}), 16: (1, {'@': 59}), 19: (1, {'@': 59}), 5: (1, {'@': 59}), 21: (1, {'@': 59}), 7: (1, {'@': 59}), 8: (1, {'@': 59}), 9: (1, {'@': 59}), 24: (1, {'@': 59}), 10: (1, {'@': 59}), 11: (1, {'@': 59}), 12: (1, {'@': 59}), 20: (1, {'@': 59})}, 94: {39: (0, 7), 25: (0, 84), 26: (0, 54), 27: (0, 24), 28: (0, 56), 6: (0, 71), 29: (0, 63), 30: (0, 27), 42: (0, 37), 31: (0, 15), 32: (0, 30), 33: (0, 80), 44: (0, 89), 34: (0, 58), 45: (0, 88), 35: (0, 85), 36: (0, 48), 37: (0, 46), 3: (0, 43), 38: (0, 93), 40: (0, 33)}, 95: {21: (1, {'@': 53}), 13: (1, {'@': 53}), 14: (1, {'@': 53}), 7: (1, {'@': 53}), 0: (1, {'@': 53}), 8: (1, {'@': 53}), 1: (1, {'@': 53}), 9: (1, {'@': 53}), 2: (1, {'@': 53}), 16: (1, {'@': 53}), 24: (1, {'@': 53}), 10: (1, {'@': 53}), 11: (1, {'@': 53}), 19: (1, {'@': 53}), 12: (1, {'@': 53})}}, 'start_states': {'start': 26}, 'end_states': {'start': 76}}, '__type__': 'ParsingFrontend'}, 'rules': [{'@': 32}, {'@': 33}, {'@': 34}, {'@': 35}, {'@': 36}, {'@': 37}, {'@': 38}, {'@': 39}, {'@': 40}, {'@': 41}, {'@': 42}, {'@': 43}, {'@': 44}, {'@': 45}, {'@': 46}, {'@': 47}, {'@': 48}, {'@': 49}, {'@': 50}, {'@': 51}, {'@': 52}, {'@': 53}, {'@': 54}, {'@': 55}, {'@': 56}, {'@': 57}, {'@': 58}, {'@': 59}, {'@': 60}, {'@': 61}, {'@': 62}, {'@': 63}, {'@': 64}, {'@': 65}, {'@': 66}, {'@': 67}, {'@': 68}, {'@': 69}, {'@': 70}, {'@': 71}, {'@': 72}, {'@': 73}, {'@': 74}, {'@': 75}, {'@': 76}, {'@': 77}, {'@': 78}, {'@': 79}, {'@': 80}, {'@': 81}, {'@': 82}, {'@': 83}, {'@': 84}, {'@': 85}, {'@': 86}, {'@': 87}, {'@': 88}, {'@': 89}, {'@': 90}, {'@': 91}], 'options': {'debug': False, 'strict': False, 'keep_all_tokens': False, 'tree_class': None, 'cache': False, 'postlex': None, 'parser': 'lalr', 'lexer': 'contextual', 'transformer': None, 'start': ['start'], 'priority': 'normal', 'ambiguity': 'auto', 'regex': False, 'propagate_positions': False, 'lexer_callbacks': {}, 'maybe_placeholders': False, 'edit_terminals': None, 'g_regex_flags': 0, 'use_bytes': False, 'import_paths': [], 'source_path': None, '_plugins': {}}, '__type__': 'Lark'} +) +MEMO = ( +{0: {'name': 'NUMBER', 'pattern': {'value': '(?:(?:(?:[0-9])+(?:e|E)(?:(?:\\+|\\-))?(?:[0-9])+|(?:(?:[0-9])+\\.(?:(?:[0-9])+)?|\\.(?:[0-9])+)(?:(?:e|E)(?:(?:\\+|\\-))?(?:[0-9])+)?)|(?:[0-9])+)', 'flags': [], 'raw': None, '_width': [1, 4294967295], '__type__': 'PatternRE'}, 'priority': 0, '__type__': 'TerminalDef'}, 1: {'name': 'WS', 'pattern': {'value': '(?:[ \t\x0c\r\n])+', 'flags': [], 'raw': None, '_width': [1, 4294967295], '__type__': 'PatternRE'}, 'priority': 0, '__type__': 'TerminalDef'}, 2: {'name': 'NAME', 'pattern': {'value': '[A-Za-z_][A-Za-z0-9_]*\\$?', 'flags': [], 'raw': '/[A-Za-z_][A-Za-z0-9_]*\\$?/', '_width': [1, 4294967295], '__type__': 'PatternRE'}, 'priority': 0, '__type__': 'TerminalDef'}, 3: {'name': 'COLON', 'pattern': {'value': ':', 'flags': [], 'raw': '":"', '__type__': 'PatternStr'}, 'priority': 0, '__type__': 'TerminalDef'}, 4: {'name': '__ANON_0', 'pattern': {'value': '||', 'flags': [], 'raw': '"||"', '__type__': 'PatternStr'}, 'priority': 0, '__type__': 'TerminalDef'}, 5: {'name': '__ANON_1', 'pattern': {'value': '&&', 'flags': [], 'raw': '"&&"', '__type__': 'PatternStr'}, 'priority': 0, 
'__type__': 'TerminalDef'}, 6: {'name': 'BANG', 'pattern': {'value': '!', 'flags': [], 'raw': '"!"', '__type__': 'PatternStr'}, 'priority': 0, '__type__': 'TerminalDef'}, 7: {'name': '__ANON_2', 'pattern': {'value': '!=', 'flags': [], 'raw': '"!="', '__type__': 'PatternStr'}, 'priority': 0, '__type__': 'TerminalDef'}, 8: {'name': 'MORETHAN', 'pattern': {'value': '>', 'flags': [], 'raw': '">"', '__type__': 'PatternStr'}, 'priority': 0, '__type__': 'TerminalDef'}, 9: {'name': '__ANON_3', 'pattern': {'value': '>=', 'flags': [], 'raw': '">="', '__type__': 'PatternStr'}, 'priority': 0, '__type__': 'TerminalDef'}, 10: {'name': 'LESSTHAN', 'pattern': {'value': '<', 'flags': [], 'raw': '"<"', '__type__': 'PatternStr'}, 'priority': 0, '__type__': 'TerminalDef'}, 11: {'name': '__ANON_4', 'pattern': {'value': '<=', 'flags': [], 'raw': '"<="', '__type__': 'PatternStr'}, 'priority': 0, '__type__': 'TerminalDef'}, 12: {'name': '__ANON_5', 'pattern': {'value': '==', 'flags': [], 'raw': '"=="', '__type__': 'PatternStr'}, 'priority': 0, '__type__': 'TerminalDef'}, 13: {'name': 'VBAR', 'pattern': {'value': '|', 'flags': [], 'raw': '"|"', '__type__': 'PatternStr'}, 'priority': 0, '__type__': 'TerminalDef'}, 14: {'name': 'CIRCUMFLEX', 'pattern': {'value': '^', 'flags': [], 'raw': '"^"', '__type__': 'PatternStr'}, 'priority': 0, '__type__': 'TerminalDef'}, 15: {'name': 'AMPERSAND', 'pattern': {'value': '&', 'flags': [], 'raw': '"&"', '__type__': 'PatternStr'}, 'priority': 0, '__type__': 'TerminalDef'}, 16: {'name': 'TILDE', 'pattern': {'value': '~', 'flags': [], 'raw': '"~"', '__type__': 'PatternStr'}, 'priority': 0, '__type__': 'TerminalDef'}, 17: {'name': '__ANON_6', 'pattern': {'value': '<<', 'flags': [], 'raw': '"<<"', '__type__': 'PatternStr'}, 'priority': 0, '__type__': 'TerminalDef'}, 18: {'name': '__ANON_7', 'pattern': {'value': '>>', 'flags': [], 'raw': '">>"', '__type__': 'PatternStr'}, 'priority': 0, '__type__': 'TerminalDef'}, 19: {'name': 'PLUS', 'pattern': {'value': '+', 'flags': [], 'raw': '"+"', '__type__': 'PatternStr'}, 'priority': 0, '__type__': 'TerminalDef'}, 20: {'name': 'MINUS', 'pattern': {'value': '-', 'flags': [], 'raw': '"-"', '__type__': 'PatternStr'}, 'priority': 0, '__type__': 'TerminalDef'}, 21: {'name': 'STAR', 'pattern': {'value': '*', 'flags': [], 'raw': '"*"', '__type__': 'PatternStr'}, 'priority': 0, '__type__': 'TerminalDef'}, 22: {'name': 'SLASH', 'pattern': {'value': '/', 'flags': [], 'raw': '"/"', '__type__': 'PatternStr'}, 'priority': 0, '__type__': 'TerminalDef'}, 23: {'name': 'PERCENT', 'pattern': {'value': '%', 'flags': [], 'raw': '"%"', '__type__': 'PatternStr'}, 'priority': 0, '__type__': 'TerminalDef'}, 24: {'name': '__ANON_8', 'pattern': {'value': '**', 'flags': [], 'raw': '"**"', '__type__': 'PatternStr'}, 'priority': 0, '__type__': 'TerminalDef'}, 25: {'name': 'LSQB', 'pattern': {'value': '[', 'flags': [], 'raw': '"["', '__type__': 'PatternStr'}, 'priority': 0, '__type__': 'TerminalDef'}, 26: {'name': 'RSQB', 'pattern': {'value': ']', 'flags': [], 'raw': '"]"', '__type__': 'PatternStr'}, 'priority': 0, '__type__': 'TerminalDef'}, 27: {'name': 'LPAR', 'pattern': {'value': '(', 'flags': [], 'raw': '"("', '__type__': 'PatternStr'}, 'priority': 0, '__type__': 'TerminalDef'}, 28: {'name': 'RPAR', 'pattern': {'value': ')', 'flags': [], 'raw': '")"', '__type__': 'PatternStr'}, 'priority': 0, '__type__': 'TerminalDef'}, 29: {'name': '__ANON_9', 'pattern': {'value': '::', 'flags': [], 'raw': '"::"', '__type__': 'PatternStr'}, 'priority': 0, '__type__': 'TerminalDef'}, 
30: {'name': 'DOT', 'pattern': {'value': '.', 'flags': [], 'raw': '"."', '__type__': 'PatternStr'}, 'priority': 0, '__type__': 'TerminalDef'}, 31: {'name': 'COMMA', 'pattern': {'value': ',', 'flags': [], 'raw': '","', '__type__': 'PatternStr'}, 'priority': 0, '__type__': 'TerminalDef'}, 32: {'origin': {'name': Token('RULE', 'start'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'expression', '__type__': 'NonTerminal'}], 'order': 0, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 33: {'origin': {'name': Token('RULE', 'expression'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'disjunction', '__type__': 'NonTerminal'}], 'order': 0, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 34: {'origin': {'name': Token('RULE', 'expression'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'disjunction', '__type__': 'NonTerminal'}, {'name': 'COLON', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'expression', '__type__': 'NonTerminal'}], 'order': 1, 'alias': 'multi_out', 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 35: {'origin': {'name': Token('RULE', 'disjunction'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'conjunction', '__type__': 'NonTerminal'}], 'order': 0, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 36: {'origin': {'name': Token('RULE', 'disjunction'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'conjunction', '__type__': 'NonTerminal'}, {'name': '__ANON_0', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'conjunction', '__type__': 'NonTerminal'}], 'order': 1, 'alias': 'lor', 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 37: {'origin': {'name': Token('RULE', 'conjunction'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'inversion', '__type__': 'NonTerminal'}], 'order': 0, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 38: {'origin': {'name': Token('RULE', 'conjunction'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'inversion', '__type__': 'NonTerminal'}, {'name': '__ANON_1', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'inversion', '__type__': 'NonTerminal'}], 'order': 1, 'alias': 'land', 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 39: {'origin': {'name': Token('RULE', 'inversion'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'comparison', '__type__': 'NonTerminal'}], 'order': 0, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 40: {'origin': {'name': Token('RULE', 'inversion'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'BANG', 'filter_out': True, '__type__': 'Terminal'}, {'name': 
'inversion', '__type__': 'NonTerminal'}], 'order': 1, 'alias': 'linv', 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 41: {'origin': {'name': Token('RULE', 'comparison'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'bitwise_or', '__type__': 'NonTerminal'}], 'order': 0, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 42: {'origin': {'name': Token('RULE', 'comparison'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'comparison', '__type__': 'NonTerminal'}, {'name': 'MORETHAN', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'bitwise_or', '__type__': 'NonTerminal'}], 'order': 1, 'alias': 'gt', 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 43: {'origin': {'name': Token('RULE', 'comparison'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'comparison', '__type__': 'NonTerminal'}, {'name': '__ANON_3', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'bitwise_or', '__type__': 'NonTerminal'}], 'order': 2, 'alias': 'gte', 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 44: {'origin': {'name': Token('RULE', 'comparison'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'comparison', '__type__': 'NonTerminal'}, {'name': 'LESSTHAN', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'bitwise_or', '__type__': 'NonTerminal'}], 'order': 3, 'alias': 'lt', 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 45: {'origin': {'name': Token('RULE', 'comparison'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'comparison', '__type__': 'NonTerminal'}, {'name': '__ANON_4', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'bitwise_or', '__type__': 'NonTerminal'}], 'order': 4, 'alias': 'lte', 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 46: {'origin': {'name': Token('RULE', 'comparison'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'comparison', '__type__': 'NonTerminal'}, {'name': '__ANON_2', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'bitwise_or', '__type__': 'NonTerminal'}], 'order': 5, 'alias': 'neq', 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 47: {'origin': {'name': Token('RULE', 'comparison'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'comparison', '__type__': 'NonTerminal'}, {'name': '__ANON_5', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'bitwise_or', '__type__': 'NonTerminal'}], 'order': 6, 'alias': 'eq', 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 48: {'origin': {'name': Token('RULE', 'bitwise_or'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'bitwise_xor', '__type__': 'NonTerminal'}], 'order': 0, 'alias': None, 'options': {'keep_all_tokens': 
False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 49: {'origin': {'name': Token('RULE', 'bitwise_or'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'bitwise_or', '__type__': 'NonTerminal'}, {'name': 'VBAR', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'bitwise_xor', '__type__': 'NonTerminal'}], 'order': 1, 'alias': 'bor', 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 50: {'origin': {'name': Token('RULE', 'bitwise_xor'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'bitwise_and', '__type__': 'NonTerminal'}], 'order': 0, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 51: {'origin': {'name': Token('RULE', 'bitwise_xor'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'bitwise_xor', '__type__': 'NonTerminal'}, {'name': 'CIRCUMFLEX', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'bitwise_and', '__type__': 'NonTerminal'}], 'order': 1, 'alias': 'bxor', 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 52: {'origin': {'name': Token('RULE', 'bitwise_and'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'bitwise_inversion', '__type__': 'NonTerminal'}], 'order': 0, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 53: {'origin': {'name': Token('RULE', 'bitwise_and'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'bitwise_and', '__type__': 'NonTerminal'}, {'name': 'AMPERSAND', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'bitwise_inversion', '__type__': 'NonTerminal'}], 'order': 1, 'alias': 'band', 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 54: {'origin': {'name': Token('RULE', 'bitwise_inversion'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'shift_expr', '__type__': 'NonTerminal'}], 'order': 0, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 55: {'origin': {'name': Token('RULE', 'bitwise_inversion'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'TILDE', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'bitwise_inversion', '__type__': 'NonTerminal'}], 'order': 1, 'alias': 'binv', 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 56: {'origin': {'name': Token('RULE', 'shift_expr'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'sum', '__type__': 'NonTerminal'}], 'order': 0, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 57: {'origin': {'name': Token('RULE', 'shift_expr'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'shift_expr', '__type__': 'NonTerminal'}, {'name': '__ANON_6', 'filter_out': True, '__type__': 'Terminal'}, 
{'name': 'sum', '__type__': 'NonTerminal'}], 'order': 1, 'alias': 'lshift', 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 58: {'origin': {'name': Token('RULE', 'shift_expr'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'shift_expr', '__type__': 'NonTerminal'}, {'name': '__ANON_7', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'sum', '__type__': 'NonTerminal'}], 'order': 2, 'alias': 'rshift', 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 59: {'origin': {'name': Token('RULE', 'sum'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'term', '__type__': 'NonTerminal'}], 'order': 0, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 60: {'origin': {'name': Token('RULE', 'sum'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'term', '__type__': 'NonTerminal'}, {'name': 'PLUS', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'sum', '__type__': 'NonTerminal'}], 'order': 1, 'alias': 'add', 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 61: {'origin': {'name': Token('RULE', 'sum'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'term', '__type__': 'NonTerminal'}, {'name': 'MINUS', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'sum', '__type__': 'NonTerminal'}], 'order': 2, 'alias': 'sub', 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 62: {'origin': {'name': Token('RULE', 'term'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'factor', '__type__': 'NonTerminal'}], 'order': 0, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 63: {'origin': {'name': Token('RULE', 'term'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'factor', '__type__': 'NonTerminal'}, {'name': 'STAR', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'term', '__type__': 'NonTerminal'}], 'order': 1, 'alias': 'mul', 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 64: {'origin': {'name': Token('RULE', 'term'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'factor', '__type__': 'NonTerminal'}, {'name': 'SLASH', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'term', '__type__': 'NonTerminal'}], 'order': 2, 'alias': 'div', 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 65: {'origin': {'name': Token('RULE', 'term'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'factor', '__type__': 'NonTerminal'}, {'name': 'PERCENT', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'term', '__type__': 'NonTerminal'}], 'order': 3, 'alias': 'mod', 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, 
'__type__': 'Rule'}, 66: {'origin': {'name': Token('RULE', 'factor'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'pow', '__type__': 'NonTerminal'}], 'order': 0, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 67: {'origin': {'name': Token('RULE', 'factor'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'factor', '__type__': 'NonTerminal'}, {'name': '__factor_plus_0', '__type__': 'NonTerminal'}], 'order': 1, 'alias': 'matr', 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 68: {'origin': {'name': Token('RULE', 'factor'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'PLUS', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'factor', '__type__': 'NonTerminal'}], 'order': 2, 'alias': 'pos', 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 69: {'origin': {'name': Token('RULE', 'factor'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'MINUS', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'factor', '__type__': 'NonTerminal'}], 'order': 3, 'alias': 'neg', 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 70: {'origin': {'name': Token('RULE', 'pow'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'atom', '__type__': 'NonTerminal'}], 'order': 0, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 71: {'origin': {'name': Token('RULE', 'pow'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'atom', '__type__': 'NonTerminal'}, {'name': '__ANON_8', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'factor', '__type__': 'NonTerminal'}], 'order': 1, 'alias': 'pow', 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 72: {'origin': {'name': Token('RULE', 'matpos'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'LSQB', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'sum', '__type__': 'NonTerminal'}, {'name': 'RSQB', 'filter_out': True, '__type__': 'Terminal'}], 'order': 0, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 73: {'origin': {'name': Token('RULE', 'matpos'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'LSQB', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'RSQB', 'filter_out': True, '__type__': 'Terminal'}], 'order': 1, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': [False, True, False], '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 74: {'origin': {'name': Token('RULE', 'atom'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'LPAR', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'expression', '__type__': 'NonTerminal'}, {'name': 'RPAR', 'filter_out': True, '__type__': 'Terminal'}], 'order': 0, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': False, 
'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 75: {'origin': {'name': Token('RULE', 'atom'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'var_name', '__type__': 'NonTerminal'}], 'order': 1, 'alias': 'symbol', 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 76: {'origin': {'name': Token('RULE', 'atom'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'NUMBER', 'filter_out': False, '__type__': 'Terminal'}], 'order': 2, 'alias': 'literal', 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 77: {'origin': {'name': Token('RULE', 'atom'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'func_name', '__type__': 'NonTerminal'}, {'name': 'trailer', '__type__': 'NonTerminal'}], 'order': 3, 'alias': 'func', 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 78: {'origin': {'name': Token('RULE', 'func_name'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'NAME', 'filter_out': False, '__type__': 'Terminal'}], 'order': 0, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 79: {'origin': {'name': Token('RULE', 'func_name'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'NAME', 'filter_out': False, '__type__': 'Terminal'}, {'name': '__ANON_9', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'func_name', '__type__': 'NonTerminal'}], 'order': 1, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 80: {'origin': {'name': Token('RULE', 'var_name'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'NAME', 'filter_out': False, '__type__': 'Terminal'}], 'order': 0, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 81: {'origin': {'name': Token('RULE', 'var_name'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'NAME', 'filter_out': False, '__type__': 'Terminal'}, {'name': 'DOT', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'var_name', '__type__': 'NonTerminal'}], 'order': 1, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 82: {'origin': {'name': Token('RULE', 'trailer'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'LPAR', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'arglist', '__type__': 'NonTerminal'}, {'name': 'RPAR', 'filter_out': True, '__type__': 'Terminal'}], 'order': 0, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 83: {'origin': {'name': Token('RULE', 'trailer'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'LPAR', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'RPAR', 'filter_out': True, '__type__': 'Terminal'}], 'order': 1, 'alias': None, 'options': 
{'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': [False, True, False], '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 84: {'origin': {'name': Token('RULE', 'arglist'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'expression', '__type__': 'NonTerminal'}, {'name': '__arglist_star_1', '__type__': 'NonTerminal'}, {'name': 'COMMA', 'filter_out': True, '__type__': 'Terminal'}], 'order': 0, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 85: {'origin': {'name': Token('RULE', 'arglist'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'expression', '__type__': 'NonTerminal'}, {'name': '__arglist_star_1', '__type__': 'NonTerminal'}], 'order': 1, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 86: {'origin': {'name': Token('RULE', 'arglist'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'expression', '__type__': 'NonTerminal'}, {'name': 'COMMA', 'filter_out': True, '__type__': 'Terminal'}], 'order': 2, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 87: {'origin': {'name': Token('RULE', 'arglist'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'expression', '__type__': 'NonTerminal'}], 'order': 3, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 88: {'origin': {'name': '__factor_plus_0', '__type__': 'NonTerminal'}, 'expansion': [{'name': 'matpos', '__type__': 'NonTerminal'}], 'order': 0, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 89: {'origin': {'name': '__factor_plus_0', '__type__': 'NonTerminal'}, 'expansion': [{'name': '__factor_plus_0', '__type__': 'NonTerminal'}, {'name': 'matpos', '__type__': 'NonTerminal'}], 'order': 1, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 90: {'origin': {'name': '__arglist_star_1', '__type__': 'NonTerminal'}, 'expansion': [{'name': 'COMMA', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'expression', '__type__': 'NonTerminal'}], 'order': 0, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 91: {'origin': {'name': '__arglist_star_1', '__type__': 'NonTerminal'}, 'expansion': [{'name': '__arglist_star_1', '__type__': 'NonTerminal'}, {'name': 'COMMA', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'expression', '__type__': 'NonTerminal'}], 'order': 1, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}} +) +Shift = 0 +Reduce = 1 +def Lark_StandAlone(**kwargs): + return Lark._load_from_dict(DATA, MEMO, **kwargs) diff --git a/tests/test_numexpr.py b/tests/test_numexpr.py index 6f3f4b9..a1eaf65 100644 --- 
a/tests/test_numexpr.py +++ b/tests/test_numexpr.py @@ -8,120 +8,120 @@ def test_simple_add(): - a = toast(formulate.numexpr.exp_to_ptree("a+2.0"), nxp = True) + a = formulate.from_numexpr(("a+2.0")) out = a.to_numexpr() assert ast.unparse(ast.parse(out)) == ast.unparse(ast.parse("(a+2.0)")) def test_simple_sub(): - a = toast(formulate.numexpr.exp_to_ptree("a-2.0"), nxp = True) + a = formulate.from_numexpr(("a-2.0")) out = a.to_numexpr() assert ast.unparse(ast.parse(out)) == ast.unparse(ast.parse("(a-2.0)")) def test_simple_mul(): - a = toast(formulate.numexpr.exp_to_ptree("f*2.0"), nxp = True) + a = formulate.from_numexpr(("f*2.0")) out = a.to_numexpr() assert ast.unparse(ast.parse(out)) == ast.unparse(ast.parse("(f*2.0)")) def test_simple_div(): - a = toast(formulate.numexpr.exp_to_ptree("a/2.0"), nxp = True) + a = formulate.from_numexpr(("a/2.0")) out = a.to_numexpr() assert ast.unparse(ast.parse(out)) == ast.unparse(ast.parse("(a/2.0)")) def test_simple_lt(): - a = toast(formulate.numexpr.exp_to_ptree("a<2.0"), nxp = True) + a = formulate.from_numexpr(("a<2.0")) out = a.to_numexpr() assert ast.unparse(ast.parse(out)) == ast.unparse(ast.parse("(a<2.0)")) def test_simple_lte(): - a = toast(formulate.numexpr.exp_to_ptree("a<=2.0"), nxp = True) + a = formulate.from_numexpr(("a<=2.0")) out = a.to_numexpr() assert ast.unparse(ast.parse(out)) == ast.unparse(ast.parse("(a<=2.0)")) def test_simple_gt(): - a = toast(formulate.numexpr.exp_to_ptree("a>2.0"), nxp = True) + a = formulate.from_numexpr(("a>2.0")) out = a.to_numexpr() assert ast.unparse(ast.parse(out)) == ast.unparse(ast.parse("(a>2.0)")) def test_simple_gte(): - a = toast(formulate.numexpr.exp_to_ptree("a>=2.0"), nxp = True) + a = formulate.from_numexpr(("a>=2.0")) out = a.to_numexpr() assert ast.unparse(ast.parse(out)) == ast.unparse(ast.parse("(a>=2.0)")) def test_simple_eq(): - a = toast(formulate.numexpr.exp_to_ptree("a==2.0"), nxp = True) + a = formulate.from_numexpr(("a==2.0")) out = a.to_numexpr() assert ast.unparse(ast.parse(out)) == ast.unparse(ast.parse("(a==2.0)")) def test_simple_neq(): - a = toast(formulate.numexpr.exp_to_ptree("a!=2.0"), nxp = True) + a = formulate.from_numexpr(("a!=2.0")) out = a.to_numexpr() assert ast.unparse(ast.parse(out)) == ast.unparse(ast.parse("(a!=2.0)")) def test_simple_bor(): - a = toast(formulate.numexpr.exp_to_ptree("a|b"), nxp = True) + a = formulate.from_numexpr(("a|b")) out = a.to_numexpr() assert ast.unparse(ast.parse(out)) == ast.unparse(ast.parse("a | b")) def test_simple_band(): - a = toast(formulate.numexpr.exp_to_ptree("a&c"), nxp = True) + a = formulate.from_numexpr(("a&c")) out = a.to_numexpr() assert ast.unparse(ast.parse(out)) == ast.unparse(ast.parse("a & c")) def test_simple_bxor(): - a = toast(formulate.numexpr.exp_to_ptree("a^2.0"), nxp = True) + a = formulate.from_numexpr(("a^2.0")) out = a.to_numexpr() assert ast.unparse(ast.parse(out)) == ast.unparse(ast.parse("(a^2.0)")) def test_simple_pow(): - a = toast(formulate.numexpr.exp_to_ptree("a**2.0"), nxp = True) + a = formulate.from_numexpr(("a**2.0")) out = a.to_numexpr() assert ast.unparse(ast.parse(out)) == ast.unparse(ast.parse("(a**2.0)")) def test_simple_function(): - a = toast(formulate.numexpr.exp_to_ptree("sqrt(4)"), nxp = True) + a = formulate.from_numexpr(("sqrt(4)")) out = a.to_numexpr() assert ast.unparse(ast.parse(out)) == ast.unparse(ast.parse("sqrt(4.0)")) def test_simple_unary_pos(): - a = toast(formulate.numexpr.exp_to_ptree("+5.0"), nxp = True) + a = formulate.from_numexpr(("+5.0")) out = a.to_numexpr() 
     assert ast.unparse(ast.parse(out)) == ast.unparse(ast.parse("(+5.0)"))


 def test_simple_unary_neg():
-    a = toast(formulate.numexpr.exp_to_ptree("-5.0"), nxp = True)
+    a = formulate.from_numexpr(("-5.0"))
     out = a.to_numexpr()
     assert ast.unparse(ast.parse(out)) == ast.unparse(ast.parse("(-5.0)"))


 def test_simple_unary_binv():
-    a = toast(formulate.numexpr.exp_to_ptree("~bool"), nxp = True)
+    a = formulate.from_numexpr(("~bool"))
     out = a.to_numexpr()
     assert ast.unparse(ast.parse(out)) == ast.unparse(ast.parse("~bool"))


 def test_unary_binary_pos():
-    a = toast(formulate.numexpr.exp_to_ptree("2.0 - -6"), nxp = True)
+    a = formulate.from_numexpr(("2.0 - -6"))
     out = a.to_numexpr()
     assert ast.unparse(ast.parse(out)) == ast.unparse(ast.parse("(2.0-(-6.0))"))


 def test_complex_exp():
-    a = toast(formulate.numexpr.exp_to_ptree("(~a**b)*23/(var|45)"), nxp = True)
+    a = formulate.from_numexpr(("(~a**b)*23/(var|45)"))
     out = a.to_numexpr()
     assert ast.unparse(ast.parse(out)) == ast.unparse(ast.parse("((~(a**b))*(23.0/(var|45.0)))"))
diff --git a/tests/test_root.py b/tests/test_root.py
index 3f931fc..2050b43 100644
--- a/tests/test_root.py
+++ b/tests/test_root.py
@@ -8,120 +8,120 @@


 def test_simple_add():
-    a = toast(formulate.numexpr.exp_to_ptree("a+2.0"), nxp = True)
+    a = formulate.from_numexpr(("a+2.0"))
     out = a.to_root()
     assert ast.unparse(ast.parse(out)) == ast.unparse(ast.parse("(a+2.0)"))


 def test_simple_sub():
-    a = toast(formulate.numexpr.exp_to_ptree("a-2.0"), nxp = True)
+    a = formulate.from_numexpr(("a-2.0"))
     out = a.to_root()
     assert ast.unparse(ast.parse(out)) == ast.unparse(ast.parse("(a-2.0)"))


 def test_simple_mul():
-    a = toast(formulate.numexpr.exp_to_ptree("f*2.0"), nxp = True)
+    a = formulate.from_numexpr(("f*2.0"))
     out = a.to_root()
     assert ast.unparse(ast.parse(out)) == ast.unparse(ast.parse("(f*2.0)"))


 def test_simple_div():
-    a = toast(formulate.numexpr.exp_to_ptree("a/2.0"), nxp = True)
+    a = formulate.from_numexpr(("a/2.0"))
     out = a.to_root()
     assert ast.unparse(ast.parse(out)) == ast.unparse(ast.parse("(a/2.0)"))


 def test_simple_lt():
-    a = toast(formulate.numexpr.exp_to_ptree("a<2.0"), nxp = True)
+    a = formulate.from_numexpr(("a<2.0"))
     out = a.to_root()
     assert ast.unparse(ast.parse(out)) == ast.unparse(ast.parse("(a<2.0)"))


 def test_simple_lte():
-    a = toast(formulate.numexpr.exp_to_ptree("a<=2.0"), nxp = True)
+    a = formulate.from_numexpr(("a<=2.0"))
     out = a.to_root()
     assert ast.unparse(ast.parse(out)) == ast.unparse(ast.parse("(a<=2.0)"))


 def test_simple_gt():
-    a = toast(formulate.numexpr.exp_to_ptree("a>2.0"), nxp = True)
+    a = formulate.from_numexpr(("a>2.0"))
     out = a.to_root()
     assert ast.unparse(ast.parse(out)) == ast.unparse(ast.parse("(a>2.0)"))


 def test_simple_gte():
-    a = toast(formulate.numexpr.exp_to_ptree("a>=2.0"), nxp = True)
+    a = formulate.from_numexpr(("a>=2.0"))
     out = a.to_root()
     assert ast.unparse(ast.parse(out)) == ast.unparse(ast.parse("(a>=2.0)"))


 def test_simple_eq():
-    a = toast(formulate.numexpr.exp_to_ptree("a==2.0"), nxp = True)
+    a = formulate.from_numexpr(("a==2.0"))
     out = a.to_root()
     assert ast.unparse(ast.parse(out)) == ast.unparse(ast.parse("(a==2.0)"))


 def test_simple_neq():
-    a = toast(formulate.numexpr.exp_to_ptree("a!=2.0"), nxp = True)
+    a = formulate.from_numexpr(("a!=2.0"))
     out = a.to_root()
     assert ast.unparse(ast.parse(out)) == ast.unparse(ast.parse("(a!=2.0)"))


 def test_simple_bor():
-    a = toast(formulate.numexpr.exp_to_ptree("a|b"), nxp = True)
+    a = formulate.from_numexpr(("a|b"))
     out = a.to_root()
     assert ast.unparse(ast.parse(out)) == ast.unparse(ast.parse("a | b"))


 def test_simple_band():
-    a = toast(formulate.numexpr.exp_to_ptree("a&c"), nxp = True)
+    a = formulate.from_numexpr(("a&c"))
     out = a.to_root()
     assert ast.unparse(ast.parse(out)) == ast.unparse(ast.parse("a & c"))


 def test_simple_bxor():
-    a = toast(formulate.numexpr.exp_to_ptree("a^2.0"), nxp = True)
+    a = formulate.from_numexpr(("a^2.0"))
     out = a.to_root()
     assert ast.unparse(ast.parse(out)) == ast.unparse(ast.parse("(a^2.0)"))


 def test_simple_pow():
-    a = toast(formulate.numexpr.exp_to_ptree("a**2.0"), nxp = True)
+    a = formulate.from_numexpr(("a**2.0"))
     out = a.to_root()
     assert ast.unparse(ast.parse(out)) == ast.unparse(ast.parse("(a**2.0)"))


 def test_simple_function():
-    a = toast(formulate.numexpr.exp_to_ptree("sqrt(4)"), nxp = True)
+    a = formulate.from_numexpr(("sqrt(4)"))
     out = a.to_root()
     assert out == "TMATH::Sqrt(4.0)"


 def test_simple_unary_pos():
-    a = toast(formulate.numexpr.exp_to_ptree("+5.0"), nxp = True)
+    a = formulate.from_numexpr(("+5.0"))
     out = a.to_root()
     assert ast.unparse(ast.parse(out)) == ast.unparse(ast.parse("(+5.0)"))


 def test_simple_unary_neg():
-    a = toast(formulate.numexpr.exp_to_ptree("-5.0"), nxp = True)
+    a = formulate.from_numexpr(("-5.0"))
     out = a.to_root()
     assert ast.unparse(ast.parse(out)) == ast.unparse(ast.parse("(-5.0)"))


 def test_simple_unary_binv():
-    a = toast(formulate.numexpr.exp_to_ptree("~bool"), nxp = True)
+    a = formulate.from_numexpr(("~bool"))
     out = a.to_root()
     assert ast.unparse(ast.parse(out)) == ast.unparse(ast.parse("~bool"))


 def test_unary_binary_pos():
-    a = toast(formulate.numexpr.exp_to_ptree("2.0 - -6"), nxp = True)
+    a = formulate.from_numexpr(("2.0 - -6"))
     out = a.to_root()
     assert ast.unparse(ast.parse(out)) == ast.unparse(ast.parse("(2.0-(-6.0))"))


 def test_complex_exp():
-    a = toast(formulate.numexpr.exp_to_ptree("(~a**b)*23/(var|45)"), nxp = True)
+    a = formulate.from_numexpr(("(~a**b)*23/(var|45)"))
     out = a.to_root()
     assert ast.unparse(ast.parse(out)) == ast.unparse(ast.parse("((~(a**b))*(23.0/(var|45.0)))"))
diff --git a/tests/test_ttreeformula.py b/tests/test_ttreeformula.py
index 47e2bf2..3284059 100644
--- a/tests/test_ttreeformula.py
+++ b/tests/test_ttreeformula.py
@@ -8,150 +8,150 @@


 def test_simple_add():
-    a = toast(formulate.ttreeformula.exp_to_ptree("a+2.0"), nxp = False)
+    a = formulate.from_root(("a+2.0"))
     out = a.to_python()
     assert ast.unparse(ast.parse(out)) == ast.unparse(ast.parse("(a+2.0)"))


 def test_simple_sub():
-    a = toast(formulate.ttreeformula.exp_to_ptree("a-2.0"), nxp = False)
+    a = formulate.from_root(("a-2.0"))
     out = a.to_python()
     assert ast.unparse(ast.parse(out)) == ast.unparse(ast.parse("(a-2.0)"))


 def test_simple_mul():
-    a = toast(formulate.ttreeformula.exp_to_ptree("f*2.0"), nxp = False)
+    a = formulate.from_root(("f*2.0"))
     out = a.to_python()
     assert ast.unparse(ast.parse(out)) == ast.unparse(ast.parse("(f*2.0)"))


 def test_simple_div():
-    a = toast(formulate.ttreeformula.exp_to_ptree("a/2.0"), nxp = False)
+    a = formulate.from_root(("a/2.0"))
     out = a.to_python()
     assert ast.unparse(ast.parse(out)) == ast.unparse(ast.parse("(a/2.0)"))


 def test_simple_lt():
-    a = toast(formulate.ttreeformula.exp_to_ptree("a<2.0"), nxp = False)
+    a = formulate.from_root(("a<2.0"))
     out = a.to_python()
     assert ast.unparse(ast.parse(out)) == ast.unparse(ast.parse("(a<2.0)"))


 def test_simple_lte():
-    a = toast(formulate.ttreeformula.exp_to_ptree("a<=2.0"), nxp = False)
+    a = formulate.from_root(("a<=2.0"))
     out = a.to_python()
     assert ast.unparse(ast.parse(out)) == ast.unparse(ast.parse("(a<=2.0)"))


 def test_simple_gt():
-    a = toast(formulate.ttreeformula.exp_to_ptree("a>2.0"), nxp = False)
+    a = formulate.from_root(("a>2.0"))
     out = a.to_python()
     assert ast.unparse(ast.parse(out)) == ast.unparse(ast.parse("(a>2.0)"))


 def test_simple_gte():
-    a = toast(formulate.ttreeformula.exp_to_ptree("a>=2.0"), nxp = False)
+    a = formulate.from_root(("a>=2.0"))
     out = a.to_python()
     assert ast.unparse(ast.parse(out)) == ast.unparse(ast.parse("(a>=2.0)"))


 def test_simple_eq():
-    a = toast(formulate.ttreeformula.exp_to_ptree("a==2.0"), nxp = False)
+    a = formulate.from_root(("a==2.0"))
     out = a.to_python()
     assert ast.unparse(ast.parse(out)) == ast.unparse(ast.parse("(a==2.0)"))


 def test_simple_neq():
-    a = toast(formulate.ttreeformula.exp_to_ptree("a!=2.0"), nxp = False)
+    a = formulate.from_root(("a!=2.0"))
     out = a.to_python()
     assert ast.unparse(ast.parse(out)) == ast.unparse(ast.parse("(a!=2.0)"))


 def test_simple_bor():
-    a = toast(formulate.ttreeformula.exp_to_ptree("a|b"), nxp = False)
+    a = formulate.from_root(("a|b"))
     out = a.to_python()
     assert ast.unparse(ast.parse(out)) == ast.unparse(ast.parse("np.bitwise_or(a,b)"))


 def test_simple_band():
-    a = toast(formulate.ttreeformula.exp_to_ptree("a&c"), nxp = False)
+    a = formulate.from_root(("a&c"))
     out = a.to_python()
     assert ast.unparse(ast.parse(out)) == ast.unparse(ast.parse("np.bitwise_and(a,c)"))


 def test_simple_bxor():
-    a = toast(formulate.ttreeformula.exp_to_ptree("a^2.0"), nxp = False)
+    a = formulate.from_root(("a^2.0"))
     out = a.to_python()
     assert ast.unparse(ast.parse(out)) == ast.unparse(ast.parse("(a^2.0)"))


 def test_simple_land():
-    a = toast(formulate.ttreeformula.exp_to_ptree("a&&2.0"), nxp = False)
+    a = formulate.from_root(("a&&2.0"))
     out = a.to_python()
     assert ast.unparse(ast.parse(out)) == ast.unparse(ast.parse("(a and 2.0)"))


 def test_simple_lor():
-    a = toast(formulate.ttreeformula.exp_to_ptree("a||2.0"), nxp = False)
+    a = formulate.from_root(("a||2.0"))
     out = a.to_python()
     assert ast.unparse(ast.parse(out)) == ast.unparse(ast.parse("(a or 2.0)"))


 def test_simple_pow():
-    a = toast(formulate.ttreeformula.exp_to_ptree("a**2.0"), nxp = False)
+    a = formulate.from_root(("a**2.0"))
     out = a.to_python()
     assert ast.unparse(ast.parse(out)) == ast.unparse(ast.parse("(a**2.0)"))


 def test_simple_matrix():
-    a = toast(formulate.ttreeformula.exp_to_ptree("a[45][1]"), nxp = False)
+    a = formulate.from_root(("a[45][1]"))
     out = a.to_python()
     assert ast.unparse(ast.parse(out)) == ast.unparse(ast.parse("a[:, 45.0, 1.0]"))


 def test_simple_function():
-    a = toast(formulate.ttreeformula.exp_to_ptree("Math::sqrt(4)"), nxp = False)
+    a = formulate.from_root(("Math::sqrt(4)"))
     out = a.to_python()
     assert ast.unparse(ast.parse(out)) == ast.unparse(ast.parse("np.sqrt(4.0)"))


 def test_simple_unary_pos():
-    a = toast(formulate.ttreeformula.exp_to_ptree("+5.0"), nxp = False)
+    a = formulate.from_root(("+5.0"))
     out = a.to_python()
     assert ast.unparse(ast.parse(out)) == ast.unparse(ast.parse("(+5.0)"))


 def test_simple_unary_neg():
-    a = toast(formulate.ttreeformula.exp_to_ptree("-5.0"), nxp = False)
+    a = formulate.from_root(("-5.0"))
     out = a.to_python()
     assert ast.unparse(ast.parse(out)) == ast.unparse(ast.parse("(-5.0)"))


 def test_simple_unary_binv():
-    a = toast(formulate.ttreeformula.exp_to_ptree("~bool"), nxp = False)
+    a = formulate.from_root(("~bool"))
     out = a.to_python()
     assert ast.unparse(ast.parse(out)) == ast.unparse(ast.parse("np.invert(bool)"))


 def test_simple_unary_linv():
-    a = toast(formulate.ttreeformula.exp_to_ptree("!bool"), nxp = False)
+    a = formulate.from_root(("!bool"))
     out = a.to_python()
     assert ast.unparse(ast.parse(out)) == ast.unparse(ast.parse("np.logical_not(bool)"))


 def test_unary_binary_pos():
-    a = toast(formulate.ttreeformula.exp_to_ptree("2.0 - -6"), nxp = False)
+    a = formulate.from_root(("2.0 - -6"))
     out = a.to_python()
     assert ast.unparse(ast.parse(out)) == ast.unparse(ast.parse("(2.0-(-6.0))"))


 def test_complex_matrix():
-    a = toast(formulate.ttreeformula.exp_to_ptree("mat1[a**23][mat2[45 - -34]]"), nxp = False)
+    a = formulate.from_root(("mat1[a**23][mat2[45 - -34]]"))
     out = a.to_python()
     assert ast.unparse(ast.parse(out)) == ast.unparse(ast.parse("(mat1[:,(a**23.0),(mat2[:,(45.0-(-34.0))])])"))


 def test_complex_exp():
-    a = toast(formulate.ttreeformula.exp_to_ptree("~a**b*23/(var||45)"), nxp = False)
+    a = formulate.from_root(("~a**b*23/(var||45)"))
     out = a.to_python()
     assert ast.unparse(ast.parse(out)) == ast.unparse(ast.parse("np.invert(((a**b)*(23.0/(var or 45.0))))"))
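
Reviewer note (not part of the patch): the tests above all follow the same pattern with the new top-level API, so here is a minimal usage sketch. The expression strings and expected output strings are taken directly from the assertions in this diff; nothing beyond from_root, from_numexpr, to_root, and to_python is assumed.

    import formulate

    # numexpr-style input, ROOT/TTreeFormula-style output
    a = formulate.from_numexpr("a+2.0")
    print(a.to_root())      # per tests/test_root.py: "(a+2.0)"

    # ROOT/TTreeFormula-style input, Python/NumPy-style output
    b = formulate.from_root("Math::sqrt(4)")
    print(b.to_python())    # per tests/test_ttreeformula.py: "np.sqrt(4.0)"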