@mizar
Last active September 27, 2024 18:18
{
"cells": [
{
"cell_type": "markdown",
"metadata": {
"id": "view-in-github",
"colab_type": "text"
},
"source": [
"<a href=\"https://colab.research.google.com/gist/mizar/4ad25cb42e75f37545a56909003ea7b9/stlangex.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"cellView": "form",
"id": "LWUCGA0UH6sM"
},
"outputs": [],
"source": [
"# @title (Google Colabにて初回使用時やランタイム接続解除後は 「ランタイム>全てのセルを実行(Ctrl+F9)」 の操作をして下さい)\n",
"\n",
"STLANGEX_GRAMMER = r\"\"\"\n",
"?start: program\n",
"program: statements \"return\" value lineends\n",
"?value: number | const_k | const_m | const_n | variable\n",
"number: /[0-9]+/\n",
"const_k: \"K\"\n",
"const_n: \"N\"\n",
"variable: \"$\" var_ident\n",
"var_ident: /[0-9]/\n",
"?statements: statement*\n",
"?statement: assignment | branching | repeating | lineend\n",
"assignment: \"$\" var_ident \"=\" expression lineend\n",
"?expression: value | add | sub | mul | div | rem\n",
"add: value \"+\" value\n",
"sub: value \"-\" value\n",
"mul: value \"*\" value\n",
"div: value \"/\" value\n",
"rem: value \"%\" value\n",
"branching: \"if\" condition lineend statements \"end\" lineend\n",
"repeating: \"while\" condition lineend statements \"end\" lineend\n",
"?condition: eq | ne | ge | le | gt | lt\n",
"eq: value \"==\" value\n",
"ne: value \"!=\" value\n",
"ge: value \">=\" value\n",
"le: value \"<=\" value\n",
"gt: value \">\" value\n",
"lt: value \"<\" value\n",
"?lineends: lineend* ST_COMMENT?\n",
"?lineend: ST_COMMENT? CR? LF\n",
"ST_COMMENT: \"#\" /[^\\n]*/\n",
"CR : /\\r/\n",
"LF : /\\n/\n",
"WS_INLINE: (\" \"|/\\t/)+\n",
"%ignore WS_INLINE\n",
"\"\"\"\n",
"\n",
"STLANGEX_STANDALONE_MD = \"\"\"\n",
"stlangex_parser.py 作成手順:\n",
"1. lark モジュールのインストール 例: `pip install lark>=1.1.9 --upgrade`\n",
"2. `STLANGEX_GRAMMER` の内容を `stlangex.lark` ファイルとして作成\n",
"3. `python -m lark.tools.standalone stlangex.lark > stlangex_parser.py` を実行\n",
"\"\"\"\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"cellView": "form",
"id": "5vMKMicq7xnO"
},
"outputs": [],
"source": [
"# @title StLangEx Parser (Lark v1.1.9 standalone)\n",
"# The file was automatically generated by Lark v1.1.9\n",
"__version__ = \"1.1.9\"\n",
"\n",
"#\n",
"#\n",
"# Lark Stand-alone Generator Tool\n",
"# ----------------------------------\n",
"# Generates a stand-alone LALR(1) parser\n",
"#\n",
"# Git: https://github.com/erezsh/lark\n",
"# Author: Erez Shinan ([email protected])\n",
"#\n",
"#\n",
"# >>> LICENSE\n",
"#\n",
"# This tool and its generated code use a separate license from Lark,\n",
"# and are subject to the terms of the Mozilla Public License, v. 2.0.\n",
"# If a copy of the MPL was not distributed with this\n",
"# file, You can obtain one at https://mozilla.org/MPL/2.0/.\n",
"#\n",
"# If you wish to purchase a commercial license for this tool and its\n",
"# generated code, you may contact me via email or otherwise.\n",
"#\n",
"# If MPL2 is incompatible with your free or open-source project,\n",
"# contact me and we'll work it out.\n",
"#\n",
"#\n",
"\n",
"from copy import deepcopy\n",
"from abc import ABC, abstractmethod\n",
"from types import ModuleType\n",
"from typing import (\n",
" TypeVar, Generic, Type, Tuple, List, Dict, Iterator, Collection, Callable, Optional, FrozenSet, Any,\n",
" Union, Iterable, IO, TYPE_CHECKING, overload, Sequence,\n",
" Pattern as REPattern, ClassVar, Set, Mapping\n",
")\n",
"\n",
"\n",
"class LarkError(Exception):\n",
" pass\n",
"\n",
"\n",
"class ConfigurationError(LarkError, ValueError):\n",
" pass\n",
"\n",
"\n",
"def assert_config(value, options: Collection, msg='Got %r, expected one of %s'):\n",
" if value not in options:\n",
" raise ConfigurationError(msg % (value, options))\n",
"\n",
"\n",
"class GrammarError(LarkError):\n",
" pass\n",
"\n",
"\n",
"class ParseError(LarkError):\n",
" pass\n",
"\n",
"\n",
"class LexError(LarkError):\n",
" pass\n",
"\n",
"T = TypeVar('T')\n",
"\n",
"class UnexpectedInput(LarkError):\n",
" #--\n",
" line: int\n",
" column: int\n",
" pos_in_stream = None\n",
" state: Any\n",
" _terminals_by_name = None\n",
" interactive_parser: 'InteractiveParser'\n",
"\n",
" def get_context(self, text: str, span: int=40) -> str:\n",
" #--\n",
" assert self.pos_in_stream is not None, self\n",
" pos = self.pos_in_stream\n",
" start = max(pos - span, 0)\n",
" end = pos + span\n",
" if not isinstance(text, bytes):\n",
" before = text[start:pos].rsplit('\\n', 1)[-1]\n",
" after = text[pos:end].split('\\n', 1)[0]\n",
" return before + after + '\\n' + ' ' * len(before.expandtabs()) + '^\\n'\n",
" else:\n",
" before = text[start:pos].rsplit(b'\\n', 1)[-1]\n",
" after = text[pos:end].split(b'\\n', 1)[0]\n",
" return (before + after + b'\\n' + b' ' * len(before.expandtabs()) + b'^\\n').decode(\"ascii\", \"backslashreplace\")\n",
"\n",
" def match_examples(self, parse_fn: 'Callable[[str], Tree]',\n",
" examples: Union[Mapping[T, Iterable[str]], Iterable[Tuple[T, Iterable[str]]]],\n",
" token_type_match_fallback: bool=False,\n",
" use_accepts: bool=True\n",
" ) -> Optional[T]:\n",
" #--\n",
" assert self.state is not None, \"Not supported for this exception\"\n",
"\n",
" if isinstance(examples, Mapping):\n",
" examples = examples.items()\n",
"\n",
" candidate = (None, False)\n",
" for i, (label, example) in enumerate(examples):\n",
" assert not isinstance(example, str), \"Expecting a list\"\n",
"\n",
" for j, malformed in enumerate(example):\n",
" try:\n",
" parse_fn(malformed)\n",
" except UnexpectedInput as ut:\n",
" if ut.state == self.state:\n",
" if (\n",
" use_accepts\n",
" and isinstance(self, UnexpectedToken)\n",
" and isinstance(ut, UnexpectedToken)\n",
" and ut.accepts != self.accepts\n",
" ):\n",
" logger.debug(\"Different accepts with same state[%d]: %s != %s at example [%s][%s]\" %\n",
" (self.state, self.accepts, ut.accepts, i, j))\n",
" continue\n",
" if (\n",
" isinstance(self, (UnexpectedToken, UnexpectedEOF))\n",
" and isinstance(ut, (UnexpectedToken, UnexpectedEOF))\n",
" ):\n",
" if ut.token == self.token: ##\n",
"\n",
" logger.debug(\"Exact Match at example [%s][%s]\" % (i, j))\n",
" return label\n",
"\n",
" if token_type_match_fallback:\n",
" ##\n",
"\n",
" if (ut.token.type == self.token.type) and not candidate[-1]:\n",
" logger.debug(\"Token Type Fallback at example [%s][%s]\" % (i, j))\n",
" candidate = label, True\n",
"\n",
" if candidate[0] is None:\n",
" logger.debug(\"Same State match at example [%s][%s]\" % (i, j))\n",
" candidate = label, False\n",
"\n",
" return candidate[0]\n",
"\n",
" def _format_expected(self, expected):\n",
" if self._terminals_by_name:\n",
" d = self._terminals_by_name\n",
" expected = [d[t_name].user_repr() if t_name in d else t_name for t_name in expected]\n",
" return \"Expected one of: \\n\\t* %s\\n\" % '\\n\\t* '.join(expected)\n",
"\n",
"\n",
"class UnexpectedEOF(ParseError, UnexpectedInput):\n",
" #--\n",
" expected: 'List[Token]'\n",
"\n",
" def __init__(self, expected, state=None, terminals_by_name=None):\n",
" super(UnexpectedEOF, self).__init__()\n",
"\n",
" self.expected = expected\n",
" self.state = state\n",
" from .lexer import Token\n",
" self.token = Token(\"<EOF>\", \"\") ##\n",
"\n",
" self.pos_in_stream = -1\n",
" self.line = -1\n",
" self.column = -1\n",
" self._terminals_by_name = terminals_by_name\n",
"\n",
"\n",
" def __str__(self):\n",
" message = \"Unexpected end-of-input. \"\n",
" message += self._format_expected(self.expected)\n",
" return message\n",
"\n",
"\n",
"class UnexpectedCharacters(LexError, UnexpectedInput):\n",
" #--\n",
"\n",
" allowed: Set[str]\n",
" considered_tokens: Set[Any]\n",
"\n",
" def __init__(self, seq, lex_pos, line, column, allowed=None, considered_tokens=None, state=None, token_history=None,\n",
" terminals_by_name=None, considered_rules=None):\n",
" super(UnexpectedCharacters, self).__init__()\n",
"\n",
" ##\n",
"\n",
" self.line = line\n",
" self.column = column\n",
" self.pos_in_stream = lex_pos\n",
" self.state = state\n",
" self._terminals_by_name = terminals_by_name\n",
"\n",
" self.allowed = allowed\n",
" self.considered_tokens = considered_tokens\n",
" self.considered_rules = considered_rules\n",
" self.token_history = token_history\n",
"\n",
" if isinstance(seq, bytes):\n",
" self.char = seq[lex_pos:lex_pos + 1].decode(\"ascii\", \"backslashreplace\")\n",
" else:\n",
" self.char = seq[lex_pos]\n",
" self._context = self.get_context(seq)\n",
"\n",
"\n",
" def __str__(self):\n",
" message = \"No terminal matches '%s' in the current parser context, at line %d col %d\" % (self.char, self.line, self.column)\n",
" message += '\\n\\n' + self._context\n",
" if self.allowed:\n",
" message += self._format_expected(self.allowed)\n",
" if self.token_history:\n",
" message += '\\nPrevious tokens: %s\\n' % ', '.join(repr(t) for t in self.token_history)\n",
" return message\n",
"\n",
"\n",
"class UnexpectedToken(ParseError, UnexpectedInput):\n",
" #--\n",
"\n",
" expected: Set[str]\n",
" considered_rules: Set[str]\n",
"\n",
" def __init__(self, token, expected, considered_rules=None, state=None, interactive_parser=None, terminals_by_name=None, token_history=None):\n",
" super(UnexpectedToken, self).__init__()\n",
"\n",
" ##\n",
"\n",
" self.line = getattr(token, 'line', '?')\n",
" self.column = getattr(token, 'column', '?')\n",
" self.pos_in_stream = getattr(token, 'start_pos', None)\n",
" self.state = state\n",
"\n",
" self.token = token\n",
" self.expected = expected ##\n",
"\n",
" self._accepts = NO_VALUE\n",
" self.considered_rules = considered_rules\n",
" self.interactive_parser = interactive_parser\n",
" self._terminals_by_name = terminals_by_name\n",
" self.token_history = token_history\n",
"\n",
"\n",
" @property\n",
" def accepts(self) -> Set[str]:\n",
" if self._accepts is NO_VALUE:\n",
" self._accepts = self.interactive_parser and self.interactive_parser.accepts()\n",
" return self._accepts\n",
"\n",
" def __str__(self):\n",
" message = (\"Unexpected token %r at line %s, column %s.\\n%s\"\n",
" % (self.token, self.line, self.column, self._format_expected(self.accepts or self.expected)))\n",
" if self.token_history:\n",
" message += \"Previous tokens: %r\\n\" % self.token_history\n",
"\n",
" return message\n",
"\n",
"\n",
"\n",
"class VisitError(LarkError):\n",
" #--\n",
"\n",
" obj: 'Union[Tree, Token]'\n",
" orig_exc: Exception\n",
"\n",
" def __init__(self, rule, obj, orig_exc):\n",
" message = 'Error trying to process rule \"%s\":\\n\\n%s' % (rule, orig_exc)\n",
" super(VisitError, self).__init__(message)\n",
"\n",
" self.rule = rule\n",
" self.obj = obj\n",
" self.orig_exc = orig_exc\n",
"\n",
"\n",
"class MissingVariableError(LarkError):\n",
" pass\n",
"\n",
"\n",
"import sys, re\n",
"import logging\n",
"\n",
"logger: logging.Logger = logging.getLogger(\"lark\")\n",
"logger.addHandler(logging.StreamHandler())\n",
"##\n",
"\n",
"##\n",
"\n",
"logger.setLevel(logging.CRITICAL)\n",
"\n",
"\n",
"NO_VALUE = object()\n",
"\n",
"T = TypeVar(\"T\")\n",
"\n",
"\n",
"def classify(seq: Iterable, key: Optional[Callable] = None, value: Optional[Callable] = None) -> Dict:\n",
" d: Dict[Any, Any] = {}\n",
" for item in seq:\n",
" k = key(item) if (key is not None) else item\n",
" v = value(item) if (value is not None) else item\n",
" try:\n",
" d[k].append(v)\n",
" except KeyError:\n",
" d[k] = [v]\n",
" return d\n",
"\n",
"\n",
"def _deserialize(data: Any, namespace: Dict[str, Any], memo: Dict) -> Any:\n",
" if isinstance(data, dict):\n",
" if '__type__' in data: ##\n",
"\n",
" class_ = namespace[data['__type__']]\n",
" return class_.deserialize(data, memo)\n",
" elif '@' in data:\n",
" return memo[data['@']]\n",
" return {key:_deserialize(value, namespace, memo) for key, value in data.items()}\n",
" elif isinstance(data, list):\n",
" return [_deserialize(value, namespace, memo) for value in data]\n",
" return data\n",
"\n",
"\n",
"_T = TypeVar(\"_T\", bound=\"Serialize\")\n",
"\n",
"class Serialize:\n",
" #--\n",
"\n",
" def memo_serialize(self, types_to_memoize: List) -> Any:\n",
" memo = SerializeMemoizer(types_to_memoize)\n",
" return self.serialize(memo), memo.serialize()\n",
"\n",
" def serialize(self, memo = None) -> Dict[str, Any]:\n",
" if memo and memo.in_types(self):\n",
" return {'@': memo.memoized.get(self)}\n",
"\n",
" fields = getattr(self, '__serialize_fields__')\n",
" res = {f: _serialize(getattr(self, f), memo) for f in fields}\n",
" res['__type__'] = type(self).__name__\n",
" if hasattr(self, '_serialize'):\n",
" self._serialize(res, memo) ##\n",
"\n",
" return res\n",
"\n",
" @classmethod\n",
" def deserialize(cls: Type[_T], data: Dict[str, Any], memo: Dict[int, Any]) -> _T:\n",
" namespace = getattr(cls, '__serialize_namespace__', [])\n",
" namespace = {c.__name__:c for c in namespace}\n",
"\n",
" fields = getattr(cls, '__serialize_fields__')\n",
"\n",
" if '@' in data:\n",
" return memo[data['@']]\n",
"\n",
" inst = cls.__new__(cls)\n",
" for f in fields:\n",
" try:\n",
" setattr(inst, f, _deserialize(data[f], namespace, memo))\n",
" except KeyError as e:\n",
" raise KeyError(\"Cannot find key for class\", cls, e)\n",
"\n",
" if hasattr(inst, '_deserialize'):\n",
" inst._deserialize() ##\n",
"\n",
"\n",
" return inst\n",
"\n",
"\n",
"class SerializeMemoizer(Serialize):\n",
" #--\n",
"\n",
" __serialize_fields__ = 'memoized',\n",
"\n",
" def __init__(self, types_to_memoize: List) -> None:\n",
" self.types_to_memoize = tuple(types_to_memoize)\n",
" self.memoized = Enumerator()\n",
"\n",
" def in_types(self, value: Serialize) -> bool:\n",
" return isinstance(value, self.types_to_memoize)\n",
"\n",
" def serialize(self) -> Dict[int, Any]: ##\n",
"\n",
" return _serialize(self.memoized.reversed(), None)\n",
"\n",
" @classmethod\n",
" def deserialize(cls, data: Dict[int, Any], namespace: Dict[str, Any], memo: Dict[Any, Any]) -> Dict[int, Any]: ##\n",
"\n",
" return _deserialize(data, namespace, memo)\n",
"\n",
"\n",
"try:\n",
" import regex\n",
" _has_regex = True\n",
"except ImportError:\n",
" _has_regex = False\n",
"\n",
"if sys.version_info >= (3, 11):\n",
" import re._parser as sre_parse\n",
" import re._constants as sre_constants\n",
"else:\n",
" import sre_parse\n",
" import sre_constants\n",
"\n",
"categ_pattern = re.compile(r'\\\\p{[A-Za-z_]+}')\n",
"\n",
"def get_regexp_width(expr: str) -> Union[Tuple[int, int], List[int]]:\n",
" if _has_regex:\n",
" ##\n",
"\n",
" ##\n",
"\n",
" ##\n",
"\n",
" regexp_final = re.sub(categ_pattern, 'A', expr)\n",
" else:\n",
" if re.search(categ_pattern, expr):\n",
" raise ImportError('`regex` module must be installed in order to use Unicode categories.', expr)\n",
" regexp_final = expr\n",
" try:\n",
" ##\n",
"\n",
" return [int(x) for x in sre_parse.parse(regexp_final).getwidth()] ##\n",
"\n",
" except sre_constants.error:\n",
" if not _has_regex:\n",
" raise ValueError(expr)\n",
" else:\n",
" ##\n",
"\n",
" ##\n",
"\n",
" c = regex.compile(regexp_final)\n",
" ##\n",
"\n",
" ##\n",
"\n",
" MAXWIDTH = getattr(sre_parse, \"MAXWIDTH\", sre_constants.MAXREPEAT)\n",
" if c.match('') is None:\n",
" ##\n",
"\n",
" return 1, int(MAXWIDTH)\n",
" else:\n",
" return 0, int(MAXWIDTH)\n",
"\n",
"\n",
"from collections import OrderedDict\n",
"\n",
"class Meta:\n",
"\n",
" empty: bool\n",
" line: int\n",
" column: int\n",
" start_pos: int\n",
" end_line: int\n",
" end_column: int\n",
" end_pos: int\n",
" orig_expansion: 'List[TerminalDef]'\n",
" match_tree: bool\n",
"\n",
" def __init__(self):\n",
" self.empty = True\n",
"\n",
"\n",
"_Leaf_T = TypeVar(\"_Leaf_T\")\n",
"Branch = Union[_Leaf_T, 'Tree[_Leaf_T]']\n",
"\n",
"\n",
"class Tree(Generic[_Leaf_T]):\n",
" #--\n",
"\n",
" data: str\n",
" children: 'List[Branch[_Leaf_T]]'\n",
"\n",
" def __init__(self, data: str, children: 'List[Branch[_Leaf_T]]', meta: Optional[Meta]=None) -> None:\n",
" self.data = data\n",
" self.children = children\n",
" self._meta = meta\n",
"\n",
" @property\n",
" def meta(self) -> Meta:\n",
" if self._meta is None:\n",
" self._meta = Meta()\n",
" return self._meta\n",
"\n",
" def __repr__(self):\n",
" return 'Tree(%r, %r)' % (self.data, self.children)\n",
"\n",
" def _pretty_label(self):\n",
" return self.data\n",
"\n",
" def _pretty(self, level, indent_str):\n",
" yield f'{indent_str*level}{self._pretty_label()}'\n",
" if len(self.children) == 1 and not isinstance(self.children[0], Tree):\n",
" yield f'\\t{self.children[0]}\\n'\n",
" else:\n",
" yield '\\n'\n",
" for n in self.children:\n",
" if isinstance(n, Tree):\n",
" yield from n._pretty(level+1, indent_str)\n",
" else:\n",
" yield f'{indent_str*(level+1)}{n}\\n'\n",
"\n",
" def pretty(self, indent_str: str=' ') -> str:\n",
" #--\n",
" return ''.join(self._pretty(0, indent_str))\n",
"\n",
" def __rich__(self, parent:Optional['rich.tree.Tree']=None) -> 'rich.tree.Tree':\n",
" #--\n",
" return self._rich(parent)\n",
"\n",
" def _rich(self, parent):\n",
" if parent:\n",
" tree = parent.add(f'[bold]{self.data}[/bold]')\n",
" else:\n",
" import rich.tree\n",
" tree = rich.tree.Tree(self.data)\n",
"\n",
" for c in self.children:\n",
" if isinstance(c, Tree):\n",
" c._rich(tree)\n",
" else:\n",
" tree.add(f'[green]{c}[/green]')\n",
"\n",
" return tree\n",
"\n",
" def __eq__(self, other):\n",
" try:\n",
" return self.data == other.data and self.children == other.children\n",
" except AttributeError:\n",
" return False\n",
"\n",
" def __ne__(self, other):\n",
" return not (self == other)\n",
"\n",
" def __hash__(self) -> int:\n",
" return hash((self.data, tuple(self.children)))\n",
"\n",
" def iter_subtrees(self) -> 'Iterator[Tree[_Leaf_T]]':\n",
" #--\n",
" queue = [self]\n",
" subtrees = OrderedDict()\n",
" for subtree in queue:\n",
" subtrees[id(subtree)] = subtree\n",
" ##\n",
"\n",
" queue += [c for c in reversed(subtree.children) ##\n",
"\n",
" if isinstance(c, Tree) and id(c) not in subtrees]\n",
"\n",
" del queue\n",
" return reversed(list(subtrees.values()))\n",
"\n",
" def iter_subtrees_topdown(self):\n",
" #--\n",
" stack = [self]\n",
" stack_append = stack.append\n",
" stack_pop = stack.pop\n",
" while stack:\n",
" node = stack_pop()\n",
" if not isinstance(node, Tree):\n",
" continue\n",
" yield node\n",
" for child in reversed(node.children):\n",
" stack_append(child)\n",
"\n",
" def find_pred(self, pred: 'Callable[[Tree[_Leaf_T]], bool]') -> 'Iterator[Tree[_Leaf_T]]':\n",
" #--\n",
" return filter(pred, self.iter_subtrees())\n",
"\n",
" def find_data(self, data: str) -> 'Iterator[Tree[_Leaf_T]]':\n",
" #--\n",
" return self.find_pred(lambda t: t.data == data)\n",
"\n",
"\n",
"from functools import wraps, update_wrapper\n",
"from inspect import getmembers, getmro\n",
"\n",
"_Return_T = TypeVar('_Return_T')\n",
"_Return_V = TypeVar('_Return_V')\n",
"_Leaf_T = TypeVar('_Leaf_T')\n",
"_Leaf_U = TypeVar('_Leaf_U')\n",
"_R = TypeVar('_R')\n",
"_FUNC = Callable[..., _Return_T]\n",
"_DECORATED = Union[_FUNC, type]\n",
"\n",
"class _DiscardType:\n",
" #--\n",
"\n",
" def __repr__(self):\n",
" return \"lark.visitors.Discard\"\n",
"\n",
"Discard = _DiscardType()\n",
"\n",
"##\n",
"\n",
"\n",
"class _Decoratable:\n",
" #--\n",
"\n",
" @classmethod\n",
" def _apply_v_args(cls, visit_wrapper):\n",
" mro = getmro(cls)\n",
" assert mro[0] is cls\n",
" libmembers = {name for _cls in mro[1:] for name, _ in getmembers(_cls)}\n",
" for name, value in getmembers(cls):\n",
"\n",
" ##\n",
"\n",
" if name.startswith('_') or (name in libmembers and name not in cls.__dict__):\n",
" continue\n",
" if not callable(value):\n",
" continue\n",
"\n",
" ##\n",
"\n",
" if isinstance(cls.__dict__[name], _VArgsWrapper):\n",
" continue\n",
"\n",
" setattr(cls, name, _VArgsWrapper(cls.__dict__[name], visit_wrapper))\n",
" return cls\n",
"\n",
" def __class_getitem__(cls, _):\n",
" return cls\n",
"\n",
"\n",
"class Transformer(_Decoratable, ABC, Generic[_Leaf_T, _Return_T]):\n",
" #--\n",
" __visit_tokens__ = True ##\n",
"\n",
"\n",
" def __init__(self, visit_tokens: bool=True) -> None:\n",
" self.__visit_tokens__ = visit_tokens\n",
"\n",
" def _call_userfunc(self, tree, new_children=None):\n",
" ##\n",
"\n",
" children = new_children if new_children is not None else tree.children\n",
" try:\n",
" f = getattr(self, tree.data)\n",
" except AttributeError:\n",
" return self.__default__(tree.data, children, tree.meta)\n",
" else:\n",
" try:\n",
" wrapper = getattr(f, 'visit_wrapper', None)\n",
" if wrapper is not None:\n",
" return f.visit_wrapper(f, tree.data, children, tree.meta)\n",
" else:\n",
" return f(children)\n",
" except GrammarError:\n",
" raise\n",
" except Exception as e:\n",
" raise VisitError(tree.data, tree, e)\n",
"\n",
" def _call_userfunc_token(self, token):\n",
" try:\n",
" f = getattr(self, token.type)\n",
" except AttributeError:\n",
" return self.__default_token__(token)\n",
" else:\n",
" try:\n",
" return f(token)\n",
" except GrammarError:\n",
" raise\n",
" except Exception as e:\n",
" raise VisitError(token.type, token, e)\n",
"\n",
" def _transform_children(self, children):\n",
" for c in children:\n",
" if isinstance(c, Tree):\n",
" res = self._transform_tree(c)\n",
" elif self.__visit_tokens__ and isinstance(c, Token):\n",
" res = self._call_userfunc_token(c)\n",
" else:\n",
" res = c\n",
"\n",
" if res is not Discard:\n",
" yield res\n",
"\n",
" def _transform_tree(self, tree):\n",
" children = list(self._transform_children(tree.children))\n",
" return self._call_userfunc(tree, children)\n",
"\n",
" def transform(self, tree: Tree[_Leaf_T]) -> _Return_T:\n",
" #--\n",
" return self._transform_tree(tree)\n",
"\n",
" def __mul__(\n",
" self: 'Transformer[_Leaf_T, Tree[_Leaf_U]]',\n",
" other: 'Union[Transformer[_Leaf_U, _Return_V], TransformerChain[_Leaf_U, _Return_V,]]'\n",
" ) -> 'TransformerChain[_Leaf_T, _Return_V]':\n",
" #--\n",
" return TransformerChain(self, other)\n",
"\n",
" def __default__(self, data, children, meta):\n",
" #--\n",
" return Tree(data, children, meta)\n",
"\n",
" def __default_token__(self, token):\n",
" #--\n",
" return token\n",
"\n",
"\n",
"def merge_transformers(base_transformer=None, **transformers_to_merge):\n",
" #--\n",
" if base_transformer is None:\n",
" base_transformer = Transformer()\n",
" for prefix, transformer in transformers_to_merge.items():\n",
" for method_name in dir(transformer):\n",
" method = getattr(transformer, method_name)\n",
" if not callable(method):\n",
" continue\n",
" if method_name.startswith(\"_\") or method_name == \"transform\":\n",
" continue\n",
" prefixed_method = prefix + \"__\" + method_name\n",
" if hasattr(base_transformer, prefixed_method):\n",
" raise AttributeError(\"Cannot merge: method '%s' appears more than once\" % prefixed_method)\n",
"\n",
" setattr(base_transformer, prefixed_method, method)\n",
"\n",
" return base_transformer\n",
"\n",
"\n",
"class InlineTransformer(Transformer): ##\n",
"\n",
" def _call_userfunc(self, tree, new_children=None):\n",
" ##\n",
"\n",
" children = new_children if new_children is not None else tree.children\n",
" try:\n",
" f = getattr(self, tree.data)\n",
" except AttributeError:\n",
" return self.__default__(tree.data, children, tree.meta)\n",
" else:\n",
" return f(*children)\n",
"\n",
"\n",
"class TransformerChain(Generic[_Leaf_T, _Return_T]):\n",
"\n",
" transformers: 'Tuple[Union[Transformer, TransformerChain], ...]'\n",
"\n",
" def __init__(self, *transformers: 'Union[Transformer, TransformerChain]') -> None:\n",
" self.transformers = transformers\n",
"\n",
" def transform(self, tree: Tree[_Leaf_T]) -> _Return_T:\n",
" for t in self.transformers:\n",
" tree = t.transform(tree)\n",
" return cast(_Return_T, tree)\n",
"\n",
" def __mul__(\n",
" self: 'TransformerChain[_Leaf_T, Tree[_Leaf_U]]',\n",
" other: 'Union[Transformer[_Leaf_U, _Return_V], TransformerChain[_Leaf_U, _Return_V]]'\n",
" ) -> 'TransformerChain[_Leaf_T, _Return_V]':\n",
" return TransformerChain(*self.transformers + (other,))\n",
"\n",
"\n",
"class Transformer_InPlace(Transformer[_Leaf_T, _Return_T]):\n",
" #--\n",
" def _transform_tree(self, tree): ##\n",
"\n",
" return self._call_userfunc(tree)\n",
"\n",
" def transform(self, tree: Tree[_Leaf_T]) -> _Return_T:\n",
" for subtree in tree.iter_subtrees():\n",
" subtree.children = list(self._transform_children(subtree.children))\n",
"\n",
" return self._transform_tree(tree)\n",
"\n",
"\n",
"class Transformer_NonRecursive(Transformer[_Leaf_T, _Return_T]):\n",
" #--\n",
"\n",
" def transform(self, tree: Tree[_Leaf_T]) -> _Return_T:\n",
" ##\n",
"\n",
" rev_postfix = []\n",
" q: List[Branch[_Leaf_T]] = [tree]\n",
" while q:\n",
" t = q.pop()\n",
" rev_postfix.append(t)\n",
" if isinstance(t, Tree):\n",
" q += t.children\n",
"\n",
" ##\n",
"\n",
" stack: List = []\n",
" for x in reversed(rev_postfix):\n",
" if isinstance(x, Tree):\n",
" size = len(x.children)\n",
" if size:\n",
" args = stack[-size:]\n",
" del stack[-size:]\n",
" else:\n",
" args = []\n",
"\n",
" res = self._call_userfunc(x, args)\n",
" if res is not Discard:\n",
" stack.append(res)\n",
"\n",
" elif self.__visit_tokens__ and isinstance(x, Token):\n",
" res = self._call_userfunc_token(x)\n",
" if res is not Discard:\n",
" stack.append(res)\n",
" else:\n",
" stack.append(x)\n",
"\n",
" result, = stack ##\n",
"\n",
" ##\n",
"\n",
" ##\n",
"\n",
" ##\n",
"\n",
" return cast(_Return_T, result)\n",
"\n",
"\n",
"class Transformer_InPlaceRecursive(Transformer):\n",
" #--\n",
" def _transform_tree(self, tree):\n",
" tree.children = list(self._transform_children(tree.children))\n",
" return self._call_userfunc(tree)\n",
"\n",
"\n",
"##\n",
"\n",
"\n",
"class VisitorBase:\n",
" def _call_userfunc(self, tree):\n",
" return getattr(self, tree.data, self.__default__)(tree)\n",
"\n",
" def __default__(self, tree):\n",
" #--\n",
" return tree\n",
"\n",
" def __class_getitem__(cls, _):\n",
" return cls\n",
"\n",
"\n",
"class Visitor(VisitorBase, ABC, Generic[_Leaf_T]):\n",
" #--\n",
"\n",
" def visit(self, tree: Tree[_Leaf_T]) -> Tree[_Leaf_T]:\n",
" #--\n",
" for subtree in tree.iter_subtrees():\n",
" self._call_userfunc(subtree)\n",
" return tree\n",
"\n",
" def visit_topdown(self, tree: Tree[_Leaf_T]) -> Tree[_Leaf_T]:\n",
" #--\n",
" for subtree in tree.iter_subtrees_topdown():\n",
" self._call_userfunc(subtree)\n",
" return tree\n",
"\n",
"\n",
"class Visitor_Recursive(VisitorBase, Generic[_Leaf_T]):\n",
" #--\n",
"\n",
" def visit(self, tree: Tree[_Leaf_T]) -> Tree[_Leaf_T]:\n",
" #--\n",
" for child in tree.children:\n",
" if isinstance(child, Tree):\n",
" self.visit(child)\n",
"\n",
" self._call_userfunc(tree)\n",
" return tree\n",
"\n",
" def visit_topdown(self,tree: Tree[_Leaf_T]) -> Tree[_Leaf_T]:\n",
" #--\n",
" self._call_userfunc(tree)\n",
"\n",
" for child in tree.children:\n",
" if isinstance(child, Tree):\n",
" self.visit_topdown(child)\n",
"\n",
" return tree\n",
"\n",
"\n",
"class Interpreter(_Decoratable, ABC, Generic[_Leaf_T, _Return_T]):\n",
" #--\n",
"\n",
" def visit(self, tree: Tree[_Leaf_T]) -> _Return_T:\n",
" ##\n",
"\n",
" ##\n",
"\n",
" ##\n",
"\n",
" return self._visit_tree(tree)\n",
"\n",
" def _visit_tree(self, tree: Tree[_Leaf_T]):\n",
" f = getattr(self, tree.data)\n",
" wrapper = getattr(f, 'visit_wrapper', None)\n",
" if wrapper is not None:\n",
" return f.visit_wrapper(f, tree.data, tree.children, tree.meta)\n",
" else:\n",
" return f(tree)\n",
"\n",
" def visit_children(self, tree: Tree[_Leaf_T]) -> List:\n",
" return [self._visit_tree(child) if isinstance(child, Tree) else child\n",
" for child in tree.children]\n",
"\n",
" def __getattr__(self, name):\n",
" return self.__default__\n",
"\n",
" def __default__(self, tree):\n",
" return self.visit_children(tree)\n",
"\n",
"\n",
"_InterMethod = Callable[[Type[Interpreter], _Return_T], _R]\n",
"\n",
"def visit_children_decor(func: _InterMethod) -> _InterMethod:\n",
" #--\n",
" @wraps(func)\n",
" def inner(cls, tree):\n",
" values = cls.visit_children(tree)\n",
" return func(cls, values)\n",
" return inner\n",
"\n",
"##\n",
"\n",
"\n",
"def _apply_v_args(obj, visit_wrapper):\n",
" try:\n",
" _apply = obj._apply_v_args\n",
" except AttributeError:\n",
" return _VArgsWrapper(obj, visit_wrapper)\n",
" else:\n",
" return _apply(visit_wrapper)\n",
"\n",
"\n",
"class _VArgsWrapper:\n",
" #--\n",
" base_func: Callable\n",
"\n",
" def __init__(self, func: Callable, visit_wrapper: Callable[[Callable, str, list, Any], Any]):\n",
" if isinstance(func, _VArgsWrapper):\n",
" func = func.base_func\n",
" ##\n",
"\n",
" self.base_func = func ##\n",
"\n",
" self.visit_wrapper = visit_wrapper\n",
" update_wrapper(self, func)\n",
"\n",
" def __call__(self, *args, **kwargs):\n",
" return self.base_func(*args, **kwargs)\n",
"\n",
" def __get__(self, instance, owner=None):\n",
" try:\n",
" ##\n",
"\n",
" ##\n",
"\n",
" g = type(self.base_func).__get__\n",
" except AttributeError:\n",
" return self\n",
" else:\n",
" return _VArgsWrapper(g(self.base_func, instance, owner), self.visit_wrapper)\n",
"\n",
" def __set_name__(self, owner, name):\n",
" try:\n",
" f = type(self.base_func).__set_name__\n",
" except AttributeError:\n",
" return\n",
" else:\n",
" f(self.base_func, owner, name)\n",
"\n",
"\n",
"def _vargs_inline(f, _data, children, _meta):\n",
" return f(*children)\n",
"def _vargs_meta_inline(f, _data, children, meta):\n",
" return f(meta, *children)\n",
"def _vargs_meta(f, _data, children, meta):\n",
" return f(meta, children)\n",
"def _vargs_tree(f, data, children, meta):\n",
" return f(Tree(data, children, meta))\n",
"\n",
"\n",
"def v_args(inline: bool = False, meta: bool = False, tree: bool = False, wrapper: Optional[Callable] = None) -> Callable[[_DECORATED], _DECORATED]:\n",
" #--\n",
" if tree and (meta or inline):\n",
" raise ValueError(\"Visitor functions cannot combine 'tree' with 'meta' or 'inline'.\")\n",
"\n",
" func = None\n",
" if meta:\n",
" if inline:\n",
" func = _vargs_meta_inline\n",
" else:\n",
" func = _vargs_meta\n",
" elif inline:\n",
" func = _vargs_inline\n",
" elif tree:\n",
" func = _vargs_tree\n",
"\n",
" if wrapper is not None:\n",
" if func is not None:\n",
" raise ValueError(\"Cannot use 'wrapper' along with 'tree', 'meta' or 'inline'.\")\n",
" func = wrapper\n",
"\n",
" def _visitor_args_dec(obj):\n",
" return _apply_v_args(obj, func)\n",
" return _visitor_args_dec\n",
"\n",
"\n",
"\n",
"TOKEN_DEFAULT_PRIORITY = 0\n",
"\n",
"\n",
"class Symbol(Serialize):\n",
" __slots__ = ('name',)\n",
"\n",
" name: str\n",
" is_term: ClassVar[bool] = NotImplemented\n",
"\n",
" def __init__(self, name: str) -> None:\n",
" self.name = name\n",
"\n",
" def __eq__(self, other):\n",
" assert isinstance(other, Symbol), other\n",
" return self.is_term == other.is_term and self.name == other.name\n",
"\n",
" def __ne__(self, other):\n",
" return not (self == other)\n",
"\n",
" def __hash__(self):\n",
" return hash(self.name)\n",
"\n",
" def __repr__(self):\n",
" return '%s(%r)' % (type(self).__name__, self.name)\n",
"\n",
" fullrepr = property(__repr__)\n",
"\n",
" def renamed(self, f):\n",
" return type(self)(f(self.name))\n",
"\n",
"\n",
"class Terminal(Symbol):\n",
" __serialize_fields__ = 'name', 'filter_out'\n",
"\n",
" is_term: ClassVar[bool] = True\n",
"\n",
" def __init__(self, name, filter_out=False):\n",
" self.name = name\n",
" self.filter_out = filter_out\n",
"\n",
" @property\n",
" def fullrepr(self):\n",
" return '%s(%r, %r)' % (type(self).__name__, self.name, self.filter_out)\n",
"\n",
" def renamed(self, f):\n",
" return type(self)(f(self.name), self.filter_out)\n",
"\n",
"\n",
"class NonTerminal(Symbol):\n",
" __serialize_fields__ = 'name',\n",
"\n",
" is_term: ClassVar[bool] = False\n",
"\n",
"\n",
"class RuleOptions(Serialize):\n",
" __serialize_fields__ = 'keep_all_tokens', 'expand1', 'priority', 'template_source', 'empty_indices'\n",
"\n",
" keep_all_tokens: bool\n",
" expand1: bool\n",
" priority: Optional[int]\n",
" template_source: Optional[str]\n",
" empty_indices: Tuple[bool, ...]\n",
"\n",
" def __init__(self, keep_all_tokens: bool=False, expand1: bool=False, priority: Optional[int]=None, template_source: Optional[str]=None, empty_indices: Tuple[bool, ...]=()) -> None:\n",
" self.keep_all_tokens = keep_all_tokens\n",
" self.expand1 = expand1\n",
" self.priority = priority\n",
" self.template_source = template_source\n",
" self.empty_indices = empty_indices\n",
"\n",
" def __repr__(self):\n",
" return 'RuleOptions(%r, %r, %r, %r)' % (\n",
" self.keep_all_tokens,\n",
" self.expand1,\n",
" self.priority,\n",
" self.template_source\n",
" )\n",
"\n",
"\n",
"class Rule(Serialize):\n",
" #--\n",
" __slots__ = ('origin', 'expansion', 'alias', 'options', 'order', '_hash')\n",
"\n",
" __serialize_fields__ = 'origin', 'expansion', 'order', 'alias', 'options'\n",
" __serialize_namespace__ = Terminal, NonTerminal, RuleOptions\n",
"\n",
" origin: NonTerminal\n",
" expansion: Sequence[Symbol]\n",
" order: int\n",
" alias: Optional[str]\n",
" options: RuleOptions\n",
" _hash: int\n",
"\n",
" def __init__(self, origin: NonTerminal, expansion: Sequence[Symbol],\n",
" order: int=0, alias: Optional[str]=None, options: Optional[RuleOptions]=None):\n",
" self.origin = origin\n",
" self.expansion = expansion\n",
" self.alias = alias\n",
" self.order = order\n",
" self.options = options or RuleOptions()\n",
" self._hash = hash((self.origin, tuple(self.expansion)))\n",
"\n",
" def _deserialize(self):\n",
" self._hash = hash((self.origin, tuple(self.expansion)))\n",
"\n",
" def __str__(self):\n",
" return '<%s : %s>' % (self.origin.name, ' '.join(x.name for x in self.expansion))\n",
"\n",
" def __repr__(self):\n",
" return 'Rule(%r, %r, %r, %r)' % (self.origin, self.expansion, self.alias, self.options)\n",
"\n",
" def __hash__(self):\n",
" return self._hash\n",
"\n",
" def __eq__(self, other):\n",
" if not isinstance(other, Rule):\n",
" return False\n",
" return self.origin == other.origin and self.expansion == other.expansion\n",
"\n",
"\n",
"\n",
"from copy import copy\n",
"\n",
"try: ##\n",
"\n",
" has_interegular = bool(interegular)\n",
"except NameError:\n",
" has_interegular = False\n",
"\n",
"class Pattern(Serialize, ABC):\n",
" #--\n",
"\n",
" value: str\n",
" flags: Collection[str]\n",
" raw: Optional[str]\n",
" type: ClassVar[str]\n",
"\n",
" def __init__(self, value: str, flags: Collection[str] = (), raw: Optional[str] = None) -> None:\n",
" self.value = value\n",
" self.flags = frozenset(flags)\n",
" self.raw = raw\n",
"\n",
" def __repr__(self):\n",
" return repr(self.to_regexp())\n",
"\n",
" ##\n",
"\n",
" def __hash__(self):\n",
" return hash((type(self), self.value, self.flags))\n",
"\n",
" def __eq__(self, other):\n",
" return type(self) == type(other) and self.value == other.value and self.flags == other.flags\n",
"\n",
" @abstractmethod\n",
" def to_regexp(self) -> str:\n",
" raise NotImplementedError()\n",
"\n",
" @property\n",
" @abstractmethod\n",
" def min_width(self) -> int:\n",
" raise NotImplementedError()\n",
"\n",
" @property\n",
" @abstractmethod\n",
" def max_width(self) -> int:\n",
" raise NotImplementedError()\n",
"\n",
" def _get_flags(self, value):\n",
" for f in self.flags:\n",
" value = ('(?%s:%s)' % (f, value))\n",
" return value\n",
"\n",
"\n",
"class PatternStr(Pattern):\n",
" __serialize_fields__ = 'value', 'flags', 'raw'\n",
"\n",
" type: ClassVar[str] = \"str\"\n",
"\n",
" def to_regexp(self) -> str:\n",
" return self._get_flags(re.escape(self.value))\n",
"\n",
" @property\n",
" def min_width(self) -> int:\n",
" return len(self.value)\n",
"\n",
" @property\n",
" def max_width(self) -> int:\n",
" return len(self.value)\n",
"\n",
"\n",
"class PatternRE(Pattern):\n",
" __serialize_fields__ = 'value', 'flags', 'raw', '_width'\n",
"\n",
" type: ClassVar[str] = \"re\"\n",
"\n",
" def to_regexp(self) -> str:\n",
" return self._get_flags(self.value)\n",
"\n",
" _width = None\n",
" def _get_width(self):\n",
" if self._width is None:\n",
" self._width = get_regexp_width(self.to_regexp())\n",
" return self._width\n",
"\n",
" @property\n",
" def min_width(self) -> int:\n",
" return self._get_width()[0]\n",
"\n",
" @property\n",
" def max_width(self) -> int:\n",
" return self._get_width()[1]\n",
"\n",
"\n",
"class TerminalDef(Serialize):\n",
" #--\n",
" __serialize_fields__ = 'name', 'pattern', 'priority'\n",
" __serialize_namespace__ = PatternStr, PatternRE\n",
"\n",
" name: str\n",
" pattern: Pattern\n",
" priority: int\n",
"\n",
" def __init__(self, name: str, pattern: Pattern, priority: int = TOKEN_DEFAULT_PRIORITY) -> None:\n",
" assert isinstance(pattern, Pattern), pattern\n",
" self.name = name\n",
" self.pattern = pattern\n",
" self.priority = priority\n",
"\n",
" def __repr__(self):\n",
" return '%s(%r, %r)' % (type(self).__name__, self.name, self.pattern)\n",
"\n",
" def user_repr(self) -> str:\n",
" if self.name.startswith('__'): ##\n",
"\n",
" return self.pattern.raw or self.name\n",
" else:\n",
" return self.name\n",
"\n",
"_T = TypeVar('_T', bound=\"Token\")\n",
"\n",
"class Token(str):\n",
" #--\n",
" __slots__ = ('type', 'start_pos', 'value', 'line', 'column', 'end_line', 'end_column', 'end_pos')\n",
"\n",
" __match_args__ = ('type', 'value')\n",
"\n",
" type: str\n",
" start_pos: Optional[int]\n",
" value: Any\n",
" line: Optional[int]\n",
" column: Optional[int]\n",
" end_line: Optional[int]\n",
" end_column: Optional[int]\n",
" end_pos: Optional[int]\n",
"\n",
"\n",
" @overload\n",
" def __new__(\n",
" cls,\n",
" type: str,\n",
" value: Any,\n",
" start_pos: Optional[int] = None,\n",
" line: Optional[int] = None,\n",
" column: Optional[int] = None,\n",
" end_line: Optional[int] = None,\n",
" end_column: Optional[int] = None,\n",
" end_pos: Optional[int] = None\n",
" ) -> 'Token':\n",
" ...\n",
"\n",
" @overload\n",
" def __new__(\n",
" cls,\n",
" type_: str,\n",
" value: Any,\n",
" start_pos: Optional[int] = None,\n",
" line: Optional[int] = None,\n",
" column: Optional[int] = None,\n",
" end_line: Optional[int] = None,\n",
" end_column: Optional[int] = None,\n",
" end_pos: Optional[int] = None\n",
" ) -> 'Token': ...\n",
"\n",
" def __new__(cls, *args, **kwargs):\n",
" if \"type_\" in kwargs:\n",
" warnings.warn(\"`type_` is deprecated use `type` instead\", DeprecationWarning)\n",
"\n",
" if \"type\" in kwargs:\n",
" raise TypeError(\"Error: using both 'type' and the deprecated 'type_' as arguments.\")\n",
" kwargs[\"type\"] = kwargs.pop(\"type_\")\n",
"\n",
" return cls._future_new(*args, **kwargs)\n",
"\n",
"\n",
" @classmethod\n",
" def _future_new(cls, type, value, start_pos=None, line=None, column=None, end_line=None, end_column=None, end_pos=None):\n",
" inst = super(Token, cls).__new__(cls, value)\n",
"\n",
" inst.type = type\n",
" inst.start_pos = start_pos\n",
" inst.value = value\n",
" inst.line = line\n",
" inst.column = column\n",
" inst.end_line = end_line\n",
" inst.end_column = end_column\n",
" inst.end_pos = end_pos\n",
" return inst\n",
"\n",
" @overload\n",
" def update(self, type: Optional[str] = None, value: Optional[Any] = None) -> 'Token':\n",
" ...\n",
"\n",
" @overload\n",
" def update(self, type_: Optional[str] = None, value: Optional[Any] = None) -> 'Token':\n",
" ...\n",
"\n",
" def update(self, *args, **kwargs):\n",
" if \"type_\" in kwargs:\n",
" warnings.warn(\"`type_` is deprecated use `type` instead\", DeprecationWarning)\n",
"\n",
" if \"type\" in kwargs:\n",
" raise TypeError(\"Error: using both 'type' and the deprecated 'type_' as arguments.\")\n",
" kwargs[\"type\"] = kwargs.pop(\"type_\")\n",
"\n",
" return self._future_update(*args, **kwargs)\n",
"\n",
" def _future_update(self, type: Optional[str] = None, value: Optional[Any] = None) -> 'Token':\n",
" return Token.new_borrow_pos(\n",
" type if type is not None else self.type,\n",
" value if value is not None else self.value,\n",
" self\n",
" )\n",
"\n",
" @classmethod\n",
" def new_borrow_pos(cls: Type[_T], type_: str, value: Any, borrow_t: 'Token') -> _T:\n",
" return cls(type_, value, borrow_t.start_pos, borrow_t.line, borrow_t.column, borrow_t.end_line, borrow_t.end_column, borrow_t.end_pos)\n",
"\n",
" def __reduce__(self):\n",
" return (self.__class__, (self.type, self.value, self.start_pos, self.line, self.column))\n",
"\n",
" def __repr__(self):\n",
" return 'Token(%r, %r)' % (self.type, self.value)\n",
"\n",
" def __deepcopy__(self, memo):\n",
" return Token(self.type, self.value, self.start_pos, self.line, self.column)\n",
"\n",
" def __eq__(self, other):\n",
" if isinstance(other, Token) and self.type != other.type:\n",
" return False\n",
"\n",
" return str.__eq__(self, other)\n",
"\n",
" __hash__ = str.__hash__\n",
"\n",
"\n",
"class LineCounter:\n",
" #--\n",
"\n",
" __slots__ = 'char_pos', 'line', 'column', 'line_start_pos', 'newline_char'\n",
"\n",
" def __init__(self, newline_char):\n",
" self.newline_char = newline_char\n",
" self.char_pos = 0\n",
" self.line = 1\n",
" self.column = 1\n",
" self.line_start_pos = 0\n",
"\n",
" def __eq__(self, other):\n",
" if not isinstance(other, LineCounter):\n",
" return NotImplemented\n",
"\n",
" return self.char_pos == other.char_pos and self.newline_char == other.newline_char\n",
"\n",
" def feed(self, token: Token, test_newline=True):\n",
" #--\n",
" if test_newline:\n",
" newlines = token.count(self.newline_char)\n",
" if newlines:\n",
" self.line += newlines\n",
" self.line_start_pos = self.char_pos + token.rindex(self.newline_char) + 1\n",
"\n",
" self.char_pos += len(token)\n",
" self.column = self.char_pos - self.line_start_pos + 1\n",
"\n",
"\n",
"class UnlessCallback:\n",
" def __init__(self, scanner):\n",
" self.scanner = scanner\n",
"\n",
" def __call__(self, t):\n",
" res = self.scanner.match(t.value, 0)\n",
" if res:\n",
" _value, t.type = res\n",
" return t\n",
"\n",
"\n",
"class CallChain:\n",
" def __init__(self, callback1, callback2, cond):\n",
" self.callback1 = callback1\n",
" self.callback2 = callback2\n",
" self.cond = cond\n",
"\n",
" def __call__(self, t):\n",
" t2 = self.callback1(t)\n",
" return self.callback2(t) if self.cond(t2) else t2\n",
"\n",
"\n",
"def _get_match(re_, regexp, s, flags):\n",
" m = re_.match(regexp, s, flags)\n",
" if m:\n",
" return m.group(0)\n",
"\n",
"def _create_unless(terminals, g_regex_flags, re_, use_bytes):\n",
" tokens_by_type = classify(terminals, lambda t: type(t.pattern))\n",
" assert len(tokens_by_type) <= 2, tokens_by_type.keys()\n",
" embedded_strs = set()\n",
" callback = {}\n",
" for retok in tokens_by_type.get(PatternRE, []):\n",
" unless = []\n",
" for strtok in tokens_by_type.get(PatternStr, []):\n",
" if strtok.priority != retok.priority:\n",
" continue\n",
" s = strtok.pattern.value\n",
" if s == _get_match(re_, retok.pattern.to_regexp(), s, g_regex_flags):\n",
" unless.append(strtok)\n",
" if strtok.pattern.flags <= retok.pattern.flags:\n",
" embedded_strs.add(strtok)\n",
" if unless:\n",
" callback[retok.name] = UnlessCallback(Scanner(unless, g_regex_flags, re_, match_whole=True, use_bytes=use_bytes))\n",
"\n",
" new_terminals = [t for t in terminals if t not in embedded_strs]\n",
" return new_terminals, callback\n",
"\n",
"\n",
"class Scanner:\n",
" def __init__(self, terminals, g_regex_flags, re_, use_bytes, match_whole=False):\n",
" self.terminals = terminals\n",
" self.g_regex_flags = g_regex_flags\n",
" self.re_ = re_\n",
" self.use_bytes = use_bytes\n",
" self.match_whole = match_whole\n",
"\n",
" self.allowed_types = {t.name for t in self.terminals}\n",
"\n",
" self._mres = self._build_mres(terminals, len(terminals))\n",
"\n",
" def _build_mres(self, terminals, max_size):\n",
" ##\n",
"\n",
" ##\n",
"\n",
" ##\n",
"\n",
" postfix = '$' if self.match_whole else ''\n",
" mres = []\n",
" while terminals:\n",
" pattern = u'|'.join(u'(?P<%s>%s)' % (t.name, t.pattern.to_regexp() + postfix) for t in terminals[:max_size])\n",
" if self.use_bytes:\n",
" pattern = pattern.encode('latin-1')\n",
" try:\n",
" mre = self.re_.compile(pattern, self.g_regex_flags)\n",
" except AssertionError: ##\n",
"\n",
" return self._build_mres(terminals, max_size // 2)\n",
"\n",
" mres.append(mre)\n",
" terminals = terminals[max_size:]\n",
" return mres\n",
"\n",
" def match(self, text, pos):\n",
" for mre in self._mres:\n",
" m = mre.match(text, pos)\n",
" if m:\n",
" return m.group(0), m.lastgroup\n",
"\n",
"\n",
"def _regexp_has_newline(r: str):\n",
" #--\n",
" return '\\n' in r or '\\\\n' in r or '\\\\s' in r or '[^' in r or ('(?s' in r and '.' in r)\n",
"\n",
"\n",
"class LexerState:\n",
" #--\n",
"\n",
" __slots__ = 'text', 'line_ctr', 'last_token'\n",
"\n",
" text: str\n",
" line_ctr: LineCounter\n",
" last_token: Optional[Token]\n",
"\n",
" def __init__(self, text: str, line_ctr: Optional[LineCounter]=None, last_token: Optional[Token]=None):\n",
" self.text = text\n",
" self.line_ctr = line_ctr or LineCounter(b'\\n' if isinstance(text, bytes) else '\\n')\n",
" self.last_token = last_token\n",
"\n",
" def __eq__(self, other):\n",
" if not isinstance(other, LexerState):\n",
" return NotImplemented\n",
"\n",
" return self.text is other.text and self.line_ctr == other.line_ctr and self.last_token == other.last_token\n",
"\n",
" def __copy__(self):\n",
" return type(self)(self.text, copy(self.line_ctr), self.last_token)\n",
"\n",
"\n",
"class LexerThread:\n",
" #--\n",
"\n",
" def __init__(self, lexer: 'Lexer', lexer_state: LexerState):\n",
" self.lexer = lexer\n",
" self.state = lexer_state\n",
"\n",
" @classmethod\n",
" def from_text(cls, lexer: 'Lexer', text: str) -> 'LexerThread':\n",
" return cls(lexer, LexerState(text))\n",
"\n",
" def lex(self, parser_state):\n",
" return self.lexer.lex(self.state, parser_state)\n",
"\n",
" def __copy__(self):\n",
" return type(self)(self.lexer, copy(self.state))\n",
"\n",
" _Token = Token\n",
"\n",
"\n",
"_Callback = Callable[[Token], Token]\n",
"\n",
"class Lexer(ABC):\n",
" #--\n",
" @abstractmethod\n",
" def lex(self, lexer_state: LexerState, parser_state: Any) -> Iterator[Token]:\n",
" return NotImplemented\n",
"\n",
" def make_lexer_state(self, text):\n",
" #--\n",
" return LexerState(text)\n",
"\n",
"\n",
"def _check_regex_collisions(terminal_to_regexp: Dict[TerminalDef, str], comparator, strict_mode, max_collisions_to_show=8):\n",
" if not comparator:\n",
" comparator = interegular.Comparator.from_regexes(terminal_to_regexp)\n",
"\n",
" ##\n",
"\n",
" ##\n",
"\n",
" max_time = 2 if strict_mode else 0.2\n",
"\n",
" ##\n",
"\n",
" if comparator.count_marked_pairs() >= max_collisions_to_show:\n",
" return\n",
" for group in classify(terminal_to_regexp, lambda t: t.priority).values():\n",
" for a, b in comparator.check(group, skip_marked=True):\n",
" assert a.priority == b.priority\n",
" ##\n",
"\n",
" comparator.mark(a, b)\n",
"\n",
" ##\n",
"\n",
" message = f\"Collision between Terminals {a.name} and {b.name}. \"\n",
" try:\n",
" example = comparator.get_example_overlap(a, b, max_time).format_multiline()\n",
" except ValueError:\n",
" ##\n",
"\n",
" example = \"No example could be found fast enough. However, the collision does still exists\"\n",
" if strict_mode:\n",
" raise LexError(f\"{message}\\n{example}\")\n",
" logger.warning(\"%s The lexer will choose between them arbitrarily.\\n%s\", message, example)\n",
" if comparator.count_marked_pairs() >= max_collisions_to_show:\n",
" logger.warning(\"Found 8 regex collisions, will not check for more.\")\n",
" return\n",
"\n",
"\n",
"class AbstractBasicLexer(Lexer):\n",
" terminals_by_name: Dict[str, TerminalDef]\n",
"\n",
" @abstractmethod\n",
" def __init__(self, conf: 'LexerConf', comparator=None) -> None:\n",
" ...\n",
"\n",
" @abstractmethod\n",
" def next_token(self, lex_state: LexerState, parser_state: Any = None) -> Token:\n",
" ...\n",
"\n",
" def lex(self, state: LexerState, parser_state: Any) -> Iterator[Token]:\n",
" with suppress(EOFError):\n",
" while True:\n",
" yield self.next_token(state, parser_state)\n",
"\n",
"\n",
"class BasicLexer(AbstractBasicLexer):\n",
" terminals: Collection[TerminalDef]\n",
" ignore_types: FrozenSet[str]\n",
" newline_types: FrozenSet[str]\n",
" user_callbacks: Dict[str, _Callback]\n",
" callback: Dict[str, _Callback]\n",
" re: ModuleType\n",
"\n",
" def __init__(self, conf: 'LexerConf', comparator=None) -> None:\n",
" terminals = list(conf.terminals)\n",
" assert all(isinstance(t, TerminalDef) for t in terminals), terminals\n",
"\n",
" self.re = conf.re_module\n",
"\n",
" if not conf.skip_validation:\n",
" ##\n",
"\n",
" terminal_to_regexp = {}\n",
" for t in terminals:\n",
" regexp = t.pattern.to_regexp()\n",
" try:\n",
" self.re.compile(regexp, conf.g_regex_flags)\n",
" except self.re.error:\n",
" raise LexError(\"Cannot compile token %s: %s\" % (t.name, t.pattern))\n",
"\n",
" if t.pattern.min_width == 0:\n",
" raise LexError(\"Lexer does not allow zero-width terminals. (%s: %s)\" % (t.name, t.pattern))\n",
" if t.pattern.type == \"re\":\n",
" terminal_to_regexp[t] = regexp\n",
"\n",
" if not (set(conf.ignore) <= {t.name for t in terminals}):\n",
" raise LexError(\"Ignore terminals are not defined: %s\" % (set(conf.ignore) - {t.name for t in terminals}))\n",
"\n",
" if has_interegular:\n",
" _check_regex_collisions(terminal_to_regexp, comparator, conf.strict)\n",
" elif conf.strict:\n",
" raise LexError(\"interegular must be installed for strict mode. Use `pip install 'lark[interegular]'`.\")\n",
"\n",
" ##\n",
"\n",
" self.newline_types = frozenset(t.name for t in terminals if _regexp_has_newline(t.pattern.to_regexp()))\n",
" self.ignore_types = frozenset(conf.ignore)\n",
"\n",
" terminals.sort(key=lambda x: (-x.priority, -x.pattern.max_width, -len(x.pattern.value), x.name))\n",
" self.terminals = terminals\n",
" self.user_callbacks = conf.callbacks\n",
" self.g_regex_flags = conf.g_regex_flags\n",
" self.use_bytes = conf.use_bytes\n",
" self.terminals_by_name = conf.terminals_by_name\n",
"\n",
" self._scanner = None\n",
"\n",
" def _build_scanner(self):\n",
" terminals, self.callback = _create_unless(self.terminals, self.g_regex_flags, self.re, self.use_bytes)\n",
" assert all(self.callback.values())\n",
"\n",
" for type_, f in self.user_callbacks.items():\n",
" if type_ in self.callback:\n",
" ##\n",
"\n",
" self.callback[type_] = CallChain(self.callback[type_], f, lambda t: t.type == type_)\n",
" else:\n",
" self.callback[type_] = f\n",
"\n",
" self._scanner = Scanner(terminals, self.g_regex_flags, self.re, self.use_bytes)\n",
"\n",
" @property\n",
" def scanner(self):\n",
" if self._scanner is None:\n",
" self._build_scanner()\n",
" return self._scanner\n",
"\n",
" def match(self, text, pos):\n",
" return self.scanner.match(text, pos)\n",
"\n",
" def next_token(self, lex_state: LexerState, parser_state: Any = None) -> Token:\n",
" line_ctr = lex_state.line_ctr\n",
" while line_ctr.char_pos < len(lex_state.text):\n",
" res = self.match(lex_state.text, line_ctr.char_pos)\n",
" if not res:\n",
" allowed = self.scanner.allowed_types - self.ignore_types\n",
" if not allowed:\n",
" allowed = {\"<END-OF-FILE>\"}\n",
" raise UnexpectedCharacters(lex_state.text, line_ctr.char_pos, line_ctr.line, line_ctr.column,\n",
" allowed=allowed, token_history=lex_state.last_token and [lex_state.last_token],\n",
" state=parser_state, terminals_by_name=self.terminals_by_name)\n",
"\n",
" value, type_ = res\n",
"\n",
" ignored = type_ in self.ignore_types\n",
" t = None\n",
" if not ignored or type_ in self.callback:\n",
" t = Token(type_, value, line_ctr.char_pos, line_ctr.line, line_ctr.column)\n",
" line_ctr.feed(value, type_ in self.newline_types)\n",
" if t is not None:\n",
" t.end_line = line_ctr.line\n",
" t.end_column = line_ctr.column\n",
" t.end_pos = line_ctr.char_pos\n",
" if t.type in self.callback:\n",
" t = self.callback[t.type](t)\n",
" if not ignored:\n",
" if not isinstance(t, Token):\n",
" raise LexError(\"Callbacks must return a token (returned %r)\" % t)\n",
" lex_state.last_token = t\n",
" return t\n",
"\n",
" ##\n",
"\n",
" raise EOFError(self)\n",
"\n",
"\n",
"class ContextualLexer(Lexer):\n",
" lexers: Dict[int, AbstractBasicLexer]\n",
" root_lexer: AbstractBasicLexer\n",
"\n",
" BasicLexer: Type[AbstractBasicLexer] = BasicLexer\n",
"\n",
" def __init__(self, conf: 'LexerConf', states: Dict[int, Collection[str]], always_accept: Collection[str]=()) -> None:\n",
" terminals = list(conf.terminals)\n",
" terminals_by_name = conf.terminals_by_name\n",
"\n",
" trad_conf = copy(conf)\n",
" trad_conf.terminals = terminals\n",
"\n",
" if has_interegular and not conf.skip_validation:\n",
" comparator = interegular.Comparator.from_regexes({t: t.pattern.to_regexp() for t in terminals})\n",
" else:\n",
" comparator = None\n",
" lexer_by_tokens: Dict[FrozenSet[str], AbstractBasicLexer] = {}\n",
" self.lexers = {}\n",
" for state, accepts in states.items():\n",
" key = frozenset(accepts)\n",
" try:\n",
" lexer = lexer_by_tokens[key]\n",
" except KeyError:\n",
" accepts = set(accepts) | set(conf.ignore) | set(always_accept)\n",
" lexer_conf = copy(trad_conf)\n",
" lexer_conf.terminals = [terminals_by_name[n] for n in accepts if n in terminals_by_name]\n",
" lexer = self.BasicLexer(lexer_conf, comparator)\n",
" lexer_by_tokens[key] = lexer\n",
"\n",
" self.lexers[state] = lexer\n",
"\n",
" assert trad_conf.terminals is terminals\n",
" trad_conf.skip_validation = True ##\n",
"\n",
" self.root_lexer = self.BasicLexer(trad_conf, comparator)\n",
"\n",
" def lex(self, lexer_state: LexerState, parser_state: 'ParserState') -> Iterator[Token]:\n",
" try:\n",
" while True:\n",
" lexer = self.lexers[parser_state.position]\n",
" yield lexer.next_token(lexer_state, parser_state)\n",
" except EOFError:\n",
" pass\n",
" except UnexpectedCharacters as e:\n",
" ##\n",
"\n",
" ##\n",
"\n",
" try:\n",
" last_token = lexer_state.last_token ##\n",
"\n",
" token = self.root_lexer.next_token(lexer_state, parser_state)\n",
" raise UnexpectedToken(token, e.allowed, state=parser_state, token_history=[last_token], terminals_by_name=self.root_lexer.terminals_by_name)\n",
" except UnexpectedCharacters:\n",
" raise e ##\n",
"\n",
"\n",
"\n",
"\n",
"_ParserArgType: 'TypeAlias' = 'Literal[\"earley\", \"lalr\", \"cyk\", \"auto\"]'\n",
"_LexerArgType: 'TypeAlias' = 'Union[Literal[\"auto\", \"basic\", \"contextual\", \"dynamic\", \"dynamic_complete\"], Type[Lexer]]'\n",
"_LexerCallback = Callable[[Token], Token]\n",
"ParserCallbacks = Dict[str, Callable]\n",
"\n",
"class LexerConf(Serialize):\n",
" __serialize_fields__ = 'terminals', 'ignore', 'g_regex_flags', 'use_bytes', 'lexer_type'\n",
" __serialize_namespace__ = TerminalDef,\n",
"\n",
" terminals: Collection[TerminalDef]\n",
" re_module: ModuleType\n",
" ignore: Collection[str]\n",
" postlex: 'Optional[PostLex]'\n",
" callbacks: Dict[str, _LexerCallback]\n",
" g_regex_flags: int\n",
" skip_validation: bool\n",
" use_bytes: bool\n",
" lexer_type: Optional[_LexerArgType]\n",
" strict: bool\n",
"\n",
" def __init__(self, terminals: Collection[TerminalDef], re_module: ModuleType, ignore: Collection[str]=(), postlex: 'Optional[PostLex]'=None,\n",
" callbacks: Optional[Dict[str, _LexerCallback]]=None, g_regex_flags: int=0, skip_validation: bool=False, use_bytes: bool=False, strict: bool=False):\n",
" self.terminals = terminals\n",
" self.terminals_by_name = {t.name: t for t in self.terminals}\n",
" assert len(self.terminals) == len(self.terminals_by_name)\n",
" self.ignore = ignore\n",
" self.postlex = postlex\n",
" self.callbacks = callbacks or {}\n",
" self.g_regex_flags = g_regex_flags\n",
" self.re_module = re_module\n",
" self.skip_validation = skip_validation\n",
" self.use_bytes = use_bytes\n",
" self.strict = strict\n",
" self.lexer_type = None\n",
"\n",
" def _deserialize(self):\n",
" self.terminals_by_name = {t.name: t for t in self.terminals}\n",
"\n",
" def __deepcopy__(self, memo=None):\n",
" return type(self)(\n",
" deepcopy(self.terminals, memo),\n",
" self.re_module,\n",
" deepcopy(self.ignore, memo),\n",
" deepcopy(self.postlex, memo),\n",
" deepcopy(self.callbacks, memo),\n",
" deepcopy(self.g_regex_flags, memo),\n",
" deepcopy(self.skip_validation, memo),\n",
" deepcopy(self.use_bytes, memo),\n",
" )\n",
"\n",
"class ParserConf(Serialize):\n",
" __serialize_fields__ = 'rules', 'start', 'parser_type'\n",
"\n",
" rules: List['Rule']\n",
" callbacks: ParserCallbacks\n",
" start: List[str]\n",
" parser_type: _ParserArgType\n",
"\n",
" def __init__(self, rules: List['Rule'], callbacks: ParserCallbacks, start: List[str]):\n",
" assert isinstance(start, list)\n",
" self.rules = rules\n",
" self.callbacks = callbacks\n",
" self.start = start\n",
"\n",
"\n",
"from functools import partial, wraps\n",
"from itertools import product\n",
"\n",
"\n",
"class ExpandSingleChild:\n",
" def __init__(self, node_builder):\n",
" self.node_builder = node_builder\n",
"\n",
" def __call__(self, children):\n",
" if len(children) == 1:\n",
" return children[0]\n",
" else:\n",
" return self.node_builder(children)\n",
"\n",
"\n",
"\n",
"class PropagatePositions:\n",
" def __init__(self, node_builder, node_filter=None):\n",
" self.node_builder = node_builder\n",
" self.node_filter = node_filter\n",
"\n",
" def __call__(self, children):\n",
" res = self.node_builder(children)\n",
"\n",
" if isinstance(res, Tree):\n",
" ##\n",
"\n",
" ##\n",
"\n",
" ##\n",
"\n",
" ##\n",
"\n",
"\n",
" res_meta = res.meta\n",
"\n",
" first_meta = self._pp_get_meta(children)\n",
" if first_meta is not None:\n",
" if not hasattr(res_meta, 'line'):\n",
" ##\n",
"\n",
" res_meta.line = getattr(first_meta, 'container_line', first_meta.line)\n",
" res_meta.column = getattr(first_meta, 'container_column', first_meta.column)\n",
" res_meta.start_pos = getattr(first_meta, 'container_start_pos', first_meta.start_pos)\n",
" res_meta.empty = False\n",
"\n",
" res_meta.container_line = getattr(first_meta, 'container_line', first_meta.line)\n",
" res_meta.container_column = getattr(first_meta, 'container_column', first_meta.column)\n",
" res_meta.container_start_pos = getattr(first_meta, 'container_start_pos', first_meta.start_pos)\n",
"\n",
" last_meta = self._pp_get_meta(reversed(children))\n",
" if last_meta is not None:\n",
" if not hasattr(res_meta, 'end_line'):\n",
" res_meta.end_line = getattr(last_meta, 'container_end_line', last_meta.end_line)\n",
" res_meta.end_column = getattr(last_meta, 'container_end_column', last_meta.end_column)\n",
" res_meta.end_pos = getattr(last_meta, 'container_end_pos', last_meta.end_pos)\n",
" res_meta.empty = False\n",
"\n",
" res_meta.container_end_line = getattr(last_meta, 'container_end_line', last_meta.end_line)\n",
" res_meta.container_end_column = getattr(last_meta, 'container_end_column', last_meta.end_column)\n",
" res_meta.container_end_pos = getattr(last_meta, 'container_end_pos', last_meta.end_pos)\n",
"\n",
" return res\n",
"\n",
" def _pp_get_meta(self, children):\n",
" for c in children:\n",
" if self.node_filter is not None and not self.node_filter(c):\n",
" continue\n",
" if isinstance(c, Tree):\n",
" if not c.meta.empty:\n",
" return c.meta\n",
" elif isinstance(c, Token):\n",
" return c\n",
" elif hasattr(c, '__lark_meta__'):\n",
" return c.__lark_meta__()\n",
"\n",
"def make_propagate_positions(option):\n",
" if callable(option):\n",
" return partial(PropagatePositions, node_filter=option)\n",
" elif option is True:\n",
" return PropagatePositions\n",
" elif option is False:\n",
" return None\n",
"\n",
" raise ConfigurationError('Invalid option for propagate_positions: %r' % option)\n",
"\n",
"\n",
"class ChildFilter:\n",
" def __init__(self, to_include, append_none, node_builder):\n",
" self.node_builder = node_builder\n",
" self.to_include = to_include\n",
" self.append_none = append_none\n",
"\n",
" def __call__(self, children):\n",
" filtered = []\n",
"\n",
" for i, to_expand, add_none in self.to_include:\n",
" if add_none:\n",
" filtered += [None] * add_none\n",
" if to_expand:\n",
" filtered += children[i].children\n",
" else:\n",
" filtered.append(children[i])\n",
"\n",
" if self.append_none:\n",
" filtered += [None] * self.append_none\n",
"\n",
" return self.node_builder(filtered)\n",
"\n",
"\n",
"class ChildFilterLALR(ChildFilter):\n",
" #--\n",
"\n",
" def __call__(self, children):\n",
" filtered = []\n",
" for i, to_expand, add_none in self.to_include:\n",
" if add_none:\n",
" filtered += [None] * add_none\n",
" if to_expand:\n",
" if filtered:\n",
" filtered += children[i].children\n",
" else: ##\n",
"\n",
" filtered = children[i].children\n",
" else:\n",
" filtered.append(children[i])\n",
"\n",
" if self.append_none:\n",
" filtered += [None] * self.append_none\n",
"\n",
" return self.node_builder(filtered)\n",
"\n",
"\n",
"class ChildFilterLALR_NoPlaceholders(ChildFilter):\n",
" #--\n",
" def __init__(self, to_include, node_builder):\n",
" self.node_builder = node_builder\n",
" self.to_include = to_include\n",
"\n",
" def __call__(self, children):\n",
" filtered = []\n",
" for i, to_expand in self.to_include:\n",
" if to_expand:\n",
" if filtered:\n",
" filtered += children[i].children\n",
" else: ##\n",
"\n",
" filtered = children[i].children\n",
" else:\n",
" filtered.append(children[i])\n",
" return self.node_builder(filtered)\n",
"\n",
"\n",
"def _should_expand(sym):\n",
" return not sym.is_term and sym.name.startswith('_')\n",
"\n",
"\n",
"def maybe_create_child_filter(expansion, keep_all_tokens, ambiguous, _empty_indices: List[bool]):\n",
" ##\n",
"\n",
" if _empty_indices:\n",
" assert _empty_indices.count(False) == len(expansion)\n",
" s = ''.join(str(int(b)) for b in _empty_indices)\n",
" empty_indices = [len(ones) for ones in s.split('0')]\n",
" assert len(empty_indices) == len(expansion)+1, (empty_indices, len(expansion))\n",
" else:\n",
" empty_indices = [0] * (len(expansion)+1)\n",
"\n",
" to_include = []\n",
" nones_to_add = 0\n",
" for i, sym in enumerate(expansion):\n",
" nones_to_add += empty_indices[i]\n",
" if keep_all_tokens or not (sym.is_term and sym.filter_out):\n",
" to_include.append((i, _should_expand(sym), nones_to_add))\n",
" nones_to_add = 0\n",
"\n",
" nones_to_add += empty_indices[len(expansion)]\n",
"\n",
" if _empty_indices or len(to_include) < len(expansion) or any(to_expand for i, to_expand,_ in to_include):\n",
" if _empty_indices or ambiguous:\n",
" return partial(ChildFilter if ambiguous else ChildFilterLALR, to_include, nones_to_add)\n",
" else:\n",
" ##\n",
"\n",
" return partial(ChildFilterLALR_NoPlaceholders, [(i, x) for i,x,_ in to_include])\n",
"\n",
"\n",
"class AmbiguousExpander:\n",
" #--\n",
" def __init__(self, to_expand, tree_class, node_builder):\n",
" self.node_builder = node_builder\n",
" self.tree_class = tree_class\n",
" self.to_expand = to_expand\n",
"\n",
" def __call__(self, children):\n",
" def _is_ambig_tree(t):\n",
" return hasattr(t, 'data') and t.data == '_ambig'\n",
"\n",
" ##\n",
"\n",
" ##\n",
"\n",
" ##\n",
"\n",
" ##\n",
"\n",
" ambiguous = []\n",
" for i, child in enumerate(children):\n",
" if _is_ambig_tree(child):\n",
" if i in self.to_expand:\n",
" ambiguous.append(i)\n",
"\n",
" child.expand_kids_by_data('_ambig')\n",
"\n",
" if not ambiguous:\n",
" return self.node_builder(children)\n",
"\n",
" expand = [child.children if i in ambiguous else (child,) for i, child in enumerate(children)]\n",
" return self.tree_class('_ambig', [self.node_builder(list(f)) for f in product(*expand)])\n",
"\n",
"\n",
"def maybe_create_ambiguous_expander(tree_class, expansion, keep_all_tokens):\n",
" to_expand = [i for i, sym in enumerate(expansion)\n",
" if keep_all_tokens or ((not (sym.is_term and sym.filter_out)) and _should_expand(sym))]\n",
" if to_expand:\n",
" return partial(AmbiguousExpander, to_expand, tree_class)\n",
"\n",
"\n",
"class AmbiguousIntermediateExpander:\n",
" #--\n",
"\n",
" def __init__(self, tree_class, node_builder):\n",
" self.node_builder = node_builder\n",
" self.tree_class = tree_class\n",
"\n",
" def __call__(self, children):\n",
" def _is_iambig_tree(child):\n",
" return hasattr(child, 'data') and child.data == '_iambig'\n",
"\n",
" def _collapse_iambig(children):\n",
" #--\n",
"\n",
" ##\n",
"\n",
" ##\n",
"\n",
" if children and _is_iambig_tree(children[0]):\n",
" iambig_node = children[0]\n",
" result = []\n",
" for grandchild in iambig_node.children:\n",
" collapsed = _collapse_iambig(grandchild.children)\n",
" if collapsed:\n",
" for child in collapsed:\n",
" child.children += children[1:]\n",
" result += collapsed\n",
" else:\n",
" new_tree = self.tree_class('_inter', grandchild.children + children[1:])\n",
" result.append(new_tree)\n",
" return result\n",
"\n",
" collapsed = _collapse_iambig(children)\n",
" if collapsed:\n",
" processed_nodes = [self.node_builder(c.children) for c in collapsed]\n",
" return self.tree_class('_ambig', processed_nodes)\n",
"\n",
" return self.node_builder(children)\n",
"\n",
"\n",
"\n",
"def inplace_transformer(func):\n",
" @wraps(func)\n",
" def f(children):\n",
" ##\n",
"\n",
" tree = Tree(func.__name__, children)\n",
" return func(tree)\n",
" return f\n",
"\n",
"\n",
"def apply_visit_wrapper(func, name, wrapper):\n",
" if wrapper is _vargs_meta or wrapper is _vargs_meta_inline:\n",
" raise NotImplementedError(\"Meta args not supported for internal transformer\")\n",
"\n",
" @wraps(func)\n",
" def f(children):\n",
" return wrapper(func, name, children, None)\n",
" return f\n",
"\n",
"\n",
"class ParseTreeBuilder:\n",
" def __init__(self, rules, tree_class, propagate_positions=False, ambiguous=False, maybe_placeholders=False):\n",
" self.tree_class = tree_class\n",
" self.propagate_positions = propagate_positions\n",
" self.ambiguous = ambiguous\n",
" self.maybe_placeholders = maybe_placeholders\n",
"\n",
" self.rule_builders = list(self._init_builders(rules))\n",
"\n",
" def _init_builders(self, rules):\n",
" propagate_positions = make_propagate_positions(self.propagate_positions)\n",
"\n",
" for rule in rules:\n",
" options = rule.options\n",
" keep_all_tokens = options.keep_all_tokens\n",
" expand_single_child = options.expand1\n",
"\n",
" wrapper_chain = list(filter(None, [\n",
" (expand_single_child and not rule.alias) and ExpandSingleChild,\n",
" maybe_create_child_filter(rule.expansion, keep_all_tokens, self.ambiguous, options.empty_indices if self.maybe_placeholders else None),\n",
" propagate_positions,\n",
" self.ambiguous and maybe_create_ambiguous_expander(self.tree_class, rule.expansion, keep_all_tokens),\n",
" self.ambiguous and partial(AmbiguousIntermediateExpander, self.tree_class)\n",
" ]))\n",
"\n",
" yield rule, wrapper_chain\n",
"\n",
" def create_callback(self, transformer=None):\n",
" callbacks = {}\n",
"\n",
" default_handler = getattr(transformer, '__default__', None)\n",
" if default_handler:\n",
" def default_callback(data, children):\n",
" return default_handler(data, children, None)\n",
" else:\n",
" default_callback = self.tree_class\n",
"\n",
" for rule, wrapper_chain in self.rule_builders:\n",
"\n",
" user_callback_name = rule.alias or rule.options.template_source or rule.origin.name\n",
" try:\n",
" f = getattr(transformer, user_callback_name)\n",
" wrapper = getattr(f, 'visit_wrapper', None)\n",
" if wrapper is not None:\n",
" f = apply_visit_wrapper(f, user_callback_name, wrapper)\n",
" elif isinstance(transformer, Transformer_InPlace):\n",
" f = inplace_transformer(f)\n",
" except AttributeError:\n",
" f = partial(default_callback, user_callback_name)\n",
"\n",
" for w in wrapper_chain:\n",
" f = w(f)\n",
"\n",
" if rule in callbacks:\n",
" raise GrammarError(\"Rule '%s' already exists\" % (rule,))\n",
"\n",
" callbacks[rule] = f\n",
"\n",
" return callbacks\n",
"\n",
"\n",
"\n",
"class Action:\n",
" def __init__(self, name):\n",
" self.name = name\n",
" def __str__(self):\n",
" return self.name\n",
" def __repr__(self):\n",
" return str(self)\n",
"\n",
"Shift = Action('Shift')\n",
"Reduce = Action('Reduce')\n",
"\n",
"StateT = TypeVar(\"StateT\")\n",
"\n",
"class ParseTableBase(Generic[StateT]):\n",
" states: Dict[StateT, Dict[str, Tuple]]\n",
" start_states: Dict[str, StateT]\n",
" end_states: Dict[str, StateT]\n",
"\n",
" def __init__(self, states, start_states, end_states):\n",
" self.states = states\n",
" self.start_states = start_states\n",
" self.end_states = end_states\n",
"\n",
" def serialize(self, memo):\n",
" tokens = Enumerator()\n",
"\n",
" states = {\n",
" state: {tokens.get(token): ((1, arg.serialize(memo)) if action is Reduce else (0, arg))\n",
" for token, (action, arg) in actions.items()}\n",
" for state, actions in self.states.items()\n",
" }\n",
"\n",
" return {\n",
" 'tokens': tokens.reversed(),\n",
" 'states': states,\n",
" 'start_states': self.start_states,\n",
" 'end_states': self.end_states,\n",
" }\n",
"\n",
" @classmethod\n",
" def deserialize(cls, data, memo):\n",
" tokens = data['tokens']\n",
" states = {\n",
" state: {tokens[token]: ((Reduce, Rule.deserialize(arg, memo)) if action==1 else (Shift, arg))\n",
" for token, (action, arg) in actions.items()}\n",
" for state, actions in data['states'].items()\n",
" }\n",
" return cls(states, data['start_states'], data['end_states'])\n",
"\n",
"class ParseTable(ParseTableBase['State']):\n",
" #--\n",
" pass\n",
"\n",
"\n",
"class IntParseTable(ParseTableBase[int]):\n",
" #--\n",
"\n",
" @classmethod\n",
" def from_ParseTable(cls, parse_table: ParseTable):\n",
" enum = list(parse_table.states)\n",
" state_to_idx: Dict['State', int] = {s:i for i,s in enumerate(enum)}\n",
" int_states = {}\n",
"\n",
" for s, la in parse_table.states.items():\n",
" la = {k:(v[0], state_to_idx[v[1]]) if v[0] is Shift else v\n",
" for k,v in la.items()}\n",
" int_states[ state_to_idx[s] ] = la\n",
"\n",
"\n",
" start_states = {start:state_to_idx[s] for start, s in parse_table.start_states.items()}\n",
" end_states = {start:state_to_idx[s] for start, s in parse_table.end_states.items()}\n",
" return cls(int_states, start_states, end_states)\n",
"\n",
"\n",
"\n",
"class ParseConf(Generic[StateT]):\n",
" __slots__ = 'parse_table', 'callbacks', 'start', 'start_state', 'end_state', 'states'\n",
"\n",
" parse_table: ParseTableBase[StateT]\n",
" callbacks: ParserCallbacks\n",
" start: str\n",
"\n",
" start_state: StateT\n",
" end_state: StateT\n",
" states: Dict[StateT, Dict[str, tuple]]\n",
"\n",
" def __init__(self, parse_table: ParseTableBase[StateT], callbacks: ParserCallbacks, start: str):\n",
" self.parse_table = parse_table\n",
"\n",
" self.start_state = self.parse_table.start_states[start]\n",
" self.end_state = self.parse_table.end_states[start]\n",
" self.states = self.parse_table.states\n",
"\n",
" self.callbacks = callbacks\n",
" self.start = start\n",
"\n",
"class ParserState(Generic[StateT]):\n",
" __slots__ = 'parse_conf', 'lexer', 'state_stack', 'value_stack'\n",
"\n",
" parse_conf: ParseConf[StateT]\n",
" lexer: LexerThread\n",
" state_stack: List[StateT]\n",
" value_stack: list\n",
"\n",
" def __init__(self, parse_conf: ParseConf[StateT], lexer: LexerThread, state_stack=None, value_stack=None):\n",
" self.parse_conf = parse_conf\n",
" self.lexer = lexer\n",
" self.state_stack = state_stack or [self.parse_conf.start_state]\n",
" self.value_stack = value_stack or []\n",
"\n",
" @property\n",
" def position(self) -> StateT:\n",
" return self.state_stack[-1]\n",
"\n",
" ##\n",
"\n",
" def __eq__(self, other) -> bool:\n",
" if not isinstance(other, ParserState):\n",
" return NotImplemented\n",
" return len(self.state_stack) == len(other.state_stack) and self.position == other.position\n",
"\n",
" def __copy__(self):\n",
" return type(self)(\n",
" self.parse_conf,\n",
" self.lexer, ##\n",
"\n",
" copy(self.state_stack),\n",
" deepcopy(self.value_stack),\n",
" )\n",
"\n",
" def copy(self) -> 'ParserState[StateT]':\n",
" return copy(self)\n",
"\n",
" def feed_token(self, token: Token, is_end=False) -> Any:\n",
" state_stack = self.state_stack\n",
" value_stack = self.value_stack\n",
" states = self.parse_conf.states\n",
" end_state = self.parse_conf.end_state\n",
" callbacks = self.parse_conf.callbacks\n",
"\n",
" while True:\n",
" state = state_stack[-1]\n",
" try:\n",
" action, arg = states[state][token.type]\n",
" except KeyError:\n",
" expected = {s for s in states[state].keys() if s.isupper()}\n",
" raise UnexpectedToken(token, expected, state=self, interactive_parser=None)\n",
"\n",
" assert arg != end_state\n",
"\n",
" if action is Shift:\n",
" ##\n",
"\n",
" assert not is_end\n",
" state_stack.append(arg)\n",
" value_stack.append(token if token.type not in callbacks else callbacks[token.type](token))\n",
" return\n",
" else:\n",
" ##\n",
"\n",
" rule = arg\n",
" size = len(rule.expansion)\n",
" if size:\n",
" s = value_stack[-size:]\n",
" del state_stack[-size:]\n",
" del value_stack[-size:]\n",
" else:\n",
" s = []\n",
"\n",
" value = callbacks[rule](s) if callbacks else s\n",
"\n",
" _action, new_state = states[state_stack[-1]][rule.origin.name]\n",
" assert _action is Shift\n",
" state_stack.append(new_state)\n",
" value_stack.append(value)\n",
"\n",
" if is_end and state_stack[-1] == end_state:\n",
" return value_stack[-1]\n",
"\n",
"\n",
"class LALR_Parser(Serialize):\n",
" def __init__(self, parser_conf: ParserConf, debug: bool=False, strict: bool=False):\n",
" analysis = LALR_Analyzer(parser_conf, debug=debug, strict=strict)\n",
" analysis.compute_lalr()\n",
" callbacks = parser_conf.callbacks\n",
"\n",
" self._parse_table = analysis.parse_table\n",
" self.parser_conf = parser_conf\n",
" self.parser = _Parser(analysis.parse_table, callbacks, debug)\n",
"\n",
" @classmethod\n",
" def deserialize(cls, data, memo, callbacks, debug=False):\n",
" inst = cls.__new__(cls)\n",
" inst._parse_table = IntParseTable.deserialize(data, memo)\n",
" inst.parser = _Parser(inst._parse_table, callbacks, debug)\n",
" return inst\n",
"\n",
" def serialize(self, memo: Any = None) -> Dict[str, Any]:\n",
" return self._parse_table.serialize(memo)\n",
"\n",
" def parse_interactive(self, lexer: LexerThread, start: str):\n",
" return self.parser.parse(lexer, start, start_interactive=True)\n",
"\n",
" def parse(self, lexer, start, on_error=None):\n",
" try:\n",
" return self.parser.parse(lexer, start)\n",
" except UnexpectedInput as e:\n",
" if on_error is None:\n",
" raise\n",
"\n",
" while True:\n",
" if isinstance(e, UnexpectedCharacters):\n",
" s = e.interactive_parser.lexer_thread.state\n",
" p = s.line_ctr.char_pos\n",
"\n",
" if not on_error(e):\n",
" raise e\n",
"\n",
" if isinstance(e, UnexpectedCharacters):\n",
" ##\n",
"\n",
" if p == s.line_ctr.char_pos:\n",
" s.line_ctr.feed(s.text[p:p+1])\n",
"\n",
" try:\n",
" return e.interactive_parser.resume_parse()\n",
" except UnexpectedToken as e2:\n",
" if (isinstance(e, UnexpectedToken)\n",
" and e.token.type == e2.token.type == '$END'\n",
" and e.interactive_parser == e2.interactive_parser):\n",
" ##\n",
"\n",
" raise e2\n",
" e = e2\n",
" except UnexpectedCharacters as e2:\n",
" e = e2\n",
"\n",
"\n",
"class _Parser:\n",
" parse_table: ParseTableBase\n",
" callbacks: ParserCallbacks\n",
" debug: bool\n",
"\n",
" def __init__(self, parse_table: ParseTableBase, callbacks: ParserCallbacks, debug: bool=False):\n",
" self.parse_table = parse_table\n",
" self.callbacks = callbacks\n",
" self.debug = debug\n",
"\n",
" def parse(self, lexer: LexerThread, start: str, value_stack=None, state_stack=None, start_interactive=False):\n",
" parse_conf = ParseConf(self.parse_table, self.callbacks, start)\n",
" parser_state = ParserState(parse_conf, lexer, state_stack, value_stack)\n",
" if start_interactive:\n",
" return InteractiveParser(self, parser_state, parser_state.lexer)\n",
" return self.parse_from_state(parser_state)\n",
"\n",
"\n",
" def parse_from_state(self, state: ParserState, last_token: Optional[Token]=None):\n",
" #--\n",
" try:\n",
" token = last_token\n",
" for token in state.lexer.lex(state):\n",
" assert token is not None\n",
" state.feed_token(token)\n",
"\n",
" end_token = Token.new_borrow_pos('$END', '', token) if token else Token('$END', '', 0, 1, 1)\n",
" return state.feed_token(end_token, True)\n",
" except UnexpectedInput as e:\n",
" try:\n",
" e.interactive_parser = InteractiveParser(self, state, state.lexer)\n",
" except NameError:\n",
" pass\n",
" raise e\n",
" except Exception as e:\n",
" if self.debug:\n",
" print(\"\")\n",
" print(\"STATE STACK DUMP\")\n",
" print(\"----------------\")\n",
" for i, s in enumerate(state.state_stack):\n",
" print('%d)' % i , s)\n",
" print(\"\")\n",
"\n",
" raise\n",
"\n",
"\n",
"class InteractiveParser:\n",
" #--\n",
" def __init__(self, parser, parser_state, lexer_thread: LexerThread):\n",
" self.parser = parser\n",
" self.parser_state = parser_state\n",
" self.lexer_thread = lexer_thread\n",
" self.result = None\n",
"\n",
" @property\n",
" def lexer_state(self) -> LexerThread:\n",
" warnings.warn(\"lexer_state will be removed in subsequent releases. Use lexer_thread instead.\", DeprecationWarning)\n",
" return self.lexer_thread\n",
"\n",
" def feed_token(self, token: Token):\n",
" #--\n",
" return self.parser_state.feed_token(token, token.type == '$END')\n",
"\n",
" def iter_parse(self) -> Iterator[Token]:\n",
" #--\n",
" for token in self.lexer_thread.lex(self.parser_state):\n",
" yield token\n",
" self.result = self.feed_token(token)\n",
"\n",
" def exhaust_lexer(self) -> List[Token]:\n",
" #--\n",
" return list(self.iter_parse())\n",
"\n",
"\n",
" def feed_eof(self, last_token=None):\n",
" #--\n",
" eof = Token.new_borrow_pos('$END', '', last_token) if last_token is not None else self.lexer_thread._Token('$END', '', 0, 1, 1)\n",
" return self.feed_token(eof)\n",
"\n",
"\n",
" def __copy__(self):\n",
" #--\n",
" return type(self)(\n",
" self.parser,\n",
" copy(self.parser_state),\n",
" copy(self.lexer_thread),\n",
" )\n",
"\n",
" def copy(self):\n",
" return copy(self)\n",
"\n",
" def __eq__(self, other):\n",
" if not isinstance(other, InteractiveParser):\n",
" return False\n",
"\n",
" return self.parser_state == other.parser_state and self.lexer_thread == other.lexer_thread\n",
"\n",
" def as_immutable(self):\n",
" #--\n",
" p = copy(self)\n",
" return ImmutableInteractiveParser(p.parser, p.parser_state, p.lexer_thread)\n",
"\n",
" def pretty(self):\n",
" #--\n",
" out = [\"Parser choices:\"]\n",
" for k, v in self.choices().items():\n",
" out.append('\\t- %s -> %r' % (k, v))\n",
" out.append('stack size: %s' % len(self.parser_state.state_stack))\n",
" return '\\n'.join(out)\n",
"\n",
" def choices(self):\n",
" #--\n",
" return self.parser_state.parse_conf.parse_table.states[self.parser_state.position]\n",
"\n",
" def accepts(self):\n",
" #--\n",
" accepts = set()\n",
" conf_no_callbacks = copy(self.parser_state.parse_conf)\n",
" ##\n",
"\n",
" ##\n",
"\n",
" conf_no_callbacks.callbacks = {}\n",
" for t in self.choices():\n",
" if t.isupper(): ##\n",
"\n",
" new_cursor = copy(self)\n",
" new_cursor.parser_state.parse_conf = conf_no_callbacks\n",
" try:\n",
" new_cursor.feed_token(self.lexer_thread._Token(t, ''))\n",
" except UnexpectedToken:\n",
" pass\n",
" else:\n",
" accepts.add(t)\n",
" return accepts\n",
"\n",
" def resume_parse(self):\n",
" #--\n",
" return self.parser.parse_from_state(self.parser_state, last_token=self.lexer_thread.state.last_token)\n",
"\n",
"\n",
"\n",
"class ImmutableInteractiveParser(InteractiveParser):\n",
" #--\n",
"\n",
" result = None\n",
"\n",
" def __hash__(self):\n",
" return hash((self.parser_state, self.lexer_thread))\n",
"\n",
" def feed_token(self, token):\n",
" c = copy(self)\n",
" c.result = InteractiveParser.feed_token(c, token)\n",
" return c\n",
"\n",
" def exhaust_lexer(self):\n",
" #--\n",
" cursor = self.as_mutable()\n",
" cursor.exhaust_lexer()\n",
" return cursor.as_immutable()\n",
"\n",
" def as_mutable(self):\n",
" #--\n",
" p = copy(self)\n",
" return InteractiveParser(p.parser, p.parser_state, p.lexer_thread)\n",
"\n",
"\n",
"\n",
"def _wrap_lexer(lexer_class):\n",
" future_interface = getattr(lexer_class, '__future_interface__', False)\n",
" if future_interface:\n",
" return lexer_class\n",
" else:\n",
" class CustomLexerWrapper(Lexer):\n",
" def __init__(self, lexer_conf):\n",
" self.lexer = lexer_class(lexer_conf)\n",
" def lex(self, lexer_state, parser_state):\n",
" return self.lexer.lex(lexer_state.text)\n",
" return CustomLexerWrapper\n",
"\n",
"\n",
"def _deserialize_parsing_frontend(data, memo, lexer_conf, callbacks, options):\n",
" parser_conf = ParserConf.deserialize(data['parser_conf'], memo)\n",
" cls = (options and options._plugins.get('LALR_Parser')) or LALR_Parser\n",
" parser = cls.deserialize(data['parser'], memo, callbacks, options.debug)\n",
" parser_conf.callbacks = callbacks\n",
" return ParsingFrontend(lexer_conf, parser_conf, options, parser=parser)\n",
"\n",
"\n",
"_parser_creators: 'Dict[str, Callable[[LexerConf, Any, Any], Any]]' = {}\n",
"\n",
"\n",
"class ParsingFrontend(Serialize):\n",
" __serialize_fields__ = 'lexer_conf', 'parser_conf', 'parser'\n",
"\n",
" lexer_conf: LexerConf\n",
" parser_conf: ParserConf\n",
" options: Any\n",
"\n",
" def __init__(self, lexer_conf: LexerConf, parser_conf: ParserConf, options, parser=None):\n",
" self.parser_conf = parser_conf\n",
" self.lexer_conf = lexer_conf\n",
" self.options = options\n",
"\n",
" ##\n",
"\n",
" if parser: ##\n",
"\n",
" self.parser = parser\n",
" else:\n",
" create_parser = _parser_creators.get(parser_conf.parser_type)\n",
" assert create_parser is not None, \"{} is not supported in standalone mode\".format(\n",
" parser_conf.parser_type\n",
" )\n",
" self.parser = create_parser(lexer_conf, parser_conf, options)\n",
"\n",
" ##\n",
"\n",
" lexer_type = lexer_conf.lexer_type\n",
" self.skip_lexer = False\n",
" if lexer_type in ('dynamic', 'dynamic_complete'):\n",
" assert lexer_conf.postlex is None\n",
" self.skip_lexer = True\n",
" return\n",
"\n",
" if isinstance(lexer_type, type):\n",
" assert issubclass(lexer_type, Lexer)\n",
" self.lexer = _wrap_lexer(lexer_type)(lexer_conf)\n",
" elif isinstance(lexer_type, str):\n",
" create_lexer = {\n",
" 'basic': create_basic_lexer,\n",
" 'contextual': create_contextual_lexer,\n",
" }[lexer_type]\n",
" self.lexer = create_lexer(lexer_conf, self.parser, lexer_conf.postlex, options)\n",
" else:\n",
" raise TypeError(\"Bad value for lexer_type: {lexer_type}\")\n",
"\n",
" if lexer_conf.postlex:\n",
" self.lexer = PostLexConnector(self.lexer, lexer_conf.postlex)\n",
"\n",
" def _verify_start(self, start=None):\n",
" if start is None:\n",
" start_decls = self.parser_conf.start\n",
" if len(start_decls) > 1:\n",
" raise ConfigurationError(\"Lark initialized with more than 1 possible start rule. Must specify which start rule to parse\", start_decls)\n",
" start ,= start_decls\n",
" elif start not in self.parser_conf.start:\n",
" raise ConfigurationError(\"Unknown start rule %s. Must be one of %r\" % (start, self.parser_conf.start))\n",
" return start\n",
"\n",
" def _make_lexer_thread(self, text: str) -> Union[str, LexerThread]:\n",
" cls = (self.options and self.options._plugins.get('LexerThread')) or LexerThread\n",
" return text if self.skip_lexer else cls.from_text(self.lexer, text)\n",
"\n",
" def parse(self, text: str, start=None, on_error=None):\n",
" chosen_start = self._verify_start(start)\n",
" kw = {} if on_error is None else {'on_error': on_error}\n",
" stream = self._make_lexer_thread(text)\n",
" return self.parser.parse(stream, chosen_start, **kw)\n",
"\n",
" def parse_interactive(self, text: Optional[str]=None, start=None):\n",
" ##\n",
"\n",
" ##\n",
"\n",
" chosen_start = self._verify_start(start)\n",
" if self.parser_conf.parser_type != 'lalr':\n",
" raise ConfigurationError(\"parse_interactive() currently only works with parser='lalr' \")\n",
" stream = self._make_lexer_thread(text) ##\n",
"\n",
" return self.parser.parse_interactive(stream, chosen_start)\n",
"\n",
"\n",
"def _validate_frontend_args(parser, lexer) -> None:\n",
" assert_config(parser, ('lalr', 'earley', 'cyk'))\n",
" if not isinstance(lexer, type): ##\n",
"\n",
" expected = {\n",
" 'lalr': ('basic', 'contextual'),\n",
" 'earley': ('basic', 'dynamic', 'dynamic_complete'),\n",
" 'cyk': ('basic', ),\n",
" }[parser]\n",
" assert_config(lexer, expected, 'Parser %r does not support lexer %%r, expected one of %%s' % parser)\n",
"\n",
"\n",
"def _get_lexer_callbacks(transformer, terminals):\n",
" result = {}\n",
" for terminal in terminals:\n",
" callback = getattr(transformer, terminal.name, None)\n",
" if callback is not None:\n",
" result[terminal.name] = callback\n",
" return result\n",
"\n",
"class PostLexConnector:\n",
" def __init__(self, lexer, postlexer):\n",
" self.lexer = lexer\n",
" self.postlexer = postlexer\n",
"\n",
" def lex(self, lexer_state, parser_state):\n",
" i = self.lexer.lex(lexer_state, parser_state)\n",
" return self.postlexer.process(i)\n",
"\n",
"\n",
"\n",
"def create_basic_lexer(lexer_conf, parser, postlex, options) -> BasicLexer:\n",
" cls = (options and options._plugins.get('BasicLexer')) or BasicLexer\n",
" return cls(lexer_conf)\n",
"\n",
"def create_contextual_lexer(lexer_conf: LexerConf, parser, postlex, options) -> ContextualLexer:\n",
" cls = (options and options._plugins.get('ContextualLexer')) or ContextualLexer\n",
" parse_table: ParseTableBase[int] = parser._parse_table\n",
" states: Dict[int, Collection[str]] = {idx:list(t.keys()) for idx, t in parse_table.states.items()}\n",
" always_accept: Collection[str] = postlex.always_accept if postlex else ()\n",
" return cls(lexer_conf, states, always_accept=always_accept)\n",
"\n",
"def create_lalr_parser(lexer_conf: LexerConf, parser_conf: ParserConf, options=None) -> LALR_Parser:\n",
" debug = options.debug if options else False\n",
" strict = options.strict if options else False\n",
" cls = (options and options._plugins.get('LALR_Parser')) or LALR_Parser\n",
" return cls(parser_conf, debug=debug, strict=strict)\n",
"\n",
"_parser_creators['lalr'] = create_lalr_parser\n",
"\n",
"\n",
"\n",
"\n",
"class PostLex(ABC):\n",
" @abstractmethod\n",
" def process(self, stream: Iterator[Token]) -> Iterator[Token]:\n",
" return stream\n",
"\n",
" always_accept: Iterable[str] = ()\n",
"\n",
"class LarkOptions(Serialize):\n",
" #--\n",
"\n",
" start: List[str]\n",
" debug: bool\n",
" strict: bool\n",
" transformer: 'Optional[Transformer]'\n",
" propagate_positions: Union[bool, str]\n",
" maybe_placeholders: bool\n",
" cache: Union[bool, str]\n",
" regex: bool\n",
" g_regex_flags: int\n",
" keep_all_tokens: bool\n",
" tree_class: Optional[Callable[[str, List], Any]]\n",
" parser: _ParserArgType\n",
" lexer: _LexerArgType\n",
" ambiguity: 'Literal[\"auto\", \"resolve\", \"explicit\", \"forest\"]'\n",
" postlex: Optional[PostLex]\n",
" priority: 'Optional[Literal[\"auto\", \"normal\", \"invert\"]]'\n",
" lexer_callbacks: Dict[str, Callable[[Token], Token]]\n",
" use_bytes: bool\n",
" ordered_sets: bool\n",
" edit_terminals: Optional[Callable[[TerminalDef], TerminalDef]]\n",
" import_paths: 'List[Union[str, Callable[[Union[None, str, PackageResource], str], Tuple[str, str]]]]'\n",
" source_path: Optional[str]\n",
"\n",
" OPTIONS_DOC = r\"\"\"\n",
" **=== General Options ===**\n",
"\n",
" start\n",
" The start symbol. Either a string, or a list of strings for multiple possible starts (Default: \"start\")\n",
" debug\n",
" Display debug information and extra warnings. Use only when debugging (Default: ``False``)\n",
" When used with Earley, it generates a forest graph as \"sppf.png\", if 'dot' is installed.\n",
" strict\n",
" Throw an exception on any potential ambiguity, including shift/reduce conflicts, and regex collisions.\n",
" transformer\n",
" Applies the transformer to every parse tree (equivalent to applying it after the parse, but faster)\n",
" propagate_positions\n",
" Propagates positional attributes into the 'meta' attribute of all tree branches.\n",
" Sets attributes: (line, column, end_line, end_column, start_pos, end_pos,\n",
" container_line, container_column, container_end_line, container_end_column)\n",
" Accepts ``False``, ``True``, or a callable, which will filter which nodes to ignore when propagating.\n",
" maybe_placeholders\n",
" When ``True``, the ``[]`` operator returns ``None`` when not matched.\n",
" When ``False``, ``[]`` behaves like the ``?`` operator, and returns no value at all.\n",
" (default= ``True``)\n",
" cache\n",
" Cache the results of the Lark grammar analysis, for x2 to x3 faster loading. LALR only for now.\n",
"\n",
" - When ``False``, does nothing (default)\n",
" - When ``True``, caches to a temporary file in the local directory\n",
" - When given a string, caches to the path pointed by the string\n",
" regex\n",
" When True, uses the ``regex`` module instead of the stdlib ``re``.\n",
" g_regex_flags\n",
" Flags that are applied to all terminals (both regex and strings)\n",
" keep_all_tokens\n",
" Prevent the tree builder from automagically removing \"punctuation\" tokens (Default: ``False``)\n",
" tree_class\n",
" Lark will produce trees comprised of instances of this class instead of the default ``lark.Tree``.\n",
"\n",
" **=== Algorithm Options ===**\n",
"\n",
" parser\n",
" Decides which parser engine to use. Accepts \"earley\" or \"lalr\". (Default: \"earley\").\n",
" (there is also a \"cyk\" option for legacy)\n",
" lexer\n",
" Decides whether or not to use a lexer stage\n",
"\n",
" - \"auto\" (default): Choose for me based on the parser\n",
" - \"basic\": Use a basic lexer\n",
" - \"contextual\": Stronger lexer (only works with parser=\"lalr\")\n",
" - \"dynamic\": Flexible and powerful (only with parser=\"earley\")\n",
" - \"dynamic_complete\": Same as dynamic, but tries *every* variation of tokenizing possible.\n",
" ambiguity\n",
" Decides how to handle ambiguity in the parse. Only relevant if parser=\"earley\"\n",
"\n",
" - \"resolve\": The parser will automatically choose the simplest derivation\n",
" (it chooses consistently: greedy for tokens, non-greedy for rules)\n",
" - \"explicit\": The parser will return all derivations wrapped in \"_ambig\" tree nodes (i.e. a forest).\n",
" - \"forest\": The parser will return the root of the shared packed parse forest.\n",
"\n",
" **=== Misc. / Domain Specific Options ===**\n",
"\n",
" postlex\n",
" Lexer post-processing (Default: ``None``) Only works with the basic and contextual lexers.\n",
" priority\n",
" How priorities should be evaluated - \"auto\", ``None``, \"normal\", \"invert\" (Default: \"auto\")\n",
" lexer_callbacks\n",
" Dictionary of callbacks for the lexer. May alter tokens during lexing. Use with caution.\n",
" use_bytes\n",
" Accept an input of type ``bytes`` instead of ``str``.\n",
" ordered_sets\n",
" Should Earley use ordered-sets to achieve stable output (~10% slower than regular sets. Default: True)\n",
" edit_terminals\n",
" A callback for editing the terminals before parse.\n",
" import_paths\n",
" A List of either paths or loader functions to specify from where grammars are imported\n",
" source_path\n",
" Override the source of from where the grammar was loaded. Useful for relative imports and unconventional grammar loading\n",
" **=== End of Options ===**\n",
" \"\"\"\n",
" if __doc__:\n",
" __doc__ += OPTIONS_DOC\n",
"\n",
"\n",
" ##\n",
"\n",
" ##\n",
"\n",
" ##\n",
"\n",
" ##\n",
"\n",
" ##\n",
"\n",
" ##\n",
"\n",
" _defaults: Dict[str, Any] = {\n",
" 'debug': False,\n",
" 'strict': False,\n",
" 'keep_all_tokens': False,\n",
" 'tree_class': None,\n",
" 'cache': False,\n",
" 'postlex': None,\n",
" 'parser': 'earley',\n",
" 'lexer': 'auto',\n",
" 'transformer': None,\n",
" 'start': 'start',\n",
" 'priority': 'auto',\n",
" 'ambiguity': 'auto',\n",
" 'regex': False,\n",
" 'propagate_positions': False,\n",
" 'lexer_callbacks': {},\n",
" 'maybe_placeholders': True,\n",
" 'edit_terminals': None,\n",
" 'g_regex_flags': 0,\n",
" 'use_bytes': False,\n",
" 'ordered_sets': True,\n",
" 'import_paths': [],\n",
" 'source_path': None,\n",
" '_plugins': {},\n",
" }\n",
"\n",
" def __init__(self, options_dict: Dict[str, Any]) -> None:\n",
" o = dict(options_dict)\n",
"\n",
" options = {}\n",
" for name, default in self._defaults.items():\n",
" if name in o:\n",
" value = o.pop(name)\n",
" if isinstance(default, bool) and name not in ('cache', 'use_bytes', 'propagate_positions'):\n",
" value = bool(value)\n",
" else:\n",
" value = default\n",
"\n",
" options[name] = value\n",
"\n",
" if isinstance(options['start'], str):\n",
" options['start'] = [options['start']]\n",
"\n",
" self.__dict__['options'] = options\n",
"\n",
"\n",
" assert_config(self.parser, ('earley', 'lalr', 'cyk', None))\n",
"\n",
" if self.parser == 'earley' and self.transformer:\n",
" raise ConfigurationError('Cannot specify an embedded transformer when using the Earley algorithm. '\n",
" 'Please use your transformer on the resulting parse tree, or use a different algorithm (i.e. LALR)')\n",
"\n",
" if o:\n",
" raise ConfigurationError(\"Unknown options: %s\" % o.keys())\n",
"\n",
" def __getattr__(self, name: str) -> Any:\n",
" try:\n",
" return self.__dict__['options'][name]\n",
" except KeyError as e:\n",
" raise AttributeError(e)\n",
"\n",
" def __setattr__(self, name: str, value: str) -> None:\n",
" assert_config(name, self.options.keys(), \"%r isn't a valid option. Expected one of: %s\")\n",
" self.options[name] = value\n",
"\n",
" def serialize(self, memo = None) -> Dict[str, Any]:\n",
" return self.options\n",
"\n",
" @classmethod\n",
" def deserialize(cls, data: Dict[str, Any], memo: Dict[int, Union[TerminalDef, Rule]]) -> \"LarkOptions\":\n",
" return cls(data)\n",
"\n",
"\n",
"##\n",
"\n",
"##\n",
"\n",
"_LOAD_ALLOWED_OPTIONS = {'postlex', 'transformer', 'lexer_callbacks', 'use_bytes', 'debug', 'g_regex_flags', 'regex', 'propagate_positions', 'tree_class', '_plugins'}\n",
"\n",
"_VALID_PRIORITY_OPTIONS = ('auto', 'normal', 'invert', None)\n",
"_VALID_AMBIGUITY_OPTIONS = ('auto', 'resolve', 'explicit', 'forest')\n",
"\n",
"\n",
"_T = TypeVar('_T', bound=\"Lark\")\n",
"\n",
"class Lark(Serialize):\n",
" #--\n",
"\n",
" source_path: str\n",
" source_grammar: str\n",
" grammar: 'Grammar'\n",
" options: LarkOptions\n",
" lexer: Lexer\n",
" parser: 'ParsingFrontend'\n",
" terminals: Collection[TerminalDef]\n",
"\n",
" def __init__(self, grammar: 'Union[Grammar, str, IO[str]]', **options) -> None:\n",
" self.options = LarkOptions(options)\n",
" re_module: types.ModuleType\n",
"\n",
" ##\n",
"\n",
" use_regex = self.options.regex\n",
" if use_regex:\n",
" if _has_regex:\n",
" re_module = regex\n",
" else:\n",
" raise ImportError('`regex` module must be installed if calling `Lark(regex=True)`.')\n",
" else:\n",
" re_module = re\n",
"\n",
" ##\n",
"\n",
" if self.options.source_path is None:\n",
" try:\n",
" self.source_path = grammar.name ##\n",
"\n",
" except AttributeError:\n",
" self.source_path = '<string>'\n",
" else:\n",
" self.source_path = self.options.source_path\n",
"\n",
" ##\n",
"\n",
" try:\n",
" read = grammar.read ##\n",
"\n",
" except AttributeError:\n",
" pass\n",
" else:\n",
" grammar = read()\n",
"\n",
" cache_fn = None\n",
" cache_sha256 = None\n",
" if isinstance(grammar, str):\n",
" self.source_grammar = grammar\n",
" if self.options.use_bytes:\n",
" if not isascii(grammar):\n",
" raise ConfigurationError(\"Grammar must be ascii only, when use_bytes=True\")\n",
"\n",
" if self.options.cache:\n",
" if self.options.parser != 'lalr':\n",
" raise ConfigurationError(\"cache only works with parser='lalr' for now\")\n",
"\n",
" unhashable = ('transformer', 'postlex', 'lexer_callbacks', 'edit_terminals', '_plugins')\n",
" options_str = ''.join(k+str(v) for k, v in options.items() if k not in unhashable)\n",
" from . import __version__\n",
" s = grammar + options_str + __version__ + str(sys.version_info[:2])\n",
" cache_sha256 = sha256_digest(s)\n",
"\n",
" if isinstance(self.options.cache, str):\n",
" cache_fn = self.options.cache\n",
" else:\n",
" if self.options.cache is not True:\n",
" raise ConfigurationError(\"cache argument must be bool or str\")\n",
"\n",
" try:\n",
" username = getpass.getuser()\n",
" except Exception:\n",
" ##\n",
"\n",
" ##\n",
"\n",
" ##\n",
"\n",
" username = \"unknown\"\n",
"\n",
" cache_fn = tempfile.gettempdir() + \"/.lark_cache_%s_%s_%s_%s.tmp\" % (username, cache_sha256, *sys.version_info[:2])\n",
"\n",
" old_options = self.options\n",
" try:\n",
" with FS.open(cache_fn, 'rb') as f:\n",
" logger.debug('Loading grammar from cache: %s', cache_fn)\n",
" ##\n",
"\n",
" for name in (set(options) - _LOAD_ALLOWED_OPTIONS):\n",
" del options[name]\n",
" file_sha256 = f.readline().rstrip(b'\\n')\n",
" cached_used_files = pickle.load(f)\n",
" if file_sha256 == cache_sha256.encode('utf8') and verify_used_files(cached_used_files):\n",
" cached_parser_data = pickle.load(f)\n",
" self._load(cached_parser_data, **options)\n",
" return\n",
" except FileNotFoundError:\n",
" ##\n",
"\n",
" pass\n",
" except Exception: ##\n",
"\n",
" logger.exception(\"Failed to load Lark from cache: %r. We will try to carry on.\", cache_fn)\n",
"\n",
" ##\n",
"\n",
" ##\n",
"\n",
" self.options = old_options\n",
"\n",
"\n",
" ##\n",
"\n",
" self.grammar, used_files = load_grammar(grammar, self.source_path, self.options.import_paths, self.options.keep_all_tokens)\n",
" else:\n",
" assert isinstance(grammar, Grammar)\n",
" self.grammar = grammar\n",
"\n",
"\n",
" if self.options.lexer == 'auto':\n",
" if self.options.parser == 'lalr':\n",
" self.options.lexer = 'contextual'\n",
" elif self.options.parser == 'earley':\n",
" if self.options.postlex is not None:\n",
" logger.info(\"postlex can't be used with the dynamic lexer, so we use 'basic' instead. \"\n",
" \"Consider using lalr with contextual instead of earley\")\n",
" self.options.lexer = 'basic'\n",
" else:\n",
" self.options.lexer = 'dynamic'\n",
" elif self.options.parser == 'cyk':\n",
" self.options.lexer = 'basic'\n",
" else:\n",
" assert False, self.options.parser\n",
" lexer = self.options.lexer\n",
" if isinstance(lexer, type):\n",
" assert issubclass(lexer, Lexer) ##\n",
"\n",
" else:\n",
" assert_config(lexer, ('basic', 'contextual', 'dynamic', 'dynamic_complete'))\n",
" if self.options.postlex is not None and 'dynamic' in lexer:\n",
" raise ConfigurationError(\"Can't use postlex with a dynamic lexer. Use basic or contextual instead\")\n",
"\n",
" if self.options.ambiguity == 'auto':\n",
" if self.options.parser == 'earley':\n",
" self.options.ambiguity = 'resolve'\n",
" else:\n",
" assert_config(self.options.parser, ('earley', 'cyk'), \"%r doesn't support disambiguation. Use one of these parsers instead: %s\")\n",
"\n",
" if self.options.priority == 'auto':\n",
" self.options.priority = 'normal'\n",
"\n",
" if self.options.priority not in _VALID_PRIORITY_OPTIONS:\n",
" raise ConfigurationError(\"invalid priority option: %r. Must be one of %r\" % (self.options.priority, _VALID_PRIORITY_OPTIONS))\n",
" if self.options.ambiguity not in _VALID_AMBIGUITY_OPTIONS:\n",
" raise ConfigurationError(\"invalid ambiguity option: %r. Must be one of %r\" % (self.options.ambiguity, _VALID_AMBIGUITY_OPTIONS))\n",
"\n",
" if self.options.parser is None:\n",
" terminals_to_keep = '*'\n",
" elif self.options.postlex is not None:\n",
" terminals_to_keep = set(self.options.postlex.always_accept)\n",
" else:\n",
" terminals_to_keep = set()\n",
"\n",
" ##\n",
"\n",
" self.terminals, self.rules, self.ignore_tokens = self.grammar.compile(self.options.start, terminals_to_keep)\n",
"\n",
" if self.options.edit_terminals:\n",
" for t in self.terminals:\n",
" self.options.edit_terminals(t)\n",
"\n",
" self._terminals_dict = {t.name: t for t in self.terminals}\n",
"\n",
" ##\n",
"\n",
" if self.options.priority == 'invert':\n",
" for rule in self.rules:\n",
" if rule.options.priority is not None:\n",
" rule.options.priority = -rule.options.priority\n",
" for term in self.terminals:\n",
" term.priority = -term.priority\n",
" ##\n",
"\n",
" ##\n",
"\n",
" ##\n",
"\n",
" elif self.options.priority is None:\n",
" for rule in self.rules:\n",
" if rule.options.priority is not None:\n",
" rule.options.priority = None\n",
" for term in self.terminals:\n",
" term.priority = 0\n",
"\n",
" ##\n",
"\n",
" self.lexer_conf = LexerConf(\n",
" self.terminals, re_module, self.ignore_tokens, self.options.postlex,\n",
" self.options.lexer_callbacks, self.options.g_regex_flags, use_bytes=self.options.use_bytes, strict=self.options.strict\n",
" )\n",
"\n",
" if self.options.parser:\n",
" self.parser = self._build_parser()\n",
" elif lexer:\n",
" self.lexer = self._build_lexer()\n",
"\n",
" if cache_fn:\n",
" logger.debug('Saving grammar to cache: %s', cache_fn)\n",
" try:\n",
" with FS.open(cache_fn, 'wb') as f:\n",
" assert cache_sha256 is not None\n",
" f.write(cache_sha256.encode('utf8') + b'\\n')\n",
" pickle.dump(used_files, f)\n",
" self.save(f, _LOAD_ALLOWED_OPTIONS)\n",
" except IOError as e:\n",
" logger.exception(\"Failed to save Lark to cache: %r.\", cache_fn, e)\n",
"\n",
" if __doc__:\n",
" __doc__ += \"\\n\\n\" + LarkOptions.OPTIONS_DOC\n",
"\n",
" __serialize_fields__ = 'parser', 'rules', 'options'\n",
"\n",
" def _build_lexer(self, dont_ignore: bool=False) -> BasicLexer:\n",
" lexer_conf = self.lexer_conf\n",
" if dont_ignore:\n",
" from copy import copy\n",
" lexer_conf = copy(lexer_conf)\n",
" lexer_conf.ignore = ()\n",
" return BasicLexer(lexer_conf)\n",
"\n",
" def _prepare_callbacks(self) -> None:\n",
" self._callbacks = {}\n",
" ##\n",
"\n",
" if self.options.ambiguity != 'forest':\n",
" self._parse_tree_builder = ParseTreeBuilder(\n",
" self.rules,\n",
" self.options.tree_class or Tree,\n",
" self.options.propagate_positions,\n",
" self.options.parser != 'lalr' and self.options.ambiguity == 'explicit',\n",
" self.options.maybe_placeholders\n",
" )\n",
" self._callbacks = self._parse_tree_builder.create_callback(self.options.transformer)\n",
" self._callbacks.update(_get_lexer_callbacks(self.options.transformer, self.terminals))\n",
"\n",
" def _build_parser(self) -> \"ParsingFrontend\":\n",
" self._prepare_callbacks()\n",
" _validate_frontend_args(self.options.parser, self.options.lexer)\n",
" parser_conf = ParserConf(self.rules, self._callbacks, self.options.start)\n",
" return _construct_parsing_frontend(\n",
" self.options.parser,\n",
" self.options.lexer,\n",
" self.lexer_conf,\n",
" parser_conf,\n",
" options=self.options\n",
" )\n",
"\n",
" def save(self, f, exclude_options: Collection[str] = ()) -> None:\n",
" #--\n",
" if self.options.parser != 'lalr':\n",
" raise NotImplementedError(\"Lark.save() is only implemented for the LALR(1) parser.\")\n",
" data, m = self.memo_serialize([TerminalDef, Rule])\n",
" if exclude_options:\n",
" data[\"options\"] = {n: v for n, v in data[\"options\"].items() if n not in exclude_options}\n",
" pickle.dump({'data': data, 'memo': m}, f, protocol=pickle.HIGHEST_PROTOCOL)\n",
"\n",
" @classmethod\n",
" def load(cls: Type[_T], f) -> _T:\n",
" #--\n",
" inst = cls.__new__(cls)\n",
" return inst._load(f)\n",
"\n",
" def _deserialize_lexer_conf(self, data: Dict[str, Any], memo: Dict[int, Union[TerminalDef, Rule]], options: LarkOptions) -> LexerConf:\n",
" lexer_conf = LexerConf.deserialize(data['lexer_conf'], memo)\n",
" lexer_conf.callbacks = options.lexer_callbacks or {}\n",
" lexer_conf.re_module = regex if options.regex else re\n",
" lexer_conf.use_bytes = options.use_bytes\n",
" lexer_conf.g_regex_flags = options.g_regex_flags\n",
" lexer_conf.skip_validation = True\n",
" lexer_conf.postlex = options.postlex\n",
" return lexer_conf\n",
"\n",
" def _load(self: _T, f: Any, **kwargs) -> _T:\n",
" if isinstance(f, dict):\n",
" d = f\n",
" else:\n",
" d = pickle.load(f)\n",
" memo_json = d['memo']\n",
" data = d['data']\n",
"\n",
" assert memo_json\n",
" memo = SerializeMemoizer.deserialize(memo_json, {'Rule': Rule, 'TerminalDef': TerminalDef}, {})\n",
" options = dict(data['options'])\n",
" if (set(kwargs) - _LOAD_ALLOWED_OPTIONS) & set(LarkOptions._defaults):\n",
" raise ConfigurationError(\"Some options are not allowed when loading a Parser: {}\"\n",
" .format(set(kwargs) - _LOAD_ALLOWED_OPTIONS))\n",
" options.update(kwargs)\n",
" self.options = LarkOptions.deserialize(options, memo)\n",
" self.rules = [Rule.deserialize(r, memo) for r in data['rules']]\n",
" self.source_path = '<deserialized>'\n",
" _validate_frontend_args(self.options.parser, self.options.lexer)\n",
" self.lexer_conf = self._deserialize_lexer_conf(data['parser'], memo, self.options)\n",
" self.terminals = self.lexer_conf.terminals\n",
" self._prepare_callbacks()\n",
" self._terminals_dict = {t.name: t for t in self.terminals}\n",
" self.parser = _deserialize_parsing_frontend(\n",
" data['parser'],\n",
" memo,\n",
" self.lexer_conf,\n",
" self._callbacks,\n",
" self.options, ##\n",
"\n",
" )\n",
" return self\n",
"\n",
" @classmethod\n",
" def _load_from_dict(cls, data, memo, **kwargs):\n",
" inst = cls.__new__(cls)\n",
" return inst._load({'data': data, 'memo': memo}, **kwargs)\n",
"\n",
" @classmethod\n",
" def open(cls: Type[_T], grammar_filename: str, rel_to: Optional[str]=None, **options) -> _T:\n",
" #--\n",
" if rel_to:\n",
" basepath = os.path.dirname(rel_to)\n",
" grammar_filename = os.path.join(basepath, grammar_filename)\n",
" with open(grammar_filename, encoding='utf8') as f:\n",
" return cls(f, **options)\n",
"\n",
" @classmethod\n",
" def open_from_package(cls: Type[_T], package: str, grammar_path: str, search_paths: 'Sequence[str]'=[\"\"], **options) -> _T:\n",
" #--\n",
" package_loader = FromPackageLoader(package, search_paths)\n",
" full_path, text = package_loader(None, grammar_path)\n",
" options.setdefault('source_path', full_path)\n",
" options.setdefault('import_paths', [])\n",
" options['import_paths'].append(package_loader)\n",
" return cls(text, **options)\n",
"\n",
" def __repr__(self):\n",
" return 'Lark(open(%r), parser=%r, lexer=%r, ...)' % (self.source_path, self.options.parser, self.options.lexer)\n",
"\n",
"\n",
" def lex(self, text: str, dont_ignore: bool=False) -> Iterator[Token]:\n",
" #--\n",
" lexer: Lexer\n",
" if not hasattr(self, 'lexer') or dont_ignore:\n",
" lexer = self._build_lexer(dont_ignore)\n",
" else:\n",
" lexer = self.lexer\n",
" lexer_thread = LexerThread.from_text(lexer, text)\n",
" stream = lexer_thread.lex(None)\n",
" if self.options.postlex:\n",
" return self.options.postlex.process(stream)\n",
" return stream\n",
"\n",
" def get_terminal(self, name: str) -> TerminalDef:\n",
" #--\n",
" return self._terminals_dict[name]\n",
"\n",
" def parse_interactive(self, text: Optional[str]=None, start: Optional[str]=None) -> 'InteractiveParser':\n",
" #--\n",
" return self.parser.parse_interactive(text, start=start)\n",
"\n",
" def parse(self, text: str, start: Optional[str]=None, on_error: 'Optional[Callable[[UnexpectedInput], bool]]'=None) -> 'ParseTree':\n",
" #--\n",
" return self.parser.parse(text, start=start, on_error=on_error)\n",
"\n",
"\n",
"\n",
"\n",
"class DedentError(LarkError):\n",
" pass\n",
"\n",
"class Indenter(PostLex, ABC):\n",
" paren_level: int\n",
" indent_level: List[int]\n",
"\n",
" def __init__(self) -> None:\n",
" self.paren_level = 0\n",
" self.indent_level = [0]\n",
" assert self.tab_len > 0\n",
"\n",
" def handle_NL(self, token: Token) -> Iterator[Token]:\n",
" if self.paren_level > 0:\n",
" return\n",
"\n",
" yield token\n",
"\n",
" indent_str = token.rsplit('\\n', 1)[1] ##\n",
"\n",
" indent = indent_str.count(' ') + indent_str.count('\\t') * self.tab_len\n",
"\n",
" if indent > self.indent_level[-1]:\n",
" self.indent_level.append(indent)\n",
" yield Token.new_borrow_pos(self.INDENT_type, indent_str, token)\n",
" else:\n",
" while indent < self.indent_level[-1]:\n",
" self.indent_level.pop()\n",
" yield Token.new_borrow_pos(self.DEDENT_type, indent_str, token)\n",
"\n",
" if indent != self.indent_level[-1]:\n",
" raise DedentError('Unexpected dedent to column %s. Expected dedent to %s' % (indent, self.indent_level[-1]))\n",
"\n",
" def _process(self, stream):\n",
" for token in stream:\n",
" if token.type == self.NL_type:\n",
" yield from self.handle_NL(token)\n",
" else:\n",
" yield token\n",
"\n",
" if token.type in self.OPEN_PAREN_types:\n",
" self.paren_level += 1\n",
" elif token.type in self.CLOSE_PAREN_types:\n",
" self.paren_level -= 1\n",
" assert self.paren_level >= 0\n",
"\n",
" while len(self.indent_level) > 1:\n",
" self.indent_level.pop()\n",
" yield Token(self.DEDENT_type, '')\n",
"\n",
" assert self.indent_level == [0], self.indent_level\n",
"\n",
" def process(self, stream):\n",
" self.paren_level = 0\n",
" self.indent_level = [0]\n",
" return self._process(stream)\n",
"\n",
" ##\n",
"\n",
" @property\n",
" def always_accept(self):\n",
" return (self.NL_type,)\n",
"\n",
" @property\n",
" @abstractmethod\n",
" def NL_type(self) -> str:\n",
" raise NotImplementedError()\n",
"\n",
" @property\n",
" @abstractmethod\n",
" def OPEN_PAREN_types(self) -> List[str]:\n",
" raise NotImplementedError()\n",
"\n",
" @property\n",
" @abstractmethod\n",
" def CLOSE_PAREN_types(self) -> List[str]:\n",
" raise NotImplementedError()\n",
"\n",
" @property\n",
" @abstractmethod\n",
" def INDENT_type(self) -> str:\n",
" raise NotImplementedError()\n",
"\n",
" @property\n",
" @abstractmethod\n",
" def DEDENT_type(self) -> str:\n",
" raise NotImplementedError()\n",
"\n",
" @property\n",
" @abstractmethod\n",
" def tab_len(self) -> int:\n",
" raise NotImplementedError()\n",
"\n",
"\n",
"class PythonIndenter(Indenter):\n",
" NL_type = '_NEWLINE'\n",
" OPEN_PAREN_types = ['LPAR', 'LSQB', 'LBRACE']\n",
" CLOSE_PAREN_types = ['RPAR', 'RSQB', 'RBRACE']\n",
" INDENT_type = '_INDENT'\n",
" DEDENT_type = '_DEDENT'\n",
" tab_len = 8\n",
"\n",
"\n",
"import pickle, zlib, base64\n",
"DATA = (\n",
"{'parser': {'lexer_conf': {'terminals': [{'@': 0}, {'@': 1}, {'@': 2}, {'@': 3}, {'@': 4}, {'@': 5}, {'@': 6}, {'@': 7}, {'@': 8}, {'@': 9}, {'@': 10}, {'@': 11}, {'@': 12}, {'@': 13}, {'@': 14}, {'@': 15}, {'@': 16}, {'@': 17}, {'@': 18}, {'@': 19}, {'@': 20}, {'@': 21}, {'@': 22}, {'@': 23}, {'@': 24}, {'@': 25}], 'ignore': ['WS_INLINE'], 'g_regex_flags': 0, 'use_bytes': False, 'lexer_type': 'contextual', '__type__': 'LexerConf'}, 'parser_conf': {'rules': [{'@': 26}, {'@': 27}, {'@': 28}, {'@': 29}, {'@': 30}, {'@': 31}, {'@': 32}, {'@': 33}, {'@': 34}, {'@': 35}, {'@': 36}, {'@': 37}, {'@': 38}, {'@': 39}, {'@': 40}, {'@': 41}, {'@': 42}, {'@': 43}, {'@': 44}, {'@': 45}, {'@': 46}, {'@': 47}, {'@': 48}, {'@': 49}, {'@': 50}, {'@': 51}, {'@': 52}, {'@': 53}, {'@': 54}, {'@': 55}, {'@': 56}, {'@': 57}, {'@': 58}, {'@': 59}, {'@': 60}, {'@': 61}, {'@': 62}, {'@': 63}, {'@': 64}, {'@': 65}, {'@': 66}, {'@': 67}, {'@': 68}, {'@': 69}, {'@': 70}, {'@': 71}, {'@': 72}, {'@': 73}, {'@': 74}, {'@': 75}, {'@': 76}, {'@': 77}, {'@': 78}, {'@': 79}, {'@': 80}, {'@': 81}, {'@': 82}], 'start': ['start'], 'parser_type': 'lalr', '__type__': 'ParserConf'}, 'parser': {'tokens': {0: 'EQUAL', 1: 'CR', 2: 'LF', 3: 'ST_COMMENT', 4: 'LESSTHAN', 5: '__ANON_4', 6: '__ANON_3', 7: 'MORETHAN', 8: '__ANON_2', 9: '__ANON_5', 10: 'PERCENT', 11: 'STAR', 12: 'PLUS', 13: 'SLASH', 14: 'MINUS', 15: '$END', 16: 'IF', 17: 'WHILE', 18: 'DOLLAR', 19: 'END', 20: 'RETURN', 21: 'assignment', 22: 'lineend', 23: 'branching', 24: 'statement', 25: '__statements_star_0', 26: 'statements', 27: 'repeating', 28: 'const_m', 29: 'lt', 30: 'eq', 31: 'value', 32: 'const_n', 33: 'M', 34: 'condition', 35: 'ge', 36: 'le', 37: 'gt', 38: 'variable', 39: 'const_k', 40: 'number', 41: '__ANON_0', 42: 'K', 43: 'N', 44: 'ne', 45: 'mul', 46: 'expression', 47: 'add', 48: 'sub', 49: 'div', 50: 'rem', 51: 'program', 52: 'start', 53: 'var_ident', 54: '__ANON_1', 55: '__lineends_star_1', 56: 'lineends'}, 'states': {0: {0: (0, 22)}, 1: {1: (1, {'@': 54}), 2: (1, {'@': 54}), 3: (1, {'@': 54})}, 2: {4: (1, {'@': 29}), 5: (1, {'@': 29}), 6: (1, {'@': 29}), 7: (1, {'@': 29}), 8: (1, {'@': 29}), 9: (1, {'@': 29}), 1: (1, {'@': 29}), 3: (1, {'@': 29}), 2: (1, {'@': 29}), 10: (1, {'@': 29}), 11: (1, {'@': 29}), 12: (1, {'@': 29}), 13: (1, {'@': 29}), 14: (1, {'@': 29}), 15: (1, {'@': 29})}, 3: {1: (1, {'@': 58}), 16: (1, {'@': 58}), 17: (1, {'@': 58}), 3: (1, {'@': 58}), 2: (1, {'@': 58}), 18: (1, {'@': 58}), 19: (1, {'@': 58}), 20: (1, {'@': 58})}, 4: {1: (1, {'@': 52}), 2: (1, {'@': 52}), 3: (1, {'@': 52})}, 5: {1: (1, {'@': 56}), 2: (1, {'@': 56}), 3: (1, {'@': 56})}, 6: {15: (1, {'@': 27})}, 7: {1: (0, 42), 21: (0, 37), 17: (0, 17), 2: (0, 27), 22: (0, 26), 3: (0, 16), 23: (0, 72), 18: (0, 76), 24: (0, 44), 25: (0, 62), 16: (0, 46), 26: (0, 71), 27: (0, 43), 19: (1, {'@': 40})}, 8: {1: (1, {'@': 53}), 2: (1, {'@': 53}), 3: (1, {'@': 53})}, 9: {1: (1, {'@': 70}), 2: (1, {'@': 70}), 3: (1, {'@': 70})}, 10: {1: (1, {'@': 55}), 2: (1, {'@': 55}), 3: (1, {'@': 55})}, 11: {1: (1, {'@': 64}), 2: (1, {'@': 64}), 3: (1, {'@': 64})}, 12: {1: (1, {'@': 81}), 2: (1, {'@': 81}), 15: (1, {'@': 81}), 3: (1, {'@': 81})}, 13: {1: (1, {'@': 59}), 2: (1, {'@': 59}), 3: (1, {'@': 59})}, 14: {0: (1, {'@': 38}), 1: (1, {'@': 38}), 10: (1, {'@': 38}), 6: (1, {'@': 38}), 11: (1, {'@': 38}), 14: (1, {'@': 38}), 2: (1, {'@': 38}), 9: (1, {'@': 38}), 4: (1, {'@': 38}), 5: (1, {'@': 38}), 7: (1, {'@': 38}), 12: (1, {'@': 38}), 8: (1, {'@': 38}), 13: (1, {'@': 38}), 3: (1, {'@': 38}), 
15: (1, {'@': 38})}, 15: {1: (1, {'@': 57}), 16: (1, {'@': 57}), 17: (1, {'@': 57}), 3: (1, {'@': 57}), 2: (1, {'@': 57}), 18: (1, {'@': 57}), 19: (1, {'@': 57}), 20: (1, {'@': 57})}, 16: {1: (0, 38), 2: (0, 59)}, 17: {28: (0, 51), 29: (0, 11), 30: (0, 13), 31: (0, 40), 32: (0, 20), 33: (0, 53), 34: (0, 56), 35: (0, 57), 36: (0, 60), 37: (0, 66), 38: (0, 69), 39: (0, 2), 40: (0, 75), 41: (0, 83), 18: (0, 87), 42: (0, 28), 43: (0, 25), 44: (0, 47)}, 18: {28: (0, 51), 38: (0, 69), 39: (0, 2), 32: (0, 20), 40: (0, 75), 41: (0, 83), 18: (0, 87), 42: (0, 28), 43: (0, 25), 33: (0, 53), 31: (0, 23)}, 19: {1: (1, {'@': 65}), 2: (1, {'@': 65}), 3: (1, {'@': 65})}, 20: {4: (1, {'@': 31}), 5: (1, {'@': 31}), 6: (1, {'@': 31}), 7: (1, {'@': 31}), 8: (1, {'@': 31}), 9: (1, {'@': 31}), 1: (1, {'@': 31}), 3: (1, {'@': 31}), 2: (1, {'@': 31}), 10: (1, {'@': 31}), 11: (1, {'@': 31}), 12: (1, {'@': 31}), 13: (1, {'@': 31}), 14: (1, {'@': 31}), 15: (1, {'@': 31})}, 21: {1: (1, {'@': 68}), 2: (1, {'@': 68}), 3: (1, {'@': 68})}, 22: {28: (0, 51), 31: (0, 54), 32: (0, 20), 45: (0, 58), 33: (0, 53), 46: (0, 61), 47: (0, 63), 38: (0, 69), 39: (0, 2), 40: (0, 75), 41: (0, 83), 18: (0, 87), 42: (0, 28), 43: (0, 25), 48: (0, 64), 49: (0, 67), 50: (0, 68)}, 23: {1: (1, {'@': 67}), 2: (1, {'@': 67}), 3: (1, {'@': 67})}, 24: {28: (0, 51), 38: (0, 69), 39: (0, 2), 32: (0, 20), 40: (0, 75), 41: (0, 83), 31: (0, 81), 18: (0, 87), 42: (0, 28), 43: (0, 25), 33: (0, 53)}, 25: {4: (1, {'@': 36}), 5: (1, {'@': 36}), 8: (1, {'@': 36}), 6: (1, {'@': 36}), 7: (1, {'@': 36}), 9: (1, {'@': 36}), 1: (1, {'@': 36}), 2: (1, {'@': 36}), 3: (1, {'@': 36}), 10: (1, {'@': 36}), 11: (1, {'@': 36}), 12: (1, {'@': 36}), 13: (1, {'@': 36}), 14: (1, {'@': 36}), 15: (1, {'@': 36})}, 26: {1: (1, {'@': 44}), 16: (1, {'@': 44}), 17: (1, {'@': 44}), 3: (1, {'@': 44}), 2: (1, {'@': 44}), 18: (1, {'@': 44}), 19: (1, {'@': 44}), 20: (1, {'@': 44})}, 27: {1: (1, {'@': 78}), 16: (1, {'@': 78}), 17: (1, {'@': 78}), 3: (1, {'@': 78}), 2: (1, {'@': 78}), 18: (1, {'@': 78}), 19: (1, {'@': 78}), 15: (1, {'@': 78}), 20: (1, {'@': 78})}, 28: {4: (1, {'@': 34}), 5: (1, {'@': 34}), 8: (1, {'@': 34}), 6: (1, {'@': 34}), 7: (1, {'@': 34}), 9: (1, {'@': 34}), 1: (1, {'@': 34}), 2: (1, {'@': 34}), 3: (1, {'@': 34}), 10: (1, {'@': 34}), 11: (1, {'@': 34}), 12: (1, {'@': 34}), 13: (1, {'@': 34}), 14: (1, {'@': 34}), 15: (1, {'@': 34})}, 29: {1: (0, 38), 2: (0, 59), 15: (1, {'@': 73})}, 30: {1: (1, {'@': 75}), 16: (1, {'@': 75}), 17: (1, {'@': 75}), 3: (1, {'@': 75}), 2: (1, {'@': 75}), 18: (1, {'@': 75}), 19: (1, {'@': 75}), 15: (1, {'@': 75}), 20: (1, {'@': 75})}, 31: {22: (0, 73), 3: (0, 74), 1: (0, 42), 2: (0, 27), 15: (1, {'@': 72})}, 32: {20: (0, 24)}, 33: {1: (0, 42), 21: (0, 37), 17: (0, 17), 26: (0, 55), 2: (0, 27), 22: (0, 26), 3: (0, 16), 23: (0, 72), 18: (0, 76), 24: (0, 44), 25: (0, 62), 16: (0, 46), 27: (0, 43), 19: (1, {'@': 40})}, 34: {28: (0, 51), 31: (0, 9), 38: (0, 69), 39: (0, 2), 32: (0, 20), 40: (0, 75), 41: (0, 83), 18: (0, 87), 42: (0, 28), 43: (0, 25), 33: (0, 53)}, 35: {28: (0, 51), 38: (0, 69), 39: (0, 2), 32: (0, 20), 31: (0, 21), 40: (0, 75), 41: (0, 83), 18: (0, 87), 42: (0, 28), 43: (0, 25), 33: (0, 53)}, 36: {4: (1, {'@': 37}), 5: (1, {'@': 37}), 8: (1, {'@': 37}), 6: (1, {'@': 37}), 7: (1, {'@': 37}), 9: (1, {'@': 37}), 1: (1, {'@': 37}), 2: (1, {'@': 37}), 3: (1, {'@': 37}), 10: (1, {'@': 37}), 11: (1, {'@': 37}), 12: (1, {'@': 37}), 13: (1, {'@': 37}), 14: (1, {'@': 37}), 15: (1, {'@': 37})}, 37: {1: (1, {'@': 41}), 16: (1, {'@': 
41}), 17: (1, {'@': 41}), 3: (1, {'@': 41}), 2: (1, {'@': 41}), 18: (1, {'@': 41}), 19: (1, {'@': 41}), 20: (1, {'@': 41})}, 38: {2: (0, 30)}, 39: {28: (0, 51), 38: (0, 69), 39: (0, 2), 32: (0, 20), 40: (0, 75), 41: (0, 83), 18: (0, 87), 42: (0, 28), 43: (0, 25), 31: (0, 19), 33: (0, 53)}, 40: {4: (0, 34), 8: (0, 39), 9: (0, 35), 5: (0, 18), 7: (0, 50), 6: (0, 45)}, 41: {1: (1, {'@': 77}), 16: (1, {'@': 77}), 17: (1, {'@': 77}), 3: (1, {'@': 77}), 2: (1, {'@': 77}), 18: (1, {'@': 77}), 19: (1, {'@': 77}), 15: (1, {'@': 77}), 20: (1, {'@': 77})}, 42: {2: (0, 41)}, 43: {1: (1, {'@': 43}), 16: (1, {'@': 43}), 17: (1, {'@': 43}), 3: (1, {'@': 43}), 2: (1, {'@': 43}), 18: (1, {'@': 43}), 19: (1, {'@': 43}), 20: (1, {'@': 43})}, 44: {1: (1, {'@': 79}), 16: (1, {'@': 79}), 19: (1, {'@': 79}), 17: (1, {'@': 79}), 3: (1, {'@': 79}), 2: (1, {'@': 79}), 18: (1, {'@': 79}), 20: (1, {'@': 79})}, 45: {31: (0, 52), 28: (0, 51), 38: (0, 69), 39: (0, 2), 32: (0, 20), 40: (0, 75), 41: (0, 83), 18: (0, 87), 42: (0, 28), 43: (0, 25), 33: (0, 53)}, 46: {28: (0, 51), 34: (0, 85), 29: (0, 11), 30: (0, 13), 31: (0, 40), 32: (0, 20), 33: (0, 53), 35: (0, 57), 36: (0, 60), 37: (0, 66), 38: (0, 69), 39: (0, 2), 40: (0, 75), 41: (0, 83), 18: (0, 87), 42: (0, 28), 43: (0, 25), 44: (0, 47)}, 47: {1: (1, {'@': 60}), 2: (1, {'@': 60}), 3: (1, {'@': 60})}, 48: {1: (1, {'@': 69}), 2: (1, {'@': 69}), 3: (1, {'@': 69})}, 49: {1: (0, 42), 26: (0, 32), 21: (0, 37), 17: (0, 17), 2: (0, 27), 22: (0, 26), 3: (0, 16), 51: (0, 65), 23: (0, 72), 18: (0, 76), 52: (0, 89), 25: (0, 62), 24: (0, 44), 16: (0, 46), 27: (0, 43), 20: (1, {'@': 40})}, 50: {28: (0, 51), 38: (0, 69), 39: (0, 2), 32: (0, 20), 40: (0, 75), 41: (0, 83), 18: (0, 87), 42: (0, 28), 31: (0, 48), 43: (0, 25), 33: (0, 53)}, 51: {4: (1, {'@': 30}), 5: (1, {'@': 30}), 6: (1, {'@': 30}), 7: (1, {'@': 30}), 8: (1, {'@': 30}), 9: (1, {'@': 30}), 1: (1, {'@': 30}), 3: (1, {'@': 30}), 2: (1, {'@': 30}), 10: (1, {'@': 30}), 11: (1, {'@': 30}), 12: (1, {'@': 30}), 13: (1, {'@': 30}), 14: (1, {'@': 30}), 15: (1, {'@': 30})}, 52: {1: (1, {'@': 66}), 2: (1, {'@': 66}), 3: (1, {'@': 66})}, 53: {4: (1, {'@': 35}), 5: (1, {'@': 35}), 8: (1, {'@': 35}), 6: (1, {'@': 35}), 7: (1, {'@': 35}), 9: (1, {'@': 35}), 1: (1, {'@': 35}), 2: (1, {'@': 35}), 3: (1, {'@': 35}), 10: (1, {'@': 35}), 11: (1, {'@': 35}), 12: (1, {'@': 35}), 13: (1, {'@': 35}), 14: (1, {'@': 35}), 15: (1, {'@': 35})}, 54: {11: (0, 77), 12: (0, 80), 10: (0, 82), 14: (0, 84), 13: (0, 86), 1: (1, {'@': 46}), 2: (1, {'@': 46}), 3: (1, {'@': 46})}, 55: {19: (0, 70)}, 56: {22: (0, 33), 3: (0, 16), 1: (0, 42), 2: (0, 27)}, 57: {1: (1, {'@': 61}), 2: (1, {'@': 61}), 3: (1, {'@': 61})}, 58: {1: (1, {'@': 49}), 2: (1, {'@': 49}), 3: (1, {'@': 49})}, 59: {1: (1, {'@': 76}), 16: (1, {'@': 76}), 17: (1, {'@': 76}), 3: (1, {'@': 76}), 2: (1, {'@': 76}), 18: (1, {'@': 76}), 19: (1, {'@': 76}), 15: (1, {'@': 76}), 20: (1, {'@': 76})}, 60: {1: (1, {'@': 62}), 2: (1, {'@': 62}), 3: (1, {'@': 62})}, 61: {3: (0, 16), 1: (0, 42), 2: (0, 27), 22: (0, 88)}, 62: {3: (0, 16), 1: (0, 42), 21: (0, 37), 17: (0, 17), 16: (0, 46), 2: (0, 27), 22: (0, 26), 23: (0, 72), 18: (0, 76), 24: (0, 78), 27: (0, 43), 19: (1, {'@': 39}), 20: (1, {'@': 39})}, 63: {1: (1, {'@': 47}), 2: (1, {'@': 47}), 3: (1, {'@': 47})}, 64: {1: (1, {'@': 48}), 2: (1, {'@': 48}), 3: (1, {'@': 48})}, 65: {15: (1, {'@': 26})}, 66: {1: (1, {'@': 63}), 2: (1, {'@': 63}), 3: (1, {'@': 63})}, 67: {1: (1, {'@': 50}), 2: (1, {'@': 50}), 3: (1, {'@': 50})}, 68: {1: (1, {'@': 51}), 2: 
(1, {'@': 51}), 3: (1, {'@': 51})}, 69: {4: (1, {'@': 32}), 5: (1, {'@': 32}), 6: (1, {'@': 32}), 7: (1, {'@': 32}), 8: (1, {'@': 32}), 9: (1, {'@': 32}), 1: (1, {'@': 32}), 3: (1, {'@': 32}), 2: (1, {'@': 32}), 10: (1, {'@': 32}), 11: (1, {'@': 32}), 12: (1, {'@': 32}), 13: (1, {'@': 32}), 14: (1, {'@': 32}), 15: (1, {'@': 32})}, 70: {3: (0, 16), 1: (0, 42), 22: (0, 3), 2: (0, 27)}, 71: {19: (0, 79)}, 72: {1: (1, {'@': 42}), 16: (1, {'@': 42}), 17: (1, {'@': 42}), 3: (1, {'@': 42}), 2: (1, {'@': 42}), 18: (1, {'@': 42}), 19: (1, {'@': 42}), 20: (1, {'@': 42})}, 73: {1: (1, {'@': 82}), 2: (1, {'@': 82}), 15: (1, {'@': 82}), 3: (1, {'@': 82})}, 74: {2: (0, 59), 1: (0, 38), 15: (1, {'@': 71})}, 75: {4: (1, {'@': 28}), 5: (1, {'@': 28}), 6: (1, {'@': 28}), 7: (1, {'@': 28}), 8: (1, {'@': 28}), 9: (1, {'@': 28}), 1: (1, {'@': 28}), 3: (1, {'@': 28}), 2: (1, {'@': 28}), 10: (1, {'@': 28}), 11: (1, {'@': 28}), 12: (1, {'@': 28}), 13: (1, {'@': 28}), 14: (1, {'@': 28}), 15: (1, {'@': 28})}, 76: {53: (0, 0), 54: (0, 14)}, 77: {28: (0, 51), 31: (0, 1), 38: (0, 69), 39: (0, 2), 32: (0, 20), 40: (0, 75), 41: (0, 83), 18: (0, 87), 42: (0, 28), 43: (0, 25), 33: (0, 53)}, 78: {1: (1, {'@': 80}), 16: (1, {'@': 80}), 19: (1, {'@': 80}), 17: (1, {'@': 80}), 3: (1, {'@': 80}), 2: (1, {'@': 80}), 18: (1, {'@': 80}), 20: (1, {'@': 80})}, 79: {3: (0, 16), 1: (0, 42), 22: (0, 15), 2: (0, 27)}, 80: {28: (0, 51), 38: (0, 69), 39: (0, 2), 32: (0, 20), 40: (0, 75), 41: (0, 83), 18: (0, 87), 31: (0, 4), 42: (0, 28), 43: (0, 25), 33: (0, 53)}, 81: {3: (0, 29), 55: (0, 31), 1: (0, 42), 56: (0, 6), 2: (0, 27), 22: (0, 12), 15: (1, {'@': 74})}, 82: {28: (0, 51), 31: (0, 5), 38: (0, 69), 39: (0, 2), 32: (0, 20), 40: (0, 75), 41: (0, 83), 18: (0, 87), 42: (0, 28), 43: (0, 25), 33: (0, 53)}, 83: {4: (1, {'@': 33}), 5: (1, {'@': 33}), 8: (1, {'@': 33}), 6: (1, {'@': 33}), 7: (1, {'@': 33}), 9: (1, {'@': 33}), 1: (1, {'@': 33}), 2: (1, {'@': 33}), 3: (1, {'@': 33}), 10: (1, {'@': 33}), 11: (1, {'@': 33}), 12: (1, {'@': 33}), 13: (1, {'@': 33}), 14: (1, {'@': 33}), 15: (1, {'@': 33})}, 84: {28: (0, 51), 31: (0, 8), 38: (0, 69), 39: (0, 2), 32: (0, 20), 40: (0, 75), 41: (0, 83), 18: (0, 87), 42: (0, 28), 43: (0, 25), 33: (0, 53)}, 85: {3: (0, 16), 1: (0, 42), 2: (0, 27), 22: (0, 7)}, 86: {28: (0, 51), 38: (0, 69), 39: (0, 2), 32: (0, 20), 40: (0, 75), 41: (0, 83), 33: (0, 53), 18: (0, 87), 42: (0, 28), 43: (0, 25), 31: (0, 10)}, 87: {53: (0, 36), 54: (0, 14)}, 88: {1: (1, {'@': 45}), 16: (1, {'@': 45}), 17: (1, {'@': 45}), 3: (1, {'@': 45}), 2: (1, {'@': 45}), 18: (1, {'@': 45}), 19: (1, {'@': 45}), 20: (1, {'@': 45})}, 89: {}}, 'start_states': {'start': 49}, 'end_states': {'start': 89}}, '__type__': 'ParsingFrontend'}, 'rules': [{'@': 26}, {'@': 27}, {'@': 28}, {'@': 29}, {'@': 30}, {'@': 31}, {'@': 32}, {'@': 33}, {'@': 34}, {'@': 35}, {'@': 36}, {'@': 37}, {'@': 38}, {'@': 39}, {'@': 40}, {'@': 41}, {'@': 42}, {'@': 43}, {'@': 44}, {'@': 45}, {'@': 46}, {'@': 47}, {'@': 48}, {'@': 49}, {'@': 50}, {'@': 51}, {'@': 52}, {'@': 53}, {'@': 54}, {'@': 55}, {'@': 56}, {'@': 57}, {'@': 58}, {'@': 59}, {'@': 60}, {'@': 61}, {'@': 62}, {'@': 63}, {'@': 64}, {'@': 65}, {'@': 66}, {'@': 67}, {'@': 68}, {'@': 69}, {'@': 70}, {'@': 71}, {'@': 72}, {'@': 73}, {'@': 74}, {'@': 75}, {'@': 76}, {'@': 77}, {'@': 78}, {'@': 79}, {'@': 80}, {'@': 81}, {'@': 82}], 'options': {'debug': False, 'strict': False, 'keep_all_tokens': False, 'tree_class': None, 'cache': False, 'postlex': None, 'parser': 'lalr', 'lexer': 'contextual', 'transformer': 
None, 'start': ['start'], 'priority': 'normal', 'ambiguity': 'auto', 'regex': False, 'propagate_positions': False, 'lexer_callbacks': {}, 'maybe_placeholders': False, 'edit_terminals': None, 'g_regex_flags': 0, 'use_bytes': False, 'ordered_sets': True, 'import_paths': [], 'source_path': None, '_plugins': {}}, '__type__': 'Lark'}\n",
")\n",
"MEMO = (\n",
"{0: {'name': 'ST_COMMENT', 'pattern': {'value': '\\\\#[^\\n]*', 'flags': [], 'raw': None, '_width': [1, 18446744073709551616], '__type__': 'PatternRE'}, 'priority': 0, '__type__': 'TerminalDef'}, 1: {'name': 'CR', 'pattern': {'value': '\\r', 'flags': [], 'raw': '/\\\\r/', '_width': [1, 1], '__type__': 'PatternRE'}, 'priority': 0, '__type__': 'TerminalDef'}, 2: {'name': 'LF', 'pattern': {'value': '\\n', 'flags': [], 'raw': '/\\\\n/', '_width': [1, 1], '__type__': 'PatternRE'}, 'priority': 0, '__type__': 'TerminalDef'}, 3: {'name': 'WS_INLINE', 'pattern': {'value': '(?:(?:\\\\ |\\t))+', 'flags': [], 'raw': None, '_width': [1, 18446744073709551616], '__type__': 'PatternRE'}, 'priority': 0, '__type__': 'TerminalDef'}, 4: {'name': 'RETURN', 'pattern': {'value': 'return', 'flags': [], 'raw': '\"return\"', '__type__': 'PatternStr'}, 'priority': 0, '__type__': 'TerminalDef'}, 5: {'name': '__ANON_0', 'pattern': {'value': '[0-9]+', 'flags': [], 'raw': '/[0-9]+/', '_width': [1, 18446744073709551616], '__type__': 'PatternRE'}, 'priority': 0, '__type__': 'TerminalDef'}, 6: {'name': 'K', 'pattern': {'value': 'K', 'flags': [], 'raw': '\"K\"', '__type__': 'PatternStr'}, 'priority': 0, '__type__': 'TerminalDef'}, 7: {'name': 'M', 'pattern': {'value': 'M', 'flags': [], 'raw': '\"M\"', '__type__': 'PatternStr'}, 'priority': 0, '__type__': 'TerminalDef'}, 8: {'name': 'N', 'pattern': {'value': 'N', 'flags': [], 'raw': '\"N\"', '__type__': 'PatternStr'}, 'priority': 0, '__type__': 'TerminalDef'}, 9: {'name': 'DOLLAR', 'pattern': {'value': '$', 'flags': [], 'raw': '\"$\"', '__type__': 'PatternStr'}, 'priority': 0, '__type__': 'TerminalDef'}, 10: {'name': '__ANON_1', 'pattern': {'value': '[0-9]', 'flags': [], 'raw': '/[0-9]/', '_width': [1, 1], '__type__': 'PatternRE'}, 'priority': 0, '__type__': 'TerminalDef'}, 11: {'name': 'EQUAL', 'pattern': {'value': '=', 'flags': [], 'raw': '\"=\"', '__type__': 'PatternStr'}, 'priority': 0, '__type__': 'TerminalDef'}, 12: {'name': 'PLUS', 'pattern': {'value': '+', 'flags': [], 'raw': '\"+\"', '__type__': 'PatternStr'}, 'priority': 0, '__type__': 'TerminalDef'}, 13: {'name': 'MINUS', 'pattern': {'value': '-', 'flags': [], 'raw': '\"-\"', '__type__': 'PatternStr'}, 'priority': 0, '__type__': 'TerminalDef'}, 14: {'name': 'STAR', 'pattern': {'value': '*', 'flags': [], 'raw': '\"*\"', '__type__': 'PatternStr'}, 'priority': 0, '__type__': 'TerminalDef'}, 15: {'name': 'SLASH', 'pattern': {'value': '/', 'flags': [], 'raw': '\"/\"', '__type__': 'PatternStr'}, 'priority': 0, '__type__': 'TerminalDef'}, 16: {'name': 'PERCENT', 'pattern': {'value': '%', 'flags': [], 'raw': '\"%\"', '__type__': 'PatternStr'}, 'priority': 0, '__type__': 'TerminalDef'}, 17: {'name': 'IF', 'pattern': {'value': 'if', 'flags': [], 'raw': '\"if\"', '__type__': 'PatternStr'}, 'priority': 0, '__type__': 'TerminalDef'}, 18: {'name': 'END', 'pattern': {'value': 'end', 'flags': [], 'raw': '\"end\"', '__type__': 'PatternStr'}, 'priority': 0, '__type__': 'TerminalDef'}, 19: {'name': 'WHILE', 'pattern': {'value': 'while', 'flags': [], 'raw': '\"while\"', '__type__': 'PatternStr'}, 'priority': 0, '__type__': 'TerminalDef'}, 20: {'name': '__ANON_2', 'pattern': {'value': '==', 'flags': [], 'raw': '\"==\"', '__type__': 'PatternStr'}, 'priority': 0, '__type__': 'TerminalDef'}, 21: {'name': '__ANON_3', 'pattern': {'value': '!=', 'flags': [], 'raw': '\"!=\"', '__type__': 'PatternStr'}, 'priority': 0, '__type__': 'TerminalDef'}, 22: {'name': '__ANON_4', 'pattern': {'value': '>=', 'flags': [], 'raw': '\">=\"', '__type__': 
'PatternStr'}, 'priority': 0, '__type__': 'TerminalDef'}, 23: {'name': '__ANON_5', 'pattern': {'value': '<=', 'flags': [], 'raw': '\"<=\"', '__type__': 'PatternStr'}, 'priority': 0, '__type__': 'TerminalDef'}, 24: {'name': 'MORETHAN', 'pattern': {'value': '>', 'flags': [], 'raw': '\">\"', '__type__': 'PatternStr'}, 'priority': 0, '__type__': 'TerminalDef'}, 25: {'name': 'LESSTHAN', 'pattern': {'value': '<', 'flags': [], 'raw': '\"<\"', '__type__': 'PatternStr'}, 'priority': 0, '__type__': 'TerminalDef'}, 26: {'origin': {'name': Token('RULE', 'start'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'program', '__type__': 'NonTerminal'}], 'order': 0, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 27: {'origin': {'name': Token('RULE', 'program'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'statements', '__type__': 'NonTerminal'}, {'name': 'RETURN', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'value', '__type__': 'NonTerminal'}, {'name': 'lineends', '__type__': 'NonTerminal'}], 'order': 0, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 28: {'origin': {'name': Token('RULE', 'value'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'number', '__type__': 'NonTerminal'}], 'order': 0, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 29: {'origin': {'name': Token('RULE', 'value'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'const_k', '__type__': 'NonTerminal'}], 'order': 1, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 30: {'origin': {'name': Token('RULE', 'value'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'const_m', '__type__': 'NonTerminal'}], 'order': 2, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 31: {'origin': {'name': Token('RULE', 'value'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'const_n', '__type__': 'NonTerminal'}], 'order': 3, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 32: {'origin': {'name': Token('RULE', 'value'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'variable', '__type__': 'NonTerminal'}], 'order': 4, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 33: {'origin': {'name': Token('RULE', 'number'), '__type__': 'NonTerminal'}, 'expansion': [{'name': '__ANON_0', 'filter_out': False, '__type__': 'Terminal'}], 'order': 0, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 34: {'origin': {'name': Token('RULE', 'const_k'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'K', 'filter_out': True, '__type__': 'Terminal'}], 'order': 0, 'alias': None, 
'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 35: {'origin': {'name': Token('RULE', 'const_m'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'M', 'filter_out': True, '__type__': 'Terminal'}], 'order': 0, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 36: {'origin': {'name': Token('RULE', 'const_n'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'N', 'filter_out': True, '__type__': 'Terminal'}], 'order': 0, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 37: {'origin': {'name': Token('RULE', 'variable'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'DOLLAR', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'var_ident', '__type__': 'NonTerminal'}], 'order': 0, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 38: {'origin': {'name': Token('RULE', 'var_ident'), '__type__': 'NonTerminal'}, 'expansion': [{'name': '__ANON_1', 'filter_out': False, '__type__': 'Terminal'}], 'order': 0, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 39: {'origin': {'name': Token('RULE', 'statements'), '__type__': 'NonTerminal'}, 'expansion': [{'name': '__statements_star_0', '__type__': 'NonTerminal'}], 'order': 0, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 40: {'origin': {'name': Token('RULE', 'statements'), '__type__': 'NonTerminal'}, 'expansion': [], 'order': 1, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 41: {'origin': {'name': Token('RULE', 'statement'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'assignment', '__type__': 'NonTerminal'}], 'order': 0, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 42: {'origin': {'name': Token('RULE', 'statement'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'branching', '__type__': 'NonTerminal'}], 'order': 1, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 43: {'origin': {'name': Token('RULE', 'statement'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'repeating', '__type__': 'NonTerminal'}], 'order': 2, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 44: {'origin': {'name': Token('RULE', 'statement'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'lineend', '__type__': 'NonTerminal'}], 'order': 3, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': None, 'template_source': 
None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 45: {'origin': {'name': Token('RULE', 'assignment'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'DOLLAR', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'var_ident', '__type__': 'NonTerminal'}, {'name': 'EQUAL', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'expression', '__type__': 'NonTerminal'}, {'name': 'lineend', '__type__': 'NonTerminal'}], 'order': 0, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 46: {'origin': {'name': Token('RULE', 'expression'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'value', '__type__': 'NonTerminal'}], 'order': 0, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 47: {'origin': {'name': Token('RULE', 'expression'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'add', '__type__': 'NonTerminal'}], 'order': 1, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 48: {'origin': {'name': Token('RULE', 'expression'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'sub', '__type__': 'NonTerminal'}], 'order': 2, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 49: {'origin': {'name': Token('RULE', 'expression'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'mul', '__type__': 'NonTerminal'}], 'order': 3, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 50: {'origin': {'name': Token('RULE', 'expression'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'div', '__type__': 'NonTerminal'}], 'order': 4, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 51: {'origin': {'name': Token('RULE', 'expression'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'rem', '__type__': 'NonTerminal'}], 'order': 5, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 52: {'origin': {'name': Token('RULE', 'add'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'value', '__type__': 'NonTerminal'}, {'name': 'PLUS', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'value', '__type__': 'NonTerminal'}], 'order': 0, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 53: {'origin': {'name': Token('RULE', 'sub'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'value', '__type__': 'NonTerminal'}, {'name': 'MINUS', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'value', '__type__': 'NonTerminal'}], 'order': 0, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 54: {'origin': 
{'name': Token('RULE', 'mul'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'value', '__type__': 'NonTerminal'}, {'name': 'STAR', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'value', '__type__': 'NonTerminal'}], 'order': 0, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 55: {'origin': {'name': Token('RULE', 'div'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'value', '__type__': 'NonTerminal'}, {'name': 'SLASH', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'value', '__type__': 'NonTerminal'}], 'order': 0, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 56: {'origin': {'name': Token('RULE', 'rem'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'value', '__type__': 'NonTerminal'}, {'name': 'PERCENT', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'value', '__type__': 'NonTerminal'}], 'order': 0, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 57: {'origin': {'name': Token('RULE', 'branching'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'IF', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'condition', '__type__': 'NonTerminal'}, {'name': 'lineend', '__type__': 'NonTerminal'}, {'name': 'statements', '__type__': 'NonTerminal'}, {'name': 'END', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'lineend', '__type__': 'NonTerminal'}], 'order': 0, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 58: {'origin': {'name': Token('RULE', 'repeating'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'WHILE', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'condition', '__type__': 'NonTerminal'}, {'name': 'lineend', '__type__': 'NonTerminal'}, {'name': 'statements', '__type__': 'NonTerminal'}, {'name': 'END', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'lineend', '__type__': 'NonTerminal'}], 'order': 0, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 59: {'origin': {'name': Token('RULE', 'condition'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'eq', '__type__': 'NonTerminal'}], 'order': 0, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 60: {'origin': {'name': Token('RULE', 'condition'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'ne', '__type__': 'NonTerminal'}], 'order': 1, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 61: {'origin': {'name': Token('RULE', 'condition'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'ge', '__type__': 'NonTerminal'}], 'order': 2, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 62: {'origin': {'name': 
Token('RULE', 'condition'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'le', '__type__': 'NonTerminal'}], 'order': 3, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 63: {'origin': {'name': Token('RULE', 'condition'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'gt', '__type__': 'NonTerminal'}], 'order': 4, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 64: {'origin': {'name': Token('RULE', 'condition'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'lt', '__type__': 'NonTerminal'}], 'order': 5, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 65: {'origin': {'name': Token('RULE', 'eq'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'value', '__type__': 'NonTerminal'}, {'name': '__ANON_2', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'value', '__type__': 'NonTerminal'}], 'order': 0, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 66: {'origin': {'name': Token('RULE', 'ne'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'value', '__type__': 'NonTerminal'}, {'name': '__ANON_3', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'value', '__type__': 'NonTerminal'}], 'order': 0, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 67: {'origin': {'name': Token('RULE', 'ge'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'value', '__type__': 'NonTerminal'}, {'name': '__ANON_4', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'value', '__type__': 'NonTerminal'}], 'order': 0, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 68: {'origin': {'name': Token('RULE', 'le'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'value', '__type__': 'NonTerminal'}, {'name': '__ANON_5', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'value', '__type__': 'NonTerminal'}], 'order': 0, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 69: {'origin': {'name': Token('RULE', 'gt'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'value', '__type__': 'NonTerminal'}, {'name': 'MORETHAN', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'value', '__type__': 'NonTerminal'}], 'order': 0, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 70: {'origin': {'name': Token('RULE', 'lt'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'value', '__type__': 'NonTerminal'}, {'name': 'LESSTHAN', 'filter_out': True, '__type__': 'Terminal'}, {'name': 'value', '__type__': 'NonTerminal'}], 'order': 0, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': 
None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 71: {'origin': {'name': Token('RULE', 'lineends'), '__type__': 'NonTerminal'}, 'expansion': [{'name': '__lineends_star_1', '__type__': 'NonTerminal'}, {'name': 'ST_COMMENT', 'filter_out': False, '__type__': 'Terminal'}], 'order': 0, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 72: {'origin': {'name': Token('RULE', 'lineends'), '__type__': 'NonTerminal'}, 'expansion': [{'name': '__lineends_star_1', '__type__': 'NonTerminal'}], 'order': 1, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 73: {'origin': {'name': Token('RULE', 'lineends'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'ST_COMMENT', 'filter_out': False, '__type__': 'Terminal'}], 'order': 2, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 74: {'origin': {'name': Token('RULE', 'lineends'), '__type__': 'NonTerminal'}, 'expansion': [], 'order': 3, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 75: {'origin': {'name': Token('RULE', 'lineend'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'ST_COMMENT', 'filter_out': False, '__type__': 'Terminal'}, {'name': 'CR', 'filter_out': False, '__type__': 'Terminal'}, {'name': 'LF', 'filter_out': False, '__type__': 'Terminal'}], 'order': 0, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 76: {'origin': {'name': Token('RULE', 'lineend'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'ST_COMMENT', 'filter_out': False, '__type__': 'Terminal'}, {'name': 'LF', 'filter_out': False, '__type__': 'Terminal'}], 'order': 1, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 77: {'origin': {'name': Token('RULE', 'lineend'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'CR', 'filter_out': False, '__type__': 'Terminal'}, {'name': 'LF', 'filter_out': False, '__type__': 'Terminal'}], 'order': 2, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 78: {'origin': {'name': Token('RULE', 'lineend'), '__type__': 'NonTerminal'}, 'expansion': [{'name': 'LF', 'filter_out': False, '__type__': 'Terminal'}], 'order': 3, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': True, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 79: {'origin': {'name': '__statements_star_0', '__type__': 'NonTerminal'}, 'expansion': [{'name': 'statement', '__type__': 'NonTerminal'}], 'order': 0, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 80: {'origin': {'name': '__statements_star_0', '__type__': 'NonTerminal'}, 
'expansion': [{'name': '__statements_star_0', '__type__': 'NonTerminal'}, {'name': 'statement', '__type__': 'NonTerminal'}], 'order': 1, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 81: {'origin': {'name': '__lineends_star_1', '__type__': 'NonTerminal'}, 'expansion': [{'name': 'lineend', '__type__': 'NonTerminal'}], 'order': 0, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}, 82: {'origin': {'name': '__lineends_star_1', '__type__': 'NonTerminal'}, 'expansion': [{'name': '__lineends_star_1', '__type__': 'NonTerminal'}, {'name': 'lineend', '__type__': 'NonTerminal'}], 'order': 1, 'alias': None, 'options': {'keep_all_tokens': False, 'expand1': False, 'priority': None, 'template_source': None, 'empty_indices': (), '__type__': 'RuleOptions'}, '__type__': 'Rule'}}\n",
")\n",
"Shift = 0\n",
"Reduce = 1\n",
"def Lark_StandAlone(**kwargs):\n",
" return Lark._load_from_dict(DATA, MEMO, **kwargs)\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"cellView": "form",
"id": "Erh0o2HRIEsv"
},
"outputs": [],
"source": [
"# @title StLangEx Interpreter\n",
"class StLangExInterpreter(Interpreter):\n",
" \"\"\"StLangEx Interpreter\"\"\"\n",
" def __init__(self, k=0, m=0, n=0) -> None:\n",
" super().__init__()\n",
" assert 0 <= int(k) < 2**64 and 0 <= int(m) < 2**64 and 0 <= int(n) < 2**64, \"OutOfRangeNumber\"\n",
" self.env = { 'C': 0, 'K': int(k), 'M': int(m), 'N': int(n), '0': 0, '1': 0, '2': 0, '3': 0, '4': 0, '5': 0, '6': 0, '7': 0, '8': 0, '9': 0 }\n",
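" # 'C' counts executed assignment/if/while steps (capped at 1000 by the asserts below);\n",
" # 'K', 'M', 'N' hold the input constants and '0'..'9' are the ten StLangEx variables.\n",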
" def program(self, tree) -> int:\n",
" self.visit(tree.children[0])\n",
" return self.visit(tree.children[1])\n",
" def number(self, tree) -> int:\n",
" ret = int(tree.children[0].value)\n",
" assert 0 <= ret < 2**64, \"OutOfRangeNumber\"\n",
" return ret\n",
" def const_k(self, _tree) -> int:\n",
" return self.env['K']\n",
" def const_m(self, _tree) -> int:\n",
" return self.env['M']\n",
" def const_n(self, _tree) -> int:\n",
" return self.env['N']\n",
" def variable(self, tree) -> int:\n",
" return self.env[self.visit(tree.children[0])]\n",
" def var_ident(self, tree) -> str:\n",
" return str(tree.children[0].value)\n",
" def assignment(self, tree) -> None:\n",
" self.env['C'] += 1\n",
" assert self.env['C'] <= 1000, \"StepLimitExceed\"\n",
" self.env[self.visit(tree.children[0])] = self.visit(tree.children[1])\n",
" def add(self, tree) -> int:\n",
" ret = self.visit(tree.children[0]) + self.visit(tree.children[1])\n",
" assert 0 <= ret < 2**64, \"OutOfRangeNumber\"\n",
" return ret\n",
" def sub(self, tree) -> int:\n",
" ret = self.visit(tree.children[0]) - self.visit(tree.children[1])\n",
" assert 0 <= ret < 2**64, \"OutOfRangeNumber\"\n",
" return ret\n",
" def mul(self, tree) -> int:\n",
" ret = self.visit(tree.children[0]) * self.visit(tree.children[1])\n",
" assert 0 <= ret < 2**64, \"OutOfRangeNumber\"\n",
" return ret\n",
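" # div and rem perform floor division and modulo on non-negative integers;\n",
" # a zero divisor fails the 'OutOfRangeNumber' assertion instead of raising ZeroDivisionError.\n",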
" def div(self, tree) -> int:\n",
" lhs, rhs = self.visit(tree.children[0]), self.visit(tree.children[1])\n",
" assert 0 < rhs, \"OutOfRangeNumber\"\n",
" return lhs // rhs\n",
" def rem(self, tree) -> int:\n",
" lhs, rhs = self.visit(tree.children[0]), self.visit(tree.children[1])\n",
" assert 0 < rhs, \"OutOfRangeNumber\"\n",
" return lhs % rhs\n",
" def branching(self, tree) -> None:\n",
" self.env['C'] += 1\n",
" assert self.env['C'] <= 1000, \"StepLimitExceed\"\n",
" if self.visit(tree.children[0]): # condition\n",
" self.visit(tree.children[2]) # statements\n",
" def repeating(self, tree) -> None:\n",
" while True:\n",
" self.env['C'] += 1\n",
" assert self.env['C'] <= 1000, \"StepLimitExceed\"\n",
" if not self.visit(tree.children[0]): # condition\n",
" break\n",
" self.visit(tree.children[2]) # statements\n",
" def eq(self, tree) -> bool:\n",
" return self.visit(tree.children[0]) == self.visit(tree.children[1])\n",
" def ne(self, tree) -> bool:\n",
" return self.visit(tree.children[0]) != self.visit(tree.children[1])\n",
" def ge(self, tree) -> bool:\n",
" return self.visit(tree.children[0]) >= self.visit(tree.children[1])\n",
" def le(self, tree) -> bool:\n",
" return self.visit(tree.children[0]) <= self.visit(tree.children[1])\n",
" def gt(self, tree) -> bool:\n",
" return self.visit(tree.children[0]) > self.visit(tree.children[1])\n",
" def lt(self, tree) -> bool:\n",
" return self.visit(tree.children[0]) < self.visit(tree.children[1])\n"
]
},
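{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# @title StLangEx Smoke Test (illustrative addition)\n",
"# A minimal sanity check, not part of the original judge flow: it parses a tiny\n",
"# two-line StLangEx snippet with the standalone parser above and checks that the\n",
"# interpreter returns K + N for K=2, N=3. The name _smoke_tree is introduced here.\n",
"_smoke_tree = Lark_StandAlone().parse(\"$0 = K + N\\nreturn $0\\n\")\n",
"assert StLangExInterpreter(k=2, m=0, n=3).visit(_smoke_tree) == 5, \"smoke test failed\"\n"
]
},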
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "xYiFUZQ5icXu"
},
"outputs": [],
"source": [
"# @title StLangEx Executor\n",
"import itertools\n",
"import random\n",
"import sys\n",
"\n",
"### TODO: ここからを本番の問題用の正しいコードに書き換える\n",
"PROGRAM = r\"\"\"\n",
"# サンプルプログラム: 二項係数 nCk の計算 (N > 62 の時は演算オーバーフローする可能性あり)\n",
"if N >= K # N < K の時の出力は 0 とする\n",
" $0 = 1\n",
" $1 = 1\n",
" $2 = N\n",
" while $1 <= K # for $1 in 1..=K\n",
" $0 = $0 * $2 # $2 == N + 1 - $1\n",
" $0 = $0 / $1 # ここで割り算の余りは生じない\n",
" $2 = $2 - 1\n",
" $1 = $1 + 1\n",
" end\n",
"end\n",
"return $0\n",
"\n",
"# return 行より後のコメント行も可\n",
"\"\"\"\n",
"### TODO: ここまでを本番の問題用の正しいコードに書き換える\n",
"\n",
"\"\"\"ランダムテストケースの生成\"\"\"\n",
"def gen_test(_: int) -> tuple[int, int, int, int]:\n",
" K = min(random.randrange(1, 2**random.randrange(1, 32)), 10**9)\n",
" M = random.randrange(0, K+1)\n",
" N = min(random.randrange(1, 2**random.randrange(10, 32)), 10**9)\n",
" K, M = 1, 0\n",
" W = M - 1\n",
" assert 1 <= K and -1 <= W < K and 1 <= N\n",
" ### TODO: k,m,n の値から回答すべき値 an を生成するコードを書く\n",
" an = 0\n",
" return K, M, N, an\n",
"\n",
"### TODO: ランダムテストケースの生成を正しく書いた場合、ランダムテストケース生成数を書き換える\n",
"\"\"\"ランダムテストケースの生成数\"\"\"\n",
"GEN_CASES = 0\n",
"\n",
"\"\"\"StLangプログラムの構文木を構築\"\"\"\n",
"assert len(PROGRAM.splitlines()) <= 1000\n",
"STLANG_TREE = Lark_StandAlone().parse(PROGRAM)\n",
"\n",
"# テストケースなどを追加変更したい場合、以下を適宜書き換える\n",
"worst = 0\n",
"for k, m, n, an in itertools.chain([ # 本番問題用のサンプルケース\n",
" ( 3, 1, 5, 6 ),\n",
" ( 8, 1, 1, 1 ),\n",
" ( 10, 1, 1000000000, 1099019513 ),\n",
" ( 3, 0, 5, 6 ),\n",
" ( 8, 8, 1, 1 ),\n",
" ( 10, 6, 1000000000, 1099019514 ),\n",
"], map(gen_test, range(GEN_CASES))):\n",
" assert 1 <= k <= 10**9 and 0 <= m <= k and 1 <= n <= 10**9 and 0 <= an < 2**64\n",
" result, judge, worker = None, None, StLangExInterpreter(k, m, n)\n",
" try:\n",
" result = worker.visit(STLANG_TREE)\n",
" judge = \"Accepted\" if result == an else \"WrongAnswer\"\n",
" except AssertionError as e:\n",
" judge, tb = \"RuntimeError\", e.__traceback__\n",
" print(type(e), e, file=sys.stderr)\n",
" while tb is not None:\n",
" print(tb.tb_frame, file=sys.stderr)\n",
" tb = tb.tb_next\n",
" if judge != \"Accepted\" or worst < worker.env['C']:\n",
" print(f\"{judge}: K={k} M={m} N={n} a_N={an} prog_return:{result} C:{worker.env['C']}\")\n",
" worst = worker.env['C']\n"
]
}
],
"metadata": {
"colab": {
"provenance": [],
"include_colab_link": true
},
"kernelspec": {
"display_name": "Python 3",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.12.4"
}
},
"nbformat": 4,
"nbformat_minor": 0
}