[pypy-svn] r65678 - pypy/branch/parser-compiler/pypy/interpreter/astcompiler/tools

benjamin at codespeak.net
Mon Jun 8 23:46:10 CEST 2009


Author: benjamin
Date: Mon Jun  8 23:46:07 2009
New Revision: 65678

Added:
   pypy/branch/parser-compiler/pypy/interpreter/astcompiler/tools/
   pypy/branch/parser-compiler/pypy/interpreter/astcompiler/tools/Python.asdl   (contents, props changed)
   pypy/branch/parser-compiler/pypy/interpreter/astcompiler/tools/asdl.py   (contents, props changed)
   pypy/branch/parser-compiler/pypy/interpreter/astcompiler/tools/spark.py   (contents, props changed)
Log:
import cpython tools for asdl

Added: pypy/branch/parser-compiler/pypy/interpreter/astcompiler/tools/Python.asdl
==============================================================================
--- (empty file)
+++ pypy/branch/parser-compiler/pypy/interpreter/astcompiler/tools/Python.asdl	Mon Jun  8 23:46:07 2009
@@ -0,0 +1,115 @@
+-- ASDL's five builtin types are identifier, int, string, object, bool
+
+module Python version "$Revision: 43614 $"
+{
+	mod = Module(stmt* body)
+	    | Interactive(stmt* body)
+	    | Expression(expr body)
+
+	    -- not really an actual node but useful in Jython's typesystem.
+	    | Suite(stmt* body)
+
+	stmt = FunctionDef(identifier name, arguments args, 
+                           stmt* body, expr* decorators)
+	      | ClassDef(identifier name, expr* bases, stmt* body)
+	      | Return(expr? value)
+
+	      | Delete(expr* targets)
+	      | Assign(expr* targets, expr value)
+	      | AugAssign(expr target, operator op, expr value)
+
+	      -- not sure if bool is allowed, can always use int
+ 	      | Print(expr? dest, expr* values, bool nl)
+
+	      -- use 'orelse' because else is a keyword in target languages
+	      | For(expr target, expr iter, stmt* body, stmt* orelse)
+	      | While(expr test, stmt* body, stmt* orelse)
+	      | If(expr test, stmt* body, stmt* orelse)
+	      | With(expr context_expr, expr? optional_vars, stmt* body)
+
+	      -- 'type' is a bad name
+	      | Raise(expr? type, expr? inst, expr? tback)
+	      | TryExcept(stmt* body, excepthandler* handlers, stmt* orelse)
+	      | TryFinally(stmt* body, stmt* finalbody)
+	      | Assert(expr test, expr? msg)
+
+	      | Import(alias* names)
+	      | ImportFrom(identifier module, alias* names, int? level)
+
+	      -- Doesn't capture requirement that locals must be
+	      -- defined if globals is
+	      -- still supports use as a function!
+	      | Exec(expr body, expr? globals, expr? locals)
+
+	      | Global(identifier* names)
+	      | Expr(expr value)
+	      | Pass | Break | Continue
+
+	      -- XXX Jython will be different
+	      -- col_offset is the byte offset in the utf8 string the parser uses
+	      attributes (int lineno, int col_offset)
+
+	      -- BoolOp() can use left & right?
+	expr = BoolOp(boolop op, expr* values)
+	     | BinOp(expr left, operator op, expr right)
+	     | UnaryOp(unaryop op, expr operand)
+	     | Lambda(arguments args, expr body)
+	     | IfExp(expr test, expr body, expr orelse)
+	     | Dict(expr* keys, expr* values)
+	     | ListComp(expr elt, comprehension* generators)
+	     | GeneratorExp(expr elt, comprehension* generators)
+	     -- the grammar constrains where yield expressions can occur
+	     | Yield(expr? value)
+	     -- need sequences for compare to distinguish between
+	     -- x < 4 < 3 and (x < 4) < 3
+	     | Compare(expr left, cmpop* ops, expr* comparators)
+	     | Call(expr func, expr* args, keyword* keywords,
+			 expr? starargs, expr? kwargs)
+	     | Repr(expr value)
+	     | Num(object n) -- a number as a PyObject.
+	     | Str(string s) -- need to specify raw, unicode, etc?
+	     -- other literals? bools?
+
+	     -- the following expression can appear in assignment context
+	     | Attribute(expr value, identifier attr, expr_context ctx)
+	     | Subscript(expr value, slice slice, expr_context ctx)
+	     | Name(identifier id, expr_context ctx)
+	     | List(expr* elts, expr_context ctx) 
+	     | Tuple(expr* elts, expr_context ctx)
+
+	      -- col_offset is the byte offset in the utf8 string the parser uses
+	      attributes (int lineno, int col_offset)
+
+	expr_context = Load | Store | Del | AugLoad | AugStore | Param
+
+	slice = Ellipsis | Slice(expr? lower, expr? upper, expr? step) 
+	      | ExtSlice(slice* dims) 
+	      | Index(expr value) 
+
+	boolop = And | Or 
+
+	operator = Add | Sub | Mult | Div | Mod | Pow | LShift 
+                 | RShift | BitOr | BitXor | BitAnd | FloorDiv
+
+	unaryop = Invert | Not | UAdd | USub
+
+	cmpop = Eq | NotEq | Lt | LtE | Gt | GtE | Is | IsNot | In | NotIn
+
+	comprehension = (expr target, expr iter, expr* ifs)
+
+	-- not sure what to call the first argument for raise and except
+	-- TODO(jhylton): Figure out if there is a better way to handle
+	--                lineno and col_offset fields, particularly when
+        --                ast is exposed to Python.
+	excepthandler = (expr? type, expr? name, stmt* body, int lineno,
+	                 int col_offset)
+
+	arguments = (expr* args, identifier? vararg, 
+		     identifier? kwarg, expr* defaults)
+
+        -- keyword arguments supplied to call
+        keyword = (identifier arg, expr value)
+
+        -- import name with optional 'as' alias.
+        alias = (identifier name, identifier? asname)
+}

Added: pypy/branch/parser-compiler/pypy/interpreter/astcompiler/tools/asdl.py
==============================================================================
--- (empty file)
+++ pypy/branch/parser-compiler/pypy/interpreter/astcompiler/tools/asdl.py	Mon Jun  8 23:46:07 2009
@@ -0,0 +1,413 @@
+"""An implementation of the Zephyr Abstract Syntax Definition Language.
+
+See http://asdl.sourceforge.net/ and
+http://www.cs.princeton.edu/~danwang/Papers/dsl97/dsl97-abstract.html.
+
+Only supports top level module decl, not view.  I'm guessing that view
+is intended to support the browser and I'm not interested in the
+browser.
+
+Changes for Python: Add support for module versions
+"""
+
+import os
+import traceback
+
+import spark
+
+class Token(object):
+    # spark seems to dispatch in the parser based on a token's
+    # type attribute
+    def __init__(self, type, lineno):
+        self.type = type
+        self.lineno = lineno
+
+    def __str__(self):
+        return self.type
+
+    def __repr__(self):
+        return str(self)
+
+class Id(Token):
+    def __init__(self, value, lineno):
+        self.type = 'Id'
+        self.value = value
+        self.lineno = lineno
+
+    def __str__(self):
+        return self.value
+
+class String(Token):
+    def __init__(self, value, lineno):
+        self.type = 'String'
+        self.value = value
+        self.lineno = lineno
+
+class ASDLSyntaxError(Exception):
+
+    def __init__(self, lineno, token=None, msg=None):
+        self.lineno = lineno
+        self.token = token
+        self.msg = msg
+
+    def __str__(self):
+        if self.msg is None:
+            return "Error at '%s', line %d" % (self.token, self.lineno)
+        else:
+            return "%s, line %d" % (self.msg, self.lineno)
+
+class ASDLScanner(spark.GenericScanner, object):
+
+    def tokenize(self, input):
+        self.rv = []
+        self.lineno = 1
+        super(ASDLScanner, self).tokenize(input)
+        return self.rv
+
+    def t_id(self, s):
+        r"[\w\.]+"
+        # XXX doesn't distinguish upper vs. lower, which is
+        # significant for ASDL.
+        self.rv.append(Id(s, self.lineno))
+
+    def t_string(self, s):
+        r'"[^"]*"'
+        self.rv.append(String(s, self.lineno))
+
+    def t_xxx(self, s): # not sure what this production means
+        r"<="
+        self.rv.append(Token(s, self.lineno))
+
+    def t_punctuation(self, s):
+        r"[\{\}\*\=\|\(\)\,\?\:]"
+        self.rv.append(Token(s, self.lineno))
+
+    def t_comment(self, s):
+        r"\-\-[^\n]*"
+        pass
+
+    def t_newline(self, s):
+        r"\n"
+        self.lineno += 1
+
+    def t_whitespace(self, s):
+        r"[ \t]+"
+        pass
+
+    def t_default(self, s):
+        r" . +"
+        raise ValueError, "unmatched input: %s" % `s`
+
+class ASDLParser(spark.GenericParser, object):
+    def __init__(self):
+        super(ASDLParser, self).__init__("module")
+
+    def typestring(self, tok):
+        return tok.type
+
+    def error(self, tok):
+        raise ASDLSyntaxError(tok.lineno, tok)
+
+    def p_module_0(self, (module, name, version, _0, _1)):
+        " module ::= Id Id version { } "
+        if module.value != "module":
+            raise ASDLSyntaxError(module.lineno,
+                                  msg="expected 'module', found %s" % module)
+        return Module(name, None, version)
+
+    def p_module(self, (module, name, version, _0, definitions, _1)):
+        " module ::= Id Id version { definitions } "
+        if module.value != "module":
+            raise ASDLSyntaxError(module.lineno,
+                                  msg="expected 'module', found %s" % module)
+        return Module(name, definitions, version)
+
+    def p_version(self, (version, V)):
+        "version ::= Id String"
+        if version.value != "version":
+            raise ASDLSyntaxError(version.lineno,
+                                  msg="expected 'version', found %s" % version)
+        return V
+
+    def p_definition_0(self, (definition,)):
+        " definitions ::= definition "
+        return definition
+
+    def p_definition_1(self, (definition, definitions)):
+        " definitions ::= definition definitions "
+        return definition + definitions
+
+    def p_definition(self, (id, _, type)):
+        " definition ::= Id = type "
+        return [Type(id, type)]
+
+    def p_type_0(self, (product,)):
+        " type ::= product "
+        return product
+
+    def p_type_1(self, (sum,)):
+        " type ::= sum "
+        return Sum(sum)
+
+    def p_type_2(self, (sum, id, _0, attributes, _1)):
+        " type ::= sum Id ( fields ) "
+        if id.value != "attributes":
+            raise ASDLSyntaxError(id.lineno,
+                                  msg="expected attributes, found %s" % id)
+        if attributes:
+            attributes.reverse()
+        return Sum(sum, attributes)
+
+    def p_product(self, (_0, fields, _1)):
+        " product ::= ( fields ) "
+        # XXX can't I just construct things in the right order?
+        fields.reverse()
+        return Product(fields)
+
+    def p_sum_0(self, (constructor,)):
+        " sum ::= constructor "
+        return [constructor]
+
+    def p_sum_1(self, (constructor, _, sum)):
+        " sum ::= constructor | sum "
+        return [constructor] + sum
+
+    def p_constructor_0(self, (id,)):
+        " constructor ::= Id "
+        return Constructor(id)
+
+    def p_constructor_1(self, (id, _0, fields, _1)):
+        " constructor ::= Id ( fields ) "
+        # XXX can't I just construct things in the right order?
+        fields.reverse()
+        return Constructor(id, fields)
+
+    def p_fields_0(self, (field,)):
+        " fields ::= field "
+        return [field]
+
+    def p_fields_1(self, (field, _, fields)):
+        " fields ::= field , fields "
+        return fields + [field]
+
+    def p_field_0(self, (type,)):
+        " field ::= Id "
+        return Field(type)
+
+    def p_field_1(self, (type, name)):
+        " field ::= Id Id "
+        return Field(type, name)
+
+    def p_field_2(self, (type, _, name)):
+        " field ::= Id * Id "
+        return Field(type, name, seq=True)
+
+    def p_field_3(self, (type, _, name)):
+        " field ::= Id ? Id "
+        return Field(type, name, opt=True)
+
+    def p_field_4(self, (type, _)):
+        " field ::= Id * "
+        return Field(type, seq=True)
+
+    def p_field_5(self, (type, _)):
+        " field ::= Id ? "
+        return Field(type, opt=True)
+
+builtin_types = ("identifier", "string", "int", "bool", "object")
+
+# below is a collection of classes to capture the AST of an AST :-)
+# not sure if any of the methods are useful yet, but I'm adding them
+# piecemeal as they seem helpful
+
+class AST(object):
+    pass # a marker class
+
+class Module(AST):
+    def __init__(self, name, dfns, version):
+        self.name = name
+        self.dfns = dfns
+        self.version = version
+        self.types = {} # maps type name to value (from dfns)
+        for type in dfns:
+            self.types[type.name.value] = type.value
+
+    def __repr__(self):
+        return "Module(%s, %s)" % (self.name, self.dfns)
+
+class Type(AST):
+    def __init__(self, name, value):
+        self.name = name
+        self.value = value
+
+    def __repr__(self):
+        return "Type(%s, %s)" % (self.name, self.value)
+
+class Constructor(AST):
+    def __init__(self, name, fields=None):
+        self.name = name
+        self.fields = fields or []
+
+    def __repr__(self):
+        return "Constructor(%s, %s)" % (self.name, self.fields)
+
+class Field(AST):
+    def __init__(self, type, name=None, seq=False, opt=False):
+        self.type = type
+        self.name = name
+        self.seq = seq
+        self.opt = opt
+
+    def __repr__(self):
+        if self.seq:
+            extra = ", seq=True"
+        elif self.opt:
+            extra = ", opt=True"
+        else:
+            extra = ""
+        if self.name is None:
+            return "Field(%s%s)" % (self.type, extra)
+        else:
+            return "Field(%s, %s%s)" % (self.type, self.name, extra)
+
+class Sum(AST):
+    def __init__(self, types, attributes=None):
+        self.types = types
+        self.attributes = attributes or []
+
+    def __repr__(self):
+        if self.attributes is None:
+            return "Sum(%s)" % self.types
+        else:
+            return "Sum(%s, %s)" % (self.types, self.attributes)
+
+class Product(AST):
+    def __init__(self, fields):
+        self.fields = fields
+
+    def __repr__(self):
+        return "Product(%s)" % self.fields
+
+class VisitorBase(object):
+
+    def __init__(self, skip=False):
+        self.cache = {}
+        self.skip = skip
+
+    def visit(self, object, *args):
+        meth = self._dispatch(object)
+        if meth is None:
+            return
+        try:
+            meth(object, *args)
+        except Exception, err:
+            print "Error visiting", repr(object)
+            print err
+            traceback.print_exc()
+            # XXX hack
+            if hasattr(self, 'file'):
+                self.file.flush()
+            os._exit(1)
+
+    def _dispatch(self, object):
+        assert isinstance(object, AST), repr(object)
+        klass = object.__class__
+        meth = self.cache.get(klass)
+        if meth is None:
+            methname = "visit" + klass.__name__
+            if self.skip:
+                meth = getattr(self, methname, None)
+            else:
+                meth = getattr(self, methname)
+            self.cache[klass] = meth
+        return meth
+
+class Check(VisitorBase):
+
+    def __init__(self):
+        super(Check, self).__init__(skip=True)
+        self.cons = {}
+        self.errors = 0
+        self.types = {}
+
+    def visitModule(self, mod):
+        for dfn in mod.dfns:
+            self.visit(dfn)
+
+    def visitType(self, type):
+        self.visit(type.value, str(type.name))
+
+    def visitSum(self, sum, name):
+        for t in sum.types:
+            self.visit(t, name)
+
+    def visitConstructor(self, cons, name):
+        key = str(cons.name)
+        conflict = self.cons.get(key)
+        if conflict is None:
+            self.cons[key] = name
+        else:
+            print "Redefinition of constructor %s" % key
+            print "Defined in %s and %s" % (conflict, name)
+            self.errors += 1
+        for f in cons.fields:
+            self.visit(f, key)
+
+    def visitField(self, field, name):
+        key = str(field.type)
+        l = self.types.setdefault(key, [])
+        l.append(name)
+
+    def visitProduct(self, prod, name):
+        for f in prod.fields:
+            self.visit(f, name)
+
+def check(mod):
+    v = Check()
+    v.visit(mod)
+
+    for t in v.types:
+        if t not in mod.types and t not in builtin_types:
+            v.errors += 1
+            uses = ", ".join(v.types[t])
+            print "Undefined type %s, used in %s" % (t, uses)
+
+    return not v.errors
+
+def parse(file):
+    scanner = ASDLScanner()
+    parser = ASDLParser()
+
+    buf = open(file).read()
+    tokens = scanner.tokenize(buf)
+    try:
+        return parser.parse(tokens)
+    except ASDLSyntaxError, err:
+        print err
+        lines = buf.split("\n")
+        print lines[err.lineno - 1] # lines starts at 0, files at 1
+
+if __name__ == "__main__":
+    import glob
+    import sys
+
+    if len(sys.argv) > 1:
+        files = sys.argv[1:]
+    else:
+        testdir = "tests"
+        files = glob.glob(testdir + "/*.asdl")
+
+    for file in files:
+        print file
+        mod = parse(file)
+        print "module", mod.name
+        print len(mod.dfns), "definitions"
+        if not check(mod):
+            print "Check failed"
+        else:
+            for dfn in mod.dfns:
+                print dfn.type

Added: pypy/branch/parser-compiler/pypy/interpreter/astcompiler/tools/spark.py
==============================================================================
--- (empty file)
+++ pypy/branch/parser-compiler/pypy/interpreter/astcompiler/tools/spark.py	Mon Jun  8 23:46:07 2009
@@ -0,0 +1,839 @@
+#  Copyright (c) 1998-2002 John Aycock
+#
+#  Permission is hereby granted, free of charge, to any person obtaining
+#  a copy of this software and associated documentation files (the
+#  "Software"), to deal in the Software without restriction, including
+#  without limitation the rights to use, copy, modify, merge, publish,
+#  distribute, sublicense, and/or sell copies of the Software, and to
+#  permit persons to whom the Software is furnished to do so, subject to
+#  the following conditions:
+#
+#  The above copyright notice and this permission notice shall be
+#  included in all copies or substantial portions of the Software.
+#
+#  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+#  EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+#  MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+#  IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+#  CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+#  TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+#  SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+__version__ = 'SPARK-0.7 (pre-alpha-5)'
+
+import re
+import string
+
+def _namelist(instance):
+    namelist, namedict, classlist = [], {}, [instance.__class__]
+    for c in classlist:
+        for b in c.__bases__:
+            classlist.append(b)
+        for name in c.__dict__.keys():
+            if not namedict.has_key(name):
+                namelist.append(name)
+                namedict[name] = 1
+    return namelist
+
+class GenericScanner:
+    def __init__(self, flags=0):
+        pattern = self.reflect()
+        self.re = re.compile(pattern, re.VERBOSE|flags)
+
+        self.index2func = {}
+        for name, number in self.re.groupindex.items():
+            self.index2func[number-1] = getattr(self, 't_' + name)
+
+    def makeRE(self, name):
+        doc = getattr(self, name).__doc__
+        rv = '(?P<%s>%s)' % (name[2:], doc)
+        return rv
+
+    def reflect(self):
+        rv = []
+        for name in _namelist(self):
+            if name[:2] == 't_' and name != 't_default':
+                rv.append(self.makeRE(name))
+
+        rv.append(self.makeRE('t_default'))
+        return string.join(rv, '|')
+
+    def error(self, s, pos):
+        print "Lexical error at position %s" % pos
+        raise SystemExit
+
+    def tokenize(self, s):
+        pos = 0
+        n = len(s)
+        while pos < n:
+            m = self.re.match(s, pos)
+            if m is None:
+                self.error(s, pos)
+
+            groups = m.groups()
+            for i in range(len(groups)):
+                if groups[i] and self.index2func.has_key(i):
+                    self.index2func[i](groups[i])
+            pos = m.end()
+
+    def t_default(self, s):
+        r'( . | \n )+'
+        print "Specification error: unmatched input"
+        raise SystemExit
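+
+#  Usage sketch: subclasses define t_<name> methods whose docstrings
+#  are VERBOSE-mode regexes; reflect() ors them together and
+#  tokenize() dispatches each match to the corresponding method.
+#  ASDLScanner in asdl.py (added in this commit) is a real client; a
+#  minimal scanner might look like this (hypothetical class):
+#
+#      class WordScanner(GenericScanner):
+#          def tokenize(self, s):
+#              self.rv = []
+#              GenericScanner.tokenize(self, s)
+#              return self.rv
+#          def t_word(self, s):
+#              r" \w+ "
+#              self.rv.append(s)
+#          def t_whitespace(self, s):
+#              r" \s+ "
+#              pass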
+
+#
+#  Extracted from GenericParser and made global so that [un]picking works.
+#
+class _State:
+    def __init__(self, stateno, items):
+        self.T, self.complete, self.items = [], [], items
+        self.stateno = stateno
+
+class GenericParser:
+    #
+    #  An Earley parser, as per J. Earley, "An Efficient Context-Free
+    #  Parsing Algorithm", CACM 13(2), pp. 94-102.  Also J. C. Earley,
+    #  "An Efficient Context-Free Parsing Algorithm", Ph.D. thesis,
+    #  Carnegie-Mellon University, August 1968.  New formulation of
+    #  the parser according to J. Aycock, "Practical Earley Parsing
+    #  and the SPARK Toolkit", Ph.D. thesis, University of Victoria,
+    #  2001, and J. Aycock and R. N. Horspool, "Practical Earley
+    #  Parsing", unpublished paper, 2001.
+    #
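+    #  Usage sketch: grammar productions live in the docstrings of
+    #  p_<name> methods, and each method's argument tuple binds
+    #  positionally to the right-hand-side symbols.  ASDLParser in
+    #  asdl.py (added in this commit) is a real client; a toy grammar
+    #  might look like this (hypothetical class):
+    #
+    #      class PairParser(GenericParser):
+    #          def __init__(self):
+    #              GenericParser.__init__(self, "pair")
+    #          def p_pair(self, (left, _, right)):
+    #              " pair ::= Id , Id "
+    #              return (left, right)
+    #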
+
+    def __init__(self, start):
+        self.rules = {}
+        self.rule2func = {}
+        self.rule2name = {}
+        self.collectRules()
+        self.augment(start)
+        self.ruleschanged = 1
+
+    _NULLABLE = '\e_'
+    _START = 'START'
+    _BOF = '|-'
+
+    #
+    #  When pickling, take the time to generate the full state machine;
+    #  some information is then extraneous, too.  Unfortunately we
+    #  can't save the rule2func map.
+    #
+    def __getstate__(self):
+        if self.ruleschanged:
+            #
+            #  XXX - duplicated from parse()
+            #
+            self.computeNull()
+            self.newrules = {}
+            self.new2old = {}
+            self.makeNewRules()
+            self.ruleschanged = 0
+            self.edges, self.cores = {}, {}
+            self.states = { 0: self.makeState0() }
+            self.makeState(0, self._BOF)
+        #
+        #  XXX - should find a better way to do this..
+        #
+        changes = 1
+        while changes:
+            changes = 0
+            for k, v in self.edges.items():
+                if v is None:
+                    state, sym = k
+                    if self.states.has_key(state):
+                        self.goto(state, sym)
+                        changes = 1
+        rv = self.__dict__.copy()
+        for s in self.states.values():
+            del s.items
+        del rv['rule2func']
+        del rv['nullable']
+        del rv['cores']
+        return rv
+
+    def __setstate__(self, D):
+        self.rules = {}
+        self.rule2func = {}
+        self.rule2name = {}
+        self.collectRules()
+        start = D['rules'][self._START][0][1][1]        # Blech.
+        self.augment(start)
+        D['rule2func'] = self.rule2func
+        D['makeSet'] = self.makeSet_fast
+        self.__dict__ = D
+
+    #
+    #  A hook for GenericASTBuilder and GenericASTMatcher.  Mess
+    #  thee not with this; nor shall thee toucheth the _preprocess
+    #  argument to addRule.
+    #
+    def preprocess(self, rule, func):       return rule, func
+
+    def addRule(self, doc, func, _preprocess=1):
+        fn = func
+        rules = string.split(doc)
+
+        index = []
+        for i in range(len(rules)):
+            if rules[i] == '::=':
+                index.append(i-1)
+        index.append(len(rules))
+
+        for i in range(len(index)-1):
+            lhs = rules[index[i]]
+            rhs = rules[index[i]+2:index[i+1]]
+            rule = (lhs, tuple(rhs))
+
+            if _preprocess:
+                rule, fn = self.preprocess(rule, func)
+
+            if self.rules.has_key(lhs):
+                self.rules[lhs].append(rule)
+            else:
+                self.rules[lhs] = [ rule ]
+            self.rule2func[rule] = fn
+            self.rule2name[rule] = func.__name__[2:]
+        self.ruleschanged = 1
+
+    def collectRules(self):
+        for name in _namelist(self):
+            if name[:2] == 'p_':
+                func = getattr(self, name)
+                doc = func.__doc__
+                self.addRule(doc, func)
+
+    def augment(self, start):
+        rule = '%s ::= %s %s' % (self._START, self._BOF, start)
+        self.addRule(rule, lambda args: args[1], 0)
+
+    def computeNull(self):
+        self.nullable = {}
+        tbd = []
+
+        for rulelist in self.rules.values():
+            lhs = rulelist[0][0]
+            self.nullable[lhs] = 0
+            for rule in rulelist:
+                rhs = rule[1]
+                if len(rhs) == 0:
+                    self.nullable[lhs] = 1
+                    continue
+                #
+                #  We only need to consider rules which
+                #  consist entirely of nonterminal symbols.
+                #  This should be a savings on typical
+                #  grammars.
+                #
+                for sym in rhs:
+                    if not self.rules.has_key(sym):
+                        break
+                else:
+                    tbd.append(rule)
+        changes = 1
+        while changes:
+            changes = 0
+            for lhs, rhs in tbd:
+                if self.nullable[lhs]:
+                    continue
+                for sym in rhs:
+                    if not self.nullable[sym]:
+                        break
+                else:
+                    self.nullable[lhs] = 1
+                    changes = 1
+
+    def makeState0(self):
+        s0 = _State(0, [])
+        for rule in self.newrules[self._START]:
+            s0.items.append((rule, 0))
+        return s0
+
+    def finalState(self, tokens):
+        #
+        #  Yuck.
+        #
+        if len(self.newrules[self._START]) == 2 and len(tokens) == 0:
+            return 1
+        start = self.rules[self._START][0][1][1]
+        return self.goto(1, start)
+
+    def makeNewRules(self):
+        worklist = []
+        for rulelist in self.rules.values():
+            for rule in rulelist:
+                worklist.append((rule, 0, 1, rule))
+
+        for rule, i, candidate, oldrule in worklist:
+            lhs, rhs = rule
+            n = len(rhs)
+            while i < n:
+                sym = rhs[i]
+                if not self.rules.has_key(sym) or \
+                   not self.nullable[sym]:
+                    candidate = 0
+                    i = i + 1
+                    continue
+
+                newrhs = list(rhs)
+                newrhs[i] = self._NULLABLE+sym
+                newrule = (lhs, tuple(newrhs))
+                worklist.append((newrule, i+1,
+                                 candidate, oldrule))
+                candidate = 0
+                i = i + 1
+            else:
+                if candidate:
+                    lhs = self._NULLABLE+lhs
+                    rule = (lhs, rhs)
+                if self.newrules.has_key(lhs):
+                    self.newrules[lhs].append(rule)
+                else:
+                    self.newrules[lhs] = [ rule ]
+                self.new2old[rule] = oldrule
+
+    def typestring(self, token):
+        return None
+
+    def error(self, token):
+        print "Syntax error at or near `%s' token" % token
+        raise SystemExit
+
+    def parse(self, tokens):
+        sets = [ [(1,0), (2,0)] ]
+        self.links = {}
+
+        if self.ruleschanged:
+            self.computeNull()
+            self.newrules = {}
+            self.new2old = {}
+            self.makeNewRules()
+            self.ruleschanged = 0
+            self.edges, self.cores = {}, {}
+            self.states = { 0: self.makeState0() }
+            self.makeState(0, self._BOF)
+
+        for i in xrange(len(tokens)):
+            sets.append([])
+
+            if sets[i] == []:
+                break
+            self.makeSet(tokens[i], sets, i)
+        else:
+            sets.append([])
+            self.makeSet(None, sets, len(tokens))
+
+        #_dump(tokens, sets, self.states)
+
+        finalitem = (self.finalState(tokens), 0)
+        if finalitem not in sets[-2]:
+            if len(tokens) > 0:
+                self.error(tokens[i-1])
+            else:
+                self.error(None)
+
+        return self.buildTree(self._START, finalitem,
+                              tokens, len(sets)-2)
+
+    def isnullable(self, sym):
+        #
+        #  For symbols in G_e only.  If we weren't supporting 1.5,
+        #  could just use sym.startswith().
+        #
+        return self._NULLABLE == sym[0:len(self._NULLABLE)]
+
+    def skip(self, (lhs, rhs), pos=0):
+        n = len(rhs)
+        while pos < n:
+            if not self.isnullable(rhs[pos]):
+                break
+            pos = pos + 1
+        return pos
+
+    def makeState(self, state, sym):
+        assert sym is not None
+        #
+        #  Compute \epsilon-kernel state's core and see if
+        #  it exists already.
+        #
+        kitems = []
+        for rule, pos in self.states[state].items:
+            lhs, rhs = rule
+            if rhs[pos:pos+1] == (sym,):
+                kitems.append((rule, self.skip(rule, pos+1)))
+        core = kitems
+
+        core.sort()
+        tcore = tuple(core)
+        if self.cores.has_key(tcore):
+            return self.cores[tcore]
+        #
+        #  Nope, doesn't exist.  Compute it and the associated
+        #  \epsilon-nonkernel state together; we'll need it right away.
+        #
+        k = self.cores[tcore] = len(self.states)
+        K, NK = _State(k, kitems), _State(k+1, [])
+        self.states[k] = K
+        predicted = {}
+
+        edges = self.edges
+        rules = self.newrules
+        for X in K, NK:
+            worklist = X.items
+            for item in worklist:
+                rule, pos = item
+                lhs, rhs = rule
+                if pos == len(rhs):
+                    X.complete.append(rule)
+                    continue
+
+                nextSym = rhs[pos]
+                key = (X.stateno, nextSym)
+                if not rules.has_key(nextSym):
+                    if not edges.has_key(key):
+                        edges[key] = None
+                        X.T.append(nextSym)
+                else:
+                    edges[key] = None
+                    if not predicted.has_key(nextSym):
+                        predicted[nextSym] = 1
+                        for prule in rules[nextSym]:
+                            ppos = self.skip(prule)
+                            new = (prule, ppos)
+                            NK.items.append(new)
+            #
+            #  Problem: we know K needs generating, but we
+            #  don't yet know about NK.  Can't commit anything
+            #  regarding NK to self.edges until we're sure.  Should
+            #  we delay committing on both K and NK to avoid this
+            #  hacky code?  This creates other problems..
+            #
+            if X is K:
+                edges = {}
+
+        if NK.items == []:
+            return k
+
+        #
+        #  Check for \epsilon-nonkernel's core.  Unfortunately we
+        #  need to know the entire set of predicted nonterminals
+        #  to do this without accidentally duplicating states.
+        #
+        core = predicted.keys()
+        core.sort()
+        tcore = tuple(core)
+        if self.cores.has_key(tcore):
+            self.edges[(k, None)] = self.cores[tcore]
+            return k
+
+        nk = self.cores[tcore] = self.edges[(k, None)] = NK.stateno
+        self.edges.update(edges)
+        self.states[nk] = NK
+        return k
+
+    def goto(self, state, sym):
+        key = (state, sym)
+        if not self.edges.has_key(key):
+            #
+            #  No transitions from state on sym.
+            #
+            return None
+
+        rv = self.edges[key]
+        if rv is None:
+            #
+            #  Target state isn't generated yet.  Remedy this.
+            #
+            rv = self.makeState(state, sym)
+            self.edges[key] = rv
+        return rv
+
+    def gotoT(self, state, t):
+        return [self.goto(state, t)]
+
+    def gotoST(self, state, st):
+        rv = []
+        for t in self.states[state].T:
+            if st == t:
+                rv.append(self.goto(state, t))
+        return rv
+
+    def add(self, set, item, i=None, predecessor=None, causal=None):
+        if predecessor is None:
+            if item not in set:
+                set.append(item)
+        else:
+            key = (item, i)
+            if item not in set:
+                self.links[key] = []
+                set.append(item)
+            self.links[key].append((predecessor, causal))
+
+    def makeSet(self, token, sets, i):
+        cur, next = sets[i], sets[i+1]
+
+        ttype = token is not None and self.typestring(token) or None
+        if ttype is not None:
+            fn, arg = self.gotoT, ttype
+        else:
+            fn, arg = self.gotoST, token
+
+        for item in cur:
+            ptr = (item, i)
+            state, parent = item
+            add = fn(state, arg)
+            for k in add:
+                if k is not None:
+                    self.add(next, (k, parent), i+1, ptr)
+                    nk = self.goto(k, None)
+                    if nk is not None:
+                        self.add(next, (nk, i+1))
+
+            if parent == i:
+                continue
+
+            for rule in self.states[state].complete:
+                lhs, rhs = rule
+                for pitem in sets[parent]:
+                    pstate, pparent = pitem
+                    k = self.goto(pstate, lhs)
+                    if k is not None:
+                        why = (item, i, rule)
+                        pptr = (pitem, parent)
+                        self.add(cur, (k, pparent),
+                                 i, pptr, why)
+                        nk = self.goto(k, None)
+                        if nk is not None:
+                            self.add(cur, (nk, i))
+
+    def makeSet_fast(self, token, sets, i):
+        #
+        #  Call *only* when the entire state machine has been built!
+        #  It relies on self.edges being filled in completely, and
+        #  then duplicates and inlines code to boost speed at the
+        #  cost of extreme ugliness.
+        #
+        cur, next = sets[i], sets[i+1]
+        ttype = token is not None and self.typestring(token) or None
+
+        for item in cur:
+            ptr = (item, i)
+            state, parent = item
+            if ttype is not None:
+                k = self.edges.get((state, ttype), None)
+                if k is not None:
+                    #self.add(next, (k, parent), i+1, ptr)
+                    #INLINED --v
+                    new = (k, parent)
+                    key = (new, i+1)
+                    if new not in next:
+                        self.links[key] = []
+                        next.append(new)
+                    self.links[key].append((ptr, None))
+                    #INLINED --^
+                    #nk = self.goto(k, None)
+                    nk = self.edges.get((k, None), None)
+                    if nk is not None:
+                        #self.add(next, (nk, i+1))
+                        #INLINED --v
+                        new = (nk, i+1)
+                        if new not in next:
+                            next.append(new)
+                        #INLINED --^
+            else:
+                add = self.gotoST(state, token)
+                for k in add:
+                    if k is not None:
+                        self.add(next, (k, parent), i+1, ptr)
+                        #nk = self.goto(k, None)
+                        nk = self.edges.get((k, None), None)
+                        if nk is not None:
+                            self.add(next, (nk, i+1))
+
+            if parent == i:
+                continue
+
+            for rule in self.states[state].complete:
+                lhs, rhs = rule
+                for pitem in sets[parent]:
+                    pstate, pparent = pitem
+                    #k = self.goto(pstate, lhs)
+                    k = self.edges.get((pstate, lhs), None)
+                    if k is not None:
+                        why = (item, i, rule)
+                        pptr = (pitem, parent)
+                        #self.add(cur, (k, pparent),
+                        #        i, pptr, why)
+                        #INLINED --v
+                        new = (k, pparent)
+                        key = (new, i)
+                        if new not in cur:
+                            self.links[key] = []
+                            cur.append(new)
+                        self.links[key].append((pptr, why))
+                        #INLINED --^
+                        #nk = self.goto(k, None)
+                        nk = self.edges.get((k, None), None)
+                        if nk is not None:
+                            #self.add(cur, (nk, i))
+                            #INLINED --v
+                            new = (nk, i)
+                            if new not in cur:
+                                cur.append(new)
+                            #INLINED --^
+
+    def predecessor(self, key, causal):
+        for p, c in self.links[key]:
+            if c == causal:
+                return p
+        assert 0
+
+    def causal(self, key):
+        links = self.links[key]
+        if len(links) == 1:
+            return links[0][1]
+        choices = []
+        rule2cause = {}
+        for p, c in links:
+            rule = c[2]
+            choices.append(rule)
+            rule2cause[rule] = c
+        return rule2cause[self.ambiguity(choices)]
+
+    def deriveEpsilon(self, nt):
+        if len(self.newrules[nt]) > 1:
+            rule = self.ambiguity(self.newrules[nt])
+        else:
+            rule = self.newrules[nt][0]
+        #print rule
+
+        rhs = rule[1]
+        attr = [None] * len(rhs)
+
+        for i in range(len(rhs)-1, -1, -1):
+            attr[i] = self.deriveEpsilon(rhs[i])
+        return self.rule2func[self.new2old[rule]](attr)
+
+    def buildTree(self, nt, item, tokens, k):
+        state, parent = item
+
+        choices = []
+        for rule in self.states[state].complete:
+            if rule[0] == nt:
+                choices.append(rule)
+        rule = choices[0]
+        if len(choices) > 1:
+            rule = self.ambiguity(choices)
+        #print rule
+
+        rhs = rule[1]
+        attr = [None] * len(rhs)
+
+        for i in range(len(rhs)-1, -1, -1):
+            sym = rhs[i]
+            if not self.newrules.has_key(sym):
+                if sym != self._BOF:
+                    attr[i] = tokens[k-1]
+                    key = (item, k)
+                    item, k = self.predecessor(key, None)
+            #elif self.isnullable(sym):
+            elif self._NULLABLE == sym[0:len(self._NULLABLE)]:
+                attr[i] = self.deriveEpsilon(sym)
+            else:
+                key = (item, k)
+                why = self.causal(key)
+                attr[i] = self.buildTree(sym, why[0],
+                                         tokens, why[1])
+                item, k = self.predecessor(key, why)
+        return self.rule2func[self.new2old[rule]](attr)
+
+    def ambiguity(self, rules):
+        #
+        #  XXX - problem here and in collectRules() if the same rule
+        #        appears in >1 method.  Also undefined results if rules
+        #        causing the ambiguity appear in the same method.
+        #
+        sortlist = []
+        name2index = {}
+        for i in range(len(rules)):
+            lhs, rhs = rule = rules[i]
+            name = self.rule2name[self.new2old[rule]]
+            sortlist.append((len(rhs), name))
+            name2index[name] = i
+        sortlist.sort()
+        list = map(lambda (a,b): b, sortlist)
+        return rules[name2index[self.resolve(list)]]
+
+    def resolve(self, list):
+        #
+        #  Resolve ambiguity in favor of the shortest RHS.
+        #  Since we walk the tree from the top down, this
+        #  should effectively resolve in favor of a "shift".
+        #
+        return list[0]
+
+#
+#  GenericASTBuilder automagically constructs a concrete/abstract syntax tree
+#  for a given input.  The extra argument is a class (not an instance!)
+#  which supports the "__setslice__" and "__len__" methods.
+#
+#  XXX - silently overrides any user code in methods.
+#
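+#  Usage sketch: any list-like class taking a type string works as the
+#  AST argument (the names below are hypothetical):
+#
+#      class Node(list):
+#          def __init__(self, type):
+#              list.__init__(self)
+#              self.type = type
+#
+#      class ExprBuilder(GenericASTBuilder):
+#          def __init__(self):
+#              GenericASTBuilder.__init__(self, Node, "expr")
+#          def p_rules(self, args):
+#              '''
+#                  expr ::= expr + term
+#                  expr ::= term
+#              '''
+#
+#      tree = ExprBuilder().parse(tokens)
+#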
+
+class GenericASTBuilder(GenericParser):
+    def __init__(self, AST, start):
+        GenericParser.__init__(self, start)
+        self.AST = AST
+
+    def preprocess(self, rule, func):
+        rebind = lambda lhs, self=self: \
+                        lambda args, lhs=lhs, self=self: \
+                                self.buildASTNode(args, lhs)
+        lhs, rhs = rule
+        return rule, rebind(lhs)
+
+    def buildASTNode(self, args, lhs):
+        children = []
+        for arg in args:
+            if isinstance(arg, self.AST):
+                children.append(arg)
+            else:
+                children.append(self.terminal(arg))
+        return self.nonterminal(lhs, children)
+
+    def terminal(self, token):      return token
+
+    def nonterminal(self, type, args):
+        rv = self.AST(type)
+        rv[:len(args)] = args
+        return rv
+
+#
+#  GenericASTTraversal is a Visitor pattern according to Design Patterns.  For
+#  each node it attempts to invoke the method n_<node type>, falling
+#  back onto the default() method if the n_* can't be found.  The preorder
+#  traversal also looks for an exit hook named n_<node type>_exit (no default
+#  routine is called if it's not found).  To prematurely halt traversal
+#  of a subtree, call the prune() method -- this only makes sense for a
+#  preorder traversal.  Node type is determined via the typestring() method.
+#
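+#  Usage sketch (names are hypothetical; nodes are assumed iterable
+#  and to carry the 'type' attribute that typestring() reads):
+#
+#      class NameLister(GenericASTTraversal):
+#          def __init__(self, ast):
+#              GenericASTTraversal.__init__(self, ast)
+#              self.preorder()
+#          def n_Name(self, node):
+#              print "saw a Name node:", node
+#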
+
+class GenericASTTraversalPruningException:
+    pass
+
+class GenericASTTraversal:
+    def __init__(self, ast):
+        self.ast = ast
+
+    def typestring(self, node):
+        return node.type
+
+    def prune(self):
+        raise GenericASTTraversalPruningException
+
+    def preorder(self, node=None):
+        if node is None:
+            node = self.ast
+
+        try:
+            name = 'n_' + self.typestring(node)
+            if hasattr(self, name):
+                func = getattr(self, name)
+                func(node)
+            else:
+                self.default(node)
+        except GenericASTTraversalPruningException:
+            return
+
+        for kid in node:
+            self.preorder(kid)
+
+        name = name + '_exit'
+        if hasattr(self, name):
+            func = getattr(self, name)
+            func(node)
+
+    def postorder(self, node=None):
+        if node is None:
+            node = self.ast
+
+        for kid in node:
+            self.postorder(kid)
+
+        name = 'n_' + self.typestring(node)
+        if hasattr(self, name):
+            func = getattr(self, name)
+            func(node)
+        else:
+            self.default(node)
+
+
+    def default(self, node):
+        pass
+
+#
+#  GenericASTMatcher.  AST nodes must have "__getitem__" and "__cmp__"
+#  implemented.
+#
+#  XXX - makes assumptions about how GenericParser walks the parse tree.
+#
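+#  Usage sketch (names are hypothetical): p_ rules describe tree
+#  shapes; match() linearizes the AST via match_r() and fires a rule's
+#  method each time its pattern is recognized:
+#
+#      class ExprMatcher(GenericASTMatcher):
+#          def __init__(self, ast):
+#              GenericASTMatcher.__init__(self, "V", ast)
+#              self.match()
+#          def p_expr(self, node):
+#              " V ::= expr "
+#              print "matched:", node
+#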
+
+class GenericASTMatcher(GenericParser):
+    def __init__(self, start, ast):
+        GenericParser.__init__(self, start)
+        self.ast = ast
+
+    def preprocess(self, rule, func):
+        rebind = lambda func, self=self: \
+                        lambda args, func=func, self=self: \
+                                self.foundMatch(args, func)
+        lhs, rhs = rule
+        rhslist = list(rhs)
+        rhslist.reverse()
+
+        return (lhs, tuple(rhslist)), rebind(func)
+
+    def foundMatch(self, args, func):
+        func(args[-1])
+        return args[-1]
+
+    def match_r(self, node):
+        self.input.insert(0, node)
+        children = 0
+
+        for child in node:
+            if children == 0:
+                self.input.insert(0, '(')
+            children = children + 1
+            self.match_r(child)
+
+        if children > 0:
+            self.input.insert(0, ')')
+
+    def match(self, ast=None):
+        if ast is None:
+            ast = self.ast
+        self.input = []
+
+        self.match_r(ast)
+        self.parse(self.input)
+
+    def resolve(self, list):
+        #
+        #  Resolve ambiguity in favor of the longest RHS.
+        #
+        return list[-1]
+
+def _dump(tokens, sets, states):
+    for i in range(len(sets)):
+        print 'set', i
+        for item in sets[i]:
+            print '\t', item
+            for (lhs, rhs), pos in states[item[0]].items:
+                print '\t\t', lhs, '::=',
+                print string.join(rhs[:pos]),
+                print '.',
+                print string.join(rhs[pos:])
+        if i < len(tokens):
+            print
+            print 'token', str(tokens[i])
+            print


