# Translate an indented parse-tree dump (JavaScript-style node names such as
# FUNCTION, VAR, FOR, IF; see process() below) into Python source.  The dump
# file is named on the command line and the generated Python is printed to
# stdout.  Note: this is Python 2 code (print statements, integer division).

import os, sys

PREAMBLE = '''# Preamble
import sys
def Array(n):
  return [0]*n
# Body
'''

POSTAMBLE = ""
class Data:
  def __init__(self, text):
    if type(text) is not str:
      print "Bad, bad", text
      assert False
    self.text = text

  def indent(self):
    return '\n'.join(map(lambda line: (' ' + line), self.text.split('\n')))

def getdepth(line):
  return (len(line) - len(line.lstrip()))/2
## head - our head line
## clines - list of groups of lines, one group per child; each group starts with that child's head line
def process(head, clines, indent, **kwargs):
  # print "Head:", head
  def datify(text=None):
    # Parse all children; join their text unless an explicit |text| is given.
    cdatas = map(parse, clines)
    ret = Data(('\n'.join(filter(lambda line: line.strip() != '', map(lambda cdata: cdata.text, cdatas)))) if text is None else text)
    ret.cdatas = cdatas
    return ret
  def textify():
    return datify().indent()
  def untoken(line):
    # Drop a leading 'TOK_'-style prefix and anything after the ':'.
    token = line.lstrip()
    if token[:3] == 'TOK':
      token = token[4:]
    return token.split(':')[0]
  def get(label):
    # Return the values of all child head lines of the form '<label>: <value>'.
    ret = []
    for lines in clines:
      head_ = lines[0]
      if untoken(head_) == label:
        ret.append(': '.join(head_.split(': ')[1:]))
    return ret
  def pullprop(ret, prop):
    # Copy |prop| up from the first child that has it.
    # print "Pull", prop
    found = False
    for cdata in ret.cdatas:
      if hasattr(cdata, prop):
        # print "foundz it"
        setattr(ret, prop, getattr(cdata, prop))
        found = True
        break
    return found

  token = untoken(head)
  # print " "*indent, indent, token # XXX
  if token in 'LC SEMI'.split(' '):
    # print "Zzzzz", head
    data = datify()
    # print head, "childrens:", data.cdatas.__len__()
    # for i in range(len(data.cdatas)):
    #   print i, data.cdatas[i].text
    # print "Adn full is", data.text, "chz"
    return data
  elif token == 'UPVARS':
    ret = datify()
    # print "upvars:", ret.cdatas
    pullprop(ret, 'args')
    # print "ok?"
    return ret
  elif token in 'name upvar'.split(' '):
    return Data('')
  elif token == 'FUNCTION':
    name = get('name')[0]
    data = datify()
    pullprop(data, 'args')
    # print "Nameze:", name, data.args
    return Data('def %s(%s):\n%s' % (name, ', '.join(data.args), data.indent()))
  elif token == 'NAME':
    name = get('name')[0]
    # print "Name here:", head, name
    ret = datify(name)
    ret.name = name
    # print "Needs equals"
    pullprop(ret, 'equals')
    # print "got it"
    return ret
  elif token == 'equals':
    ret = datify()
    ret.equals = ret.text
    return ret
  elif token == 'ARGSBODY': # Several NAME, then LC
    # print "argsbody 1"#, clines
    args = []
    while True:
      # print "argsbody 2"
      if untoken(clines[0][0]) == 'NAME':
        args.append(parse(clines[0]).text)
      else:
        break
      clines = clines[1:]
    # print "argsbody 3", args
    ret = datify()
    # print "argsbody 4"
    ret.args = args
    # print "argsbody args:", ret.args
    return ret
  elif token == 'VAR':
    data = datify()
    if not pullprop(data, 'name'):
      return data # We have a child, e.g. assign, which is all we are
    pullprop(data, 'equals')
    if not hasattr(data, 'equals'): return Data('')
    return Data('%s = %s' % (data.name, data.equals))
  elif token == 'LP': # left paren - perhaps a function call
    data = datify()
    if len(data.cdatas) == 1:
      return data
    data.name = data.cdatas[0].name
    data.args = []
    for i in range(1, len(data.cdatas)):
      data.args.append(data.cdatas[i].text)
    if data.name == 'parseInt': data.name = 'int'
    elif data.name == 'print':
      return Data('print %s' % (', '.join(data.args)))
    return Data('%s(%s)' % (data.name, ', '.join(data.args)))
  elif token in 'MINUS PLUS'.split(' '):
    op = {
      'MINUS': '-',
      'PLUS': '+',
    }
    data = datify()
    return Data('(%s)' % ((' ' + op[token] + ' ').join(map(lambda cdata: cdata.text, data.cdatas))))
  elif token in 'INC DEC'.split(' '):
    op = {
      'INC': '+=',
      'DEC': '-=',
    }
    data = datify()
    return Data('%s %s 1' % (data.cdatas[0].text, op[token]))
  elif token in 'OR AND'.split(' '):
    op = {
      'OR': 'or',
      'AND': 'and',
    }
    data = datify()
    return Data('(%s %s %s)' % (data.cdatas[0].text, op[token], data.cdatas[1].text))
  elif token in 'RELOP EQOP SHOP'.split(' '):
    data = datify()
    return Data('(%s %s %s)' % (data.cdatas[0].text, get('op')[0], data.cdatas[2].text))
  elif token in 'UNARYOP'.split(' '):
    op = {
      '!': 'not',
    }
    data = datify()
    return Data('(%s %s)' % (op[get('op')[0]], data.cdatas[1].text))
  elif token == 'ASSIGN':
    data = datify()
    # print "assign:", data.cdatas[2].text
    return Data('%s %s %s' % (data.cdatas[0].text, get('op')[0], data.cdatas[2].text))
  elif token == 'LB': # left bracket - indexing op
    data = datify()
    subject = data.cdatas[0].text
    if subject == 'arguments': subject = 'sys.argv[1:]'
    return Data('%s[%s]' % (subject, data.cdatas[1].text))
  elif token == 'NUMBER':
    def prettify(num):
      # Render integral values without a trailing '.0'.
      if int(eval(num)) == float(num):
        return str(int(eval(num)))
      else:
        return num
    return Data(prettify(get('value')[0]))
  elif token == 'STRING':
    return Data('"%s"' % get('string')[0])
  elif token == 'PRIMARY':
    val = get('primary')[0].split(' ')[1]
    def conv(v):
      if v == 'true': return 'True'
      else: return v
    return Data(conv(val))
  elif token == 'DOT':
    func = get('dotted')[0]
    subject = datify().cdatas[0].text
    if func == 'toString':
      return Data('str(%s)' % subject)
    return Data('(%s).%s()' % (subject, func))
  elif token == 'FORHEAD':
    data = datify()
    data.init = data.cdatas[0]
    data.condition = data.cdatas[1]
    data.inc = data.cdatas[2]
    # print "forhead:", data.init.text, "(((", data.condition.text, ')))', data.inc.text
    return data
  elif token == 'FOR':
    # Lower 'for (init; cond; inc) body' into init followed by a while loop.
    data = datify()
    forhead = data.cdatas[0]
    semi = data.cdatas[1]
    return Data(
'''%s
while %s:
%s
%s''' % (forhead.init.text, forhead.condition.indent(), semi.indent(), forhead.inc.indent()))
  elif token == 'WHILE':
    data = datify()
    return Data(
'''while %s:
%s''' % (data.cdatas[0].text, data.cdatas[1].indent()))
  elif token == 'IF':
    data = datify()
    data = Data(
'''if %s:
%s''' % (data.cdatas[0].text, data.cdatas[1].indent()))
    # print "I am now:", data.text, "."
    return data
  elif token == 'RETURN':
    return Data('return %s' % datify().cdatas[0].text)
  elif token == 'BREAK':
    return Data('break')
  else:
    return Data(token)
def parse(lines):
  if len(lines) == 0: return None
  head = lines[0]
  depth = getdepth(head)
  token = head.split(':')[0]
  # print "Token:", " "*depth, token, lines
  children = []
  for i in range(1, len(lines)):
    if getdepth(lines[i]) == depth+1:
      children.append(i)
  children.append(-1) # So the last child gets all the remaining lines
  # print "children:", children
  clines = []
  for i in range(len(children)-1):
    # print "child:", lines, children[i], children[i+1], len(lines)
    if children[i+1] != -1:
      clines.append(lines[children[i]:children[i+1]])
    else:
      clines.append(lines[children[i]:])
    # print clines[-1]
  return process(head.lstrip(), clines, depth)
input_filename = sys.argv[1]
#output_filename = sys.argv[2]

generated = PREAMBLE + parse(map(lambda line: line.rstrip(), filter(lambda line: line[0:2] != '//', open(input_filename, 'r').readlines()))).text + POSTAMBLE
print generated

#f = open(output_filename, 'w')
#f.write(generated + '\n\n')
#f.close()
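# ---------------------------------------------------------------------------
# Usage sketch (not from the original gist; the file names and the sample
# dump below are assumptions). The script reads the parse-tree dump named in
# sys.argv[1] and prints the generated Python to stdout:
#
#   python this_script.py tree.txt > out.py
#
# The dump format is inferred from what process()/get() expect: one node per
# line, two spaces of indentation per nesting level, leaf values written as
# 'key: value', and an optional 'TOK_' prefix on node names. For example, a
# hypothetical dump such as
#
#   LC:
#     SEMI:
#       LP:
#         NAME:
#           name: print
#         NUMBER:
#           value: 5
#
# would come out as 'print 5', emitted after the PREAMBLE.
# ---------------------------------------------------------------------------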