Makes one Blender text object and colours its characters individually by assigning each character a material based on its lexed token type.
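The script relies on two pieces: Python's built-in tokenize module to classify every character of the source text, and the per-character body_format collection that Blender exposes on FONT curves, where each entry's material_index selects one of the object's material slots. A minimal sketch of that second mechanism (the object name "Demo" and its two material slots are assumptions, purely for illustration):

import bpy

# assumes a text (FONT curve) object named "Demo" with at least two material slots
txt = bpy.data.objects["Demo"].data
txt.body = "hi"
txt.body_format[0].material_index = 1  # render the first character with material slot 1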
import bpy
import numpy as np

text_in = bpy.data.texts["Text"].as_string()

def get_obj_and_fontcurve(context, name):
    """Return (object, font curve) called `name`, creating either if it does not exist yet."""
    collection = context.scene.collection
    curves = bpy.data.curves
    objects = bpy.data.objects

    # CURVES
    if name not in curves:
        f = curves.new(name, 'FONT')
    else:
        f = curves[name]

    # CONTAINER OBJECTS
    if name in objects:
        sv_object = objects[name]
    else:
        sv_object = objects.new(name, f)
        collection.objects.link(sv_object)

    return sv_object, f

def add_material(name, base_color):
    """Create a node-based material with the given base colour."""
    rgba = list(base_color) + [1.0]
    mat = bpy.data.materials.new(name)
    mat.use_nodes = True
    # input 0 of the Principled BSDF is Base Color
    mat.node_tree.nodes["Principled BSDF"].inputs[0].default_value = rgba

# token-type codes -> [material name, material slot index, base RGB colour]
lex_dict = {
    (1,): ["name1Color", 0, (0.4, 0.9, 0.8)],
    (2,): ["numberColor", 1, (0.9, 0.9, 1.0)],
    (3,): ["stringColor", 2, (0.148, 0.447, 0.04)],
    (7, 8): ["parenColor", 3, (0.4, 0.3, 0.7)],
    (9, 10): ["bracketColor", 4, (0.5, 0.7, 0.7)],
    (22,): ["equalsColor", 5, (0.9, 0.7, 0.6)],
    (25, 26): ["braceColor", 6, (0.4, 0.5, 0.7)],
    (53, 54): ["opColor", 7, (1.0, 0.3, 0.7)],
    (55, 60): ["commentColor", 8, (0.2, 0.2, 0.2)],
    (90,): ["name2Color", 9, (0.7, 0.9, 0.3)],
    (91,): ["name3Color", 10, (0.3, 0.9, 0.4)],
}

# flatten lex_dict so each individual token-type code maps to its material slot index
lex_remap = {}
for key, val in lex_dict.items():
    for token_code in key:
        lex_remap[token_code] = val[1]

def syntax_highlight_basic(text):
    """
    Use Python's built-in lexer/tokenizer to classify the source text and
    return a 2D lookup table (one row per line, one cell per character)
    holding a token-type code for every character.
    """
    import tokenize
    import io

    text_array = text.split('\n')
    terminal_width = len(max(text_array, key=len)) + 1
    num_rows = len(text_array)
    array_size = terminal_width * num_rows

    # -2 marks characters not claimed by any token (whitespace / padding)
    ones = np.ones(array_size) * -2

    with io.StringIO(text) as f:
        tokens = tokenize.generate_tokens(f.readline)
        for token in tokens:
            if token.type in (0, 4, 56, 256):
                continue
            if not token.string or (token.start == token.end):
                continue

            token_type = token.type
            if token.type == 1:
                # NAME: promote keywords and a few builtins to custom codes
                if token.string in {
                        'print', 'def', 'class', 'break', 'continue', 'return', 'while', 'or', 'and',
                        'dir', 'if', 'in', 'as', 'out', 'with', 'from', 'import', 'for'}:
                    token_type = 90
                elif token.string in {'False', 'True', 'yield', 'repr', 'range', 'enumerate'}:
                    token_type = 91
            elif token.type in {53,}:
                # OPS
                # 7: 'LPAR', 8: 'RPAR'
                # 9: 'LSQB', 10: 'RSQB'
                # 25: 'LBRACE', 26: 'RBRACE'
                if token.exact_type in {7, 8, 9, 10, 25, 26}:
                    token_type = token.exact_type
                elif token.exact_type == 22:
                    token_type = token.exact_type

            current_type = float(token_type)
            row_start, char_start = token.start[0] - 1, token.start[1]
            row_end, char_end = token.end[0] - 1, token.end[1]
            index1 = (row_start * terminal_width) + char_start
            index2 = (row_end * terminal_width) + char_end
            np.put(ones, np.arange(index1, index2), [current_type])

    final_ones = ones.reshape((-1, terminal_width))
    return final_ones
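
# For reference (illustrative only): tokenize reports each token with a numeric type
# code, the matched string, and 1-based row / 0-based column start and end positions.
# For the single line "x = 1" it yields roughly
#   TokenInfo(type=1 (NAME),   string='x', start=(1, 0), end=(1, 1), ...)
#   TokenInfo(type=OP,         string='=', start=(1, 2), end=(1, 3), ...)
#   TokenInfo(type=2 (NUMBER), string='1', start=(1, 4), end=(1, 5), ...)
# where the numeric value of OP (53 in the check above) depends on the Python version.
# Those start/end positions are what syntax_highlight_basic maps into the character grid.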

# first make sure we have materials to match all lexed types
for lexid, mat_description in lex_dict.items():
    named_color = mat_description[0]
    if named_color in bpy.data.materials:
        continue
    base_color = mat_description[2]
    add_material(named_color, base_color)

mat_array = syntax_highlight_basic(text=text_in).tolist()

sv_obj, f = get_obj_and_fontcurve(bpy.context, "lexed_test")
f.body = text_in

# append materials in lex_dict order so slot numbers match the indices stored in lex_dict
if len(sv_obj.data.materials) == 0:
    for lexid, mat_description in lex_dict.items():
        named_color = mat_description[0]
        sv_obj.data.materials.append(bpy.data.materials.get(named_color))

# walk the text character by character and pick the matching material slot
idx_tracker_row = 0
idx_tracker_col = 0
for char, char_info in zip(f.body, f.body_format):
    if char == "\n":
        idx_tracker_row += 1
        idx_tracker_col = 0
        continue

    lexed_type = int(mat_array[idx_tracker_row][idx_tracker_col])
    if lexed_type == -2:
        idx_tracker_col += 1
        continue

    if lexed_type in lex_remap:
        char_info.material_index = lex_remap[lexed_type]
    else:
        print(char, lexed_type)
    idx_tracker_col += 1

# print(lex_remap)
# for l, st in zip(mat_array, text_in.split('\n')):
#     print(st)
#     print(l)
#     print()
# print(f.body_format[4].material_index)
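
To try it, open this script in Blender's Text Editor, make sure a text datablock named "Text" exists (its contents are what gets highlighted; it can simply be this script itself), and run the script. A quick sanity check from the Python console afterwards, assuming the run created the "lexed_test" object as above:

import bpy

obj = bpy.data.objects.get("lexed_test")
if obj is not None:
    print(obj.name, "has", len(obj.data.materials), "material slots")
    # material index assigned to the first character of the generated text
    print(obj.data.body_format[0].material_index)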