window.parseFramework={language:function framework_language(){"use strict";return},lexer:{},parse:{},parseerror:"",parser:function framework_parser(){"use strict";return}};/* | |
Parse Framework | |
*/ | |
/*jslint node:true */ | |
/*eslint-env node*/ | |
/*eslint no-console: 0*/ | |
/*global global, location*/ | |
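// The parse framework represents parsed output as seven parallel arrays (begin, lexer,
// lines, presv, stack, token, types). One record is a single index across all seven
// arrays, and parse.count always points at the last populated index of parse.data.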
(function parse_init() { | |
"use strict"; | |
let framework; | |
const parse = { | |
concat: function parse_concat(data, array) { | |
parse | |
.datanames | |
.forEach(function parse_concat_datanames(value) { | |
data[value] = data[value].concat(array[value]); | |
}); | |
if (data === parse.data) { | |
parse.count = data.token.length - 1; | |
} | |
}, | |
count: -1, | |
data: { | |
begin: [], | |
lexer: [], | |
lines: [], | |
presv: [], | |
stack: [], | |
token: [], | |
types: [] | |
}, | |
datanames: ["begin", "lexer", "lines", "presv", "stack", "token", "types"], | |
lineNumber: 1, | |
linesSpace: 0, | |
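// objectSort - alphabetically sorts the members of the most recently parsed object
// literal by token value, keeping comments attached to the records that follow them
// and inserting a "," separator record where a trailing comma is missing.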
objectSort: function parse_objectSort(data) { | |
let cc = 0, dd = 0, ee = 0, ff = 0, behind = parse.count, commaTest = true, front = 0, keyend = 0, keylen = 0; | |
const keys = [], length = parse.count, lines = parse.linesSpace, sort = function parse_objectSort_sort(x, y) { | |
let xx = x[0], yy = y[0]; | |
if (data.types[xx] === "comment" || data.types[xx] === "comment-inline") { | |
do { | |
xx = xx + 1; | |
} while (xx < length && (data.types[xx] === "comment" || data.types[xx] === "comment-inline")); | |
} | |
if (data.types[yy] === "comment" || data.types[yy] === "comment-inline") { | |
do { | |
yy = yy + 1; | |
} while (yy < length && (data.types[yy] === "comment" || data.types[yy] === "comment-inline")); | |
} | |
if (data.token[xx].toLowerCase() > data.token[yy].toLowerCase()) { | |
return 1; | |
} | |
return -1; | |
}, store = { | |
begin: [], | |
lexer: [], | |
lines: [], | |
presv: [], | |
stack: [], | |
token: [], | |
types: [] | |
}; | |
if (data.token[behind] === "," || data.types[behind] === "comment") { | |
do { | |
behind = behind - 1; | |
} while (behind > 0 && (data.token[behind] === "," || data.types[behind] === "comment")); | |
} | |
cc = behind; | |
if (cc > -1) { | |
do { | |
if (data.types[cc] === "end") { | |
dd = dd + 1; | |
} | |
if (data.types[cc] === "start") { | |
dd = dd - 1; | |
} | |
if (dd === 0) { | |
if (data.types[cc].indexOf("template") > -1) { | |
return; | |
} | |
if (data.token[cc] === ",") { | |
commaTest = true; | |
front = cc + 1; | |
} | |
if (commaTest === true && data.token[cc] === "," && front < behind) { | |
if (data.token[behind] !== ",") { | |
behind = behind + 1; | |
} | |
if (data.types[front] === "comment-inline") { | |
front = front + 1; | |
} | |
keys.push([front, behind]); | |
behind = front - 1; | |
} | |
} | |
if (dd < 0 && cc < parse.count) { | |
if (keys.length > 0 && keys[keys.length - 1][0] > cc + 1) { | |
ee = keys[keys.length - 1][0]; | |
if (data.types[ee - 1] !== "comment-inline") { | |
ee = ee - 1; | |
} | |
keys.push([ | |
cc + 1, | |
ee | |
]); | |
} | |
if (data.token[cc - 1] === "=" || data.token[cc - 1] === ":" || data.token[cc - 1] === "(" || data.token[cc - 1] === "[" || data.token[cc - 1] === "," || data.types[cc - 1] === "word" || cc === 0) { | |
if (keys.length > 1) { | |
keys.sort(sort); | |
keylen = keys.length; | |
commaTest = false; | |
dd = 0; | |
if (dd < keylen) { | |
do { | |
keyend = keys[dd][1]; | |
if (data.lines[keys[dd][0] - 1] > 1 && store.lines.length > 0) { | |
store.lines[store.lines.length - 1] = data.lines[keys[dd][0] - 1]; | |
} | |
ee = keys[dd][0]; | |
if (ee < keyend) { | |
do { | |
parse.push(store, { | |
begin: data.begin[ee], | |
lexer: data.lexer[ee], | |
lines: data.lines[ee], | |
presv: data.presv[ee], | |
stack: data.stack[ee], | |
token: data.token[ee], | |
types: data.types[ee] | |
}, ""); | |
ff = ff + 1; | |
//remove extra commas | |
if (data.token[ee] === ",") { | |
commaTest = true; | |
} | |
else if (data.token[ee] !== "," && data.types[ee] !== "comment" && data.types[ee] !== "comment-inline") { | |
commaTest = false; | |
} | |
ee = ee + 1; | |
} while (ee < keyend); | |
} | |
if (commaTest === false && dd < keylen - 1) { | |
ee = store.types.length - 1; | |
if (store.types[ee] === "comment" || store.types[ee] === "comment-inline") { | |
do { | |
ee = ee - 1; | |
} while (ee > 0 && (store.types[ee] === "comment" || store.types[ee] === "comment-inline")); | |
} | |
ee = ee + 1; | |
parse.splice({ | |
data: store, | |
howmany: 0, | |
index: ee, | |
record: { | |
begin: parse.structure[parse.structure.length - 1][1], | |
lexer: store.lexer[ee - 1], | |
lines: 0, | |
presv: false, | |
stack: parse.structure[parse.structure.length - 1][0], | |
token: ",", | |
types: "separator" | |
} | |
}); | |
store.lines[ee - 1] = 0; | |
ff = ff + 1; | |
} | |
dd = dd + 1; | |
} while (dd < keylen); | |
} | |
ee = store.types.length; | |
do { | |
ee = ee - 1; | |
} while (ee > 0 && (store.types[ee] === "comment" || store.types[ee] === "comment-inline")); | |
parse.splice({ | |
data: data, | |
howmany: ff, | |
index: cc + 1, | |
record: { | |
begin: 0, | |
lexer: "", | |
lines: 0, | |
presv: false, | |
stack: "", | |
token: "", | |
types: "" | |
} | |
}); | |
parse.linesSpace = lines; | |
return parse.concat(data, store); | |
} | |
} | |
return; | |
} | |
cc = cc - 1; | |
} while (cc > -1); | |
} | |
return; | |
}, | |
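// default option values; parser() replaces this object with whatever options it receives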
options: { | |
correct: false, | |
crlf: false, | |
lang: "javascript", | |
lexer: "script", | |
lexerOptions: { | |
script: {} | |
}, | |
outputFormat: "arrays", | |
source: "" | |
}, | |
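// pop - removes the last record from each data array and returns it as a single object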
pop: function parse_pop(data) { | |
const output = { | |
begin: data.begin.pop(), | |
lexer: data.lexer.pop(), | |
lines: data.lines.pop(), | |
presv: data.presv.pop(), | |
stack: data.stack.pop(), | |
token: data.token.pop(), | |
types: data.types.pop() | |
}; | |
if (data === parse.data) { | |
parse.count = parse.count - 1; | |
} | |
return output; | |
}, | |
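// push - appends one record across the data arrays; when writing into parse.data it also
// advances parse.count and maintains the structure stack according to the record type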
push: function parse_push(data, record, structure) { | |
parse | |
.datanames | |
.forEach(function parse_push_datanames(value) { | |
data[value].push(record[value]); | |
}); | |
if (data === parse.data) { | |
parse.count = parse.count + 1; | |
parse.linesSpace = 0; | |
if (record.types === "start" || record.types.indexOf("_start") > 0) { | |
parse.structure.push([structure, parse.count]); | |
} | |
else if (record.types === "end" || record.types.indexOf("_end") > 0) { | |
parse.structure.pop(); | |
} | |
else if (record.types === "else" || record.types.indexOf("_else") > 0) { | |
parse.structure[parse.structure.length - 1] = ["else", parse.count]; | |
} | |
} | |
}, | |
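// safeSort - sorts an array without relying on the native Array.prototype.sort;
// supports "normal", "ascend" and "descend" operations and, when recursive is true,
// descends into nested arrays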
safeSort: function parse_safeSort(array, operation, recursive) { | |
let extref = function parse_safeSort_extref(item) { | |
//placeholder function retained for backwards compatibility with older versions of V8 in Node.
return item; | |
}; | |
const arTest = function parse_safeSort_arTest(item) { | |
if (Array.isArray(item) === true) { | |
return true; | |
} | |
return false; | |
}, normal = function parse_safeSort_normal(item) { | |
let storeb = item; | |
const done = [item[0]], child = function parse_safeSort_normal_child() {
let a = 0; | |
const len = storeb.length; | |
if (a < len) { | |
do { | |
if (arTest(storeb[a]) === true) { | |
storeb[a] = parse_safeSort_normal(storeb[a]); | |
} | |
a = a + 1; | |
} while (a < len); | |
} | |
}, recurse = function parse_safeSort_normal_recurse(x) { | |
let a = 0; | |
const storea = [], len = storeb.length; | |
if (a < len) { | |
do { | |
if (storeb[a] !== x) { | |
storea.push(storeb[a]); | |
} | |
a = a + 1; | |
} while (a < len); | |
} | |
storeb = storea; | |
if (storea.length > 0) { | |
done.push(storea[0]); | |
extref(storea[0]); | |
} | |
else { | |
if (recursive === true) { | |
child(); | |
} | |
item = storeb; | |
} | |
}; | |
extref = recurse; | |
recurse(array[0]); | |
return item; | |
}, descend = function parse_safeSort_descend(item) { | |
let c = 0; | |
const len = item.length, storeb = item, child = function parse_safeSort_descend_child() { | |
let a = 0; | |
const lenc = storeb.length; | |
if (a < lenc) { | |
do { | |
if (arTest(storeb[a]) === true) { | |
storeb[a] = parse_safeSort_descend(storeb[a]); | |
} | |
a = a + 1; | |
} while (a < lenc); | |
} | |
}, recurse = function parse_safeSort_descend_recurse(value) { | |
let a = c, b = 0, d = 0, e = 0, ind = [], key = storeb[c], tstore = ""; | |
const tkey = typeof key; | |
if (a < len) { | |
do { | |
tstore = typeof storeb[a]; | |
if (storeb[a] > key || (tstore > tkey)) { | |
key = storeb[a]; | |
ind = [a]; | |
} | |
else if (storeb[a] === key) { | |
ind.push(a); | |
} | |
a = a + 1; | |
} while (a < len); | |
} | |
d = ind.length; | |
a = c; | |
b = d + c; | |
if (a < b) { | |
do { | |
storeb[ind[e]] = storeb[a]; | |
storeb[a] = key; | |
e = e + 1; | |
a = a + 1; | |
} while (a < b); | |
} | |
c = c + d; | |
if (c < len) { | |
extref(""); | |
} | |
else { | |
if (recursive === true) { | |
child(); | |
} | |
item = storeb; | |
} | |
return value; | |
}; | |
extref = recurse; | |
recurse(""); | |
return item; | |
}, ascend = function parse_safeSort_ascend(item) { | |
let c = 0; | |
const len = item.length, storeb = item, child = function parse_safeSort_ascend_child() { | |
let a = 0; | |
const lenc = storeb.length; | |
if (a < lenc) { | |
do { | |
if (arTest(storeb[a]) === true) { | |
storeb[a] = parse_safeSort_ascend(storeb[a]); | |
} | |
a = a + 1; | |
} while (a < lenc); | |
} | |
}, recurse = function parse_safeSort_ascend_recurse(value) { | |
let a = c, b = 0, d = 0, e = 0, ind = [], key = storeb[c], tstore = ""; | |
const tkey = typeof key; | |
if (a < len) { | |
do { | |
tstore = typeof storeb[a]; | |
if (storeb[a] < key || tstore < tkey) { | |
key = storeb[a]; | |
ind = [a]; | |
} | |
else if (storeb[a] === key) { | |
ind.push(a); | |
} | |
a = a + 1; | |
} while (a < len); | |
} | |
d = ind.length; | |
a = c; | |
b = d + c; | |
if (a < b) { | |
do { | |
storeb[ind[e]] = storeb[a]; | |
storeb[a] = key; | |
e = e + 1; | |
a = a + 1; | |
} while (a < b); | |
} | |
c = c + d; | |
if (c < len) { | |
extref(""); | |
} | |
else { | |
if (recursive === true) { | |
child(); | |
} | |
item = storeb; | |
} | |
return value; | |
}; | |
extref = recurse; | |
recurse(""); | |
return item; | |
}; | |
if (arTest(array) === false) { | |
return array; | |
} | |
if (operation === "normal") { | |
return normal(array); | |
} | |
if (operation === "descend") { | |
return descend(array); | |
} | |
return ascend(array); | |
}, | |
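// spacer - steps across white space characters, counting line breaks into
// parse.lineNumber and parse.linesSpace, and returns the index of the last
// white space character consumed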
spacer: function parse_spacer(args) { | |
// * array - the characters to scan | |
// * index - the index to start scanning from | |
// * end - the length of the array, to break the loop | |
parse.linesSpace = 1; | |
do { | |
if (args.array[args.index] === "\n") { | |
parse.linesSpace = parse.linesSpace + 1; | |
parse.lineNumber = parse.lineNumber + 1; | |
} | |
if ((/\s/).test(args.array[args.index + 1]) === false) { | |
break; | |
} | |
args.index = args.index + 1; | |
} while (args.index < args.end); | |
return args.index; | |
}, | |
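// splice - removes and/or inserts records across the data arrays, mirroring
// Array.prototype.splice, and keeps parse.count in sync when operating on parse.data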
splice: function parse_splice(spliceData) { | |
// * data - The data object to alter | |
// * howmany - How many indexes to remove | |
// * index - The index where to start | |
// * record - A new record to insert | |
if (spliceData.record.token !== "") { | |
parse | |
.datanames | |
.forEach(function parse_splice_datanames(value) { | |
spliceData | |
.data[value] | |
.splice(spliceData.index, spliceData.howmany, spliceData.record[value]); | |
}); | |
if (spliceData.data === parse.data) { | |
parse.count = (parse.count - spliceData.howmany) + 1; | |
parse.linesSpace = 0; | |
} | |
return; | |
} | |
parse | |
.datanames | |
.forEach(function parse_splice_datanames(value) { | |
spliceData | |
.data[value] | |
.splice(spliceData.index, spliceData.howmany); | |
}); | |
if (spliceData.data === parse.data) { | |
parse.count = parse.count - spliceData.howmany; | |
parse.linesSpace = 0; | |
} | |
}, | |
structure: [["global", -1]] | |
}, parser = function parser_(options) { | |
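// parser - resets the parse state, dispatches the source to the requested lexer,
// verifies that all data arrays come back the same length, and, when objectSort or
// tagSort is enabled, re-computes begin references so sorted records still point at
// the correct parent structure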
parse.count = -1; | |
parse.data = { | |
begin: [], | |
lexer: [], | |
lines: [], | |
presv: [], | |
stack: [], | |
token: [], | |
types: [] | |
}; | |
parse.datanames = [ | |
"begin", | |
"lexer", | |
"lines", | |
"presv", | |
"stack", | |
"token", | |
"types" | |
]; | |
parse.linesSpace = 0; | |
parse.lineNumber = 1; | |
parse.structure = [ | |
["global", -1] | |
]; | |
parse.options = options; | |
parse.structure.pop = function parse_structure_pop() { | |
const len = parse.structure.length - 1, arr = parse.structure[len]; | |
if (len > 0) { | |
parse | |
.structure | |
.splice(len, 1); | |
} | |
return arr; | |
}; | |
if (framework.lexer[options.lexer] === undefined) { | |
framework.parseerror = "Lexer '" + options.lexer + "' isn't available."; | |
} | |
else if (typeof framework.lexer[options.lexer] !== "function") {
framework.parseerror = "Specified lexer, " + options.lexer + ", is not a function."; | |
} | |
else { | |
framework.parseerror = ""; | |
options.lexerOptions = (options.lexerOptions || {}); | |
Object.keys(framework.lexer).forEach(function parse_lexers(value) { | |
options.lexerOptions[value] = (options.lexerOptions[value] || {}); | |
}); | |
// This call parses the source code using the selected lexer
framework.lexer[options.lexer](options.source + " "); | |
} | |
// validate that all the data arrays are the same length | |
(function parser_checkLengths() { | |
let a = 0, b = 0; | |
const keys = Object.keys(parse.data), c = keys.length; | |
do { | |
b = a + 1; | |
do { | |
if (parse.data[keys[a]].length !== parse.data[keys[b]].length) { | |
framework.parseerror = "'" + keys[a] + "' array is of different length than '" + | |
keys[b] + "'"; | |
break; | |
} | |
b = b + 1; | |
} while (b < c); | |
a = a + 1; | |
} while (a < c - 1); | |
}()); | |
if ((parse.options.lexerOptions[options.lexer] !== undefined && parse.options.lexerOptions[options.lexer].objectSort === true) || (parse.options.lexerOptions.markup !== undefined && parse.options.lexerOptions.markup.tagSort === true)) { | |
let a = 0; | |
const data = parse.data, b = data.begin.length, structure = [-1]; | |
do { | |
if ((data.types[a - 1] === "attribute" || data.types[a - 1] === "jsx_attribute_end") && data.types[a] !== "attribute" && data.types[a] !== "jsx_attribute_start" && data.lexer[a - 1] === "markup" && data.types[data.begin[a - 1]] === "singleton") { | |
structure.pop(); | |
} | |
if (data.begin[a] !== structure[structure.length - 1]) { | |
if (parse.options.lexerOptions[options.lexer].objectSort === true && (data.lexer[a] === "script" || data.lexer[a] === "style")) { | |
data.begin[a] = structure[structure.length - 1]; | |
} | |
else if (parse.options.lexerOptions.markup.tagSort === true && data.lexer[a] === "markup") { | |
data.begin[a] = structure[structure.length - 1]; | |
} | |
} | |
if (data.types[a] === "start" || data.types[a] === "template_start" || (data.types[a] === "cdata" && data.token[data.begin[a - 1]].toLowerCase().indexOf("<script") === 0)) { | |
structure.push(a); | |
} | |
else if (structure.length > 1 && (data.types[a] === "end" || data.types[a] === "template_end")) { | |
structure.pop(); | |
} | |
else if (data.types[a] === "template_else") { | |
structure[structure.length - 1] = a; | |
} | |
else if (data.types[a] === "attribute" && data.lexer[a] === "markup" && (data.types[a - 1] === "start" || data.types[a - 1] === "singleton")) { | |
structure.push(a - 1); | |
} | |
else if (data.lexer[a] === "markup" && data.types[a] !== "attribute" && data.types[structure[structure.length - 1] + 1] === "attribute") { | |
structure.pop(); | |
} | |
a = a + 1; | |
} while (a < b); | |
} | |
}, parserArrays = function parserArrays(options) { | |
parser(options); | |
return parse.data; | |
}, parserObjects = function parserObjects(options) { | |
let a = 0; | |
const data = []; | |
parser(options); | |
do { | |
data.push({ | |
begin: parse.data.begin[a], | |
lexer: parse.data.lexer[a], | |
lines: parse.data.lines[a], | |
presv: parse.data.presv[a], | |
stack: parse.data.stack[a], | |
token: parse.data.token[a], | |
types: parse.data.types[a] | |
}); | |
a = a + 1; | |
} while (a < parse.count); | |
return data; | |
}; | |
window.parseFramework = (window.parseFramework || { | |
lexer: {}, | |
parse: parse, | |
parseerror: "", | |
parserArrays: parserArrays, | |
parserObjects: parserObjects | |
}); | |
framework = window.parseFramework; | |
framework.parse = parse; | |
framework.parserArrays = parserArrays; | |
framework.parserObjects = parserObjects; | |
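// Example usage (an illustrative sketch, not part of the framework itself): this assumes
// the file is loaded in a browser and that a lexer named "markdown" has already been
// registered on window.parseFramework.lexer. parserArrays returns the parallel data
// arrays directly, while parserObjects returns one object per parsed token.
//
//     var result = window.parseFramework.parserArrays({
//         correct: false,
//         crlf: false,
//         lang: "markdown",
//         lexer: "markdown",
//         lexerOptions: {markdown: {}},
//         outputFormat: "arrays",
//         source: "# Hello"
//     });
//     // result.token, result.types, result.begin, etc. are index aligned arrays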
}());
window.parseFramework.parse.options.lexerOptions.markdown = {};
window.parseFramework.parse.options.lexerOptions.markup = {};
window.parseFramework.parse.options.lexerOptions.script = {};
window.parseFramework.parse.options.lexerOptions.style = {};
/*global global*/
(function markdown_init() { | |
"use strict"; | |
const framework = window.parseFramework, markdown = function lexer_markdown(source) { | |
let a = 0, b = 0, bc1 = 0, bc2 = 0, quote = "", stack = []; | |
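// The markdown lexer works line by line: "lines" holds the source split on line breaks,
// "a" tracks the current line index, "b" the number of lines to process, and each helper
// below emits HTML-like token records through parse.push.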
const parse = framework.parse, data = parse.data, options = parse.options, lines = (options.crlf === true) | |
? source.replace(/\u0000/g, "\ufffd").split("\r\n") | |
: source.replace(/\u0000/g, "\ufffd").split("\n"), hr = function lexer_markdown_hr() { | |
parse.push(data, { | |
begin: parse.structure[parse.structure.length - 1][1], | |
lexer: "markdown", | |
lines: 0, | |
presv: false, | |
stack: parse.structure[parse.structure.length - 1][0], | |
token: "<hr/>", | |
types: "singleton" | |
}, ""); | |
}, text = function lexer_markdown_text(item, tag, listrecurse) { | |
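// text - converts one line of inline markdown (emphasis, strong, strike, inline code,
// links and images) into token records; "tag" names the wrapping element and the special
// value "multiline" suppresses the wrapping start/end records.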
let tagend = tag.replace("<", "</"), struct = tag.replace("<", "").replace(/(\/?>)$/, ""); | |
// line containing strong, em, or inline code | |
if (item.indexOf("*") > -1 || item.indexOf("`") > -1 || (item.indexOf("[") > -1 && item.indexOf("](") > -1)) { | |
const esctest = function lexer_markdown_text_esctest() { | |
let bb = aa - 1; | |
if (str[bb] === "\\") { | |
do { | |
bb = bb - 1; | |
} while (str[bb] === "\\"); | |
if ((aa - bb) % 2 === 1) { | |
return true; | |
} | |
return false; | |
} | |
return false; | |
}, gencontent = function lexer_markdown_text_gencontent() { | |
return itemx.join("").replace(/\s+/g, " ").replace(/^\s/, "").replace(/\s$/, "").replace(/\\(?!(\\))/g, "").replace(/\\{2}/g, "\\"); | |
}; | |
let stray = "", str = item.split(""), content = "", itemx = [], square = 0, aa = 0, bb = str.length, cc = 0; | |
if (tag !== "multiline") { | |
parse.push(data, { | |
begin: parse.structure[parse.structure.length - 1][1], | |
lexer: "markdown", | |
lines: 0, | |
presv: false, | |
stack: parse.structure[parse.structure.length - 1][0], | |
token: tag, | |
types: "start" | |
}, struct); | |
} | |
do { | |
if (str[aa] === "[" && esctest() === false) { | |
cc = aa; | |
square = 0; | |
do { | |
if (str[cc] === "[") { | |
square = square + 1; | |
} | |
else if (str[cc] === "]") { | |
square = square - 1; | |
if (square < 1 && str[cc + 1] === "(") { | |
content = itemx.join("").replace(/\s+/g, " ").replace(/^\s/, "").replace(/\s$/, ""); | |
if (content !== "") { | |
parse.push(data, { | |
begin: parse.structure[parse.structure.length - 1][1], | |
lexer: "markdown", | |
lines: 0, | |
presv: false, | |
stack: parse.structure[parse.structure.length - 1][0], | |
token: content, | |
types: "content" | |
}, ""); | |
} | |
itemx = []; | |
stack.push("["); | |
if (str[aa - 1] === "!") { | |
content = "img"; | |
} | |
else { | |
content = "a"; | |
} | |
if (content === "img") { | |
if (data.token[parse.count] === "!") { | |
parse.pop(data); | |
} | |
else if (data.types[parse.count] === "content") { | |
data.token[parse.count] = data.token[parse.count].slice(0, data.token[parse.count].length - 1).replace(/(\s)$/, ""); | |
} | |
parse.push(data, { | |
begin: parse.structure[parse.structure.length - 1][1], | |
lexer: "markdown", | |
lines: 1, | |
presv: false, | |
stack: parse.structure[parse.structure.length - 1][0], | |
token: "<img/>", | |
types: "singleton" | |
}, ""); | |
content = str.slice(aa + 1, cc).join("").replace(/\s+/g, " ").replace(/^\s/, "").replace(/\s$/, ""); | |
if (content !== "") { | |
parse.push(data, { | |
begin: parse.structure[parse.structure.length - 1][1], | |
lexer: "markdown", | |
lines: 0, | |
presv: false, | |
stack: parse.structure[parse.structure.length - 1][0], | |
token: "alt=\"" + content + "\"", | |
types: "attribute" | |
}, ""); | |
} | |
parse.push(data, { | |
begin: parse.structure[parse.structure.length - 1][1], | |
lexer: "markdown", | |
lines: 1, | |
presv: false, | |
stack: parse.structure[parse.structure.length - 1][0], | |
token: "src=\"", | |
types: "attribute" | |
}, ""); | |
aa = cc - 1; | |
} | |
else { | |
parse.push(data, { | |
begin: parse.structure[parse.structure.length - 1][1], | |
lexer: "markdown", | |
lines: 1, | |
presv: false, | |
stack: parse.structure[parse.structure.length - 1][0], | |
token: "<a>", | |
types: "start" | |
}, "a"); | |
parse.push(data, { | |
begin: parse.structure[parse.structure.length - 1][1], | |
lexer: "markdown", | |
lines: 0, | |
presv: false, | |
stack: parse.structure[parse.structure.length - 1][0], | |
token: "href=\"", | |
types: "attribute" | |
}, ""); | |
} | |
break; | |
} | |
} | |
cc = cc + 1; | |
} while (cc < bb); | |
} | |
else if (str[aa] === "]" && str[aa + 1] === "(" && esctest() === false && stack[stack.length - 1] === "[") { | |
content = gencontent(); | |
if (content !== "" && content.length > 1 && parse.structure[parse.structure.length - 1][0] !== "img") { | |
parse.push(data, { | |
begin: parse.structure[parse.structure.length - 1][1], | |
lexer: "markdown", | |
lines: 0, | |
presv: false, | |
stack: parse.structure[parse.structure.length - 1][0], | |
token: content, | |
types: "content" | |
}, ""); | |
} | |
cc = aa + 1; | |
square = 0; | |
do { | |
if (str[cc] === "(") { | |
square = square + 1; | |
} | |
else if (str[cc] === ")") { | |
square = square - 1; | |
if (square === 0) { | |
content = str.slice(aa + 2, cc).join("").replace(/\s+/g, " ").replace(/^\s/, "").replace(/\s$/, ""); | |
aa = cc + 1; | |
itemx = []; | |
cc = (parse.structure[parse.structure.length - 1][0] === "a") | |
? parse.structure[parse.structure.length - 1][1] + 1 | |
: parse.count; | |
if (content === "") { | |
data.token[cc] = data.token[cc] + "\""; | |
} | |
else { | |
data.token[cc] = data.token[cc] + content + "\""; | |
} | |
break; | |
} | |
} | |
cc = cc + 1; | |
} while (cc < bb); | |
stack.pop(); | |
if (parse.structure[parse.structure.length - 1][0] === "a") { | |
parse.push(data, { | |
begin: parse.structure[parse.structure.length - 1][1], | |
lexer: "markdown", | |
lines: 0, | |
presv: false, | |
stack: parse.structure[parse.structure.length - 1][0], | |
token: "</a>", | |
types: "end" | |
}, ""); | |
} | |
} | |
else if ((str[aa] === "*" || str[aa] === "~") && | |
esctest() === false && | |
stack[stack.length - 1] !== "`" && | |
((quote === "" && ((/\s/).test(str[aa - 1]) === true || | |
aa === 0)) || (quote === "**" && ((/\s/).test(str[aa + 2]) === true || | |
aa === bb - 2)) || (quote !== "" && ((/\s/).test(str[aa + 1]) === true || | |
aa === bb - 1)))) { | |
if (str[aa] === "~") { | |
quote = "~"; | |
do { | |
str[aa] = ""; | |
aa = aa + 1; | |
} while (str[aa] === "~"); | |
} | |
else if (str[aa + 1] === "*") { | |
quote = "**"; | |
str[aa + 1] = ""; | |
} | |
else { | |
quote = "*"; | |
} | |
str[aa] = ""; | |
if (quote === stack[stack.length - 1]) { | |
let midtag = "</em>"; | |
content = gencontent(); | |
if (content !== "") { | |
parse.push(data, { | |
begin: parse.structure[parse.structure.length - 1][1], | |
lexer: "markdown", | |
lines: 0, | |
presv: false, | |
stack: parse.structure[parse.structure.length - 1][0], | |
token: content, | |
types: "content" | |
}, ""); | |
} | |
itemx = []; | |
if (quote === "~") { | |
midtag = "</strike>"; | |
} | |
else if (quote === "**") { | |
midtag = "</strong>"; | |
} | |
stack.pop(); | |
parse.push(data, { | |
begin: parse.structure[parse.structure.length - 1][1], | |
lexer: "markdown", | |
lines: 0, | |
presv: false, | |
stack: parse.structure[parse.structure.length - 1][0], | |
token: midtag, | |
types: "end" | |
}, ""); | |
quote = ""; | |
stack = []; | |
} | |
else { | |
let midtag = "em"; | |
content = gencontent(); | |
if (content !== "") { | |
parse.push(data, { | |
begin: parse.structure[parse.structure.length - 1][1], | |
lexer: "markdown", | |
lines: 0, | |
presv: false, | |
stack: parse.structure[parse.structure.length - 1][0], | |
token: content, | |
types: "content" | |
}, ""); | |
} | |
itemx = []; | |
if (quote === "~") { | |
midtag = "strike"; | |
} | |
else if (quote === "**") { | |
midtag = "strong"; | |
} | |
stack.push(quote); | |
parse.push(data, { | |
begin: parse.structure[parse.structure.length - 1][1], | |
lexer: "markdown", | |
lines: 1, | |
presv: false, | |
stack: parse.structure[parse.structure.length - 1][0], | |
token: "<" + midtag + ">", | |
types: "start" | |
}, midtag); | |
} | |
} | |
else if (str[aa] === "`" && esctest() === false) { | |
content = gencontent(); | |
if (content !== "") { | |
parse.push(data, { | |
begin: parse.structure[parse.structure.length - 1][1], | |
lexer: "markdown", | |
lines: 0, | |
presv: false, | |
stack: parse.structure[parse.structure.length - 1][0], | |
token: content, | |
types: "content" | |
}, ""); | |
} | |
itemx = []; | |
if (stack[stack.length - 1] === "`") { | |
str[aa] = ""; | |
stack.pop(); | |
parse.push(data, { | |
begin: parse.structure[parse.structure.length - 1][1], | |
lexer: "markdown", | |
lines: 0, | |
presv: false, | |
stack: parse.structure[parse.structure.length - 1][0], | |
token: "</code>", | |
types: "end" | |
}, ""); | |
quote = ""; | |
stack = []; | |
} | |
else { | |
stack.push("`"); | |
parse.push(data, { | |
begin: parse.structure[parse.structure.length - 1][1], | |
lexer: "markdown", | |
lines: 1, | |
presv: false, | |
stack: parse.structure[parse.structure.length - 1][0], | |
token: "<code>", | |
types: "start" | |
}, "code"); | |
} | |
} | |
if (str[aa] !== stack[stack.length - 1] && str[aa - 1] + str[aa] !== stack[stack.length - 1]) { | |
itemx.push(str[aa]); | |
} | |
aa = aa + 1; | |
} while (aa < bb); | |
content = gencontent(); | |
if (tag !== "multiline" && data.types[parse.count] === "start" && data.token[parse.count] !== tag) { | |
stray = data.token[parse.count]; | |
parse.pop(data); | |
parse.structure.pop(); | |
if (stray === "<code>") { | |
stray = "`"; | |
} | |
else if (stray === "<em>") { | |
stray = "*"; | |
} | |
else if (stray === "<strong>") { | |
stray = "**"; | |
} | |
else if (stray === "<strike>") { | |
stray = "~"; | |
} | |
if (data.types[parse.count] === "start") { | |
content = item; | |
} | |
else { | |
data.token[parse.count] = data.token[parse.count] + stray; | |
} | |
} | |
if (content !== "") { | |
parse.push(data, { | |
begin: parse.structure[parse.structure.length - 1][1], | |
lexer: "markdown", | |
lines: 1, | |
presv: false, | |
stack: parse.structure[parse.structure.length - 1][0], | |
token: content, | |
types: "content" | |
}, ""); | |
} | |
if (listrecurse === true) { | |
list(); | |
} | |
if (tag !== "multiline") { | |
parse.push(data, { | |
begin: parse.structure[parse.structure.length - 1][1], | |
lexer: "markdown", | |
lines: 0, | |
presv: false, | |
stack: parse.structure[parse.structure.length - 1][0], | |
token: tagend, | |
types: "end" | |
}, ""); | |
quote = ""; | |
stack = []; | |
} | |
return; | |
} | |
if (struct.indexOf(" ") > 0) { | |
struct = struct.slice(0, struct.indexOf(" ")); | |
} | |
if (tag !== "multiline") { | |
parse.push(data, { | |
begin: parse.structure[parse.structure.length - 1][1], | |
lexer: "markdown", | |
lines: 0, | |
presv: false, | |
stack: parse.structure[parse.structure.length - 1][0], | |
token: tag, | |
types: "start" | |
}, struct); | |
} | |
if (listrecurse === true) { | |
list(); | |
} | |
else { | |
parse.push(data, { | |
begin: parse.structure[parse.structure.length - 1][1], | |
lexer: "markdown", | |
lines: 0, | |
presv: false, | |
stack: parse.structure[parse.structure.length - 1][0], | |
token: item.replace(/^(\s+)/, "").replace(/(\s+)$/, ""), | |
types: "content" | |
}, ""); | |
} | |
if (tag !== "multiline") { | |
parse.push(data, { | |
begin: parse.structure[parse.structure.length - 1][1], | |
lexer: "markdown", | |
lines: 0, | |
presv: false, | |
stack: parse.structure[parse.structure.length - 1][0], | |
token: tagend, | |
types: "end" | |
}, ""); | |
quote = ""; | |
stack = []; | |
} | |
}, comtest = function lexer_markdown_comtest(index) { | |
return (/^(\s{0,3}<!--)/).test(lines[index]); | |
}, hrtest = function lexer_markdown_hrtest(index) { | |
return (/^(\s*((-\s*){3,}|(_\s*){3,}|(\*\s*){3,})\s*)$/).test(lines[index]); | |
}, codetest = function lexer_markdown_codetest(index) { | |
return ((/^(\u0020{4,}\s*\S)/).test(lines[index]) === true || (/^(\s*\t\s*\S)/).test(lines[index]) === true); | |
}, codeblocktest = function lexer_markdown_codeblocktest(index) { | |
return (/^(\s*((`{3,})|(~{3,}))+(\S+)?\s*)$/).test(lines[index]); | |
}, listtest = function lexer_markdown_listtest(index) { | |
const listy = (/^(\s*(\*|-|\+|(\d{1,9}(\)|\.))))/); | |
if (listy.test(lines[index]) === true) { | |
const listr = lines[index].replace(listy, ""); | |
if (listr === "") { | |
lines[index] = lines[index] + " "; | |
} | |
else if ((/\s/).test(listr.charAt(0)) === false) { | |
return false; | |
} | |
return true; | |
} | |
return false; | |
}, comment = function lexer_markdown_comment() { | |
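// comment - gathers the lines of an HTML style <!-- --> comment into a single comment
// record and trims the comment delimiters from the stored token.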
const com = []; | |
let comment = ""; | |
if (lines[a].indexOf("-->") < 0) { | |
do { | |
com.push(lines[a]); | |
a = a + 1; | |
} while (a < b && lines[a].indexOf("-->") < 0); | |
} | |
else { | |
com.push(lines[a]); | |
} | |
if (options.crlf === true) { | |
comment = com.join("\r\n").replace(/^(\s*<!--+\s*)/, "").replace(/\s*-+->/, "-->"); | |
} | |
else { | |
comment = com.join("\n").replace(/^(\s*<!--+\s*)/, "").replace(/\s*-+->/, "-->"); | |
} | |
comment = comment.slice(0, comment.indexOf("-->")); | |
if (lines[a] !== undefined) { | |
lines[a] = lines[a].slice(lines[a].indexOf("-->") + 3); | |
if (lines[a].replace(/\s+/, "") !== "") { | |
a = a - 1; | |
} | |
} | |
parse.push(data, { | |
begin: parse.structure[parse.structure.length - 1][1], | |
lexer: "markdown", | |
lines: 0, | |
presv: false, | |
stack: parse.structure[parse.structure.length - 1][0], | |
token: comment, | |
types: "comment" | |
}, ""); | |
}, code = function lexer_markdown_code(codetext, language, fourspace) { | |
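// code - wraps a run of code text in <p><code> ... </code></p> records, adds a
// "language-*" class attribute when a language name is supplied, and strips one level
// of four-space or tab indentation when fourspace is true.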
parse.push(data, { | |
begin: parse.structure[parse.structure.length - 1][1], | |
lexer: "markdown", | |
lines: 0, | |
presv: false, | |
stack: parse.structure[parse.structure.length - 1][0], | |
token: "<p>", | |
types: "start" | |
}, "p"); | |
parse.push(data, { | |
begin: parse.structure[parse.structure.length - 1][1], | |
lexer: "markdown", | |
lines: 0, | |
presv: false, | |
stack: parse.structure[parse.structure.length - 1][0], | |
token: "<code>", | |
types: "start" | |
}, "code"); | |
if (language !== "") { | |
parse.push(data, { | |
begin: parse.structure[parse.structure.length - 1][1], | |
lexer: "markdown", | |
lines: 0, | |
presv: false, | |
stack: parse.structure[parse.structure.length - 1][0], | |
token: "class=\"language-" + language + "\"", | |
types: "attribute" | |
}, ""); | |
} | |
if (codetext !== "") { | |
if (fourspace === true) { | |
if (codetext.indexOf(" ") === 0) { | |
codetext = codetext.replace(/^(\u0020{4})/, ""); | |
} | |
else { | |
codetext = codetext.replace(/^(\s*\t)/, ""); | |
} | |
} | |
parse.push(data, { | |
begin: parse.structure[parse.structure.length - 1][1], | |
lexer: "markdown", | |
lines: 0, | |
presv: false, | |
stack: parse.structure[parse.structure.length - 1][0], | |
token: codetext, | |
types: "content" | |
}, ""); | |
} | |
parse.push(data, { | |
begin: parse.structure[parse.structure.length - 1][1], | |
lexer: "markdown", | |
lines: 0, | |
presv: false, | |
stack: parse.structure[parse.structure.length - 1][0], | |
token: "</code>", | |
types: "end" | |
}, ""); | |
parse.push(data, { | |
begin: parse.structure[parse.structure.length - 1][1], | |
lexer: "markdown", | |
lines: 0, | |
presv: false, | |
stack: parse.structure[parse.structure.length - 1][0], | |
token: "</p>", | |
types: "end" | |
}, ""); | |
}, codeblock = function lexer_markdown_codeblock(ticks, blockyquote, fourspace) { | |
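// codeblock - collects the lines of a fenced (``` or ~~~) or indented code block and
// hands them to code(); ticks marks a fenced block, blockyquote keeps the scan inside a
// "greater than" blockquote, and fourspace marks indentation style code.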
const indentstr = (function lexer_markdown_codeblock_indent() {
let inumb = (ticks === true) | |
? (/^(\s*)/).exec(lines[a])[0].length | |
: 0; | |
if (inumb > 3) { | |
return 0; | |
} | |
return inumb; | |
}()), indent = new RegExp("^(\\s{0," + indentstr + "})"), language = (ticks === true) | |
? lines[a].replace(/\s*((`+)|(~+))\s*/, "").replace(/\s*/g, "") | |
: "", tilde = (/^(\s*`)/).test(lines[a]) === false, cchar = (tilde === true) | |
? "~" | |
: "`", len = lines[a].split(cchar).length - 1, endgate = new RegExp("^((\\s{0," + indentstr + "})?\\s*" + cchar + "{" + len + ",}\\s*)$"), codes = []; | |
if (ticks === true) { | |
a = a + 1; | |
if (endgate.test(lines[a]) === true) { | |
code("", language, false); | |
return; | |
} | |
} | |
do { | |
if (lines[a] === undefined) { | |
break; | |
} | |
if (lines[a] !== "") { | |
if (ticks === true) { | |
if (endgate.test(lines[a]) === true && (/^(\u0020{4})/).test(lines[a]) === false) { | |
break; | |
} | |
codes.push(lines[a].replace(indent, "")); | |
} | |
else { | |
codes.push(lines[a].replace(/^(\u0020{4})/, "").replace(/^(\s*\t)/, "")); | |
if (lines[a + 1] !== "" && codetest(a + 1) === false) { | |
break; | |
} | |
if (lines[a + 1] === "" && codetest(a + 2) === false) { | |
a = a + 1; | |
break; | |
} | |
} | |
} | |
else { | |
if (codetest(a + 1) === false) { | |
break; | |
} | |
codes.push(""); | |
} | |
if (blockyquote === true && (/^(\s*>)/).test(lines[a + 1]) === false) { | |
break; | |
} | |
a = a + 1; | |
} while (a < b); | |
if (options.crlf === true) { | |
code(codes.join("\r\n"), language, fourspace); | |
} | |
else { | |
code(codes.join("\n"), language, fourspace); | |
} | |
}, parabuild = function lexer_markdown_parabuild() { | |
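// parabuild - builds a paragraph, or a setext <h1>/<h2> heading when the next line is an
// underline of = or - characters, joining consecutive lines with <br/> records and using
// fixquote to repair emphasis that was left open across lines.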
let x = a, tag = "<p>"; | |
const test = function lexer_markdown_parabuild_test(index) { | |
if (lines[index] === undefined) { | |
return false; | |
} | |
if ((/^(\s{0,3}((=+)|(-+))\s*)$/).test(lines[index]) === true) { | |
if (lines[index].indexOf("=") > -1) { | |
tag = "<h1>"; | |
} | |
else { | |
tag = "<h2>"; | |
} | |
return false; | |
} | |
if (hrtest(index) === true) { | |
return false; | |
} | |
if ((/^(\s*((`{3,})|(~{3,}))+(\S+)?\s*)$/).test(lines[index]) === true) { | |
return false; | |
} | |
if (lines[index] === "") { | |
return false; | |
} | |
if ((/^(\s*>)/).test(lines[index]) === true) { | |
return false; | |
} | |
if ((/^(\s*#{1,6}\s)/).test(lines[index]) === true) { | |
return false; | |
} | |
return true; | |
}, fixquote = function lexer_markdown_parabuild_fixquote() { | |
let key = "", x = parse.count; | |
if (quote === "*") { | |
key = "<em>"; | |
} | |
else if (quote === "**") { | |
key = "<strong>"; | |
} | |
else if (quote === "~") { | |
key = "<strike>"; | |
} | |
parse.structure.pop(); | |
if (data.token[x] !== key) { | |
do { | |
data.begin[x] = parse.structure[parse.structure.length - 1][1]; | |
data.stack[x] = parse.structure[parse.structure.length - 1][0]; | |
x = x - 1; | |
} while (x > 0 && data.token[x] !== key); | |
} | |
if (data.types[x + 1] === "content") { | |
data.token[x + 1] = quote + " " + data.token[x + 1]; | |
parse.splice({ | |
data: data, | |
howmany: 1, | |
index: x, | |
record: { | |
begin: 0, | |
lexer: "", | |
lines: 0, | |
presv: false, | |
stack: "", | |
token: "", | |
types: "" | |
} | |
}); | |
} | |
else if (data.types[x - 1] === "content") { | |
data.token[x - 1] = data.token[x - 1] + quote; | |
parse.splice({ | |
data: data, | |
howmany: 1, | |
index: x, | |
record: { | |
begin: 0, | |
lexer: "", | |
lines: 0, | |
presv: false, | |
stack: "", | |
token: "", | |
types: "" | |
} | |
}); | |
} | |
else { | |
data.token[x] = quote; | |
data.types[x] = "content"; | |
} | |
quote = ""; | |
}; | |
if (test(a + 1) === true) { | |
do { | |
x = x + 1; | |
} while (x < b && test(x) === true); | |
parse.push(data, { | |
begin: parse.structure[parse.structure.length - 1][1], | |
lexer: "markdown", | |
lines: 0, | |
presv: false, | |
stack: parse.structure[parse.structure.length - 1][0], | |
token: tag, | |
types: "start" | |
}, tag.replace("<", "").replace(">", "")); | |
if (x === a + 1) { | |
text(lines[a], tag, false); | |
} | |
else { | |
do { | |
text(lines[a], "multiline", false); | |
parse.push(data, { | |
begin: parse.structure[parse.structure.length - 1][1], | |
lexer: "markdown", | |
lines: 0, | |
presv: false, | |
stack: parse.structure[parse.structure.length - 1][0], | |
token: "<br/>", | |
types: "singleton" | |
}, ""); | |
a = a + 1; | |
} while (a < x); | |
parse.pop(data); | |
if (quote !== "") { | |
fixquote(); | |
} | |
parse.push(data, { | |
begin: parse.structure[parse.structure.length - 1][1], | |
lexer: "markdown", | |
lines: 0, | |
presv: false, | |
stack: parse.structure[parse.structure.length - 1][0], | |
token: tag.replace("<", "</"), | |
types: "end" | |
}, ""); | |
} | |
} | |
else { | |
text(lines[a], tag, false); | |
if (tag !== "<p>") { | |
a = a + 1; | |
} | |
} | |
quote = ""; | |
stack = []; | |
}, heading = function lexer_markdown_heading() { | |
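// heading - converts an ATX heading (one to six # characters) into <hN> start and end
// records, trimming the leading hashes and any closing hash sequence from the content.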
let hash = (/^(\s*#+\s+)/).exec(lines[a])[0].replace(/\s+/g, ""), hashes = function lexer_markdown_heading_hashes(escapes) {
return escapes.replace(/\\/g, "").replace(/\s+/g, ""); | |
}, content = lines[a].replace(/^(\s*#+\s+)/, "").replace(/(\s+#+\s*)$/, "").replace(/((\\?#)+\s*)$/, hashes); | |
if (content === "" || (/^(#+)$/).test(content) === true) { | |
parse.push(data, { | |
begin: parse.structure[parse.structure.length - 1][1], | |
lexer: "markdown", | |
lines: 0, | |
presv: false, | |
stack: parse.structure[parse.structure.length - 1][0], | |
token: "<h" + hash.length + ">", | |
types: "start" | |
}, "h" + hash.length); | |
parse.push(data, { | |
begin: parse.structure[parse.structure.length - 1][1], | |
lexer: "markdown", | |
lines: 0, | |
presv: false, | |
stack: parse.structure[parse.structure.length - 1][0], | |
token: "</h" + hash.length + ">", | |
types: "end" | |
}, ""); | |
} | |
else { | |
text(content, "<h" + hash.length + ">", false); | |
} | |
}, blockquote = function lexer_markdown_blockquote() { | |
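// blockquote - strips the leading "greater than" markers and re-runs the other handlers
// (headings, lists, code, paragraphs, nested blockquotes) on the quoted lines, wrapping
// the result in <blockquote> records.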
let x = a; | |
bc1 = bc1 + 1; | |
bc2 = bc1; | |
parse.push(data, { | |
begin: parse.structure[parse.structure.length - 1][1], | |
lexer: "markdown", | |
lines: 0, | |
presv: false, | |
stack: parse.structure[parse.structure.length - 1][0], | |
token: "<blockquote>", | |
types: "start" | |
}, "blockquote"); | |
do { | |
lines[x] = lines[x].replace(/^(\s*>\u0020?)/, ""); | |
if (listtest(x + 1) === true) { | |
break; | |
} | |
if (lines[x].replace(/\s+/, "") === "") { | |
lines[x] = ""; | |
if ((/^(\s*>)/).test(lines[x + 1]) === false) { | |
break; | |
} | |
} | |
if ((/^(\s{0,3}((-{3,})|(={3,}))\s*)$/).test(lines[x + 1]) === true) { | |
break; | |
} | |
x = x + 1; | |
} while (x < b && lines[x] !== "" && codetest(x) === false && hrtest(x) === false); | |
if (x < b - 1 && x > 0) { | |
if (listtest(x) === true) { | |
lines.splice(x + 1, 0, ""); | |
b = b + 1; | |
} | |
else if (lines[x].replace(/\s+/, "") !== "") { | |
x = x - 1; | |
} | |
} | |
do { | |
if ((/^(\s*>)/).test(lines[a]) === true) { | |
blockquote(); | |
} | |
else if ((/^(\s*((`{3,})|(~{3,}))+(\S+)?\s*)$/).test(lines[a]) === true) { | |
if (a + 1 < x) { | |
parse.push(data, { | |
begin: parse.structure[parse.structure.length - 1][1], | |
lexer: "markdown", | |
lines: 0, | |
presv: false, | |
stack: parse.structure[parse.structure.length - 1][0], | |
token: "<p>", | |
types: "start" | |
}, "p"); | |
parse.push(data, { | |
begin: parse.structure[parse.structure.length - 1][1], | |
lexer: "markdown", | |
lines: 0, | |
presv: false, | |
stack: parse.structure[parse.structure.length - 1][0], | |
token: "<code>", | |
types: "start" | |
}, "code"); | |
parse.push(data, { | |
begin: parse.structure[parse.structure.length - 1][1], | |
lexer: "markdown", | |
lines: 0, | |
presv: false, | |
stack: parse.structure[parse.structure.length - 1][0], | |
token: "</code>", | |
types: "end" | |
}, ""); | |
parse.push(data, { | |
begin: parse.structure[parse.structure.length - 1][1], | |
lexer: "markdown", | |
lines: 0, | |
presv: false, | |
stack: parse.structure[parse.structure.length - 1][0], | |
token: "</p>", | |
types: "end" | |
}, ""); | |
lines[a] = ""; | |
break; | |
} | |
codeblock(true, true, false); | |
} | |
else if (codetest(a) === true) { | |
codeblock(false, true, true); | |
} | |
else if ((/^(\s*#{1,6}\s)/).test(lines[a]) === true) { | |
heading(); | |
} | |
else if (listtest(a) === true) { | |
list(); | |
} | |
else if (a > x && (/^(\s{0,3}((-{3,})|(={3,}))\s*)$/).test(lines[a + 1]) === true) { | |
text(lines[a], "<p>", false); | |
} | |
else if (lines[a].replace(/\s+/, "") !== "") { | |
parabuild(); | |
} | |
a = a + 1; | |
} while (a < x); | |
if (lines[a - 1] === "" || lines[a] !== "" || (a > x && (/^(\s{0,3}((-{3,})|(={3,}))\s*)$/).test(lines[a]) === true)) { | |
a = a - 1; | |
} | |
bc1 = bc1 - 1; | |
if (bc1 === 0) { | |
do { | |
parse.push(data, { | |
begin: parse.structure[parse.structure.length - 1][1], | |
lexer: "markdown", | |
lines: 0, | |
presv: false, | |
stack: parse.structure[parse.structure.length - 1][0], | |
token: "</blockquote>", | |
types: "end" | |
}, ""); | |
bc2 = bc2 - 1; | |
} while (bc2 > 0); | |
} | |
}, list = function lexer_markdown_list() { | |
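// list - parses ordered (<ol>) and unordered (<ul>) lists, recursing for nested lists,
// and handles task list checkboxes, blockquotes, code blocks and multi-paragraph content
// inside list items, closing each item with a matching </li>.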
let paraForce = false, ind = 0, sym = lines[a].replace(/^(\s+)/, "").charAt(0), record = { | |
begin: -1, | |
lexer: "markdown", | |
lines: 0, | |
presv: false, | |
stack: "", | |
token: "", | |
types: "" | |
}, numb = "", lasttext = "", y = 0, z = 0, order = false, end; | |
const tabs = function lexer_markdown_list_tabs(spaces) { | |
let output = spaces.split(""), uu = 0; | |
const tt = output.length; | |
do { | |
if (output[uu] === "\t") { | |
output[uu] = " "; | |
} | |
uu = uu + 1; | |
} while (uu < tt); | |
return output.join(""); | |
}, checktest = function lexer_markdown_list_checktest() { | |
return ((/^(\s*(\*|-|\+)?\s{0,3}\[( |x)\]\s*)$/).test(lines[a]) === true || (/^(\s*(\*|-|\+)?\s{0,3}\[( |x)\]\s+\S)/).test(lines[a]) === true); | |
}, indentation = (/^(\s*(\*|-|\+)?\s*)/), indlen = function lexer_markdown_list_indlen(index) { | |
if (lines[index] === undefined) { | |
return 0; | |
} | |
return indentation.exec(lines[index].replace(indentation, tabs))[0].length; | |
}, space = function lexer_markdown_list_space(index, emptyLine) { | |
let xind = indlen(index), xsym = (lines[index] === undefined) | |
? "" | |
: lines[index].replace(/^(\s+)/, "").charAt(0); | |
if (lines[index] === undefined) { | |
return 0; | |
} | |
if (order === true) { | |
xind = (/^(\s*(\d+(\)|\.)))?\s*/).exec(lines[index])[0].length; | |
xsym = lines[index].replace(/^(\s*\d+)/, "").charAt(0); | |
} | |
if (order === false && "*-+".indexOf(xsym) > -1 && xsym !== sym && xind - ind < 2 && (/\s/).test(lines[index].replace(/^(\s+)/, "").charAt(1)) === true) { | |
return -1; | |
} | |
if (xind - ind < 0) { | |
return xind; | |
} | |
if (xsym !== sym && listtest(index) === true && (emptyLine === false || (emptyLine === true && xind - ind < 2))) { | |
return 10; | |
} | |
return xind; | |
}; | |
if ((/^(\s{0,3}\d{1,9}(\)|\.)\s)/).test(lines[a]) === true) { | |
order = true; | |
ind = (/^(\s*(\d+(\)|\.)))?\s*/).exec(lines[a])[0].length; | |
sym = lines[a].replace(/^(\s*\d+)/, "").charAt(0); | |
parse.push(data, { | |
begin: parse.structure[parse.structure.length - 1][1], | |
lexer: "markdown", | |
lines: 0, | |
presv: false, | |
stack: parse.structure[parse.structure.length - 1][0], | |
token: "<ol>", | |
types: "start" | |
}, "ol"); | |
numb = (/\d{1,9}/).exec(lines[a])[0].replace(/^(0+)/, ""); | |
if (numb !== "1") { | |
parse.push(data, { | |
begin: parse.structure[parse.structure.length - 1][1], | |
lexer: "markdown", | |
lines: 0, | |
presv: false, | |
stack: parse.structure[parse.structure.length - 1][0], | |
token: "start=\"" + numb + "\"", | |
types: "attribute" | |
}, ""); | |
} | |
} | |
else { | |
ind = indlen(a); | |
parse.push(data, { | |
begin: parse.structure[parse.structure.length - 1][1], | |
lexer: "markdown", | |
lines: 0, | |
presv: false, | |
stack: parse.structure[parse.structure.length - 1][0], | |
token: "<ul>", | |
types: "start" | |
}, "ul"); | |
} | |
do { | |
paraForce = false; | |
// lists do not contain comments | |
if (comtest(a) === true) { | |
a = a - 1; | |
break; | |
} | |
// an ordered list may not have unordered list items | |
if (order === true && (/^(\s{0,3}(\*|-|\+)\s)/).test(lines[a]) === true) { | |
a = a - 1; | |
break; | |
} | |
// an unordered list may not have ordered list items | |
if (order === false && (/^(\s{0,3}\d{1,9}(\)|\.)\s)/).test(lines[a]) === true) { | |
a = a - 1; | |
break; | |
} | |
numb = lines[a]; | |
lines[a] = lines[a].replace(/^(\s*(\*|-|\+|(\d{1,9}\.))\s+)/, ""); | |
//recursive list item: - - foo | |
//console.log((space(a + 1, true) - ind)+" "+lines[a]); | |
if (listtest(a) === true) { | |
parse.structure.push(["ul", parse.count - 3]); | |
list(); | |
record.begin = parse.structure[parse.structure.length - 1][1]; | |
record.stack = parse.structure[parse.structure.length - 1][0]; | |
record.token = "<li>"; | |
record.types = "start"; | |
y = parse.count - 1; | |
do { | |
data.begin[y] = data.begin[y] - 1; | |
y = y - 1; | |
} while (y > 0 && data.token[y + 1] !== "<li>"); | |
parse.splice({ | |
data: data, | |
howmany: 0, | |
index: y, | |
record: record | |
}); | |
y = y + 1; | |
do { | |
if (data.types[y] === "start") { | |
data.begin[y] = y - 1; | |
data.stack[y] = data.token[y - 1].replace("<", "").replace(">", ""); | |
z = y; | |
} | |
else { | |
data.begin[y] = z; | |
if (data.types[y] === "end") { | |
z = z - 1; | |
} | |
} | |
y = y + 1; | |
} while (y < parse.count + 1); | |
record.begin = z; | |
record.stack = "li"; | |
record.token = "</li>"; | |
record.types = "end"; | |
parse.push(data, record, ""); | |
} | |
else if (lines[a] === "") { | |
y = space(a + 1, true) - ind; | |
if (y < 0) { | |
break; | |
} | |
if (y > 1 && a < b - 1 && lines[a + 1].replace(/\s+/, "").charAt(0) !== sym && (/^(\s{0,3}>)/).test(lines[a + 1]) === false) { | |
paraForce = true; | |
} | |
else if (codetest(a + 1) === false && (parse.structure.join("").indexOf("blockquote") > 0 || y !== 0) && a < b - 1) { | |
break; | |
} | |
} | |
else { | |
lines[a] = numb; | |
y = space(a, false) - ind; | |
if (y < -1 || y > 9 || hrtest(a) === true) { | |
z = (lines[a + 1] === "") | |
? a + 2 | |
: a + 1; | |
if (z !== b && codetest(z) === false && listtest(z) === false && (/^\s*>/).test(lines[a]) === false) { | |
a = a - 1; | |
break; | |
} | |
} | |
if (data.token[parse.count - 1] === "<li>") { | |
paraForce = false; | |
if (lines[a - 1] === "") { | |
a = a - 1; | |
break; | |
} | |
} | |
else if (y < 1 && listtest(a) === false) { | |
paraForce = true; | |
y = 2; | |
} | |
if ((/^(\s*(\*|-|\+|(\d{1,9}(\)|\.)))\s*)$/).test(lines[a]) === true && lines[a + 1] === "") { | |
record.begin = parse.structure[parse.structure.length - 1][1]; | |
record.stack = parse.structure[parse.structure.length - 1][0]; | |
record.token = "<li>"; | |
record.types = "start"; | |
parse.push(data, record, "li"); | |
record.begin = parse.structure[parse.structure.length - 1][1]; | |
record.stack = parse.structure[parse.structure.length - 1][0]; | |
record.token = "</li>"; | |
record.types = "end"; | |
parse.push(data, record, ""); | |
} | |
else if (y < -1) { | |
//different list type | |
lines.splice(a, 0, ""); | |
b = b + 1; | |
break; | |
} | |
else if (y > 1) { | |
if ((/^(\s*>)/).test(lines[a]) === true) { | |
//blockquote in list item | |
end = parse.pop(data); | |
record.begin = end.begin; | |
record.stack = end.stack; | |
parse.structure.push([end.stack, end.begin]); | |
blockquote(); | |
record.begin = parse.structure[parse.structure.length - 1][1]; | |
record.stack = parse.structure[parse.structure.length - 1][0]; | |
record.token = "</li>"; | |
record.types = "end"; | |
parse.push(data, record, ""); | |
} | |
else if (listtest(a) === true && (/^(\u0020{4,}\s*\S)/).test(lines[a].replace(/^(\s*(\*|-|\+|(\d{1,9}(\)|\.)))\s)/, "").replace(/^\t/, " ")) === true) { | |
//code line in list item | |
record.begin = parse.structure[parse.structure.length - 1][1]; | |
record.stack = parse.structure[parse.structure.length - 1][0]; | |
record.token = "<li>"; | |
record.types = "start"; | |
parse.push(data, record, "li"); | |
lines[a] = lines[a].replace(/^(\s*(\*|-|\+|(\d{1,9}(\)|\.)))\s)/, "").replace(/^\t/, " "); | |
code(lines[a], "", true); | |
record.begin = parse.structure[parse.structure.length - 1][1]; | |
record.stack = parse.structure[parse.structure.length - 1][0]; | |
record.token = "</li>"; | |
record.types = "end"; | |
parse.push(data, record, ""); | |
} | |
else if (paraForce || (codetest(a) === true && listtest(a) === false)) { | |
end = parse.pop(data); | |
record.begin = end.begin; | |
record.stack = end.stack; | |
parse.structure.push([end.stack, end.begin]); | |
if (lines[a - 1] === "" && data.token[parse.count] !== "</p>") { | |
lasttext = data.token[parse.count]; | |
parse.pop(data); | |
record.token = "<p>"; | |
record.types = "start"; | |
parse.push(data, record, "p"); | |
record.begin = parse.structure[parse.structure.length - 1][1]; | |
record.stack = parse.structure[parse.structure.length - 1][0]; | |
record.token = lasttext; | |
record.types = "content"; | |
parse.push(data, record, ""); | |
record.token = "</p>"; | |
record.types = "end"; | |
parse.push(data, record, ""); | |
} | |
lines[a] = lines[a].replace(/^(\u0020{4})/, "").replace(/^(\s*\t)/, ""); | |
if (codetest(a) === true) { | |
code(lines[a], "", true); | |
} | |
else if (data.token[parse.count] === "</p>" && lines[a - 1] !== "") { | |
parse.pop(data); | |
parse.structure.push(["p", data.begin[parse.count]]); | |
record.token = "<br/>"; | |
record.types = "singleton"; | |
parse.push(data, record, ""); | |
text(lines[a].replace(/^(\s*(\*|-|\+|(\d{1,9}\.))\s+)/, ""), "multiline", false); | |
record.token = "</p>"; | |
record.types = "end"; | |
parse.push(data, record, ""); | |
} | |
else if (data.types[parse.count] === "content") { | |
a = a - 1; | |
parse.pop(data); | |
record.token = "<p>"; | |
record.types = "start"; | |
record.begin = parse.structure[parse.structure.length - 1][1]; | |
record.stack = parse.structure[parse.structure.length - 1][0]; | |
parse.push(data, record, "p"); | |
text(lines[a].replace(/^(\s*(\*|-|\+|(\d{1,9}\.))\s+)/, ""), "multiline", false); | |
record.token = "<br/>"; | |
record.types = "singleton"; | |
parse.push(data, record, ""); | |
a = a + 1; | |
text(lines[a].replace(/^(\s*(\*|-|\+|(\d{1,9}\.))\s+)/, ""), "multiline", false); | |
record.token = "</p>"; | |
record.types = "end"; | |
parse.push(data, record, ""); | |
} | |
else { | |
text(lines[a], "<p>", false); | |
} | |
parse.push(data, end, ""); | |
} | |
else if (listtest(a) === true) { | |
record.begin = parse.structure[parse.structure.length - 1][1]; | |
record.stack = parse.structure[parse.structure.length - 1][0]; | |
record.token = "<li>"; | |
record.types = "start"; | |
parse.push(data, record, "li"); | |
list(); | |
record.begin = parse.structure[parse.structure.length - 1][1]; | |
record.stack = parse.structure[parse.structure.length - 1][0]; | |
record.token = "</li>"; | |
record.types = "end"; | |
parse.push(data, record, ""); | |
} | |
} | |
else if ((/^(\s*((`{3,})|(~{3,}))+(\S+)?\s*)$/).test(lines[a].replace(/^(\s*(\*|-|\+|(\d{1,9}(\)|\.)))\s)/, "")) === true) { | |
record.begin = parse.structure[parse.structure.length - 1][1]; | |
record.stack = parse.structure[parse.structure.length - 1][0]; | |
record.token = "<li>"; | |
record.types = "start"; | |
parse.push(data, record, "li"); | |
lines[a] = lines[a].replace(/^(\s*(\*|-|\+|(\d{1,9}(\)|\.)))\s)/, ""); | |
codeblock(true, false, false); | |
record.begin = parse.structure[parse.structure.length - 1][1]; | |
record.stack = parse.structure[parse.structure.length - 1][0]; | |
record.token = "</li>"; | |
record.types = "end"; | |
parse.push(data, record, ""); | |
} | |
else if (checktest() === true || (listtest(a + 1) === true && space(a + 1, false) - space(a, false) > 1)) { | |
record.begin = parse.structure[parse.structure.length - 1][1]; | |
record.stack = parse.structure[parse.structure.length - 1][0]; | |
record.token = "<li>"; | |
record.types = "start"; | |
parse.push(data, record, "li"); | |
if (checktest() === true) { | |
record.begin = parse.structure[parse.structure.length - 1][1]; | |
record.stack = parse.structure[parse.structure.length - 1][0]; | |
record.token = "class=\"task-list-item\""; | |
record.types = "attribute"; | |
parse.push(data, record, ""); | |
record.begin = parse.structure[parse.structure.length - 1][1]; | |
record.stack = parse.structure[parse.structure.length - 1][0]; | |
record.token = "input"; | |
record.types = "singleton"; | |
parse.push(data, record, ""); | |
y = parse.count; | |
record.begin = y; | |
record.stack = "input"; | |
record.token = "class=\"task-list-item-checkbox\""; | |
record.types = "attribute"; | |
parse.push(data, record, ""); | |
record.begin = y; | |
record.stack = "input"; | |
record.token = "disabled=\"disabled\""; | |
record.types = "attribute"; | |
parse.push(data, record, ""); | |
y = lines[a].indexOf("[x]"); | |
z = lines[a].indexOf("[ ]"); | |
if (data.types[parse.structure[parse.structure.length - 2][1] + 1] !== "attribute") { | |
parse.splice({ | |
data: data, | |
howmany: 0, | |
index: parse.structure[parse.structure.length - 2][1] + 1, | |
record: { | |
begin: parse.structure[parse.structure.length - 2][1], | |
lexer: "markdown", | |
lines: 0, | |
presv: false, | |
stack: parse.structure[parse.structure.length - 2][0], | |
token: "class=\"contains-task-list\"", | |
types: "attribute" | |
} | |
}); | |
} | |
if (y > -1 && z > -1) { | |
if (y < z) { | |
lines[a] = lines[a].replace(/\[x\]\s*/, ""); | |
record.begin = data.begin[parse.count - 1]; | |
record.stack = "input"; | |
record.token = "checked=\"checked\""; | |
record.types = "attribute"; | |
parse.push(data, record, ""); | |
} | |
else { | |
lines[a] = lines[a].replace(/\[ \]\s*/, ""); | |
} | |
} | |
else if (y > -1) { | |
lines[a] = lines[a].replace(/\[x\]\s*/, ""); | |
record.begin = data.begin[parse.count - 1]; | |
record.stack = "input"; | |
record.token = "checked=\"checked\""; | |
record.types = "attribute"; | |
parse.push(data, record, ""); | |
} | |
else { | |
lines[a] = lines[a].replace(/\[ \]\s*/, ""); | |
} | |
} | |
lines[a] = lines[a].replace(/^(\s*(\*|-|\+|(\d{1,9}\.))\s+)/, ""); | |
text(lines[a], "multiline", false); | |
record.begin = parse.structure[parse.structure.length - 1][1]; | |
record.stack = parse.structure[parse.structure.length - 1][0]; | |
record.token = "</li>"; | |
record.types = "end"; | |
parse.push(data, record, ""); | |
} | |
else { | |
lines[a] = lines[a].replace(/^(\s*(\*|-|\+|(\d{1,9}\.))\s+)/, ""); | |
text(lines[a], "<li>", false); | |
} | |
} | |
a = a + 1; | |
} while (a < b); | |
if (order === true) { | |
parse.push(data, { | |
begin: parse.structure[parse.structure.length - 1][1], | |
lexer: "markdown", | |
lines: 0, | |
presv: false, | |
stack: parse.structure[parse.structure.length - 1][0], | |
token: "</ol>", | |
types: "end" | |
}, ""); | |
} | |
else { | |
parse.push(data, { | |
begin: parse.structure[parse.structure.length - 1][1], | |
lexer: "markdown", | |
lines: 0, | |
presv: false, | |
stack: parse.structure[parse.structure.length - 1][0], | |
token: "</ul>", | |
types: "end" | |
}, ""); | |
} | |
}, table = function lexer_markdown_table() { | |
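// table - parses a pipe delimited table: the current line becomes the <thead> row, the
// following |---|:---:| bar supplies text-align attributes, and the remaining lines fill
// <tbody>; when the column counts disagree the line is treated as a paragraph instead.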
let c = 0, d = 0, line = lines[a] | |
.replace(/^\|/, "") | |
.replace(/\|$/, "") | |
.replace(/\\\|/g, "parse\\?sep") | |
.split("|"), bar = lines[a + 1] | |
.replace(/\s*/g, "") | |
.replace(/^\|/, "") | |
.replace(/\|$/, "") | |
.split("|"); | |
if (line.length !== bar.length) { | |
return parabuild(); | |
} | |
parse.push(data, { | |
begin: parse.structure[parse.structure.length - 1][1], | |
lexer: "markdown", | |
lines: 0, | |
presv: false, | |
stack: parse.structure[parse.structure.length - 1][0], | |
token: "<table>", | |
types: "start" | |
}, "table"); | |
parse.push(data, { | |
begin: parse.structure[parse.structure.length - 1][1], | |
lexer: "markdown", | |
lines: 0, | |
presv: false, | |
stack: parse.structure[parse.structure.length - 1][0], | |
token: "<thead>", | |
types: "start" | |
}, "thead"); | |
parse.push(data, { | |
begin: parse.structure[parse.structure.length - 1][1], | |
lexer: "markdown", | |
lines: 0, | |
presv: false, | |
stack: parse.structure[parse.structure.length - 1][0], | |
token: "<tr>", | |
types: "start" | |
}, "tr"); | |
d = line.length; | |
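// the delimiter-row cell (bar[c]) decides the column alignment: ":---:"
// centers, ":---" left-aligns and "---:" right-aligns the generated cells,
// while a plain "---" adds no style attribute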
do { | |
parse.push(data, { | |
begin: parse.structure[parse.structure.length - 1][1], | |
lexer: "markdown", | |
lines: 0, | |
presv: false, | |
stack: parse.structure[parse.structure.length - 1][0], | |
token: "<th>", | |
types: "start" | |
}, "th"); | |
if ((/:-+:/).test(bar[c]) === true) { | |
parse.push(data, { | |
begin: parse.structure[parse.structure.length - 1][1], | |
lexer: "markdown", | |
lines: 0, | |
presv: false, | |
stack: parse.structure[parse.structure.length - 1][0], | |
token: "style=\"text-align:center\"", | |
types: "attribute" | |
}, ""); | |
} | |
else if ((/:-+/).test(bar[c]) === true) { | |
parse.push(data, { | |
begin: parse.structure[parse.structure.length - 1][1], | |
lexer: "markdown", | |
lines: 0, | |
presv: false, | |
stack: parse.structure[parse.structure.length - 1][0], | |
token: "style=\"text-align:left\"", | |
types: "attribute" | |
}, ""); | |
} | |
else if ((/-+:/).test(bar[c]) === true) { | |
parse.push(data, { | |
begin: parse.structure[parse.structure.length - 1][1], | |
lexer: "markdown", | |
lines: 0, | |
presv: false, | |
stack: parse.structure[parse.structure.length - 1][0], | |
token: "style=\"text-align:right\"", | |
types: "attribute" | |
}, ""); | |
} | |
text(line[c].replace(/parse\\\?sep/g, "|"), "multiline", false); | |
quote = ""; | |
stack = []; | |
parse.push(data, { | |
begin: parse.structure[parse.structure.length - 1][1], | |
lexer: "markdown", | |
lines: 0, | |
presv: false, | |
stack: parse.structure[parse.structure.length - 1][0], | |
token: "</th>", | |
types: "end" | |
}, ""); | |
c = c + 1; | |
} while (c < d); | |
parse.push(data, { | |
begin: parse.structure[parse.structure.length - 1][1], | |
lexer: "markdown", | |
lines: 0, | |
presv: false, | |
stack: parse.structure[parse.structure.length - 1][0], | |
token: "</tr>", | |
types: "end" | |
}, ""); | |
parse.push(data, { | |
begin: parse.structure[parse.structure.length - 1][1], | |
lexer: "markdown", | |
lines: 0, | |
presv: false, | |
stack: parse.structure[parse.structure.length - 1][0], | |
token: "</thead>", | |
types: "end" | |
}, ""); | |
parse.push(data, { | |
begin: parse.structure[parse.structure.length - 1][1], | |
lexer: "markdown", | |
lines: 0, | |
presv: false, | |
stack: parse.structure[parse.structure.length - 1][0], | |
token: "<tbody>", | |
types: "start" | |
}, "thead"); | |
a = a + 2; | |
d = bar.length; | |
do { | |
if (lines[a] === "") { | |
break; | |
} | |
if ((/^(\s*>)/).test(lines[a]) === true) { | |
if (data.token[parse.count] === "<tbody>") { | |
parse.structure.pop(); | |
parse.pop(data); | |
} | |
else { | |
parse.push(data, { | |
begin: parse.structure[parse.structure.length - 1][1], | |
lexer: "markdown", | |
lines: 0, | |
presv: false, | |
stack: parse.structure[parse.structure.length - 1][0], | |
token: "</tbody>", | |
types: "end" | |
}, ""); | |
} | |
parse.push(data, { | |
begin: parse.structure[parse.structure.length - 1][1], | |
lexer: "markdown", | |
lines: 0, | |
presv: false, | |
stack: parse.structure[parse.structure.length - 1][0], | |
token: "</table>", | |
types: "end" | |
}, ""); | |
return blockquote(); | |
} | |
line = lines[a] | |
.replace(/^\|/, "") | |
.replace(/\|$/, "") | |
.replace(/\\\|/g, "parse\\?sep") | |
.split("|"); | |
parse.push(data, { | |
begin: parse.structure[parse.structure.length - 1][1], | |
lexer: "markdown", | |
lines: 0, | |
presv: false, | |
stack: parse.structure[parse.structure.length - 1][0], | |
token: "<tr>", | |
types: "start" | |
}, "tr"); | |
c = 0; | |
do { | |
if (line[c] === undefined) { | |
line[c] = ""; | |
} | |
if (line[c] === " " && c === bar.length) { | |
break; | |
} | |
parse.push(data, { | |
begin: parse.structure[parse.structure.length - 1][1], | |
lexer: "markdown", | |
lines: 0, | |
presv: false, | |
stack: parse.structure[parse.structure.length - 1][0], | |
token: "<td>", | |
types: "start" | |
}, "td"); | |
if ((/:-+:/).test(bar[c]) === true) { | |
parse.push(data, { | |
begin: parse.structure[parse.structure.length - 1][1], | |
lexer: "markdown", | |
lines: 0, | |
presv: false, | |
stack: parse.structure[parse.structure.length - 1][0], | |
token: "style=\"text-align:center\"", | |
types: "attribute" | |
}, ""); | |
} | |
else if ((/:-+/).test(bar[c]) === true) { | |
parse.push(data, { | |
begin: parse.structure[parse.structure.length - 1][1], | |
lexer: "markdown", | |
lines: 0, | |
presv: false, | |
stack: parse.structure[parse.structure.length - 1][0], | |
token: "style=\"text-align:left\"", | |
types: "attribute" | |
}, ""); | |
} | |
else if ((/-+:/).test(bar[c]) === true) { | |
parse.push(data, { | |
begin: parse.structure[parse.structure.length - 1][1], | |
lexer: "markdown", | |
lines: 0, | |
presv: false, | |
stack: parse.structure[parse.structure.length - 1][0], | |
token: "style=\"text-align:right\"", | |
types: "attribute" | |
}, ""); | |
} | |
if (line[c] !== "") { | |
text(line[c].replace(/parse\\\?sep/g, "|"), "multiline", false); | |
} | |
quote = ""; | |
stack = []; | |
parse.push(data, { | |
begin: parse.structure[parse.structure.length - 1][1], | |
lexer: "markdown", | |
lines: 0, | |
presv: false, | |
stack: parse.structure[parse.structure.length - 1][0], | |
token: "</td>", | |
types: "end" | |
}, ""); | |
c = c + 1; | |
} while (c < d); | |
parse.push(data, { | |
begin: parse.structure[parse.structure.length - 1][1], | |
lexer: "markdown", | |
lines: 0, | |
presv: false, | |
stack: parse.structure[parse.structure.length - 1][0], | |
token: "</tr>", | |
types: "end" | |
}, ""); | |
a = a + 1; | |
} while (a < b); | |
if (data.token[parse.count] === "<tbody>") { | |
parse.structure.pop(); | |
parse.pop(data); | |
} | |
else { | |
parse.push(data, { | |
begin: parse.structure[parse.structure.length - 1][1], | |
lexer: "markdown", | |
lines: 0, | |
presv: false, | |
stack: parse.structure[parse.structure.length - 1][0], | |
token: "</tbody>", | |
types: "end" | |
}, ""); | |
} | |
parse.push(data, { | |
begin: parse.structure[parse.structure.length - 1][1], | |
lexer: "markdown", | |
lines: 0, | |
presv: false, | |
stack: parse.structure[parse.structure.length - 1][0], | |
token: "</table>", | |
types: "end" | |
}, ""); | |
}; | |
b = lines.length; | |
parse.push(data, { | |
begin: parse.structure[parse.structure.length - 1][1], | |
lexer: "markdown", | |
lines: 0, | |
presv: false, | |
stack: parse.structure[parse.structure.length - 1][0], | |
token: "<body>", | |
types: "start" | |
}, "body"); | |
do { | |
if ((/^(\s*)$/).test(lines[a]) === true) { | |
if (lines[a - 1] === "") { | |
lines.splice(a, 1); | |
b = b - 1; | |
a = a - 1; | |
} | |
else { | |
lines[a] = ""; | |
} | |
} | |
else if ((/^(\s*(\*|-|\+|(\d{1,9}(\)|\.))))/).test(lines[a]) === true && lines[a].replace(/^(\s*(\*|-|\+|(\d{1,9}(\)|\.)))\s*)/, "") === "") { | |
if ((/^(\u0020{2,})/).test(lines[a + 1]) === true) { | |
lines[a] = lines[a].replace(/(\s+)$/, "") + lines[a + 1].replace(" ", " "); | |
lines.splice(a + 1, 1); | |
b = b - 1; | |
} | |
else if ((/^(\t)/).test(lines[a + 1]) === true) { | |
lines[a] = lines[a].replace(/(\s+)$/, "") + lines[a + 1].replace("\t", " "); | |
lines.splice(a + 1, 1); | |
b = b - 1; | |
} | |
} | |
a = a + 1; | |
} while (a < b); | |
a = 0; | |
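// second pass: dispatch each line to the matching construct handler, checked
// in this order - comments, code, horizontal rules, blockquotes, tables,
// code blocks, headings, lists, and finally plain paragraphs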
do { | |
if (comtest(a) === true) { | |
comment(); | |
} | |
else if (codetest(a) === true) { | |
if (codetest(a + 1) === true || (lines[a + 1] === "" && codetest(a + 2) === true)) { | |
codeblock(false, false, true); | |
} | |
else { | |
code(lines[a], "", true); | |
} | |
} | |
else if (hrtest(a) === true) { | |
hr(); | |
} | |
else if ((/^(\s{0,3}>)/).test(lines[a]) === true) { | |
blockquote(); | |
} | |
else if ((/((:-+)|(-+:)|(:-+:)|(-{2,}))\s*\|\s*/).test(lines[a + 1]) === true) { | |
table(); | |
} | |
else if (codeblocktest(a) === true) { | |
codeblock(true, false, false); | |
} | |
else if ((/^(\s*#{1,6}\s)/).test(lines[a]) === true) { | |
heading(); | |
} | |
else if (listtest(a) === true && (a === 0 || lines[a - 1] === "")) { | |
list(); | |
} | |
else if (lines[a] !== "" && (/^(\s+)$/).test(lines[a]) === false) { | |
parabuild(); | |
} | |
a = a + 1; | |
} while (a < b); | |
parse.push(data, { | |
begin: parse.structure[parse.structure.length - 1][1], | |
lexer: "markdown", | |
lines: 0, | |
presv: false, | |
stack: parse.structure[parse.structure.length - 1][0], | |
token: "</body>", | |
types: "end" | |
}, ""); | |
return data; | |
}; | |
framework.lexer.markdown = markdown; | |
}()); | |
/*global global*/ | |
(function markup_init() { | |
"use strict"; | |
const framework = window.parseFramework, markup = function lexer_markup(source) { | |
let a = 0, linepreserve = 0, list = 0, litag = 0, sgmlflag = 0, minspace = "", cftransaction = false, ext = false; | |
const parse = framework.parse, data = parse.data, options = parse.options, b = source.split(""), c = b.length, | |
// Find the lowercase tag name of the provided token. | |
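// e.g. tagName("<div class=\"x\">") returns "div" and
// tagName("{{#each items}}") returns "#each" (illustrative inputs)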
tagName = function lexer_markup_tagName(el) { | |
let space = 0, name = ""; | |
const reg = (/^(\{((%-?)|\{-?)\s*)/); | |
if (typeof el !== "string") { | |
return ""; | |
} | |
space = el | |
.replace(reg, "%") | |
.replace(/\s+/, " ") | |
.indexOf(" "); | |
name = el.replace(reg, " "); | |
name = (space < 0) | |
? name.slice(1, el.length - 1) | |
: name.slice(1, space); | |
if (options.lang === "html" || options.lang === "coldfusion") { | |
name = name.toLowerCase(); | |
} | |
name = name.replace(/(\}\})$/, ""); | |
if (name.indexOf("(") > 0) { | |
name = name.slice(0, name.indexOf("(")); | |
} | |
return name; | |
}, | |
//parses tags, attributes, and template elements | |
tag = function lexer_markup_tag(end) { | |
// markup is two smaller lexers that work together:
// * tag - evaluates markup and template tags
// * content - evaluates text content and code for external lexers
//
//type definitions: | |
// * start end type | |
// * <![CDATA[ ]]> cdata | |
// * <!-- --> comment | |
// * <#-- --> comment | |
// * <%-- --%> comment | |
// * {! !} comment | |
// * <!--[if --> conditional | |
// * text text content | |
// * </ > end | |
// * <pre </pre> ignore (html only) | |
// * text text script | |
// * <! > sgml | |
// * < /> singleton | |
// * < > start | |
// * text text style | |
// * <!--# --> template | |
// * <% %> template | |
// * {{{ }}} template | |
// * {{ }} template | |
// * {% %} template | |
// * [% %] template | |
// * {@ @} template | |
// * {# #} template | |
// * {# /} template | |
// * {? /} template | |
// * {^ /} template | |
// * {@ /} template | |
// * {< /} template | |
// * {+ /} template | |
// * {~ } template | |
// * <? ?> template | |
// * {:else} template_else | |
// * <#else > template_else | |
// * {@}else{@} template_else | |
// * <%}else{%> template_else | |
// * {{ }} template_end | |
// * <%\s*} %> template_end | |
// * [%\s*} %] template_end | |
// * {@\s*} @} template_end | |
// * { } template_end | |
// * {{# }} template_start | |
// * <% {\s*%> template_start | |
// * [% {\s*%] template_start | |
// * {@ {\s*@} template_start | |
// * {# } template_start | |
// * {? } template_start | |
// * {^ } template_start | |
// * {@ } template_start | |
// * {< } template_start | |
// * {+ } template_start | |
// * <?xml ?> xml | |
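// e.g. "<br/>" is recorded as a singleton, "</div>" as an end, and
// "{{name}}" as a template token (illustrative inputs)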
let igcount = 0, element = "", lastchar = "", ltype = "", tname = "", comment = false, cheat = false, earlyexit = false, ignoreme = false, jscom = false, nopush = false, nosort = false, preserve = false, simple = false, singleton = false, attstore = []; | |
const record = { | |
begin: parse.structure[parse.structure.length - 1][1], | |
lexer: "markup", | |
lines: parse.linesSpace, | |
presv: false, | |
stack: parse.structure[parse.structure.length - 1][0], | |
token: "", | |
types: "" | |
}, | |
//cftags is a list of supported coldfusion tags
// * required - the tag must have a separate matching end tag
// * optional - the tag could have a separate end tag, but is probably a
// singleton
// * prohibited - the tag has no corresponding end tag
cftags = { | |
"cfNTauthenticate": "optional", | |
"cfabort": "prohibited", | |
"cfajaximport": "optional", | |
"cfajaxproxy": "optional", | |
"cfapplet": "prohibited", | |
"cfapplication": "prohibited", | |
"cfargument": "prohibited", | |
"cfassociate": "prohibited", | |
"cfauthenticate": "prohibited", | |
"cfbreak": "prohibited", | |
"cfcache": "optional", | |
"cfcalendar": "optional", | |
"cfcase": "required", | |
"cfcatch": "required", | |
"cfchart": "optional", | |
"cfchartdata": "prohibited", | |
"cfchartseries": "optional", | |
"cfclient": "required", | |
"cfclientsettings": "optional", | |
"cfcol": "prohibited", | |
"cfcollection": "prohibited", | |
"cfcomponent": "required", | |
"cfcontent": "optional", | |
"cfcontinue": "prohibited", | |
"cfcookie": "prohibited", | |
"cfdbinfo": "prohibited", | |
"cfdefaultcase": "required", | |
"cfdirectory": "prohibited", | |
"cfdiv": "optional", | |
"cfdocument": "optional", | |
"cfdocumentitem": "optional", | |
"cfdocumentsection": "optional", | |
"cfdump": "optional", | |
"cfelse": "prohibited", | |
"cfelseif": "prohibited", | |
"cferror": "prohibited", | |
"cfexchangecalendar": "optional", | |
"cfexchangeconnection": "optional", | |
"cfexchangecontact": "optional", | |
"cfexchangeconversation": "optional", | |
"cfexchangefilter": "optional", | |
"cfexchangefolder": "optional", | |
"cfexchangemail": "optional", | |
"cfexchangetask": "optional", | |
"cfexecute": "required", | |
"cfexit": "prohibited", | |
"cffeed": "prohibited", | |
"cffile": "optional", | |
"cffileupload": "optional", | |
"cffinally": "required", | |
"cfflush": "prohibited", | |
"cfform": "required", | |
"cfformgroup": "required", | |
"cfformitem": "optional", | |
"cfforward": "prohibited", | |
"cfftp": "prohibited", | |
"cffunction": "required", | |
"cfgraph": "required", | |
"cfgraphdata": "prohibited", | |
"cfgrid": "required", | |
"cfgridcolumn": "optional", | |
"cfgridrow": "optional", | |
"cfgridupdate": "optional", | |
"cfheader": "prohibited", | |
"cfhtmlbody": "optional", | |
"cfhtmlhead": "optional", | |
"cfhtmltopdf": "optional", | |
"cfhtmltopdfitem": "optional", | |
"cfhttp": "optional", | |
"cfhttpparam": "prohibited", | |
"cfif": "required", | |
"cfimage": "prohibited", | |
"cfimap": "prohibited", | |
"cfimapfilter": "optional", | |
"cfimport": "prohibited", | |
"cfinclude": "prohibited", | |
"cfindex": "prohibited", | |
"cfinput": "prohibited", | |
"cfinsert": "prohibited", | |
"cfinterface": "required", | |
"cfinvoke": "optional", | |
"cfinvokeargument": "prohibited", | |
"cflayout": "optional", | |
"cflayoutarea": "optional", | |
"cfldap": "prohibited", | |
"cflocation": "prohibited", | |
"cflock": "required", | |
"cflog": "prohibited", | |
"cflogic": "required", | |
"cfloginuser": "prohibited", | |
"cflogout": "prohibited", | |
"cfloop": "required", | |
"cfmail": "required", | |
"cfmailparam": "prohibited", | |
"cfmailpart": "required", | |
"cfmap": "optional", | |
"cfmapitem": "optional", | |
"cfmediaplayer": "optional", | |
"cfmenu": "required", | |
"cfmenuitem": "optional", | |
"cfmessagebox": "optional", | |
"cfmodule": "optional", | |
"cfoauth": "optional", | |
"cfobject": "prohibited", | |
"cfobjectcache": "prohibited", | |
"cfoutput": "required", | |
"cfpageencoding": "optional", | |
"cfparam": "prohibited", | |
"cfpdf": "optional", | |
"cfpdfform": "optional", | |
"cfpdfformparam": "optional", | |
"cfpdfparam": "prohibited", | |
"cfpdfsubform": "required", | |
"cfpod": "optional", | |
"cfpop": "prohibited", | |
"cfpresentation": "required", | |
"cfpresentationslide": "optional", | |
"cfpresenter": "optional", | |
"cfprint": "optional", | |
"cfprocessingdirective": "optional", | |
"cfprocparam": "prohibited", | |
"cfprocresult": "prohibited", | |
"cfprogressbar": "optional", | |
"cfproperty": "prohibited", | |
"cfquery": "required", | |
"cfqueryparam": "prohibited", | |
"cfregistry": "prohibited", | |
"cfreport": "optional", | |
"cfreportparam": "optional", | |
"cfrethrow": "prohibited", | |
"cfretry": "prohibited", | |
"cfreturn": "prohibited", | |
"cfsavecontent": "required", | |
"cfschedule": "prohibited", | |
"cfscript": "required", | |
"cfsearch": "prohibited", | |
"cfselect": "required", | |
"cfservlet": "prohibited", | |
"cfservletparam": "prohibited", | |
"cfset": "prohibited", | |
"cfsetting": "optional", | |
"cfsharepoint": "optional", | |
"cfsilent": "required", | |
"cfsleep": "prohibited", | |
"cfslider": "prohibited", | |
"cfspreadsheet": "optional", | |
"cfsprydataset": "optional", | |
"cfstatic": "required", | |
"cfstopwatch": "required", | |
"cfstoredproc": "optional", | |
"cfswitch": "required", | |
"cftable": "required", | |
"cftextarea": "optional", | |
"cfthread": "optional", | |
"cfthrow": "prohibited", | |
"cftimer": "required", | |
"cftooltip": "required", | |
"cftrace": "optional", | |
"cftransaction": "required", | |
"cftree": "required", | |
"cftreeitem": "optional", | |
"cftry": "required", | |
"cfupdate": "prohibited", | |
"cfvideo": "prohibited", | |
"cfvideoplayer": "optional", | |
"cfwddx": "prohibited", | |
"cfwebsocket": "optional", | |
"cfwhile": "required", | |
"cfwindow": "optional", | |
"cfx_": "prohibited", | |
"cfxml": "required", | |
"cfzip": "optional", | |
"cfzipparam": "prohibited" | |
}, | |
//attribute name | |
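// e.g. arname("class=\"nav\"") returns ["class", "\"nav\""] and
// arname("disabled") returns ["disabled", ""] (illustrative inputs)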
arname = function lexer_markup_tag_name(x) { | |
const eq = x.indexOf("="); | |
if (eq > 0 && ((eq < x.indexOf("\"") && x.indexOf("\"") > 0) || (eq < x.indexOf("'") && x.indexOf("'") > 0))) { | |
return [x.slice(0, eq), x.slice(eq + 1)]; | |
} | |
return [x, ""]; | |
}, | |
// attribute parser | |
attributeRecord = function lexer_markup_tag_attributeRecord() { | |
let ind = 0, eq = 0, dq = 0, sq = 0, slice = "", name = "", cft = cftags[tname | |
.toLowerCase() | |
.replace(/\/$/, "")], store = []; | |
const len = attstore.length, syntax = "<{\"'=/", jsxAttribute = function lexer_markup_tag_attributeRecord_jsxAttribute(str) { | |
if ((/\s/).test(str) === true) { | |
record.lines = str | |
.split("\n") | |
.length + 1; | |
} | |
else { | |
record.lines = 0; | |
} | |
return ""; | |
}; | |
if (attstore.length < 1) { | |
return; | |
} | |
// fix for singleton tags, since "/" at the end of the tag is not an attribute | |
if (attstore[attstore.length - 1] === "/") { | |
attstore.pop(); | |
element = element.replace(/>$/, "/>"); | |
} | |
// reconnects attribute names to their respective values if separated on "=" | |
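// e.g. attstore entries ["id=", "\"main\""] are rejoined into
// ["id=\"main\""] (illustrative values)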
eq = attstore.length; | |
dq = 1; | |
if (dq < eq) { | |
do { | |
name = attstore[dq - 1]; | |
if (name.charAt(name.length - 1) === "=" && attstore[dq].indexOf("=") < 0) { | |
attstore[dq - 1] = name + attstore[dq]; | |
attstore.splice(dq, 1); | |
eq = eq - 1; | |
dq = dq - 1; | |
} | |
dq = dq + 1; | |
} while (dq < eq); | |
} | |
// sort the attributes | |
if (options.lexerOptions.markup.tagSort === true && jscom === false && options.lang !== "jsx" && nosort === false && tname !== "cfif" && tname !== "cfelseif" && tname !== "cfset") { | |
attstore = parse.safeSort(attstore, "", false); | |
record.presv = true; | |
} | |
else { | |
record.presv = false; | |
} | |
// preparation for a coldfusion edge case | |
if (tname.slice(0, 3).toLowerCase() === "cf_") { | |
cft = "required"; | |
} | |
record.begin = parse.count; | |
record.lines = 0; | |
record.stack = tname.replace(/\/$/, ""); | |
record.types = "attribute"; | |
if (ind < len) { | |
do { | |
if (attstore[ind] === undefined) { | |
break; | |
} | |
eq = attstore[ind].indexOf("="); | |
dq = attstore[ind].indexOf("\""); | |
sq = attstore[ind].indexOf("'"); | |
if (eq > -1 && store.length > 0) { | |
// put certain attributes together for coldfusion | |
record.token = store.join(" "); | |
parse.push(data, record, ""); | |
if (attstore[ind].indexOf("=") > 0 && attstore[ind].indexOf("//") < 0 && attstore[ind].charAt(0) !== ";") { | |
record.token = attstore[ind].replace(/\s$/, ""); | |
} | |
else { | |
record.token = attstore[ind]; | |
} | |
parse.push(data, record, ""); | |
store = []; | |
} | |
else if (ltype === "sgml") { | |
store.push(attstore[ind]); | |
} | |
else if (cft !== undefined && eq < 0 && attstore[ind].indexOf("=") < 0) { | |
// put certain attributes together for coldfusion | |
store.push(attstore[ind]); | |
} | |
else if ((cft !== undefined && eq < 0) || (dq > 0 && dq < eq) || (sq > 0 && sq < eq) || syntax.indexOf(attstore[ind].charAt(0)) > -1) { | |
// tags stored as attributes of other tags | |
record.token = attstore[ind].replace(/\s$/, ""); | |
parse.push(data, record, ""); | |
} | |
else if (eq < 0 && cft === undefined) { | |
// in most markup languages an attribute without an expressed value has its name | |
// as its string value | |
name = attstore[ind]; | |
if (options.lang === "html") { | |
name = name.toLowerCase(); | |
} | |
if (options.lang === "jsx") { | |
record.token = name; | |
} | |
else { | |
record.token = name + "=\"" + attstore[ind] + "\""; | |
} | |
parse.push(data, record, ""); | |
} | |
else { | |
// separates out the attribute name from its value | |
slice = attstore[ind].slice(eq + 1); | |
if (syntax.indexOf(slice.charAt(0)) < 0 && cft === undefined) { | |
slice = "\"" + slice + "\""; | |
} | |
name = attstore[ind].slice(0, eq); | |
if (options.lang === "html" && cft === undefined) { | |
name = name.toLowerCase(); | |
} | |
if (options.lang === "jsx" && (/^(\s*\{)/).test(slice) === true) { | |
if (ind === 0 && (ltype === "singleton" || ltype === "template")) { | |
parse.structure.push([ | |
tagName(element).replace(/\/$/, ""), | |
parse.count | |
]); | |
} | |
record.token = name + "={"; | |
record.types = "jsx_attribute_start"; | |
parse.push(data, record, ""); | |
parse.structure.push(["jsx_attribute", parse.count]); | |
name = slice | |
.replace(/^(\s*\{)/, "") | |
.replace(/(\}\s*)$/, jsxAttribute); | |
framework.lexer.script(name); | |
record.begin = record.begin + 1; | |
record.token = "}"; | |
record.types = "jsx_attribute_end"; | |
parse.push(data, record, ""); | |
record.types = "attribute"; | |
parse.structure.pop(); | |
if (ind === len - 1 && (ltype === "singleton" || ltype === "template")) { | |
parse.structure.pop(); | |
} | |
} | |
else { | |
name = name + "=" + slice; | |
record.token = name.replace(/(\s+)$/, ""); | |
parse.push(data, record, ""); | |
} | |
} | |
ind = ind + 1; | |
} while (ind < len); | |
} | |
if (store.length > 0) { | |
record.token = store.join(" "); | |
parse.push(data, record, ""); | |
} | |
}; | |
ext = false; | |
// This complex series of conditions determines an element's delimiters. Look to
// the types being pushed to quickly reason about the logic. No type is pushed
// for start tags or singleton tags just yet. Some types set the `preserve` flag,
// which means internal white space is preserved. The `nopush` flag is set when
// parsed tags are to be ignored and forgotten.
(function lexer_markup_types() { | |
if (end === "]>") { | |
end = ">"; | |
sgmlflag = sgmlflag - 1; | |
ltype = "template_end"; | |
} | |
else if (end === "---") { | |
preserve = true; | |
ltype = "comment"; | |
} | |
else if (b[a] === "<") { | |
if (b[a + 1] === "/") { | |
if (b[a + 2] === "#") { | |
ltype = "template_end"; | |
} | |
else { | |
ltype = "end"; | |
} | |
end = ">"; | |
} | |
else if (b[a + 1] === "!") { | |
if (b[a + 2] === "-" && b[a + 3] === "-") { | |
if (b[a + 4] === "#") { | |
end = "-->"; | |
ltype = "template"; | |
} | |
else if (b[a + 4] === "-" && (/<cf[a-z]/i).test(source) === true) { | |
preserve = true; | |
comment = true; | |
end = "--->"; | |
ltype = "comment"; | |
} | |
else { | |
end = "-->"; | |
preserve = true; | |
comment = true; | |
ltype = "comment"; | |
} | |
} | |
else if (b[a + 2] === "[" && b[a + 3] === "C" && b[a + 4] === "D" && b[a + 5] === "A" && b[a + 6] === "T" && b[a + 7] === "A" && b[a + 8] === "[") { | |
end = "]]>"; | |
preserve = true; | |
comment = true; | |
ltype = "cdata"; | |
} | |
else { | |
end = ">"; | |
sgmlflag = sgmlflag + 1; | |
ltype = "sgml"; | |
} | |
} | |
else if (b[a + 1] === "?") { | |
end = "?>"; | |
if (b[a + 2] === "x" && b[a + 3] === "m" && b[a + 4] === "l") { | |
ltype = "xml"; | |
} | |
else { | |
preserve = true; | |
ltype = "template"; | |
} | |
} | |
else if (b[a + 1] === "%") { | |
if (b[a + 2] !== "=") { | |
preserve = true; | |
} | |
if (b[a + 2] === "-" && b[a + 3] === "-") { | |
end = "--%>"; | |
comment = true; | |
ltype = "comment"; | |
} | |
else if (b[a + 2] === "#") { | |
end = "%>"; | |
comment = true; | |
ltype = "comment"; | |
} | |
else { | |
end = "%>"; | |
ltype = "template"; | |
} | |
} | |
else if ((b[a + 1] === "p" || b[a + 1] === "P") && (b[a + 2] === "r" || b[a + 2] === "R") && (b[a + 3] === "e" || b[a + 3] === "E") && (b[a + 4] === ">" || (/\s/).test(b[a + 4]) === true)) { | |
end = "</pre>"; | |
preserve = true; | |
ltype = "ignore"; | |
} | |
else if ((b[a + 1] === "x" || b[a + 1] === "X") && (b[a + 2] === "m" || b[a + 2] === "M") && (b[a + 3] === "l" || b[a + 3] === "L") && b[a + 4] === ":" && (b[a + 5] === "t" || b[a + 5] === "T") && (b[a + 6] === "e" || b[a + 6] === "E") && (b[a + 7] === "x" || b[a + 7] === "X") && (b[a + 8] === "t" || b[a + 8] === "T") && (b[a + 9] === ">" || (/\s/).test(b[a + 9]) === true)) { | |
end = "</xsl:text>"; | |
preserve = true; | |
ltype = "ignore"; | |
} | |
else if ((b[a + 1] === "c" || b[a + 1] === "C") && (b[a + 2] === "f" || b[a + 2] === "F") && (b[a + 3] === "q" || b[a + 3] === "Q") && (b[a + 4] === "u" || b[a + 4] === "U") && (b[a + 5] === "e" || b[a + 5] === "E") && (b[a + 6] === "r" || b[a + 6] === "R") && (b[a + 7] === "y" || b[a + 7] === "Y") && (b[a + 8] === ">" || (/\s/).test(b[a + 8]) === true)) { | |
end = "</" + b.slice(a + 1, a + 8).join("") + ">"; | |
preserve = true; | |
ltype = "content_preserve"; | |
} | |
else if (b[a + 1] === "<") { | |
if (b[a + 2] === "<") { | |
end = ">>>"; | |
} | |
else { | |
end = ">>"; | |
} | |
ltype = "template"; | |
} | |
else if (b[a + 1] === "#") { | |
if (b[a + 2] === "e" && b[a + 3] === "l" && b[a + 4] === "s" && b[a + 5] === "e") { | |
end = ">"; | |
ltype = "template_else"; | |
} | |
else if (b[a + 2] === "-" && b[a + 3] === "-") { | |
end = "-->"; | |
ltype = "comment"; | |
preserve = true; | |
} | |
else { | |
end = ">"; | |
ltype = "template_start"; | |
} | |
} | |
else { | |
simple = true; | |
end = ">"; | |
} | |
} | |
else if (b[a] === "{") { | |
preserve = true; | |
if (options.lang === "jsx") { | |
ext = true; | |
earlyexit = true; | |
record.token = "{"; | |
record.types = "script"; | |
parse.push(data, record, ""); | |
parse.structure.push(["script", parse.count]); | |
return; | |
} | |
if (options.lang === "dustjs") { | |
if (b[a + 1] === ":" && b[a + 2] === "e" && b[a + 3] === "l" && b[a + 4] === "s" && b[a + 5] === "e" && b[a + 6] === "}") { | |
a = a + 6; | |
earlyexit = true; | |
record.presv = true; | |
record.token = "{:else}"; | |
record.types = "template_else"; | |
parse.push(data, record, ""); | |
return; | |
} | |
if (b[a + 1] === "!") { | |
end = "!}"; | |
comment = true; | |
ltype = "comment"; | |
} | |
else if (b[a + 1] === "/") { | |
end = "}"; | |
ltype = "template_end"; | |
} | |
else if (b[a + 1] === "~") { | |
end = "}"; | |
ltype = "singleton"; | |
} | |
else if (b[a + 1] === ">") { | |
end = "/}"; | |
ltype = "singleton"; | |
} | |
else if (b[a + 1] === "#" || b[a + 1] === "?" || b[a + 1] === "^" || b[a + 1] === "@" || b[a + 1] === "<" || b[a + 1] === "+") { | |
end = "}"; | |
ltype = "template_start"; | |
} | |
else { | |
end = "}"; | |
ltype = "template"; | |
} | |
} | |
else if (b[a + 1] === "{") { | |
if (b[a + 2] === "{") { | |
end = "}}}"; | |
ltype = "template"; | |
} | |
else if (b[a + 2] === "#") { | |
end = "}}"; | |
ltype = "template_start"; | |
} | |
else if (b[a + 2] === "/") { | |
end = "}}"; | |
ltype = "template_end"; | |
} | |
else if (b[a + 2] === "e" && b[a + 3] === "n" && b[a + 4] === "d") { | |
end = "}}"; | |
ltype = "template_end"; | |
} | |
else if (b[a + 2] === "e" && b[a + 3] === "l" && b[a + 4] === "s" && b[a + 5] === "e") { | |
end = "}}"; | |
ltype = "template_else"; | |
} | |
else { | |
end = "}}"; | |
ltype = "template"; | |
} | |
} | |
else if (b[a + 1] === "%") { | |
end = "%}"; | |
ltype = "template"; | |
} | |
else if (b[a + 1] === "#") { | |
end = "#}"; | |
ltype = "comment"; | |
preserve = true; | |
comment = true; | |
} | |
else { | |
end = b[a + 1] + "}"; | |
ltype = "template"; | |
} | |
if (b[a + 1] === "@" && b[a + 2] === "}" && b[a + 3] === "e" && b[a + 4] === "l" && b[a + 5] === "s" && b[a + 6] === "e" && b[a + 7] === "{" && b[a + 8] === "@" && b[a + 9] === "}") { | |
a = a + 9; | |
earlyexit = true; | |
record.presv = true; | |
record.token = "{@}else{@}"; | |
record.types = "template_else"; | |
parse.push(data, record, ""); | |
return; | |
} | |
} | |
else if (b[a] === "[" && b[a + 1] === "%") { | |
end = "%]"; | |
ltype = "template"; | |
} | |
else if (b[a] === "#" && options.lang === "apacheVelocity") { | |
if (b[a + 1] === "*") { | |
preserve = true; | |
comment = true; | |
end = "*#"; | |
ltype = "comment"; | |
} | |
else if (b[a + 1] === "[" && b[a + 2] === "[") { | |
preserve = true; | |
comment = true; | |
end = "]]#"; | |
ltype = "comment"; | |
} | |
else if (b[a + 1] === "#") { | |
preserve = true; | |
comment = true; | |
end = "\n"; | |
ltype = "comment"; | |
} | |
else if (b[a + 1] === "e" && b[a + 2] === "l" && b[a + 3] === "s" && b[a + 4] === "e" && (/\s/).test(b[a + 5]) === true) { | |
end = "\n"; | |
ltype = "template_else"; | |
} | |
else if (b[a + 1] === "i" && b[a + 2] === "f") { | |
end = "\n"; | |
ltype = "template_start"; | |
} | |
else if (b[a + 1] === "f" && b[a + 2] === "o" && b[a + 3] === "r" && b[a + 4] === "e" && b[a + 5] === "a" && b[a + 6] === "c" && b[a + 7] === "h") { | |
end = "\n"; | |
ltype = "template_start"; | |
} | |
else if (b[a + 1] === "e" && b[a + 2] === "n" && b[a + 3] === "d") { | |
end = "\n"; | |
ltype = "template_end"; | |
} | |
else { | |
end = "\n"; | |
ltype = "template"; | |
} | |
} | |
else if (b[a] === "$" && options.lang === "apacheVelocity") { | |
end = "\n"; | |
ltype = "template"; | |
} | |
if (options.lexerOptions.markup.unformatted === true) { | |
preserve = true; | |
} | |
}()); | |
if (earlyexit === true) { | |
return; | |
} | |
// This is the real tag lexer. Everything that follows is attribute handling and | |
// edge cases | |
lastchar = end.charAt(end.length - 1); | |
if (a < c) { | |
let bcount = 0, braccount = 0, jsxcount = 0, e = 0, f = 0, parncount = 0, quote = "", jsxquote = "", stest = false, quotetest = false, attribute = []; | |
const lex = [], | |
//finds slash escape sequences | |
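// counts the backslashes immediately before b[a]; an odd count means the
// character at b[a] is escaped (returns true), an even count means the
// backslashes only escape each other (returns false)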
slashy = function lexer_markup_tag_slashy() { | |
let x = a; | |
do { | |
x = x - 1; | |
} while (b[x] === "\\"); | |
x = a - x; | |
if (x % 2 === 1) { | |
return false; | |
} | |
return true; | |
}, | |
// attribute lexer | |
attributeLexer = function lexer_markup_tag_attributeLexer(quotes) { | |
let atty = "", name, aa = 0, bb = 0; | |
if (quotes === true) { | |
atty = attribute.join(""); | |
name = arname(atty); | |
if (name[0] === "data-parse-ignore" || name[0] === "data-prettydiff-ignore") { | |
ignoreme = true; | |
} | |
quote = ""; | |
} | |
else { | |
atty = attribute | |
.join("") | |
.replace(/\s+/g, " "); | |
name = arname(atty); | |
if (name[0] === "data-parse-ignore" || name[0] === "data-prettydiff-ignore") { | |
ignoreme = true; | |
} | |
if (options.lang === "jsx" && attribute[0] === "{" && attribute[attribute.length - 1] === "}") { | |
jsxcount = 0; | |
} | |
} | |
if (atty.slice(0, 3) === "<%=" || atty.slice(0, 2) === "{%") { | |
nosort = true; | |
} | |
atty = atty | |
.replace(/^\u0020/, "") | |
.replace(/\u0020$/, ""); | |
attribute = atty | |
.replace(/\r\n/g, "\n") | |
.split("\n"); | |
bb = attribute.length; | |
if (aa < bb) { | |
do { | |
attribute[aa] = attribute[aa].replace(/(\s+)$/, ""); | |
aa = aa + 1; | |
} while (aa < bb); | |
} | |
if (options.crlf === true) { | |
atty = attribute.join("\r\n"); | |
} | |
else { | |
atty = attribute.join("\n");
} | |
if (atty === "=") { | |
attstore[attstore.length - 1] = attstore[attstore.length - 1] + "="; | |
} | |
else if (atty.charAt(0) === "=" && attstore.length > 0 && attstore[attstore.length - 1].indexOf("=") < 0) { | |
//if an attribute starts with a `=` then adjoin it to the last attribute | |
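// e.g. attstore ["width"] followed by atty "=\"100\"" becomes
// ["width=\"100\""] (illustrative values)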
attstore[attstore.length - 1] = attstore[attstore.length - 1] + atty; | |
} | |
else if (atty.charAt(0) !== "=" && attstore.length > 0 && attstore[attstore.length - 1].indexOf("=") === attstore[attstore.length - 1].length - 1) { | |
// if an attribute follows an attribute ending with `=` then adjoin it to the | |
// last attribute | |
attstore[attstore.length - 1] = attstore[attstore.length - 1] + atty; | |
} | |
else if (options.lang === "coldfusion" && attstore.length > 0 && (("+-*/(^").indexOf(atty) > -1 || ("+-*/(^").indexOf(attstore[attstore.length - 1].charAt(attstore[attstore.length - 1].length - 1)) > -1)) { | |
attstore[attstore.length - 1] = attstore[attstore.length - 1] + " " + atty; | |
} | |
else if (atty !== "" && atty !== " ") { | |
attstore.push(atty); | |
} | |
attribute = []; | |
}; | |
do { | |
if (b[a] === "\n") { | |
if (options.lang === "apacheVelocity" && lex[0] === "#") { | |
a = a - 1; | |
break; | |
} | |
parse.lineNumber = parse.lineNumber + 1; | |
} | |
if (preserve === true || (/\s/).test(b[a]) === false) { | |
lex.push(b[a]); | |
} | |
if (comment === true) { | |
quote = ""; | |
//comments must ignore fancy encapsulations and attribute parsing | |
if (b[a] === lastchar && lex.length > end.length + 1) { | |
//if current character matches the last character of the tag ending sequence | |
f = lex.length; | |
e = end.length - 1; | |
if (e > -1) { | |
do { | |
f = f - 1; | |
if (lex[f] !== end.charAt(e)) { | |
break; | |
} | |
e = e - 1; | |
} while (e > -1); | |
} | |
if (e < 0) { | |
if (end === "endcomment") { | |
f = f - 1; | |
if ((/\s/).test(lex[f]) === true) { | |
do { | |
f = f - 1; | |
} while ((/\s/).test(lex[f]) === true); | |
} | |
if (lex[f - 1] === "{" && lex[f] === "%") { | |
end = "%}"; | |
lastchar = "}"; | |
} | |
} | |
else { | |
break; | |
} | |
} | |
} | |
} | |
else { | |
if (quote === "") { | |
if (options.lang === "jsx") { | |
if (b[a] === "{") { | |
jsxcount = jsxcount + 1; | |
} | |
else if (b[a] === "}") { | |
jsxcount = jsxcount - 1; | |
} | |
} | |
if (data.types[parse.count] === "sgml" && b[a] === "[" && lex.length > 4) { | |
data.types[parse.count] = "template_start"; | |
break; | |
} | |
if (b[a] === "<" && options.lang !== "coldfusion" && preserve === false && lex.length > 1 && end !== ">>" && end !== ">>>" && simple === true) { | |
framework.parseerror = "Parse error on line " + parse.lineNumber + " on element: " + data.token[parse.count]; | |
} | |
if (stest === true && (/\s/).test(b[a]) === false && b[a] !== lastchar) { | |
//attribute start | |
stest = false; | |
quote = jsxquote; | |
igcount = 0; | |
lex.pop(); | |
if (a < c) { | |
do { | |
if (b[a] === "\n") { | |
parse.lineNumber = parse.lineNumber + 1; | |
} | |
if (options.lexerOptions.markup.unformatted === true) { | |
lex.push(b[a]); | |
} | |
attribute.push(b[a]); | |
if ((b[a] === "<" || b[a] === ">") && (quote === "" || quote === ">") && options.lang !== "jsx") { | |
if (quote === "" && b[a] === "<") { | |
quote = ">"; | |
braccount = 1; | |
} | |
else if (quote === ">") { | |
if (b[a] === "<") { | |
braccount = braccount + 1; | |
} | |
else if (b[a] === ">") { | |
braccount = braccount - 1; | |
if (braccount === 0) { | |
// the following detects if a coldfusion tag is embedded within another markup | |
// tag | |
tname = tagName(attribute.join("")); | |
if (cftags[tname] === "required") { | |
quote = "</" + tname + ">"; | |
} | |
else { | |
quote = ""; | |
igcount = 0; | |
attributeLexer(false); | |
break; | |
} | |
} | |
} | |
} | |
} | |
else if (quote === "") { | |
if (b[a + 1] === lastchar) { | |
//if at end of tag | |
if (attribute[attribute.length - 1] === "/") { | |
attribute.pop(); | |
if (preserve === true) { | |
lex.pop(); | |
} | |
a = a - 1; | |
} | |
if (attribute.length > 0) { | |
attributeLexer(false); | |
} | |
break; | |
} | |
if (b[a] === "{" && b[a - 1] === "=" && options.lang !== "jsx") { | |
quote = "}"; | |
} | |
else if (b[a] === "\"" || b[a] === "'") { | |
quote = b[a]; | |
if (b[a - 1] === "=" && (b[a + 1] === "<" || (b[a + 1] === "{" && b[a + 2] === "%") || (/\s/).test(b[a + 1]) === true)) { | |
igcount = a; | |
} | |
} | |
else if (b[a] === "(") { | |
quote = ")"; | |
parncount = 1; | |
} | |
else if (options.lang === "jsx") { | |
//jsx variable attribute | |
if ((b[a - 1] === "=" || (/\s/).test(b[a - 1]) === true) && b[a] === "{") { | |
quote = "}"; | |
bcount = 1; | |
} | |
else if (b[a] === "/") { | |
//jsx comments | |
if (b[a + 1] === "*") { | |
quote = "*/"; | |
} | |
else if (b[a + 1] === "/") { | |
quote = "\n"; | |
} | |
} | |
} | |
else if (lex[0] !== "{" && b[a] === "{" && (options.lang === "dustjs" || b[a + 1] === "{" || b[a + 1] === "%" || b[a + 1] === "@" || b[a + 1] === "#")) { | |
//opening embedded template expression | |
if (b[a + 1] === "{") { | |
if (b[a + 2] === "{") { | |
quote = "}}}"; | |
} | |
else { | |
quote = "}}"; | |
} | |
} | |
else if (options.lang === "dustjs") { | |
quote = "}"; | |
} | |
else { | |
quote = b[a + 1] + "}"; | |
} | |
} | |
if ((/\s/).test(b[a]) === true && quote === "") { | |
// testing for a run of spaces between an attribute's = and a quoted value. | |
// Unquoted values separated by space are separate attributes | |
if (attribute[attribute.length - 2] === "=") { | |
e = a + 1; | |
if (e < c) { | |
do { | |
if ((/\s/).test(b[e]) === false) { | |
if (b[e] === "\"" || b[e] === "'") { | |
a = e - 1; | |
quotetest = true; | |
attribute.pop(); | |
} | |
break; | |
} | |
e = e + 1; | |
} while (e < c); | |
} | |
} | |
if (quotetest === true) { | |
quotetest = false; | |
} | |
else if (jsxcount === 0 || (jsxcount === 1 && attribute[0] === "{")) { | |
//if there is an unquoted space the attribute is complete
attribute.pop(); | |
attributeLexer(false); | |
stest = true; | |
break; | |
} | |
} | |
} | |
else if (b[a] === "(" && quote === ")") { | |
parncount = parncount + 1; | |
} | |
else if (b[a] === ")" && quote === ")") { | |
parncount = parncount - 1; | |
if (parncount === 0) { | |
quote = ""; | |
if (b[a + 1] === end.charAt(0)) { | |
attributeLexer(false); | |
break; | |
} | |
} | |
} | |
else if (options.lang === "jsx" && (quote === "}" || (quote === "\n" && b[a] === "\n") || (quote === "*/" && b[a - 1] === "*" && b[a] === "/"))) { | |
//jsx attributes | |
if (quote === "}") { | |
if (b[a] === "{") { | |
bcount = bcount + 1; | |
} | |
else if (b[a] === quote) { | |
bcount = bcount - 1; | |
if (bcount === 0) { | |
jsxcount = 0; | |
quote = ""; | |
element = attribute.join(""); | |
if (options.lexerOptions.markup.unformatted === false) { | |
if (options.lang === "jsx") { | |
if ((/^(\s*)$/).test(element) === false) { | |
attstore.push(element); | |
} | |
} | |
else { | |
element = element.replace(/\s+/g, " "); | |
if (element !== " ") { | |
attstore.push(element); | |
} | |
} | |
} | |
else if ((/^(\s+)$/).test(element) === false) { | |
attstore.push(element); | |
} | |
attribute = []; | |
break; | |
} | |
} | |
} | |
else { | |
quote = ""; | |
jsxquote = ""; | |
jscom = true; | |
element = attribute.join(""); | |
if (element.charAt(1) === "*") { | |
element = element + "\n"; | |
} | |
attribute = []; | |
if (element !== " ") { | |
attstore.push(element); | |
} | |
break; | |
} | |
} | |
else if (b[a] === "{" && b[a + 1] === "%" && b[igcount - 1] === "=" && (quote === "\"" || quote === "'")) { | |
quote = quote + "{%"; | |
igcount = 0; | |
} | |
else if (b[a - 1] === "%" && b[a] === "}" && (quote === "\"{%" || quote === "'{%")) { | |
quote = quote.charAt(0); | |
igcount = 0; | |
} | |
else if (b[a] === "<" && end === ">" && b[igcount - 1] === "=" && (quote === "\"" || quote === "'")) { | |
quote = quote + "<"; | |
igcount = 0; | |
} | |
else if (b[a] === ">" && (quote === "\"<" || quote === "'<")) { | |
quote = quote.charAt(0); | |
igcount = 0; | |
} | |
else if (igcount === 0 && quote !== ">" && (quote.length < 2 || (quote.charAt(0) !== "\"" && quote.charAt(0) !== "'"))) { | |
//terminate attribute at the conclusion of a quote pair | |
f = 0; | |
if (lex.length > 1) { | |
tname = lex[1] + lex[2]; | |
tname = tname.toLowerCase(); | |
} | |
// in coldfusion a quote character inside a string is escaped by doubling it:
// "cat"" and dog"
if (tname === "cf" && b[a] === b[a + 1] && (b[a] === "\"" || b[a] === "'")) { | |
attribute.push(b[a + 1]); | |
a = a + 1; | |
} | |
else { | |
e = quote.length - 1; | |
if (e > -1) { | |
do { | |
if (b[a - f] !== quote.charAt(e)) { | |
break; | |
} | |
f = f + 1; | |
e = e - 1; | |
} while (e > -1); | |
} | |
if (e < 0) { | |
attributeLexer(true); | |
if (b[a + 1] === lastchar) { | |
break; | |
} | |
} | |
} | |
} | |
else if (igcount > 0 && (/\s/).test(b[a]) === false) { | |
igcount = 0; | |
} | |
a = a + 1; | |
} while (a < c); | |
} | |
} | |
else if (end !== "%>" && end !== "\n" && (b[a] === "\"" || b[a] === "'")) { | |
//opening quote | |
quote = b[a]; | |
} | |
else if (comment === false && end !== "\n" && b[a] === "<" && b[a + 1] === "!" && b[a + 2] === "-" && b[a + 3] === "-" && b[a + 4] !== "#" && data.types[parse.count] !== "conditional") { | |
quote = "-->"; | |
} | |
else if (lex[0] !== "{" && end !== "\n" && b[a] === "{" && end !== "%>" && end !== "%]" && (options.lang === "dustjs" || b[a + 1] === "{" || b[a + 1] === "%" || b[a + 1] === "@" || b[a + 1] === "#")) { | |
//opening embedded template expression | |
if (b[a + 1] === "{") { | |
if (b[a + 2] === "{") { | |
quote = "}}}"; | |
} | |
else { | |
quote = "}}"; | |
} | |
} | |
else if (options.lang === "dustjs") { | |
quote = "}"; | |
} | |
else { | |
quote = b[a + 1] + "}"; | |
} | |
if (quote === end) { | |
quote = ""; | |
} | |
} | |
else if ((simple === true || ltype === "sgml") && end !== "\n" && (/\s/).test(b[a]) === true && b[a - 1] !== "<") { | |
//identify a space in a regular start or singleton tag | |
if (ltype === "sgml") { | |
lex.push(" "); | |
} | |
else { | |
stest = true; | |
} | |
} | |
else if (simple === true && options.lang === "jsx" && b[a] === "/" && (b[a + 1] === "*" || b[a + 1] === "/")) { | |
//jsx comment immediately following tag name | |
stest = true; | |
lex[lex.length - 1] = " "; | |
attribute.push(b[a]); | |
if (b[a + 1] === "*") { | |
jsxquote = "*/"; | |
} | |
else { | |
jsxquote = "\n"; | |
} | |
} | |
else if ((b[a] === lastchar || (end === "\n" && b[a + 1] === "<")) && (lex.length > end.length + 1 || lex[0] === "]") && (options.lang !== "jsx" || jsxcount === 0)) { | |
if (end === "\n") { | |
if ((/\s/).test(lex[lex.length - 1]) === true) { | |
do { | |
lex.pop(); | |
a = a - 1; | |
} while ((/\s/).test(lex[lex.length - 1]) === true); | |
} | |
break; | |
} | |
if (lex[0] === "{" && lex[1] === "%" && lex.join("").replace(/\s+/g, "") === "{%comment%}") { | |
end = "endcomment"; | |
lastchar = "t"; | |
preserve = true; | |
comment = true; | |
ltype = "comment"; | |
} | |
else { | |
//if current character matches the last character of the tag ending sequence | |
f = lex.length; | |
e = end.length - 1; | |
if (e > -1) { | |
do { | |
f = f - 1; | |
if (lex[f] !== end.charAt(e)) { | |
break; | |
} | |
e = e - 1; | |
} while (e > -1); | |
} | |
if (e < 0) { | |
break; | |
} | |
} | |
} | |
} | |
else if (b[a] === quote.charAt(quote.length - 1) && ((options.lang === "jsx" && end === "}" && (b[a - 1] !== "\\" || slashy() === false)) || options.lang !== "jsx" || end !== "}")) { | |
//find the closing quote or embedded template expression | |
f = 0; | |
if (lex.length > 1) { | |
tname = lex[1] + lex[2]; | |
tname = tname.toLowerCase(); | |
} | |
// in coldfusion a quote character inside a string is escaped by doubling it:
// "cat"" and dog"
if (tname === "cf" && b[a] === b[a + 1] && (b[a] === "\"" || b[a] === "'")) { | |
attribute.push(b[a + 1]); | |
a = a + 1; | |
} | |
else { | |
e = quote.length - 1; | |
if (e > -1) { | |
do { | |
if (b[a - f] !== quote.charAt(e)) { | |
break; | |
} | |
f = f + 1; | |
e = e - 1; | |
} while (e > -1); | |
} | |
if (e < 0) { | |
quote = ""; | |
} | |
} | |
} | |
} | |
a = a + 1; | |
} while (a < c); | |
//nopush flags mean an early exit | |
if (nopush) { | |
return; | |
} | |
//a correction to incomplete template tags that use multiple angle braces | |
if (options.correct === true) { | |
if (b[a + 1] === ">" && lex[0] === "<" && lex[1] !== "<") { | |
do { | |
a = a + 1; | |
} while (b[a + 1] === ">"); | |
} | |
else if (lex[0] === "<" && lex[1] === "<" && b[a + 1] !== ">" && lex[lex.length - 2] !== ">") { | |
do { | |
lex.splice(1, 1); | |
} while (lex[1] === "<"); | |
} | |
} | |
igcount = 0; | |
element = lex.join(""); | |
} | |
record.presv = preserve; | |
record.token = element; | |
record.types = ltype; | |
tname = tagName(element); | |
if ((/^(\/?cf)/i).test(tname) === true) { | |
tname = tname | |
.toLowerCase() | |
.replace(/\/$/, "") | |
.replace(/^\//, ""); | |
} | |
if (preserve === false && options.lang !== "jsx") { | |
element = element.replace(/\s+/g, " "); | |
} | |
//a quick hack to inject records for a type of template comments | |
if (tname === "comment" && element.slice(0, 2) === "{%") { | |
element = element | |
.replace(/^(\{%\s*comment\s*%\}\s*)/, "") | |
.replace(/(\s*\{%\s*endcomment\s*%\})$/, ""); | |
record.token = "{% comment %}"; | |
record.types = "template_start"; | |
parse.push(data, record, ""); | |
record.token = element; | |
record.types = "comment"; | |
parse.push(data, record, ""); | |
record.token = "{% endcomment %}"; | |
record.types = "template_end"; | |
parse.push(data, record, ""); | |
return; | |
} | |
// a type correction for template tags that have variable start tag names but a
// consistent ending tag name | |
if (element.indexOf("{{") === 0 && element.slice(element.length - 2) === "}}") { | |
if (tname === "end") { | |
ltype = "template_end"; | |
} | |
else if (tname === "else") { | |
ltype = "template_else"; | |
} | |
} | |
else if (element.slice(0, 2) === "<%" && element.slice(element.length - 2) === "%>") { | |
if ((/^(<%\s+end\s+-?%>)$/).test(element) === true) { | |
ltype = "template_end"; | |
} | |
else if (((/\sdo\s/).test(element) === true && element.indexOf("-%>") === element.length - 3) || (/^(<%(%|-)?\s*if)/).test(element) === true) { | |
ltype = "template_start"; | |
} | |
} | |
//update a flag for subatomic parsing in SGML tags | |
if (end !== "]>" && sgmlflag > 0 && element.charAt(element.length - 1) !== "[" && (element.slice(element.length - 2) === "]>" || (/^(<!((doctype)|(notation))\s)/i).test(element) === true)) { | |
sgmlflag = sgmlflag - 1; | |
} | |
// cheat identifies HTML singleton elements as singletons even if formatted as | |
// start tags, such as <br> (which is really <br/>) | |
cheat = (function lexer_markup_tag_cheat() { | |
let cfval = "", struc = []; | |
const ender = (/(\/>)$/), htmlsings = { | |
area: "singleton", | |
base: "singleton", | |
basefont: "singleton", | |
br: "singleton", | |
col: "singleton", | |
embed: "singleton", | |
eventsource: "singleton", | |
frame: "singleton", | |
hr: "singleton", | |
img: "singleton", | |
input: "singleton", | |
keygen: "singleton", | |
link: "singleton", | |
meta: "singleton", | |
param: "singleton", | |
progress: "singleton", | |
source: "singleton", | |
wbr: "singleton" | |
}, fixsingleton = function lexer_markup_tag_cheat_fixsingleton() { | |
let aa = parse.count, bb = 0; | |
const vname = tname.slice(1); | |
if (aa > -1) { | |
do { | |
if (data.types[aa] === "end") { | |
bb = bb + 1; | |
} | |
else if (data.types[aa] === "start") { | |
bb = bb - 1; | |
if (bb < 0) { | |
return false; | |
} | |
} | |
if (bb === 0 && data.token[aa].toLowerCase().indexOf(vname) === 1) { | |
if (cftags[tname] !== undefined) { | |
data.types[aa] = "template_start"; | |
} | |
else { | |
data.types[aa] = "start"; | |
} | |
data.token[aa] = data | |
.token[aa] | |
.replace(/(\s*\/>)$/, ">"); | |
return false; | |
} | |
aa = aa - 1; | |
} while (aa > -1); | |
} | |
return false; | |
}; | |
//determine if the current tag is an HTML singleton and exit | |
if (data.types[parse.count] === "end" && tname.slice(0, 3) !== "/cf") { | |
const lastToken = data.token[parse.count]; | |
if (data.types[parse.count - 1] === "singleton" && lastToken.charAt(lastToken.length - 2) !== "/" && "/" + tagName(lastToken) === tname) { | |
data.types[parse.count - 1] = "start"; | |
} | |
else if (tname !== "/span" && tname !== "/div" && tname !== "/script" && tname === "/" + tagName(data.token[parse.count]) && options.lexerOptions.markup.tagMerge === true && (data.types[parse.count - 1] === "start" || htmlsings[tname.slice(1)] === "singleton") && (options.lang !== "html" || (options.lang === "html" && tname !== "/li"))) { | |
parse.pop(data); | |
if (data.types[parse.count] === "start") { | |
data.token[parse.count] = data | |
.token[parse.count] | |
.replace(/>$/, "/>"); | |
} | |
data.types[parse.count] = "singleton"; | |
singleton = true; | |
return false; | |
} | |
} | |
//renames the types value for the following two template tags | |
if (tname === "/#assign" || tname === "/#global") { | |
let dd = parse.count - 1, count = 1; | |
if (dd > -1) { | |
do { | |
if (data.types[dd] === "start" || data.types[dd] === "template_start") { | |
count = count - 1; | |
} | |
else if (data.types[dd] === "end" || data.types[dd] === "template_end") { | |
count = count + 1; | |
} | |
if (count === 1) { | |
if ((data.token[dd].indexOf("<#assign") === 0 && tname === "/#assign") || (data.token[dd].indexOf("<#global") === 0 && tname === "/#global")) { | |
data.types[dd] = "template_start"; | |
return false; | |
} | |
} | |
if (count === 0) { | |
return false; | |
} | |
dd = dd - 1; | |
} while (dd > -1); | |
} | |
return false; | |
} | |
//determines if custom coldfusion tags are singletons | |
cfval = cftags[tname]; | |
if (data.types[parse.count] === "end" && tname.slice(0, 3) === "/cf" && cfval !== undefined) { | |
if (tname === "/cftransaction") { | |
cftransaction = false; | |
} | |
if (cfval !== undefined) { | |
data.types[parse.count] = "template_end"; | |
} | |
if ((cfval === "optional" || cfval === "prohibited") && tname !== "/cftransaction") { | |
return fixsingleton(); | |
} | |
return false; | |
} | |
//processes all other coldfusion tags | |
if (tname.slice(0, 2) === "cf") { | |
if (tname === "cfelse" || tname === "cfelseif") { | |
record.token = element; | |
record.types = "template_else"; | |
parse.push(data, record, ""); | |
singleton = true; | |
return false; | |
} | |
if (tname === "cftransaction" && cftransaction === true) { | |
if (element.charAt(1) === "/") { | |
record.types = "template_end"; | |
} | |
else { | |
cfval = "prohibited"; | |
} | |
} | |
else { | |
cfval = cftags[tname]; | |
} | |
if (tname === "cfscript" && element.indexOf("</cfscript") !== 0) { | |
ext = true; | |
} | |
if (cfval === "optional" || cfval === "prohibited" || tname.slice(0, 3) === "cf_") { | |
if (options.correct === true && ender.test(element) === false) { | |
element = element.slice(0, element.length - 1) + "/>"; | |
} | |
record.token = element.replace(/\s+/, " "); | |
record.types = "template"; | |
if (tname === "cfmodule" && element.charAt(1) === "/") { | |
let ss = parse.count, tt = 1; | |
do { | |
if (data.token[ss].toLowerCase() === "<cfmodule>") { | |
tt = tt - 1; | |
if (tt < 1) { | |
break; | |
} | |
} | |
else if (data.token[ss].toLowerCase() === "</cfmodule>") { | |
tt = tt + 1; | |
} | |
ss = ss - 1; | |
} while (ss > -1); | |
data.types[ss] = "template_start"; | |
tt = ss + 1; | |
struc = [["cfmodule", ss]]; | |
ss = parse.count + 1; | |
do { | |
if (data.types[tt] === "end" || data.types[tt] === "template_end") { | |
data.begin[tt] = struc[struc.length - 1][1]; | |
data.stack[tt] = struc[struc.length - 1][0]; | |
if (struc.length > 1) { | |
struc.pop(); | |
} | |
} | |
else if (data.types[tt] === "start" || data.types[tt] === "template_start" || (data.types[tt] === "cdata" && data.token[data.begin[tt + 1]].toLowerCase().indexOf("<script") === 0)) { | |
data.begin[tt] = struc[struc.length - 1][1]; | |
data.stack[tt] = struc[struc.length - 1][0]; | |
struc.push([tagName(data.token[tt]), tt]); | |
} | |
else { | |
data.begin[tt] = struc[struc.length - 1][1]; | |
data.stack[tt] = struc[struc.length - 1][0]; | |
} | |
tt = tt + 1; | |
} while (tt < ss); | |
parse.structure.push(struc[0]); | |
record.begin = struc[0][1]; | |
record.stack = "cfmodule"; | |
record.types = "template_end"; | |
} | |
parse.push(data, record, ""); | |
singleton = true; | |
return false; | |
} | |
if (cfval === "required" && tname !== "cfquery") { | |
if (tname === "cftransaction" && cftransaction === false) { | |
cftransaction = true; | |
} | |
record.token = element; | |
record.types = (ltype === "end") | |
? "template_end" | |
: "template_start"; | |
parse.push(data, record, tname); | |
singleton = true; | |
} | |
return false; | |
} | |
if (options.lang === "html") { | |
// html tag names are converted to lowercase; to preserve case sensitivity,
// beautify as XML instead
if (element.charAt(0) === "<" && element.charAt(1) !== "!" && element.charAt(1) !== "?" && (parse.count < 0 || data.types[parse.count].indexOf("template") < 0) && cftags[tname] === undefined && tname.slice(0, 3) !== "cf_") { | |
element = element.toLowerCase(); | |
} | |
//looks for HTML "li" tags that have no ending tag, which is valid in HTML | |
if (tname === "li") { | |
if (litag === list && (list !== 0 || (list === 0 && parse.count > -1 && data.types[parse.count].indexOf("template") < 0))) { | |
let d = parse.count, ee = 1; | |
if (d > -1) { | |
do { | |
if (data.types[d] === "start" || data.types[d] === "template_start") { | |
ee = ee - 1; | |
} | |
else if (data.types[d] === "end" || data.types[d] === "template_end") { | |
ee = ee + 1; | |
} | |
if (ee === -1 && (tagName(data.token[d]) === "li" || (tagName(data.token[d + 1]) === "li" && (tagName(data.token[d]) === "ul" || tagName(data.token[d]) === "ol")))) { | |
record.lines = data.lines[parse.count]; | |
record.presv = false; | |
record.token = "</li>"; | |
record.types = "end"; | |
parse.push(data, record, ""); | |
record.begin = parse.structure[parse.structure.length - 1][1]; | |
record.lines = parse.linesSpace; | |
record.presv = preserve; | |
record.stack = parse.structure[parse.structure.length - 1][0]; | |
record.token = element; | |
record.types = ltype; | |
data.lines[parse.count - 1] = 0; | |
break; | |
} | |
if (ee < 0) { | |
break; | |
} | |
d = d - 1; | |
} while (d > -1); | |
} | |
} | |
else { | |
litag = litag + 1; | |
} | |
} | |
else if (tname === "/li" && litag === list) { | |
litag = litag - 1; | |
} | |
else if (tname === "ul" || tname === "ol") { | |
list = list + 1; | |
} | |
else if (tname === "/ul" || tname === "/ol") { | |
if (litag === list) { | |
record.lines = data.lines[parse.count]; | |
record.presv = false; | |
record.token = "</li>"; | |
record.types = "end"; | |
parse.push(data, record, ""); | |
record.begin = parse.structure[parse.structure.length - 1][1]; | |
record.lines = parse.linesSpace; | |
record.presv = preserve; | |
record.stack = parse.structure[parse.structure.length - 1][0]; | |
record.token = element; | |
record.types = "end"; | |
data.lines[parse.count - 1] = 0; | |
litag = litag - 1; | |
} | |
list = list - 1; | |
} | |
//generalized corrections for the handling of singleton tags | |
if (data.types[parse.count] === "end" && htmlsings[tname.slice(1)] === "singleton" && element.toLowerCase().indexOf("/cftransaction") !== 1) { | |
return fixsingleton(); | |
} | |
//inserts a trailing slash into singleton tags if they do not already have it | |
if (htmlsings[tname] === "singleton") { | |
if (options.correct === true && ender.test(element) === false) { | |
element = element.slice(0, element.length - 1) + " />"; | |
} | |
return true; | |
} | |
} | |
return false; | |
}()); | |
//This escape flag is set in the cheat function | |
if (singleton === true) { | |
attributeRecord(); | |
return; | |
} | |
//tags with the following names are singletons | |
if (tname.charAt(0) === "#" && data.types[parse.count] === "start" && (tname === "#assign" || tname === "#break" || tname === "#case" || tname === "#default" || tname === "#fallback" || tname === "#flush" || tname === "#ftl" || tname === "#global" || tname === "#import" || tname === "#include" || tname === "#local" || tname === "#t" || tname === "#lt" || tname === "#rt" || tname === "#nested" || tname === "#nt" || tname === "#recover" || tname === "#recurse" || tname === "#return" || tname === "#sep" || tname === "#setting" || tname === "#stop" || tname === "#visit")) { | |
simple = true; | |
} | |
//corrects dustjs tags to the template singleton type | |
if (options.lang === "dustjs" && data.types[parse.count] === "template_start") { | |
const first = element.charAt(1), ending = element.slice(element.length - 2); | |
if ((ending === "/}" || ending.charAt(0) === first) && (first === "#" || first === "?" || first === "^" || first === "@" || first === "<" || first === "+")) { | |
data.types[parse.count] = "template"; | |
} | |
} | |
// determine if the markup tag potentially contains code interpreted by a | |
// different lexer | |
if ((tname === "script" || tname === "style" || tname === "cfscript") && element.slice(element.length - 2) !== "/>") { | |
//get the attribute value for "type" | |
let len = attstore.length - 1, attValue = "", attr = []; | |
if (len > -1) { | |
do { | |
attr = arname(attstore[len]); | |
if (attr[0] === "type") { | |
attValue = attr[1]; | |
if (attValue.charAt(0) === "\"" || attValue.charAt(0) === "'") { | |
attValue = attValue.slice(1, attValue.length - 1); | |
} | |
break; | |
} | |
len = len - 1; | |
} while (len > -1); | |
} | |
//ext is a flag used to pass information between the tag lexer and the content lexer | |
if (tname === "script" && (attValue === "" || attValue === "text/javascript" || attValue === "babel" || attValue === "module" || attValue === "application/javascript" || attValue === "application/x-javascript" || attValue === "text/ecmascript" || attValue === "application/ecmascript" || attValue === "text/jsx" || attValue === "application/jsx" || attValue === "text/cjs")) { | |
ext = true; | |
} | |
else if (tname === "style" && (attValue === "" || attValue === "text/css")) { | |
ext = true; | |
} | |
else if (tname === "cfscript") { | |
ext = true; | |
} | |
if (ext === true) { | |
len = a + 1; | |
if (len < c) { | |
do { | |
if ((/\s/).test(b[len]) === false) { | |
if (b[len] === "<") { | |
if (b.slice(len + 1, len + 4).join("") === "!--") { | |
len = len + 4; | |
if (len < c) { | |
do { | |
if ((/\s/).test(b[len]) === false) { | |
ext = false; | |
break; | |
} | |
if (b[len] === "\n" || b[len] === "\r") { | |
break; | |
} | |
len = len + 1; | |
} while (len < c); | |
} | |
} | |
else { | |
ext = false; | |
} | |
} | |
break; | |
} | |
len = len + 1; | |
} while (len < c); | |
} | |
} | |
} | |
//am I a singleton or a start type? | |
if (simple === true && ignoreme === false) { | |
if (cheat === true || element.slice(element.length - 2) === "/>") { | |
ltype = "singleton"; | |
} | |
else { | |
ltype = "start"; | |
} | |
record.types = ltype; | |
} | |
// additional logic is required to find the end of a tag with the attribute | |
// data-parse-ignore | |
if (simple === true && preserve === false && ignoreme && end === ">" && element.slice(element.length - 2) !== "/>") { | |
let tags = []; | |
if (cheat === true) { | |
ltype = "singleton"; | |
} | |
else { | |
preserve = true; | |
data.presv[parse.count] = true; | |
ltype = "ignore"; | |
a = a + 1; | |
if (a < c) { | |
let delim = "", ee = 0, ff = 0, endtag = false; | |
do { | |
if (b[a] === "\n") { | |
parse.lineNumber = parse.lineNumber + 1; | |
} | |
tags.push(b[a]); | |
if (delim === "") { | |
if (b[a] === "\"") { | |
delim = "\""; | |
} | |
else if (b[a] === "'") { | |
delim = "'"; | |
} | |
else if (tags[0] !== "{" && b[a] === "{" && (options.lang === "dustjs" || b[a + 1] === "{" || b[a + 1] === "%" || b[a + 1] === "@" || b[a + 1] === "#")) { | |
if (b[a + 1] === "{") { | |
if (b[a + 2] === "{") { | |
delim = "}}}"; | |
} | |
else { | |
delim = "}}"; | |
} | |
} | |
else if (options.lang === "dustjs") { | |
delim = "}"; | |
} | |
else { | |
delim = b[a + 1] + "}"; | |
} | |
} | |
else if (b[a] === "<" && simple === true) { | |
if (b[a + 1] === "/") { | |
endtag = true; | |
} | |
else { | |
endtag = false; | |
} | |
} | |
else if (b[a] === lastchar && b[a - 1] !== "/") { | |
if (endtag === true) { | |
igcount = igcount - 1; | |
if (igcount < 0) { | |
break; | |
} | |
} | |
else { | |
igcount = igcount + 1; | |
} | |
} | |
} | |
else if (b[a] === delim.charAt(delim.length - 1)) { | |
ff = 0; | |
ee = delim.length - 1; | |
if (ee > -1) { | |
do { | |
if (b[a - ff] !== delim.charAt(ee)) { | |
break; | |
} | |
ff = ff + 1; | |
ee = ee - 1; | |
} while (ee > -1); | |
} | |
if (ee < 0) { | |
delim = ""; | |
} | |
} | |
a = a + 1; | |
} while (a < c); | |
} | |
} | |
element = element + tags.join(""); | |
element = element.replace(">", " " + attstore.join(" ") + ">"); | |
record.token = element; | |
record.types = "content-ignore"; | |
attstore = []; | |
} | |
// some template tags can be evaluated as a block start/end based on syntax | |
// alone | |
if (record.types.indexOf("template") > -1) { | |
if (element.slice(0, 2) === "{%") { | |
let names = [ | |
"autoescape", | |
"block", | |
"capture", | |
"case", | |
"comment", | |
"embed", | |
"filter", | |
"for", | |
"form", | |
"if", | |
"macro", | |
"paginate", | |
"raw", | |
"sandbox", | |
"spaceless", | |
"tablerow", | |
"unless", | |
"verbatim" | |
]; | |
if (tname === "else" || tname === "elseif" || tname === "when" || tname === "elif") { | |
record.types = "template_else"; | |
} | |
else { | |
let namelen = names.length - 1; | |
if (namelen > -1) { | |
do { | |
if (tname === names[namelen]) { | |
record.types = "template_start"; | |
break; | |
} | |
if (tname === "end" + names[namelen]) { | |
record.types = "template_end"; | |
break; | |
} | |
namelen = namelen - 1; | |
} while (namelen > -1); | |
} | |
} | |
} | |
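//double curly brace tags, such as Go templates, identified by their block keywords | |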
else if (element.slice(0, 2) === "{{" && element.charAt(3) !== "{") { | |
if ((/^(\{\{\s*-?\s*end\s*-?\s*\}\})$/).test(element) === true) { | |
record.types = "template_end"; | |
} | |
else if (tname === "block" || tname === "define" || tname === "form" || tname === "if" || tname === "range" || tname === "with") { | |
if (tname !== "block" || (/\{%\s*\w/).test(source) === false) { | |
record.types = "template_start"; | |
} | |
} | |
} | |
else if (record.types === "template") { | |
if (element.indexOf("else") > 2) { | |
record.types = "template_else"; | |
} | |
else if ((/^(<%\s*\})/).test(element) === true || (/^(\[%\s*\})/).test(element) === true || (/^(\{@\s*\})/).test(element) === true) { | |
record.types = "template_end"; | |
} | |
else if ((/(\{\s*%>)$/).test(element) === true || (/(\{\s*%\])$/).test(element) === true || (/(\{\s*@\})$/).test(element) === true) { | |
record.types = "template_start"; | |
} | |
} | |
if (record.types === "template_start" && (tname === "" || tname === "@" || tname === "#" || tname === "%")) { | |
tname = tname + element.slice(1).replace(tname, "").replace(/^(\s+)/, ""); | |
tname = tname.slice(0, tname.indexOf("(")).replace(/\s+/, ""); | |
} | |
} | |
// identify script hidden within a CDATA escape | |
if (ltype === "cdata" && record.stack === "script") { | |
let counta = parse.count, countb = parse.count; | |
if (data.types[countb] === "attribute") { | |
do { | |
counta = counta - 1; | |
countb = countb - 1; | |
} while (data.types[countb] === "attribute" && countb > -1); | |
} | |
record.begin = counta; | |
element = element | |
.replace(/^(\s*<!\[cdata\[)/i, "") | |
.replace(/(\]\]>\s*)$/, ""); | |
record.token = "<![CDATA["; | |
parse.push(data, record, ""); | |
parse.structure.push(["cdata", parse.count]); | |
framework.lexer.script(element); | |
record.begin = parse.structure[parse.structure.length - 1][1]; | |
record.token = "]]>"; | |
parse.push(data, record, ""); | |
parse.structure.pop(); | |
} | |
else { | |
parse.push(data, record, tname); | |
} | |
attributeRecord(); | |
//sorts child elements | |
if (options.lexerOptions.markup.tagSort === true && data.types[parse.count] === "end" && data.types[parse.count - 1] !== "start" && tname !== "/script" && tname !== "/style" && tname !== "/cfscript") { | |
let bb = 0, d = 0, startStore = 0, jsxatt = false, endData; | |
const children = [], store = { | |
begin: [], | |
lexer: [], | |
lines: [], | |
presv: [], | |
stack: [], | |
token: [], | |
types: [] | |
}, storeRecord = function lexer_markup_tag_sorttag_storeRecord(index) { | |
const output = { | |
begin: data.begin[index], | |
lexer: data.lexer[index], | |
lines: data.lines[index], | |
presv: data.presv[index], | |
stack: data.stack[index], | |
token: data.token[index], | |
types: data.types[index] | |
}; | |
return output; | |
}, childsort = function lexer_markup_tag_sorttag_childsort(a, b) { | |
if (data.token[a[0]] > data.token[b[0]]) { | |
return -1; | |
} | |
return 1; | |
}; | |
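//walk backwards through the parse data to collect the index ranges of each child element | |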
bb = parse.count - 1; | |
if (bb > -1) { | |
let endStore = 0; | |
do { | |
if (data.types[bb] === "start") { | |
d = d - 1; | |
if (d < 0) { | |
startStore = bb + 1; | |
if (data.types[startStore] === "attribute" || data.types[startStore] === "jsx_attribute_start") { | |
jsxatt = false; | |
do { | |
startStore = startStore + 1; | |
if (jsxatt === false && data.types[startStore] !== "attribute") { | |
break; | |
} | |
if (data.types[startStore] === "jsx_attribute_start") { | |
jsxatt = true; | |
} | |
else if (data.types[startStore] === "jsx_attribute_end") { | |
jsxatt = false; | |
} | |
} while (startStore < c); | |
} | |
break; | |
} | |
} | |
else if (data.types[bb] === "end") { | |
d = d + 1; | |
if (d === 1) { | |
endStore = bb; | |
} | |
} | |
if (d === 0) { | |
if (data.types[bb] === "start") { | |
children.push([bb, endStore]); | |
} | |
else { | |
if (data.types[bb] === "singleton" && (data.types[bb + 1] === "attribute" || data.types[bb + 1] === "jsx_attribute_start")) { | |
let cc = bb + 1; | |
jsxatt = false; | |
do { | |
if (data.types[cc] === "jsx_attribute_start") { | |
jsxatt = true; | |
} | |
else if (data.types[cc] === "jsx_attribute_end") { | |
jsxatt = false; | |
} | |
if (jsxatt === false && data.types[cc + 1] !== "attribute" && data.types[cc + 1] !== "jsx_attribute_start") { | |
break; | |
} | |
cc = cc + 1; | |
} while (cc < parse.count); | |
children.push([bb, cc]); | |
} | |
else if (data.types[bb] !== "attribute" && data.types[bb] !== "jsx_attribute_start") { | |
children.push([bb, bb]); | |
} | |
} | |
} | |
bb = bb - 1; | |
} while (bb > -1); | |
} | |
if (children.length < 2) { | |
return; | |
} | |
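//sort the child ranges alphabetically by token and rebuild them into the store in sorted order | |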
children.sort(childsort); | |
bb = children.length - 1; | |
if (bb > -1) { | |
do { | |
parse.push(store, storeRecord(children[bb][0]), ""); | |
if (children[bb][0] !== children[bb][1]) { | |
d = children[bb][0] + 1; | |
if (d < children[bb][1]) { | |
do { | |
parse.push(store, storeRecord(d), ""); | |
d = d + 1; | |
} while (d < children[bb][1]); | |
} | |
parse.push(store, storeRecord(children[bb][1]), ""); | |
} | |
bb = bb - 1; | |
} while (bb > -1); | |
} | |
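//capture the parent's closing record so it can be reapplied after the sorted children | |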
endData = { | |
begin: data.begin.pop(), | |
lexer: data.lexer.pop(), | |
lines: data.lines.pop(), | |
presv: data.presv.pop(), | |
stack: data.stack.pop(), | |
token: data.token.pop(), | |
types: data.types.pop() | |
}; | |
(function lexer_markup_tag_sorttag_slice() { | |
parse.datanames.forEach(function lexer_markup_tag_sorttag_slice_datanames(value) { | |
data[value] = data[value].slice(0, startStore); | |
}); | |
}()); | |
parse.concat(data, store); | |
parse.push(data, endData, ""); | |
} | |
parse.linesSpace = 0; | |
}, content = function lexer_markup_content() { | |
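//processes text content between tags; external code regions are handed off to the script or style lexers | |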
let lex = [], ltoke = "", jsxbrace = (data.token[parse.count] === "{"), liner = parse.linesSpace, now = a; | |
const name = (ext === true) | |
? (jsxbrace === true) | |
? "script" | |
: (parse.structure[parse.structure.length - 1][1] > -1) | |
? tagName(data.token[parse.structure[parse.structure.length - 1][1]].toLowerCase()) | |
: tagName(data.token[data.begin[parse.count]].toLowerCase()) | |
: "", square = (data.types[parse.count] === "template_start" && data.token[parse.count].indexOf("<!") === 0 && data.token[parse.count].indexOf("<![") < 0 && data.token[parse.count].charAt(data.token[parse.count].length - 1) === "["), record = { | |
begin: parse.structure[parse.structure.length - 1][1], | |
lexer: "markup", | |
lines: liner, | |
presv: (linepreserve > 0), | |
stack: parse.structure[parse.structure.length - 1][0], | |
token: "", | |
types: "content" | |
}, esctest = function lexer_markup_content_esctest() { | |
let aa = a - 1, bb = 0; | |
if (b[a - 1] !== "\\") { | |
return false; | |
} | |
if (aa > -1) { | |
do { | |
if (b[aa] !== "\\") { | |
break; | |
} | |
bb = bb + 1; | |
aa = aa - 1; | |
} while (aa > -1); | |
} | |
if (bb % 2 === 1) { | |
return true; | |
} | |
return false; | |
}; | |
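//scan the content character by character until a tag, template, or external code boundary is reached | |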
if (a < c) { | |
let end = "", quote = "", quotes = 0; | |
do { | |
// external code requires additional parsing to look for the appropriate end | |
// tag, but that end tag cannot be quoted or commented | |
if (ext === true) { | |
if (quote === "") { | |
if (b[a] === "/") { | |
if (b[a + 1] === "*") { | |
quote = "*"; | |
} | |
else if (b[a + 1] === "/") { | |
quote = "/"; | |
} | |
else if (name === "script" && "([{!=,;.?:&<>".indexOf(b[a - 1]) > -1) { | |
if (options.lang !== "jsx" || b[a - 1] !== "<") { | |
quote = "reg"; | |
} | |
} | |
} | |
else if ((b[a] === "\"" || b[a] === "'" || b[a] === "`") && esctest() === false) { | |
quote = b[a]; | |
} | |
else if (b[a] === "{" && jsxbrace === true) { | |
quotes = quotes + 1; | |
} | |
else if (b[a] === "}" && jsxbrace === true) { | |
if (quotes === 0) { | |
framework.lexer.script(lex.join("").replace(/^(\s+)/, "").replace(/(\s+)$/, "")); | |
record.begin = parse.structure[parse.structure.length - 1][1] + 1; | |
record.token = "}"; | |
record.types = "script"; | |
parse.push(data, record, ""); | |
parse.structure.pop(); | |
break; | |
} | |
quotes = quotes - 1; | |
} | |
end = b | |
.slice(a, a + 10) | |
.join("") | |
.toLowerCase(); | |
//cfscript requires use of the script lexer | |
if (name === "cfscript" && end === "</cfscript") { | |
a = a - 1; | |
if (lex.length < 1) { | |
break; | |
} | |
framework.lexer.script(lex.join("").replace(/^(\s+)/, "").replace(/(\s+)$/, "")); | |
break; | |
} | |
//script requires use of the script lexer | |
if (name === "script") { | |
if (a === c - 9) { | |
end = end.slice(0, end.length - 1); | |
} | |
else { | |
end = end.slice(0, end.length - 2); | |
} | |
if (end === "</script") { | |
let outside = lex.join("").replace(/^(\s+)/, "").replace(/(\s+)$/, ""); | |
a = a - 1; | |
if (lex.length < 1) { | |
break; | |
} | |
if ((/^(<!--+)/).test(outside) === true && (/(--+>)$/).test(outside) === true) { | |
record.token = "<!--"; | |
record.types = "comment"; | |
parse.push(data, record, ""); | |
outside = outside.replace(/^(<!--+)/, "").replace(/(--+>)$/, ""); | |
framework.lexer.script(outside); | |
record.token = "-->"; | |
parse.push(data, record, ""); | |
} | |
else { | |
framework.lexer.script(outside); | |
} | |
break; | |
} | |
} | |
//style requires use of the style lexer | |
if (name === "style") { | |
if (a === c - 8) { | |
end = end.slice(0, end.length - 1); | |
} | |
else if (a === c - 9) { | |
end = end.slice(0, end.length - 2); | |
} | |
else { | |
end = end.slice(0, end.length - 3); | |
} | |
if (end === "</style") { | |
let outside = lex.join("").replace(/^(\s+)/, "").replace(/(\s+)$/, ""); | |
a = a - 1; | |
if (lex.length < 1) { | |
break; | |
} | |
if ((/^(<!--+)/).test(outside) === true && (/(--+>)$/).test(outside) === true) { | |
record.token = "<!--"; | |
record.types = "comment"; | |
parse.push(data, record, ""); | |
outside = outside.replace(/^(<!--+)/, "").replace(/(--+>)$/, ""); | |
framework.lexer.style(outside); | |
record.token = "-->"; | |
parse.push(data, record, ""); | |
} | |
else { | |
framework.lexer.style(outside); | |
} | |
break; | |
} | |
} | |
} | |
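//close any quote, comment, template literal, or regular expression opened while scanning external code | |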
else if (quote === b[a] && (quote === "\"" || quote === "'" || quote === "`" || (quote === "*" && b[a + 1] === "/")) && esctest() === false) { | |
quote = ""; | |
} | |
else if (quote === "`" && b[a] === "$" && b[a + 1] === "{" && esctest() === false) { | |
quote = "}"; | |
} | |
else if (quote === "}" && b[a] === "}" && esctest() === false) { | |
quote = "`"; | |
} | |
else if (quote === "/" && (b[a] === "\n" || b[a] === "\r")) { | |
quote = ""; | |
} | |
else if (quote === "reg" && b[a] === "/" && esctest() === false) { | |
quote = ""; | |
} | |
else if (quote === "/" && b[a] === ">" && b[a - 1] === "-" && b[a - 2] === "-") { | |
end = b | |
.slice(a + 1, a + 11) | |
.join("") | |
.toLowerCase(); | |
if (name === "cfscript" && end === "</cfscript") { | |
quote = ""; | |
} | |
end = end.slice(0, end.length - 2); | |
if (name === "script" && end === "</script") { | |
quote = ""; | |
} | |
end = end.slice(0, end.length - 1); | |
if (name === "style" && end === "</style") { | |
quote = ""; | |
} | |
} | |
} | |
//typically this logic is for artifacts nested within an SGML tag | |
if (square === true && b[a] === "]") { | |
a = a - 1; | |
ltoke = minspace + lex.join(""); | |
liner = 0; | |
record.token = ltoke; | |
parse.push(data, record, ""); | |
break; | |
} | |
//general content processing | |
if (ext === false && lex.length > 0 && ((b[a] === "<" && b[a + 1] !== "=" && (/\s|\d/).test(b[a + 1]) === false) || (b[a] === "[" && b[a + 1] === "%") || (b[a] === "{" && (options.lang === "jsx" || options.lang === "dustjs" || b[a + 1] === "{" || b[a + 1] === "%" || b[a + 1] === "@" || b[a + 1] === "#")))) { | |
//dustjs template handling | |
if (options.lang === "dustjs" && b[a] === "{" && b[a + 1] === ":" && b[a + 2] === "e" && b[a + 3] === "l" && b[a + 4] === "s" && b[a + 5] === "e" && b[a + 6] === "}") { | |
a = a + 6; | |
ltoke = minspace + lex.join(""); | |
liner = 0; | |
record.token = ltoke; | |
parse.push(data, record, ""); | |
record.token = "{:else}"; | |
record.types = "template_else"; | |
record.presv = false; | |
parse.push(data, record, ""); | |
break; | |
} | |
//regular content | |
a = a - 1; | |
ltoke = minspace + lex.join(""); | |
liner = 0; | |
record.token = ltoke; | |
parse.push(data, record, ""); | |
break; | |
} | |
lex.push(b[a]); | |
a = a + 1; | |
} while (a < c); | |
} | |
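//record how many lines of whitespace follow the content so spacing can be preserved | |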
if (a > now && a < c) { | |
if ((/\s/).test(b[a]) === true) { | |
let x = a; | |
parse.linesSpace = 1; | |
do { | |
if (b[x] === "\n") { | |
parse.lineNumber = parse.lineNumber + 1; | |
parse.linesSpace = parse.linesSpace + 1; | |
} | |
x = x - 1; | |
} while (x > now && (/\s/).test(b[x]) === true); | |
} | |
else { | |
parse.linesSpace = 0; | |
} | |
} | |
else if (a !== now || (a === now && ext === false)) { | |
//regular content at the end of the supplied source | |
ltoke = minspace + lex.join(""); | |
liner = 0; | |
//this condition prevents adding content that was just added in the loop above | |
if (record.token !== ltoke) { | |
record.token = ltoke; | |
parse.push(data, record, ""); | |
parse.linesSpace = 0; | |
} | |
} | |
ext = false; | |
}; | |
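//the main loop of the markup lexer: dispatch each character to tag, template, or content handling | |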
do { | |
if ((/\s/).test(b[a]) === true) { | |
a = parse.spacer({ array: b, end: c, index: a }); | |
} | |
else if (ext) { | |
content(); | |
} | |
else if (b[a] === "<") { | |
tag(""); | |
} | |
else if (b[a] === "[" && b[a + 1] === "%") { | |
tag("%]"); | |
} | |
else if (b[a] === "{" && (options.lang === "jsx" || options.lang === "dustjs" || b[a + 1] === "{" || b[a + 1] === "%" || b[a + 1] === "@" || b[a + 1] === "#")) { | |
tag(""); | |
} | |
else if (b[a] === "]" && sgmlflag > 0) { | |
tag("]>"); | |
} | |
else if (b[a] === "-" && b[a + 1] === "-" && b[a + 2] === "-" && options.lang === "jekyll") { | |
tag("---"); | |
} | |
else if (options.lang === "apacheVelocity" && (/\d/).test(b[a + 1]) === false && (/\s/).test(b[a + 1]) === false) { | |
if (b[a] === "#" && ((/\w/).test(b[a + 1]) === true || b[a + 1] === "*" || b[a + 1] === "#" || (b[a + 1] === "[" && b[a + 2] === "["))) { | |
tag(""); | |
} | |
else if (b[a] === "$" && b[a + 1] !== "$" && b[a + 1] !== "=" && b[a + 1] !== "[") { | |
tag(""); | |
} | |
else { | |
content(); | |
} | |
} | |
else { | |
content(); | |
} | |
a = a + 1; | |
} while (a < c); | |
return data; | |
}; | |
framework.lexer.markup = markup; | |
}()); | |
/*global global*/ | |
(function style_init() { | |
"use strict"; | |
const framework = window.parseFramework, style = function lexer_style(source) { | |
let a = 0, ltype = "", ltoke = "", endtest = false; | |
const parse = framework.parse, data = parse.data, options = parse.options, colors = [], colorNames = { | |
aliceblue: 0.9288006825347457, | |
antiquewhite: 0.8464695170775405, | |
aqua: 0.7874, | |
aquamarine: 0.8078549208338043, | |
azure: 0.9726526495416643, | |
beige: 0.8988459998705021, | |
bisque: 0.8073232737297876, | |
black: 0, | |
blanchedalmond: 0.8508443960815607, | |
blue: 0.0722, | |
blueviolet: 0.12622014321946043, | |
brown: 0.09822428787651079, | |
burlywood: 0.5155984453389335, | |
cadetblue: 0.29424681085422044, | |
chartreuse: 0.7603202590262282, | |
chocolate: 0.23898526114557292, | |
coral: 0.3701793087292368, | |
cornflowerblue: 0.30318641994179363, | |
cornsilk: 0.9356211037296492, | |
crimson: 0.16042199953025577, | |
cyan: 0.7874, | |
darkblue: 0.018640801980939217, | |
darkcyan: 0.2032931783904645, | |
darkgoldenrod: 0.27264703559992554, | |
darkgray: 0.39675523072562674, | |
darkgreen: 0.09114342904757505, | |
darkgrey: 0.39675523072562674, | |
darkkhaki: 0.45747326349994155, | |
darkmagenta: 0.07353047651207048, | |
darkolivegreen: 0.12651920884889156, | |
darkorange: 0.40016167026523863, | |
darkorchid: 0.1341314217485677, | |
darkred: 0.05488967453113126, | |
darksalmon: 0.4054147156338075, | |
darkseagreen: 0.43789249325969054, | |
darkslateblue: 0.06579284622798763, | |
darkslategray: 0.06760815192804355, | |
darkslategrey: 0.06760815192804355, | |
darkturquoise: 0.4874606277449034, | |
darkviolet: 0.10999048339343433, | |
deeppink: 0.2386689582827583, | |
deepskyblue: 0.444816033955754, | |
dimgray: 0.14126329114027164, | |
dimgrey: 0.14126329114027164, | |
dodgerblue: 0.2744253699145608, | |
firebrick: 0.10724525535015225, | |
floralwhite: 0.9592248482500424, | |
forestgreen: 0.18920812076002244, | |
fuchsia: 0.2848, | |
gainsboro: 0.7156935005064806, | |
ghostwhite: 0.9431126188632283, | |
gold: 0.6986087742815887, | |
goldenrod: 0.41919977809568404, | |
gray: 0.21586050011389915, | |
green: 0.15438342968146068, | |
greenyellow: 0.8060947261145331, | |
grey: 0.21586050011389915, | |
honeydew: 0.9633653555478173, | |
hotpink: 0.3465843816971475, | |
indianred: 0.21406134963884, | |
indigo: 0.031075614863369846, | |
ivory: 0.9907127060061531, | |
khaki: 0.7701234339412052, | |
lavender: 0.8031875051452125, | |
lavenderblush: 0.9017274863104644, | |
lawngreen: 0.7390589312496334, | |
lemonchiffon: 0.9403899224562171, | |
lightblue: 0.6370914128080659, | |
lightcoral: 0.35522120733134843, | |
lightcyan: 0.9458729349482863, | |
lightgoldenrodyellow: 0.9334835101829635, | |
lightgray: 0.651405637419824, | |
lightgreen: 0.6909197995686475, | |
lightgrey: 0.651405637419824, | |
lightpink: 0.5856615273489745, | |
lightsalmon: 0.47806752252059587, | |
lightseagreen: 0.3505014511704197, | |
lightskyblue: 0.5619563761833096, | |
lightslategray: 0.23830165007286924, | |
lightslategrey: 0.23830165007286924, | |
lightyellow: 0.9816181839288161, | |
lime: 0.7152, | |
limegreen: 0.44571042246097864, | |
linen: 0.8835734098437936, | |
magenta: 0.2848, | |
maroon: 0.04589194232421496, | |
mediumaquamarine: 0.4938970331080111, | |
mediumblue: 0.04407778021232784, | |
mediumorchid: 0.21639251153773428, | |
mediumpurple: 0.22905858091648004, | |
mediumseagreen: 0.34393112338131226, | |
mediumslateblue: 0.20284629471622434, | |
mediumspringgreen: 0.7070430819418444, | |
mediumturquoise: 0.5133827926447991, | |
mediumvioletred: 0.14371899849357186, | |
midnightblue: 0.020717866350860484, | |
mintcream: 0.9783460494758793, | |
mistyrose: 0.8218304785918541, | |
moccasin: 0.8008300099156694, | |
navajowhite: 0.7651968234278562, | |
navy: 0.015585128108223519, | |
oldlace: 0.9190063340554899, | |
olive: 0.20027537200567563, | |
olivedrab: 0.2259315095192918, | |
orange: 0.48170267036309605, | |
orangered: 0.2551624375341641, | |
orchid: 0.3134880676143873, | |
palegoldenrod: 0.7879264788761452, | |
palegreen: 0.7793675900635259, | |
paleturquoise: 0.764360779217138, | |
palevioletred: 0.2875499411788909, | |
papayawhip: 0.8779710019983541, | |
peachpuff: 0.7490558987825108, | |
peru: 0.3011307487793569, | |
pink: 0.6327107070246611, | |
plum: 0.4573422158796909, | |
powderblue: 0.6825458650060524, | |
purple: 0.061477070432438476, | |
red: 0.2126, | |
rosybrown: 0.3231945764940708, | |
royalblue: 0.16663210743188323, | |
saddlebrown: 0.09792228502052071, | |
salmon: 0.3697724152759545, | |
sandybrown: 0.46628543696283414, | |
seagreen: 0.1973419970627483, | |
seashell: 0.927378622069223, | |
sienna: 0.13697631337097677, | |
silver: 0.527115125705813, | |
skyblue: 0.5529166851818412, | |
slateblue: 0.14784278062136097, | |
slategray: 0.20896704076536138, | |
slategrey: 0.20896704076536138, | |
lightsteelblue: 0.5398388828466575, | |
snow: 0.9653334183484877, | |
springgreen: 0.7305230606852947, | |
steelblue: 0.20562642207624846, | |
tan: 0.48237604163921527, | |
teal: 0.1699685577896842, | |
thistle: 0.5681840109373312, | |
tomato: 0.3063861271941505, | |
turquoise: 0.5895536427577983, | |
violet: 0.40315452986676303, | |
wheat: 0.7490970282048214, | |
white: 1, | |
whitesmoke: 0.913098651793419, | |
yellow: 0.9278, | |
yellowgreen: 0.5076295720870697 | |
}, b = source.split(""), len = source.length, mapper = [], nosort = [], recordStore = function lexer_style_recordStore(index) { | |
return { | |
begin: data.begin[index], | |
lexer: data.lexer[index], | |
lines: data.lines[index], | |
presv: data.presv[index], | |
stack: data.stack[index], | |
token: data.token[index], | |
types: data.types[index] | |
}; | |
}, recordPush = function lexer_style_recordPush(structure) { | |
const record = { | |
begin: parse.structure[parse.structure.length - 1][1], | |
lexer: "style", | |
lines: parse.linesSpace, | |
presv: (ltype === "ignore") | |
? true | |
: false, | |
stack: parse.structure[parse.structure.length - 1][0], | |
token: ltoke, | |
types: ltype | |
}; | |
parse.push(data, record, structure); | |
}, esctest = function lexer_style_esctest(xx) { | |
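//returns true when the character at the given index is preceded by an odd number of backslashes, meaning it is escaped | |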
let yy = xx; | |
do { | |
xx = xx - 1; | |
} while (xx > 0 && b[xx] === "\\"); | |
if ((yy - xx) % 2 === 0) { | |
return true; | |
} | |
return false; | |
}, | |
// Since I am already identifying value types this is a good place to do some | |
// quick analysis and clean up on certain value conditions. These things are | |
// being corrected: | |
// * fractional values missing a leading 0 are provided a leading 0 | |
// * 0 values with a dimension indicator (px, em) have the dimension | |
// indicator removed | |
// * eliminate unnecessary leading 0s | |
// * url values that are not quoted are wrapped in double quote characters | |
// * color values are set to lowercase and reduced from 6 to 3 digits if | |
// appropriate | |
value = function lexer_style_item_value(val) { | |
const x = val.split(""), values = [], transition = (data.token[parse.count - 2] === "transition"), colorPush = function lexer_style_item_value_colorPush(value) { | |
const vl = value.toLowerCase(); | |
if ((/^(#[0-9a-f]{3,6})$/).test(vl) === true) { | |
colors.push(value); | |
} | |
else if ((/^(rgba?\()/).test(vl) === true) { | |
colors.push(value); | |
} | |
else if (colorNames[vl] !== undefined) { | |
colors.push(value); | |
} | |
return value; | |
}; | |
let cc = 0, dd = 0, block = "", leng = x.length, items = []; | |
// this loop identifies containment so that tokens and sub-tokens are correctly | |
// captured | |
if (cc < leng) { | |
do { | |
items.push(x[cc]); | |
if (block === "") { | |
if (x[cc] === "\"") { | |
block = "\""; | |
dd = dd + 1; | |
} | |
else if (x[cc] === "'") { | |
block = "'"; | |
dd = dd + 1; | |
} | |
else if (x[cc] === "(") { | |
block = ")"; | |
dd = dd + 1; | |
} | |
else if (x[cc] === "[") { | |
block = "]"; | |
dd = dd + 1; | |
} | |
} | |
else if ((x[cc] === "(" && block === ")") || (x[cc] === "[" && block === "]")) { | |
dd = dd + 1; | |
} | |
else if (x[cc] === block) { | |
dd = dd - 1; | |
if (dd === 0) { | |
block = ""; | |
} | |
} | |
if (block === "" && x[cc] === " ") { | |
items.pop(); | |
values.push(colorPush(items.join(""))); | |
items = []; | |
} | |
cc = cc + 1; | |
} while (cc < leng); | |
} | |
values.push(colorPush(items.join(""))); | |
leng = values.length; | |
//This is where the rules mentioned above are applied | |
cc = 0; | |
if (cc < leng) { | |
do { | |
if ((/^(0+([a-z]{2,3}|%))$/).test(values[cc]) === true && transition === false) { | |
values[cc] = "0"; | |
} | |
else if ((/^(0+)/).test(values[cc]) === true) { | |
values[cc] = values[cc].replace(/0+/, "0"); | |
if ((/\d/).test(values[cc].charAt(1)) === true) { | |
values[cc] = values[cc].substr(1); | |
} | |
} | |
else if ((/^url\((?!\$)/).test(values[cc]) === true && values[cc].charAt(values[cc].length - 1) === ")") { | |
block = values[cc].charAt(values[cc].indexOf("url(") + 4); | |
if (block !== "@" && block !== "{" && block !== "<") { | |
values[cc] = values[cc] | |
.replace(/url\(\s*('|")?/, "url(\"") | |
.replace(/(('|")?\s*\))$/, "\")"); | |
} | |
} | |
cc = cc + 1; | |
} while (cc < leng); | |
} | |
return values.join(" "); | |
}, | |
//the generic token builder | |
buildtoken = function lexer_style_build() { | |
let aa = a, bb = 0, out = [], outy = "", mappy = 0; | |
const block = [], comma = (parse.count > -1 && data.token[parse.count].charAt(data.token[parse.count].length - 1) === ","), spacestart = function lexer_style_build_spacestart() { | |
if ((/\s/).test(b[aa + 1]) === true) { | |
do { | |
aa = aa + 1; | |
} while ((/\s/).test(b[aa + 1]) === true); | |
} | |
}; | |
//this loop accounts for grouping mechanisms | |
if (aa < len) { | |
do { | |
out.push(b[aa]); | |
if (b[aa - 1] !== "\\" || esctest(aa) === false) { | |
if (b[aa] === "\"" && block[block.length - 1] !== "'") { | |
if (block[block.length - 1] === "\"") { | |
block.pop(); | |
} | |
else { | |
block.push("\""); | |
} | |
} | |
else if (b[aa] === "'" && block[block.length - 1] !== "\"") { | |
if (block[block.length - 1] === "'") { | |
block.pop(); | |
} | |
else { | |
block.push("'"); | |
} | |
} | |
else if (block[block.length - 1] !== "\"" && block[block.length - 1] !== "'") { | |
if (b[aa] === "(") { | |
mappy = mappy + 1; | |
block.push(")"); | |
spacestart(); | |
} | |
else if (b[aa] === "[") { | |
block.push("]"); | |
spacestart(); | |
} | |
else if (b[aa] === "#" && b[aa + 1] === "{") { | |
out.push("{"); | |
aa = aa + 1; | |
block.push("}"); | |
spacestart(); | |
} | |
else if (b[aa] === block[block.length - 1]) { | |
block.pop(); | |
if ((/\s/).test(out[out.length - 2]) === true) { | |
out.pop(); | |
do { | |
out.pop(); | |
} while ((/\s/).test(out[out.length - 1]) === true); | |
out.push(b[aa]); | |
} | |
} | |
} | |
} | |
if (parse.structure[parse.structure.length - 1][0] === "map" && block.length === 0 && (b[aa + 1] === "," || b[aa + 1] === ")")) { | |
if (b[aa + 1] === ")" && data.token[parse.count] === "(") { | |
parse.pop(data); | |
out = ["("]; | |
aa = a - 1; | |
} | |
else { | |
break; | |
} | |
} | |
if (b[aa + 1] === ":") { | |
bb = aa; | |
if ((/\s/).test(b[bb]) === true) { | |
do { | |
bb = bb - 1; | |
} while ((/\s/).test(b[bb]) === true); | |
} | |
outy = b | |
.slice(bb - 6, bb + 1) | |
.join(""); | |
if (outy.indexOf("filter") === outy.length - 6 || outy.indexOf("progid") === outy.length - 6) { | |
outy = "filter"; | |
} | |
} | |
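//terminate the token at a semicolon, colon, brace, or comment start that is not inside a containment block | |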
if (block.length === 0 && ((b[aa + 1] === ";" && esctest(aa + 1) === false) || (b[aa + 1] === ":" && b[aa] !== ":" && b[aa + 2] !== ":" && outy !== "filter" && outy !== "progid") || b[aa + 1] === "}" || b[aa + 1] === "{" || (b[aa + 1] === "/" && (b[aa + 2] === "*" || b[aa + 2] === "/")))) { | |
bb = out.length - 1; | |
if ((/\s/).test(out[bb]) === true) { | |
do { | |
bb = bb - 1; | |
aa = aa - 1; | |
out.pop(); | |
} while ((/\s/).test(out[bb]) === true); | |
} | |
break; | |
} | |
if (out[0] === "@" && block.length === 0 && (b[aa + 1] === "\"" || b[aa + 1] === "'")) { | |
break; | |
} | |
aa = aa + 1; | |
} while (aa < len); | |
} | |
a = aa; | |
if (parse.structure[parse.structure.length - 1][0] === "map" && out[0] === "(") { | |
mapper[mapper.length - 1] = mapper[mapper.length - 1] - 1; | |
} | |
if (comma === true && parse.structure[parse.structure.length - 1][0] !== "map" && data.types[parse.count] !== "comment" && data.types[parse.count] !== "ignore") { | |
data.token[parse.count] = data.token[parse.count] + out | |
.join("") | |
.replace(/\s+/g, " ") | |
.replace(/^\s/, "") | |
.replace(/\s$/, ""); | |
return; | |
} | |
ltoke = out | |
.join("") | |
.replace(/\s+/g, " ") | |
.replace(/^\s/, "") | |
.replace(/\s$/, ""); | |
if (parse.count > -1 && data.token[parse.count].indexOf("extend(") === 0) { | |
ltype = "pseudo"; | |
} | |
else if (parse.count > -1 && "\"'".indexOf(data.token[parse.count].charAt(0)) > -1 && data.types[parse.count] === "variable") { | |
ltype = "item"; | |
} | |
else if (out[0] === "@" || out[0] === "$") { | |
if (data.types[parse.count] === "colon" && (data.types[parse.count - 1] === "property" || data.types[parse.count - 1] === "variable")) { | |
ltype = "value"; | |
} | |
else if (parse.count > -1) { | |
ltype = "variable"; | |
outy = data.token[parse.count]; | |
aa = outy.indexOf("("); | |
if (outy.charAt(outy.length - 1) === ")" && aa > 0) { | |
outy = outy.slice(aa + 1, outy.length - 1); | |
data.token[parse.count] = data | |
.token[parse.count] | |
.slice(0, aa + 1) + value(outy) + ")"; | |
} | |
} | |
else { | |
ltype = "variable"; | |
} | |
} | |
else { | |
ltype = "item"; | |
} | |
recordPush(""); | |
}, | |
// Some tokens receive a generic type named 'item' because their type is unknown | |
// until we know the following syntax. This function replaces the type 'item' | |
// with something more specific. | |
item = function lexer_style_item(type) { | |
let aa = parse.count + 1, bb = 0; | |
const coms = [], tokel = (parse.count > 0) | |
? data.token[parse.count - 1] | |
: "", toked = tokel.slice(tokel.length - 2); | |
//backtrack through immediately prior comments to find the correct token | |
if (ltype === "comment" || ltype === "ignore") { | |
do { | |
aa = aa - 1; | |
ltype = data.types[aa]; | |
coms.push(data.token[aa]); | |
} while (aa > 0 && (ltype === "comment" || ltype === "ignore")); | |
} | |
else { | |
aa = aa - 1; | |
} | |
//if the last non-comment type is 'item' then identify it | |
if (ltype === "item" && data.lexer[aa] === "style") { | |
if (type === "start") { | |
if (data.types[aa - 1] !== "comment" && data.types[aa - 1] !== "ignore" && data.types[aa - 1] !== "end" && data.types[aa - 1] !== "start" && data.types[aa - 1] !== "semi" && data.types[aa - 1] !== undefined && data.lexer[aa - 1] === "style") { | |
let cc = aa, dd = 0; | |
const parts = []; | |
do { | |
parts.push(data.token[cc]); | |
if (data.lines[cc] > 0 && data.token[cc] === ":" && data.token[cc - 1] !== ":") { | |
parts.push(" "); | |
} | |
else if (data.token[cc] !== ":") { | |
parts.push(" "); | |
} | |
cc = cc - 1; | |
} while (cc > -1 && data.types[cc] !== "comment" && data.types[cc] !== "ignore" && data.types[cc] !== "end" && data.types[cc] !== "start" && data.types[cc] !== "semi" && data.types[cc] !== undefined); | |
parts.reverse(); | |
cc = cc + 1; | |
dd = aa - cc; | |
parse.splice({ data: data, howmany: dd, index: cc, record: { | |
begin: 0, | |
lexer: "", | |
lines: 0, | |
presv: false, | |
stack: "", | |
token: "", | |
types: "" | |
} }); | |
aa = aa - dd; | |
data.token[aa] = parts | |
.join("") | |
.replace(/:\u0020/g, ":") | |
.replace(/(\s*,\s*)/g, ","); | |
} | |
else { | |
data.token[aa] = data | |
.token[aa] | |
.replace(/(\s*,\s*)/g, ","); | |
} | |
data.token[aa] = data | |
.token[aa] | |
.replace(/\s*&/, " &") | |
.replace(/\s*>\s*/g, " > ") | |
.replace(/:\s+/g, ": ") | |
.replace(/^(\s+)/, "") | |
.replace(/(\s+)$/, "") | |
.replace(/\s+::\s+/, "::"); | |
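//split the selector on commas that are outside quotes, parentheses, and braces so the pieces can be sorted | |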
let y = 0, z = "", mark = 0; | |
const toke = data.token[aa], slen = toke.length, list = []; | |
if (y < slen) { | |
do { | |
if (z === "" && toke.charAt(y) === ",") { | |
list.push(toke.slice(mark, y)); | |
mark = y + 1; | |
} | |
else if (toke.charAt(y) === "\"" || toke.charAt(y) === "'" || toke.charAt(y) === "(" || toke.charAt(y) === "{") { | |
z = toke.charAt(y); | |
} | |
else if (toke.charAt(y) === z && (z === "\"" || z === "'")) { | |
z = ""; | |
} | |
else if (toke.charAt(y) === ")" && z === "(") { | |
z = ""; | |
} | |
else if (toke.charAt(y) === "}" && z === "{") { | |
z = ""; | |
} | |
y = y + 1; | |
} while (y < slen); | |
} | |
list.push(toke.slice(mark, y)); | |
list.sort(); | |
data.token[aa] = list | |
.join(",") | |
.replace(/^(\s+)/, ""); | |
data.types[aa] = "selector"; | |
ltype = "selector"; | |
} | |
else if (type === "end") { | |
data.types[aa] = "value"; | |
ltype = "value"; | |
data.token[aa] = data | |
.token[aa] | |
.replace(/\s*!\s+important/, " !important"); | |
data.token[aa] = value(data.token[aa]); | |
//take comments out until the 'item' is found and then put the comments back | |
if (data.token[parse.count - 1] === "{") { | |
data.types[parse.count] = "variable"; | |
} | |
else if (parse.structure[parse.structure.length - 1][0] === data.token[data.begin[parse.count] - 1] && options.correct === true) { | |
if (coms.length > 0 && ltype !== "semi" && ltype !== "end" && ltype !== "start") { | |
aa = coms.length - 1; | |
do { | |
parse.pop(data); | |
aa = aa - 1; | |
} while (aa > 0); | |
ltoke = ";"; | |
ltype = "semi"; | |
recordPush(""); | |
bb = coms.length - 1; | |
do { | |
ltoke = coms[aa]; | |
ltype = "comment"; | |
recordPush(""); | |
aa = aa + 1; | |
} while (aa < bb); | |
} | |
else { | |
ltoke = ";"; | |
ltype = "semi"; | |
recordPush(""); | |
} | |
} | |
} | |
else if (type === "semi") { | |
if (data.types[aa - 1] === "colon") { | |
data.types[aa] = "value"; | |
ltype = "value"; | |
data.token[aa] = data | |
.token[aa] | |
.replace(/\s*!\s+important/, " !important"); | |
data.token[aa] = value(data.token[aa]); | |
} | |
else { | |
//properties without values are considered variables | |
if (data.types[aa] !== "value") { | |
if (data.types[aa] === "item" && data.types[aa - 1] === "value" && (toked === "}}" || toked === "?>" || toked === "->" || toked === "%}" || toked === "%>")) { | |
if (Number.isNaN(Number(data.token[parse.count])) === false) { | |
data.token[parse.count - 1] = tokel + data.token[parse.count]; | |
} | |
else { | |
data.token[parse.count - 1] = tokel + " " + data.token[parse.count]; | |
} | |
parse.pop(data); | |
return; | |
} | |
data.types[aa] = "variable"; | |
ltype = "variable"; | |
} | |
if (data.token[aa].indexOf("\"") > 0) { | |
bb = data | |
.token[aa] | |
.indexOf("\""); | |
a = a - (data.token[aa].length - bb); | |
data.token[aa] = data | |
.token[aa] | |
.slice(0, bb); | |
buildtoken(); | |
} | |
else if (data.token[aa].indexOf("'") > 0) { | |
bb = data | |
.token[aa] | |
.indexOf("'"); | |
a = a - (data.token[aa].length - bb); | |
data.token[aa] = data | |
.token[aa] | |
.slice(0, bb); | |
buildtoken(); | |
} | |
else if ((/\s/).test(data.token[aa]) === true) { | |
bb = data | |
.token[aa] | |
.replace(/\s/, " ") | |
.indexOf(" "); | |
if (bb < data.token[aa].indexOf("(") && bb < data.token[aa].indexOf("[")) { | |
a = a - (data.token[aa].length - bb); | |
data.token[aa] = data | |
.token[aa] | |
.slice(0, bb); | |
buildtoken(); | |
} | |
} | |
} | |
} | |
else if (type === "colon") { | |
data.types[aa] = "property"; | |
ltype = "property"; | |
} | |
else if (data.token[aa].charAt(0) === "@" && ((data.types[aa - 2] !== "variable" && data.types[aa - 2] !== "property") || data.types[aa - 1] === "semi")) { | |
data.types[aa] = "variable"; | |
ltype = "variable"; | |
} | |
} | |
}, semiComment = function lexer_style_semiComment() { | |
let x = parse.count; | |
do { | |
x = x - 1; | |
} while (x > 0 && (data.types[x] === "comment")); | |
parse.splice({ | |
data: data, | |
howmany: 0, | |
index: x + 1, | |
record: { | |
begin: parse.structure[parse.structure.length - 1][1], | |
lexer: "style", | |
lines: parse.linesSpace, | |
presv: false, | |
stack: parse.structure[parse.structure.length - 1][0], | |
token: ";", | |
types: "semi" | |
} | |
}); | |
}, template = function lexer_style_template(open, end) { | |
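//consumes a template tag delimited by the supplied open and end sequences and classifies it as start, else, end, or singleton | |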
let quote = "", name = "", start = open.length, endlen = 0; | |
const store = [], exit = function lexer_style_template_exit(typename) { | |
const endtype = data.types[parse.count - 1]; | |
if (ltype === "item") { | |
if (endtype === "colon") { | |
data.types[parse.count] = "value"; | |
} | |
else { | |
item(endtype); | |
} | |
} | |
ltype = typename; | |
recordPush(""); | |
}; | |
nosort[nosort.length - 1] = true; | |
if (a < len) { | |
do { | |
store.push(b[a]); | |
if (quote === "") { | |
if (b[a] === "\"") { | |
quote = "\""; | |
} | |
else if (b[a] === "'") { | |
quote = "'"; | |
} | |
else if (b[a] === "/") { | |
if (b[a + 1] === "/") { | |
quote = "/"; | |
} | |
else if (b[a + 1] === "*") { | |
quote = "*"; | |
} | |
} | |
else if (b[a + 1] === end.charAt(0)) { | |
do { | |
endlen = endlen + 1; | |
a = a + 1; | |
store.push(b[a]); | |
} while (a < len && endlen < end.length && b[a + 1] === end.charAt(endlen)); | |
if (endlen === end.length) { | |
quote = store.join(""); | |
if ((/\s/).test(quote.charAt(start)) === true) { | |
do { | |
start = start + 1; | |
} while ((/\s/).test(quote.charAt(start)) === true); | |
} | |
endlen = start; | |
do { | |
endlen = endlen + 1; | |
} while (endlen < end.length && (/\s/).test(quote.charAt(endlen)) === false); | |
if (endlen === quote.length) { | |
endlen = endlen - end.length; | |
} | |
if (open === "{%") { | |
if (quote.indexOf("{%-") === 0) { | |
quote = quote | |
.replace(/^(\{%-\s*)/, "{%- ") | |
.replace(/(\s*-%\})$/, " -%}"); | |
name = quote.slice(4); | |
} | |
else { | |
quote = quote | |
.replace(/^(\{%\s*)/, "{% ") | |
.replace(/(\s*%\})$/, " %}"); | |
name = quote.slice(3); | |
} | |
} | |
if (open === "{{") { | |
quote = quote | |
.replace(/^(\{\{\s+)/, "{{") | |
.replace(/(\s+\}\})$/, "}}"); | |
} | |
if (ltype === "item" && data.types[parse.count - 1] === "colon" && (data.types[parse.count - 2] === "property" || data.types[parse.count - 2] === "variable")) { | |
ltype = "value"; | |
data.types[parse.count] = "value"; | |
if (Number.isNaN(Number(data.token[parse.count])) === true && data.token[parse.count].charAt(data.token[parse.count].length - 1) !== ")") { | |
data.token[parse.count] = data.token[parse.count] + quote; | |
} | |
else { | |
data.token[parse.count] = data.token[parse.count] + " " + quote; | |
} | |
return; | |
} | |
ltoke = quote; | |
if (open === "{%") { | |
const templateNames = [ | |
"autoescape", | |
"block", | |
"capture", | |
"case", | |
"comment", | |
"embed", | |
"filter", | |
"for", | |
"form", | |
"if", | |
"macro", | |
"paginate", | |
"raw", | |
"sandbox", | |
"spaceless", | |
"tablerow", | |
"unless", | |
"verbatim" | |
]; | |
let namesLen = templateNames.length - 1; | |
name = name.slice(0, name.indexOf(" ")); | |
if (name.indexOf("(") > 0) { | |
name = name.slice(0, name.indexOf("(")); | |
} | |
if (name === "else" || name === "elseif" || name === "when" || name === "elif") { | |
exit("template_else"); | |
return; | |
} | |
namesLen = templateNames.length - 1; | |
if (namesLen > -1) { | |
do { | |
if (name === templateNames[namesLen]) { | |
exit("template_start"); | |
return; | |
} | |
if (name === "end" + templateNames[namesLen]) { | |
exit("template_end"); | |
return; | |
} | |
namesLen = namesLen - 1; | |
} while (namesLen > -1); | |
} | |
} | |
else if (open === "{{") { | |
let group = quote.slice(2), ending = group.length, begin = 0; | |
do { | |
begin = begin + 1; | |
} while (begin < ending && (/\s/).test(group.charAt(begin)) === false && group.charAt(start) !== "("); | |
group = group.slice(0, begin); | |
if (group.charAt(group.length - 2) === "}") { | |
group = group.slice(0, group.length - 2); | |
} | |
if (group === "end") { | |
exit("template_end"); | |
return; | |
} | |
if (group === "block" || group === "define" || group === "form" || group === "if" || group === "range" || group === "with") { | |
exit("template_start"); | |
return; | |
} | |
} | |
exit("template"); | |
return; | |
} | |
endlen = 0; | |
} | |
} | |
else if (quote === b[a]) { | |
if (quote === "\"" || quote === "'") { | |
quote = ""; | |
} | |
else if (quote === "/" && (b[a] === "\r" || b[a] === "\n")) { | |
quote = ""; | |
} | |
else if (quote === "*" && b[a + 1] === "/") { | |
quote = ""; | |
} | |
} | |
a = a + 1; | |
} while (a < len); | |
} | |
}, | |
//finds comments, including JavaScript style '//' comments | |
comment = function lexer_style_comment(inline) { | |
let aa = a + 1, bb = 0, ender = [], ignorecom = [], extra = ""; | |
const out = [b[a]], store = []; | |
if (aa < len) { | |
do { | |
out.push(b[aa]); | |
if ((inline === false && b[aa - 1] === "*" && b[aa] === "/") || (inline === true && (b[aa + 1] === "\n" || b[aa + 1] === "\r"))) { | |
break; | |
} | |
aa = aa + 1; | |
} while (aa < len); | |
} | |
if (ltype === "item") { | |
bb = aa; | |
do { | |
bb = bb + 1; | |
if (b[bb] === "/") { | |
if (b[bb + 1] === "*" || b[bb + 1] === "/") { | |
extra = b[bb + 1]; | |
} | |
else if (b[bb - 1] === "*" && extra === "*") { | |
extra = ""; | |
bb = bb + 1; | |
} | |
} | |
else if ((b[bb] === "\n" || b[bb] === "\r") && extra === "/") { | |
extra = ""; | |
bb = bb + 1; | |
} | |
} while (bb < len && ((extra === "" && (/\s/).test(b[bb]) === true) || extra !== "")); | |
if (b[bb] === "{") { | |
item("start"); | |
} | |
else if (b[bb] === "}") { | |
item("end"); | |
} | |
else if (b[bb] === ";") { | |
item("semi"); | |
} | |
else if (b[bb] === ":") { | |
item("colon"); | |
} | |
else { | |
item(""); | |
} | |
} | |
a = aa; | |
ltype = "comment"; | |
ltoke = out.join(""); | |
if ((/^(\/(\/|\*)\s*parse-ignore-start)/).test(ltoke) === true) { | |
do { | |
if (ender[0] === undefined && (b[bb] === "/" || b[bb] === "*") && b[bb - 1] === "/") { | |
ignorecom.push(b[bb - 1]); | |
if (b[bb] === "*") { | |
ender = ["*", "/"]; | |
} | |
else { | |
ender = ["\n"]; | |
} | |
} | |
else if ((b[bb] === ender[1] || ender[1] === undefined) && b[bb - 1] === ender[0]) { | |
if ((/^(\/(\/|\*)\s*parse-ignore-end)/).test(ignorecom.join("")) === true) { | |
a = bb - 1; | |
ltoke = out.join(""); | |
ltype = "ignore"; | |
break; | |
} | |
ignorecom = []; | |
ender = []; | |
} | |
if (ender[0] !== undefined) { | |
ignorecom.push(b[bb]); | |
} | |
out.push(b[bb]); | |
bb = bb + 1; | |
} while (bb < len); | |
if (bb === len) { | |
ltoke = out.join(""); | |
ltype = "ignore"; | |
a = bb; | |
} | |
} | |
if (parse.count > -1 && store.length > 0 && (ltype === "selector" || ltype === "variable") && data.types[parse.count] !== "comment" && data.types[parse.count] !== "ignore") { | |
parse.pop(data); | |
recordPush(""); | |
ltoke = store[0].token; | |
ltype = (ltype === "variable") | |
? "variable" | |
: "selector"; | |
recordPush(""); | |
} | |
else if (ltype === "colon" || ltype === "property" || ltype === "value") { | |
do { | |
store.push(recordStore(parse.count)); | |
parse.pop(data); | |
} while (parse.count > 0 && data.types[parse.count] !== "semi" && data.types[parse.count] !== "start"); | |
recordPush(""); | |
do { | |
parse.push(data, store.pop(), ""); | |
} while (store.length > 0); | |
} | |
else { | |
recordPush(""); | |
} | |
}, | |
//post-processes property declarations: sorting them and consolidating margin/padding shorthands | |
properties = function lexer_style_properties() { | |
let aa = parse.count, bb = 1, cc = 0, dd = 0, next = 0, leng = 0; | |
const p = [], set = [ | |
[] | |
], store = { | |
begin: [], | |
lexer: [], | |
lines: [], | |
presv: [], | |
stack: [], | |
token: [], | |
types: [] | |
}, fourcount = function lexer_style_properties_propcheck_fourcount(name) { | |
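//consolidates margin or padding longhand properties (top/right/bottom/left) into a single shorthand declaration | |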
let test = [ | |
false, false, false, false | |
], val = [ | |
"0", "0", "0", "0" | |
], valsplit = [], start = aa, yy = -1, zz = 0; | |
const zero = (/^(0+([a-z]+|%))/), storage = function lexer_style_properties_propcheck_fourcount_storage(side) { | |
yy = yy + 1; | |
val[side] = data.token[set[aa][2]]; | |
test[side] = true; | |
if (start < 0) { | |
start = aa; | |
} | |
}; | |
if (aa < leng) { | |
do { | |
if (data.token[set[aa][2]] !== undefined && data.token[set[aa][0]].indexOf(name) === 0) { | |
if (data.token[set[aa][0]] === name || data.token[set[aa][0]].indexOf(name + " ") === 0) { | |
yy = yy + 1; | |
valsplit = data | |
.token[set[aa][2]] | |
.split(" "); | |
if (valsplit.length === 1) { | |
val = [ | |
data.token[set[aa][2]], | |
data.token[set[aa][2]], | |
data.token[set[aa][2]], | |
data.token[set[aa][2]] | |
]; | |
} | |
else if (valsplit.length === 2) { | |
val = [ | |
valsplit[0], valsplit[1], valsplit[0], valsplit[1] | |
]; | |
} | |
else if (valsplit.length === 3) { | |
val = [ | |
valsplit[0], valsplit[1], valsplit[2], valsplit[1] | |
]; | |
} | |
else if (valsplit.length === 4) { | |
val = [ | |
valsplit[0], valsplit[1], valsplit[2], valsplit[3] | |
]; | |
} | |
else { | |
return; | |
} | |
test = [true, true, true, true]; | |
} | |
else if (data.token[set[aa][0]].indexOf(name + "-bottom") === 0) { | |
storage(2); | |
} | |
else if (data.token[set[aa][0]].indexOf(name + "-left") === 0) { | |
storage(3); | |
} | |
else if (data.token[set[aa][0]].indexOf(name + "-right") === 0) { | |
storage(1); | |
} | |
else if (data.token[set[aa][0]].indexOf(name + "-top") === 0) { | |
storage(0); | |
} | |
} | |
if (aa === leng - 1 || set[aa + 1] === undefined || data.token[set[aa + 1][0]].indexOf(name) < 0) { | |
if (test[0] === true && test[1] === true && test[2] === true && test[3] === true) { | |
set.splice(start + 1, yy); | |
leng = leng - yy; | |
aa = aa - yy; | |
zz = 0; | |
bb = p.length; | |
do { | |
if (p[zz] === set[start][0]) { | |
break; | |
} | |
zz = zz + 1; | |
} while (zz < bb); | |
if (zz < bb) { | |
p.splice(zz + 1, yy); | |
} | |
data.token[set[start][0]] = name; | |
if (zero.test(val[0]) === true) { | |
val[0] = "0"; | |
} | |
if (zero.test(val[1]) === true) { | |
val[1] = "0"; | |
} | |
if (zero.test(val[2]) === true) { | |
val[2] = "0"; | |
} | |
if (zero.test(val[3]) === true) { | |
val[3] = "0"; | |
} | |
if (val[1] === val[3]) { | |
val.pop(); | |
if (val[0] === val[2]) { | |
val.pop(); | |
if (val[0] === val[1]) { | |
val.pop(); | |
} | |
} | |
} | |
data.token[set[start][2]] = val.join(" "); | |
if (data.token[set[start][2]].indexOf("!important") > 0) { | |
data.token[set[start][2]] = data | |
.token[set[start][2]] | |
.replace(/\s!important/g, "") + " !important"; | |
} | |
} | |
break; | |
} | |
aa = aa + 1; | |
} while (aa < leng); | |
} | |
}; | |
//identify properties and build out prop/val sets | |
do { | |
if (data.types[aa] === "start") { | |
bb = bb - 1; | |
if (bb === 0) { | |
next = aa; | |
set.pop(); | |
aa = set.length - 1; | |
if (aa > -1) { | |
do { | |
set[aa].reverse(); | |
aa = aa - 1; | |
} while (aa > -1); | |
} | |
break; | |
} | |
} | |
if (data.types[aa] === "end") { | |
bb = bb + 1; | |
} | |
if (bb === 1 && (data.types[aa] === "property" || (data.types[aa] === "variable" && data.types[aa + 1] === "colon"))) { | |
p.push(aa); | |
} | |
set[set.length - 1].push(aa); | |
if (bb === 1 && (data.types[aa - 1] === "comment" || data.types[aa - 1] === "semi" || data.types[aa - 1] === "end" || data.types[aa - 1] === "start") && data.types[aa] !== "start" && data.types[aa] !== "end") { | |
set.push([]); | |
} | |
aa = aa - 1; | |
} while (aa > -1); | |
//this reverse fixes the order of consecutive comments | |
set.reverse(); | |
p.reverse(); | |
//consolidate margin and padding | |
leng = set.length; | |
aa = 0; | |
if (aa < leng) { | |
do { | |
if (data.types[set[aa][0]] === "property") { | |
if (data.token[set[aa][0]].indexOf("margin") === 0) { | |
fourcount("margin"); | |
} | |
if (data.token[set[aa][0]].indexOf("padding") === 0) { | |
fourcount("padding"); | |
} | |
} | |
aa = aa + 1; | |
} while (aa < leng); | |
} | |
bb = set.length; | |
aa = 0; | |
if (aa < bb) { | |
do { | |
dd = set[aa].length; | |
cc = 0; | |
if (cc < dd) { | |
do { | |
parse.push(store, recordStore(set[aa][cc]), ""); | |
cc = cc + 1; | |
} while (cc < dd); | |
} | |
aa = aa + 1; | |
} while (aa < bb); | |
} | |
//replace a block's data with sorted analyzed data | |
parse.splice({ | |
data: data, | |
howmany: parse.count - next, | |
index: next + 1, | |
record: { | |
begin: 0, | |
lexer: "", | |
lines: 0, | |
presv: false, | |
stack: "", | |
token: "", | |
types: "" | |
} | |
}); | |
parse.concat(data, store); | |
}; | |
//token building loop | |
do { | |
if ((/\s/).test(b[a]) === true) { | |
a = parse.spacer({ array: b, end: len, index: a }); | |
} | |
else if (b[a] === "/" && b[a + 1] === "*") { | |
comment(false); | |
} | |
else if (b[a] === "/" && b[a + 1] === "/") { | |
comment(true); | |
} | |
else if (b[a] === "<" && b[a + 1] === "?" && b[a + 2] === "p" && b[a + 3] === "h" && b[a + 4] === "p") { | |
//php | |
template("<?php", "?>"); | |
} | |
else if (b[a] === "<" && b[a + 1] === "%") { | |
//asp | |
template("<%", "%>"); | |
} | |
else if (b[a] === "{" && b[a + 1] === "%") { | |
//liquid, twig | |
template("{%", "%}"); | |
} | |
else if (b[a] === "{" && b[a + 1] === "{" && b[a + 2] === "{") { | |
//mustache | |
template("{{{", "}}}"); | |
} | |
else if (b[a] === "{" && b[a + 1] === "{") { | |
//handlebars | |
template("{{", "}}"); | |
} | |
else if (b[a] === "<" && b[a + 1] === "!" && b[a + 2] === "-" && b[a + 3] === "-" && b[a + 4] === "#") { | |
//ssi | |
template("<!--#", "-->"); | |
} | |
else if (b[a] === "@" && b[a + 1] === "e" && b[a + 2] === "l" && b[a + 3] === "s" && b[a + 4] === "e" && (b[a + 5] === "{" || (/\s/).test(b[a + 5]) === true)) { | |
ltoke = "@else"; | |
ltype = "template_else"; | |
recordPush(""); | |
a = a + 4; | |
} | |
else if (b[a] === "{" || (b[a] === "(" && data.token[parse.count] === ":" && data.types[parse.count - 1] === "variable")) { | |
if (b[a] === "{" && data.token[parse.count - 1] === ":") { | |
data.types[parse.count] = "pseudo"; | |
} | |
item("start"); | |
ltype = "start"; | |
ltoke = b[a]; | |
if (b[a] === "(") { | |
recordPush("map"); | |
mapper.push(0); | |
} | |
else if (data.types[parse.count] === "selector" || data.types[parse.count] === "variable") { | |
recordPush(data.token[parse.count]); | |
} | |
else if (data.types[parse.count] === "colon") { | |
recordPush(data.token[parse.count - 1]); | |
} | |
else { | |
recordPush("block"); | |
} | |
nosort.push(false); | |
} | |
else if (b[a] === "}" || (b[a] === ")" && parse.structure[parse.structure.length - 1][0] === "map" && mapper[mapper.length - 1] === 0)) { | |
endtest = true; | |
if (b[a] === "}" && data.types[parse.count] === "item" && data.token[parse.count - 1] === "{" && data.token[parse.count - 2] !== undefined && data.token[parse.count - 2].charAt(data.token[parse.count - 2].length - 1) === "@") { | |
data.token[parse.count - 2] = data.token[parse.count - 2] + "{" + data.token[parse.count] + | |
"}"; | |
parse.pop(data); | |
parse.pop(data); | |
} | |
else { | |
if (b[a] === ")") { | |
mapper.pop(); | |
} | |
item("end"); | |
if (b[a] === "}" && data.token[parse.count] !== ";" && options.correct === true) { | |
if (data.types[parse.count] === "value") { | |
ltoke = ";"; | |
ltype = "semi"; | |
recordPush(""); | |
} | |
else if (data.types[parse.count] === "comment") { | |
semiComment(); | |
} | |
} | |
properties(); | |
ltype = "end"; | |
if (options.lexerOptions.style.objectSort === true && nosort[nosort.length - 1] === false) { | |
parse.objectSort(data); | |
} | |
nosort.pop(); | |
ltoke = b[a]; | |
ltype = "end"; | |
recordPush(""); | |
} | |
} | |
else if (b[a] === ";" || (b[a] === "," && parse.structure[parse.structure.length - 1][0] === "map")) { | |
item("semi"); | |
if (data.types[parse.count] !== "semi" && data.types[parse.count] !== "start" && esctest(a) === false) { | |
ltoke = b[a]; | |
ltype = "semi"; | |
recordPush(""); | |
} | |
} | |
else if (b[a] === ":" && data.types[parse.count] !== "end") { | |
item("colon"); | |
ltoke = ":"; | |
ltype = "colon"; | |
recordPush(""); | |
} | |
else { | |
if (parse.structure[parse.structure.length - 1][0] === "map" && b[a] === "(") { | |
mapper[mapper.length - 1] = mapper[mapper.length - 1] + 1; | |
} | |
buildtoken(); | |
} | |
a = a + 1; | |
} while (a < len); | |
if (endtest === false) { | |
properties(); | |
} | |
return data; | |
}; | |
framework.lexer.style = style; | |
}()); | |
/*global global*/ | |
(function script_init() { | |
"use strict"; | |
const framework = window.parseFramework, script = function lexer_script(source) { | |
let a = 0, ltoke = "", ltype = "", lword = [], pword = [], lengthb = 0, wordTest = -1, paren = -1, tempstore, pstack; | |
const parse = framework.parse, data = parse.data, options = parse.options, sourcemap = [ | |
0, "" | |
], b = source.length, c = source.split(""), brace = [], classy = [], | |
// depth and status of templateStrings | |
templateString = [], | |
// identify variable declarations | |
vart = { | |
count: [], | |
index: [], | |
len: -1, | |
word: [] | |
}, | |
// peek at what's coming up next
nextchar = function lexer_script_nextchar(len, current) { | |
let cc = (current === true) | |
? a | |
: a + 1, dd = ""; | |
if (typeof len !== "number" || len < 1) { | |
len = 1; | |
} | |
if (c[a] === "/") { | |
if (c[a + 1] === "/") { | |
dd = "\n"; | |
} | |
else if (c[a + 1] === "*") { | |
dd = "/"; | |
} | |
} | |
if (cc < b) { | |
do { | |
if ((/\s/).test(c[cc]) === false) { | |
if (c[cc] === "/") { | |
if (dd === "") { | |
if (c[cc + 1] === "/") { | |
dd = "\n"; | |
} | |
else if (c[cc + 1] === "*") { | |
dd = "/"; | |
} | |
} | |
else if (dd === "/" && c[cc - 1] === "*") { | |
dd = ""; | |
} | |
} | |
if (dd === "" && c[cc - 1] + c[cc] !== "*/") { | |
return c | |
.slice(cc, cc + len) | |
.join(""); | |
} | |
} | |
else if (dd === "\n" && c[cc] === "\n") { | |
dd = ""; | |
} | |
cc = cc + 1; | |
} while (cc < b); | |
} | |
return ""; | |
}, | |
// cleans up improperly applied ASI | |
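// asifix walks back over any comments (and steps past "from" in import/export
// statements) and, when it finds a virtual "x;" token in the wrong place,
// splices it out of the parse table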
asifix = function lexer_script_asifix() { | |
let len = parse.count; | |
do { | |
len = len - 1; | |
} while (len > 0 && data.types[len] === "comment"); | |
if (data.token[len] === "from") { | |
len = len - 2; | |
} | |
if (data.token[len] === "x;") { | |
parse.splice({ data: data, howmany: 1, index: len, record: { | |
begin: 0, | |
lexer: "", | |
lines: 0, | |
presv: false, | |
stack: "", | |
token: "", | |
types: "" | |
} }); | |
} | |
}, | |
// determine the definition of containment by stack | |
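// recordPush assembles a single token record (begin, lexer, lines, presv, stack,
// token, types) from the current ltoke/ltype values and hands it to parse.push;
// comments beginning with "parse-ignore-start" are preserved verbatim by setting
// presv and a type of "ignore"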
recordPush = function lexer_script_recordPush(structure) { | |
const record = { | |
begin: parse.structure[parse.structure.length - 1][1], | |
lexer: "script", | |
lines: parse.linesSpace, | |
presv: false, | |
stack: parse.structure[parse.structure.length - 1][0], | |
token: ltoke, | |
types: ltype | |
}; | |
if ((/^(\/(\/|\*)\s*parse-ignore-start)/).test(ltoke) === true) { | |
record.presv = true; | |
record.types = "ignore"; | |
} | |
parse.push(data, record, structure); | |
}, | |
// remove "vart" object data | |
vartpop = function lexer_script_vartpop() { | |
vart | |
.count | |
.pop(); | |
vart | |
.index | |
.pop(); | |
vart | |
.word | |
.pop(); | |
vart.len = vart.len - 1; | |
}, | |
// A lexer for keywords, reserved words, and variables | |
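// word() joins the characters gathered since wordTest into one token. Beyond plain
// identifiers it also, for example, rewrites "new Object()" and "new Array()" to
// literal syntax when options.correct is on, folds repeated var/let/const keywords
// into a single declaration list when varword is "list", and opens virtual "x{"
// blocks after brace-less "do" and "else" statements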
word = function lexer_script_word() { | |
let f = wordTest, g = 1, output = "", nextitem = ""; | |
const lex = [], elsefix = function lexer_script_word_elsefix() { | |
brace.push("x{"); | |
parse.splice({ | |
data: data, | |
howmany: 1, | |
index: parse.count - 3, | |
record: { | |
begin: 0, | |
lexer: "", | |
lines: 0, | |
presv: false, | |
stack: "", | |
token: "", | |
types: "" | |
} | |
}); | |
}, builder = function lexer_script_word_builder(index) { | |
return { | |
begin: data.begin[index], | |
lexer: data.lexer[index], | |
lines: data.lines[index], | |
presv: data.presv[index], | |
stack: data.stack[index], | |
token: data.token[index], | |
types: data.types[index] | |
}; | |
}; | |
do { | |
lex.push(c[f]); | |
if (c[f] === "\\") { | |
framework.parseerror = "Illegal escape in JavaScript on line number " + parse.lineNumber; | |
} | |
f = f + 1; | |
} while (f < a); | |
output = lex.join(""); | |
wordTest = -1; | |
if (parse.count > 0 && output === "function" && data.token[parse.count] === "(" && (data.token[parse.count - 1] === "{" || data.token[parse.count - 1] === "x{")) { | |
data.types[parse.count] = "start"; | |
} | |
if (parse.count > 1 && output === "function" && ltoke === "(" && (data.token[parse.count - 1] === "}" || data.token[parse.count - 1] === "x}")) { | |
if (data.token[parse.count - 1] === "}") { | |
f = parse.count - 2; | |
if (f > -1) { | |
do { | |
if (data.types[f] === "end") { | |
g = g + 1; | |
} | |
else if (data.types[f] === "start") {
g = g - 1; | |
} | |
if (g === 0) { | |
break; | |
} | |
f = f - 1; | |
} while (f > -1); | |
} | |
if (data.token[f] === "{" && data.token[f - 1] === ")") { | |
g = 1; | |
f = f - 2; | |
if (f > -1) { | |
do { | |
if (data.types[f] === "end") { | |
g = g + 1; | |
} | |
else if (data.types[f] === "start") {
g = g - 1; | |
} | |
if (g === 0) { | |
break; | |
} | |
f = f - 1; | |
} while (f > -1); | |
} | |
if (data.token[f - 1] !== "function" && data.token[f - 2] !== "function") { | |
data.types[parse.count] = "start"; | |
} | |
} | |
} | |
else { | |
data.types[parse.count] = "start"; | |
} | |
} | |
if (options.correct === true && (output === "Object" || output === "Array") && c[a + 1] === "(" && c[a + 2] === ")" && data.token[parse.count - 1] === "=" && data.token[parse.count] === "new") { | |
if (output === "Object") { | |
data.token[parse.count] = "{"; | |
ltoke = "}"; | |
data.stack[parse.count] = "object"; | |
parse.structure[parse.structure.length - 1][0] = "object"; | |
} | |
else { | |
data.token[parse.count] = "["; | |
ltoke = "]"; | |
data.stack[parse.count] = "array"; | |
parse.structure[parse.structure.length - 1][0] = "array"; | |
} | |
data.types[parse.count] = "start"; | |
ltype = "end"; | |
c[a + 1] = ""; | |
c[a + 2] = ""; | |
a = a + 2; | |
} | |
else { | |
g = parse.count; | |
f = g; | |
if (options.lexerOptions.script.varword !== "none" && (output === "var" || output === "let" || output === "const")) { | |
if (data.types[g] === "comment") { | |
do { | |
g = g - 1; | |
} while (g > 0 && (data.types[g] === "comment")); | |
} | |
if (options.lexerOptions.script.varword === "list" && vart.len > -1 && vart.index[vart.len] === g && output === vart.word[vart.len]) { | |
ltoke = ","; | |
ltype = "separator"; | |
data.token[g] = ltoke; | |
data.types[g] = ltype; | |
vart.count[vart.len] = 0; | |
vart.index[vart.len] = g; | |
vart.word[vart.len] = output; | |
return; | |
} | |
vart.len = vart.len + 1; | |
vart | |
.count | |
.push(0); | |
vart | |
.index | |
.push(g); | |
vart | |
.word | |
.push(output); | |
g = f; | |
} | |
else if (vart.len > -1 && output !== vart.word[vart.len] && parse.count === vart.index[vart.len] && data.token[vart.index[vart.len]] === ";" && ltoke !== vart.word[vart.len] && options.lexerOptions.script.varword === "list") { | |
vartpop(); | |
} | |
if (output === "else" && data.types[g] === "comment") { | |
do { | |
f = f - 1; | |
} while (f > -1 && data.types[f] === "comment"); | |
if (data.token[f] === "x;" && (data.token[f - 1] === "}" || data.token[f - 1] === "x}")) { | |
parse.splice({ data: data, howmany: 1, index: f, record: { | |
begin: 0, | |
lexer: "", | |
lines: 0, | |
presv: false, | |
stack: "", | |
token: "", | |
types: "" | |
} }); | |
g = g - 1; | |
f = f - 1; | |
} | |
do { | |
tempstore = parse.pop(data); | |
parse.splice({ | |
data: data, | |
howmany: 0, | |
index: g - 3, | |
record: builder(g) | |
}); | |
f = f + 1; | |
} while (f < g); | |
} | |
if (output === "from" && data.token[parse.count] === "x;" && data.token[parse.count - 1] === "}") { | |
asifix(); | |
} | |
if (output === "while" && data.token[parse.count] === "x;" && data.token[parse.count - 1] === "}") { | |
let d = 0, e = parse.count - 2; | |
if (e > -1) { | |
do { | |
if (data.types[e] === "end") { | |
d = d + 1; | |
} | |
else if (data.types[e] === "start") { | |
d = d - 1; | |
} | |
if (d < 0) { | |
if (data.token[e] === "{" && data.token[e - 1] === "do") { | |
asifix(); | |
} | |
return; | |
} | |
e = e - 1; | |
} while (e > -1); | |
} | |
} | |
ltoke = output; | |
ltype = "word"; | |
if (output === "from" && data.token[parse.count] === "}") { | |
asifix(); | |
} | |
} | |
recordPush(""); | |
if (output === "class") { | |
classy.push(0); | |
} | |
if (output === "do") { | |
nextitem = nextchar(1, true); | |
if (nextitem !== "{") { | |
ltoke = "x{"; | |
ltype = "start"; | |
brace.push("x{"); | |
recordPush("do"); | |
} | |
} | |
if (output === "else") { | |
nextitem = nextchar(2, true); | |
if (data.token[parse.count - 1] === "x}") { | |
if (data.token[parse.count] === "else") { | |
if (data.stack[parse.count - 1] !== "if" && data.stack[parse.count - 1] !== "else") { | |
brace.pop(); | |
parse.splice({ | |
data: data, | |
howmany: 0, | |
index: parse.count - 1, | |
record: { | |
begin: data.begin[data.begin[data.begin[parse.count - 1] - 1] - 1], | |
lexer: "script", | |
lines: 0, | |
presv: false, | |
stack: "if", | |
token: "x}", | |
types: "end" | |
} | |
}); | |
if (parse.structure.length > 1) { | |
parse.structure.splice(parse.structure.length - 2, 1); | |
parse.structure[parse.structure.length - 1][1] = parse.count; | |
} | |
} | |
else if (data.token[parse.count - 2] === "x}" && pstack[0] !== "if" && data.stack[parse.count] === "else") { | |
elsefix(); | |
} | |
else if (data.token[parse.count - 2] === "}" && data.stack[parse.count - 2] === "if" && pstack[0] === "if" && data.token[pstack[1] - 1] !== "if" && data.token[data.begin[parse.count - 1]] === "x{") { | |
// fixes when "else" is following a block that isn't "if" | |
elsefix(); | |
} | |
} | |
else if (data.token[parse.count] === "x}" && data.stack[parse.count] === "if") { | |
elsefix(); | |
} | |
} | |
if (nextitem !== "if" && nextitem.charAt(0) !== "{") { | |
ltoke = "x{"; | |
ltype = "start"; | |
brace.push("x{"); | |
recordPush("else"); | |
} | |
} | |
if ((output === "for" || output === "if" || output === "switch" || output === "catch") && options.lang !== "twig" && data.token[parse.count - 1] !== ".") { | |
nextitem = nextchar(1, true); | |
if (nextitem !== "(") { | |
paren = parse.count; | |
start("x("); | |
} | |
} | |
}, | |
// determines if a slash comprises a valid escape or if it is escaped itself | |
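// slashes(index) counts the run of consecutive backslashes ending at index; an odd
// count means the backslash at index actively escapes the next character (e.g. \"),
// while an even count means that backslash is itself escaped (e.g. \\")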
slashes = function lexer_script_slashes(index) { | |
let slashy = index; | |
do { | |
slashy = slashy - 1; | |
} while (c[slashy] === "\\" && slashy > 0); | |
if ((index - slashy) % 2 === 1) { | |
return true; | |
} | |
return false; | |
}, | |
// the generic function is a multipurpose tokenizer: "starting" is the syntax that
// opens the token and "ending" is the syntax that identifies where the token ends
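// for example generic("/*", "*\u002f") consumes a block comment, calling it with a
// quote character as both arguments consumes a string, and while scanning it
// recurses into embedded templates such as <?php ?>, <% %>, {% %} and {{ }}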
generic = function lexer_script_genericBuilder(starting, ending) { | |
let ee = 0, output = "", escape = false, ignorecom = [], build = [starting], ender = ending.split(""); | |
const endlen = ender.length, jj = b, base = a + starting.length; | |
if (wordTest > -1) { | |
word(); | |
} | |
// this insanity is for JSON where all the required quote characters are | |
// escaped. | |
if (c[a - 1] === "\\" && slashes(a - 1) === true && (c[a] === "\"" || c[a] === "'")) { | |
parse.pop(data); | |
if (data.token[0] === "{") { | |
if (c[a] === "\"") { | |
starting = "\""; | |
ending = "\\\""; | |
build = ["\""]; | |
} | |
else { | |
starting = "'"; | |
ending = "\\'"; | |
build = ["'"]; | |
} | |
escape = true; | |
} | |
else { | |
if (c[a] === "\"") { | |
return "\\\""; | |
} | |
return "\\'"; | |
} | |
} | |
ee = base; | |
if (ee < jj) { | |
do { | |
if (ee > a + 1) { | |
if (c[ee] === "<" && c[ee + 1] === "?" && c[ee + 2] === "p" && c[ee + 3] === "h" && c[ee + 4] === "p" && c[ee + 5] !== starting && starting !== "//" && starting !== "/*") { | |
a = ee; | |
build.push(lexer_script_genericBuilder("<?php", "?>")); | |
ee = ee + build[build.length - 1].length - 1; | |
} | |
else if (c[ee] === "<" && c[ee + 1] === "%" && c[ee + 2] !== starting && starting !== "//" && starting !== "/*") { | |
a = ee; | |
build.push(lexer_script_genericBuilder("<%", "%>")); | |
ee = ee + build[build.length - 1].length - 1; | |
} | |
else if (c[ee] === "{" && c[ee + 1] === "%" && c[ee + 2] !== starting && starting !== "//" && starting !== "/*") { | |
a = ee; | |
build.push(lexer_script_genericBuilder("{%", "%}")); | |
ee = ee + build[build.length - 1].length - 1; | |
} | |
else if (c[ee] === "{" && c[ee + 1] === "{" && c[ee + 2] === "{" && c[ee + 3] !== starting && starting !== "//" && starting !== "/*") { | |
a = ee; | |
build.push(lexer_script_genericBuilder("{{{", "}}}")); | |
ee = ee + build[build.length - 1].length - 1; | |
} | |
else if (c[ee] === "{" && c[ee + 1] === "{" && c[ee + 2] !== starting && starting !== "//" && starting !== "/*") { | |
a = ee; | |
build.push(lexer_script_genericBuilder("{{", "}}")); | |
ee = ee + build[build.length - 1].length - 1; | |
} | |
else if (c[ee] === "<" && c[ee + 1] === "!" && c[ee + 2] === "-" && c[ee + 3] === "-" && c[ee + 4] === "#" && c[ee + 5] !== starting && starting !== "//" && starting !== "/*") { | |
a = ee; | |
build.push(lexer_script_genericBuilder("<!--#", "-->")); | |
ee = ee + build[build.length - 1].length - 1; | |
} | |
else { | |
build.push(c[ee]); | |
} | |
} | |
else { | |
build.push(c[ee]); | |
} | |
if ((starting === "\"" || starting === "'") && options.lang !== "json" && c[ee - 1] !== "\\" && (c[ee] !== c[ee - 1] || (c[ee] !== "\"" && c[ee] !== "'")) && (c[ee] === "\n" || ee === jj - 1)) { | |
framework.parseerror = "Unterminated string in script on line number " + parse.lineNumber; | |
break; | |
} | |
if (c[ee] === ender[endlen - 1] && (c[ee - 1] !== "\\" || slashes(ee - 1) === false)) { | |
if (endlen === 1) { | |
break; | |
} | |
// `ee - base` is a cheap means of computing the current length of the build
// array; `ee - base` and `endlen` are both length based values, so adding two
// (1 for each) provides an index based number
if (build[ee - base] === ender[0] && build.slice(ee - base - endlen + 2).join("") === ending) { | |
break; | |
} | |
} | |
ee = ee + 1; | |
} while (ee < jj); | |
} | |
if (escape === true) { | |
output = build[build.length - 1]; | |
build.pop(); | |
build.pop(); | |
build.push(output); | |
} | |
a = ee; | |
if (starting === "//") { | |
build.pop(); | |
} | |
output = build.join(""); | |
if ((/^(\/(\/|\*)\s*parse-ignore-start)/).test(output) === true && ee < jj) { | |
ender = []; | |
do { | |
if (ender[0] === undefined && (c[ee] === "/" || c[ee] === "*") && c[ee - 1] === "/") { | |
ignorecom.push(c[ee - 1]); | |
if (c[ee] === "*") { | |
ender = ["*", "/"]; | |
} | |
else { | |
ender = ["\n"]; | |
} | |
} | |
else if ((c[ee] === ender[1] || ender[1] === undefined) && c[ee - 1] === ender[0]) { | |
if ((/^(\/(\/|\*)\s*parse-ignore-end)/).test(ignorecom.join("")) === true) { | |
a = ee - 1; | |
output = build.join(""); | |
break; | |
} | |
ignorecom = []; | |
ender = []; | |
} | |
if (ender[0] !== undefined) { | |
ignorecom.push(c[ee]); | |
} | |
build.push(c[ee]); | |
ee = ee + 1; | |
} while (ee < jj); | |
if (ee === jj) { | |
output = build.join(""); | |
a = ee; | |
} | |
} | |
else { | |
if (starting === "//") { | |
output = output.replace(/(\s+)$/, ""); | |
} | |
else if (starting === "/*") { | |
if (options.crlf === true) { | |
build = output.split("\r\n"); | |
} | |
else { | |
build = output.split("\n"); | |
} | |
ee = build.length - 1; | |
if (ee > -1) { | |
do { | |
build[ee] = build[ee].replace(/(\s+)$/, ""); | |
ee = ee - 1; | |
} while (ee > -1); | |
} | |
if (options.crlf === true) { | |
output = build.join("\r\n"); | |
} | |
else { | |
output = build.join("\n"); | |
} | |
} | |
if (starting === "{%") { | |
if (output.indexOf("{%-") < 0) { | |
output = output | |
.replace(/^(\{%\s*)/, "{% ") | |
.replace(/(\s*%\})$/, " %}"); | |
} | |
else { | |
output = output | |
.replace(/^(\{%-\s*)/, "{%- ") | |
.replace(/(\s*-%\})$/, " -%}"); | |
} | |
} | |
if (output.indexOf("#region") === 0 || output.indexOf("#endregion") === 0) { | |
output = output.replace(/(\s+)$/, ""); | |
} | |
} | |
return output; | |
}, | |
// inserts ending curly brace (where absent) | |
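// blockinsert closes any pending virtual "x{" blocks on the brace stack, e.g. the
// body of a brace-less "if (a) b();" receives a matching "x}" record so block
// boundaries in the parse table stay balanced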
blockinsert = function lexer_script_blockinsert() { | |
let next = nextchar(5, false), name = ""; | |
const g = parse.count, lines = parse.linesSpace; | |
if (options.lang === "json") { | |
return; | |
} | |
if (data.stack[parse.count] === "do" && next === "while" && data.token[parse.count] === "}") { | |
return; | |
} | |
next = next.slice(0, 4); | |
if (next === "else" && ltoke === "}" && data.stack[parse.count] === "if" && data.token[data.begin[parse.count]] !== "x{") { | |
return; | |
} | |
if (ltoke === ";" && data.token[g - 1] === "x{") { | |
name = data.token[data.begin[g - 2] - 1]; | |
if (data.token[g - 2] === "do" || (data.token[g - 2] === ")" && "ifforwhilecatch".indexOf(name) > -1)) { | |
tempstore = parse.pop(data); | |
ltoke = "x}"; | |
ltype = "end"; | |
pstack = parse.structure[parse.structure.length - 1]; | |
recordPush(""); | |
brace.pop(); | |
parse.linesSpace = lines; | |
return; | |
} | |
// to prevent the semicolon from inserting between the braces --> while (x) {}; | |
tempstore = parse.pop(data); | |
ltoke = "x}"; | |
ltype = "end"; | |
pstack = parse.structure[parse.structure.length - 1]; | |
recordPush(""); | |
brace.pop(); | |
ltoke = ";"; | |
ltype = "end"; | |
parse.push(data, tempstore, ""); | |
parse.linesSpace = lines; | |
return; | |
} | |
ltoke = "x}"; | |
ltype = "end"; | |
if (data.token[parse.count] === "x}") { | |
return; | |
} | |
if (data.stack[parse.count] === "if" && (data.token[parse.count] === ";" || data.token[parse.count] === "x;") && next === "else") { | |
pstack = parse.structure[parse.structure.length - 1]; | |
recordPush(""); | |
brace.pop(); | |
parse.linesSpace = lines; | |
return; | |
} | |
do { | |
pstack = parse.structure[parse.structure.length - 1]; | |
recordPush(""); | |
brace.pop(); | |
if (data.stack[parse.count] === "do") { | |
break; | |
} | |
} while (brace[brace.length - 1] === "x{"); | |
parse.linesSpace = lines; | |
}, | |
// commaComment ensures that a comma immediately precedes a comment rather than
// immediately following it
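// for example, when a comment sits between a value and its trailing comma the ","
// separator record is spliced in ahead of the comment run (or simply appended when
// the containing object is being sorted)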
commaComment = function lexer_script_commacomment() { | |
let x = parse.count; | |
if (data.stack[x] === "object" && options.lexerOptions.script.objectSort === true) { | |
ltoke = ","; | |
ltype = "separator"; | |
asifix(); | |
recordPush(""); | |
} | |
else { | |
do { | |
x = x - 1; | |
} while (x > 0 && data.types[x - 1] === "comment"); | |
parse.splice({ | |
data: data, | |
howmany: 0, | |
index: x, | |
record: { | |
begin: data.begin[x], | |
lexer: "script", | |
lines: parse.linesSpace, | |
presv: false, | |
stack: data.stack[x], | |
token: ",", | |
types: "separator" | |
} | |
}); | |
recordPush(""); | |
} | |
}, | |
// automatic semicolon insertion | |
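// asi() decides whether a statement has ended without an explicit semicolon and,
// if so, records one: a real ";" when options.correct is true, otherwise a virtual
// "x;". For example a line break after "a = b" followed by "c()" gains a separator.
// Languages such as java and csharp bail out early since ASI does not apply to them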
asi = function lexer_script_asi(isEnd) { | |
let aa = 0; | |
const next = nextchar(1, false), record = { | |
begin: data.begin[parse.count], | |
lexer: data.lexer[parse.count], | |
lines: data.lines[parse.count], | |
presv: data.presv[parse.count], | |
stack: data.stack[parse.count], | |
token: data.token[parse.count], | |
types: data.types[parse.count] | |
}, clist = (parse.structure.length === 0) | |
? "" | |
: parse.structure[parse.structure.length - 1][0]; | |
if (data.lexer[parse.count - 1] !== "script" || options.lang === "java" || options.lang === "csharp") { | |
return; | |
} | |
if (options.lang === "json" || record.token === ";" || record.token === "," || next === "{" || record.stack === "class" || record.stack === "map" || record.stack === "attribute" || clist === "initializer" || data.types[record.begin - 1] === "generic") { | |
return; | |
} | |
if (((record.stack === "global" && record.types !== "end") || (record.types === "end" && data.stack[record.begin - 1] === "global")) && (next === "" || next === "}") && record.stack === data.stack[parse.count - 1] && options.lang === "jsx") { | |
return; | |
} | |
if (record.stack === "array" && record.token !== "]") { | |
return; | |
} | |
if (record.types !== undefined && record.types.indexOf("template") > -1) { | |
return; | |
} | |
if (next === ";" && isEnd === false) { | |
return; | |
} | |
if (options.lang === "qml") { | |
if (record.types === "start") { | |
return; | |
} | |
ltoke = "x;"; | |
ltype = "separator"; | |
recordPush(""); | |
if (brace[brace.length - 1] === "x{" && next !== "}") { | |
blockinsert(); | |
} | |
return; | |
} | |
if (record.token === "}" && (record.stack === "function" || record.stack === "if" || record.stack === "else" || record.stack === "for" || record.stack === "do" || record.stack === "while" || record.stack === "switch" || record.stack === "class" || record.stack === "try" || record.stack === "catch" || record.stack === "finally" || record.stack === "block")) { | |
if (data.token[record.begin - 1] === ")") { | |
aa = data.begin[record.begin - 1] - 1; | |
if (data.token[aa - 1] === "function") { | |
aa = aa - 1; | |
} | |
if (data.stack[aa - 1] === "object" || data.stack[aa - 1] === "switch") { | |
return; | |
} | |
if (data.token[aa - 1] !== "=" && data.token[aa - 1] !== "return" && data.token[aa - 1] !== ":") { | |
return; | |
} | |
} | |
else { | |
return; | |
} | |
} | |
if (record.types === "comment" || clist === "method" || clist === "paren" || clist === "expression" || clist === "array" || clist === "object" || (clist === "switch" && record.stack !== "method" && data.token[data.begin[parse.count]] === "(")) { | |
return; | |
} | |
if (data.stack[parse.count] === "expression" && (data.token[data.begin[parse.count] - 1] !== "while" || (data.token[data.begin[parse.count] - 1] === "while" && data.stack[data.begin[parse.count] - 2] !== "do"))) { | |
return; | |
} | |
if (next !== "" && "=<>+*?|^:&%~,.()]".indexOf(next) > -1 && isEnd === false) { | |
return; | |
} | |
if (record.types === "comment") { | |
aa = parse.count; | |
do { | |
aa = aa - 1; | |
} while (aa > 0 && data.types[aa] === "comment"); | |
if (aa < 1) { | |
return; | |
} | |
record.token = data.token[aa]; | |
record.types = data.types[aa]; | |
record.stack = data.stack[aa]; | |
} | |
if (record.token === undefined || record.types === "start" || record.types === "separator" || (record.types === "operator" && record.token !== "++" && record.token !== "--") || record.token === "x}" || record.token === "var" || record.token === "let" || record.token === "const" || record.token === "else" || record.token.indexOf("#!/") === 0 || record.token === "instanceof") { | |
return; | |
} | |
if (record.stack === "method" && (data.token[record.begin - 1] === "function" || data.token[record.begin - 2] === "function")) { | |
return; | |
} | |
if (options.lexerOptions.script.varword === "list") { | |
vart.index[vart.len] = parse.count; | |
} | |
if (options.correct === true) { | |
ltoke = ";"; | |
} | |
else { | |
ltoke = "x;"; | |
} | |
ltype = "separator"; | |
aa = parse.linesSpace; | |
recordPush(""); | |
parse.linesSpace = aa; | |
if (brace[brace.length - 1] === "x{" && next !== "}") { | |
blockinsert(); | |
} | |
}, | |
// convert ++ and -- into "= x +" and "= x -" in most cases | |
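// plusplus() is only reached when options.correct is true at the call sites: a
// postfix statement like "x++;" is rewritten to "x = x + 1;" and a prefix "--x"
// likewise, walking back through member chains (a.b.c++) to find the start of the
// expression being incremented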
plusplus = function lexer_script_plusplus() { | |
let pre = true, toke = "+", tokea = "", tokeb = "", tokec = "", inc = 0, ind = 0, walk = 0, next = ""; | |
const store = [], end = function lexer_script_plusplus_end() { | |
walk = data.begin[walk] - 1; | |
if (data.types[walk] === "end") { | |
lexer_script_plusplus_end(); | |
} | |
else if (data.token[walk - 1] === ".") { | |
period(); | |
} | |
}, period = function lexer_script_plusplus_period() { | |
walk = walk - 2; | |
if (data.types[walk] === "end") { | |
end(); | |
} | |
else if (data.token[walk - 1] === ".") { | |
lexer_script_plusplus_period(); | |
} | |
}, applyStore = function lexer_script_plusplus_applyStore() { | |
let x = 0; | |
const y = store.length; | |
if (x < y) { | |
do { | |
parse.push(data, store[x], ""); | |
x = x + 1; | |
} while (x < y); | |
} | |
}, recordStore = function lexer_script_plusplus_recordStore(index) { | |
return { | |
begin: data.begin[index], | |
lexer: data.lexer[index], | |
lines: data.lines[index], | |
presv: data.presv[index], | |
stack: data.stack[index], | |
token: data.token[index], | |
types: data.types[index] | |
}; | |
}; | |
tokea = data.token[parse.count]; | |
tokeb = data.token[parse.count - 1]; | |
tokec = data.token[parse.count - 2]; | |
if (tokea !== "++" && tokea !== "--" && tokeb !== "++" && tokeb !== "--") { | |
walk = parse.count; | |
if (data.types[walk] === "end") { | |
end(); | |
} | |
else if (data.token[walk - 1] === ".") { | |
period(); | |
} | |
} | |
if (data.token[walk - 1] === "++" || data.token[walk - 1] === "--") { | |
if ("startendoperator".indexOf(data.types[walk - 2]) > -1) { | |
return; | |
} | |
inc = walk; | |
if (inc < parse.count + 1) { | |
do { | |
store.push(recordStore(inc)); | |
inc = inc + 1; | |
} while (inc < parse.count + 1); | |
parse.splice({ | |
data: data, | |
howmany: parse.count - walk, | |
index: walk, | |
record: { | |
begin: 0, | |
lexer: "", | |
lines: 0, | |
presv: false, | |
stack: "", | |
token: "", | |
types: "" | |
} | |
}); | |
} | |
} | |
else { | |
if (options.correct === false || (tokea !== "++" && tokea !== "--" && tokeb !== "++" && tokeb !== "--")) { | |
return; | |
} | |
next = nextchar(1, false); | |
if ((tokea === "++" || tokea === "--") && (c[a] === ";" || next === ";" || c[a] === "}" || next === "}" || c[a] === ")" || next === ")")) { | |
toke = data.stack[parse.count]; | |
if (toke === "array" || toke === "method" || toke === "object" || toke === "paren" || toke === "notation" || (data.token[data.begin[parse.count] - 1] === "while" && toke !== "while")) { | |
return; | |
} | |
inc = parse.count; | |
do { | |
inc = inc - 1; | |
if (data.token[inc] === "return") { | |
return; | |
} | |
if (data.types[inc] === "end") { | |
do { | |
inc = data.begin[inc] - 1; | |
} while (data.types[inc] === "end" && inc > 0); | |
} | |
} while (inc > 0 && (data.token[inc] === "." || data.types[inc] === "word" || data.types[inc] === "end")); | |
if (data.token[inc] === "," && c[a] !== ";" && next !== ";" && c[a] !== "}" && next !== "}" && c[a] !== ")" && next !== ")") { | |
return; | |
} | |
if (data.types[inc] === "operator") { | |
if (data.stack[inc] === "switch" && data.token[inc] === ":") { | |
do { | |
inc = inc - 1; | |
if (data.types[inc] === "start") { | |
ind = ind - 1; | |
if (ind < 0) { | |
break; | |
} | |
} | |
else if (data.types[inc] === "end") { | |
ind = ind + 1; | |
} | |
if (data.token[inc] === "?" && ind === 0) { | |
return; | |
} | |
} while (inc > 0); | |
} | |
else { | |
return; | |
} | |
} | |
pre = false; | |
if (tokea === "--") { | |
toke = "-"; | |
} | |
else { | |
toke = "+"; | |
} | |
} | |
else if (tokec === "[" || tokec === ";" || tokec === "x;" || tokec === "}" || tokec === "{" || tokec === "(" || tokec === ")" || tokec === "," || tokec === "return") { | |
if (tokea === "++" || tokea === "--") { | |
if (tokec === "[" || tokec === "(" || tokec === "," || tokec === "return") { | |
return; | |
} | |
if (tokea === "--") { | |
toke = "-"; | |
} | |
pre = false; | |
} | |
else if (tokeb === "--" || tokea === "--") { | |
toke = "-"; | |
} | |
} | |
else { | |
return; | |
} | |
if (pre === false) { | |
tempstore = parse.pop(data); | |
} | |
walk = parse.count; | |
if (data.types[walk] === "end") { | |
end(); | |
} | |
else if (data.token[walk - 1] === ".") { | |
period(); | |
} | |
inc = walk; | |
if (inc < parse.count + 1) { | |
do { | |
store.push(recordStore(inc)); | |
inc = inc + 1; | |
} while (inc < parse.count + 1); | |
} | |
} | |
if (pre === true) { | |
parse.splice({ | |
data: data, | |
howmany: 1, | |
index: walk - 1, | |
record: { | |
begin: 0, | |
lexer: "", | |
lines: 0, | |
presv: false, | |
stack: "", | |
token: "", | |
types: "" | |
} | |
}); | |
ltoke = "="; | |
ltype = "operator"; | |
recordPush(""); | |
applyStore(); | |
ltoke = toke; | |
ltype = "operator"; | |
recordPush(""); | |
ltoke = "1"; | |
ltype = "number"; | |
recordPush(""); | |
} | |
else { | |
ltoke = "="; | |
ltype = "operator"; | |
recordPush(""); | |
applyStore(); | |
ltoke = toke; | |
ltype = "operator"; | |
recordPush(""); | |
ltoke = "1"; | |
ltype = "number"; | |
recordPush(""); | |
} | |
ltoke = data.token[parse.count]; | |
ltype = data.types[parse.count]; | |
if (next === "}" && c[a] !== ";") { | |
asi(false); | |
} | |
}, | |
// fixes asi location if inserted after an inserted brace | |
asibrace = function lexer_script_asibrace() { | |
let aa = parse.count; | |
do { | |
aa = aa - 1; | |
} while (aa > -1 && data.token[aa] === "x}"); | |
if (data.stack[aa] === "else") { | |
return recordPush(""); | |
} | |
aa = aa + 1; | |
parse.splice({ | |
data: data, | |
howmany: 0, | |
index: aa, | |
record: { | |
begin: data.begin[aa], | |
lexer: "script", | |
lines: parse.linesSpace, | |
presv: false, | |
stack: data.stack[aa], | |
token: ltoke, | |
types: ltype | |
} | |
}); | |
recordPush(""); | |
}, | |
// a tokenizer for regular expressions | |
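// regex() scans from the opening "/" to the matching unescaped "/", ignoring
// slashes inside character classes ([...]), then greedily appends up to five
// distinct trailing flags from the set g, i, m, y, u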
regex = function lexer_script_regex() { | |
let ee = a + 1, h = 0, i = 0, output = "", square = false; | |
const f = b, build = ["/"]; | |
if (ee < f) { | |
do { | |
build.push(c[ee]); | |
if (c[ee - 1] !== "\\" || c[ee - 2] === "\\") { | |
if (c[ee] === "[") { | |
square = true; | |
} | |
if (c[ee] === "]") { | |
square = false; | |
} | |
} | |
if (c[ee] === "/" && square === false) { | |
if (c[ee - 1] === "\\") { | |
i = 0; | |
h = ee - 1; | |
if (h > 0) { | |
do { | |
if (c[h] === "\\") { | |
i = i + 1; | |
} | |
else { | |
break; | |
} | |
h = h - 1; | |
} while (h > 0); | |
} | |
if (i % 2 === 0) { | |
break; | |
} | |
} | |
else { | |
break; | |
} | |
} | |
ee = ee + 1; | |
} while (ee < f); | |
} | |
if (c[ee + 1] === "g" || c[ee + 1] === "i" || c[ee + 1] === "m" || c[ee + 1] === "y" || c[ee + 1] === "u") { | |
build.push(c[ee + 1]); | |
if (c[ee + 2] !== c[ee + 1] && (c[ee + 2] === "g" || c[ee + 2] === "i" || c[ee + 2] === "m" || c[ee + 2] === "y" || c[ee + 2] === "u")) { | |
build.push(c[ee + 2]); | |
if (c[ee + 3] !== c[ee + 1] && c[ee + 3] !== c[ee + 2] && (c[ee + 3] === "g" || c[ee + 3] === "i" || c[ee + 3] === "m" || c[ee + 3] === "y" || c[ee + 3] === "u")) { | |
build.push(c[ee + 3]); | |
if (c[ee + 4] !== c[ee + 1] && c[ee + 4] !== c[ee + 2] && c[ee + 4] !== c[ee + 3] && (c[ee + 4] === "g" || c[ee + 4] === "i" || c[ee + 4] === "m" || c[ee + 4] === "y" || c[ee + 4] === "u")) { | |
build.push(c[ee + 4]); | |
if (c[ee + 5] !== c[ee + 1] && c[ee + 5] !== c[ee + 2] && c[ee + 5] !== c[ee + 3] && c[ee + 5] !== c[ee + 4] && (c[ee + 5] === "g" || c[ee + 5] === "i" || c[ee + 5] === "m" || c[ee + 5] === "y" || c[ee + 5] === "u")) { | |
build.push(c[ee + 5]);
a = ee + 5; | |
} | |
else { | |
a = ee + 4; | |
} | |
} | |
else { | |
a = ee + 3; | |
} | |
} | |
else { | |
a = ee + 2; | |
} | |
} | |
else { | |
a = ee + 1; | |
} | |
} | |
else { | |
a = ee; | |
} | |
output = build.join(""); | |
return output; | |
}, | |
// a unique tokenizer for operator characters | |
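// operator() first decides whether a leading "/" begins a regular expression or a
// division operator (based on the previous token type), then greedily joins
// characters from the syntax list into a single operator token; compound
// assignments such as "a += b" are expanded to "a = a + b" via plusequal when
// options.correct is true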
operator = function lexer_script_operator() { | |
let g = 0, h = 0, jj = b, output = ""; | |
const syntax = [ | |
"=", | |
"<", | |
">", | |
"+", | |
"*", | |
"?", | |
"|", | |
"^", | |
":", | |
"&", | |
"%", | |
"~" | |
], synlen = syntax.length, plusequal = function lexer_script_operator_plusequal(op) { | |
let walk = parse.count, inc = 0; | |
const toke = op.charAt(0), store = [], applyStore = function lexer_script_operator_plusequal_applyStore() {
let x = 0; | |
const y = store.length; | |
if (x < y) { | |
do { | |
parse.push(data, store[x], ""); | |
x = x + 1; | |
} while (x < y); | |
} | |
}, end = function lexer_script_operator_plusequal_end() { | |
walk = data.begin[walk] - 1; | |
if (data.types[walk] === "end") { | |
lexer_script_operator_plusequal_end(); | |
} | |
else if (data.token[walk - 1] === ".") { | |
period(); | |
} | |
}, period = function lexer_script_operator_plusequal_period() { | |
walk = walk - 2; | |
if (data.types[walk] === "end") { | |
end(); | |
} | |
else if (data.token[walk - 1] === ".") { | |
lexer_script_operator_plusequal_period(); | |
} | |
}; | |
if (data.types[walk] === "end") { | |
end(); | |
} | |
else if (data.token[walk - 1] === ".") { | |
period(); | |
} | |
inc = walk; | |
do { | |
store.push({ | |
begin: data.begin[inc], | |
lexer: data.lexer[inc], | |
lines: data.lines[inc], | |
presv: data.presv[inc], | |
stack: data.stack[inc], | |
token: data.token[inc], | |
types: data.types[inc] | |
}); | |
inc = inc + 1; | |
} while (inc < parse.count); | |
ltoke = "="; | |
ltype = "operator"; | |
recordPush(""); | |
applyStore(); | |
return toke; | |
}; | |
if (wordTest > -1) { | |
word(); | |
} | |
if (c[a] === "/" && (parse.count > -1 && (ltype !== "word" || ltoke === "typeof" || ltoke === "return" || ltoke === "else") && ltype !== "number" && ltype !== "string" && ltype !== "end")) { | |
if (ltoke === "return" || ltoke === "typeof" || ltoke === "else" || ltype !== "word") { | |
ltoke = regex(); | |
ltype = "regex"; | |
} | |
else { | |
ltoke = "/"; | |
ltype = "operator"; | |
} | |
recordPush(""); | |
return "regex"; | |
} | |
if (c[a] === "?" && ("+-*/".indexOf(c[a + 1]) > -1 || (c[a + 1] === ":" && syntax.join("").indexOf(c[a + 2]) < 0))) { | |
return "?"; | |
} | |
if (c[a] === ":" && "+-*/".indexOf(c[a + 1]) > -1) { | |
return ":"; | |
} | |
if (a < b - 1) { | |
if (c[a] !== "<" && c[a + 1] === "<") { | |
return c[a]; | |
} | |
if (c[a] === "!" && c[a + 1] === "/") { | |
return "!"; | |
} | |
if (c[a] === "-") { | |
if (c[a + 1] === "-") { | |
output = "--"; | |
} | |
else if (c[a + 1] === "=") { | |
output = "-="; | |
} | |
else if (c[a + 1] === ">") { | |
output = "->"; | |
} | |
if (output === "") { | |
return "-"; | |
} | |
} | |
if (c[a] === "+") { | |
if (c[a + 1] === "+") { | |
output = "++"; | |
} | |
else if (c[a + 1] === "=") { | |
output = "+="; | |
} | |
if (output === "") { | |
return "+"; | |
} | |
} | |
if (c[a] === "=" && c[a + 1] !== "=" && c[a + 1] !== "!" && c[a + 1] !== ">") { | |
return "="; | |
} | |
} | |
if (output === "") { | |
if ((c[a + 1] === "+" && c[a + 2] === "+") || (c[a + 1] === "-" && c[a + 2] === "-")) { | |
output = c[a]; | |
} | |
else { | |
const buildout = [c[a]]; | |
g = a + 1; | |
if (g < jj) { | |
do { | |
if ((c[g] === "+" && c[g + 1] === "+") || (c[g] === "-" && c[g + 1] === "-")) { | |
break; | |
} | |
h = 0; | |
if (h < synlen) { | |
do { | |
if (c[g] === syntax[h]) { | |
buildout.push(syntax[h]); | |
break; | |
} | |
h = h + 1; | |
} while (h < synlen); | |
} | |
if (h === synlen) { | |
break; | |
} | |
g = g + 1; | |
} while (g < jj); | |
} | |
output = buildout.join(""); | |
} | |
} | |
a = a + (output.length - 1); | |
if (output === "=>" && ltoke === ")") { | |
g = parse.count; | |
jj = data.begin[g]; | |
do { | |
if (data.begin[g] === jj) { | |
data.stack[g] = "method"; | |
} | |
g = g - 1; | |
} while (g > jj - 1); | |
} | |
if (output.length === 2 && output.charAt(1) === "=" && "!=<>|&?".indexOf(output.charAt(0)) < 0 && options.correct === true) { | |
return plusequal(output); | |
} | |
return output; | |
}, | |
// ES6 template string support | |
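// tempstring() reads a template literal until its closing backtick, or stops early
// at "${" and flags the templateString stack so the embedded expression is lexed
// normally before the literal resumes at the matching "}"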
tempstring = function lexer_script_tempstring() { | |
const output = [c[a]]; | |
a = a + 1; | |
if (a < b) { | |
do { | |
output.push(c[a]); | |
if (c[a] === "`" && (c[a - 1] !== "\\" || slashes(a - 1) === false)) { | |
templateString.pop(); | |
break; | |
} | |
if (c[a - 1] === "$" && c[a] === "{" && (c[a - 2] !== "\\" || slashes(a - 2) === false)) { | |
templateString[templateString.length - 1] = true; | |
break; | |
} | |
a = a + 1; | |
} while (a < b); | |
} | |
return output.join(""); | |
}, | |
// a tokenizer for numbers | |
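// numb() accepts hex (0x), octal (0o) and binary (0b) prefixes, a single decimal
// point, and an optional exponent part such as 1.5e-3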
numb = function lexer_script_number() { | |
const f = b, build = [c[a]]; | |
let ee = 0, test = /zz/, dot = (build[0] === "."); | |
if (a < b - 2 && c[a] === "0") { | |
if (c[a + 1] === "x") { | |
test = /[0-9a-fA-F]/; | |
} | |
else if (c[a + 1] === "o") { | |
test = /[0-7]/;
} | |
else if (c[a + 1] === "b") { | |
test = /0|1/; | |
} | |
if (test.test(c[a + 2]) === true) { | |
build.push(c[a + 1]); | |
ee = a + 1; | |
do { | |
ee = ee + 1; | |
build.push(c[ee]); | |
} while (test.test(c[ee + 1]) === true); | |
a = ee; | |
return build.join(""); | |
} | |
} | |
ee = a + 1; | |
if (ee < f) { | |
do { | |
if ((/[0-9]/).test(c[ee]) || (c[ee] === "." && dot === false)) { | |
build.push(c[ee]); | |
if (c[ee] === ".") { | |
dot = true; | |
} | |
} | |
else { | |
break; | |
} | |
ee = ee + 1; | |
} while (ee < f); | |
} | |
if (ee < f - 1 && ((/\d/).test(c[ee - 1]) === true || ((/\d/).test(c[ee - 2]) === true && (c[ee - 1] === "-" || c[ee - 1] === "+"))) && (c[ee] === "e" || c[ee] === "E")) { | |
build.push(c[ee]); | |
if (c[ee + 1] === "-" || c[ee + 1] === "+") { | |
build.push(c[ee + 1]); | |
ee = ee + 1; | |
} | |
dot = false; | |
ee = ee + 1; | |
if (ee < f) { | |
do { | |
if ((/[0-9]/).test(c[ee]) || (c[ee] === "." && dot === false)) { | |
build.push(c[ee]); | |
if (c[ee] === ".") { | |
dot = true; | |
} | |
} | |
else { | |
break; | |
} | |
ee = ee + 1; | |
} while (ee < f); | |
} | |
} | |
a = ee - 1; | |
return build.join(""); | |
}, | |
// Identifies blocks of markup embedded within JavaScript for language supersets | |
// like React JSX. | |
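// for example "return <div>{x}</div>;" is captured here and handed off to
// framework.lexer.markup, while generic type arguments such as <string, number>
// (TypeScript or Java style) are recorded as a single "generic" token instead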
markup = function lexer_script_markup() { | |
let curlytest = false, endtag = false, anglecount = 0, curlycount = 0, tagcount = 0, d = 0, next = "", output = []; | |
const syntaxnum = "0123456789=<>+-*?|^:&.,;%(){}[]~", syntax = "=<>+-*?|^:&.,;%(){}[]~", applyMarkup = function lexer_script_markup_applyMarkup() { | |
if (ltoke === "(") { | |
parse.structure[parse.structure.length - 1] = ["paren", parse.count]; | |
} | |
framework.lexer.markup(output.join("")); | |
}; | |
if (wordTest > -1) { | |
word(); | |
} | |
d = parse.count; | |
if (data.types[d] === "comment") { | |
do { | |
d = d - 1; | |
} while (d > 0 && data.types[d] === "comment"); | |
} | |
if (c[a] === "<" && c[a + 1] === ">") { | |
a = a + 1; | |
ltype = "generic"; | |
ltoke = "<>"; | |
} | |
if ((c[a] !== "<" && syntaxnum.indexOf(c[a + 1]) > -1) || data.token[d] === "++" || data.token[d] === "--" || (/\s/).test(c[a + 1]) === true || ((/\d/).test(c[a + 1]) === true && (ltype === "operator" || ltype === "string" || ltype === "number" || (ltype === "word" && ltoke !== "return")))) { | |
ltype = "operator"; | |
ltoke = operator(); | |
return recordPush(""); | |
} | |
if (options.lang !== "typescript" && (data.token[d] === "return" || data.types[d] === "operator" || data.types[d] === "start" || data.types[d] === "separator" || (data.token[d] === "}" && parse.structure[parse.structure.length - 1][0] === "global"))) { | |
ltype = "markup"; | |
options.lang = "jsx"; | |
} | |
else if (options.lang === "typescript" || data.token[parse.count] === "#include" || (((/\s/).test(c[a - 1]) === false || ltoke === "public" || ltoke === "private" || ltoke === "static" || ltoke === "final" || ltoke === "implements" || ltoke === "class" || ltoke === "void" || ltoke === "Promise") && syntaxnum.indexOf(c[a + 1]) < 0)) { | |
// Java type generics | |
let comma = false, e = 1, f = 0; | |
const generics = [ | |
"<", | |
c[a + 1] | |
], jj = b; | |
if (c[a + 1] === "<") { | |
e = 2; | |
} | |
d = a + 2; | |
if (d < jj) { | |
do { | |
generics.push(c[d]); | |
if (c[d] === "?" && c[d + 1] === ">") { | |
generics.push(">"); | |
d = d + 1; | |
} | |
if (c[d] === ",") { | |
comma = true; | |
if ((/\s/).test(c[d + 1]) === false) { | |
generics.push(" "); | |
} | |
} | |
else if (c[d] === "[") { | |
f = f + 1; | |
} | |
else if (c[d] === "]") { | |
f = f - 1; | |
} | |
else if (c[d] === "<") { | |
e = e + 1; | |
} | |
else if (c[d] === ">") { | |
e = e - 1; | |
if (e === 0 && f === 0) { | |
if ((/\s/).test(c[d - 1]) === true) { | |
ltype = "operator"; | |
operator(); | |
return; | |
} | |
ltype = "generic"; | |
a = d; | |
ltoke = generics | |
.join("") | |
.replace(/\s+/g, " "); | |
return recordPush(""); | |
} | |
} | |
if ((syntax.indexOf(c[d]) > -1 && c[d] !== "," && c[d] !== "<" && c[d] !== ">" && c[d] !== "[" && c[d] !== "]") || (comma === false && (/\s/).test(c[d]) === true)) { | |
ltype = "operator"; | |
operator(); | |
return; | |
} | |
d = d + 1; | |
} while (d < jj); | |
} | |
return; | |
} | |
else { | |
ltype = "operator"; | |
ltoke = operator(); | |
return recordPush(""); | |
} | |
do { | |
output.push(c[a]); | |
if (c[a] === "{") { | |
curlycount = curlycount + 1; | |
curlytest = true; | |
} | |
else if (c[a] === "}") { | |
curlycount = curlycount - 1; | |
if (curlycount === 0) { | |
curlytest = false; | |
} | |
} | |
else if (c[a] === "<" && curlytest === false) { | |
if (c[a + 1] === "<") { | |
do { | |
output.push(c[a]); | |
a = a + 1; | |
} while (a < b && c[a + 1] === "<"); | |
} | |
anglecount = anglecount + 1; | |
if (c[a + 1] === "/") { | |
endtag = true; | |
} | |
} | |
else if (c[a] === ">" && curlytest === false) { | |
if (c[a + 1] === ">") { | |
do { | |
output.push(c[a]); | |
a = a + 1; | |
} while (c[a + 1] === ">"); | |
} | |
anglecount = anglecount - 1; | |
if (endtag === true) { | |
tagcount = tagcount - 1; | |
} | |
else if (c[a - 1] !== "/") { | |
tagcount = tagcount + 1; | |
} | |
if (anglecount === 0 && curlycount === 0 && tagcount < 1) { | |
next = nextchar(2, false); | |
if (next.charAt(0) !== "<") { | |
// if followed by nonmarkup | |
return applyMarkup(); | |
} | |
// catch additional trailing tag sets | |
if (next.charAt(0) === "<" && syntaxnum.indexOf(next.charAt(1)) < 0 && (/\s/).test(next.charAt(1)) === false) { | |
// perform a minor safety test to verify if "<" is a tag start or a less than | |
// operator | |
d = a + 1; | |
do { | |
d = d + 1; | |
if (c[d] === ">" || ((/\s/).test(c[d - 1]) === true && syntaxnum.indexOf(c[d]) < 0)) { | |
break; | |
} | |
if (syntaxnum.indexOf(c[d]) > -1) { | |
// if followed by additional markup tags | |
return applyMarkup(); | |
} | |
} while (d < b); | |
} | |
else { | |
// if a nonmarkup "<" follows markup | |
return applyMarkup(); | |
} | |
} | |
endtag = false; | |
} | |
a = a + 1; | |
} while (a < b); | |
return applyMarkup(); | |
}, | |
// operations for end types: ), ], } | |
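// end(x) closes the current containment: it may first apply plusplus/asi
// corrections, converts "new Array()"/"new Object()" calls into literal syntax when
// options.correct is true (newarray), and closes any pending virtual "x{" blocks
// via blockinsert before the real end token is recorded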
end = function lexer_script_end(x) { | |
let insert = false; | |
const next = nextchar(1, false), newarray = function lexer_script_end_newarray() { | |
let bb = 0, cc = 0, arraylen = 0; | |
const aa = data.begin[parse.count], ar = (data.token[data.begin[parse.count] - 1] === "Array"), startar = (ar === true) | |
? "[" | |
: "{", endar = (ar === true) | |
? "]" | |
: "}", namear = (ar === true) | |
? "array" | |
: "object"; | |
tempstore = parse.pop(data); | |
if (ar === true && data.token[parse.count - 1] === "(" && data.types[parse.count] === "number") { | |
arraylen = data.begin[parse.count] - 1; | |
tempstore = parse.pop(data); | |
tempstore = parse.pop(data); | |
tempstore = parse.pop(data); | |
data.token[parse.count] = "["; | |
data.types[parse.count] = "start"; | |
data.lines[parse.count] = 0; | |
data.stack[parse.count] = "array"; | |
data.begin[parse.count] = parse.count; | |
parse.structure[parse.structure.length - 1] = ["array", parse.count]; | |
ltoke = ","; | |
ltype = "separator"; | |
do { | |
recordPush(""); | |
arraylen = arraylen - 1; | |
} while (arraylen > 0); | |
} | |
else { | |
data.token[aa] = startar; | |
data.types[aa] = "start"; | |
cc = data.begin[aa]; | |
parse.splice({ | |
data: data, | |
howmany: 2, | |
index: aa - 2, | |
record: { | |
begin: 0, | |
lexer: "", | |
lines: 0, | |
presv: false, | |
stack: "", | |
token: "", | |
types: "" | |
} | |
}); | |
parse.structure[parse.structure.length - 1] = [ | |
namear, aa - 2 | |
]; | |
pstack = [namear, aa]; | |
bb = parse.count; | |
do { | |
if (data.begin[bb] === cc) { | |
data.stack[bb] = namear; | |
data.begin[bb] = data.begin[bb] - 2; | |
} | |
bb = bb - 1; | |
} while (bb > aa - 3); | |
} | |
ltoke = endar; | |
ltype = "end"; | |
recordPush(""); | |
}; | |
if (wordTest > -1) { | |
word(); | |
} | |
if (classy.length > 0) { | |
if (classy[classy.length - 1] === 0) { | |
classy.pop(); | |
} | |
else { | |
classy[classy.length - 1] = classy[classy.length - 1] - 1; | |
} | |
} | |
if (x === ")" || x === "x)" || x === "]") { | |
if (options.correct === true) { | |
plusplus(); | |
} | |
asifix(); | |
} | |
if (x === ")" || x === "x)") { | |
asi(false); | |
} | |
if (vart.len > -1) { | |
if (x === "}" && ((options.lexerOptions.script.varword === "list" && vart.count[vart.len] === 0) || (data.token[parse.count] === "x;" && options.lexerOptions.script.varword === "each"))) { | |
vartpop(); | |
} | |
vart.count[vart.len] = vart.count[vart.len] - 1; | |
if (vart.count[vart.len] < 0) { | |
vartpop(); | |
} | |
} | |
if (ltoke === "," && data.stack[parse.count] !== "initializer" && ((x === "]" && data.token[parse.count - 1] === "[") || x === "}")) { | |
tempstore = parse.pop(data); | |
} | |
if (x === ")" || x === "x)") { | |
ltoke = x; | |
ltype = "end"; | |
if (lword.length > 0) { | |
pword = lword[lword.length - 1]; | |
if (pword.length > 1 && next !== "{" && (pword[0] === "if" || pword[0] === "for" || (pword[0] === "while" && data.stack[pword[1] - 2] !== undefined && data.stack[pword[1] - 2] !== "do") || pword[0] === "with")) { | |
insert = true; | |
} | |
} | |
} | |
else if (x === "]") { | |
ltoke = "]"; | |
ltype = "end"; | |
} | |
else if (x === "}") { | |
if (ltoke !== "," && options.correct === true) { | |
plusplus(); | |
} | |
if (parse.structure.length > 0 && parse.structure[parse.structure.length - 1][0] !== "object") { | |
asi(true); | |
} | |
else if (options.lexerOptions.script.objectSort === true) { | |
parse.objectSort(data); | |
} | |
if (ltype === "comment") { | |
ltoke = data.token[parse.count]; | |
ltype = data.types[parse.count]; | |
} | |
ltoke = "}"; | |
ltype = "end"; | |
} | |
lword.pop(); | |
pstack = parse.structure[parse.structure.length - 1]; | |
if (x === ")" && options.correct === true && (data.token[data.begin[parse.count] - 1] === "Array" || data.token[data.begin[parse.count] - 1] === "Object") && data.token[data.begin[parse.count] - 2] === "new") { | |
newarray(); | |
} | |
if (brace[brace.length - 1] === "x{" && x === "}") { | |
blockinsert(); | |
brace.pop(); | |
if (data.stack[parse.count] !== "try") { | |
if (next !== ":" && next !== ";" && data.token[data.begin[a] - 1] !== "?") { | |
blockinsert(); | |
} | |
} | |
ltoke = "}"; | |
} | |
else { | |
brace.pop(); | |
} | |
recordPush(""); | |
if (insert === true) { | |
ltoke = "x{"; | |
ltype = "start"; | |
recordPush(pword[0]); | |
brace.push("x{"); | |
pword[1] = parse.count; | |
} | |
}, | |
// determines tag names for {% %} based template tags and returns a type | |
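// e.g. "{% if x %}" and "{% for i in list %}" yield "template_start",
// "{% endif %}" yields "template_end", "{% else %}" and "{% elseif %}" yield
// "template_else", and anything unrecognised falls back to "template"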
tname = function lexer_script_tname(x) { | |
let sn = 2, en = 0, name = ""; | |
const st = x.slice(0, 2), len = x.length, namelist = [ | |
"autoescape", | |
"block", | |
"capture", | |
"case", | |
"comment", | |
"embed", | |
"filter", | |
"for", | |
"form", | |
"if", | |
"macro", | |
"paginate", | |
"raw", | |
"sandbox", | |
"spaceless", | |
"tablerow", | |
"unless", | |
"verbatim" | |
]; | |
if (x.charAt(2) === "-") { | |
sn = sn + 1; | |
} | |
if ((/\s/).test(x.charAt(sn)) === true) { | |
do { | |
sn = sn + 1; | |
} while ((/\s/).test(x.charAt(sn)) === true && sn < len); | |
} | |
en = sn; | |
do { | |
en = en + 1; | |
} while ((/\s/).test(x.charAt(en)) === false && x.charAt(en) !== "(" && en < len); | |
if (en === len) { | |
en = x.length - 2; | |
} | |
name = x.slice(sn, en); | |
if (name === "else" || (st === "{%" && (name === "elseif" || name === "when" || name === "elif"))) { | |
return "template_else"; | |
} | |
if (st === "{{") { | |
if (name === "end") { | |
return "template_end"; | |
} | |
if (name === "block" || name === "define" || name === "form" || name === "if" || name === "range" || name === "with") { | |
return "template_start"; | |
} | |
return "template"; | |
} | |
en = namelist.length - 1; | |
if (en > -1) { | |
do { | |
if (name === namelist[en]) { | |
return "template_start"; | |
} | |
if (name === "end" + namelist[en]) { | |
return "template_end"; | |
} | |
en = en - 1; | |
} while (en > -1); | |
} | |
return "template"; | |
}, start = function lexer_script_start(x) { | |
let aa = parse.count, wordx = "", wordy = "", stack = ""; | |
brace.push(x); | |
if (wordTest > -1) { | |
word(); | |
aa = parse.count; | |
} | |
if (vart.len > -1) { | |
vart.count[vart.len] = vart.count[vart.len] + 1; | |
} | |
if (data.token[aa - 1] === "function") { | |
lword.push([ | |
"function", aa + 1 | |
]); | |
} | |
else { | |
lword.push([ | |
ltoke, aa + 1 | |
]); | |
} | |
ltoke = x; | |
ltype = "start"; | |
if (x === "(" || x === "x(") { | |
asifix(); | |
} | |
else if (x === "{") { | |
if (paren > -1) { | |
if (data.begin[paren - 1] === data.begin[data.begin[aa] - 1] || data.token[data.begin[aa]] === "x(") { | |
paren = -1; | |
end("x)"); | |
asifix(); | |
ltoke = "{"; | |
ltype = "start"; | |
} | |
} | |
else if (ltoke === ")") { | |
asifix(); | |
} | |
if (ltype === "comment" && data.token[aa - 1] === ")") { | |
ltoke = data.token[aa]; | |
data.token[aa] = "{"; | |
ltype = data.types[aa]; | |
data.types[aa] = "start"; | |
} | |
} | |
wordx = data.token[aa]; | |
wordy = (data.stack[aa] === undefined) | |
? "" | |
: data.token[data.begin[aa] - 1]; | |
if (ltoke === "{" || ltoke === "x{") { | |
if (wordx === "else" || wordx === "do" || wordx === "try" || wordx === "finally" || wordx === "switch") { | |
stack = wordx; | |
} | |
else if (classy[classy.length - 1] === 0 && wordx !== "return") { | |
classy.pop(); | |
stack = "class"; | |
} | |
else if (data.token[aa - 1] === "class") { | |
stack = "class"; | |
} | |
else if (data.token[aa] === "]" && data.token[aa - 1] === "[") { | |
stack = "array"; | |
} | |
else if (data.types[aa] === "word" && (data.types[aa - 1] === "word" || (data.token[aa - 1] === "?" && data.types[aa - 2] === "word")) && data.token[aa] !== "in" && data.token[aa - 1] !== "export" && data.token[aa - 1] !== "import") { | |
stack = "map"; | |
} | |
else if (data.stack[aa] === "method" && data.types[aa] === "end" && data.types[data.begin[aa] - 1] === "word" && data.token[data.begin[aa] - 2] === "new") { | |
stack = "initializer"; | |
} | |
else if (ltoke === "{" && (wordx === ")" || wordx === "x)") && (data.types[data.begin[aa] - 1] === "word" || data.token[data.begin[aa] - 1] === "]")) { | |
if (wordy === "if") { | |
stack = "if"; | |
} | |
else if (wordy === "for") { | |
stack = "for"; | |
} | |
else if (wordy === "while") { | |
stack = "while"; | |
} | |
else if (wordy === "class") { | |
stack = "class"; | |
} | |
else if (wordy === "switch" || data.token[data.begin[aa] - 1] === "switch") { | |
stack = "switch"; | |
} | |
else if (wordy === "catch") { | |
stack = "catch"; | |
} | |
else { | |
stack = "function"; | |
} | |
} | |
else if (ltoke === "{" && (wordx === ";" || wordx === "x;")) { | |
// ES6 block | |
stack = "block"; | |
} | |
else if (ltoke === "{" && data.token[aa] === ":" && data.stack[aa] === "switch") { | |
// ES6 block | |
stack = "block"; | |
} | |
else if (data.token[aa - 1] === "import" || data.token[aa - 2] === "import" || data.token[aa - 1] === "export" || data.token[aa - 2] === "export") { | |
stack = "object"; | |
} | |
else if (wordx === ")" && (pword[0] === "function" || pword[0] === "if" || pword[0] === "for" || pword[0] === "class" || pword[0] === "while" || pword[0] === "switch" || pword[0] === "catch")) { | |
// if preceded by a paren, the prior containment was opened by a keyword, as in
// if (...) {
stack = pword[0]; | |
} | |
else if (data.stack[aa] === "notation") { | |
// if following a TSX array type declaration | |
stack = "function"; | |
} | |
else if ((data.types[aa] === "number" || data.types[aa] === "string" || data.types[aa] === "word") && data.types[aa - 1] === "word" && data.token[data.begin[aa] - 1] !== "for") { | |
// if preceded by a word and either a string or a word, e.g. public class {
stack = "function"; | |
} | |
else if (parse.structure.length > 0 && data.token[aa] !== ":" && parse.structure[parse.structure.length - 1][0] === "object" && (data.token[data.begin[aa] - 2] === "{" || data.token[data.begin[aa] - 2] === ",")) { | |
// if an object wrapped in some containment which is itself preceded by a curly
// brace or comma, e.g. var a={({b:{cat:"meow"}})};
stack = "function"; | |
} | |
else if (data.types[pword[1] - 1] === "markup" && data.token[pword[1] - 3] === "function") { | |
// checking for TSX function using an angle brace name | |
stack = "function"; | |
} | |
else if (wordx === "=>") { | |
// checking for fat arrow assignment | |
stack = "function"; | |
} | |
else if (wordx === ")" && data.stack[aa] === "method" && data.types[data.begin[aa] - 1] === "word") { | |
stack = "function"; | |
} | |
else if (data.types[aa] === "word" && ltoke === "{" && data.token[aa] !== "return" && data.token[aa] !== "in" && data.token[aa] !== "import" && data.token[aa] !== "const" && data.token[aa] !== "let" && data.token[aa] !== "") { | |
// ES6 block | |
stack = "block"; | |
} | |
else { | |
stack = "object"; | |
} | |
} | |
else if (ltoke === "[") { | |
if ((/\s/).test(c[a - 1]) === true && data.types[aa] === "word" && wordx !== "return" && options.lang !== "twig") { | |
stack = "notation"; | |
} | |
else { | |
stack = "array"; | |
} | |
} | |
else if (ltoke === "(" || ltoke === "x(") { | |
if (wordx === "function" || data.token[aa - 1] === "function") { | |
stack = "arguments"; | |
} | |
else if (data.token[aa - 1] === "." || data.token[data.begin[aa] - 2] === ".") { | |
stack = "method"; | |
} | |
else if (data.types[aa] === "generic") { | |
stack = "method"; | |
} | |
else if (data.token[aa] === "}" && data.stack[aa] === "function") { | |
stack = "method"; | |
} | |
else if (wordx === "if" || wordx === "for" || wordx === "class" || wordx === "while" || wordx === "catch" || wordx === "switch" || wordx === "with") { | |
stack = "expression"; | |
} | |
else if (data.types[aa] === "word") { | |
stack = "method"; | |
} | |
else { | |
stack = "paren"; | |
} | |
} | |
else if (ltoke === ":" && data.types[aa] === "word" && data.token[aa - 1] === "[") { | |
stack = "attribute"; | |
} | |
recordPush(stack); | |
if (classy.length > 0) { | |
classy[classy.length - 1] = classy[classy.length - 1] + 1; | |
} | |
}; | |
do { | |
if ((/\s/).test(c[a])) { | |
if (wordTest > -1) { | |
word(); | |
} | |
a = parse.spacer({ array: c, end: b, index: a }); | |
if (parse.linesSpace > 1 && ltoke !== ";" && lengthb < parse.count && c[a + 1] !== "}") { | |
asi(false); | |
lengthb = parse.count; | |
} | |
} | |
else if (c[a] === "<" && c[a + 1] === "?" && c[a + 2] === "p" && c[a + 3] === "h" && c[a + 4] === "p") { | |
// php | |
ltoke = generic("<?php", "?>"); | |
ltype = "template"; | |
recordPush(""); | |
} | |
else if (c[a] === "<" && c[a + 1] === "%") { | |
// asp | |
ltoke = generic("<%", "%>"); | |
ltype = "template"; | |
recordPush(""); | |
} | |
else if (c[a] === "{" && c[a + 1] === "%") { | |
// twig | |
ltoke = generic("{%", "%}"); | |
ltype = tname(ltoke); | |
recordPush(""); | |
} | |
else if (c[a] === "{" && c[a + 1] === "{" && c[a + 2] === "{") { | |
// mustache | |
ltoke = generic("{{{", "}}}"); | |
ltype = "template"; | |
recordPush(""); | |
} | |
else if (c[a] === "{" && c[a + 1] === "{") { | |
// handlebars | |
ltoke = generic("{{", "}}"); | |
ltype = tname(ltoke); | |
recordPush(""); | |
} | |
else if (c[a] === "<" && c[a + 1] === "!" && c[a + 2] === "-" && c[a + 3] === "-" && c[a + 4] === "#") { | |
// ssi | |
ltoke = generic("<!--#", "-->"); | |
ltype = "template"; | |
recordPush(""); | |
} | |
else if (c[a] === "<" && c[a + 1] === "!" && c[a + 2] === "-" && c[a + 3] === "-") { | |
// markup comment | |
ltoke = generic("<!--", "-->"); | |
ltype = "comment"; | |
recordPush(""); | |
} | |
else if (c[a] === "<") { | |
// markup | |
markup(); | |
} | |
else if (c[a] === "/" && (a === b - 1 || c[a + 1] === "*")) { | |
// comment block | |
ltoke = generic("/*", "*\u002f"); | |
if (ltoke.indexOf("# sourceMappingURL=") === 2) { | |
sourcemap[0] = parse.count + 1; | |
sourcemap[1] = ltoke; | |
} | |
ltype = "comment"; | |
if (data.token[parse.count] === "var" || data.token[parse.count] === "let" || data.token[parse.count] === "const") { | |
tempstore = parse.pop(data); | |
recordPush(""); | |
parse.push(data, tempstore, ""); | |
if (data.lines[parse.count - 2] === 0) { | |
data.lines[parse.count - 2] = data.lines[parse.count]; | |
} | |
data.lines[parse.count] = 0; | |
} | |
else { | |
if (data.token[parse.count] === "x}" || data.token[parse.count] === "x)") { | |
let ignore = ((/^(\/\*\s*parse-ignore-start)/).test(ltoke) === true); | |
parse.splice({ | |
data: data, | |
howmany: 0, | |
index: parse.count, | |
record: { | |
begin: data.begin[parse.count], | |
lexer: "script", | |
lines: parse.linesSpace, | |
presv: (ignore === true), | |
stack: data.stack[parse.count], | |
token: ltoke, | |
types: (ignore === true) | |
? "ignore" | |
: "comment" | |
} | |
}); | |
} | |
else { | |
recordPush(""); | |
} | |
} | |
} | |
else if ((parse.count < 0 || data.lines[parse.count] > 0) && c[a] === "#" && c[a + 1] === "!" && (c[a + 2] === "/" || c[a + 2] === "[")) { | |
// shebang | |
ltoke = generic("#!" + c[a + 2], "\n"); | |
ltoke = ltoke.slice(0, ltoke.length - 1); | |
ltype = "string"; | |
parse.linesSpace = 2; | |
recordPush(""); | |
} | |
else if (c[a] === "/" && (a === b - 1 || c[a + 1] === "/")) { | |
// comment line | |
asi(false); | |
ltoke = generic("//", "\n"); | |
ltype = "comment"; | |
if (ltoke.indexOf("# sourceMappingURL=") === 2) { | |
sourcemap[0] = parse.count + 1; | |
sourcemap[1] = ltoke; | |
} | |
if (data.token[parse.count] === "x}" || data.token[parse.count] === "x)") { | |
let ignore = ((/^(\/\/\s*parse-ignore-start)/).test(ltoke) === true); | |
parse.splice({ | |
data: data, | |
howmany: 0, | |
index: parse.count, | |
record: { | |
begin: data.begin[parse.count], | |
lexer: "script", | |
lines: parse.linesSpace, | |
presv: (ignore === true), | |
stack: data.stack[parse.count], | |
token: ltoke, | |
types: (ignore === true) | |
? "ignore" | |
: "comment" | |
} | |
}); | |
} | |
else { | |
recordPush(""); | |
} | |
a = parse.spacer({ array: c, end: b, index: a }); | |
} | |
else if (c[a] === "#" && c[a + 1] === "r" && c[a + 2] === "e" && c[a + 3] === "g" && c[a + 4] === "i" && c[a + 5] === "o" && c[a + 6] === "n" && (/\s/).test(c[a + 7]) === true) { | |
// comment line (#region)
asi(false); | |
ltoke = generic("#region", "\n"); | |
ltype = "comment"; | |
recordPush(""); | |
} | |
else if (c[a] === "#" && c[a + 1] === "e" && c[a + 2] === "n" && c[a + 3] === "d" && c[a + 4] === "r" && c[a + 5] === "e" && c[a + 6] === "g" && c[a + 7] === "i" && c[a + 8] === "o" && c[a + 9] === "n") { | |
// comment line (#endregion)
asi(false); | |
ltoke = generic("#endregion", "\n"); | |
ltype = "comment"; | |
recordPush(""); | |
} | |
else if (c[a] === "`" || (c[a] === "}" && templateString[templateString.length - 1] === true)) { | |
// template string | |
if (wordTest > -1) { | |
word(); | |
} | |
if (c[a] === "`") { | |
templateString.push(false); | |
} | |
else { | |
templateString[templateString.length - 1] = false; | |
} | |
ltoke = tempstring(); | |
ltype = "string"; | |
recordPush(""); | |
} | |
else if (c[a] === "\"" || c[a] === "'") { | |
// string | |
ltoke = generic(c[a], c[a]); | |
ltype = "string"; | |
recordPush(""); | |
} | |
else if (c[a] === "-" && (a < b - 1 && c[a + 1] !== "=" && c[a + 1] !== "-") && (ltype === "number" || ltype === "word") && ltoke !== "return" && (ltoke === ")" || ltoke === "]" || ltype === "word" || ltype === "number")) { | |
// subtraction | |
if (wordTest > -1) { | |
word(); | |
} | |
ltoke = "-"; | |
ltype = "operator"; | |
recordPush(""); | |
} | |
else if (wordTest === -1 && (c[a] !== "0" || (c[a] === "0" && c[a + 1] !== "b")) && ((/\d/).test(c[a]) || (a !== b - 2 && c[a] === "-" && c[a + 1] === "." && (/\d/).test(c[a + 2])) || (a !== b - 1 && (c[a] === "-" || c[a] === ".") && (/\d/).test(c[a + 1])))) { | |
// number | |
if (wordTest > -1) { | |
word(); | |
} | |
if (ltype === "end" && c[a] === "-") { | |
ltoke = "-"; | |
ltype = "operator"; | |
} | |
else { | |
ltoke = numb(); | |
ltype = "number"; | |
} | |
recordPush(""); | |
} | |
else if (c[a] === ":" && c[a + 1] === ":") { | |
if (wordTest > -1) { | |
word(); | |
} | |
if (options.correct === true) { | |
plusplus(); | |
} | |
asifix(); | |
a = a + 1; | |
ltoke = "::"; | |
ltype = "separator"; | |
recordPush(""); | |
} | |
else if (c[a] === ",") { | |
// comma | |
if (wordTest > -1) { | |
word(); | |
} | |
if (options.correct === true) { | |
plusplus(); | |
} | |
if (ltype === "comment") { | |
commaComment(); | |
} | |
else if (vart.len > -1 && vart.count[vart.len] === 0 && options.lexerOptions.script.varword === "each") { | |
asifix(); | |
ltoke = ";"; | |
ltype = "separator"; | |
recordPush(""); | |
ltoke = vart.word[vart.len]; | |
ltype = "word"; | |
recordPush(""); | |
vart.index[vart.len] = parse.count; | |
} | |
else { | |
ltoke = ","; | |
ltype = "separator"; | |
asifix(); | |
recordPush(""); | |
} | |
} | |
else if (c[a] === ".") { | |
// period | |
if (wordTest > -1) { | |
word(); | |
} | |
if (c[a + 1] === "." && c[a + 2] === ".") { | |
ltoke = "..."; | |
ltype = "operator"; | |
a = a + 2; | |
} | |
else { | |
asifix(); | |
ltoke = "."; | |
ltype = "separator"; | |
} | |
if ((/\s/).test(c[a - 1]) === true) { | |
parse.linesSpace = 1; | |
} | |
recordPush(""); | |
} | |
else if (c[a] === ";") { | |
// semicolon | |
if (wordTest > -1) { | |
word(); | |
} | |
if (options.lang === "qml") { | |
ltoke = "x;"; | |
ltype = "separator"; | |
recordPush(""); | |
} | |
else { | |
if (classy[classy.length - 1] === 0) { | |
classy.pop(); | |
} | |
if (vart.len > -1 && vart.count[vart.len] === 0) { | |
if (options.lexerOptions.script.varword === "each") { | |
vartpop(); | |
} | |
else { | |
vart.index[vart.len] = parse.count + 1; | |
} | |
} | |
if (options.correct === true) { | |
plusplus(); | |
} | |
ltoke = ";"; | |
ltype = "separator"; | |
if (data.token[parse.count] === "x}") { | |
asibrace(); | |
} | |
else { | |
recordPush(""); | |
} | |
} | |
if (brace[brace.length - 1] === "x{" && nextchar(1, false) !== "}") { | |
blockinsert(); | |
} | |
} | |
else if (c[a] === "(" || c[a] === "[" || c[a] === "{") { | |
start(c[a]); | |
} | |
else if (c[a] === ")" || c[a] === "]" || c[a] === "}") { | |
end(c[a]); | |
} | |
else if (c[a] === "*" && data.stack[parse.count] === "object" && wordTest < 0 && (/\s/).test(c[a + 1]) === false && c[a + 1] !== "=" && (/\d/).test(c[a + 1]) === false) { | |
wordTest = a; | |
} | |
else if (c[a] === "=" || c[a] === "&" || c[a] === "<" || c[a] === ">" || c[a] === "+" || c[a] === "-" || c[a] === "*" || c[a] === "/" || c[a] === "!" || c[a] === "?" || c[a] === "|" || c[a] === "^" || c[a] === ":" || c[a] === "%" || c[a] === "~") { | |
// operator | |
ltoke = operator(); | |
if (ltoke === "regex") { | |
ltoke = data.token[parse.count]; | |
} | |
else { | |
ltype = "operator"; | |
if (ltoke !== "!" && ltoke !== "++" && ltoke !== "--") { | |
asifix(); | |
} | |
recordPush(""); | |
} | |
} | |
else if (wordTest < 0 && c[a] !== "") { | |
wordTest = a; | |
} | |
if (vart.len > -1 && parse.count === vart.index[vart.len] + 1 && data.token[vart.index[vart.len]] === ";" && ltoke !== vart.word[vart.len] && ltype !== "comment" && options.lexerOptions.script.varword === "list") { | |
vartpop(); | |
} | |
a = a + 1; | |
} while (a < b); | |
if (wordTest > -1) { | |
word(); | |
} | |
if (options.correct === true && options.lang !== "jsx" && ((data.token[parse.count] !== "}" && data.token[0] === "{") || data.token[0] !== "{") && ((data.token[parse.count] !== "]" && data.token[0] === "[") || data.token[0] !== "[")) { | |
asi(false); | |
} | |
if (sourcemap[0] === parse.count) { | |
ltoke = "\n" + sourcemap[1]; | |
ltype = "string"; | |
recordPush(""); | |
} | |
if (data.token[parse.count] === "x;" && (data.token[parse.count - 1] === "}" || data.token[parse.count - 1] === "]") && data.begin[parse.count - 1] === 0) { | |
tempstore = parse.pop(data); | |
} | |
if (options.correct === true) { | |
let aa = 0; | |
const bb = parse.count + 1; | |
do { | |
if (data.token[aa] === "x;") { | |
data.token[aa] = ";"; | |
} | |
else if (data.token[aa] === "x{") { | |
data.token[aa] = "{"; | |
} | |
else if (data.token[aa] === "x}") { | |
data.token[aa] = "}"; | |
} | |
else if (data.token[aa] === "x(") { | |
data.token[aa] = "("; | |
} | |
else if (data.token[aa] === "x)") { | |
data.token[aa] = ")"; | |
} | |
aa = aa + 1; | |
} while (aa < bb); | |
} | |
return data; | |
}; | |
framework.lexer.script = script; | |
}()); | |
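/*
Sketch only, not part of the Pretty Diff source: the script lexer above returns the parse data
table of parallel arrays, where one index across begin, lexer, lines, presv, stack, token and
types describes a single token (the same fields written by the parse.splice calls in the
comment handling). The helper and the hand-written sample table below are assumptions added
for illustration, not framework API.
*/
(function script_record_demo() {
    "use strict";
    // Rebuild one record object from the parallel data arrays at a given token index.
    const recordAt = function script_record_demo_recordAt(data, index) {
        return {
            begin: data.begin[index], // index of the token that opened the enclosing structure
            lexer: data.lexer[index], // "script" for records produced by this lexer
            lines: data.lines[index], // whitespace/newline count recorded before the token (parse.linesSpace)
            presv: data.presv[index], // true when the token is preserved as written (set for parse-ignore comments)
            stack: data.stack[index], // label of the enclosing structure, for example "object"
            token: data.token[index], // token text; "x;", "x{", "x}", "x(", "x)" mark automatically inserted tokens
            types: data.types[index]  // classification such as "separator", "comment" or "string"
        };
    };
    // Exercise the helper on a tiny stand-in table; the values are illustrative assumptions.
    const sample = {
        begin: [0],
        lexer: ["script"],
        lines: [0],
        presv: [false],
        stack: ["global"],
        token: ["x;"],
        types: ["separator"]
    };
    // When options.correct is true, the loop at the end of the lexer rewrites the inserted
    // "x" prefixed tokens to their literal characters before the data table is returned.
    console.log(recordAt(sample, 0));
}());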
/*global global*/ | |
/* | |
Taken from Pretty Diff. This file is not a formal release product. It exists to make testing code in Node and in the browser a bit faster.
*/ | |
(function language_init() { | |
"use strict"; | |
const language = { | |
auto: function language_auto(sample, defaultLang) { | |
let b = [], c = 0; | |
const vartest = (/(((var)|(let)|(const)|(function)|(import))\s+(\w|\$)+[a-zA-Z0-9]*)/).test(sample), finalstatic = (/((((final)|(public)|(private))\s+static)|(static\s+void))/).test(sample), output = function language_auto_output(langname) { | |
if (langname === "unknown") { | |
return [defaultLang, language.setlangmode(defaultLang), "unknown"]; | |
} | |
if (langname === "xhtml" || langname === "markup") { | |
return ["xml", "markup", "XHTML"]; | |
} | |
if (langname === "tss") { | |
return ["tss", "script", "Titanium Stylesheets"]; | |
} | |
if (langname === "markdown") { | |
return ["markdown", "markdown", "Markdown"]; | |
} | |
return [langname, language.setlangmode(langname), language.nameproper(langname)]; | |
}, cssA = function language_auto_cssA() { | |
if ((/\$[a-zA-Z]/).test(sample) === true || (/\{\s*(\w|\.|\$|#)+\s*\{/).test(sample) === true) { | |
return output("scss"); | |
} | |
if ((/@[a-zA-Z]/).test(sample) === true || (/\{\s*(\w|\.|@|#)+\s*\{/).test(sample) === true) { | |
return output("less"); | |
} | |
return output("css"); | |
}, notmarkup = function language_auto_notmarkup() { | |
let d = 1, join = "", flaga = false, flagb = false; | |
const publicprivate = (/((public)|(private))\s+(static\s+)?(((v|V)oid)|(class)|(final))/).test(sample), javascriptA = function language_auto_notmarkup_javascriptA() { | |
if (sample.indexOf("(") > -1 || sample.indexOf("=") > -1 || (sample.indexOf(";") > -1 && sample.indexOf("{") > -1)) { | |
if (vartest === false && ((/\n\s+#region\s/).test(sample) === true || (/\[\w+:/).test(sample) === true)) { | |
return output("csharp"); | |
} | |
if (finalstatic === true || (/\w<\w+(,\s+\w+)*>/).test(sample) === true) { | |
if ((/:\s*((number)|(string))/).test(sample) === false && vartest === false && (finalstatic === true || publicprivate === true)) { | |
return output("java"); | |
} | |
return output("typescript"); | |
} | |
if ((/final\s+static/).test(sample) === true) { | |
return output("java"); | |
} | |
if ((/(\(|\}|\?|,|(return)|(=>?))\s*</).test(sample) === true) { | |
return output("jsx"); | |
} | |
return output("javascript"); | |
} | |
return output("unknown"); | |
}, cssOrJavaScript = function language_auto_notmarkup_cssOrJavaScript() { | |
if ((/:\s*((number)|(string))/).test(sample) === true && (/((public)|(private))\s+/).test(sample) === true) { | |
return output("typescript"); | |
} | |
if ((/import\s+java(\.|(fx))/).test(sample) === true || (/((public)|(private))\s+static\s+/).test(sample) === true) { | |
return output("java"); | |
} | |
if ((/\sclass\s+\w/).test(sample) === false && (/<[a-zA-Z]/).test(sample) === true && (/<\/[a-zA-Z]/).test(sample) === true && ((/\s?\{%/).test(sample) === true || (/\{(\{|#)(?!(\{|#|=))/).test(sample) === true)) { | |
return output("twig"); | |
} | |
if ((/^(\s*(\$|@))/).test(sample) === false && (/(\};?\s*)$/).test(sample) === true) { | |
if ((/export\s+default\s+\{/).test(sample) === true || (/(\?|:)\s*(\{|\[)/).test(sample) === true || (/(\{|\s|;)render\s*\(\)\s*\{/).test(sample) === true || (/^(\s*return;?\s*\{)/).test(sample) === true) { | |
return output("javascript"); | |
} | |
} | |
if ((/\{\{#/).test(sample) === true && (/\{\{\//).test(sample) === true && (/<\w/).test(sample) === true) { | |
return output("handlebars"); | |
} | |
if ((/\{\s*(\w|\.|@|#)+\s*\{/).test(sample) === true) { | |
return output("less"); | |
} | |
if ((/\$(\w|-)/).test(sample) === true) { | |
return output("scss"); | |
} | |
if ((/(;|\{|:)\s*@\w/).test(sample) === true) { | |
return output("less"); | |
} | |
if ((/class\s+\w+\s+\{/).test(sample) === true) { | |
return output("java"); | |
} | |
return output("css"); | |
}; | |
if (d < c) { | |
do { | |
if (flaga === false) { | |
if (b[d] === "*" && b[d - 1] === "/") { | |
b[d - 1] = ""; | |
flaga = true; | |
} | |
else if (flagb === false && b[d] === "f" && d < c - 6 && b[d + 1] === "i" && b[d + 2] === "l" && b[d + 3] === "t" && b[d + 4] === "e" && b[d + 5] === "r" && b[d + 6] === ":") { | |
flagb = true; | |
} | |
} | |
else if (flaga === true && b[d] === "*" && d !== c - 1 && b[d + 1] === "/") { | |
flaga = false; | |
b[d] = ""; | |
b[d + 1] = ""; | |
} | |
else if (flagb === true && b[d] === ";") { | |
flagb = false; | |
b[d] = ""; | |
} | |
if (flaga === true || flagb === true) { | |
b[d] = ""; | |
} | |
d = d + 1; | |
} while (d < c); | |
} | |
join = b.join(""); | |
if ((/\s\/\//).test(sample) === false && (/\/\/\s/).test(sample) === false && (/^(\s*(\{|\[)(?!%))/).test(sample) === true && (/((\]|\})\s*)$/).test(sample) && sample.indexOf(",") !== -1) { | |
return output("json"); | |
} | |
if ((/((\}?(\(\))?\)*;?\s*)|([a-z0-9]("|')?\)*);?(\s*\})*)$/i).test(sample) === true && (vartest === true || publicprivate === true || (/console\.log\(/).test(sample) === true || (/export\s+default\s+class\s+/).test(sample) === true || (/document\.get/).test(sample) === true || (/((=|(\$\())\s*function)|(\s*function\s+(\w*\s+)?\()/).test(sample) === true || sample.indexOf("{") === -1 || (/^(\s*if\s+\()/).test(sample) === true)) { | |
return javascriptA(); | |
} | |
// * u007b === { | |
// * u0024 === $ | |
// * u002e === . | |
if (sample.indexOf("{") > -1 && ((/^(\s*[\u007b\u0024\u002e#@a-z0-9])/i).test(sample) === true || (/^(\s*\/(\*|\/))/).test(sample) === true || (/^(\s*\*\s*\{)/).test(sample) === true) && (/^(\s*if\s*\()/).test(sample) === false && (/=\s*(\{|\[|\()/).test(join) === false && (((/(\+|-|=|\?)=/).test(join) === false || (/\/\/\s*=+/).test(join) === true) || ((/=+('|")?\)/).test(sample) === true && (/;\s*base64/).test(sample) === true)) && (/function(\s+\w+)*\s*\(/).test(join) === false) { | |
if ((/\s*#((include)|(define)|(endif))\s+/).test(sample)) { | |
return output("c_cpp"); | |
} | |
return cssOrJavaScript(); | |
} | |
if ((/"\s*:\s*\{/).test(sample) === true) { | |
return output("tss"); | |
} | |
if (sample.indexOf("{%") > -1) { | |
return output("twig"); | |
} | |
return output("unknown"); | |
}, markup = function language_auto_markup() { | |
const html = function language_auto_markup_html() { | |
if ((/<%\s*\}/).test(sample) === true) { | |
return output("ejs"); | |
} | |
if ((/<%\s*end/).test(sample) === true) { | |
return output("html_ruby"); | |
} | |
if ((/\{\{(#|\/|\{)/).test(sample) === true) { | |
return output("handlebars"); | |
} | |
if ((/\{\{end\}\}/).test(sample) === true) { | |
//placeholder for Go language templates
return output("html"); | |
} | |
if ((/\s?\{%/).test(sample) === true && (/\{(\{|#)(?!(\{|#|=))/).test(sample) === true) { | |
return output("twig"); | |
} | |
if ((/<\?/).test(sample) === true) { | |
return output("php"); | |
} | |
if ((/<jsp:include\s/).test(sample) === true || (/<c:((set)|(if))\s/).test(sample) === true) { | |
return output("jsp"); | |
} | |
if ((/\{(#|\?|\^|@|<|\+|~)/).test(sample) === true && (/\{\//).test(sample) === true) { | |
return output("dustjs"); | |
} | |
return output("html"); | |
}; | |
if ((/^(\s*<!doctype\u0020html>)/i).test(sample) === true || (/^(\s*<html)/i).test(sample) === true || ((/^(\s*<!DOCTYPE\s+((html)|(HTML))\s+PUBLIC\s+)/).test(sample) === true && (/XHTML\s+1\.1/).test(sample) === false && (/XHTML\s+1\.0\s+(S|s)((trict)|(TRICT))/).test(sample) === false)) { | |
return html(); | |
} | |
if ((/<jsp:include\s/).test(sample) === true || (/<c:((set)|(if))\s/).test(sample) === true) { | |
return output("jsp"); | |
} | |
if ((/<%\s*\}/).test(sample) === true) { | |
return output("ejs"); | |
} | |
if ((/<%\s*end/).test(sample) === true) { | |
return output("html_ruby"); | |
} | |
if ((/\{\{(#|\/|\{)/).test(sample) === true) { | |
return output("handlebars"); | |
} | |
if ((/\{\{end\}\}/).test(sample) === true) { | |
//placeholder for Go language templates
return output("xml"); | |
} | |
if ((/\s?\{%/).test(sample) === true && (/\{\{(?!(\{|#|=))/).test(sample) === true) { | |
return output("twig"); | |
} | |
if ((/<\?(?!(xml))/).test(sample) === true) { | |
return output("php"); | |
} | |
if ((/\{(#|\?|\^|@|<|\+|~)/).test(sample) === true && (/\{\//).test(sample) === true) { | |
return output("dustjs"); | |
} | |
if ((/<jsp:include\s/).test(sample) === true || (/<c:((set)|(if))\s/).test(sample) === true) { | |
return output("jsp"); | |
} | |
if ((/<cfset\s/i).test(sample) === true && (/<cfif\s/i).test(sample) === true) { | |
return output("coldfusion"); | |
} | |
return output("xml"); | |
}; | |
if (sample === null) { | |
return; | |
} | |
if ((/\n#+\s+\w/).test(sample) === true && (/\s\*{1,2}\w+(\s+\w+)*\*{1,2}\s/).test(sample) === true) { | |
return output("markdown"); | |
} | |
if ((/^(\s*<!DOCTYPE\s+html>)/i).test(sample) === true) { | |
return output("html"); | |
} | |
if ((/^(\s*((if)|(for)|(function))\s*\()/).test(sample) === false && (/(\s|;|\})((if)|(for)|(function\s*\w*))\s*\(/).test(sample) === false && vartest === false && (/return\s*\w*\s*(;|\})/).test(sample) === false && (sample === undefined || (/^(\s*#(?!(!\/)))/).test(sample) === true || ((/\n\s*(\.|@)\w+(\(?|(\s*:))/).test(sample) === true && (/>\s*<\w/).test(sample) === false))) { | |
return cssA(); | |
} | |
b = sample | |
.replace(/\[[a-zA-Z][\w-]*=("|')?[a-zA-Z][\w-]*("|')?\]/g, "") | |
.split(""); | |
c = b.length; | |
if ((/^(\s*\{(%|#|\{))/).test(sample) === true) { | |
return markup(); | |
} | |
if (((/^([\s\w-]*<)/).test(sample) === false && (/(>[\s\w-]*)$/).test(sample) === false) || finalstatic === true) { | |
return notmarkup(); | |
} | |
if ((((/(>[\w\s:]*)?<(\/|!|#)?[\w\s:-\[]+/).test(sample) === true || (/^(\s*<\?xml)/).test(sample) === true) && ((/^([\s\w]*<)/).test(sample) === true || (/(>[\s\w]*)$/).test(sample) === true)) || ((/^(\s*<s((cript)|(tyle)))/i).test(sample) === true && (/(<\/s((cript)|(tyle))>\s*)$/i).test(sample) === true)) { | |
if ((/^([\s\w]*<)/).test(sample) === false || (/(>[\s\w]*)$/).test(sample) === false) { | |
return notmarkup(); | |
} | |
return markup(); | |
} | |
return output("unknown"); | |
}, | |
nameproper: function language_nameproper(input) { | |
const langmap = { | |
c_cpp: "C++ (Not yet supported)", | |
coldfusion: "ColdFusion", | |
csharp: "C#", | |
dustjs: "Dust.js", | |
ejs: "EJS Template", | |
elm: "Elm Template", | |
go: "Go Lang Template", | |
handlebars: "Handlebars Template", | |
html_ruby: "ERB (Ruby) Template", | |
java: "Java", | |
javascript: "JavaScript", | |
jsp: "JSTL (JSP)", | |
jsx: "React JSX", | |
liquid: "Liquid Template", | |
markup: "markup", | |
scss: "SCSS", | |
text: "Plain Text", | |
titanium: "Titanium Stylesheets", | |
tss: "Titanium Stylesheets", | |
twig: "HTML TWIG Template", | |
typescript: "TypeScript", | |
velocity: "Apache Velocity", | |
volt: "Volt Template" | |
}; | |
// guard the type check separately so a non-string input cannot throw on toUpperCase
if (typeof input !== "string") {
return String(input).toUpperCase();
}
if (langmap[input] === undefined) {
return input.toUpperCase();
}
return langmap[input];
}, | |
// The array returned from language.auto has three items:
// [0] = language value for the ace mode, [1] = prettydiff language category derived from [0],
// [2] = properly formatted language name for text output to the user (see the usage sketch after this module)
setlangmode: function language_setlangmode(input) { | |
const langmap = { | |
c_cpp: "script", | |
coldfusion: "markup", | |
csharp: "script", | |
css: "style", | |
csv: "csv", | |
dustjs: "markup", | |
ejs: "markup", | |
go: "markup", | |
handlebars: "markup", | |
html: "markup", | |
html_ruby: "markup", | |
java: "script", | |
javascript: "script", | |
json: "script", | |
jsp: "markup", | |
jsx: "script", | |
less: "style", | |
markup: "markup", | |
php: "markup", | |
qml: "markup", | |
scss: "style", | |
swig: "markup", | |
text: "text", | |
titanium: "script", | |
tss: "script", | |
twig: "markup", | |
typescript: "script", | |
velocity: "markup", | |
xhtml: "markup", | |
xml: "markup" | |
}; | |
if (typeof input !== "string") { | |
return "script"; | |
} | |
if (input.indexOf("html") > -1) { | |
return "markup"; | |
} | |
if (langmap[input] === undefined) { | |
return "script"; | |
} | |
return langmap[input]; | |
} | |
}; | |
window.parseFramework.language = language; | |
}()); |
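/*
Usage sketch, not part of the gist source: how the auto detection exported above might be
called from a browser. The sample string and the demo function name are invented for
illustration; the shape of the returned array follows the output helper inside language.auto.
*/
(function language_demo() {
    "use strict";
    const guess = window.parseFramework.language.auto("{{#each items}}<li>{{name}}</li>{{/each}}", "javascript");
    // guess[0] = language name used for the ace mode (e.g. "handlebars" for this sample)
    // guess[1] = prettydiff lexer category from setlangmode (e.g. "markup")
    // guess[2] = readable name from nameproper (e.g. "Handlebars Template")
    console.log(guess);
}());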