/******/ (function(modules) { // webpackBootstrap /******/ // The module cache /******/ var installedModules = {}; /******/ /******/ // The require function /******/ function __webpack_require__(moduleId) { /******/ /******/ // Check if module is in cache /******/ if(installedModules[moduleId]) { /******/ return installedModules[moduleId].exports; /******/ } /******/ // Create a new module (and put it into the cache) /******/ var module = installedModules[moduleId] = { /******/ i: moduleId, /******/ l: false, /******/ exports: {} /******/ }; /******/ /******/ // Execute the module function /******/ modules[moduleId].call(module.exports, module, module.exports, __webpack_require__); /******/ /******/ // Flag the module as loaded /******/ module.l = true; /******/ /******/ // Return the exports of the module /******/ return module.exports; /******/ } /******/ /******/ /******/ // expose the modules object (__webpack_modules__) /******/ __webpack_require__.m = modules; /******/ /******/ // expose the module cache /******/ __webpack_require__.c = installedModules; /******/ /******/ // define getter function for harmony exports /******/ __webpack_require__.d = function(exports, name, getter) { /******/ if(!__webpack_require__.o(exports, name)) { /******/ Object.defineProperty(exports, name, { enumerable: true, get: getter }); /******/ } /******/ }; /******/ /******/ // define __esModule on exports /******/ __webpack_require__.r = function(exports) { /******/ if(typeof Symbol !== 'undefined' && Symbol.toStringTag) { /******/ Object.defineProperty(exports, Symbol.toStringTag, { value: 'Module' }); /******/ } /******/ Object.defineProperty(exports, '__esModule', { value: true }); /******/ }; /******/ /******/ // create a fake namespace object /******/ // mode & 1: value is a module id, require it /******/ // mode & 2: merge all properties of value into the ns /******/ // mode & 4: return value when already ns object /******/ // mode & 8|1: behave like require /******/ __webpack_require__.t = function(value, mode) { /******/ if(mode & 1) value = __webpack_require__(value); /******/ if(mode & 8) return value; /******/ if((mode & 4) && typeof value === 'object' && value && value.__esModule) return value; /******/ var ns = Object.create(null); /******/ __webpack_require__.r(ns); /******/ Object.defineProperty(ns, 'default', { enumerable: true, value: value }); /******/ if(mode & 2 && typeof value != 'string') for(var key in value) __webpack_require__.d(ns, key, function(key) { return value[key]; }.bind(null, key)); /******/ return ns; /******/ }; /******/ /******/ // getDefaultExport function for compatibility with non-harmony modules /******/ __webpack_require__.n = function(module) { /******/ var getter = module && module.__esModule ? /******/ function getDefault() { return module['default']; } : /******/ function getModuleExports() { return module; }; /******/ __webpack_require__.d(getter, 'a', getter); /******/ return getter; /******/ }; /******/ /******/ // Object.prototype.hasOwnProperty.call /******/ __webpack_require__.o = function(object, property) { return Object.prototype.hasOwnProperty.call(object, property); }; /******/ /******/ // __webpack_public_path__ /******/ __webpack_require__.p = ""; /******/ /******/ /******/ // Load entry module and return exports /******/ return __webpack_require__(__webpack_require__.s = 0); /******/ }) /************************************************************************/ /******/ ([ /* 0 */ /***/ (function(module, exports, __webpack_require__) { "use strict"; Object.defineProperty(exports, "__esModule", { value: true }); __webpack_require__(1); const tokenizer_1 = __webpack_require__(2); /* $(function() { }); */ const opts = new tokenizer_1.TokenizerOptions(); const result = tokenizer_1.tokenize(` // H2R supports // # and /**/ as comments // A group is only captured if given a name. // You can use "and", "or", "not" to specify "[]" regex // You can use "then" to combine match statements, however I find using multiple "match" statements easier to read // exact matching means use a ^ and $ to signify the start and end of the string using global and exact matching create an optional group called "protocol" match "http" optionally match "s" match "://" create a group called "subdomain" repeat match 1+ words match "." create a group called "domain" match 1+ words or "_" or "-" match "." match a word # port, but we don't care about it, so ignore it optionally match ":" then 0+ digits create an optional group called "path" repeat match "/" match 0+ words or "_" or "-" create an optional group # we don't want to capture the '?', so don't name the group until afterwards match "?" create a group called "query" repeat match 1+ words or "_" or "-" match "=" match 1+ words or "_" or "-" create an optional group # fragment, again, we don't care, so ignore everything afterwards match "#" match 0+ any thing `, opts); for (const r of result.tokens) { console.log(r.to_string()); } console.log(result.errors); /***/ }), /* 1 */ /***/ (function(module, __webpack_exports__, __webpack_require__) { "use strict"; __webpack_require__.r(__webpack_exports__); // extracted by mini-css-extract-plugin /***/ }), /* 2 */ /***/ (function(module, exports, __webpack_require__) { "use strict"; /*! Copyright (c) 2020 Patrick Demian; Licensed under MIT */ Object.defineProperty(exports, "__esModule", { value: true }); exports.tokenize = exports.TokenizerOptions = void 0; // TODO: replace every version of switch() with switch(.charCodeAt(0)) const tokens_1 = __webpack_require__(3); const keywords = { /* Full Keywords */ "optional": tokens_1.TokenType.KEYWORD_OPTIONAL, "optionally": tokens_1.TokenType.KEYWORD_OPTIONAL, "match": tokens_1.TokenType.KEYWORD_MATCH, "matches": tokens_1.TokenType.KEYWORD_MATCH, "then": tokens_1.TokenType.KEYWORD_THEN, "any": tokens_1.TokenType.KEYWORD_ANY, "anything": tokens_1.TokenType.KEYWORD_ANY, "anythings": tokens_1.TokenType.KEYWORD_ANY, "of": tokens_1.TokenType.KEYWORD_OF, "or": tokens_1.TokenType.KEYWORD_OR, "and": tokens_1.TokenType.KEYWORD_AND, "word": tokens_1.TokenType.KEYWODE_WORD_SPECIFIER, "digit": tokens_1.TokenType.KEYWORD_DIGIT_SPECIFIER, "character": tokens_1.TokenType.KEYWORD_CHAR_SPECIFIER, "whitespace": tokens_1.TokenType.KEYWORD_WHITESPACE_SPECIFIER, "number": tokens_1.TokenType.KEYWORD_NUMBER_SPECIFIER, "words": tokens_1.TokenType.KEYWODE_WORD_SPECIFIER, "digits": tokens_1.TokenType.KEYWORD_DIGIT_SPECIFIER, "characters": tokens_1.TokenType.KEYWORD_CHAR_SPECIFIER, "whitespaces": tokens_1.TokenType.KEYWORD_WHITESPACE_SPECIFIER, "numbers": tokens_1.TokenType.KEYWORD_NUMBER_SPECIFIER, "multiple": tokens_1.TokenType.KEYWORD_MULTIPLE, "as": tokens_1.TokenType.KEYWORD_AS, "if": tokens_1.TokenType.KEYWORD_IF, "start": tokens_1.TokenType.KEYWORD_STARTS, "starts": tokens_1.TokenType.KEYWORD_STARTS, "with": tokens_1.TokenType.KEYWORD_WITH, "ends": tokens_1.TokenType.KEYWORD_ENDS, "otherwise": tokens_1.TokenType.KEYWORD_ELSE, "else": tokens_1.TokenType.KEYWORD_ELSE, "unless": tokens_1.TokenType.KEYWORD_UNLESS, "while": tokens_1.TokenType.KEYWORD_WHILE, "more": tokens_1.TokenType.KEYWORD_MORE, "using": tokens_1.TokenType.KEYWORD_USING, "global": tokens_1.TokenType.KEYWORD_GLOBAL, "multiline": tokens_1.TokenType.KEYWORD_MULTILINE, "exact": tokens_1.TokenType.KEYWORD_EXACT, "matching": tokens_1.TokenType.KEYWORD_MATCHING, "not": tokens_1.TokenType.KEYWORD_NOT, "between": tokens_1.TokenType.KEYWORD_BETWEEN, "tab": tokens_1.TokenType.KEYWORD_TAB, "linefeed": tokens_1.TokenType.KEYWORD_LINEFEED, "group": tokens_1.TokenType.KEYWORD_GROUP, "by": tokens_1.TokenType.KEYWORD_BY, "an": tokens_1.TokenType.KEYWORD_ARTICLE, "a": tokens_1.TokenType.KEYWORD_ARTICLE, "the": tokens_1.TokenType.KEYWORD_ARTICLE, "exactly": tokens_1.TokenType.KEYWORD_EXACTLY, "inclusive": tokens_1.TokenType.KEYWORD_INCLUSIVE, "inclusively": tokens_1.TokenType.KEYWORD_INCLUSIVE, "exclusive": tokens_1.TokenType.KEYWORD_EXCLUSIVE, "exclusively": tokens_1.TokenType.KEYWORD_EXCLUSIVE, "from": tokens_1.TokenType.KEYWORD_FROM, "to": tokens_1.TokenType.KEYWORD_TO, "create": tokens_1.TokenType.KEYWORD_CREATE, "creates": tokens_1.TokenType.KEYWORD_CREATE, "called": tokens_1.TokenType.KEYWORD_CALLED, "repeat": tokens_1.TokenType.KEYWORD_REPEAT, "repeats": tokens_1.TokenType.KEYWORD_REPEAT, "newline": tokens_1.TokenType.KEYWORD_NEWLINE, "none": tokens_1.TokenType.KEYWORD_NONE, "neither": tokens_1.TokenType.KEYWORD_NEITHER, /* Partial keywords */ "thing": tokens_1.TokenType.PARTIAL_KEYWORD, "things": tokens_1.TokenType.PARTIAL_KEYWORD, "white": tokens_1.TokenType.PARTIAL_KEYWORD, "space": tokens_1.TokenType.PARTIAL_KEYWORD, "spaces": tokens_1.TokenType.PARTIAL_KEYWORD, "other": tokens_1.TokenType.PARTIAL_KEYWORD, "wise": tokens_1.TokenType.PARTIAL_KEYWORD, "multi": tokens_1.TokenType.PARTIAL_KEYWORD, "new": tokens_1.TokenType.PARTIAL_KEYWORD, "line": tokens_1.TokenType.PARTIAL_KEYWORD, "feed": tokens_1.TokenType.PARTIAL_KEYWORD, "carriage": tokens_1.TokenType.PARTIAL_KEYWORD, "return": tokens_1.TokenType.PARTIAL_KEYWORD, "case": tokens_1.TokenType.PARTIAL_KEYWORD, "insensitive": tokens_1.TokenType.PARTIAL_KEYWORD, "sensitive": tokens_1.TokenType.PARTIAL_KEYWORD }; const numbers = { "zero": "0", "one": "1", "two": "2", "three": "3", "four": "4", "five": "5", "six": "6", "seven": "7", "eight": "8", "nine": "9", "ten": "10" }; const token_transformations = { "thing": [{ preceeding_token: "any", transforms_to: tokens_1.TokenType.KEYWORD_ANY }], "things": [{ preceeding_token: "any", transforms_to: tokens_1.TokenType.KEYWORD_ANY }], "space": [{ preceeding_token: "white", transforms_to: tokens_1.TokenType.KEYWORD_WHITESPACE_SPECIFIER }], "spaces": [{ preceeding_token: "white", transforms_to: tokens_1.TokenType.KEYWORD_WHITESPACE_SPECIFIER }], "wise": [{ preceeding_token: "other", transforms_to: tokens_1.TokenType.KEYWORD_ELSE }], "line": [{ preceeding_token: "multi", transforms_to: tokens_1.TokenType.KEYWORD_MULTILINE }, { preceeding_token: "new", transforms_to: tokens_1.TokenType.KEYWORD_NEWLINE }], "feed": [{ preceeding_token: "line", transforms_to: tokens_1.TokenType.KEYWORD_LINEFEED }], "return": [{ preceeding_token: "carriage", transforms_to: tokens_1.TokenType.KEYWORD_CARRIAGE_RETURN }], "sensitive": [{ preceeding_token: "case", transforms_to: tokens_1.TokenType.KEYWORD_CASE_SENSITIVE }], "insensitive": [{ preceeding_token: "case", transforms_to: tokens_1.TokenType.KEYWORD_CASE_INSENSITIVE }], }; const escape_sequences = { "a": "\a", "b": "\b", "e": "\e", "f": "\f", "n": "\n", "r": "\r", "t": "\t", "'": "'", "\"": '"', "\\": "\\", }; const escape_sequence_hex_regex = new RegExp(/[0-9A-Fa-f]/g); function escape_sequence_gather_hex(input, i, max) { let hex = ""; for (i++; i < input.length && max-- > 0; i++) { if (escape_sequence_hex_regex.test(input[i])) { hex += input[i]; } } return hex; } function escape_sequence_mapper(input, i) { if (escape_sequences[input[i]]) { return { code: escape_sequences[input[i]], read: 1 }; } //variable hex code else if (input[i] === "x") { const hex = escape_sequence_gather_hex(input, ++i, 4); return { code: String.fromCharCode(parseInt(hex, 16)), read: hex.length + 1 }; } //4 hex unicode else if (input[i] === "u") { const unicode = escape_sequence_gather_hex(input, ++i, 4); if (unicode.length !== 4) { return { code: "", read: unicode.length + 1, error: new Error("Bad escape sequence") }; } else { return { code: String.fromCharCode(parseInt(unicode, 16)), read: 5 }; } } else if (input[i] === "U") { const unicode = escape_sequence_gather_hex(input, ++i, 8); if (unicode.length !== 8) { return { code: "", read: unicode.length + 1, error: new Error("Bad escape sequence") }; } else { return { code: String.fromCharCode(parseInt(unicode, 16)), read: 9 }; } } else { // should throw an exception, but gonna just ignore it return { code: input[i], read: 1 }; } } const test_char_0 = "0".charCodeAt(0); const test_char_9 = "9".charCodeAt(0); const test_char_a = "a".charCodeAt(0); const test_char_z = "z".charCodeAt(0); const test_char_A = "A".charCodeAt(0); const test_char_Z = "Z".charCodeAt(0); function is_digit(input, i) { const value = input.charCodeAt(i); return value >= test_char_0 && value <= test_char_9; } function is_char(input, i) { const value = input.charCodeAt(i); return ((value >= test_char_a && value <= test_char_z) || (value >= test_char_A && value <= test_char_Z)); } function transform_tokens(tokens, errors) { for (let i = 0; i < tokens.length; i++) { //check past tokens: if it matches the preceeding tokens, we transform it. if (tokens[i].type === tokens_1.TokenType.PARTIAL_KEYWORD && token_transformations[tokens[i].token_string]) { const transform = token_transformations[tokens[i].token_string]; for (let j = 0; j < transform.length; j++) { if (i - 1 >= 0 && transform[j].preceeding_token === tokens[i - 1].token_string) { // use the i-1 token because it has the start line and position tokens[i - 1].type = transform[j].transforms_to; tokens[i - 1].token_string += " " + tokens[i].token_string; tokens.splice(i, 1); // remove this token i--; // move token counter back because we removed the token break; } } } /* else ignore */ } // do we still have partial tokens? those are errors then for (let i = 0; i < tokens.length; i++) { if (tokens[i].type === tokens_1.TokenType.PARTIAL_KEYWORD) { errors.push(new tokens_1.TokenError(`Unknown keyword "${tokens[i].token_string}"`, tokens[i].line, tokens[i].position)); } } } class TokenizerOptions { constructor() { this.convert_spaces_to_tabs = true; } } exports.TokenizerOptions = TokenizerOptions; /* Basic Tokenizer */ function tokenize(input, options) { let line = 1; let position = 1; const tokens = []; const errors = []; // gather tokens for (let i = 0; i < input.length; i++, position++) { // 4 spaces = 1 tab. That is final. Debate over if (options.convert_spaces_to_tabs && input.startsWith(" ", i)) { tokens.push(new tokens_1.Token(tokens_1.TokenType.INDENT, line, position, 4)); i += 3; position += 3; } // between (ex: 0...3 or 0-3) else if (input.startsWith("...", i)) { tokens.push(new tokens_1.Token(tokens_1.TokenType.BETWEEN, line, position, 3)); i += 2; position += 2; } else if (input.startsWith("..", i)) { tokens.push(new tokens_1.Token(tokens_1.TokenType.BETWEEN, line, position, 3)); i++; position++; } // comments else if (input.startsWith("//", i)) { for (i++, position++; i < input.length; i++, position++) { if (input[i] === "\n") { tokens.push(new tokens_1.Token(tokens_1.TokenType.END_OF_STATEMENT, line, position, -1)); break; } } line++; position = 0; } else if (input.startsWith("/*", i)) { for (i++, position++; i < input.length - 1; i++, position++) { if (input[i] === "*" && input[i + 1] === "/") { i++; position++; break; } if (input[i] === "\n") { line++; position = 0; } } if (i === input.length - 1) { errors.push(new tokens_1.TokenError("Unexpected EOF", line, position)); } else { line++; position = 0; } } else if (input.startsWith("\r\n", i)) { tokens.push(new tokens_1.Token(tokens_1.TokenType.END_OF_STATEMENT, line, position, -1)); i++; line++; position = 0; } else { switch (input[i]) { // comment case "#": for (i++, position++; i < input.length; i++, position++) { if (input[i] === "\n") { tokens.push(new tokens_1.Token(tokens_1.TokenType.END_OF_STATEMENT, line, position, -1)); line++; position = 0; break; } } break; // quote case '"': case '\"': { // build up a word between quotes const quote_begin = { line: line, position: position }; const quote_char = input[i]; let found_ending = false; let quote = ""; do { i++; position++; if (input[i] === "\\") { i++; position++; const sequence = escape_sequence_mapper(input, i); if (sequence.error) { errors.push(new tokens_1.TokenError(sequence.error.message, line, position)); } position += sequence.read; i += sequence.read; quote += sequence.code; } else if (input[i] === quote_char) { found_ending = true; break; } else if (input[i] === "\n") { line++; position = 0; break; } else { quote += input[i]; } } while (i < input.length); if (found_ending) { tokens.push(new tokens_1.Token(tokens_1.TokenType.QUOTE, line, position, quote.length + 2, quote)); } else { //we reached the end of the line or the end of the file errors.push(new tokens_1.TokenError(`Unexpected end of quote. Quote began at ${quote_begin.line}:${quote_begin.position}`, line, position)); line++; position = 0; } break; } // between (ex: 0...3 or 0-3) case "-": tokens.push(new tokens_1.Token(tokens_1.TokenType.BETWEEN, line, position, 1)); break; case "+": tokens.push(new tokens_1.Token(tokens_1.TokenType.KEYWORD_OR, line, position, 1)); tokens.push(new tokens_1.Token(tokens_1.TokenType.KEYWORD_MORE, line, position, 0)); break; case "\n": tokens.push(new tokens_1.Token(tokens_1.TokenType.END_OF_STATEMENT, line, position, -1)); line++; position = 0; break; case "\r": // ignore break; case "\t": tokens.push(new tokens_1.Token(tokens_1.TokenType.INDENT, line, position, 1)); break; case " ": // ignore break; default: // is digit? build up a number if (is_digit(input, i)) { const digit_begin = position; let digits = input[i]; for (; i + 1 < input.length && is_digit(input, i + 1); i++, position++) { digits += input[i + 1]; } tokens.push(new tokens_1.Token(tokens_1.TokenType.NUMBER, line, digit_begin, position - digit_begin + 1, digits)); } // is char? build up a word else if (is_char(input, i)) { const word_begin = position; let text = input[i]; for (; i + 1 < input.length && is_char(input, i + 1); i++, position++) { text += input[i + 1]; } const keyword_text = text.toLowerCase(); // keyword (ex. "match") if (keywords[keyword_text]) { tokens.push(new tokens_1.Token(keywords[keyword_text], line, word_begin, position - word_begin + 1, keyword_text)); } // text number (ex. "one") else if (numbers[keyword_text]) { tokens.push(new tokens_1.Token(tokens_1.TokenType.NUMBER, line, word_begin, position - word_begin + 1, keyword_text)); } else { errors.push(new tokens_1.TokenError(`Unknown keyword "${text}"`, line, word_begin)); } } else { errors.push(new tokens_1.TokenError(`Unknown character in text: "${input[i]}" (${input.charCodeAt(i)})`, line, position)); } break; } } } // transform tokens transform_tokens(tokens, errors); return { tokens: tokens, errors: errors }; } exports.tokenize = tokenize; /***/ }), /* 3 */ /***/ (function(module, exports, __webpack_require__) { "use strict"; Object.defineProperty(exports, "__esModule", { value: true }); exports.Token = exports.TokenError = exports.TokenType = void 0; var TokenType; (function (TokenType) { TokenType[TokenType["END_OF_STATEMENT"] = 0] = "END_OF_STATEMENT"; TokenType[TokenType["INDENT"] = 1] = "INDENT"; TokenType[TokenType["BETWEEN"] = 2] = "BETWEEN"; TokenType[TokenType["QUOTE"] = 3] = "QUOTE"; TokenType[TokenType["NUMBER"] = 4] = "NUMBER"; TokenType[TokenType["PARTIAL_KEYWORD"] = 5] = "PARTIAL_KEYWORD"; TokenType[TokenType["KEYWORD_BETWEEN"] = 6] = "KEYWORD_BETWEEN"; TokenType[TokenType["KEYWORD_OPTIONAL"] = 7] = "KEYWORD_OPTIONAL"; TokenType[TokenType["KEYWORD_MATCH"] = 8] = "KEYWORD_MATCH"; TokenType[TokenType["KEYWORD_THEN"] = 9] = "KEYWORD_THEN"; TokenType[TokenType["KEYWORD_AND"] = 10] = "KEYWORD_AND"; TokenType[TokenType["KEYWORD_OR"] = 11] = "KEYWORD_OR"; TokenType[TokenType["KEYWORD_ANY"] = 12] = "KEYWORD_ANY"; TokenType[TokenType["KEYWORD_OF"] = 13] = "KEYWORD_OF"; TokenType[TokenType["KEYWORD_NONE"] = 14] = "KEYWORD_NONE"; TokenType[TokenType["KEYWORD_NEITHER"] = 15] = "KEYWORD_NEITHER"; TokenType[TokenType["KEYWODE_WORD_SPECIFIER"] = 16] = "KEYWODE_WORD_SPECIFIER"; TokenType[TokenType["KEYWORD_DIGIT_SPECIFIER"] = 17] = "KEYWORD_DIGIT_SPECIFIER"; TokenType[TokenType["KEYWORD_CHAR_SPECIFIER"] = 18] = "KEYWORD_CHAR_SPECIFIER"; TokenType[TokenType["KEYWORD_WHITESPACE_SPECIFIER"] = 19] = "KEYWORD_WHITESPACE_SPECIFIER"; TokenType[TokenType["KEYWORD_NUMBER_SPECIFIER"] = 20] = "KEYWORD_NUMBER_SPECIFIER"; TokenType[TokenType["KEYWORD_MULTIPLE"] = 21] = "KEYWORD_MULTIPLE"; TokenType[TokenType["KEYWORD_AS"] = 22] = "KEYWORD_AS"; TokenType[TokenType["KEYWORD_IF"] = 23] = "KEYWORD_IF"; TokenType[TokenType["KEYWORD_STARTS"] = 24] = "KEYWORD_STARTS"; TokenType[TokenType["KEYWORD_WITH"] = 25] = "KEYWORD_WITH"; TokenType[TokenType["KEYWORD_ENDS"] = 26] = "KEYWORD_ENDS"; TokenType[TokenType["KEYWORD_ELSE"] = 27] = "KEYWORD_ELSE"; TokenType[TokenType["KEYWORD_UNLESS"] = 28] = "KEYWORD_UNLESS"; TokenType[TokenType["KEYWORD_WHILE"] = 29] = "KEYWORD_WHILE"; TokenType[TokenType["KEYWORD_MORE"] = 30] = "KEYWORD_MORE"; TokenType[TokenType["KEYWORD_USING"] = 31] = "KEYWORD_USING"; TokenType[TokenType["KEYWORD_GLOBAL"] = 32] = "KEYWORD_GLOBAL"; TokenType[TokenType["KEYWORD_MULTILINE"] = 33] = "KEYWORD_MULTILINE"; TokenType[TokenType["KEYWORD_EXACT"] = 34] = "KEYWORD_EXACT"; TokenType[TokenType["KEYWORD_MATCHING"] = 35] = "KEYWORD_MATCHING"; TokenType[TokenType["KEYWORD_NOT"] = 36] = "KEYWORD_NOT"; TokenType[TokenType["KEYWORD_TAB"] = 37] = "KEYWORD_TAB"; TokenType[TokenType["KEYWORD_LINEFEED"] = 38] = "KEYWORD_LINEFEED"; TokenType[TokenType["KEYWORD_CARRIAGE_RETURN"] = 39] = "KEYWORD_CARRIAGE_RETURN"; TokenType[TokenType["KEYWORD_GROUP"] = 40] = "KEYWORD_GROUP"; TokenType[TokenType["KEYWORD_BY"] = 41] = "KEYWORD_BY"; TokenType[TokenType["KEYWORD_ARTICLE"] = 42] = "KEYWORD_ARTICLE"; TokenType[TokenType["KEYWORD_EXACTLY"] = 43] = "KEYWORD_EXACTLY"; TokenType[TokenType["KEYWORD_INCLUSIVE"] = 44] = "KEYWORD_INCLUSIVE"; TokenType[TokenType["KEYWORD_EXCLUSIVE"] = 45] = "KEYWORD_EXCLUSIVE"; TokenType[TokenType["KEYWORD_FROM"] = 46] = "KEYWORD_FROM"; TokenType[TokenType["KEYWORD_TO"] = 47] = "KEYWORD_TO"; TokenType[TokenType["KEYWORD_CREATE"] = 48] = "KEYWORD_CREATE"; TokenType[TokenType["KEYWORD_CALLED"] = 49] = "KEYWORD_CALLED"; TokenType[TokenType["KEYWORD_REPEAT"] = 50] = "KEYWORD_REPEAT"; TokenType[TokenType["KEYWORD_NEWLINE"] = 51] = "KEYWORD_NEWLINE"; TokenType[TokenType["KEYWORD_CASE_SENSITIVE"] = 52] = "KEYWORD_CASE_SENSITIVE"; TokenType[TokenType["KEYWORD_CASE_INSENSITIVE"] = 53] = "KEYWORD_CASE_INSENSITIVE"; })(TokenType = exports.TokenType || (exports.TokenType = {})); class TokenError extends Error { constructor(message, line, position) { super(message); this.line = line; this.position = position; } to_string() { return `Token Error: ${this.line}:${this.position} ${this.message}`; } } exports.TokenError = TokenError; class Token { constructor(type, line, position, length, token_string) { this.type = type; this.line = line; this.position = position; this.length = length; this.token_string = token_string; /* nothing required */ } to_string() { let str = `${this.line}:${this.position} ${TokenType[this.type]}`; if (this.token_string) { str += ` "${this.token_string}"`; } str += ` (size: ${this.length})`; return str; } } exports.Token = Token; /***/ }) /******/ ]);