mirror of
https://github.com/pdemian/human2regex.git
synced 2025-05-16 04:20:35 -07:00
/******/ (function(modules) { // webpackBootstrap
/******/ // The module cache
/******/ var installedModules = {};
/******/
/******/ // The require function
/******/ function __webpack_require__(moduleId) {
/******/
/******/ // Check if module is in cache
/******/ if(installedModules[moduleId]) {
/******/ return installedModules[moduleId].exports;
/******/ }
/******/ // Create a new module (and put it into the cache)
/******/ var module = installedModules[moduleId] = {
/******/ i: moduleId,
/******/ l: false,
/******/ exports: {}
/******/ };
/******/
/******/ // Execute the module function
/******/ modules[moduleId].call(module.exports, module, module.exports, __webpack_require__);
/******/
/******/ // Flag the module as loaded
/******/ module.l = true;
/******/
/******/ // Return the exports of the module
/******/ return module.exports;
/******/ }
/******/
/******/
/******/ // expose the modules object (__webpack_modules__)
/******/ __webpack_require__.m = modules;
/******/
/******/ // expose the module cache
/******/ __webpack_require__.c = installedModules;
/******/
/******/ // define getter function for harmony exports
/******/ __webpack_require__.d = function(exports, name, getter) {
/******/ if(!__webpack_require__.o(exports, name)) {
/******/ Object.defineProperty(exports, name, { enumerable: true, get: getter });
/******/ }
/******/ };
/******/
/******/ // define __esModule on exports
/******/ __webpack_require__.r = function(exports) {
/******/ if(typeof Symbol !== 'undefined' && Symbol.toStringTag) {
/******/ Object.defineProperty(exports, Symbol.toStringTag, { value: 'Module' });
/******/ }
/******/ Object.defineProperty(exports, '__esModule', { value: true });
/******/ };
/******/
/******/ // create a fake namespace object
/******/ // mode & 1: value is a module id, require it
/******/ // mode & 2: merge all properties of value into the ns
/******/ // mode & 4: return value when already ns object
/******/ // mode & 8|1: behave like require
/******/ __webpack_require__.t = function(value, mode) {
/******/ if(mode & 1) value = __webpack_require__(value);
/******/ if(mode & 8) return value;
/******/ if((mode & 4) && typeof value === 'object' && value && value.__esModule) return value;
/******/ var ns = Object.create(null);
/******/ __webpack_require__.r(ns);
/******/ Object.defineProperty(ns, 'default', { enumerable: true, value: value });
/******/ if(mode & 2 && typeof value != 'string') for(var key in value) __webpack_require__.d(ns, key, function(key) { return value[key]; }.bind(null, key));
/******/ return ns;
/******/ };
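/******/ // For example, __webpack_require__.t(id, 7) (7 = 1|2|4) requires the module, returns it
/******/ // unchanged when it is already an ES module namespace object, and otherwise wraps its
/******/ // exports in a fresh namespace whose 'default' property is the original export value.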
/******/
/******/ // getDefaultExport function for compatibility with non-harmony modules
/******/ __webpack_require__.n = function(module) {
/******/ var getter = module && module.__esModule ?
/******/ function getDefault() { return module['default']; } :
/******/ function getModuleExports() { return module; };
/******/ __webpack_require__.d(getter, 'a', getter);
/******/ return getter;
/******/ };
/******/
/******/ // Object.prototype.hasOwnProperty.call
/******/ __webpack_require__.o = function(object, property) { return Object.prototype.hasOwnProperty.call(object, property); };
/******/
/******/ // __webpack_public_path__
/******/ __webpack_require__.p = "";
/******/
/******/
/******/ // Load entry module and return exports
/******/ return __webpack_require__(__webpack_require__.s = 0);
/******/ })
/************************************************************************/
/******/ ([
/* 0 */
/***/ (function(module, exports, __webpack_require__) {

"use strict";

Object.defineProperty(exports, "__esModule", { value: true });
__webpack_require__(1);
const tokenizer_1 = __webpack_require__(2);
/*
$(function() {

});
*/
const opts = new tokenizer_1.TokenizerOptions();
const res = tokenizer_1.tokenize("match 1+ thing from thingy", opts);
console.log(res);
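// `res` has the shape { tokens: Token[], errors: TokenError[] } (see the tokenizer in module 2 and
// the Token/TokenError classes in module 3), so a quick sanity check could look like:
//
//   if (res.errors.length > 0) {
//       res.errors.forEach((err) => console.error(err.to_string()));
//   }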

/***/ }),
/* 1 */
/***/ (function(module, __webpack_exports__, __webpack_require__) {

"use strict";
__webpack_require__.r(__webpack_exports__);
// extracted by mini-css-extract-plugin


/***/ }),
/* 2 */
/***/ (function(module, exports, __webpack_require__) {

"use strict";

/*! Copyright (c) 2020 Patrick Demian; Licensed under MIT */
Object.defineProperty(exports, "__esModule", { value: true });
exports.tokenize = exports.TokenizerOptions = void 0;
// TODO: replace every version of switch(<some string>) with switch(<some string>.charCodeAt(0))
const tokens_1 = __webpack_require__(3);
const keywords = {
    "optional": tokens_1.TokenType.KEYWORD_OPTIONAL,
    "optionally": tokens_1.TokenType.KEYWORD_OPTIONAL,
    "match": tokens_1.TokenType.KEYWORD_MATCH,
    "then": tokens_1.TokenType.KEYWORD_THEN,
    "any": tokens_1.TokenType.KEYWORD_ANY,
    "anything": tokens_1.TokenType.KEYWORD_ANY,
    "of": tokens_1.TokenType.KEYWORD_OF,
    "or": tokens_1.TokenType.KEYWORD_OR,
    "and": tokens_1.TokenType.KEYWORD_AND,
    "word": tokens_1.TokenType.KEYWORD_WORD_SPECIFIER,
    "digit": tokens_1.TokenType.KEYWORD_DIGIT_SPECIFIER,
    "character": tokens_1.TokenType.KEYWORD_CHAR_SPECIFIER,
    "whitespace": tokens_1.TokenType.KEYWORD_WHITESPACE_SPECIFIER,
    "number": tokens_1.TokenType.KEYWORD_NUMBER_SPECIFIER,
    "multiple": tokens_1.TokenType.KEYWORD_MULTIPLE,
    "as": tokens_1.TokenType.KEYWORD_AS,
    "if": tokens_1.TokenType.KEYWORD_IF,
    "starts": tokens_1.TokenType.KEYWORD_STARTS,
    "with": tokens_1.TokenType.KEYWORD_WITH,
    "ends": tokens_1.TokenType.KEYWORD_ENDS,
    "otherwise": tokens_1.TokenType.KEYWORD_ELSE,
    "else": tokens_1.TokenType.KEYWORD_ELSE,
    "unless": tokens_1.TokenType.KEYWORD_UNLESS,
    "while": tokens_1.TokenType.KEYWORD_WHILE,
    "more": tokens_1.TokenType.KEYWORD_MORE,
    "using": tokens_1.TokenType.KEYWORD_USING,
    "global": tokens_1.TokenType.KEYWORD_GLOBAL,
    "multiline": tokens_1.TokenType.KEYWORD_MULTILINE,
    "exact": tokens_1.TokenType.KEYWORD_EXACT,
    "matching": tokens_1.TokenType.KEYWORD_MATCHING,
    "not": tokens_1.TokenType.KEYWORD_NOT,
    "between": tokens_1.TokenType.KEYWORD_BETWEEN,
    "tab": tokens_1.TokenType.KEYWORD_TAB,
    "linefeed": tokens_1.TokenType.KEYWORD_LINEFEED,
    "carriage": tokens_1.TokenType.KEYWORD_CARRIAGE,
    "return": tokens_1.TokenType.KEYWORD_RETURN,
    "group": tokens_1.TokenType.KEYWORD_GROUP,
    "by": tokens_1.TokenType.KEYWORD_BY,
    "an": tokens_1.TokenType.KEYWORD_ARTICLE,
    "a": tokens_1.TokenType.KEYWORD_ARTICLE,
    "the": tokens_1.TokenType.KEYWORD_ARTICLE,
    "exactly": tokens_1.TokenType.KEYWORD_EXACTLY,
    "inclusive": tokens_1.TokenType.KEYWORD_INCLUSIVE,
    "inclusively": tokens_1.TokenType.KEYWORD_INCLUSIVE,
    "exclusive": tokens_1.TokenType.KEYWORD_EXCLUSIVE,
    "exclusively": tokens_1.TokenType.KEYWORD_EXCLUSIVE,
    "from": tokens_1.TokenType.KEYWORD_FROM,
    "to": tokens_1.TokenType.KEYWORD_TO
};
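// Keyword lookup is case-insensitive: tokenize() lower-cases each word before checking this table,
// so "Match", "MATCH" and "match" all map to TokenType.KEYWORD_MATCH.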
const escape_sequences = {
    "a": "\x07", // bell (BEL)
    "b": "\b",
    "e": "\x1B", // escape (ESC)
    "f": "\f",
    "n": "\n",
    "r": "\r",
    "t": "\t",
    "'": "'",
    "\"": '"',
    "\\": "\\",
};
class TokenizerOptions {
    constructor() {
        this.convert_spaces_to_tabs = false;
    }
}
exports.TokenizerOptions = TokenizerOptions;
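// With convert_spaces_to_tabs enabled, tokenize() below treats each run of four spaces as a single
// INDENT token; otherwise only a literal "\t" produces INDENT.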
const escape_sequence_hex_regex = new RegExp(/[0-9A-Fa-f]/); // no "g" flag, so .test() stays stateless
function escape_sequence_gather_hex(input, i, max) {
    let hex = "";
    // i points at the escape letter ("x", "u" or "U"); the hex digits start one character later
    for (i++; i < input.length && max-- > 0; i++) {
        if (escape_sequence_hex_regex.test(input[i])) {
            hex += input[i];
        }
    }
    return hex;
}
function escape_sequence_mapper(input, i) {
    if (escape_sequences[input[i]]) {
        return { code: escape_sequences[input[i]], read: 1 };
    }
    //variable hex code
    else if (input[i] === "x") {
        const hex = escape_sequence_gather_hex(input, i, 4);
        return { code: String.fromCharCode(parseInt(hex, 16)), read: hex.length + 1 };
    }
    //4 hex unicode
    else if (input[i] === "u") {
        const unicode = escape_sequence_gather_hex(input, i, 4);
        if (unicode.length !== 4) {
            return { code: "", read: unicode.length + 1, error: new Error("Bad escape sequence") };
        }
        else {
            return { code: String.fromCharCode(parseInt(unicode, 16)), read: 5 };
        }
    }
    //8 hex unicode
    else if (input[i] === "U") {
        const unicode = escape_sequence_gather_hex(input, i, 8);
        if (unicode.length !== 8) {
            return { code: "", read: unicode.length + 1, error: new Error("Bad escape sequence") };
        }
        else {
            return { code: String.fromCharCode(parseInt(unicode, 16)), read: 9 };
        }
    }
    else {
        // unknown escape sequence: arguably an error, but pass the character through unchanged
        return { code: input[i], read: 1 };
    }
}
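// For example: escape_sequence_mapper("n", 0) yields { code: "\n", read: 1 }, and
// escape_sequence_mapper("u0041", 0) yields { code: "A", read: 5 } (the "u" plus four hex digits).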
function is_digit(input) {
    //return /[0-9]/g.test(input);
    const value = input.charCodeAt(0);
    return value >= 48 && value <= 57;
}
function is_char(input) {
    //return input.toUpperCase() != input.toLowerCase();
    //return /[a-zA-Z]/g.test(input);
    const value = input.charCodeAt(0);
    return ((value >= 65 && value <= 90) || (value >= 97 && value <= 122));
}
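// Both helpers only accept ASCII; in tokenize() below, any character that is neither handled by an
// explicit case nor an ASCII digit/letter is reported as "Unknown character in text".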
/* Basic Tokenizer */
function tokenize(input, options) {
    let line = 1;
    let position = 1;
    const tokens = [];
    const errors = [];
    for (let i = 0; i < input.length; i++, position++) {
        // 4 spaces = 1 tab. That is final. Debate over
        if (options.convert_spaces_to_tabs && input.startsWith("    ", i)) {
            tokens.push(new tokens_1.Token(tokens_1.TokenType.INDENT, line, position));
            i += 3;
            position += 3;
        }
        // between (ex: 0...3 or 0-3)
        else if (input.startsWith("...", i)) {
            tokens.push(new tokens_1.Token(tokens_1.TokenType.BETWEEN, line, position));
            i += 2;
            position += 2;
        }
        else if (input.startsWith("..", i)) {
            tokens.push(new tokens_1.Token(tokens_1.TokenType.BETWEEN, line, position));
            i++;
            position++;
        }
        // comments
        else if (input.startsWith("//", i)) {
            for (i++, position++; i < input.length; i++, position++) {
                if (input[i] === "\n") {
                    tokens.push(new tokens_1.Token(tokens_1.TokenType.END_OF_STATEMENT, line, position));
                    break;
                }
            }
            line++;
            position = 0;
        }
        else if (input.startsWith("/*", i)) {
            for (i++, position++; i < input.length - 1; i++, position++) {
                if (input[i] === "*" && input[i + 1] === "/") {
                    tokens.push(new tokens_1.Token(tokens_1.TokenType.END_OF_STATEMENT, line, position));
                    i++;
                    position++;
                    break;
                }
                if (input[i] === "\n") {
                    line++;
                    position = 0;
                }
            }
            if (i === input.length - 1) {
                errors.push(new tokens_1.TokenError("Unexpected EOF", line, position));
            }
            else {
                line++;
                position = 0;
            }
        }
        else if (input.startsWith("\r\n", i)) {
            tokens.push(new tokens_1.Token(tokens_1.TokenType.END_OF_STATEMENT, line, position));
            i++;
            line++;
            position = 0;
        }
        else {
            switch (input[i]) {
                // comment
                case "#":
                    for (i++, position++; i < input.length; i++, position++) {
                        if (input[i] === "\n") {
                            tokens.push(new tokens_1.Token(tokens_1.TokenType.END_OF_STATEMENT, line, position));
                            line++;
                            position = 0;
                            break;
                        }
                    }
                    break;
                // quote
                case '"':
                case '\"':
                    {
                        // build up a word between quotes
                        const quote_begin = { line: line, position: position };
                        const quote_char = input[i];
                        let found_ending = false;
                        let quote = "";
                        do {
                            i++;
                            position++;
                            if (input[i] === "\\") {
                                i++;
                                position++;
                                const sequence = escape_sequence_mapper(input, i);
                                if (sequence.error) {
                                    errors.push(new tokens_1.TokenError(sequence.error.message, line, position));
                                }
                                // advance to the last character of the sequence; the loop's i++ steps past it
                                position += sequence.read - 1;
                                i += sequence.read - 1;
                                quote += sequence.code;
                            }
                            else if (input[i] === quote_char) {
                                found_ending = true;
                                break;
                            }
                            else if (input[i] === "\n") {
                                line++;
                                position = 0;
                                break;
                            }
                            else {
                                quote += input[i];
                            }
                        } while (i < input.length);
                        if (found_ending) {
                            tokens.push(new tokens_1.Token(tokens_1.TokenType.QUOTE, line, position, quote));
                        }
                        else {
                            //we reached the end of the line or the end of the file
                            errors.push(new tokens_1.TokenError(`Unexpected end of quote. Quote began at ${quote_begin.line}:${quote_begin.position}`, line, position));
                            line++;
                            position = 0;
                        }
                        break;
                    }
                // between (ex: 0...3 or 0-3)
                case "-":
                    tokens.push(new tokens_1.Token(tokens_1.TokenType.BETWEEN, line, position));
                    break;
                case "\n":
                    tokens.push(new tokens_1.Token(tokens_1.TokenType.END_OF_STATEMENT, line, position));
                    line++;
                    position = 0;
                    break;
                case "\r":
                    // ignore
                    break;
                case "\t":
                    tokens.push(new tokens_1.Token(tokens_1.TokenType.INDENT, line, position));
                    break;
                case " ":
                    break;
                default:
                    // is digit? build up a number
                    if (is_digit(input[i])) {
                        let digits = input[i];
                        // consume any following digits (the first one is already in digits)
                        while (i + 1 < input.length && is_digit(input[i + 1])) {
                            i++;
                            position++;
                            digits += input[i];
                        }
                        tokens.push(new tokens_1.Token(tokens_1.TokenType.NUMBER, line, position, digits));
                    }
                    // is char? build up a word
                    else if (is_char(input[i])) {
                        let text = input[i];
                        // consume any following letters (the first one is already in text)
                        while (i + 1 < input.length && is_char(input[i + 1])) {
                            i++;
                            position++;
                            text += input[i];
                        }
                        const keyword_text = text.toLowerCase();
                        if (keywords[keyword_text]) {
                            tokens.push(new tokens_1.Token(keywords[keyword_text], line, position));
                        }
                        else {
                            switch (keyword_text) {
                                case "none":
                                case "zero":
                                    tokens.push(new tokens_1.Token(tokens_1.TokenType.NUMBER, line, position, "0"));
                                    break;
                                case "one":
                                    tokens.push(new tokens_1.Token(tokens_1.TokenType.NUMBER, line, position, "1"));
                                    break;
                                case "two":
                                    tokens.push(new tokens_1.Token(tokens_1.TokenType.NUMBER, line, position, "2"));
                                    break;
                                case "three":
                                    tokens.push(new tokens_1.Token(tokens_1.TokenType.NUMBER, line, position, "3"));
                                    break;
                                case "four":
                                    tokens.push(new tokens_1.Token(tokens_1.TokenType.NUMBER, line, position, "4"));
                                    break;
                                case "five":
                                    tokens.push(new tokens_1.Token(tokens_1.TokenType.NUMBER, line, position, "5"));
                                    break;
                                case "six":
                                    tokens.push(new tokens_1.Token(tokens_1.TokenType.NUMBER, line, position, "6"));
                                    break;
                                case "seven":
                                    tokens.push(new tokens_1.Token(tokens_1.TokenType.NUMBER, line, position, "7"));
                                    break;
                                case "eight":
                                    tokens.push(new tokens_1.Token(tokens_1.TokenType.NUMBER, line, position, "8"));
                                    break;
                                case "nine":
                                    tokens.push(new tokens_1.Token(tokens_1.TokenType.NUMBER, line, position, "9"));
                                    break;
                                case "ten":
                                    tokens.push(new tokens_1.Token(tokens_1.TokenType.NUMBER, line, position, "10"));
                                    break;
                                default:
                                    errors.push(new tokens_1.TokenError(`Unknown keyword ${text}`, line, position));
                                    break;
                            }
                        }
                    }
                    else {
                        errors.push(new tokens_1.TokenError(`Unknown character in text: ${input.charCodeAt(i)}`, line, position));
                    }
                    break;
            }
        }
    }
    return { tokens: tokens, errors: errors };
}
exports.tokenize = tokenize;
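// Typical usage, mirroring the entry module above:
//
//   const opts = new TokenizerOptions();
//   const { tokens, errors } = tokenize("match 1+ thing from thingy", opts);
//   tokens.forEach((t) => console.log(t.type, t.token_string));
//   errors.forEach((e) => console.error(e.to_string()));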

/***/ }),
/* 3 */
/***/ (function(module, exports, __webpack_require__) {

"use strict";

Object.defineProperty(exports, "__esModule", { value: true });
exports.Token = exports.TokenError = exports.TokenType = void 0;
var TokenType;
(function (TokenType) {
    TokenType[TokenType["END_OF_STATEMENT"] = 0] = "END_OF_STATEMENT";
    TokenType[TokenType["INDENT"] = 1] = "INDENT";
    TokenType[TokenType["BETWEEN"] = 2] = "BETWEEN";
    TokenType[TokenType["QUOTE"] = 3] = "QUOTE";
    TokenType[TokenType["NUMBER"] = 4] = "NUMBER";
    TokenType[TokenType["KEYWORD_BETWEEN"] = 5] = "KEYWORD_BETWEEN";
    TokenType[TokenType["KEYWORD_OPTIONAL"] = 6] = "KEYWORD_OPTIONAL";
    TokenType[TokenType["KEYWORD_MATCH"] = 7] = "KEYWORD_MATCH";
    TokenType[TokenType["KEYWORD_THEN"] = 8] = "KEYWORD_THEN";
    TokenType[TokenType["KEYWORD_AND"] = 9] = "KEYWORD_AND";
    TokenType[TokenType["KEYWORD_OR"] = 10] = "KEYWORD_OR";
    TokenType[TokenType["KEYWORD_ANY"] = 11] = "KEYWORD_ANY";
    TokenType[TokenType["KEYWORD_OF"] = 12] = "KEYWORD_OF";
    TokenType[TokenType["KEYWORD_WORD_SPECIFIER"] = 13] = "KEYWORD_WORD_SPECIFIER";
    TokenType[TokenType["KEYWORD_DIGIT_SPECIFIER"] = 14] = "KEYWORD_DIGIT_SPECIFIER";
    TokenType[TokenType["KEYWORD_CHAR_SPECIFIER"] = 15] = "KEYWORD_CHAR_SPECIFIER";
    TokenType[TokenType["KEYWORD_WHITESPACE_SPECIFIER"] = 16] = "KEYWORD_WHITESPACE_SPECIFIER";
    TokenType[TokenType["KEYWORD_NUMBER_SPECIFIER"] = 17] = "KEYWORD_NUMBER_SPECIFIER";
    TokenType[TokenType["KEYWORD_MULTIPLE"] = 18] = "KEYWORD_MULTIPLE";
    TokenType[TokenType["KEYWORD_AS"] = 19] = "KEYWORD_AS";
    TokenType[TokenType["KEYWORD_IF"] = 20] = "KEYWORD_IF";
    TokenType[TokenType["KEYWORD_STARTS"] = 21] = "KEYWORD_STARTS";
    TokenType[TokenType["KEYWORD_WITH"] = 22] = "KEYWORD_WITH";
    TokenType[TokenType["KEYWORD_ENDS"] = 23] = "KEYWORD_ENDS";
    TokenType[TokenType["KEYWORD_ELSE"] = 24] = "KEYWORD_ELSE";
    TokenType[TokenType["KEYWORD_UNLESS"] = 25] = "KEYWORD_UNLESS";
    TokenType[TokenType["KEYWORD_WHILE"] = 26] = "KEYWORD_WHILE";
    TokenType[TokenType["KEYWORD_MORE"] = 27] = "KEYWORD_MORE";
    TokenType[TokenType["KEYWORD_USING"] = 28] = "KEYWORD_USING";
    TokenType[TokenType["KEYWORD_GLOBAL"] = 29] = "KEYWORD_GLOBAL";
    TokenType[TokenType["KEYWORD_MULTILINE"] = 30] = "KEYWORD_MULTILINE";
    TokenType[TokenType["KEYWORD_EXACT"] = 31] = "KEYWORD_EXACT";
    TokenType[TokenType["KEYWORD_MATCHING"] = 32] = "KEYWORD_MATCHING";
    TokenType[TokenType["KEYWORD_NOT"] = 33] = "KEYWORD_NOT";
    TokenType[TokenType["KEYWORD_TAB"] = 34] = "KEYWORD_TAB";
    TokenType[TokenType["KEYWORD_LINEFEED"] = 35] = "KEYWORD_LINEFEED";
    TokenType[TokenType["KEYWORD_CARRIAGE"] = 36] = "KEYWORD_CARRIAGE";
    TokenType[TokenType["KEYWORD_RETURN"] = 37] = "KEYWORD_RETURN";
    TokenType[TokenType["KEYWORD_GROUP"] = 38] = "KEYWORD_GROUP";
    TokenType[TokenType["KEYWORD_BY"] = 39] = "KEYWORD_BY";
    TokenType[TokenType["KEYWORD_ARTICLE"] = 40] = "KEYWORD_ARTICLE";
    TokenType[TokenType["KEYWORD_EXACTLY"] = 41] = "KEYWORD_EXACTLY";
    TokenType[TokenType["KEYWORD_INCLUSIVE"] = 42] = "KEYWORD_INCLUSIVE";
    TokenType[TokenType["KEYWORD_EXCLUSIVE"] = 43] = "KEYWORD_EXCLUSIVE";
    TokenType[TokenType["KEYWORD_FROM"] = 44] = "KEYWORD_FROM";
    TokenType[TokenType["KEYWORD_TO"] = 45] = "KEYWORD_TO";
})(TokenType = exports.TokenType || (exports.TokenType = {}));
class TokenError extends Error {
    constructor(message, line, position) {
        super(message);
        this.line = line;
        this.position = position;
    }
    to_string() {
        return `${this.line}:${this.position} ${this.message}`;
    }
}
exports.TokenError = TokenError;
class Token {
    constructor(type, line, position, token_string) {
        this.type = type;
        this.line = line;
        this.position = position;
        this.token_string = token_string;
    }
}
exports.Token = Token;
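// A Token records what was matched and where: keywords become e.g.
// new Token(TokenType.KEYWORD_MATCH, line, position), while QUOTE and NUMBER tokens also carry
// their text in token_string (e.g. new Token(TokenType.NUMBER, line, position, "10")).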


/***/ })
/******/ ]);