Mirror of https://github.com/pdemian/human2regex.git, synced 2025-05-15 20:10:19 -07:00
Minor bug fixes & updated dependencies

commit 519e2f88e4 (parent 14b464074d)
@@ -75,6 +75,7 @@
     "no-invalid-this": "error",
     "no-new": "error",
     "no-unmodified-loop-condition": "error",
-    "no-unused-expression": [ true, "allow-fast-null-checks" ],
+    "init-declarations": [
+        "error",
+        "always"
@@ -4,7 +4,7 @@
 
-[](https://travis-ci.org/pdemian/human2regex)
+[](https://travis-ci.com/pdemian/human2regex)
 [](https://codecov.io/gh/pdemian/human2regex)
 [](https://lgtm.com/projects/g/pdemian/human2regex/)
 
docs/LICENSE | 8 (new file)
@@ -0,0 +1,8 @@
+Chevrotain, Copyright © 2018-present SAP SE or an SAP affiliate company
+https://github.com/Chevrotain/chevrotain/blob/master/LICENSE.txt
+
+CodeMirror, Copyright (C) 2017 by Marijn Haverbeke <marijnh@gmail.com> and others
+https://codemirror.net/LICENSE
+
+Human2Regex, Copyright (C) 2021 Patrick Demian
+https://github.com/pdemian/human2regex/blob/master/LICENSE
docs/bundle.min.css | 6 (vendored): diff suppressed because one or more lines are too long
docs/bundle.min.js | 16 (vendored): diff suppressed because one or more lines are too long
@@ -467,7 +467,10 @@ class CountSubStatementCST extends H2RCST {
         utilities_1.unusedParameter(context, "Context is not needed");
         const errors = [];
         if (this.to !== null && ((this.opt === "exclusive" && (this.to - 1) <= this.from) || this.to <= this.from)) {
-            errors.push(this.error("Values must be in range of eachother"));
+            errors.push(this.error("Values must be in range of each other"));
         }
+        else if (this.to === null && this.from === 0 && this.opt === null) {
+            errors.push(this.error("Count cannot be 0. This will match nothing. Use comments if you meant to ignore the next match."));
+        }
         return errors;
     }
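Note: the validation added above can be read in isolation. Below is a standalone TypeScript sketch (the function name and signature are illustrative assumptions, not the project's API) of the two cases now rejected: an exclusive range that collapses to nothing, and a bare count of zero.

// Illustrative sketch of the new validation rule (assumed names, not the real API).
function validateCount(from: number, to: number | null, opt: string | null): string[] {
    const errors: string[] = [];
    if (to !== null && ((opt === "exclusive" && (to - 1) <= from) || to <= from)) {
        // e.g. "between 2 and 3 exclusive": the usable range degenerates to {2,2} or worse
        errors.push("Values must be in range of each other");
    }
    else if (to === null && from === 0 && opt === null) {
        // e.g. 'match 0 "hello"': a literal {0} quantifier matches nothing
        errors.push("Count cannot be 0. This will match nothing.");
    }
    return errors;
}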
@@ -486,7 +489,9 @@ class CountSubStatementCST extends H2RCST {
             }
         }
         else if (from === 0) {
-            return this.opt === "+" ? "*" : "{0}";
+            // This will never return {0} as we validate against it
+            //return this.opt === "+" ? "*" : "{0}";
+            return "*";
         }
     }
     if (to !== null) {
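Note: this hunk leans on the validation above. A hedged sketch of the assumed count-to-quantifier mapping (illustrative only, not the project's exact code) shows why "*" is now the only output for a zero lower bound:

// Assumed shape of the count-to-quantifier mapping (illustrative only).
function countToQuantifier(from: number, to: number | null, opt: string | null): string {
    if (to !== null) {
        const upper = opt === "exclusive" ? to - 1 : to;
        return `{${from},${upper}}`;              // "between 2 and 5" -> {2,5}
    }
    if (from === 0) {
        // A literal {0} is unreachable here: the validator rejects a bare count
        // of 0, so every remaining from === 0 case means "0 or more".
        return "*";
    }
    return opt === "+" ? `{${from},}` : `{${from}}`;  // "2+" -> {2,}
}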
@@ -698,7 +703,7 @@ class BackrefStatementCST extends StatementCST {
             }
         }
         else if (this.optional) {
-            str = "?";
+            str += "?";
         }
         return str;
     }
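Note: the one-character fix above in miniature. The emitter builds the backreference text first and then appends modifiers; plain assignment discarded the reference and left a dangling "?". The helper below is an illustrative sketch, not the project's dialect-aware emitter:

// Sketch of the bug: append the quantifier rather than overwrite the output.
function emitOptionalBackref(backref: string, optional: boolean): string {
    let str = backref;        // e.g. "\\k<thing>" (dialect-specific in the real code)
    if (optional) {
        str += "?";           // 'str = "?"' clobbered the backreference entirely
    }
    return str;
}

emitOptionalBackref("\\k<thing>", true); // "\k<thing>?" rather than just "?"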
@@ -201,7 +201,7 @@ function dontClobberRepetition(fragment, repetition) {
         }
     }
     else {
-        fragment += repetition;
+        fragment = groupIfRequired(fragment) + repetition;
     }
     return fragment;
 }
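Note: why the fragment must be grouped before a repetition is appended: a quantifier binds only to the preceding atom. The helper below is a simplified stand-in for the project's groupIfRequired, kept minimal for illustration:

// Simplified stand-in: wrap multi-character fragments in a non-capturing group.
function groupIfRequired(fragment: string): string {
    return fragment.length > 1 ? `(?:${fragment})` : fragment;
}

const clobbered = "ab" + "{2}";                  // /ab{2}/     matches "abb"
const grouped = groupIfRequired("ab") + "{2}";   // /(?:ab){2}/ matches "abab"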
lib/lexer.js | 19
@@ -170,15 +170,16 @@ class Human2RegexLexer {
             // new indent is below the past indent
             else if (curr_indent_level < utilities_1.last(indent_stack)) {
                 const index = utilities_1.findLastIndex(indent_stack, curr_indent_level);
-                if (index < 0) {
-                    lex_result.errors.push(this.lexError(start_token));
-                }
-                else {
-                    const number_of_dedents = indent_stack.length - index - 1;
-                    for (let j = 0; j < number_of_dedents; j++) {
-                        indent_stack.pop();
-                        tokens.push(chevrotain_1.createTokenInstance(tokens_1.Outdent, "", start_token.startOffset, start_token.startOffset + length, (_a = start_token.startLine) !== null && _a !== void 0 ? _a : NaN, (_b = start_token.endLine) !== null && _b !== void 0 ? _b : NaN, (_c = start_token.startColumn) !== null && _c !== void 0 ? _c : NaN, ((_d = start_token.startColumn) !== null && _d !== void 0 ? _d : NaN) + length));
-                    }
-                }
+                // this will never happen since earlier up we exclude when you're too far ahead
+                // you have to go in order, 1 by 1, thus you can never not be in the indent stack
+                //if (index < 0) {
+                //    lex_result.errors.push(this.lexError(start_token));
+                //    continue;
+                //}
+                const number_of_dedents = indent_stack.length - index - 1;
+                for (let j = 0; j < number_of_dedents; j++) {
+                    indent_stack.pop();
+                    tokens.push(chevrotain_1.createTokenInstance(tokens_1.Outdent, "", start_token.startOffset, start_token.startOffset + length, (_a = start_token.startLine) !== null && _a !== void 0 ? _a : NaN, (_b = start_token.endLine) !== null && _b !== void 0 ? _b : NaN, (_c = start_token.startColumn) !== null && _c !== void 0 ? _c : NaN, ((_d = start_token.startColumn) !== null && _d !== void 0 ? _d : NaN) + length));
+                }
             }
             else {
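Note: the dedent logic this hunk flattens, reduced to its core (an illustrative sketch, not the project's exact code). Indentation levels live on a stack; a shallower line pops the stack once per level and emits one Outdent token per pop:

// Sketch of indent-stack dedenting with one Outdent emitted per popped level.
function emitDedents(indentStack: number[], currLevel: number, emit: (name: string) => void): void {
    const index = indentStack.lastIndexOf(currLevel);
    // index < 0 cannot occur: earlier checks force indentation to grow one
    // level at a time, so every dedent target is already on the stack.
    const numberOfDedents = indentStack.length - index - 1;
    for (let j = 0; j < numberOfDedents; j++) {
        indentStack.pop();
        emit("Outdent");
    }
}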
@@ -630,7 +630,6 @@ class Human2RegexParser extends chevrotain_1.EmbeddedActionsParser {
         this.input = tokens;
         return new ParseResult(this.regexp(), this.errors.map(utilities_1.CommonError.fromParseError));
     }
-    /* istanbul ignore next */
     /**
      * Sets the options for this parser
      *
@@ -639,7 +638,7 @@ class Human2RegexParser extends chevrotain_1.EmbeddedActionsParser {
      * @public
      */
     setOptions(options) {
-        utilities_1.unusedParameter(options, "skip_validations is not valid to change once we've already initialized");
+        throw new Error("skip_validations is not valid to change once we've already initialized");
     }
 }
 exports.Human2RegexParser = Human2RegexParser;
lib/tokens.d.ts | 2 (vendored)
@@ -13,7 +13,6 @@
 /** @internal */ export declare const Optional: import("chevrotain").TokenType;
 /** @internal */ export declare const Match: import("chevrotain").TokenType;
 /** @internal */ export declare const Then: import("chevrotain").TokenType;
-/** @internal */ export declare const Anything: import("chevrotain").TokenType;
 /** @internal */ export declare const Or: import("chevrotain").TokenType;
 /** @internal */ export declare const And: import("chevrotain").TokenType;
 /** @internal */ export declare const Word: import("chevrotain").TokenType;
@@ -32,6 +31,7 @@
 /** @internal */ export declare const Exact: import("chevrotain").TokenType;
 /** @internal */ export declare const Matching: import("chevrotain").TokenType;
 /** @internal */ export declare const Not: import("chevrotain").TokenType;
+/** @internal */ export declare const Anything: import("chevrotain").TokenType;
 /** @internal */ export declare const Between: import("chevrotain").TokenType;
 /** @internal */ export declare const Tab: import("chevrotain").TokenType;
 /** @internal */ export declare const Linefeed: import("chevrotain").TokenType;
@@ -1,7 +1,7 @@
 "use strict";
 /*! Copyright (c) 2021 Patrick Demian; Licensed under MIT */
 Object.defineProperty(exports, "__esModule", { value: true });
-exports.CaseInsensitive = exports.CarriageReturn = exports.Newline = exports.Repeat = exports.Called = exports.Create = exports.To = exports.From = exports.Exclusive = exports.Inclusive = exports.Exactly = exports.Times = exports.A = exports.Group = exports.Linefeed = exports.Tab = exports.Between = exports.Not = exports.Matching = exports.Exact = exports.Multiline = exports.Global = exports.Using = exports.Unicode = exports.Number = exports.Boundary = exports.Whitespace = exports.Integer = exports.Decimal = exports.Letter = exports.Character = exports.Digit = exports.Word = exports.And = exports.Or = exports.Anything = exports.Then = exports.Match = exports.Optional = exports.Ten = exports.Nine = exports.Eight = exports.Seven = exports.Six = exports.Five = exports.Four = exports.Three = exports.Two = exports.One = exports.Zero = void 0;
+exports.CaseInsensitive = exports.CarriageReturn = exports.Newline = exports.Repeat = exports.Called = exports.Create = exports.To = exports.From = exports.Exclusive = exports.Inclusive = exports.Exactly = exports.Times = exports.A = exports.Group = exports.Linefeed = exports.Tab = exports.Between = exports.Anything = exports.Not = exports.Matching = exports.Exact = exports.Multiline = exports.Global = exports.Using = exports.Unicode = exports.Number = exports.Boundary = exports.Whitespace = exports.Integer = exports.Decimal = exports.Letter = exports.Character = exports.Digit = exports.Word = exports.And = exports.Or = exports.Then = exports.Match = exports.Optional = exports.Ten = exports.Nine = exports.Eight = exports.Seven = exports.Six = exports.Five = exports.Four = exports.Three = exports.Two = exports.One = exports.Zero = void 0;
 exports.AllTokens = exports.Outdent = exports.Indent = exports.StringLiteral = exports.NumberLiteral = exports.Identifier = exports.MultilineComment = exports.SingleLineComment = exports.WS = exports.EndOfLine = exports.Is = exports.Else = exports.If = exports.The = exports.Rerun = exports.OrMore = exports.CaseSensitive = void 0;
 /**
  * The tokens required for Human2Regex
@@ -22,7 +22,6 @@ const chevrotain_1 = require("chevrotain");
 /** @internal */ exports.Optional = chevrotain_1.createToken({ name: "Optional", pattern: /(optional(ly)?|possibl[ye]|maybe)/i });
 /** @internal */ exports.Match = chevrotain_1.createToken({ name: "Match", pattern: /match(es)?/i });
 /** @internal */ exports.Then = chevrotain_1.createToken({ name: "Then", pattern: /then/i });
-/** @internal */ exports.Anything = chevrotain_1.createToken({ name: "Anything", pattern: /(any thing|any|anything)(s)?/i });
 /** @internal */ exports.Or = chevrotain_1.createToken({ name: "Or", pattern: /or/i });
 /** @internal */ exports.And = chevrotain_1.createToken({ name: "And", pattern: /and|,/i });
 /** @internal */ exports.Word = chevrotain_1.createToken({ name: "WordSpecifier", pattern: /word(s)?/i });
@@ -41,6 +40,7 @@ const chevrotain_1 = require("chevrotain");
 /** @internal */ exports.Exact = chevrotain_1.createToken({ name: "Exact", pattern: /exact/i });
 /** @internal */ exports.Matching = chevrotain_1.createToken({ name: "Matching", pattern: /matching/i });
 /** @internal */ exports.Not = chevrotain_1.createToken({ name: "Not", pattern: /not|anything but|any thing but|everything but|every thing but/i });
+/** @internal */ exports.Anything = chevrotain_1.createToken({ name: "Anything", pattern: /(anything|any thing|any|everything|every thing)(s)?/i, longer_alt: exports.Not });
 /** @internal */ exports.Between = chevrotain_1.createToken({ name: "Between", pattern: /between/i });
 /** @internal */ exports.Tab = chevrotain_1.createToken({ name: "Tab", pattern: /tab/i });
 /** @internal */ exports.Linefeed = chevrotain_1.createToken({ name: "Linefeed", pattern: /(line feed|linefeed)/i });
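Note: what longer_alt buys here, as a runnable Chevrotain sketch. "anything" alone should lex as Anything, but "anything but" must lex as the longer Not token; the patterns below are trimmed-down assumptions, not the project's full alternations:

// Minimal demonstration of Chevrotain's longer_alt resolution.
import { createToken, Lexer } from "chevrotain";

const Not = createToken({ name: "Not", pattern: /not|anything but/i });
const Anything = createToken({ name: "Anything", pattern: /anything|any/i, longer_alt: Not });

const lexer = new Lexer([Anything, Not]);
console.log(lexer.tokenize("anything").tokens.map(t => t.tokenType.name));     // ["Anything"]
console.log(lexer.tokenize("anything but").tokens.map(t => t.tokenType.name)); // ["Not"], the longer match wins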
@@ -68,7 +68,7 @@ const chevrotain_1 = require("chevrotain");
 /** @internal */ exports.EndOfLine = chevrotain_1.createToken({ name: "EOL", pattern: /\n/ });
 /** @internal */ exports.WS = chevrotain_1.createToken({ name: "Whitespace", pattern: /[^\S\n]+/, start_chars_hint: [" ", "\r"], group: chevrotain_1.Lexer.SKIPPED });
 /** @internal */ exports.SingleLineComment = chevrotain_1.createToken({ name: "SingleLineComment", pattern: /(#|\/\/).*/, group: chevrotain_1.Lexer.SKIPPED });
-/** @internal */ exports.MultilineComment = chevrotain_1.createToken({ name: "MultiLineComment", pattern: /\/\*(.*)\*\//, line_breaks: true, group: chevrotain_1.Lexer.SKIPPED });
+/** @internal */ exports.MultilineComment = chevrotain_1.createToken({ name: "MultiLineComment", pattern: /\/\*(.|\n|\r)*\*\//, line_breaks: true, group: chevrotain_1.Lexer.SKIPPED });
 /** @internal */ exports.Identifier = chevrotain_1.createToken({ name: "Identifier", pattern: /[a-z]\w*/i });
 /** @internal */ exports.NumberLiteral = chevrotain_1.createToken({ name: "NumberLiteral", pattern: /\d+/ });
 /** @internal */ exports.StringLiteral = chevrotain_1.createToken({ name: "StringLiteral", pattern: /"(?:[^\\"]|\\(?:[bfnrtv"\\/]|u[0-9a-f]{4}|U[0-9a-f]{8}))*"/i });
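Note: the comment-pattern fix can be demonstrated directly. In JavaScript regexes "." does not match newlines, so the old pattern only recognized single-line /* ... */ comments:

// Old vs. new MultilineComment patterns, tested against a two-line comment.
const oldPattern = /\/\*(.*)\*\//;
const newPattern = /\/\*(.|\n|\r)*\*\//;

const comment = "/*create a group called thing\n*/";
console.log(oldPattern.test(comment)); // false: "." stops at the newline
console.log(newPattern.test(comment)); // true:  the alternation crosses lines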
@@ -94,8 +94,8 @@ exports.AllTokens = [
     exports.Matching,
     exports.Match,
     exports.Then,
-    exports.Not,
     exports.Anything,
+    exports.Not,
     exports.And,
     exports.Boundary,
     exports.Word,
package-lock.json | 6436 (generated): diff suppressed because it is too large
package.json | 39
@@ -1,34 +1,35 @@
 {
     "name": "human2regex",
-    "version": "1.2.1",
+    "version": "1.2.2",
     "description": "Humanized Regular Expressions",
     "main": "./lib/index.js",
     "typings": "./lib/index.d.ts",
     "devDependencies": {
         "@types/glob": "^7.1.3",
         "@types/html-minifier": "^3.5.3",
-        "@types/jest": "^26.0.19",
-        "@typescript-eslint/eslint-plugin": "^4.14.0",
-        "@typescript-eslint/parser": "^4.14.0",
-        "before-build-webpack": "^0.2.9",
+        "@types/jest": "^26.0.23",
+        "@typescript-eslint/eslint-plugin": "^4.28.1",
+        "@typescript-eslint/parser": "^4.28.1",
+        "before-build-webpack": "^0.2.11",
         "clean-webpack-plugin": "^3.0.0",
-        "codecov": "^3.8.1",
+        "codecov": "^3.8.2",
         "copy-webpack-plugin": "^6.4.1",
         "css-loader": "^4.3.0",
-        "eslint": "^7.18.0",
-        "glob": "^7.1.6",
-        "handlebars": "^4.7.6",
+        "eslint": "^7.30.0",
+        "glob": "^7.1.7",
+        "handlebars": "^4.7.7",
         "html-minifier": "^4.0.0",
         "jest": "^26.6.3",
-        "mini-css-extract-plugin": "^1.3.3",
-        "optimize-css-assets-webpack-plugin": "^5.0.4",
-        "remove-files-webpack-plugin": "^1.4.4",
-        "ts-jest": "^26.4.4",
-        "ts-loader": "^8.0.13",
+        "mini-css-extract-plugin": "^1.6.2",
+        "optimize-css-assets-webpack-plugin": "^6.0.1",
+        "postcss": "^8.3.5",
+        "remove-files-webpack-plugin": "^1.4.5",
+        "ts-jest": "^26.5.6",
+        "ts-loader": "^8.3.0",
         "ts-node": "^9.1.1",
-        "typescript": "^4.1.3",
-        "webpack": "^4.44.2",
-        "webpack-cli": "^3.3.12"
+        "typescript": "^4.3.5",
+        "webpack": "^5.42.0",
+        "webpack-cli": "^4.7.2"
     },
     "scripts": {
         "build": "tsc && webpack --config webpack.config.js",
@@ -45,8 +46,8 @@
     "author": "Patrick Demian",
     "license": "MIT",
     "dependencies": {
-        "chevrotain": "^7.1.0",
-        "codemirror": "^5.59.2"
+        "chevrotain": "^7.1.2",
+        "codemirror": "^5.62.0"
     },
     "repository": {
         "type": "git",
src/docs/assets/LICENSE | 8 (new file)
@@ -0,0 +1,8 @@
+Chevrotain, Copyright © 2018-present SAP SE or an SAP affiliate company
+https://github.com/Chevrotain/chevrotain/blob/master/LICENSE.txt
+
+CodeMirror, Copyright (C) 2017 by Marijn Haverbeke <marijnh@gmail.com> and others
+https://codemirror.net/LICENSE
+
+Human2Regex, Copyright (C) 2021 Patrick Demian
+https://github.com/pdemian/human2regex/blob/master/LICENSE
@@ -539,7 +539,10 @@ export class CountSubStatementCST extends H2RCST {
         const errors: ISemanticError[] = [];
 
         if (this.to !== null && ((this.opt === "exclusive" && (this.to-1) <= this.from) || this.to <= this.from)) {
-            errors.push(this.error("Values must be in range of eachother"));
+            errors.push(this.error("Values must be in range of each other"));
         }
+        else if(this.to === null && this.from === 0 && this.opt === null) {
+            errors.push(this.error("Count cannot be 0. This will match nothing. Use comments if you meant to ignore the next match."));
+        }
 
         return errors;
@@ -563,7 +566,9 @@ export class CountSubStatementCST extends H2RCST {
             }
         }
         else if (from === 0) {
-            return this.opt === "+" ? "*" : "{0}";
+            // This will never return {0} as we validate against it
+            //return this.opt === "+" ? "*" : "{0}";
+            return "*";
         }
     }
 
@@ -805,7 +810,7 @@ export class BackrefStatementCST extends StatementCST {
             }
         }
         else if (this.optional) {
-            str = "?";
+            str += "?";
         }
 
         return str;
@@ -222,7 +222,7 @@ export function dontClobberRepetition(fragment: string, repetition: string): string {
         }
     }
     else {
-        fragment += repetition;
+        fragment = groupIfRequired(fragment) + repetition;
     }
 
     return fragment;
src/lexer.ts | 19
@@ -191,16 +191,17 @@ export class Human2RegexLexer {
         else if (curr_indent_level < last(indent_stack)) {
             const index = findLastIndex(indent_stack, curr_indent_level);
 
-            if (index < 0) {
-                lex_result.errors.push(this.lexError(start_token));
-            }
-            else {
-                const number_of_dedents = indent_stack.length - index - 1;
+            // this will never happen since earlier up we exclude when you're too far ahead
+            // you have to go in order, 1 by 1, thus you can never not be in the indent stack
+            //if (index < 0) {
+            //    lex_result.errors.push(this.lexError(start_token));
+            //    continue;
+            //}
+            const number_of_dedents = indent_stack.length - index - 1;
 
-                for (let j = 0; j < number_of_dedents; j++) {
-                    indent_stack.pop();
-                    tokens.push(createTokenInstance(Outdent, "", start_token.startOffset, start_token.startOffset + length, start_token.startLine ?? NaN, start_token.endLine ?? NaN, start_token.startColumn ?? NaN, (start_token.startColumn ?? NaN) + length));
-                }
-            }
+            for (let j = 0; j < number_of_dedents; j++) {
+                indent_stack.pop();
+                tokens.push(createTokenInstance(Outdent, "", start_token.startOffset, start_token.startOffset + length, start_token.startLine ?? NaN, start_token.endLine ?? NaN, start_token.startColumn ?? NaN, (start_token.startColumn ?? NaN) + length));
+            }
         }
         else {
@@ -8,7 +8,7 @@
 import { EmbeddedActionsParser, IOrAlt, IToken } from "chevrotain";
 import * as T from "./tokens";
 import { CountSubStatementCST, UsingFlags, MatchSubStatementType, MatchSubStatementValue, MatchSubStatementCST, UsingStatementCST, RegularExpressionCST, StatementCST, RepeatStatementCST, MatchStatementValue, MatchStatementCST, GroupStatementCST, RegexDialect, BackrefStatementCST, GeneratorContext, IfPatternStatementCST, IfIdentStatementCST } from "./generator";
-import { first, usefulConditional, unusedParameter, CommonError } from "./utilities";
+import { first, usefulConditional, CommonError } from "./utilities";
 
 /**
  * The options for the Parser
@@ -700,7 +700,6 @@ export class Human2RegexParser extends EmbeddedActionsParser {
         this.regexp = Regex;
     }
 
-    /* istanbul ignore next */
     /**
      * Sets the options for this parser
      *
@@ -709,6 +708,6 @@ export class Human2RegexParser extends EmbeddedActionsParser {
      * @public
      */
     public setOptions(options: Human2RegexParserOptions): void {
-        unusedParameter(options, "skip_validations is not valid to change once we've already initialized");
+        throw new Error("skip_validations is not valid to change once we've already initialized");
     }
 }
@@ -22,7 +22,7 @@ import { createToken, Lexer } from "chevrotain";
 /** @internal */ export const Optional = createToken({name: "Optional", pattern: /(optional(ly)?|possibl[ye]|maybe)/i});
 /** @internal */ export const Match = createToken({name: "Match", pattern: /match(es)?/i});
 /** @internal */ export const Then = createToken({name: "Then", pattern: /then/i});
-/** @internal */ export const Anything = createToken({name: "Anything", pattern: /(any thing|any|anything)(s)?/i});
+
 /** @internal */ export const Or = createToken({name: "Or", pattern: /or/i});
 /** @internal */ export const And = createToken({name: "And", pattern: /and|,/i});
 /** @internal */ export const Word = createToken({name: "WordSpecifier", pattern: /word(s)?/i});
@@ -41,6 +41,7 @@ import { createToken, Lexer } from "chevrotain";
 /** @internal */ export const Exact = createToken({name: "Exact", pattern: /exact/i});
 /** @internal */ export const Matching = createToken({name: "Matching", pattern: /matching/i});
 /** @internal */ export const Not = createToken({name: "Not", pattern: /not|anything but|any thing but|everything but|every thing but/i});
+/** @internal */ export const Anything = createToken({name: "Anything", pattern: /(anything|any thing|any|everything|every thing)(s)?/i, longer_alt: Not});
 /** @internal */ export const Between = createToken({name: "Between", pattern: /between/i});
 /** @internal */ export const Tab = createToken({name: "Tab", pattern: /tab/i});
 /** @internal */ export const Linefeed = createToken({name: "Linefeed", pattern: /(line feed|linefeed)/i});
@@ -69,7 +70,7 @@ import { createToken, Lexer } from "chevrotain";
 /** @internal */ export const EndOfLine = createToken({name: "EOL", pattern: /\n/});
 /** @internal */ export const WS = createToken({name: "Whitespace", pattern: /[^\S\n]+/, start_chars_hint: [ " ", "\r" ], group: Lexer.SKIPPED});
 /** @internal */ export const SingleLineComment = createToken({name: "SingleLineComment", pattern: /(#|\/\/).*/, group: Lexer.SKIPPED});
-/** @internal */ export const MultilineComment = createToken({name: "MultiLineComment", pattern: /\/\*(.*)\*\//, line_breaks: true, group: Lexer.SKIPPED});
+/** @internal */ export const MultilineComment = createToken({name: "MultiLineComment", pattern: /\/\*(.|\n|\r)*\*\//, line_breaks: true, group: Lexer.SKIPPED});
 
 /** @internal */ export const Identifier = createToken({name: "Identifier", pattern: /[a-z]\w*/i});
 /** @internal */ export const NumberLiteral = createToken({name: "NumberLiteral", pattern: /\d+/});
@@ -98,8 +99,8 @@ export const AllTokens = [
     Matching,
     Match,
     Then,
-    Not,
     Anything,
+    Not,
     And,
     Boundary,
     Word,
@@ -4,7 +4,6 @@ import { Human2RegexParser, Human2RegexParserOptions } from "../src/parser";
 import { Human2RegexLexer, Human2RegexLexerOptions } from "../src/lexer";
 import { RegexDialect } from "../src/generator";
-
 
 describe("Generator functionality", function() {
     const lexer = new Human2RegexLexer(new Human2RegexLexerOptions(true));
     const parser = new Human2RegexParser(new Human2RegexParserOptions(true));
@@ -52,7 +51,20 @@ describe("Generator functionality", function() {
     expect(reg5.validate(RegexDialect.JS).length).toBe(0);
     expect(reg5.toRegex(RegexDialect.JS)).toBe("/(?<test>hello)?/");
 
-    //(?<test>hello)?
+    const toks6 = lexer.tokenize("match not anything").tokens;
+    const reg6 = parser.parse(toks6);
+    expect(reg6.validate(RegexDialect.JS).length).toBe(0);
+    expect(reg6.toRegex(RegexDialect.JS)).toBe("/[^.]/");
+
+    const toks7 = lexer.tokenize("match 2+ \"hello\"").tokens;
+    const reg7 = parser.parse(toks7);
+    expect(reg7.validate(RegexDialect.JS).length).toBe(0);
+    expect(reg7.toRegex(RegexDialect.JS)).toBe("/(?:hello){2,}/");
+
+    const toks8 = lexer.tokenize('optionally repeat\n\tmatch "hello"').tokens;
+    const reg8 = parser.parse(toks8);
+    expect(reg8.validate(RegexDialect.JS).length).toBe(0);
+    expect(reg8.toRegex(RegexDialect.JS)).toBe("/(?:hello)*?/");
 });
 
 it("generates an advanced regex", function() {
@@ -99,6 +111,14 @@ describe("Generator functionality", function() {
     const toks7 = lexer.tokenize("rerun thing").tokens;
     const reg7 = parser.parse(toks7);
     expect(reg7.validate(RegexDialect.JS).length).toBeGreaterThan(0);
+
+    const toks8 = lexer.tokenize("match 0 \"hello\"").tokens;
+    const reg8 = parser.parse(toks8);
+    expect(reg8.validate(RegexDialect.JS).length).toBeGreaterThan(0);
+
+    const toks9 = lexer.tokenize('/*create a group called thing\n\tmatch "a"\n*/if thing\n\tmatch "b"\nelse\n\tmatch "c"\n').tokens;
+    const reg9 = parser.parse(toks9);
+    expect(reg9.validate(RegexDialect.PCRE).length).toBeGreaterThan(0);
 });
 
 it("handles ranges", function() {
@@ -121,6 +141,11 @@ describe("Generator functionality", function() {
     const reg3 = parser.parse(toks3);
     expect(reg3.validate(RegexDialect.JS).length).toBe(0);
     expect(reg3.toRegex(RegexDialect.JS)).toBe("/[0a-zA-Z]/");
+
+    const toks4 = lexer.tokenize('match not "a" to "z"').tokens;
+    const reg4 = parser.parse(toks4);
+    expect(reg4.validate(RegexDialect.JS).length).toBe(0);
+    expect(reg4.toRegex(RegexDialect.JS)).toBe("/[^a-z]/");
 });
 
 it("handles specifiers", function() {
@@ -228,14 +253,14 @@ describe("Generator functionality", function() {
 });
 
 it("can generate backreferences", function() {
-    const toks0 = lexer.tokenize('create a group called thing\n\tmatch "Hello World"\nrerun thing\noptionally recapture 3 times the group called thing').tokens;
+    const toks0 = lexer.tokenize('create a group called thing\n\tmatch "Hello World"\nrerun thing\noptionally recapture 3 times the group called thing\noptionally rerun thing').tokens;
     const reg0 = parser.parse(toks0);
     expect(reg0.validate(RegexDialect.JS).length).toBe(0);
 
-    expect(reg0.toRegex(RegexDialect.JS)).toBe("/(?<thing>Hello World)\\g<thing>(?:\\g<thing>{3})?/");
-    expect(reg0.toRegex(RegexDialect.PCRE)).toBe("/(?P<thing>Hello World)\\g<thing>(?:\\g<thing>{3})?/");
-    expect(reg0.toRegex(RegexDialect.Python)).toBe("/(?P<thing>Hello World)(?P=thing)(?:(?P=thing){3})?/");
-    expect(reg0.toRegex(RegexDialect.DotNet)).toBe("/(?<thing>Hello World)\\k<thing>(?:\\k<thing>{3})?/");
+    expect(reg0.toRegex(RegexDialect.JS)).toBe("/(?<thing>Hello World)\\g<thing>(?:\\g<thing>{3})?\\g<thing>?/");
+    expect(reg0.toRegex(RegexDialect.PCRE)).toBe("/(?P<thing>Hello World)\\g<thing>(?:\\g<thing>{3})?\\g<thing>?/");
+    expect(reg0.toRegex(RegexDialect.Python)).toBe("/(?P<thing>Hello World)(?P=thing)(?:(?P=thing){3})?(?P=thing)?/");
+    expect(reg0.toRegex(RegexDialect.DotNet)).toBe("/(?<thing>Hello World)\\k<thing>(?:\\k<thing>{3})?\\k<thing>?/");
 });
 
 it("can generate if statements", function() {
@@ -258,6 +283,13 @@ describe("Generator functionality", function() {
     expect(reg2.validate(RegexDialect.PCRE).length).toBe(0);
     expect(reg2.toRegex(RegexDialect.PCRE)).toBe("/(?P<thing>a)(?(thing)b|c)/");
     expect(reg2.toRegex(RegexDialect.Boost)).toBe("/(?<thing>a)(?(<thing>)b|c)/");
+
+    const toks3 = lexer.tokenize('create a group called thing\n\tmatch "a"\nif thing\n\tmatch "b"').tokens;
+    const reg3 = parser.parse(toks3);
+    expect(reg3.validate(RegexDialect.JS).length).toBeGreaterThan(0);
+    expect(reg3.validate(RegexDialect.PCRE).length).toBe(0);
+    expect(reg3.toRegex(RegexDialect.PCRE)).toBe("/(?P<thing>a)(?(thing)b)/");
+    expect(reg3.toRegex(RegexDialect.Boost)).toBe("/(?<thing>a)(?(<thing>)b)/");
 });
 
 it("generate dialect specific regex", function() {
@@ -284,6 +316,12 @@ describe("Generator functionality", function() {
     const reg1 = parser.parse(toks1);
     expect(reg1.validate(RegexDialect.DotNet).length).toBe(0);
     expect(reg1.toRegex(RegexDialect.DotNet)).toBe("/\\p{IsLatin}/");
+
+    const toks2 = lexer.tokenize('match not unicode class "Latin"').tokens;
+    const reg2 = parser.parse(toks2);
+
+    expect(reg2.validate(RegexDialect.JS).length).toBe(0);
+    expect(reg2.toRegex(RegexDialect.JS)).toBe("/\\P{Latin}/");
 });
 
 it("runs complex scripts", function() {
@@ -8,6 +8,7 @@ describe("Lexer capabilities", function() {
 
     it("validates", function() {
         expect(() => lexer.setOptions(new Human2RegexLexerOptions(false, IndentType.Both))).not.toThrow();
+        expect(() => new Human2RegexLexer(new Human2RegexLexerOptions(true))).toThrow();
     });
 
     it("parses nothing", function() {
@@ -15,6 +16,10 @@ describe("Lexer capabilities", function() {
         expect(lexer.tokenize("").errors).toHaveLength(0);
         expect(lexer.tokenize("").tokens).toHaveLength(0);
 
+        expect(() => lexer.tokenize("/*create a group called thing\n*/")).not.toThrow();
+        expect(lexer.tokenize("/*create a group called thing\n*/").errors).toHaveLength(0);
+        expect(lexer.tokenize("/*create a group called thing\n*/").tokens).toHaveLength(0);
+
         expect(() => lexer.tokenize("\n/* hello world */\n")).not.toThrow();
         expect(lexer.tokenize("\n/* hello world */\n").errors).toHaveLength(0);
         expect(lexer.tokenize("\n/* hello world */\n").tokens).toHaveLength(0);
@@ -11,6 +11,7 @@ describe("Parser capabilities", function() {
 
     it("validates", function() {
         expect(() => parser = new Human2RegexParser(new Human2RegexParserOptions(false))).not.toThrow();
+        expect(() => parser.setOptions(new Human2RegexParserOptions(true))).toThrow();
     });
 
     it("parses nothing", function() {
@@ -3,7 +3,7 @@
 import "../src/utilities";
 import { isSingleRegexCharacter, findLastIndex, removeQuotes, regexEscape, hasFlag, combineFlags, makeFlag, first, last, CommonError, append, isRangeRegex } from "../src/utilities";
 import { UsingFlags, ISemanticError } from "../src/generator";
-import { IRecognitionException, ILexingError, createTokenInstance } from "chevrotain";
+import { IRecognitionException, ILexingError, createTokenInstance, IToken } from "chevrotain";
 import { Indent } from "../src/tokens";
 
 describe("Utility functions", function() {
@@ -116,6 +116,15 @@ describe("Utility functions", function() {
         context: { ruleStack: [], ruleOccurrenceStack: [] }
     };
 
+    const par_error_unknown : IRecognitionException = {
+        name: "Recognition Exception",
+        message: "Mismatch at 0,0",
+        // eslint-disable-next-line no-magic-numbers
+        token: { tokenType: Indent, image: "\t", startOffset: 123, startLine: undefined, startColumn: undefined, endOffset: undefined, tokenTypeIdx: -1 } as IToken,
+        resyncedTokens: [],
+        context: { ruleStack: [], ruleOccurrenceStack: [] }
+    };
+
     const sem_error: ISemanticError = {
         startLine: 123,
         startColumn: 123,
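Note: the new test token deliberately has undefined positions, which exercises error formatting for synthetic tokens. A hedged sketch of the idea (not the project's code) is that missing coordinates are coalesced to NaN instead of crashing the formatter, mirroring the "?? NaN" pattern used in the lexer above:

// Illustrative position coalescing for tokens without source coordinates.
interface TokenPos { startLine?: number; startColumn?: number; }

function describePosition(tok: TokenPos): string {
    const line = tok.startLine ?? NaN;
    const column = tok.startColumn ?? NaN;
    return `${line},${column}`;
}

describePosition({});                                // "NaN,NaN"
describePosition({ startLine: 1, startColumn: 2 });  // "1,2"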
@@ -126,6 +135,7 @@ describe("Utility functions", function() {
     expect(CommonError.fromLexError(lex_error)).toBeInstanceOf(CommonError);
     expect(CommonError.fromParseError(par_error)).toBeInstanceOf(CommonError);
     expect(CommonError.fromSemanticError(sem_error)).toBeInstanceOf(CommonError);
+    expect(CommonError.fromParseError(par_error_unknown)).toBeInstanceOf(CommonError);
 
     expect(() => CommonError.fromSemanticError(sem_error).toString()).not.toThrow();
     expect(CommonError.fromSemanticError(sem_error).toString()).not.toBeNull();
@@ -11,6 +11,7 @@
         "noImplicitAny": false, /* Raise error on expressions and declarations with an implied 'any' type. */
         "strictNullChecks": true, /* Enable strict null checks. */
         "strictFunctionTypes": true, /* Enable strict checking of function types. */
+        "skipLibCheck": true, /* Skip type checking of declaration files. */
         "esModuleInterop": true /* Enables emit interoperability between CommonJS and ES Modules via creation of namespace objects for all imports. Implies 'allowSyntheticDefaultImports'. */
     },
     "include": [
@@ -1,16 +0,0 @@
-{
-    "compilerOptions": {
-        "target": "es2017", /* Specify ECMAScript target version: 'ES3' (default), 'ES5', 'ES2015', 'ES2016', 'ES2017','ES2018' or 'ESNEXT'. */
-        "module": "commonjs", /* Specify module code generation: 'none', 'commonjs', 'amd', 'system', 'umd', 'es2015', or 'ESNext'. */
-        "lib": ["es2017", "dom"], /* Specify library files to be included in the compilation. */
-        "declaration": true, /* Generates corresponding '.d.ts' file. */
-        "declarationMap": false, /* Generates a sourcemap for each corresponding '.d.ts' file. */
-        "outDir": "./lib/", /* Redirect output structure to the directory. */
-        "rootDir": "./src/", /* Specify the root directory of input files. Use to control the output directory structure with --outDir. */
-        "strict": true, /* Enable all strict type-checking options. */
-        "noImplicitAny": false, /* Raise error on expressions and declarations with an implied 'any' type. */
-        "strictNullChecks": true, /* Enable strict null checks. */
-        "strictFunctionTypes": true, /* Enable strict checking of function types. */
-        "esModuleInterop": true /* Enables emit interoperability between CommonJS and ES Modules via creation of namespace objects for all imports. Implies 'allowSyntheticDefaultImports'. */
-    }
-}
@@ -78,7 +78,14 @@ module.exports = {
     },
     optimization: {
         minimize: config.prod,
-        minimizer: [ new TerserPlugin({cache: true, parallel: true}), new OptimizeCSSAssetsPlugin({}) ]
+        minimizer: [
+            new TerserPlugin(
+                {
+                    extractComments: false,
+                    parallel: true
+                }
+            ),
+            new OptimizeCSSAssetsPlugin({}) ]
     },
     performance: {
         hints: false,