From e3a944528e40082febf186c7eeef83ddc851604b Mon Sep 17 00:00:00 2001 From: Shahar Soel Date: Fri, 30 Jun 2023 21:16:16 +0300 Subject: [PATCH] chore: cr fixes --- packages/chevrotain/src/parse/grammar/checks.ts | 10 +++++----- packages/chevrotain/src/parse/grammar/interpreter.ts | 7 +++---- packages/chevrotain/src/parse/grammar/llk_lookahead.ts | 4 ++-- packages/chevrotain/src/parse/grammar/lookahead.ts | 7 ++++--- packages/chevrotain/src/parse/parser/parser.ts | 5 +++-- .../src/parse/parser/traits/recognizer_engine.ts | 5 +++-- .../chevrotain/src/parse/parser/traits/recoverable.ts | 3 ++- packages/chevrotain/src/scan/lexer.ts | 5 +++-- packages/chevrotain/src/scan/lexer_public.ts | 3 ++- packages/chevrotain/src/scan/tokens.ts | 3 ++- packages/chevrotain/src/utils.ts | 4 ++++ 11 files changed, 33 insertions(+), 23 deletions(-) diff --git a/packages/chevrotain/src/parse/grammar/checks.ts b/packages/chevrotain/src/parse/grammar/checks.ts index 573cc0c971..11bd990da0 100644 --- a/packages/chevrotain/src/parse/grammar/checks.ts +++ b/packages/chevrotain/src/parse/grammar/checks.ts @@ -39,7 +39,7 @@ import { IParserDefinitionError } from "./types" import { tokenStructuredMatcher } from "../../scan/tokens" -import { difference, flatMap, groupBy } from "../../utils" +import { difference, flatMap, groupBy, isEmpty } from "../../utils" export function validateLookahead(options: { lookaheadStrategy: ILookaheadStrategy @@ -256,7 +256,7 @@ export function validateNoLeftRecursion( ): IParserDefinitionError[] { const errors: IParserDefinitionError[] = [] const nextNonTerminals = getFirstNoneTerminal(currRule.definition) - if (nextNonTerminals.length === 0) { + if (isEmpty(nextNonTerminals)) { return [] } else { const ruleName = topRule.name @@ -292,7 +292,7 @@ export function validateNoLeftRecursion( export function getFirstNoneTerminal(definition: IProduction[]): Rule[] { let result: Rule[] = [] - if (definition.length === 0) { + if (isEmpty(definition)) { return result } 
const firstProd = definition[0] @@ -359,7 +359,7 @@ export function validateEmptyOrAlternative( tokenStructuredMatcher, 1 ) - if (possibleFirstInAlt.length === 0) { + if (isEmpty(possibleFirstInAlt)) { return [ { message: errMsgProvider.buildEmptyAlternationError({ @@ -498,7 +498,7 @@ export function validateSomeNonEmptyLookaheadPath( actualMaxLookahead ) const pathsInsideProduction = paths[0] - if (pathsInsideProduction.every((path) => path.length === 0)) { + if (pathsInsideProduction.every((path) => isEmpty(path))) { const errMsg = errMsgProvider.buildEmptyRepetitionError({ topLevelRule: currTopRule, repetition: currProd diff --git a/packages/chevrotain/src/parse/grammar/interpreter.ts b/packages/chevrotain/src/parse/grammar/interpreter.ts index 73db497089..cf30964335 100644 --- a/packages/chevrotain/src/parse/grammar/interpreter.ts +++ b/packages/chevrotain/src/parse/grammar/interpreter.ts @@ -21,6 +21,7 @@ import { ITokenGrammarPath, TokenType } from "@chevrotain/types" +import { isEmpty } from "../../utils" export abstract class AbstractNextPossibleTokensWalker extends RestWalker { protected possibleTokTypes: TokenType[] = [] @@ -85,7 +86,7 @@ export abstract class AbstractNextPossibleTokensWalker extends RestWalker { updateExpectedNext(): void { // need to consume the Terminal - if (this.ruleStack.length === 0) { + if (isEmpty(this.ruleStack)) { // must reset nextProductionXXX to avoid walking down another Top Level production while what we are // really seeking is the last Terminal... 
this.nextProductionName = "" @@ -127,8 +128,6 @@ export class NextAfterTokenWalker extends AbstractNextPossibleTokensWalker { } } -export type AlternativesFirstTokens = TokenType[][] - export interface IFirstAfterRepetition { token: TokenType | undefined occurrence: number | undefined @@ -392,7 +391,7 @@ export function nextPossibleTokensAfter( const currOccurrenceStack = currPath.occurrenceStack // For Example: an empty path could exist in a valid grammar in the case of an EMPTY_ALT - if (currDef.length === 0) { + if (isEmpty(currDef)) { continue } diff --git a/packages/chevrotain/src/parse/grammar/llk_lookahead.ts b/packages/chevrotain/src/parse/grammar/llk_lookahead.ts index 730aa2e797..8b52d53ca1 100644 --- a/packages/chevrotain/src/parse/grammar/llk_lookahead.ts +++ b/packages/chevrotain/src/parse/grammar/llk_lookahead.ts @@ -22,7 +22,7 @@ import { getProdType } from "./lookahead" import { IParserDefinitionError } from "./types" -import { flatMap } from "../../utils" +import { flatMap, isEmpty } from "../../utils" export class LLkLookaheadStrategy implements ILookaheadStrategy { readonly maxLookahead: number @@ -39,7 +39,7 @@ export class LLkLookaheadStrategy implements ILookaheadStrategy { }): ILookaheadValidationError[] { const leftRecursionErrors = this.validateNoLeftRecursion(options.rules) - if (leftRecursionErrors.length === 0) { + if (isEmpty(leftRecursionErrors)) { const emptyAltErrors = this.validateEmptyOrAlternatives(options.rules) const ambiguousAltsErrors = this.validateAmbiguousAlternationAlternatives( options.rules, diff --git a/packages/chevrotain/src/parse/grammar/lookahead.ts b/packages/chevrotain/src/parse/grammar/lookahead.ts index a300846431..d5fb74f7a4 100644 --- a/packages/chevrotain/src/parse/grammar/lookahead.ts +++ b/packages/chevrotain/src/parse/grammar/lookahead.ts @@ -25,6 +25,7 @@ import { TokenType, BaseParser } from "@chevrotain/types" +import { isEmpty } from "../../utils" export enum PROD_TYPE { OPTION, @@ -288,7 +289,7 @@ 
export function buildSingleAlternativeLookaheadFunction( if ( singleTokensTypes.length === 1 && - singleTokensTypes[0].categoryMatches!.length === 0 + isEmpty(singleTokensTypes[0].categoryMatches) ) { const expectedTokenType = singleTokensTypes[0] const expectedTokenUniqueKey = (<any>expectedTokenType).tokenTypeIdx @@ -594,7 +595,7 @@ export function lookAheadSequenceFromAlternatives( const prefixKeys = pathToHashKeys(currPathPrefix) const isUnique = isUniquePrefixHash(altsHashes, prefixKeys, altIdx) // End of the line for this path. - if (isUnique || suffixDef.length === 0 || currPathPrefix.length === k) { + if (isUnique || isEmpty(suffixDef) || currPathPrefix.length === k) { const currAltResult = finalResult[altIdx] // TODO: Can we implement a containsPath using Maps/Dictionaries? if (containsPath(currAltResult, currPathPrefix) === false) { @@ -718,7 +719,7 @@ export function areTokenCategoriesNotUsed( ): boolean { return lookAheadPaths.every((singleAltPaths) => singleAltPaths.every((singlePath) => - singlePath.every((token) => token.categoryMatches!.length === 0) + singlePath.every((token) => isEmpty(token.categoryMatches)) ) ) } diff --git a/packages/chevrotain/src/parse/parser/parser.ts b/packages/chevrotain/src/parse/parser/parser.ts index b1234c0d38..978d76c8d9 100644 --- a/packages/chevrotain/src/parse/parser/parser.ts +++ b/packages/chevrotain/src/parse/parser/parser.ts @@ -35,6 +35,7 @@ import { IParserDefinitionError } from "../grammar/types" import { Rule } from "@chevrotain/gast" import { IParserConfigInternal, ParserMethodInternal } from "./types" import { validateLookahead } from "../grammar/checks" +import { isEmpty } from "../../utils" export const END_OF_FILE = createTokenInstance( EOF, @@ -193,7 +194,7 @@ export class Parser { this.TRACE_INIT("Grammar Validations", () => { // only perform additional grammar validations IFF no resolving errors have occurred. // as unresolved grammar may lead to unhandled runtime exceptions in the follow up validations. 
- if (resolverErrors.length === 0 && this.skipValidations === false) { + if (isEmpty(resolverErrors) && this.skipValidations === false) { const validationErrors = validateGrammar({ rules: Object.values(this.gastProductionsCache), tokenTypes: Object.values(this.tokensMap), @@ -214,7 +215,7 @@ export class Parser { }) // this analysis may fail if the grammar is not perfectly valid - if (this.definitionErrors.length === 0) { + if (isEmpty(this.definitionErrors)) { // The results of these computations are not needed unless error recovery is enabled. if (this.recoveryEnabled) { this.TRACE_INIT("computeAllProdsFollows", () => { diff --git a/packages/chevrotain/src/parse/parser/traits/recognizer_engine.ts b/packages/chevrotain/src/parse/parser/traits/recognizer_engine.ts index b0284a7e4d..39d8dfc0c4 100644 --- a/packages/chevrotain/src/parse/parser/traits/recognizer_engine.ts +++ b/packages/chevrotain/src/parse/parser/traits/recognizer_engine.ts @@ -52,6 +52,7 @@ import { } from "../../../scan/tokens" import { Rule } from "@chevrotain/gast" import { ParserMethodInternal } from "../types" +import { isEmpty } from "../../../utils" /** * This trait is responsible for the runtime parsing engine @@ -103,7 +104,7 @@ export class RecognizerEngine { // This only checks for Token vocabularies provided as arrays. // That is good enough because the main objective is to detect users of pre-V4.0 APIs // rather than all edge cases of empty Token vocabularies. 
- if (tokenVocabulary.length === 0) { + if (isEmpty(tokenVocabulary)) { throw Error( "A Token Vocabulary cannot be empty.\n" + "\tNote that the first argument for the parser constructor\n" + @@ -647,7 +648,7 @@ export class RecognizerEngine { // NOOP when cst is disabled this.cstFinallyStateUpdate() - if (this.RULE_STACK.length === 0 && this.isAtEndOfInput() === false) { + if (isEmpty(this.RULE_STACK) && this.isAtEndOfInput() === false) { const firstRedundantTok = this.LA(1) const errMsg = this.errorMessageProvider.buildNotAllInputParsedMessage({ firstRedundant: firstRedundantTok, diff --git a/packages/chevrotain/src/parse/parser/traits/recoverable.ts b/packages/chevrotain/src/parse/parser/traits/recoverable.ts index 0656a9f504..27ea4b34f0 100644 --- a/packages/chevrotain/src/parse/parser/traits/recoverable.ts +++ b/packages/chevrotain/src/parse/parser/traits/recoverable.ts @@ -17,6 +17,7 @@ import { MismatchedTokenException } from "../../exceptions_public" import { IN } from "../../constants" import { MixedInParser } from "./parser_traits" import { DEFAULT_PARSER_CONFIG } from "../parser" +import { isEmpty } from "../../../utils" export const EOF_FOLLOW_KEY: any = {} @@ -231,7 +232,7 @@ export class Recoverable { } // must know the possible following tokens to perform single token insertion - if (follows.length === 0) { + if (isEmpty(follows)) { return false } diff --git a/packages/chevrotain/src/scan/lexer.ts b/packages/chevrotain/src/scan/lexer.ts index aeb4d68fb7..cb1fd0442a 100644 --- a/packages/chevrotain/src/scan/lexer.ts +++ b/packages/chevrotain/src/scan/lexer.ts @@ -14,6 +14,7 @@ import { TokenType } from "@chevrotain/types" import { getRegExpAst } from "./reg_exp_parser" +import { isEmpty } from "../utils" const PATTERN = "PATTERN" export const DEFAULT_MODE = "defaultMode" @@ -319,7 +320,7 @@ export function analyzeTokenTypes( /* istanbul ignore if */ // start code will only be empty given an empty regExp or failure of regexp-to-ast library // the first 
should be a different validation and the second cannot be tested. - if (optimizedCodes.length === 0) { + if (isEmpty(optimizedCodes)) { // we cannot understand what codes may start possible matches // The optimization correctness requires knowing start codes for ALL patterns. // Not actually sure this is an error, no debug message @@ -1127,7 +1128,7 @@ export function charCodeToOptimizedIndex(charCode: number): number { * TODO: Perhaps it should be lazy initialized only if a charCode > 255 is used. */ function initCharCodeToOptimizedIndexMap() { - if (charCodeToOptimizedIdxMap.length === 0) { + if (isEmpty(charCodeToOptimizedIdxMap)) { charCodeToOptimizedIdxMap = new Array(65536) for (let i = 0; i < 65536; i++) { charCodeToOptimizedIdxMap[i] = i > 255 ? 255 + ~~(i / 255) : i diff --git a/packages/chevrotain/src/scan/lexer_public.ts b/packages/chevrotain/src/scan/lexer_public.ts index d485cd1cd5..e21722f19c 100644 --- a/packages/chevrotain/src/scan/lexer_public.ts +++ b/packages/chevrotain/src/scan/lexer_public.ts @@ -25,6 +25,7 @@ import { } from "@chevrotain/types" import { defaultLexerErrorProvider } from "./lexer_errors_public" import { clearRegExpParserCache } from "./reg_exp_parser" +import { isEmpty } from "../utils" export interface ILexingResult { tokens: IToken[] @@ -225,7 +226,7 @@ export class Lexer { // If definition errors were encountered, the analysis phase may fail unexpectedly/ // Considering a lexer with definition errors may never be used, there is no point // to performing the analysis anyhow... 
- if (this.lexerDefinitionErrors.length === 0) { + if (isEmpty(this.lexerDefinitionErrors)) { augmentTokenTypes(currModDef) let currAnalyzeResult!: IAnalyzeResult diff --git a/packages/chevrotain/src/scan/tokens.ts b/packages/chevrotain/src/scan/tokens.ts index 381aafa6a8..ae9af5a20a 100644 --- a/packages/chevrotain/src/scan/tokens.ts +++ b/packages/chevrotain/src/scan/tokens.ts @@ -1,4 +1,5 @@ import { IToken, TokenType } from "@chevrotain/types" +import { isEmpty } from "../utils" export function tokenStructuredMatcher( tokInstance: IToken, @@ -57,7 +58,7 @@ export function expandCategories(tokenTypes: TokenType[]): TokenType[] { result = result.concat(newCategories) - if (newCategories.length === 0) { + if (isEmpty(newCategories)) { searching = false } else { categories = newCategories diff --git a/packages/chevrotain/src/utils.ts b/packages/chevrotain/src/utils.ts index 4dd7beaaa2..df105c7b2f 100644 --- a/packages/chevrotain/src/utils.ts +++ b/packages/chevrotain/src/utils.ts @@ -40,3 +40,7 @@ export function difference<T>(arrA: T[], arrB: T[]): T[] { return arrA.filter((val) => arrB.indexOf(val) === -1) } + +export function isEmpty<T>(arr: T[] | undefined): boolean { + return arr?.length === 0 +}