Skip to content

Commit

Permalink
chore: cr fixes
Browse files Browse the repository at this point in the history
  • Loading branch information
bd82 committed Jun 30, 2023
1 parent 36d9aa7 commit 8a57c0c
Show file tree
Hide file tree
Showing 14 changed files with 51 additions and 31 deletions.
3 changes: 2 additions & 1 deletion packages/chevrotain/src/parse/exceptions_public.ts
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@ import {
IRecognitionException,
IRecognizerContext
} from "@chevrotain/types"
import { includes } from "../utils"

const MISMATCHED_TOKEN_EXCEPTION = "MismatchedTokenException"
const NO_VIABLE_ALT_EXCEPTION = "NoViableAltException"
Expand All @@ -21,7 +22,7 @@ Object.freeze(RECOGNITION_EXCEPTION_NAMES)
// hacks to bypass no support for custom Errors in javascript/typescript
export function isRecognitionException(error: Error) {
// can't do instanceof on hacked custom js exceptions
return RECOGNITION_EXCEPTION_NAMES.indexOf(error.name) !== -1
return includes(RECOGNITION_EXCEPTION_NAMES, error.name)
}

abstract class RecognitionException
Expand Down
14 changes: 7 additions & 7 deletions packages/chevrotain/src/parse/grammar/checks.ts
Original file line number Diff line number Diff line change
Expand Up @@ -39,7 +39,7 @@ import {
IParserDefinitionError
} from "./types"
import { tokenStructuredMatcher } from "../../scan/tokens"
import { difference, flatMap, groupBy } from "../../utils"
import { difference, flatMap, groupBy, includes, isEmpty } from "../../utils"

export function validateLookahead(options: {
lookaheadStrategy: ILookaheadStrategy
Expand Down Expand Up @@ -256,11 +256,11 @@ export function validateNoLeftRecursion(
): IParserDefinitionError[] {
const errors: IParserDefinitionError[] = []
const nextNonTerminals = getFirstNoneTerminal(currRule.definition)
if (nextNonTerminals.length === 0) {
if (isEmpty(nextNonTerminals)) {
return []
} else {
const ruleName = topRule.name
const foundLeftRecursion = nextNonTerminals.indexOf(topRule) !== -1
const foundLeftRecursion = includes(nextNonTerminals, topRule)
if (foundLeftRecursion) {
errors.push({
message: errMsgProvider.buildLeftRecursionError({
Expand Down Expand Up @@ -292,7 +292,7 @@ export function validateNoLeftRecursion(

export function getFirstNoneTerminal(definition: IProduction[]): Rule[] {
let result: Rule[] = []
if (definition.length === 0) {
if (isEmpty(definition)) {
return result
}
const firstProd = definition[0]
Expand Down Expand Up @@ -359,7 +359,7 @@ export function validateEmptyOrAlternative(
tokenStructuredMatcher,
1
)
if (possibleFirstInAlt.length === 0) {
if (isEmpty(possibleFirstInAlt)) {
return [
{
message: errMsgProvider.buildEmptyAlternationError({
Expand Down Expand Up @@ -498,7 +498,7 @@ export function validateSomeNonEmptyLookaheadPath(
actualMaxLookahead
)
const pathsInsideProduction = paths[0]
if (pathsInsideProduction.every((path) => path.length === 0)) {
if (pathsInsideProduction.every((path) => isEmpty(path))) {
const errMsg = errMsgProvider.buildEmptyRepetitionError({
topLevelRule: currTopRule,
repetition: currProd
Expand Down Expand Up @@ -658,7 +658,7 @@ function checkTerminalAndNoneTerminalsNameSpace(

topLevels.forEach((currRule) => {
const currRuleName = currRule.name
if (tokenNames.indexOf(currRuleName) !== -1) {
if (includes(tokenNames, currRuleName)) {
const errMsg = errMsgProvider.buildNamespaceConflictError(currRule)

errors.push({
Expand Down
7 changes: 3 additions & 4 deletions packages/chevrotain/src/parse/grammar/interpreter.ts
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,7 @@ import {
ITokenGrammarPath,
TokenType
} from "@chevrotain/types"
import { isEmpty } from "../../utils"

export abstract class AbstractNextPossibleTokensWalker extends RestWalker {
protected possibleTokTypes: TokenType[] = []
Expand Down Expand Up @@ -85,7 +86,7 @@ export abstract class AbstractNextPossibleTokensWalker extends RestWalker {

updateExpectedNext(): void {
// need to consume the Terminal
if (this.ruleStack.length === 0) {
if (isEmpty(this.ruleStack)) {
// must reset nextProductionXXX to avoid walking down another Top Level production while what we are
// really seeking is the last Terminal...
this.nextProductionName = ""
Expand Down Expand Up @@ -127,8 +128,6 @@ export class NextAfterTokenWalker extends AbstractNextPossibleTokensWalker {
}
}

export type AlternativesFirstTokens = TokenType[][]

export interface IFirstAfterRepetition {
token: TokenType | undefined
occurrence: number | undefined
Expand Down Expand Up @@ -392,7 +391,7 @@ export function nextPossibleTokensAfter(
const currOccurrenceStack = currPath.occurrenceStack

// For Example: an empty path could exist in a valid grammar in the case of an EMPTY_ALT
if (currDef.length === 0) {
if (isEmpty(currDef)) {
continue
}

Expand Down
4 changes: 2 additions & 2 deletions packages/chevrotain/src/parse/grammar/llk_lookahead.ts
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,7 @@ import {
getProdType
} from "./lookahead"
import { IParserDefinitionError } from "./types"
import { flatMap } from "../../utils"
import { flatMap, isEmpty } from "../../utils"

export class LLkLookaheadStrategy implements ILookaheadStrategy {
readonly maxLookahead: number
Expand All @@ -39,7 +39,7 @@ export class LLkLookaheadStrategy implements ILookaheadStrategy {
}): ILookaheadValidationError[] {
const leftRecursionErrors = this.validateNoLeftRecursion(options.rules)

if (leftRecursionErrors.length === 0) {
if (isEmpty(leftRecursionErrors)) {
const emptyAltErrors = this.validateEmptyOrAlternatives(options.rules)
const ambiguousAltsErrors = this.validateAmbiguousAlternationAlternatives(
options.rules,
Expand Down
7 changes: 4 additions & 3 deletions packages/chevrotain/src/parse/grammar/lookahead.ts
Original file line number Diff line number Diff line change
Expand Up @@ -25,6 +25,7 @@ import {
TokenType,
BaseParser
} from "@chevrotain/types"
import { isEmpty } from "../../utils"

export enum PROD_TYPE {
OPTION,
Expand Down Expand Up @@ -288,7 +289,7 @@ export function buildSingleAlternativeLookaheadFunction(

if (
singleTokensTypes.length === 1 &&
singleTokensTypes[0].categoryMatches!.length === 0
isEmpty(singleTokensTypes[0].categoryMatches)
) {
const expectedTokenType = singleTokensTypes[0]
const expectedTokenUniqueKey = (<any>expectedTokenType).tokenTypeIdx
Expand Down Expand Up @@ -594,7 +595,7 @@ export function lookAheadSequenceFromAlternatives(
const prefixKeys = pathToHashKeys(currPathPrefix)
const isUnique = isUniquePrefixHash(altsHashes, prefixKeys, altIdx)
// End of the line for this path.
if (isUnique || suffixDef.length === 0 || currPathPrefix.length === k) {
if (isUnique || isEmpty(suffixDef) || currPathPrefix.length === k) {
const currAltResult = finalResult[altIdx]
// TODO: Can we implement a containsPath using Maps/Dictionaries?
if (containsPath(currAltResult, currPathPrefix) === false) {
Expand Down Expand Up @@ -718,7 +719,7 @@ export function areTokenCategoriesNotUsed(
): boolean {
return lookAheadPaths.every((singleAltPaths) =>
singleAltPaths.every((singlePath) =>
singlePath.every((token) => token.categoryMatches!.length === 0)
singlePath.every((token) => isEmpty(token.categoryMatches))
)
)
}
5 changes: 3 additions & 2 deletions packages/chevrotain/src/parse/parser/parser.ts
Original file line number Diff line number Diff line change
Expand Up @@ -35,6 +35,7 @@ import { IParserDefinitionError } from "../grammar/types"
import { Rule } from "@chevrotain/gast"
import { IParserConfigInternal, ParserMethodInternal } from "./types"
import { validateLookahead } from "../grammar/checks"
import { isEmpty } from "../../utils"

export const END_OF_FILE = createTokenInstance(
EOF,
Expand Down Expand Up @@ -193,7 +194,7 @@ export class Parser {
this.TRACE_INIT("Grammar Validations", () => {
// only perform additional grammar validations IFF no resolving errors have occurred.
// as unresolved grammar may lead to unhandled runtime exceptions in the follow up validations.
if (resolverErrors.length === 0 && this.skipValidations === false) {
if (isEmpty(resolverErrors) && this.skipValidations === false) {
const validationErrors = validateGrammar({
rules: Object.values(this.gastProductionsCache),
tokenTypes: Object.values(this.tokensMap),
Expand All @@ -214,7 +215,7 @@ export class Parser {
})

// this analysis may fail if the grammar is not perfectly valid
if (this.definitionErrors.length === 0) {
if (isEmpty(this.definitionErrors)) {
// The results of these computations are not needed unless error recovery is enabled.
if (this.recoveryEnabled) {
this.TRACE_INIT("computeAllProdsFollows", () => {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,7 @@ import { MixedInParser } from "./parser_traits"
import { Rule, serializeGrammar } from "@chevrotain/gast"
import { IParserDefinitionError } from "../../grammar/types"
import { ParserMethodInternal } from "../types"
import { includes } from "../../../utils"

/**
* This trait is responsible for implementing the public API
Expand Down Expand Up @@ -641,7 +642,7 @@ export class RecognizerApi {
implementation: (...implArgs: any[]) => T,
config: IRuleConfig<T> = DEFAULT_RULE_CONFIG
): (idxInCallingRule?: number, ...args: any[]) => T | any {
if (this.definedRulesNames.indexOf(name) !== -1) {
if (includes(this.definedRulesNames, name)) {
const errMsg =
defaultGrammarValidatorErrorProvider.buildDuplicateRuleNameError({
topLevelRule: name,
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -52,6 +52,7 @@ import {
} from "../../../scan/tokens"
import { Rule } from "@chevrotain/gast"
import { ParserMethodInternal } from "../types"
import { isEmpty } from "../../../utils"

/**
* This trait is responsible for the runtime parsing engine
Expand Down Expand Up @@ -103,7 +104,7 @@ export class RecognizerEngine {
// This only checks for Token vocabularies provided as arrays.
// That is good enough because the main objective is to detect users of pre-V4.0 APIs
// rather than all edge cases of empty Token vocabularies.
if (tokenVocabulary.length === 0) {
if (isEmpty(tokenVocabulary)) {
throw Error(
"A Token Vocabulary cannot be empty.\n" +
"\tNote that the first argument for the parser constructor\n" +
Expand Down Expand Up @@ -647,7 +648,7 @@ export class RecognizerEngine {
// NOOP when cst is disabled
this.cstFinallyStateUpdate()

if (this.RULE_STACK.length === 0 && this.isAtEndOfInput() === false) {
if (isEmpty(this.RULE_STACK) && this.isAtEndOfInput() === false) {
const firstRedundantTok = this.LA(1)
const errMsg = this.errorMessageProvider.buildNotAllInputParsedMessage({
firstRedundant: firstRedundantTok,
Expand Down
5 changes: 3 additions & 2 deletions packages/chevrotain/src/parse/parser/traits/recoverable.ts
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,7 @@ import { MismatchedTokenException } from "../../exceptions_public"
import { IN } from "../../constants"
import { MixedInParser } from "./parser_traits"
import { DEFAULT_PARSER_CONFIG } from "../parser"
import { includes, isEmpty } from "../../../utils"

export const EOF_FOLLOW_KEY: any = {}

Expand Down Expand Up @@ -231,7 +232,7 @@ export class Recoverable {
}

// must know the possible following tokens to perform single token insertion
if (follows.length === 0) {
if (isEmpty(follows)) {
return false
}

Expand Down Expand Up @@ -265,7 +266,7 @@ export class Recoverable {
): boolean {
const followKey = this.getCurrFollowKey()
const currentRuleReSyncSet = this.getFollowSetFromFollowKey(followKey)
return currentRuleReSyncSet.indexOf(tokenTypeIdx) !== -1
return includes(currentRuleReSyncSet, tokenTypeIdx)
}

findReSyncTokenType(this: MixedInParser): TokenType {
Expand Down
5 changes: 3 additions & 2 deletions packages/chevrotain/src/scan/lexer.ts
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,7 @@ import {
TokenType
} from "@chevrotain/types"
import { getRegExpAst } from "./reg_exp_parser"
import { isEmpty } from "../utils"

const PATTERN = "PATTERN"
export const DEFAULT_MODE = "defaultMode"
Expand Down Expand Up @@ -319,7 +320,7 @@ export function analyzeTokenTypes(
/* istanbul ignore if */
// start code will only be empty given an empty regExp or failure of regexp-to-ast library
// the first should be a different validation and the second cannot be tested.
if (optimizedCodes.length === 0) {
if (isEmpty(optimizedCodes)) {
// we cannot understand what codes may start possible matches
// The optimization correctness requires knowing start codes for ALL patterns.
// Not actually sure this is an error, no debug message
Expand Down Expand Up @@ -1127,7 +1128,7 @@ export function charCodeToOptimizedIndex(charCode: number): number {
* TODO: Perhaps it should be lazy initialized only if a charCode > 255 is used.
*/
function initCharCodeToOptimizedIndexMap() {
if (charCodeToOptimizedIdxMap.length === 0) {
if (isEmpty(charCodeToOptimizedIdxMap)) {
charCodeToOptimizedIdxMap = new Array(65536)
for (let i = 0; i < 65536; i++) {
charCodeToOptimizedIdxMap[i] = i > 255 ? 255 + ~~(i / 255) : i
Expand Down
3 changes: 2 additions & 1 deletion packages/chevrotain/src/scan/lexer_public.ts
Original file line number Diff line number Diff line change
Expand Up @@ -25,6 +25,7 @@ import {
} from "@chevrotain/types"
import { defaultLexerErrorProvider } from "./lexer_errors_public"
import { clearRegExpParserCache } from "./reg_exp_parser"
import { isEmpty } from "../utils"

export interface ILexingResult {
tokens: IToken[]
Expand Down Expand Up @@ -225,7 +226,7 @@ export class Lexer {
// If definition errors were encountered, the analysis phase may fail unexpectedly/
// Considering a lexer with definition errors may never be used, there is no point
// to performing the analysis anyhow...
if (this.lexerDefinitionErrors.length === 0) {
if (isEmpty(this.lexerDefinitionErrors)) {
augmentTokenTypes(currModDef)

let currAnalyzeResult!: IAnalyzeResult
Expand Down
7 changes: 4 additions & 3 deletions packages/chevrotain/src/scan/reg_exp.ts
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,7 @@ import {
import { PRINT_ERROR, PRINT_WARNING } from "@chevrotain/utils"
import { ASTNode, getRegExpAst } from "./reg_exp_parser"
import { charCodeToOptimizedIndex, minOptimizationVal } from "./lexer"
import { includes } from "../utils"

const complementErrorMessage =
"Complement Sets are not supported for first char optimization"
Expand Down Expand Up @@ -78,7 +79,7 @@ export function firstCharOptimizedIndices(
for (let i = 0; i < terms.length; i++) {
const term = terms[i]

// skip terms that cannot effect the first char results
// skip terms that cannot affect the first char results
switch (term.type) {
case "EndAnchor":
// A group back reference cannot affect potential starting char.
Expand Down Expand Up @@ -218,7 +219,7 @@ function handleIgnoreCase(
function findCode(setNode: Set, targetCharCodes: number[]) {
return setNode.value.find((codeOrRange) => {
if (typeof codeOrRange === "number") {
return targetCharCodes.indexOf(codeOrRange) !== -1
return includes(targetCharCodes, codeOrRange)
} else {
// range
const range = <any>codeOrRange
Expand Down Expand Up @@ -272,7 +273,7 @@ class CharCodeFinder extends BaseRegExpVisitor {
}

visitCharacter(node: Character) {
if (this.targetCharCodes.indexOf(node.value) !== -1) {
if (includes(this.targetCharCodes, node.value)) {
this.found = true
}
}
Expand Down
3 changes: 2 additions & 1 deletion packages/chevrotain/src/scan/tokens.ts
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
import { IToken, TokenType } from "@chevrotain/types"
import { isEmpty } from "../utils"

export function tokenStructuredMatcher(
tokInstance: IToken,
Expand Down Expand Up @@ -57,7 +58,7 @@ export function expandCategories(tokenTypes: TokenType[]): TokenType[] {

result = result.concat(newCategories)

if (newCategories.length === 0) {
if (isEmpty(newCategories)) {
searching = false
} else {
categories = newCategories
Expand Down
11 changes: 11 additions & 0 deletions packages/chevrotain/src/utils.ts
Original file line number Diff line number Diff line change
Expand Up @@ -40,3 +40,14 @@ export function difference<T>(arrA: T[], arrB: T[]): T[] {

return arrA.filter((val) => arrB.indexOf(val) === -1)
}

/**
 * Returns true when `arr` has no elements.
 *
 * An absent array (`undefined`/`null`) is also considered empty, matching the
 * lodash `isEmpty` semantics this helper replaces. The previous expression
 * `arr?.length === 0` evaluated to `undefined === 0` → `false` for a nullish
 * input, i.e. it wrongly reported an absent array as non-empty (relevant for
 * callers passing possibly-undefined arrays such as `token.categoryMatches`).
 */
export function isEmpty<T>(arr: T[] | undefined | null): boolean {
  // `== null` intentionally matches both `null` and `undefined`.
  return arr == null || arr.length === 0
}

/**
 * Returns true when `target` is an element of `arr`.
 *
 * A non-array input (e.g. `undefined` passed where an array was expected)
 * contains nothing, so the function returns false rather than throwing.
 */
export function includes<T>(arr: T[], target: T): boolean {
  // Defensive guard: tolerate nullish / non-array inputs.
  if (!Array.isArray(arr)) {
    return false
  }
  // Array#includes (ES2016) is the idiomatic membership test; it uses
  // SameValueZero equality, so unlike `indexOf(...) !== -1` it also finds NaN.
  return arr.includes(target)
}

0 comments on commit 8a57c0c

Please sign in to comment.