Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account-related emails.

Already on GitHub? Sign in to your account

@NaridaL refactor(utils): set tsconfig strict=true for utils package and fix errors #1679

Merged
merged 3 commits into from
Oct 25, 2021
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion packages/chevrotain/src/parse/errors_public.ts
Original file line number Diff line number Diff line change
Expand Up @@ -52,7 +52,7 @@ export const defaultParserErrorProvider: IParserErrorMessageProvider = {
const allLookAheadPaths = reduce(
expectedPathsPerAlt,
(result, currAltPaths) => result.concat(currAltPaths),
[]
[] as TokenType[][]
)
const nextValidTokenSequences = map(
allLookAheadPaths,
Expand Down
12 changes: 7 additions & 5 deletions packages/chevrotain/src/parse/grammar/checks.ts
Original file line number Diff line number Diff line change
Expand Up @@ -465,8 +465,10 @@ export function validateAmbiguousAlternationAlternatives(
return errors
}

export class RepetionCollector extends GAstVisitor {
public allProductions: IProduction[] = []
export class RepetitionCollector extends GAstVisitor {
public allProductions: (IProductionWithOccurrence & {
maxLookahead?: number
})[] = []

public visitRepetitionWithSeparator(manySep: RepetitionWithSeparator): void {
this.allProductions.push(manySep)
Expand Down Expand Up @@ -524,7 +526,7 @@ export function validateSomeNonEmptyLookaheadPath(
): IParserDefinitionError[] {
const errors = []
forEach(topLevelRules, (currTopRule) => {
const collectorVisitor = new RepetionCollector()
const collectorVisitor = new RepetitionCollector()
currTopRule.accept(collectorVisitor)
const allRuleProductions = collectorVisitor.allProductions
forEach(allRuleProductions, (currProd) => {
Expand Down Expand Up @@ -601,7 +603,7 @@ function checkAlternativesAmbiguities(
})
return result
},
[]
[] as { alts: number[]; path: TokenType[] }[]
)

const currErrors = utils.map(identicalAmbiguities, (currAmbDescriptor) => {
Expand All @@ -622,7 +624,7 @@ function checkAlternativesAmbiguities(
type: ParserDefinitionErrorType.AMBIGUOUS_ALTS,
ruleName: rule.name,
occurrence: alternation.idx,
alternatives: [currAmbDescriptor.alts]
alternatives: currAmbDescriptor.alts
}
})

Expand Down
2 changes: 1 addition & 1 deletion packages/chevrotain/src/parse/grammar/gast/gast.ts
Original file line number Diff line number Diff line change
Expand Up @@ -166,7 +166,7 @@ export function collectMethods(rule: Rule): {
repetition: Repetition[]
repetitionWithSeparator: RepetitionWithSeparator[]
repetitionMandatory: RepetitionMandatory[]
repetitionMandatoryWithSeparator: RepetitionMandatoryWithSeparator
repetitionMandatoryWithSeparator: RepetitionMandatoryWithSeparator[]
} {
collectorVisitor.reset()
rule.accept(collectorVisitor)
Expand Down
2 changes: 2 additions & 0 deletions packages/chevrotain/src/parse/grammar/gast/gast_public.ts
Original file line number Diff line number Diff line change
Expand Up @@ -152,6 +152,7 @@ export class RepetitionMandatoryWithSeparator
{
public separator: TokenType
public idx: number = 1
public maxLookahead?: number

constructor(options: {
definition: IProduction[]
Expand Down Expand Up @@ -193,6 +194,7 @@ export class RepetitionWithSeparator
{
public separator: TokenType
public idx: number = 1
public maxLookahead?: number

constructor(options: {
definition: IProduction[]
Expand Down
3 changes: 2 additions & 1 deletion packages/chevrotain/src/parse/parser/traits/gast_recorder.ts
Original file line number Diff line number Diff line change
Expand Up @@ -393,7 +393,8 @@ function recordOrProd(mainProdArg: any, occurrence: number): any {
const prevProd: any = peek(this.recordingProdStack)
// Only an array of alternatives
const hasOptions = isArray(mainProdArg) === false
const alts = hasOptions === false ? mainProdArg : mainProdArg.DEF
const alts: IOrAlt<unknown>[] =
hasOptions === false ? mainProdArg : mainProdArg.DEF

const newOrProd = new Alternation({
definition: [],
Expand Down
2 changes: 1 addition & 1 deletion packages/chevrotain/src/parse/parser/traits/looksahead.ts
Original file line number Diff line number Diff line change
Expand Up @@ -154,7 +154,7 @@ export class LooksAhead {
prodOccurrence: number,
prodKey: number,
prodType: PROD_TYPE,
prodMaxLookahead: number,
prodMaxLookahead: number | undefined,
dslMethodName: string
): void {
this.TRACE_INIT(
Expand Down
30 changes: 16 additions & 14 deletions packages/chevrotain/src/scan/lexer.ts
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,6 @@ import {
keys,
map,
mapValues,
packArray,
PRINT_ERROR,
reduce,
reject
Expand Down Expand Up @@ -53,6 +52,7 @@ export interface IPatternConfig {
group: any
push: string
pop: boolean
tokenType: TokenType
tokenTypeIdx: number
}

Expand Down Expand Up @@ -102,7 +102,7 @@ export function analyzeTokenTypes(
initCharCodeToOptimizedIndexMap()
})

let onlyRelevantTypes
let onlyRelevantTypes: TokenType[]
tracer("Reject Lexer.NA", () => {
onlyRelevantTypes = reject(tokenTypes, (currType) => {
return currType[PATTERN] === Lexer.NA
Expand Down Expand Up @@ -250,7 +250,10 @@ export function analyzeTokenTypes(
if (
checkLineBreaksIssues(tokType, lineTerminatorCharCodes) === false
) {
return canMatchCharCode(lineTerminatorCharCodes, tokType.PATTERN)
return canMatchCharCode(
lineTerminatorCharCodes,
tokType.PATTERN as RegExp | string
)
}
}
})
Expand Down Expand Up @@ -374,9 +377,6 @@ export function analyzeTokenTypes(
)
})
}
tracer("ArrayPacking", () => {
charCodeToPatternIdxToConfig = packArray(charCodeToPatternIdxToConfig)
bd82 marked this conversation as resolved.
Show resolved Hide resolved
})

return {
emptyGroups: emptyGroups,
Expand Down Expand Up @@ -490,7 +490,7 @@ export function findInvalidPatterns(
return { errors, valid }
}

const end_of_input = /[^\\][\$]/
const end_of_input = /[^\\][$]/
NaridaL marked this conversation as resolved.
Show resolved Hide resolved

export function findEndOfInputAnchor(
tokenTypes: TokenType[]
Expand All @@ -504,18 +504,18 @@ export function findEndOfInputAnchor(
}

const invalidRegex = filter(tokenTypes, (currType) => {
const pattern = currType[PATTERN]
const pattern = currType.PATTERN

try {
const regexpAst = getRegExpAst(pattern)
const regexpAst = getRegExpAst(pattern as RegExp)
const endAnchorVisitor = new EndAnchorFinder()
endAnchorVisitor.visit(regexpAst)

return endAnchorVisitor.found
} catch (e) {
// old behavior in case of runtime exceptions with regexp-to-ast.
/* istanbul ignore next - cannot ensure an error in regexp-to-ast*/
return end_of_input.test(pattern.source)
return end_of_input.test((pattern as RegExp).source)
}
})

Expand All @@ -540,7 +540,7 @@ export function findEmptyMatchRegExps(
tokenTypes: TokenType[]
): ILexerDefinitionError[] {
const matchesEmptyString = filter(tokenTypes, (currType) => {
const pattern = currType[PATTERN]
const pattern = currType.PATTERN as RegExp
return pattern.test("")
})

Expand Down Expand Up @@ -572,7 +572,7 @@ export function findStartOfInputAnchor(
}

const invalidRegex = filter(tokenTypes, (currType) => {
const pattern = currType[PATTERN]
const pattern = currType.PATTERN as RegExp
try {
const regexpAst = getRegExpAst(pattern)
const startAnchorVisitor = new StartAnchorFinder()
Expand Down Expand Up @@ -919,7 +919,9 @@ export function performWarningRuntimeChecks(
hasAnyLineBreak = true
}
} else {
if (canMatchCharCode(terminatorCharCodes, tokType.PATTERN)) {
if (
canMatchCharCode(terminatorCharCodes, tokType.PATTERN as RegExp)
) {
hasAnyLineBreak = true
}
}
Expand Down Expand Up @@ -1086,7 +1088,7 @@ export function buildLineBreakIssueMessage(

function getCharCodes(charsOrCodes: (number | string)[]): number[] {
const charCodes = map(charsOrCodes, (numOrString) => {
bd82 marked this conversation as resolved.
Show resolved Hide resolved
if (isString(numOrString) && numOrString.length > 0) {
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

getCharCodes is used on input from an end-user (the custom lineTerminatorCharacters).
Until now if a user passed an empty string it would be ignored, now it would add NaN to the result.
I don't know what effect this could have, and I'm not sure I want to spend the time investigating.

Could we keep the extra length condition to ensure numOrString.charCodeAt(0) will return a real number?

Copy link
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The function as written before returned numbers or empty strings, which didn't match the function return signature. As far as I can see, none of the callers of getCharCodes seem to handle strings?

Copy link
Member

@bd82 bd82 Oct 25, 2021

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Yeah good catch 👍 it seems like a bug, so maybe returning NaN is better (after your refactor)

if (isString(numOrString)) {
return numOrString.charCodeAt(0)
} else {
return numOrString
Expand Down
25 changes: 12 additions & 13 deletions packages/chevrotain/src/scan/lexer_public.ts
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,6 @@ import {
keys,
last,
map,
merge,
NOOP,
PRINT_WARNING,
reduce,
Expand Down Expand Up @@ -72,7 +71,7 @@ export interface IRegExpExec {
exec: CustomPatternMatcherFunc
}

const DEFAULT_LEXER_CONFIG: ILexerConfig = {
const DEFAULT_LEXER_CONFIG: Required<ILexerConfig> = {
deferDefinitionErrorsHandling: false,
positionTracking: "full",
lineTerminatorsPattern: /\n|\r\n?/g,
Expand Down Expand Up @@ -103,14 +102,14 @@ export class Lexer {
protected defaultMode: string
protected emptyGroups: { [groupName: string]: IToken } = {}

private config: ILexerConfig = undefined
private config: Required<ILexerConfig>
private trackStartLines: boolean = true
private trackEndLines: boolean = true
private hasCustom: boolean = false
private canModeBeOptimized: any = {}

private traceInitPerf: boolean | number
private traceInitMaxIdent: number
private traceInitPerf!: boolean | number
private traceInitMaxIdent!: number
bd82 marked this conversation as resolved.
Show resolved Hide resolved
private traceInitIndent: number

constructor(
Expand All @@ -125,7 +124,7 @@ export class Lexer {
}

// todo: defaults func?
this.config = merge(DEFAULT_LEXER_CONFIG, config)
this.config = Object.assign({}, DEFAULT_LEXER_CONFIG, config)

const traceInitVal = this.config.traceInitPerf
if (traceInitVal === true) {
Expand All @@ -138,7 +137,7 @@ export class Lexer {
this.traceInitIndent = -1

this.TRACE_INIT("Lexer Constructor", () => {
let actualDefinition: IMultiModeLexerDefinition
let actualDefinition!: IMultiModeLexerDefinition
let hasOnlySingleMode = true
this.TRACE_INIT("Lexer Config handling", () => {
if (
Expand Down Expand Up @@ -172,11 +171,10 @@ export class Lexer {

// Convert SingleModeLexerDefinition into a IMultiModeLexerDefinition.
if (isArray(lexerDefinition)) {
actualDefinition = <any>{ modes: {} }
actualDefinition.modes[DEFAULT_MODE] = cloneArr(
<TokenType[]>lexerDefinition
)
actualDefinition[DEFAULT_MODE] = DEFAULT_MODE
actualDefinition = {
modes: { defaultMode: cloneArr(lexerDefinition) },
defaultMode: DEFAULT_MODE
}
} else {
// no conversion needed, input should already be a IMultiModeLexerDefinition
hasOnlySingleMode = false
Expand Down Expand Up @@ -262,7 +260,8 @@ export class Lexer {
this.charCodeToPatternIdxToConfig[currModName] =
currAnalyzeResult.charCodeToPatternIdxToConfig

this.emptyGroups = merge(
this.emptyGroups = Object.assign(
Copy link
Member

@bd82 bd82 Oct 24, 2021

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

keep the merge so it would be easy for me to replace with 3rd party library and

  • otherwise I will likely miss this and won't replace when I introduce the new library
  • or use the assign from the utils instead of re-introducing the merge if its easier...

{},
this.emptyGroups,
currAnalyzeResult.emptyGroups
)
Expand Down
2 changes: 1 addition & 1 deletion packages/chevrotain/src/scan/reg_exp.ts
Original file line number Diff line number Diff line change
Expand Up @@ -107,7 +107,7 @@ export function firstCharOptimizedIndices(ast, result, ignoreCase): number[] {
addOptimizedIdxToResult(code, result, ignoreCase)
} else {
// range
const range = code
const range = code as any
// cannot optimize when ignoreCase is
if (ignoreCase === true) {
for (
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -58,7 +58,10 @@ const allTokens = [
// file even if the tests will avoid running it.
if (typeof (<any>new RegExp("(?:)")).sticky === "boolean") {
forEach(allTokens, (currTokType) => {
currTokType.PATTERN = new RegExp(currTokType.PATTERN.source, "y")
currTokType.PATTERN = new RegExp(
(currTokType.PATTERN as RegExp).source,
"y"
)
})
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -86,10 +86,13 @@ export class SwitchCaseRecoveryParser extends EmbeddedActionsParser {
// what about a string with some random value? this could still lead to duplicate keys in the returned parse result
private tokTypesThatCannotBeInsertedInRecovery = [IdentTok, StringTok, IntTok]

// DOCS: overriding this method allows us to customize the logic for which tokens may not be automaticaly inserted
// DOCS: overriding this method allows us to customize the logic for which tokens may not be automatically inserted
// during error recovery.
public canTokenTypeBeInsertedInRecovery(tokType: TokenType) {
return !contains(this.tokTypesThatCannotBeInsertedInRecovery, tokType)
return !contains(
this.tokTypesThatCannotBeInsertedInRecovery,
tokType as unknown
)
}

public parseSwitchStmt(): RetType {
Expand Down
3 changes: 2 additions & 1 deletion packages/chevrotain/test/parse/grammar/interperter_spec.ts
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,7 @@ import {
NextTerminalAfterAtLeastOneWalker,
NextTerminalAfterManySepWalker,
NextTerminalAfterManyWalker,
PartialPathAndSuffixes,
possiblePathsFrom
} from "../../../src/parse/grammar/interpreter"
import { createRegularToken, setEquality } from "../../utils/matchers"
Expand Down Expand Up @@ -588,7 +589,7 @@ describe("The NextTerminalAfterAtLeastOneSepWalker", () => {
})

describe("The chevrotain grammar interpreter capabilities", () => {
function extractPartialPaths(newResultFormat) {
function extractPartialPaths(newResultFormat: PartialPathAndSuffixes[]) {
return map(newResultFormat, (currItem) => currItem.partialPath)
}

Expand Down
6 changes: 3 additions & 3 deletions packages/chevrotain/test/scan/lexer_spec.ts
Original file line number Diff line number Diff line change
Expand Up @@ -660,7 +660,7 @@ function defineLexerSpecs(

expect(allPatterns.length).to.equal(8)
const allPatternsString = map(allPatterns, (pattern) => {
return isString(pattern) ? pattern : pattern.source
return isString(pattern) ? pattern : (pattern as RegExp).source
})
setEquality(allPatternsString, [
"^(?:(\\t| ))",
Expand Down Expand Up @@ -690,7 +690,7 @@ function defineLexerSpecs(
}

if (!skipValidationChecks && ORG_SUPPORT_STICKY) {
it("can transform/analyze an array of Token Typees into matched/ignored/patternToClass - sticky", () => {
it("can transform/analyze an array of Token Types into matched/ignored/patternToClass - sticky", () => {
const tokenClasses = [
Keyword,
If,
Expand All @@ -713,7 +713,7 @@ function defineLexerSpecs(
)
expect(allPatterns.length).to.equal(8)
const allPatternsString = map(allPatterns, (pattern) => {
return isString(pattern) ? pattern : pattern.source
return isString(pattern) ? pattern : (pattern as RegExp).source
})
setEquality(allPatternsString, [
"(\\t| )",
Expand Down
Loading