diff --git a/packages/core/cache/src/FSCache.js b/packages/core/cache/src/FSCache.js
index 54da08a76d6..a18241006dc 100644
--- a/packages/core/cache/src/FSCache.js
+++ b/packages/core/cache/src/FSCache.js
@@ -12,7 +12,6 @@ import logger from '@parcel/logger';
 import {serialize, deserialize, registerSerializableClass} from '@parcel/core';
 // flowlint-next-line untyped-import:off
 import packageJson from '../package.json';
-import {WRITE_LIMIT_CHUNK} from './constants';
 
 const pipeline: (Readable, Writable) => Promise<void> = promisify(
   stream.pipeline,
 );
@@ -82,32 +81,12 @@ export class FSCache implements Cache {
     }
   }
 
-  #getFilePath(key: string, index: number): string {
-    return path.join(this.dir, `${key}-${index}`);
-  }
-
-  async #unlinkChunks(key: string, index: number): Promise<void> {
-    try {
-      await this.fs.unlink(this.#getFilePath(key, index));
-      await this.#unlinkChunks(key, index + 1);
-    } catch (err) {
-      // If there's an error, no more chunks are left to delete
-    }
-  }
-
   hasLargeBlob(key: string): Promise<boolean> {
-    return this.fs.exists(this.#getFilePath(key, 0));
+    return this.fs.exists(this._getCachePath(`${key}-large`));
   }
 
-  async getLargeBlob(key: string): Promise<Buffer> {
-    const buffers: Promise<Buffer>[] = [];
-    for (let i = 0; await this.fs.exists(this.#getFilePath(key, i)); i += 1) {
-      const file: Promise<Buffer> = this.fs.readFile(this.#getFilePath(key, i));
-
-      buffers.push(file);
-    }
-
-    return Buffer.concat(await Promise.all(buffers));
+  getLargeBlob(key: string): Promise<Buffer> {
+    return this.fs.readFile(this._getCachePath(`${key}-large`));
   }
 
   async setLargeBlob(
@@ -115,55 +94,13 @@ export class FSCache implements Cache {
     contents: Buffer | string,
     options?: {|signal?: AbortSignal|},
   ): Promise<void> {
-    const chunks = Math.ceil(contents.length / WRITE_LIMIT_CHUNK);
-
-    const writePromises: Promise<void>[] = [];
-    if (chunks === 1) {
-      // If there's one chunk, don't slice the content
-      writePromises.push(
-        this.fs.writeFile(this.#getFilePath(key, 0), contents, {
-          signal: options?.signal,
-        }),
-      );
-    } else {
-      for (let i = 0; i < chunks; i += 1) {
-        writePromises.push(
-          this.fs.writeFile(
-            this.#getFilePath(key, i),
-            typeof contents === 'string'
-              ? contents.slice(
-                  i * WRITE_LIMIT_CHUNK,
-                  (i + 1) * WRITE_LIMIT_CHUNK,
-                )
-              : contents.subarray(
-                  i * WRITE_LIMIT_CHUNK,
-                  (i + 1) * WRITE_LIMIT_CHUNK,
-                ),
-            {signal: options?.signal},
-          ),
-        );
-      }
-    }
-
-    // If there's already a files following this chunk, it's old and should be removed
-    writePromises.push(this.#unlinkChunks(key, chunks));
-
-    await Promise.all(writePromises);
+    await this.fs.writeFile(this._getCachePath(`${key}-large`), contents, {
+      signal: options?.signal,
+    });
   }
 
   async deleteLargeBlob(key: string): Promise<void> {
-    const deletePromises: Promise<void>[] = [];
-
-    let i = 0;
-    let filePath = this.#getFilePath(key, i);
-
-    while (await this.fs.exists(filePath)) {
-      deletePromises.push(this.fs.rimraf(filePath));
-      i += 1;
-      filePath = this.#getFilePath(key, i);
-    }
-
-    await Promise.all(deletePromises);
+    await this.fs.rimraf(this._getCachePath(`${key}-large`));
   }
 
   async get<T>(key: string): Promise<?T> {
diff --git a/packages/core/cache/src/LMDBCache.js b/packages/core/cache/src/LMDBCache.js
index 644be76c117..15205dfe09e 100644
--- a/packages/core/cache/src/LMDBCache.js
+++ b/packages/core/cache/src/LMDBCache.js
@@ -13,8 +13,6 @@ import packageJson from '../package.json';
 // $FlowFixMe
 import lmdb from 'lmdb';
 
-import {FSCache} from './FSCache';
-
 const pipeline: (Readable, Writable) => Promise<void> = promisify(
   stream.pipeline,
 );
@@ -24,12 +22,10 @@ export class LMDBCache implements Cache {
   dir: FilePath;
   // $FlowFixMe
   store: any;
-  fsCache: FSCache;
 
   constructor(cacheDir: FilePath) {
     this.fs = new NodeFS();
     this.dir = cacheDir;
-    this.fsCache = new FSCache(this.fs, cacheDir);
 
     this.store = lmdb.open(cacheDir, {
       name: 'parcel-cache',
@@ -95,17 +91,13 @@ export class LMDBCache implements Cache {
     return Promise.resolve(this.store.get(key));
   }
 
-  #getFilePath(key: string, index: number): string {
-    return path.join(this.dir, `${key}-${index}`);
-  }
-
   hasLargeBlob(key: string): Promise<boolean> {
-    return this.fs.exists(this.#getFilePath(key, 0));
+    return this.fs.exists(path.join(this.dir, key));
   }
 
   // eslint-disable-next-line require-await
   async getLargeBlob(key: string): Promise<Buffer> {
-    return this.fsCache.getLargeBlob(key);
+    return this.fs.readFile(path.join(this.dir, key));
   }
 
   // eslint-disable-next-line require-await
@@ -114,11 +106,13 @@ export class LMDBCache implements Cache {
     contents: Buffer | string,
     options?: {|signal?: AbortSignal|},
   ): Promise<void> {
-    return this.fsCache.setLargeBlob(key, contents, options);
+    await this.fs.writeFile(path.join(this.dir, key), contents, {
+      signal: options?.signal,
+    });
   }
 
-  deleteLargeBlob(key: string): Promise<void> {
-    return this.fsCache.deleteLargeBlob(key);
+  async deleteLargeBlob(key: string): Promise<void> {
+    await this.fs.rimraf(path.join(this.dir, key));
   }
 
   refresh(): void {
diff --git a/packages/core/cache/src/constants.js b/packages/core/cache/src/constants.js
deleted file mode 100644
index 0b6e9277384..00000000000
--- a/packages/core/cache/src/constants.js
+++ /dev/null
@@ -1,4 +0,0 @@
-// @flow strict-local
-
-// Node has a file size limit of 2 GB
-export const WRITE_LIMIT_CHUNK = 2 * 1024 ** 3;
diff --git a/packages/core/core/src/Parcel.js b/packages/core/core/src/Parcel.js
index 6ca59498439..9880b461a5c 100644
--- a/packages/core/core/src/Parcel.js
+++ b/packages/core/core/src/Parcel.js
@@ -171,7 +171,6 @@ export default class Parcel {
 
     let result = await this._build({startTime});
 
-    await this.#requestTracker.writeToCache();
     await this._end();
 
     if (result.type === 'buildFailure') {
@@ -184,31 +183,10 @@ export default class Parcel {
 
   async _end(): Promise<void> {
     this.#initialized = false;
 
+    await this.#requestTracker.writeToCache();
     await this.#disposable.dispose();
   }
 
-  async writeRequestTrackerToCache(): Promise<void> {
-    if (this.#watchQueue.getNumWaiting() === 0) {
-      // If there's no queued events, we are safe to write the request graph to disk
-      const abortController = new AbortController();
-
-      const unsubscribe = this.#watchQueue.subscribeToAdd(() => {
-        abortController.abort();
-      });
-
-      try {
-        await this.#requestTracker.writeToCache(abortController.signal);
-      } catch (err) {
-        if (!abortController.signal.aborted) {
-          // We expect abort errors if we interrupt the cache write
-          throw err;
-        }
-      }
-
-      unsubscribe();
-    }
-  }
-
   async _startNextBuild(): Promise<?BuildEvent> {
     this.#watchAbortController = new AbortController();
     await this.#farm.callAllWorkers('clearConfigCache', []);
@@ -228,9 +206,6 @@ export default class Parcel {
       if (!(err instanceof BuildAbortError)) {
         throw err;
       }
-    } finally {
-      // If the build passes or fails, we want to cache the request graph
-      await this.writeRequestTrackerToCache();
     }
   }
 
diff --git a/packages/core/core/src/RequestTracker.js b/packages/core/core/src/RequestTracker.js
index f37baa5b770..eb5eb8434ea 100644
--- a/packages/core/core/src/RequestTracker.js
+++ b/packages/core/core/src/RequestTracker.js
@@ -3,7 +3,6 @@
 import invariant, {AssertionError} from 'assert';
 import path from 'path';
 
-import type {Cache} from '@parcel/cache';
 import {ContentGraph} from '@parcel/graph';
 import type {
   ContentGraphOpts,
@@ -19,7 +18,6 @@ import {
   isGlobMatch,
   isDirectoryInside,
   makeDeferredWithPromise,
-  PromiseQueue,
 } from '@parcel/utils';
 import type {Options as WatcherOptions, Event} from '@parcel/watcher';
 import type WorkerFarm from '@parcel/workers';
@@ -43,7 +41,6 @@ import {
   toProjectPathUnsafe,
   toProjectPath,
 } from './projectPath';
-import {report} from './ReporterRunner';
 import {getConfigKeyContentHash} from './requests/ConfigRequest';
 import type {AssetGraphRequestResult} from './requests/AssetGraphRequest';
 import type {PackageRequestResult} from './requests/PackageRequest';
@@ -91,7 +88,6 @@ type RequestGraphOpts = {|
   optionNodeIds: Set<NodeId>,
   unpredicatableNodeIds: Set<NodeId>,
   invalidateOnBuildNodeIds: Set<NodeId>,
-  cachedRequestChunks: Set<number>,
   configKeyNodes: Map<ProjectPath, Set<NodeId>>,
 |};
 
@@ -104,7 +100,6 @@ type SerializedRequestGraph = {|
   optionNodeIds: Set<NodeId>,
   unpredicatableNodeIds: Set<NodeId>,
   invalidateOnBuildNodeIds: Set<NodeId>,
-  cachedRequestChunks: Set<number>,
   configKeyNodes: Map<ProjectPath, Set<NodeId>>,
 |};
 
@@ -292,11 +287,6 @@ const keyFromEnvContentKey = (contentKey: ContentKey): string =>
 const keyFromOptionContentKey = (contentKey: ContentKey): string =>
   contentKey.slice('option:'.length);
 
-// This constant is chosen by local profiling the time to serialise n nodes and tuning until an average time of ~50 ms per blob.
-// The goal is to free up the event loop periodically to allow interruption by the user.
-const NODES_PER_BLOB = 2 ** 14;
-
 export class RequestGraph extends ContentGraph<
   RequestGraphNode,
   RequestGraphEdgeType,
@@ -311,9 +301,7 @@ export class RequestGraph extends ContentGraph<
   // filesystem changes alone. They should rerun on each startup of Parcel.
   unpredicatableNodeIds: Set<NodeId> = new Set();
   invalidateOnBuildNodeIds: Set<NodeId> = new Set();
-  cachedRequestChunks: Set<number> = new Set();
   configKeyNodes: Map<ProjectPath, Set<NodeId>> = new Map();
-  nodesPerBlob: number = NODES_PER_BLOB;
 
   // $FlowFixMe[prop-missing]
   static deserialize(opts: RequestGraphOpts): RequestGraph {
@@ -326,7 +314,6 @@ export class RequestGraph extends ContentGraph<
     deserialized.optionNodeIds = opts.optionNodeIds;
     deserialized.unpredicatableNodeIds = opts.unpredicatableNodeIds;
     deserialized.invalidateOnBuildNodeIds = opts.invalidateOnBuildNodeIds;
-    deserialized.cachedRequestChunks = opts.cachedRequestChunks;
    deserialized.configKeyNodes = opts.configKeyNodes;
     return deserialized;
   }
@@ -342,7 +329,6 @@ export class RequestGraph extends ContentGraph<
       optionNodeIds: this.optionNodeIds,
       unpredicatableNodeIds: this.unpredicatableNodeIds,
       invalidateOnBuildNodeIds: this.invalidateOnBuildNodeIds,
-      cachedRequestChunks: this.cachedRequestChunks,
       configKeyNodes: this.configKeyNodes,
     };
   }
@@ -363,8 +349,6 @@ export class RequestGraph extends ContentGraph<
       this.optionNodeIds.add(nodeId);
     }
 
-    this.removeCachedRequestChunkForNode(nodeId);
-
     return nodeId;
   }
@@ -437,9 +421,6 @@ export class RequestGraph extends ContentGraph<
     for (let parentNode of parentNodes) {
       this.invalidateNode(parentNode, reason);
     }
-
-    // If the node is invalidated, the cached request chunk on disk needs to be re-written
-    this.removeCachedRequestChunkForNode(nodeId);
   }
 
   invalidateUnpredictableNodes() {
@@ -1053,18 +1034,6 @@ export class RequestGraph extends ContentGraph<
     return didInvalidate && this.invalidNodeIds.size > 0;
   }
-
-  hasCachedRequestChunk(index: number): boolean {
-    return this.cachedRequestChunks.has(index);
-  }
-
-  setCachedRequestChunk(index: number): void {
-    this.cachedRequestChunks.add(index);
-  }
-
-  removeCachedRequestChunkForNode(nodeId: number): void {
-    this.cachedRequestChunks.delete(Math.floor(nodeId / this.nodesPerBlob));
-  }
 }
 
 export default class RequestTracker {
@@ -1168,7 +1137,6 @@ export default class RequestTracker {
     if (node && node.type === REQUEST) {
       node.invalidateReason = VALID;
     }
-    this.graph.removeCachedRequestChunkForNode(nodeId);
   }
 
   rejectRequest(nodeId: NodeId) {
@@ -1377,143 +1345,66 @@ export default class RequestTracker {
   async writeToCache(signal?: AbortSignal) {
     let cacheKey = getCacheKey(this.options);
-    let requestGraphKey = `requestGraph-${cacheKey}`;
+    let requestGraphKey = `${cacheKey}-RequestGraph`;
     let snapshotKey = `snapshot-${cacheKey}`;
 
     if (this.options.shouldDisableCache) {
       return;
     }
 
-    let serialisedGraph = this.graph.serialize();
-
-    // Delete an existing request graph cache, to prevent invalid states
-    await this.options.cache.deleteLargeBlob(requestGraphKey);
-
-    let total = 0;
-    const serialiseAndSet = async (
-      key: string,
-      // $FlowFixMe serialise input is any type
-      contents: any,
-    ): Promise<void> => {
-      if (signal?.aborted) {
-        throw new Error('Serialization was aborted');
+    let keys = [requestGraphKey];
+    let promises = [];
+    for (let node of this.graph.nodes) {
+      if (!node || node.type !== REQUEST) {
+        continue;
       }
 
-      await this.options.cache.setLargeBlob(
-        key,
-        serialize(contents),
-        signal
-          ? 
{ - signal: signal, - } - : undefined, - ); - - total += 1; - - report({ - type: 'cache', - phase: 'write', - total, - size: this.graph.nodes.length, - }); - }; - - let queue = new PromiseQueue({ - maxConcurrent: 32, - }); - - report({ - type: 'cache', - phase: 'start', - total, - size: this.graph.nodes.length, - }); - - // Preallocating a sparse array is faster than pushing when N is high enough - let cacheableNodes = new Array(serialisedGraph.nodes.length); - for (let i = 0; i < serialisedGraph.nodes.length; i += 1) { - let node = serialisedGraph.nodes[i]; - - let resultCacheKey = node?.resultCacheKey; - if ( - node?.type === REQUEST && - resultCacheKey != null && - node?.result != null - ) { - queue - .add(() => serialiseAndSet(resultCacheKey, node.result)) - .catch(() => { - // Handle promise rejection - }); - - // eslint-disable-next-line no-unused-vars - let {result: _, ...newNode} = node; - cacheableNodes[i] = newNode; - } else { - cacheableNodes[i] = node; - } - } - - let nodeCountsPerBlob = []; - - for ( - let i = 0; - i * this.graph.nodesPerBlob < cacheableNodes.length; - i += 1 - ) { - let nodesStartIndex = i * this.graph.nodesPerBlob; - let nodesEndIndex = Math.min( - (i + 1) * this.graph.nodesPerBlob, - cacheableNodes.length, - ); - - nodeCountsPerBlob.push(nodesEndIndex - nodesStartIndex); - - if (!this.graph.hasCachedRequestChunk(i)) { - // We assume the request graph nodes are immutable and won't change - let nodesToCache = cacheableNodes.slice(nodesStartIndex, nodesEndIndex); - - queue - .add(() => - serialiseAndSet( - getRequestGraphNodeKey(i, cacheKey), - nodesToCache, - ).then(() => { - // Succeeded in writing to disk, save that we have completed this chunk - this.graph.setCachedRequestChunk(i); - }), - ) - .catch(() => { - // Handle promise rejection - }); + let resultCacheKey = node.resultCacheKey; + if (resultCacheKey != null && node.result != null) { + keys.push(resultCacheKey); + promises.push( + this.options.cache.setLargeBlob( + resultCacheKey, + serialize(node.result), + {signal}, + ), + ); + delete node.result; } } - try { - await queue.run(); - - // Set the request graph after the queue is flushed to avoid writing an invalid state - await serialiseAndSet(requestGraphKey, { - ...serialisedGraph, - nodeCountsPerBlob, - nodes: undefined, - }); - - let opts = getWatcherOptions(this.options); - let snapshotPath = path.join(this.options.cacheDir, snapshotKey + '.txt'); + promises.push( + this.options.cache.setLargeBlob(requestGraphKey, serialize(this.graph), { + signal, + }), + ); - await this.options.inputFS.writeSnapshot( + let opts = getWatcherOptions(this.options); + let snapshotPath = path.join(this.options.cacheDir, snapshotKey + '.txt'); + promises.push( + this.options.inputFS.writeSnapshot( this.options.watchDir, snapshotPath, opts, - ); + ), + ); + + try { + await Promise.all(promises); } catch (err) { - // If we have aborted, ignore the error and continue - if (!signal?.aborted) throw err; + if (signal?.aborted) { + // If writing to the cache was aborted, delete all of the keys to avoid inconsistent states. + for (let key of keys) { + try { + await this.options.cache.deleteLargeBlob(key); + } catch (err) { + // ignore. 
+          }
+        }
+      } else {
+        throw err;
+      }
     }
-
-    report({type: 'cache', phase: 'end', total, size: this.graph.nodes.length});
   }
 
   static async init({
@@ -1549,89 +1440,27 @@ function getCacheKey(options) {
   );
 }
 
-function getRequestGraphNodeKey(index: number, cacheKey: string) {
-  return `requestGraph-nodes-${index}-${cacheKey}`;
-}
-
-export async function readAndDeserializeRequestGraph(
-  cache: Cache,
-  requestGraphKey: string,
-  cacheKey: string,
-): Async<{|requestGraph: RequestGraph, bufferLength: number|}> {
-  let bufferLength = 0;
-  const getAndDeserialize = async (key: string) => {
-    let buffer = await cache.getLargeBlob(key);
-    bufferLength += Buffer.byteLength(buffer);
-    return deserialize(buffer);
-  };
-
-  let serializedRequestGraph = await getAndDeserialize(requestGraphKey);
-
-  let nodePromises = serializedRequestGraph.nodeCountsPerBlob.map(
-    async (nodesCount, i) => {
-      let nodes = await getAndDeserialize(getRequestGraphNodeKey(i, cacheKey));
-      invariant.equal(
-        nodes.length,
-        nodesCount,
-        'RequestTracker node chunk: invalid node count',
-      );
-      return nodes;
-    },
-  );
-
-  return {
-    requestGraph: RequestGraph.deserialize({
-      ...serializedRequestGraph,
-      nodes: (await Promise.all(nodePromises)).flat(),
-    }),
-    // This is used inside parcel query for `.inspectCache`
-    bufferLength,
-  };
-}
-
 async function loadRequestGraph(options): Async<RequestGraph> {
   if (options.shouldDisableCache) {
     return new RequestGraph();
   }
 
   let cacheKey = getCacheKey(options);
-  let requestGraphKey = `requestGraph-${cacheKey}`;
-  let timeout;
+  let requestGraphKey = `${cacheKey}-RequestGraph`;
   const snapshotKey = `snapshot-${cacheKey}`;
   const snapshotPath = path.join(options.cacheDir, snapshotKey + '.txt');
 
   if (await options.cache.hasLargeBlob(requestGraphKey)) {
     try {
-      let {requestGraph} = await readAndDeserializeRequestGraph(
-        options.cache,
-        requestGraphKey,
-        cacheKey,
+      let requestGraph: RequestGraph = deserialize(
+        await options.cache.getLargeBlob(requestGraphKey),
       );
 
       let opts = getWatcherOptions(options);
-
-      timeout = setTimeout(() => {
-        logger.warn({
-          origin: '@parcel/core',
-          message: `Retrieving file system events since last build...\nThis can take upto a minute after branch changes or npm/yarn installs.`,
-        });
-      }, 5000);
-      let startTime = Date.now();
       let events = await options.inputFS.getEventsSince(
         options.watchDir,
         snapshotPath,
         opts,
       );
-      clearTimeout(timeout);
-
-      logger.verbose({
-        origin: '@parcel/core',
-        message: `File system event count: ${events.length}`,
-        meta: {
-          trackableEvent: 'watcher_events_count',
-          watcherEventCount: events.length,
-          duration: Date.now() - startTime,
-        },
-      });
 
       requestGraph.invalidateUnpredictableNodes();
       requestGraph.invalidateOnBuildNodes();
@@ -1646,7 +1475,6 @@ async function loadRequestGraph(options): Async<RequestGraph> {
       return requestGraph;
     } catch (e) {
       // Prevent logging fs events took too long warning
-      clearTimeout(timeout);
       logErrorOnBailout(options, snapshotPath, e);
       // This error means respondToFSEvents timed out handling the invalidation events
       // In this case we'll return a fresh RequestGraph
diff --git a/packages/core/core/test/RequestTracker.test.js b/packages/core/core/test/RequestTracker.test.js
index 0cb8b830495..ed5cfc79dfd 100644
--- a/packages/core/core/test/RequestTracker.test.js
+++ b/packages/core/core/test/RequestTracker.test.js
@@ -185,36 +185,6 @@ describe('RequestTracker', () => {
     );
   });
 
-  it('should write cache to disk and store index', async () => {
-    let tracker = new RequestTracker({farm, options});
-
-    await tracker.runRequest({
-      id: 'abc',
-      type: 7,
-      // $FlowFixMe string isn't a valid result
-      run: async ({api}: {api: RunAPI<string | void>, ...}) => {
-        let result = await Promise.resolve();
-        api.storeResult(result);
-      },
-      input: null,
-    });
-
-    await tracker.writeToCache();
-
-    assert(tracker.graph.cachedRequestChunks.size > 0);
-  });
-
-  it('should not write to cache when the abort controller aborts', async () => {
-    let tracker = new RequestTracker({farm, options});
-
-    const abortController = new AbortController();
-    abortController.abort();
-
-    await tracker.writeToCache(abortController.signal);
-
-    assert(tracker.graph.cachedRequestChunks.size === 0);
-  });
-
   it('should not requeue requests if the previous request is still running', async () => {
     let tracker = new RequestTracker({farm, options});
 
@@ -312,137 +282,4 @@ describe('RequestTracker', () => {
     assert.strictEqual(cachedResult, 'b');
     assert.strictEqual(called, false);
   });
-
-  it('should ignore stale node chunks from cache', async () => {
-    let tracker = new RequestTracker({farm, options});
-
-    // Set the nodes per blob low so we can ensure multiple files without
-    // creating 17,000 nodes
-    tracker.graph.nodesPerBlob = 2;
-
-    tracker.graph.addNode({type: 0, id: 'some-file-node-1'});
-    tracker.graph.addNode({type: 0, id: 'some-file-node-2'});
-    tracker.graph.addNode({type: 0, id: 'some-file-node-3'});
-    tracker.graph.addNode({type: 0, id: 'some-file-node-4'});
-    tracker.graph.addNode({type: 0, id: 'some-file-node-5'});
-
-    await tracker.writeToCache();
-
-    // Create a new request tracker that shouldn't look at the old cache files
-    tracker = new RequestTracker({farm, options});
-    assert.equal(tracker.graph.nodes.length, 0);
-
-    tracker.graph.addNode({type: 0, id: 'some-file-node-1'});
-    await tracker.writeToCache();
-
-    // Init a request tracker that should only read the relevant cache files
-    tracker = await RequestTracker.init({farm, options});
-    assert.equal(tracker.graph.nodes.length, 1);
-  });
-
-  it('should init with multiple node chunks', async () => {
-    let tracker = new RequestTracker({farm, options});
-
-    // Set the nodes per blob low so we can ensure multiple files without
-    // creating 17,000 nodes
-    tracker.graph.nodesPerBlob = 2;
-
-    tracker.graph.addNode({type: 0, id: 'some-file-node-1'});
-    tracker.graph.addNode({type: 0, id: 'some-file-node-2'});
-    tracker.graph.addNode({type: 0, id: 'some-file-node-3'});
-    tracker.graph.addNode({type: 0, id: 'some-file-node-4'});
-    tracker.graph.addNode({type: 0, id: 'some-file-node-5'});
-
-    await tracker.writeToCache();
-
-    tracker = await RequestTracker.init({farm, options});
-    assert.equal(tracker.graph.nodes.length, 5);
-  });
-
-  it('should write new nodes to cache', async () => {
-    let tracker = new RequestTracker({farm, options});
-
-    tracker.graph.addNode({
-      type: 0,
-      id: 'test-file',
-    });
-    await tracker.writeToCache();
-    assert.equal(tracker.graph.nodes.length, 1);
-
-    tracker.graph.addNode({
-      type: 0,
-      id: 'test-file-2',
-    });
-    await tracker.writeToCache();
-    assert.equal(tracker.graph.nodes.length, 2);
-
-    // Create a new tracker from cache
-    tracker = await RequestTracker.init({farm, options});
-
-    await tracker.writeToCache();
-    assert.equal(tracker.graph.nodes.length, 2);
-  });
-
-  it('should write updated nodes to cache', async () => {
-    let tracker = new RequestTracker({farm, options});
-
-    let contentKey = 'abc';
-    await tracker.runRequest({
-      id: contentKey,
-      type: 7,
-      // $FlowFixMe string isn't a valid result
-      run: async ({api}: {api: RunAPI<string | void>, ...}) => {
-        let result = await Promise.resolve('a');
-        api.storeResult(result);
-      },
-      input: null,
-    });
-    assert.equal(await tracker.getRequestResult(contentKey), 'a');
-    await tracker.writeToCache();
-
-    await tracker.runRequest(
-      {
-        id: contentKey,
-        type: 7,
-        // $FlowFixMe string isn't a valid result
-        run: async ({api}: {api: RunAPI<string | void>, ...}) => {
-          let result = await Promise.resolve('b');
-          api.storeResult(result);
-        },
-        input: null,
-      },
-      {force: true},
-    );
-    assert.equal(await tracker.getRequestResult(contentKey), 'b');
-    await tracker.writeToCache();
-
-    // Create a new tracker from cache
-    tracker = await RequestTracker.init({farm, options});
-
-    assert.equal(await tracker.getRequestResult(contentKey), 'b');
-  });
-
-  it('should write invalidated nodes to cache', async () => {
-    let tracker = new RequestTracker({farm, options});
-
-    let contentKey = 'abc';
-    await tracker.runRequest({
-      id: contentKey,
-      type: 7,
-      run: () => {},
-      input: null,
-    });
-    let nodeId = tracker.graph.getNodeIdByContentKey(contentKey);
-    assert.equal(tracker.graph.getNode(nodeId)?.invalidateReason, 0);
-    await tracker.writeToCache();
-
-    tracker.graph.invalidateNode(nodeId, 1);
-    assert.equal(tracker.graph.getNode(nodeId)?.invalidateReason, 1);
-    await tracker.writeToCache();
-
-    // Create a new tracker from cache
-    tracker = await RequestTracker.init({farm, options});
-
-    assert.equal(tracker.graph.getNode(nodeId)?.invalidateReason, 1);
-  });
 });
diff --git a/packages/dev/query/src/deep-imports.js b/packages/dev/query/src/deep-imports.js
index 7fa4c302394..4f661ae219d 100644
--- a/packages/dev/query/src/deep-imports.js
+++ b/packages/dev/query/src/deep-imports.js
@@ -6,7 +6,6 @@ import typeof BundleGraph, {
 } from '@parcel/core/src/BundleGraph.js';
 import typeof RequestTracker, {
   RequestGraph,
-  readAndDeserializeRequestGraph,
 } from '@parcel/core/src/RequestTracker.js';
 import typeof {requestGraphEdgeTypes} from '@parcel/core/src/RequestTracker.js';
 import typeof {LMDBCache} from '@parcel/cache/src/LMDBCache.js';
@@ -50,7 +49,6 @@ module.exports = (v: {|
   },
   RequestTracker: {
     default: RequestTracker,
-    readAndDeserializeRequestGraph: readAndDeserializeRequestGraph,
     RequestGraph: RequestGraph,
     requestGraphEdgeTypes: requestGraphEdgeTypes,
     ...
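Note: with chunking removed, each graph is stored as a single blob whose key ends in a fixed suffix (`-RequestGraph` here, `AssetGraph`/`BundleGraph` for the other graphs), which is why the parcel-query changes below can locate blobs with a plain `endsWith` check. A minimal sketch of that lookup, assuming a cache directory laid out by FSCache/LMDBCache; the helper name `findRequestGraphKey` is hypothetical:

const fs = require('fs');

function findRequestGraphKey(cacheDir) {
  // After this change there is exactly one request graph blob per cache key,
  // so a suffix match is enough (it mirrors the endsWith check below).
  return fs
    .readdirSync(cacheDir)
    .find(basename => basename.endsWith('RequestGraph'));
}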
diff --git a/packages/dev/query/src/index.js b/packages/dev/query/src/index.js index 47aeb71f779..61065989a0c 100644 --- a/packages/dev/query/src/index.js +++ b/packages/dev/query/src/index.js @@ -14,7 +14,7 @@ const { BundleGraph: {default: BundleGraph}, RequestTracker: { default: RequestTracker, - readAndDeserializeRequestGraph, + RequestGraph, requestGraphEdgeTypes, }, LMDBCache, @@ -39,17 +39,15 @@ export async function loadGraphs(cacheDir: string): Promise<{| |}> = [ { name: 'requestGraphBlob', - check: basename => - basename.startsWith('requestGraph-') && - !basename.startsWith('requestGraph-nodes'), + check: basename => basename.endsWith('RequestGraph'), }, { name: 'bundleGraphBlob', - check: basename => basename.endsWith('BundleGraph-0'), + check: basename => basename.endsWith('BundleGraph'), }, { name: 'assetGraphBlob', - check: basename => basename.endsWith('AssetGraph-0'), + check: basename => basename.endsWith('AssetGraph'), }, ]; @@ -80,23 +78,19 @@ export async function loadGraphs(cacheDir: string): Promise<{| let requestTracker; if (requestGraphBlob) { try { - let requestGraphKey = requestGraphBlob.slice(0, -'-0'.length); - let date = Date.now(); - let {requestGraph, bufferLength} = await readAndDeserializeRequestGraph( - cache, - requestGraphKey, - requestGraphKey.replace('requestGraph-', ''), - ); + let file = await cache.getLargeBlob(path.basename(requestGraphBlob)); + let timeToDeserialize = Date.now(); + let obj = v8.deserialize(file); + timeToDeserialize = Date.now() - timeToDeserialize; requestTracker = new RequestTracker({ - graph: requestGraph, + graph: RequestGraph.deserialize(obj.value), // $FlowFixMe farm: null, // $FlowFixMe options: null, }); - let timeToDeserialize = Date.now() - date; - cacheInfo.set('RequestGraph', [bufferLength]); + cacheInfo.set('RequestGraph', [Buffer.byteLength(file)]); cacheInfo.get('RequestGraph')?.push(timeToDeserialize); } catch (e) { console.log('Error loading Request Graph\n', e); @@ -107,9 +101,7 @@ export async function loadGraphs(cacheDir: string): Promise<{| let bundleGraph; if (bundleGraphBlob) { try { - let file = await cache.getLargeBlob( - path.basename(bundleGraphBlob).slice(0, -'-0'.length), - ); + let file = await cache.getLargeBlob(path.basename(bundleGraphBlob)); let timeToDeserialize = Date.now(); let obj = v8.deserialize(file); @@ -128,9 +120,7 @@ export async function loadGraphs(cacheDir: string): Promise<{| let assetGraph; if (assetGraphBlob) { try { - let file = await cache.getLargeBlob( - path.basename(assetGraphBlob).slice(0, -'-0'.length), - ); + let file = await cache.getLargeBlob(path.basename(assetGraphBlob)); let timeToDeserialize = Date.now(); let obj = v8.deserialize(file);
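Note: the behavioural core of the new `writeToCache` is its abort path: all blobs are written concurrently, and if the `AbortSignal` fires mid-write, every key that may have been touched is deleted so the cache never holds a mix of old and new state. A condensed sketch of that pattern, with `cache` standing in for the `@parcel/cache` Cache interface and `entries` a hypothetical list of `[key, buffer]` pairs:

async function writeAllOrCleanUp(cache, entries, signal) {
  try {
    // Write every blob concurrently; setLargeBlob forwards the signal.
    await Promise.all(
      entries.map(([key, buffer]) => cache.setLargeBlob(key, buffer, {signal})),
    );
  } catch (err) {
    if (!signal?.aborted) {
      throw err;
    }
    // Aborted mid-write: delete anything that may have been partially
    // written, so a later startup never loads an inconsistent state.
    await Promise.all(
      entries.map(([key]) => cache.deleteLargeBlob(key).catch(() => {})),
    );
  }
}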