opt(torii-core): move off queryqueue for executing tx #2460

Merged

55 commits merged from use-tx-executor into main on Oct 3, 2024.

Changes from 15 commits

Commits (55)
01ce338
opt(torii-core): move off queryqueue for executing tx
Larkooo Sep 20, 2024
e0ec767
feat: replace queury queue by executor
Larkooo Sep 24, 2024
6097a60
fix: executor
Larkooo Sep 24, 2024
043f669
refactor: executor logic
Larkooo Sep 25, 2024
9d7d0e7
Merge remote-tracking branch 'upstream/main' into use-tx-executor
Larkooo Sep 25, 2024
9314438
fix: tests
Larkooo Sep 25, 2024
f9a136f
fmt
Larkooo Sep 25, 2024
b883343
executor inside of tokio select
Larkooo Sep 25, 2024
7771fdf
executor graceful exit
Larkooo Sep 25, 2024
60c9069
priv execute
Larkooo Sep 25, 2024
cd52f0f
contracts insertion shouldnt go through executor
Larkooo Sep 25, 2024
045eed0
clean code
Larkooo Sep 25, 2024
045e4ae
exec
Larkooo Sep 25, 2024
388ba1e
Merge branch 'main' into use-tx-executor
Larkooo Sep 25, 2024
b7acef5
fix: tests
Larkooo Sep 25, 2024
b94ad7a
oneshot channel for execution result
Larkooo Sep 25, 2024
c13ff59
fmt
Larkooo Sep 25, 2024
7fc27d5
clone shutdown tx
Larkooo Sep 25, 2024
260845c
fmt
Larkooo Sep 25, 2024
a7e4f1f
fix: test exec
Larkooo Sep 25, 2024
8cf4452
non bloking execute engine
Larkooo Sep 25, 2024
2bcf226
executor logs
Larkooo Sep 25, 2024
3242ac4
in memory head
Larkooo Sep 25, 2024
ef3e4ba
fmt
Larkooo Sep 25, 2024
299c0b9
fix: tests
Larkooo Sep 27, 2024
e4404f1
fixx: libp2p
Larkooo Sep 27, 2024
663234a
fmt
Larkooo Sep 27, 2024
c998428
Merge branch 'main' into use-tx-executor
Larkooo Sep 27, 2024
994abc5
try fix libp2p test
Larkooo Sep 27, 2024
65612fa
fix tests
Larkooo Sep 27, 2024
13b1ba7
fmt
Larkooo Sep 27, 2024
0c31327
use tempfile for tests
Larkooo Sep 27, 2024
afa2a0a
fix
Larkooo Sep 27, 2024
ef9fafc
c
Larkooo Sep 27, 2024
4ec379c
fix: sql tests
Larkooo Sep 27, 2024
d393896
clone
Larkooo Sep 27, 2024
1730bfc
fmt
Larkooo Sep 27, 2024
b708081
fmt
Larkooo Sep 27, 2024
7758cf9
no temp file
Larkooo Sep 27, 2024
607cd06
tmp file
Larkooo Sep 30, 2024
d48dd30
fix: lock issues
Larkooo Sep 30, 2024
6d4b99f
manuallyt use tmp file
Larkooo Sep 30, 2024
baf7f35
fix graphql tests
Larkooo Sep 30, 2024
c4f288a
fix: tests
Larkooo Sep 30, 2024
5dac220
clippy
Larkooo Sep 30, 2024
28633b4
fix torii bin
Larkooo Sep 30, 2024
fd3c377
engine executions
Larkooo Sep 30, 2024
4cabea5
use tmp file for db
Larkooo Sep 30, 2024
ee86042
fix: cursor
Larkooo Sep 30, 2024
43246b6
chore
Larkooo Sep 30, 2024
706c7fb
wip
Larkooo Oct 1, 2024
9c9e0a3
Merge branch 'main' into use-tx-executor
Larkooo Oct 1, 2024
6b6f5a6
cleaning code
Larkooo Oct 2, 2024
61f0a4b
refactor: handle errors without panic
Larkooo Oct 2, 2024
63cca75
use vec
Larkooo Oct 2, 2024
5 changes: 4 additions & 1 deletion bin/torii/src/main.rs
@@ -31,6 +31,7 @@ use tokio::sync::broadcast;
 use tokio::sync::broadcast::Sender;
 use tokio_stream::StreamExt;
 use torii_core::engine::{Engine, EngineConfig, IndexingFlags, Processors};
+use torii_core::executor::Executor;
 use torii_core::processors::event_message::EventMessageProcessor;
 use torii_core::processors::generate_event_processors_map;
 use torii_core::processors::metadata_update::MetadataUpdateProcessor;
@@ -185,7 +186,8 @@ async fn main() -> anyhow::Result<()> {
 // Get world address
 let world = WorldContractReader::new(args.world_address, provider.clone());

-let db = Sql::new(pool.clone(), args.world_address).await?;
+let (mut executor, sender) = Executor::new(pool.clone(), shutdown_tx.clone()).await?;
+let db = Sql::new(pool.clone(), args.world_address, sender.clone()).await?;

 let processors = Processors {
 event: generate_event_processors_map(vec![
@@ -289,6 +291,7 @@ async fn main() -> anyhow::Result<()> {

 tokio::select! {
 res = engine.start() => res?,
+_ = executor.run() => {},
 _ = proxy_server.start(shutdown_tx.subscribe()) => {},
 _ = graphql_server => {},
 _ = grpc_server => {},
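The diff above wires a new Executor next to the Sql handle: Executor::new(pool, shutdown_tx) returns the executor plus a sender, Sql::new keeps the sender, and executor.run() is driven from the same tokio::select! as the servers so it shuts down with the rest of the process. The executor itself lives in torii_core::executor and is not part of this excerpt, so the following is only a minimal sketch of the pattern, under assumed names (QueryMessage, the ack channel) and with the SQLite pool left out: one task owns the writes, receives work over an mpsc channel, and stops on the shutdown broadcast.

// Hedged sketch only: assumed message shape and method names, not torii's actual
// executor.rs (the real Executor::new also takes the SQLite pool, omitted here).
use tokio::sync::{broadcast, mpsc, oneshot};

pub enum QueryMessage {
    // a statement to buffer into the current transaction
    Query(String),
    // flush/commit everything buffered so far and ack the caller
    Execute(oneshot::Sender<anyhow::Result<()>>),
}

pub struct Executor {
    rx: mpsc::UnboundedReceiver<QueryMessage>,
    shutdown_rx: broadcast::Receiver<()>,
}

impl Executor {
    pub fn new(shutdown_tx: broadcast::Sender<()>) -> (Self, mpsc::UnboundedSender<QueryMessage>) {
        let (tx, rx) = mpsc::unbounded_channel();
        (Self { rx, shutdown_rx: shutdown_tx.subscribe() }, tx)
    }

    pub async fn run(&mut self) -> anyhow::Result<()> {
        loop {
            tokio::select! {
                // graceful exit when the process-wide shutdown signal fires
                _ = self.shutdown_rx.recv() => break,
                msg = self.rx.recv() => match msg {
                    Some(QueryMessage::Query(statement)) => {
                        // buffer `statement` into the open transaction here
                        let _ = statement;
                    }
                    Some(QueryMessage::Execute(ack)) => {
                        // commit the buffered transaction here, then report the result
                        let _ = ack.send(Ok(()));
                    }
                    None => break, // all senders dropped
                },
            }
        }
        Ok(())
    }
}

From the diff, Sql::new now receives the sender half, so the Sql handle forwards its work over this channel instead of buffering it in the old query queue.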
46 changes: 17 additions & 29 deletions crates/torii/core/src/engine.rs
@@ -7,6 +7,7 @@ use std::time::Duration;
 use anyhow::Result;
 use bitflags::bitflags;
 use dojo_world::contracts::world::WorldContractReader;
+use futures_util::future::try_join_all;
 use hashlink::LinkedHashMap;
 use starknet::core::types::{
 BlockId, BlockTag, EmittedEvent, Event, EventFilter, Felt, MaybePendingBlockWithReceipts,
@@ -17,7 +18,6 @@ use starknet::providers::Provider;
 use tokio::sync::broadcast::Sender;
 use tokio::sync::mpsc::Sender as BoundedSender;
 use tokio::sync::Semaphore;
-use tokio::task::JoinSet;
 use tokio::time::{sleep, Instant};
 use tracing::{debug, error, info, trace, warn};

@@ -151,7 +151,7 @@ impl<P: Provider + Send + Sync + std::fmt::Debug + 'static> Engine<P> {
 // use the start block provided by user if head is 0
 let (head, _, _) = self.db.head().await?;
 if head == 0 {
-self.db.set_head(self.config.start_block);
+self.db.set_head(self.config.start_block)?;
 } else if self.config.start_block != 0 {
 warn!(target: LOG_TARGET, "Start block ignored, stored head exists and will be used instead.");
 }
@@ -179,7 +179,7 @@ impl<P: Provider + Send + Sync + std::fmt::Debug + 'static> Engine<P> {
 }

 match self.process(fetch_result).await {
-Ok(()) => {}
+Ok(()) => self.db.execute()?,
 Err(e) => {
 error!(target: LOG_TARGET, error = %e, "Processing fetched data.");
 erroring_out = true;
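On a successful fetch the engine now calls self.db.execute()?, which is synchronous here (no .await). Going by the commit messages ("non bloking execute engine", "oneshot channel for execution result"), one plausible shape is that execute only enqueues a flush request for the executor and hands back a oneshot receiver for the outcome; whether the real method exposes that receiver is not visible in this hunk. A self-contained sketch with assumed names (DbHandle, Ack) that are not torii's actual API:

// Assumed names for illustration only; mirrors the "enqueue and optionally await
// an ack" shape implied by the diff, not torii's actual Sql implementation.
use tokio::sync::{mpsc, oneshot};

type Ack = oneshot::Sender<anyhow::Result<()>>;

#[derive(Clone)]
pub struct DbHandle {
    executor: mpsc::UnboundedSender<Ack>,
}

impl DbHandle {
    // Non-blocking and non-async: it only enqueues an "execute" request.
    pub fn execute(&self) -> anyhow::Result<oneshot::Receiver<anyhow::Result<()>>> {
        let (tx, rx) = oneshot::channel();
        self.executor
            .send(tx)
            .map_err(|_| anyhow::anyhow!("executor channel closed"))?;
        Ok(rx)
    }
}

A caller that needs confirmation can await the returned receiver; a caller that only wants to keep indexing can drop it.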
@@ -407,15 +407,14 @@ impl<P: Provider + Send + Sync + std::fmt::Debug + 'static> Engine<P> {
 // provider. So we can fail silently and try
 // again in the next iteration.
 warn!(target: LOG_TARGET, transaction_hash = %format!("{:#x}", transaction_hash), "Retrieving pending transaction receipt.");
-self.db.set_head(data.block_number - 1);
+self.db.set_head(data.block_number - 1)?;
 if let Some(tx) = last_pending_block_tx {
-self.db.set_last_pending_block_tx(Some(tx));
+self.db.set_last_pending_block_tx(Some(tx))?;
 }

 if let Some(tx) = last_pending_block_world_tx {
-self.db.set_last_pending_block_world_tx(Some(tx));
+self.db.set_last_pending_block_world_tx(Some(tx))?;
 }
-self.db.execute().await?;
 return Ok(());
 }
 _ => {
@@ -441,18 +440,16 @@ impl<P: Provider + Send + Sync + std::fmt::Debug + 'static> Engine<P> {

 // Set the head to the last processed pending transaction
 // Head block number should still be latest block number
-self.db.set_head(data.block_number - 1);
+self.db.set_head(data.block_number - 1)?;

 if let Some(tx) = last_pending_block_tx {
-self.db.set_last_pending_block_tx(Some(tx));
+self.db.set_last_pending_block_tx(Some(tx))?;
 }

 if let Some(tx) = last_pending_block_world_tx {
-self.db.set_last_pending_block_world_tx(Some(tx));
+self.db.set_last_pending_block_world_tx(Some(tx))?;
 }
-
-self.db.execute().await?;

 Ok(())
 }
@@ -486,20 +483,14 @@ impl<P: Provider + Send + Sync + std::fmt::Debug + 'static> Engine<P> {
 self.process_block(block_number, data.blocks[&block_number]).await?;
 last_block = block_number;
 }
-
-if self.db.query_queue.queue.len() >= QUERY_QUEUE_BATCH_SIZE {
-self.db.execute().await?;
-}
 }

 // Process parallelized events
 self.process_tasks().await?;

-self.db.set_head(data.latest_block_number);
-self.db.set_last_pending_block_world_tx(None);
-self.db.set_last_pending_block_tx(None);
-
-self.db.execute().await?;
+self.db.set_head(data.latest_block_number)?;
+self.db.set_last_pending_block_world_tx(None)?;
+self.db.set_last_pending_block_tx(None)?;

 Ok(())
 }
@@ -509,14 +500,14 @@ impl<P: Provider + Send + Sync + std::fmt::Debug + 'static> Engine<P> {
 let semaphore = Arc::new(Semaphore::new(self.config.max_concurrent_tasks));

 // Run all tasks concurrently
-let mut set = JoinSet::new();
+let mut handles = Vec::new();
 for (task_id, events) in self.tasks.drain() {
 let db = self.db.clone();
 let world = self.world.clone();
 let processors = self.processors.clone();
 let semaphore = semaphore.clone();

-set.spawn(async move {
+handles.push(tokio::spawn(async move {
 let _permit = semaphore.acquire().await.unwrap();
 let mut local_db = db.clone();
 for ParallelizedEvent { event_id, event, block_number, block_timestamp } in events {
@@ -532,14 +523,11 @@ impl<P: Provider + Send + Sync + std::fmt::Debug + 'static> Engine<P> {
 }
 }
 Ok::<_, anyhow::Error>(local_db)
-});
+}));
 }

 // Join all tasks
-while let Some(result) = set.join_next().await {
-let local_db = result??;
-self.db.merge(local_db)?;
-}
+try_join_all(handles).await?;

 Ok(())
 }
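The hunk above drops JoinSet in favour of collecting plain tokio::spawn handles and awaiting them with futures_util::future::try_join_all. A standalone illustration of that pattern (names are illustrative, not torii code):

// Illustrative only: the same spawn-then-try_join_all shape as the hunk above.
use futures_util::future::try_join_all;
use tokio::task::JoinHandle;

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    let mut handles: Vec<JoinHandle<anyhow::Result<u64>>> = Vec::new();
    for task_id in 0..4u64 {
        handles.push(tokio::spawn(async move {
            // per-task work goes here
            Ok::<_, anyhow::Error>(task_id * 2)
        }));
    }

    // try_join_all fails fast on a JoinError (panic or cancellation) and otherwise
    // returns every task's own Result, which still has to be checked.
    let results = try_join_all(handles).await?;
    for result in results {
        let value = result?;
        println!("task produced {value}");
    }
    Ok(())
}

Note that try_join_all(handles).await? as used in the diff only surfaces join errors; the per-task Results inside the returned Vec, along with the old self.db.merge(local_db) step, are no longer consumed.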
@@ -688,7 +676,7 @@ impl<P: Provider + Send + Sync + std::fmt::Debug + 'static> Engine<P> {
 transaction_hash: Felt,
 ) -> Result<()> {
 if self.config.flags.contains(IndexingFlags::RAW_EVENTS) {
-self.db.store_event(event_id, event, transaction_hash, block_timestamp);
+self.db.store_event(event_id, event, transaction_hash, block_timestamp)?;
 }

 let event_key = event.keys[0];