Return error instead of panicking if rewriting fails #343

Merged: 25 commits, Dec 7, 2023
Changes from 19 commits
2 changes: 1 addition & 1 deletion .github/workflows/rust.yml
@@ -60,7 +60,7 @@ jobs:
uses: actions-rs/toolchain@v1
with:
profile: minimal
- toolchain: 1.66.0
+ toolchain: 1.67.1
override: true
components: rustfmt, clippy, rust-src
- uses: Swatinem/rust-cache@v1
4 changes: 1 addition & 3 deletions Cargo.toml
@@ -3,7 +3,7 @@ name = "raft-engine"
version = "0.4.1"
authors = ["The TiKV Project Developers"]
edition = "2018"
rust-version = "1.66.0"
rust-version = "1.67.1"
description = "A persistent storage engine for Multi-Raft logs"
readme = "README.md"
repository = "https://github.com/tikv/raft-engine"
@@ -95,8 +95,6 @@ nightly_group = ["nightly", "swap"]
raft-proto = { git = "https://github.com/tikv/raft-rs", branch = "master" }
protobuf = { git = "https://github.com/pingcap/rust-protobuf", branch = "v2.8" }
protobuf-codegen = { git = "https://github.com/pingcap/rust-protobuf", branch = "v2.8" }
- # TODO: Use official grpc-rs once https://github.com/tikv/grpc-rs/pull/622 is merged.
- grpcio = { git = "https://github.com/tabokie/grpc-rs", branch = "v0.10.x-win" }

[workspace]
members = ["stress", "ctl"]
2 changes: 1 addition & 1 deletion src/engine.rs
@@ -172,7 +172,7 @@ where
}
perf_context!(log_write_duration).observe_since(now);
if sync {
- // As per trait protocol, this error should be retriable. But we panic anyway to
+ // As per trait protocol, sync error should be retriable. But we panic anyway to
// save the trouble of propagating it to other group members.
self.pipe_log.sync(LogQueue::Append).expect("pipe::sync()");
}
7 changes: 4 additions & 3 deletions src/file_pipe_log/log_file.rs
@@ -67,7 +67,7 @@ impl<F: FileSystem> LogFileWriter<F> {
}
Review comment (Member): Add a comment to this struct stating it should be fail-safe, i.e. user can still use the writer without breaking data consistency if any operation has failed.
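A minimal sketch of the kind of doc comment the reviewer is asking for; the wording and the elided struct body are illustrative assumptions, not the actual patch:

/// Writer for a single log file.
///
/// This type is expected to be fail-safe: if any write, sync or truncate
/// operation returns an error, the caller can keep using (or retry on) the
/// same writer without breaking data consistency.
pub struct LogFileWriter {
    // Fields omitted in this sketch; the real type is generic over a
    // `FileSystem` implementation.
}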


fn write_header(&mut self, format: LogFileFormat) -> IoResult<()> {
- self.writer.seek(SeekFrom::Start(0))?;
+ self.writer.rewind()?;
self.written = 0;
let mut buf = Vec::with_capacity(LogFileFormat::encoded_len(format.version));
format.encode(&mut buf).unwrap();
@@ -119,8 +119,9 @@ impl<F: FileSystem> LogFileWriter<F> {

pub fn sync(&mut self) -> IoResult<()> {
let _t = StopWatch::new(&*LOG_SYNC_DURATION_HISTOGRAM);
- self.handle.sync()?;
- Ok(())
+ // Panic if sync fails, in case of data loss.
+ self.handle.sync().unwrap();
+ IoResult::Ok(())
v01dstar marked this conversation as resolved.
}

#[inline]
34 changes: 10 additions & 24 deletions src/file_pipe_log/pipe.rs
@@ -248,7 +248,7 @@ impl<F: FileSystem> SinglePipe<F> {
let new_seq = writable_file.seq + 1;
debug_assert!(new_seq > DEFAULT_FIRST_FILE_SEQ);

- writable_file.writer.close()?;
+ writable_file.writer.close().unwrap();
v01dstar marked this conversation as resolved.
Review comment (Member): No need to unwrap now.


let (path_id, handle) = self
.recycle_file(new_seq)
Expand All @@ -273,7 +273,9 @@ impl<F: FileSystem> SinglePipe<F> {
// File header must be persisted. This way we can recover gracefully if power
// loss before a new entry is written.
new_file.writer.sync()?;
- self.sync_dir(path_id)?;
+ // Panic if sync calls fail, keep consistent with the behavior of
+ // `LogFileWriter::sync()`.
+ self.sync_dir(path_id).unwrap();
Review comment (Member): panic inside sync_dir as well.
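A minimal sketch of what the reviewer is suggesting, i.e. moving the panic into the directory-sync helper itself; the struct shape, the `paths` field and the body below are assumptions for illustration, not the actual raft-engine code:

use std::{fs::File, io, path::PathBuf};

struct PipeSketch {
    // Directories backing this queue (assumed field for the sketch).
    paths: Vec<PathBuf>,
}

impl PipeSketch {
    fn sync_dir(&self, path_id: usize) -> io::Result<()> {
        // Sync directory metadata so a newly created file survives power loss.
        // Panic on failure, consistent with `LogFileWriter::sync()`: an fsync
        // error here can mean silent data loss, so fail fast rather than
        // return the error to the caller.
        File::open(&self.paths[path_id])
            .and_then(|dir| dir.sync_all())
            .unwrap();
        Ok(())
    }
}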


**writable_file = new_file;
let len = {
@@ -321,12 +323,7 @@ impl<F: FileSystem> SinglePipe<F> {
fail_point!("file_pipe_log::append");
let mut writable_file = self.writable_file.lock();
if writable_file.writer.offset() >= self.target_file_size {
- if let Err(e) = self.rotate_imp(&mut writable_file) {
- panic!(
- "error when rotate [{:?}:{}]: {e}",
- self.queue, writable_file.seq,
- );
- }
+ self.rotate_imp(&mut writable_file)?;
v01dstar marked this conversation as resolved.
}

let seq = writable_file.seq;
Expand Down Expand Up @@ -359,9 +356,7 @@ impl<F: FileSystem> SinglePipe<F> {
}
let start_offset = writer.offset();
if let Err(e) = writer.write(bytes.as_bytes(&ctx), self.target_file_size) {
- if let Err(te) = writer.truncate() {
- panic!("error when truncate {seq} after error: {e}, get: {}", te);
- }
+ writer.truncate()?;
if is_no_space_err(&e) {
// TODO: There exists several corner cases should be tackled if
// `bytes.len()` > `target_file_size`. For example,
@@ -372,12 +367,7 @@
// - [3] Both main-dir and spill-dir have several recycled logs.
// But as `bytes.len()` is always smaller than `target_file_size` in common
// cases, this issue will be ignored temprorarily.
- if let Err(e) = self.rotate_imp(&mut writable_file) {
- panic!(
- "error when rotate [{:?}:{}]: {e}",
- self.queue, writable_file.seq
- );
- }
+ self.rotate_imp(&mut writable_file)?;
v01dstar marked this conversation as resolved.
// If there still exists free space for this record, rotate the file
// and return a special TryAgain Err (for retry) to the caller.
return Err(Error::TryAgain(format!(
@@ -403,15 +393,11 @@

fn sync(&self) -> Result<()> {
let mut writable_file = self.writable_file.lock();
- let seq = writable_file.seq;
let writer = &mut writable_file.writer;
- {
- let _t = StopWatch::new(perf_context!(log_sync_duration));
- if let Err(e) = writer.sync() {
- panic!("error when sync [{:?}:{seq}]: {e}", self.queue);
- }
+ let _t = StopWatch::new(perf_context!(log_sync_duration));
+ if let Err(e) = writer.sync() {
+ return Err(Error::Io(e));
+ }
v01dstar marked this conversation as resolved.

Ok(())
}

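With these changes the pipe propagates errors such as `Error::TryAgain` instead of panicking inside append/rotate/sync. As a rough illustration only (not how the engine itself is wired), a caller-side wrapper that retries a couple of times on `TryAgain` could look like this; the helper name and retry budget are assumptions:

use raft_engine::Error;

// Retry a fallible write closure when it reports `Error::TryAgain`, which the
// pipe now returns after rotating the active file instead of panicking.
fn write_with_retry<T>(
    mut write_once: impl FnMut() -> Result<T, Error>,
) -> Result<T, Error> {
    let mut retries_left = 2; // one rotation is usually enough
    loop {
        match write_once() {
            Err(Error::TryAgain(_)) if retries_left > 0 => retries_left -= 1,
            other => return other,
        }
    }
}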
2 changes: 1 addition & 1 deletion src/filter.rs
@@ -333,7 +333,7 @@ impl RhaiFilterMachine {
)?;
log_batch.drain();
}
- writer.close()?;
+ writer.close().unwrap();
Review comment (Member): ditto

}
}
// Delete backup file and defuse the guard.
Expand Down
4 changes: 2 additions & 2 deletions src/purge.rs
@@ -273,7 +273,7 @@ where
// Rewrites the entire rewrite queue into new log files.
fn rewrite_rewrite_queue(&self) -> Result<Vec<u64>> {
let _t = StopWatch::new(&*ENGINE_REWRITE_REWRITE_DURATION_HISTOGRAM);
- self.pipe_log.rotate(LogQueue::Rewrite)?;
+ self.pipe_log.rotate(LogQueue::Rewrite).unwrap();
Review comment (Member): why unwrap this?


let mut force_compact_regions = vec![];
let memtables = self.memtables.collect(|t| {
@@ -439,7 +439,7 @@
)?;
let file_handle = self.pipe_log.append(LogQueue::Rewrite, log_batch)?;
if sync {
- self.pipe_log.sync(LogQueue::Rewrite)?
+ self.pipe_log.sync(LogQueue::Rewrite)?;
}
log_batch.finish_write(file_handle);
self.memtables.apply_rewrite_writes(
40 changes: 15 additions & 25 deletions tests/failpoints/test_io_error.rs
@@ -165,19 +165,17 @@ fn test_file_rotate_error() {
{
Review comment (Member): Make two versions of this test: fn test_file_rotate_error(restart: bool)

// case 1
if restart {
  let engine = Engine::open_with_file_system(cfg.clone(), fs.clone()).unwrap();
}
// case 2
// ...
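A slightly fuller sketch of that suggestion, with the setup elided; the wrapper names are assumptions, not the actual test code:

// Shared body, parameterized over whether the engine is reopened before the
// failure-injection cases run.
fn run_file_rotate_error(restart: bool) {
    // ... build `cfg`, `fs` and `entry`, and open the engine, as the test below does ...
    if restart {
        // Case 1: reopen the engine so the cases run against recovered state,
        // e.g. Engine::open_with_file_system(cfg.clone(), fs.clone()).unwrap().
    }
    // Case 2 (and the remaining FailGuard cases) stay as below, asserting
    // engine.write(...).is_err() instead of catching a panic.
}

#[test]
fn test_file_rotate_error_no_restart() {
    run_file_rotate_error(false);
}

#[test]
fn test_file_rotate_error_with_restart() {
    run_file_rotate_error(true);
}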

// Fail to create new log file.
let _f = FailGuard::new("default_fs::create::err", "return");
- assert!(catch_unwind_silent(|| {
- let _ = engine.write(&mut generate_batch(1, 4, 5, Some(&entry)), false);
- })
- .is_err());
+ assert!(engine
+ .write(&mut generate_batch(1, 4, 5, Some(&entry)), false)
+ .is_err());
assert_eq!(engine.file_span(LogQueue::Append).1, 1);
}
{
// Fail to write header of new log file.
let _f = FailGuard::new("log_file::write::err", "1*off->return");
- assert!(catch_unwind_silent(|| {
- let _ = engine.write(&mut generate_batch(1, 4, 5, Some(&entry)), false);
- })
- .is_err());
+ assert!(engine
+ .write(&mut generate_batch(1, 4, 5, Some(&entry)), false)
+ .is_err());
assert_eq!(engine.file_span(LogQueue::Append).1, 1);
}
{
@@ -262,10 +260,8 @@ fn test_concurrent_write_error() {
let _f2 = FailGuard::new("log_file::truncate::err", "return");
let entry_clone = entry.clone();
ctx.write_ext(move |e| {
- catch_unwind_silent(|| {
- e.write(&mut generate_batch(1, 11, 21, Some(&entry_clone)), false)
- })
- .unwrap_err();
+ e.write(&mut generate_batch(1, 11, 21, Some(&entry_clone)), false)
+ .unwrap_err();
});
// We don't test followers, their panics are hard to catch.
ctx.join();
@@ -527,20 +523,17 @@ fn test_no_space_write_error() {
cfg.dir = dir.path().to_str().unwrap().to_owned();
cfg.spill_dir = Some(spill_dir.path().to_str().unwrap().to_owned());
{
- // Case 1: `Write` is abnormal for no space left, Engine should panic at
+ // Case 1: `Write` is abnormal for no space left, Engine should fail at
// `rotate`.
let cfg_err = Config {
target_file_size: ReadableSize(1),
..cfg.clone()
};
let engine = Engine::open(cfg_err).unwrap();
let _f = FailGuard::new("log_fd::write::no_space_err", "return");
- assert!(catch_unwind_silent(|| {
- engine
- .write(&mut generate_batch(2, 11, 21, Some(&entry)), true)
- .unwrap_err();
- })
- .is_err());
+ assert!(engine
+ .write(&mut generate_batch(2, 11, 21, Some(&entry)), true)
+ .is_err());
assert_eq!(
0,
engine
Expand All @@ -554,12 +547,9 @@ fn test_no_space_write_error() {
let _f1 = FailGuard::new("log_fd::write::no_space_err", "2*return->off");
let _f2 = FailGuard::new("file_pipe_log::force_choose_dir", "return");
// The first write should fail, because all dirs run out of space for writing.
- assert!(catch_unwind_silent(|| {
- engine
- .write(&mut generate_batch(2, 11, 21, Some(&entry)), true)
- .unwrap_err();
- })
- .is_err());
+ assert!(engine
+ .write(&mut generate_batch(2, 11, 21, Some(&entry)), true)
+ .is_err());
assert_eq!(
0,
engine