diff --git a/bin/ofs/Cargo.toml b/bin/ofs/Cargo.toml index 860904d7cead..552f68649d48 100644 --- a/bin/ofs/Cargo.toml +++ b/bin/ofs/Cargo.toml @@ -30,20 +30,20 @@ repository.workspace = true rust-version.workspace = true [dependencies] +anyhow = "1" async-trait = "0.1.75" +clap = { version = "4.4.18", features = ["derive", "env"] } +env_logger = "0.10" fuse3 = { "version" = "0.6.1", "features" = ["tokio-runtime", "unprivileged"] } futures-util = "0.3.30" libc = "0.2.151" log = "0.4.20" -anyhow = "1" +nix = { version = "0.27.1", features = ["user"] } +opendal.workspace = true tokio = { version = "1.34", features = [ "fs", "macros", "rt-multi-thread", "io-std", ] } -nix = { version = "0.27.1", features = ["user"] } -env_logger = "0.10" -clap = { version = "4.4.18", features = ["derive", "env"] } url = "2.5.0" -opendal.workspace = true diff --git a/bindings/c/include/opendal.h b/bindings/c/include/opendal.h index 3eb0dc0a3b5f..daf50f169b07 100644 --- a/bindings/c/include/opendal.h +++ b/bindings/c/include/opendal.h @@ -120,7 +120,6 @@ typedef struct BlockingLister BlockingLister; * * Some services like s3, gcs doesn't have native blocking supports, we can use [`layers::BlockingLayer`] * to wrap the async operator to make it blocking. - * * # use anyhow::Result; * use opendal::layers::BlockingLayer; * use opendal::services::S3; diff --git a/bindings/java/Cargo.toml b/bindings/java/Cargo.toml index 7e38605d88cf..803836288562 100644 --- a/bindings/java/Cargo.toml +++ b/bindings/java/Cargo.toml @@ -98,8 +98,8 @@ services-all = [ # Default services provided by opendal. 
services-azblob = ["opendal/services-azblob"] services-azdls = ["opendal/services-azdls"] -services-cos = ["opendal/services-cos"] services-chainsafe = ["opendal/services-chainsafe"] +services-cos = ["opendal/services-cos"] services-fs = ["opendal/services-fs"] services-gcs = ["opendal/services-gcs"] services-ghac = ["opendal/services-ghac"] diff --git a/bindings/nodejs/Cargo.toml b/bindings/nodejs/Cargo.toml index 9521e3ba7a6f..b387f28b12e2 100644 --- a/bindings/nodejs/Cargo.toml +++ b/bindings/nodejs/Cargo.toml @@ -93,8 +93,8 @@ services-all = [ # Default services provided by opendal. services-azblob = ["opendal/services-azblob"] services-azdls = ["opendal/services-azdls"] -services-cos = ["opendal/services-cos"] services-chainsafe = ["opendal/services-chainsafe"] +services-cos = ["opendal/services-cos"] services-fs = ["opendal/services-fs"] services-gcs = ["opendal/services-gcs"] services-ghac = ["opendal/services-ghac"] diff --git a/bindings/php/Cargo.toml b/bindings/php/Cargo.toml index 0b0a504b4870..3ad2a50849ee 100644 --- a/bindings/php/Cargo.toml +++ b/bindings/php/Cargo.toml @@ -17,8 +17,8 @@ [package] name = "opendal-php" -version = "0.1.0" publish = false +version = "0.1.0" authors = ["Apache OpenDAL "] edition = "2021" diff --git a/bindings/python/Cargo.toml b/bindings/python/Cargo.toml index eb964de02fdd..760fb2430be0 100644 --- a/bindings/python/Cargo.toml +++ b/bindings/python/Cargo.toml @@ -95,8 +95,8 @@ services-all = [ # Default services provided by opendal. 
services-azblob = ["opendal/services-azblob"] services-azdls = ["opendal/services-azdls"] -services-cos = ["opendal/services-cos"] services-chainsafe = ["opendal/services-chainsafe"] +services-cos = ["opendal/services-cos"] services-fs = ["opendal/services-fs"] services-gcs = ["opendal/services-gcs"] services-ghac = ["opendal/services-ghac"] diff --git a/bindings/ruby/Cargo.toml b/bindings/ruby/Cargo.toml index f790c788900d..d28d12039901 100644 --- a/bindings/ruby/Cargo.toml +++ b/bindings/ruby/Cargo.toml @@ -17,8 +17,8 @@ [package] name = "opendal-ruby" -version = "0.1.0" publish = false +version = "0.1.0" authors = ["Apache OpenDAL "] edition = "2021" diff --git a/core/Cargo.toml b/core/Cargo.toml index 5f315bd541ae..908e1891415b 100644 --- a/core/Cargo.toml +++ b/core/Cargo.toml @@ -111,8 +111,8 @@ layers-await-tree = ["dep:await-tree"] # Enable layers async-backtrace support. layers-async-backtrace = ["dep:async-backtrace"] # Enable dtrace support. -layers-dtrace=["dep:probe"] layers-blocking = ["internal-tokio-rt"] +layers-dtrace = ["dep:probe"] services-alluxio = [] services-atomicserver = ["dep:atomic_lib"] @@ -156,9 +156,9 @@ services-gridfs = ["dep:mongodb"] services-hdfs = ["dep:hdrs"] services-http = [] services-huggingface = [] +services-icloud = ["internal-path-cache"] services-ipfs = ["dep:prost"] services-ipmfs = [] -services-icloud = ["internal-path-cache"] services-koofr = [] services-libsql = ["dep:hrana-client-proto"] services-memcached = ["dep:bb8"] @@ -202,11 +202,11 @@ services-vercel-artifacts = [] # Deprecated # wasabi services support has been removed. # We will remove this feature in the next version. 
+services-hdfs-native = ["hdfs-native"] services-wasabi = [] services-webdav = [] services-webhdfs = [] services-yandex-disk = [] -services-hdfs-native = ["hdfs-native"] internal-tokio-rt = ["tokio/rt-multi-thread"] @@ -341,7 +341,7 @@ suppaftp = { version = "5.3.1", default-features = false, features = [ # for services-tikv tikv-client = { version = "0.3.0", optional = true, default-features = false } # for services-hdfs-native -hdfs-native = { version = "0.6.0", optional = true} +hdfs-native = { version = "0.6.0", optional = true } # Layers # for layers-async-backtrace diff --git a/core/src/layers/dtrace.rs b/core/src/layers/dtrace.rs index 389c29f27c34..0eb0ce02f60f 100644 --- a/core/src/layers/dtrace.rs +++ b/core/src/layers/dtrace.rs @@ -15,12 +15,6 @@ // specific language governing permissions and limitations // under the License. -use crate::raw::Accessor; -use crate::raw::*; -use crate::*; -use async_trait::async_trait; -use bytes::Bytes; -use probe::probe_lazy; use std::ffi::CString; use std::fmt::Debug; use std::fmt::Formatter; @@ -28,6 +22,14 @@ use std::io; use std::task::Context; use std::task::Poll; +use async_trait::async_trait; +use bytes::Bytes; +use probe::probe_lazy; + +use crate::raw::Accessor; +use crate::raw::*; +use crate::*; + /// Support User Statically-Defined Tracing(aka USDT) on Linux /// /// This layer is an experimental feature, it will be enabled by `features = ["layers-dtrace"]` in Cargo.toml. @@ -113,11 +115,10 @@ use std::task::Poll; /// /// Example: /// ``` -/// /// use anyhow::Result; +/// use opendal::layers::DTraceLayer; /// use opendal::services::Fs; /// use opendal::Operator; -/// use opendal::layers::DTraceLayer; /// /// #[tokio::main] /// async fn main() -> Result<()> { @@ -126,10 +127,12 @@ use std::task::Poll; /// builder.root("/tmp"); /// /// // `Accessor` provides the low level APIs, we will use `Operator` normally. 
-/// let op: Operator = Operator::new(builder)?.layer(DtraceLayer::default()).finish(); -/// -/// let path="/tmp/test.txt"; -/// for _ in 1..100000{ +/// let op: Operator = Operator::new(builder)? +/// .layer(DtraceLayer::default()) +/// .finish(); +/// +/// let path = "/tmp/test.txt"; +/// for _ in 1..100000 { /// let bs = vec![0; 64 * 1024 * 1024]; /// op.write(path, bs).await?; /// op.read(path).await?; diff --git a/core/src/layers/timeout.rs b/core/src/layers/timeout.rs index c05d211f90b0..4eec45bd87d0 100644 --- a/core/src/layers/timeout.rs +++ b/core/src/layers/timeout.rs @@ -18,8 +18,9 @@ use std::future::Future; use std::io::SeekFrom; use std::pin::Pin; +use std::task::ready; +use std::task::Context; use std::task::Poll; -use std::task::{ready, Context}; use std::time::Duration; use async_trait::async_trait; @@ -57,18 +58,21 @@ use crate::*; /// operations, 3 seconds timeout for all io operations. /// /// ``` +/// use std::time::Duration; +/// /// use anyhow::Result; /// use opendal::layers::TimeoutLayer; /// use opendal::services; /// use opendal::Operator; /// use opendal::Scheme; -/// use std::time::Duration; /// /// let _ = Operator::new(services::Memory::default()) /// .expect("must init") -/// .layer(TimeoutLayer::default() -/// .with_timeout(Duration::from_secs(10)) -/// .with_io_timeout(Duration::from_secs(3))) +/// .layer( +/// TimeoutLayer::default() +/// .with_timeout(Duration::from_secs(10)) +/// .with_io_timeout(Duration::from_secs(3)), +/// ) /// .finish(); /// ``` /// @@ -368,17 +372,22 @@ impl oio::List for TimeoutWrapper { #[cfg(test)] mod tests { - use crate::layers::{TimeoutLayer, TypeEraseLayer}; - use crate::raw::oio::ReadExt; - use crate::raw::*; - use crate::*; - use async_trait::async_trait; - use bytes::Bytes; use std::io::SeekFrom; use std::sync::Arc; - use std::task::{Context, Poll}; + use std::task::Context; + use std::task::Poll; use std::time::Duration; - use tokio::time::{sleep, timeout}; + + use async_trait::async_trait; + 
use bytes::Bytes; + use tokio::time::sleep; + use tokio::time::timeout; + + use crate::layers::TimeoutLayer; + use crate::layers::TypeEraseLayer; + use crate::raw::oio::ReadExt; + use crate::raw::*; + use crate::*; #[derive(Debug, Clone, Default)] struct MockService; diff --git a/core/src/raw/oio/write/block_write.rs b/core/src/raw/oio/write/block_write.rs index aecc7c66c610..af4e79a05853 100644 --- a/core/src/raw/oio/write/block_write.rs +++ b/core/src/raw/oio/write/block_write.rs @@ -326,14 +326,20 @@ where #[cfg(test)] mod tests { - use super::*; - use crate::raw::oio::{StreamExt, WriteBuf, WriteExt}; - use bytes::Bytes; - use pretty_assertions::assert_eq; - use rand::{thread_rng, Rng, RngCore}; use std::collections::HashMap; use std::sync::Mutex; + use bytes::Bytes; + use pretty_assertions::assert_eq; + use rand::thread_rng; + use rand::Rng; + use rand::RngCore; + + use super::*; + use crate::raw::oio::StreamExt; + use crate::raw::oio::WriteBuf; + use crate::raw::oio::WriteExt; + struct TestWrite { length: u64, bytes: HashMap, diff --git a/core/src/raw/oio/write/multipart_write.rs b/core/src/raw/oio/write/multipart_write.rs index 60c6f7d52984..46d215ed124f 100644 --- a/core/src/raw/oio/write/multipart_write.rs +++ b/core/src/raw/oio/write/multipart_write.rs @@ -404,11 +404,15 @@ where #[cfg(test)] mod tests { + use std::sync::Mutex; + + use pretty_assertions::assert_eq; + use rand::thread_rng; + use rand::Rng; + use rand::RngCore; + use super::*; use crate::raw::oio::WriteExt; - use pretty_assertions::assert_eq; - use rand::{thread_rng, Rng, RngCore}; - use std::sync::Mutex; struct TestWrite { upload_id: String, diff --git a/core/src/raw/oio/write/range_write.rs b/core/src/raw/oio/write/range_write.rs index e4210b5ab0d5..496722266b40 100644 --- a/core/src/raw/oio/write/range_write.rs +++ b/core/src/raw/oio/write/range_write.rs @@ -354,13 +354,17 @@ impl oio::Write for RangeWriter { #[cfg(test)] mod tests { - use super::*; - use crate::raw::oio::WriteExt; - use 
pretty_assertions::assert_eq; - use rand::{thread_rng, Rng, RngCore}; use std::collections::HashSet; use std::sync::Mutex; + use pretty_assertions::assert_eq; + use rand::thread_rng; + use rand::Rng; + use rand::RngCore; + + use super::*; + use crate::raw::oio::WriteExt; + struct TestWrite { length: u64, bytes: HashSet, diff --git a/core/src/raw/path_cache.rs b/core/src/raw/path_cache.rs index 45efbf86917b..b7926f40296e 100644 --- a/core/src/raw/path_cache.rs +++ b/core/src/raw/path_cache.rs @@ -15,12 +15,15 @@ // specific language governing permissions and limitations // under the License. -use crate::raw::*; -use crate::*; +use std::collections::VecDeque; + use async_trait::async_trait; use moka::sync::Cache; -use std::collections::VecDeque; -use tokio::sync::{Mutex, MutexGuard}; +use tokio::sync::Mutex; +use tokio::sync::MutexGuard; + +use crate::raw::*; +use crate::*; /// The trait required for path cacher. #[cfg_attr(not(target_arch = "wasm32"), async_trait)] @@ -191,10 +194,12 @@ impl PathCacher { #[cfg(test)] mod tests { - use crate::raw::{PathCacher, PathQuery}; - use crate::*; use async_trait::async_trait; + use crate::raw::PathCacher; + use crate::raw::PathQuery; + use crate::*; + struct TestQuery {} #[async_trait] diff --git a/core/src/raw/tokio_util.rs b/core/src/raw/tokio_util.rs index bceff001475c..528d995c199f 100644 --- a/core/src/raw/tokio_util.rs +++ b/core/src/raw/tokio_util.rs @@ -15,7 +15,8 @@ // specific language governing permissions and limitations // under the License. -use crate::{Error, ErrorKind}; +use crate::Error; +use crate::ErrorKind; /// Parse tokio error into opendal::Error. 
pub fn new_task_join_error(e: tokio::task::JoinError) -> Error { diff --git a/core/src/services/dropbox/builder.rs b/core/src/services/dropbox/builder.rs index 36eeceaee02a..2bdc7726e4a3 100644 --- a/core/src/services/dropbox/builder.rs +++ b/core/src/services/dropbox/builder.rs @@ -15,7 +15,6 @@ // specific language governing permissions and limitations // under the License. -use serde::Deserialize; use std::collections::HashMap; use std::fmt::Debug; use std::fmt::Formatter; @@ -23,6 +22,7 @@ use std::sync::Arc; use chrono::DateTime; use chrono::Utc; +use serde::Deserialize; use tokio::sync::Mutex; use super::backend::DropboxBackend; diff --git a/core/src/services/gdrive/builder.rs b/core/src/services/gdrive/builder.rs index bcb495d8f703..5ee80ab4c644 100644 --- a/core/src/services/gdrive/builder.rs +++ b/core/src/services/gdrive/builder.rs @@ -27,10 +27,13 @@ use serde::Deserialize; use tokio::sync::Mutex; use super::backend::GdriveBackend; -use crate::raw::{normalize_root, PathCacher}; -use crate::raw::{ConfigDeserializer, HttpClient}; +use crate::raw::normalize_root; +use crate::raw::ConfigDeserializer; +use crate::raw::HttpClient; +use crate::raw::PathCacher; +use crate::services::gdrive::core::GdriveCore; +use crate::services::gdrive::core::GdrivePathQuery; use crate::services::gdrive::core::GdriveSigner; -use crate::services::gdrive::core::{GdriveCore, GdrivePathQuery}; use crate::Scheme; use crate::*; diff --git a/core/src/services/gdrive/core.rs b/core/src/services/gdrive/core.rs index 96ad5f1e11e2..0f6211666d6a 100644 --- a/core/src/services/gdrive/core.rs +++ b/core/src/services/gdrive/core.rs @@ -15,11 +15,11 @@ // specific language governing permissions and limitations // under the License. 
-use async_trait::async_trait; use std::fmt::Debug; use std::fmt::Formatter; use std::sync::Arc; +use async_trait::async_trait; use bytes; use bytes::Bytes; use chrono::DateTime; diff --git a/core/src/services/gdrive/writer.rs b/core/src/services/gdrive/writer.rs index 251787064ea0..037b87c23db8 100644 --- a/core/src/services/gdrive/writer.rs +++ b/core/src/services/gdrive/writer.rs @@ -20,7 +20,8 @@ use std::sync::Arc; use async_trait::async_trait; use http::StatusCode; -use super::core::{GdriveCore, GdriveFile}; +use super::core::GdriveCore; +use super::core::GdriveFile; use super::error::parse_error; use crate::raw::oio::WriteBuf; use crate::raw::*; diff --git a/core/src/services/ghac/backend.rs b/core/src/services/ghac/backend.rs index 61b8b2598ac7..6319ae7f43a8 100644 --- a/core/src/services/ghac/backend.rs +++ b/core/src/services/ghac/backend.rs @@ -20,15 +20,16 @@ use std::env; use async_trait::async_trait; use bytes::Bytes; +use http::header; use http::header::ACCEPT; use http::header::AUTHORIZATION; use http::header::CONTENT_LENGTH; use http::header::CONTENT_RANGE; use http::header::CONTENT_TYPE; use http::header::USER_AGENT; +use http::Request; use http::Response; use http::StatusCode; -use http::{header, Request}; use log::debug; use serde::Deserialize; use serde::Serialize; diff --git a/core/src/services/hdfs_native/backend.rs b/core/src/services/hdfs_native/backend.rs index 5c5412b3d875..48ef1caae699 100644 --- a/core/src/services/hdfs_native/backend.rs +++ b/core/src/services/hdfs_native/backend.rs @@ -16,15 +16,16 @@ // under the License. 
use std::collections::HashMap; -use std::fmt::{Debug, Formatter}; +use std::fmt::Debug; +use std::fmt::Formatter; use std::sync::Arc; use async_trait::async_trait; use hdfs_native::WriteOptions; use log::debug; use serde::Deserialize; -// use uuid::Uuid; +// use uuid::Uuid; use super::error::parse_hdfs_error; use super::lister::HdfsNativeLister; use super::reader::HdfsNativeReader; diff --git a/core/src/services/hdfs_native/error.rs b/core/src/services/hdfs_native/error.rs index 4432716535cb..215f319cf89b 100644 --- a/core/src/services/hdfs_native/error.rs +++ b/core/src/services/hdfs_native/error.rs @@ -15,9 +15,10 @@ // specific language governing permissions and limitations // under the License. -use crate::*; use hdfs_native::HdfsError; +use crate::*; + /// Parse hdfs-native error into opendal::Error. pub fn parse_hdfs_error(hdfs_error: HdfsError) -> Error { let (kind, retryable, msg) = match &hdfs_error { diff --git a/core/src/services/hdfs_native/lister.rs b/core/src/services/hdfs_native/lister.rs index deb4336446b2..65c916c28076 100644 --- a/core/src/services/hdfs_native/lister.rs +++ b/core/src/services/hdfs_native/lister.rs @@ -15,11 +15,13 @@ // specific language governing permissions and limitations // under the License. +use std::sync::Arc; +use std::task::Context; +use std::task::Poll; + use crate::raw::oio; use crate::raw::oio::Entry; use crate::*; -use std::sync::Arc; -use std::task::{Context, Poll}; pub struct HdfsNativeLister { _path: String, diff --git a/core/src/services/hdfs_native/reader.rs b/core/src/services/hdfs_native/reader.rs index babbbf32b14f..784d0678df4b 100644 --- a/core/src/services/hdfs_native/reader.rs +++ b/core/src/services/hdfs_native/reader.rs @@ -15,12 +15,15 @@ // specific language governing permissions and limitations // under the License. 
-use crate::raw::oio::Read; -use crate::*; +use std::io::SeekFrom; +use std::task::Context; +use std::task::Poll; + use bytes::Bytes; use hdfs_native::file::FileReader; -use std::io::SeekFrom; -use std::task::{Context, Poll}; + +use crate::raw::oio::Read; +use crate::*; pub struct HdfsNativeReader { _f: FileReader, diff --git a/core/src/services/hdfs_native/writer.rs b/core/src/services/hdfs_native/writer.rs index de0349eacf39..e781b463cbdd 100644 --- a/core/src/services/hdfs_native/writer.rs +++ b/core/src/services/hdfs_native/writer.rs @@ -15,11 +15,14 @@ // specific language governing permissions and limitations // under the License. +use std::task::Context; +use std::task::Poll; + +use hdfs_native::file::FileWriter; + use crate::raw::oio; use crate::raw::oio::WriteBuf; use crate::*; -use hdfs_native::file::FileWriter; -use std::task::{Context, Poll}; pub struct HdfsNativeWriter { _f: FileWriter, diff --git a/core/src/services/icloud/backend.rs b/core/src/services/icloud/backend.rs index 52886dfa98d7..8f6628ae0e0f 100644 --- a/core/src/services/icloud/backend.rs +++ b/core/src/services/icloud/backend.rs @@ -15,12 +15,14 @@ // specific language governing permissions and limitations // under the License. +use std::collections::HashMap; +use std::fmt::Debug; +use std::fmt::Formatter; +use std::sync::Arc; + use async_trait::async_trait; use http::StatusCode; use serde::Deserialize; -use std::collections::HashMap; -use std::fmt::{Debug, Formatter}; -use std::sync::Arc; use tokio::sync::Mutex; use super::core::*; diff --git a/core/src/services/icloud/core.rs b/core/src/services/icloud/core.rs index 0430e954fb39..77f747a1bf84 100644 --- a/core/src/services/icloud/core.rs +++ b/core/src/services/icloud/core.rs @@ -15,18 +15,22 @@ // specific language governing permissions and limitations // under the License. 
-use async_trait::async_trait; -use bytes::{Buf, Bytes}; use std::collections::BTreeMap; -use std::fmt::{Debug, Formatter}; +use std::fmt::Debug; +use std::fmt::Formatter; use std::sync::Arc; +use async_trait::async_trait; +use bytes::Buf; +use bytes::Bytes; use http::header; -use http::header::{IF_MATCH, IF_NONE_MATCH}; +use http::header::IF_MATCH; +use http::header::IF_NONE_MATCH; use http::Request; use http::Response; use http::StatusCode; -use serde::{Deserialize, Serialize}; +use serde::Deserialize; +use serde::Serialize; use serde_json::json; use tokio::sync::Mutex; @@ -706,7 +710,8 @@ pub struct IcloudCreateFolder { #[cfg(test)] mod tests { - use super::{IcloudRoot, IcloudWebservicesResponse}; + use super::IcloudRoot; + use super::IcloudWebservicesResponse; #[test] fn test_parse_icloud_drive_root_json() { diff --git a/core/src/services/koofr/core.rs b/core/src/services/koofr/core.rs index d998ba750ffd..441c3ffb0142 100644 --- a/core/src/services/koofr/core.rs +++ b/core/src/services/koofr/core.rs @@ -31,11 +31,10 @@ use serde_json::json; use tokio::sync::Mutex; use tokio::sync::OnceCell; +use super::error::parse_error; use crate::raw::*; use crate::*; -use super::error::parse_error; - #[derive(Clone)] pub struct KoofrCore { /// The root of this core. 
diff --git a/core/src/services/mod.rs b/core/src/services/mod.rs index c95556dbc305..e7ffb008444c 100644 --- a/core/src/services/mod.rs +++ b/core/src/services/mod.rs @@ -235,7 +235,6 @@ mod dropbox; pub use dropbox::Dropbox; #[cfg(feature = "services-dropbox")] pub use dropbox::DropboxConfig; - #[cfg(feature = "services-webhdfs")] pub use webhdfs::Webhdfs; diff --git a/core/src/services/sftp/lister.rs b/core/src/services/sftp/lister.rs index 6192fa3fccdf..f7fe5aac9fc2 100644 --- a/core/src/services/sftp/lister.rs +++ b/core/src/services/sftp/lister.rs @@ -25,11 +25,10 @@ use futures::StreamExt; use openssh_sftp_client::fs::DirEntry; use openssh_sftp_client::fs::ReadDir; +use super::error::parse_sftp_error; use crate::raw::oio; use crate::Result; -use super::error::parse_sftp_error; - pub struct SftpLister { dir: Pin>, prefix: String, diff --git a/core/src/services/yandex_disk/backend.rs b/core/src/services/yandex_disk/backend.rs index d22032bc912f..d581e8fd4d90 100644 --- a/core/src/services/yandex_disk/backend.rs +++ b/core/src/services/yandex_disk/backend.rs @@ -15,15 +15,16 @@ // specific language governing permissions and limitations // under the License. +use std::collections::HashMap; +use std::fmt::Debug; +use std::fmt::Formatter; +use std::sync::Arc; + use async_trait::async_trait; use http::Request; use http::StatusCode; use log::debug; use serde::Deserialize; -use std::collections::HashMap; -use std::fmt::Debug; -use std::fmt::Formatter; -use std::sync::Arc; use super::core::*; use super::error::parse_error; diff --git a/core/src/services/yandex_disk/core.rs b/core/src/services/yandex_disk/core.rs index d3852ea79e72..f206a52c0eb9 100644 --- a/core/src/services/yandex_disk/core.rs +++ b/core/src/services/yandex_disk/core.rs @@ -15,16 +15,20 @@ // specific language governing permissions and limitations // under the License. 
-use std::fmt::{Debug, Formatter}; - -use http::{header, request, Request, Response, StatusCode}; +use std::fmt::Debug; +use std::fmt::Formatter; + +use http::header; +use http::request; +use http::Request; +use http::Response; +use http::StatusCode; use serde::Deserialize; +use super::error::parse_error; use crate::raw::*; use crate::*; -use super::error::parse_error; - #[derive(Clone)] pub struct YandexDiskCore { /// The root of this core. diff --git a/core/src/services/yandex_disk/writer.rs b/core/src/services/yandex_disk/writer.rs index b25d770098bb..33633593e87c 100644 --- a/core/src/services/yandex_disk/writer.rs +++ b/core/src/services/yandex_disk/writer.rs @@ -18,13 +18,13 @@ use std::sync::Arc; use async_trait::async_trait; -use http::{Request, StatusCode}; - -use crate::raw::*; -use crate::*; +use http::Request; +use http::StatusCode; use super::core::YandexDiskCore; use super::error::parse_error; +use crate::raw::*; +use crate::*; pub type YandexDiskWriters = oio::OneShotWriter; diff --git a/core/src/types/operator/blocking_operator.rs b/core/src/types/operator/blocking_operator.rs index babacdf471fb..9cb46efa30ab 100644 --- a/core/src/types/operator/blocking_operator.rs +++ b/core/src/types/operator/blocking_operator.rs @@ -58,7 +58,6 @@ use crate::*; /// /// Some services like s3, gcs doesn't have native blocking supports, we can use [`layers::BlockingLayer`] /// to wrap the async operator to make it blocking. -/// #[cfg_attr(feature = "layers-blocking", doc = "```rust")] #[cfg_attr(not(feature = "layers-blocking"), doc = "```ignore")] /// # use anyhow::Result; diff --git a/core/src/types/operator/operator.rs b/core/src/types/operator/operator.rs index 8c50d7db4c5a..f3039c0bf952 100644 --- a/core/src/types/operator/operator.rs +++ b/core/src/types/operator/operator.rs @@ -37,12 +37,13 @@ use crate::*; /// Developer should manipulate the data from storage service through Operator only by right. 
/// /// We will usually do some general checks and data transformations in this layer, -/// like normalizing path from input, checking whether the path refers to one file or one directory, and so on. -/// Read [`concepts`][docs::concepts] for more about [`Operator`]. +/// like normalizing path from input, checking whether the path refers to one file or one directory, +/// and so on. +/// Read [`concepts`][docs::concepts] for more about [`Operator`]. /// /// # Examples /// -/// Read more backend init examples in [`services`] +/// Read more backend init examples in [`services`] /// /// ``` /// # use anyhow::Result; /// @@ -133,7 +134,7 @@ impl Operator { } } -/// Operator async API. +/// # Operator async API. impl Operator { /// Check if this operator can work correctly. /// @@ -163,35 +164,16 @@ impl Operator { /// /// # Notes /// - /// For fetch metadata of entries returned by [`Lister`], it's better to use [`list_with`] and - /// [`lister_with`] with `metakey` query like `Metakey::ContentLength | Metakey::LastModified` - /// so that we can avoid extra stat requests. + /// ## Extra Options /// - /// # Behavior - /// - /// ## Services that support `create_dir` - /// - /// `test` and `test/` may vary in some services such as S3. However, on a local file system, - /// they're identical. Therefore, the behavior of `stat("test")` and `stat("test/")` might differ - /// in certain edge cases. Always use `stat("test/")` when you need to access a directory if possible. + /// [`Operator::stat`] is a wrapper of [`Operator::stat_with`] without any options. To use extra + /// options like `if_match` and `if_none_match`, please use [`Operator::stat_with`] instead. 
/// - /// Here are the behavior list: - /// - /// | Case | Path | Result | - /// |------------------------|-----------------|--------------------------------------------| - /// | stat existing dir | `abc/` | Metadata with dir mode | - /// | stat existing file | `abc/def_file` | Metadata with file mode | - /// | stat dir without `/` | `abc/def_dir` | Error `NotFound` or metadata with dir mode | - /// | stat file with `/` | `abc/def_file/` | Error `NotFound` | - /// | stat not existing path | `xyz` | Error `NotFound` | - /// - /// Refer to [RFC: List Prefix][crate::docs::rfcs::rfc_3243_list_prefix] for more details. + /// ## Reuse Metadata /// - /// ## Services that not support `create_dir` - /// - /// For services that not support `create_dir`, `stat("test/")` will return `NotFound` even - /// when `test/abc` exists since the service won't have the concept of dir. There is nothing - /// we can do about this. + /// For fetch metadata of entries returned by [`Lister`], it's better to use + /// [`Operator::list_with`] and [`Operator::lister_with`] with `metakey` query like + /// `Metakey::ContentLength | Metakey::LastModified` so that we can avoid extra stat requests. /// /// # Examples /// @@ -221,35 +203,51 @@ /// /// # Notes /// - /// For fetch metadata of entries returned by [`Lister`], it's better to use [`list_with`] and - /// [`lister_with`] with `metakey` query like `Metakey::ContentLength | Metakey::LastModified` - /// so that we can avoid extra requests. + /// ## Reuse Metadata /// - /// # Behavior + /// For fetch metadata of entries returned by [`Lister`], it's better to use + /// [`Operator::list_with`] and [`Operator::lister_with`] with `metakey` query like + /// `Metakey::ContentLength | Metakey::LastModified` so that we can avoid extra requests. /// - /// ## Services that support `create_dir` + /// # Options /// - /// `test` and `test/` may vary in some services such as S3. 
However, on a local file system, - /// they're identical. Therefore, the behavior of `stat("test")` and `stat("test/")` might differ - /// in certain edge cases. Always use `stat("test/")` when you need to access a directory if possible. + /// ## `if_match` /// - /// Here are the behavior list: + /// Set `if_match` for this `stat` request. /// - /// | Case | Path | Result | - /// |------------------------|-----------------|--------------------------------------------| - /// | stat existing dir | `abc/` | Metadata with dir mode | - /// | stat existing file | `abc/def_file` | Metadata with file mode | - /// | stat dir without `/` | `abc/def_dir` | Error `NotFound` or metadata with dir mode | - /// | stat file with `/` | `abc/def_file/` | Error `NotFound` | - /// | stat not existing path | `xyz` | Error `NotFound` | + /// This feature can be used to check if the file's `ETag` matches the given `ETag`. /// - /// Refer to [RFC: List Prefix][crate::docs::rfcs::rfc_3243_list_prefix] for more details. + /// If file exists and it's etag doesn't match, an error with kind [`ErrorKind::ConditionNotMatch`] + /// will be returned. /// - /// ## Services that not support `create_dir` + /// ```no_run + /// # use opendal::Result; + /// use opendal::Operator; + /// # #[tokio::main] + /// # async fn test(op: Operator, etag: &str) -> Result<()> { + /// let mut metadata = op.stat_with("path/to/file").if_match(etag).await?; + /// # Ok(()) + /// # } + /// ``` /// - /// For services that not support `create_dir`, `stat("test/")` will return `NotFound` even - /// when `test/abc` exists since the service won't have the concept of dir. There is nothing - /// we can do about this. + /// ## `if_none_match` + /// + /// Set `if_none_match` for this `stat` request. + /// + /// This feature can be used to check if the file's `ETag` doesn't match the given `ETag`. + /// + /// If file exists and it's etag match, an error with kind [`ErrorKind::ConditionNotMatch`] + /// will be returned. 
+ /// + /// ```no_run + /// # use opendal::Result; + /// use opendal::Operator; + /// # #[tokio::main] + /// # async fn test(op: Operator, etag: &str) -> Result<()> { + /// let mut metadata = op.stat_with("path/to/file").if_none_match(etag).await?; + /// # Ok(()) + /// # } + /// ``` /// /// # Examples /// @@ -281,6 +279,34 @@ impl Operator { /// # Ok(()) /// # } /// ``` + /// + /// --- + /// + /// # Behavior + /// + /// ## Services that support `create_dir` + /// + /// `test` and `test/` may vary in some services such as S3. However, on a local file system, + /// they're identical. Therefore, the behavior of `stat("test")` and `stat("test/")` might differ + /// in certain edge cases. Always use `stat("test/")` when you need to access a directory if possible. + /// + /// Here are the behavior list: + /// + /// | Case | Path | Result | + /// |------------------------|-----------------|--------------------------------------------| + /// | stat existing dir | `abc/` | Metadata with dir mode | + /// | stat existing file | `abc/def_file` | Metadata with file mode | + /// | stat dir without `/` | `abc/def_dir` | Error `NotFound` or metadata with dir mode | + /// | stat file with `/` | `abc/def_file/` | Error `NotFound` | + /// | stat not existing path | `xyz` | Error `NotFound` | + /// + /// Refer to [RFC: List Prefix][crate::docs::rfcs::rfc_3243_list_prefix] for more details. + /// + /// ## Services that not support `create_dir` + /// + /// For services that not support `create_dir`, `stat("test/")` will return `NotFound` even + /// when `test/abc` exists since the service won't have the concept of dir. There is nothing + /// we can do about this. pub fn stat_with(&self, path: &str) -> FutureStat>> { let path = normalize_path(path); @@ -367,6 +393,15 @@ impl Operator { /// Read the whole path into a bytes. /// + /// # Notes + /// + /// ## Extra Options + /// + /// [`Operator::read`] is a wrapper of [`Operator::read_with`] without any options. 
To use + /// extra options like `range` and `if_match`, please use [`Operator::read_with`] instead. + /// + /// ## Streaming Read + /// /// This function will allocate a new bytes internally. For more precise memory control or /// reading data lazily, please use [`Operator::reader`] /// @@ -391,8 +426,79 @@ impl Operator { /// This function will allocate a new bytes internally. For more precise memory control or /// reading data lazily, please use [`Operator::reader`] /// + /// # Notes + /// + /// ## Streaming Read + /// + /// This function will allocate a new bytes internally. For more precise memory control or + /// reading data lazily, please use [`Operator::reader`] + /// + /// # Options + /// + /// ## `range` + /// + /// Set `range` for this `read` request. + /// + /// If we have a file with size `n`. + /// + /// - `..` means read bytes in range `[0, n)` of file. + /// - `0..1024` means read bytes in range `[0, 1024)` of file + /// - `1024..` means read bytes in range `[1024, n)` of file + /// - `..1024` means read bytes in range `(n - 1024, n)` of file + /// + /// ``` + /// # use std::io::Result; + /// # use opendal::Operator; + /// # use futures::TryStreamExt; + /// # #[tokio::main] + /// # async fn test(op: Operator) -> Result<()> { + /// let bs = op.read_with("path/to/file").range(0..1024).await?; + /// # Ok(()) + /// # } + /// ``` + /// + /// ## `if_match` + /// + /// Set `if_match` for this `read` request. + /// + /// This feature can be used to check if the file's `ETag` matches the given `ETag`. + /// + /// If file exists and it's etag doesn't match, an error with kind [`ErrorKind::ConditionNotMatch`] + /// will be returned. 
+ /// + /// ```no_run + /// # use opendal::Result; + /// use opendal::Operator; + /// # #[tokio::main] + /// # async fn test(op: Operator, etag: &str) -> Result<()> { + /// let mut metadata = op.read_with("path/to/file").if_match(etag).await?; + /// # Ok(()) + /// # } + /// ``` + /// + /// ## `if_none_match` + /// + /// Set `if_none_match` for this `read` request. + /// + /// This feature can be used to check if the file's `ETag` doesn't match the given `ETag`. + /// + /// If file exists and it's etag match, an error with kind [`ErrorKind::ConditionNotMatch`] + /// will be returned. + /// + /// ```no_run + /// # use opendal::Result; + /// use opendal::Operator; + /// # #[tokio::main] + /// # async fn test(op: Operator, etag: &str) -> Result<()> { + /// let mut metadata = op.read_with("path/to/file").if_none_match(etag).await?; + /// # Ok(()) + /// # } + /// ``` + /// /// # Examples /// + /// Read the whole path into a bytes. + /// /// ``` /// # use std::io::Result; /// # use opendal::Operator; @@ -445,6 +551,13 @@ impl Operator { /// Create a new reader which can read the whole path. /// + /// # Notes + /// + /// ## Extra Options + /// + /// [`Operator::reader`] is a wrapper of [`Operator::reader_with`] without any options. To use + /// extra options like `range` and `if_match`, please use [`Operator::reader_with`] instead. + /// /// # Examples /// /// ```no_run @@ -464,6 +577,99 @@ impl Operator { /// Create a new reader with extra options /// + /// # Notes + /// + /// ## Extra Options + /// + /// [`Operator::reader`] is a wrapper of [`Operator::reader_with`] without any options. To use + /// extra options like `range` and `if_match`, please use [`Operator::reader_with`] instead. + /// + /// # Options + /// + /// ## `range` + /// + /// Set `range` for this `read` request. + /// + /// If we have a file with size `n`. + /// + /// - `..` means read bytes in range `[0, n)` of file. 
+ /// - `0..1024` means read bytes in range `[0, 1024)` of file + /// - `1024..` means read bytes in range `[1024, n)` of file + /// - `..1024` means read bytes in range `(n - 1024, n)` of file + /// + /// ``` + /// # use std::io::Result; + /// # use opendal::Operator; + /// # use futures::TryStreamExt; + /// # #[tokio::main] + /// # async fn test(op: Operator) -> Result<()> { + /// let bs = op.reader_with("path/to/file").range(0..1024).await?; + /// # Ok(()) + /// # } + /// ``` + /// + /// ## `buffer` + /// + /// Set `buffer` for the reader. + /// + /// OpenDAL by default to read file without buffer. This is not efficient for cases like `seek` + /// after read or reading file with small chunks. To improve performance, we can set a buffer. + /// + /// The following example will create a reader with 4 MiB buffer internally. All seek operations + /// happened in buffered data will be zero cost. + /// + /// ``` + /// # use std::io::Result; + /// # use opendal::Operator; + /// # use futures::TryStreamExt; + /// # #[tokio::main] + /// # async fn test(op: Operator) -> Result<()> { + /// let bs = op + /// .reader_with("path/to/file") + /// .buffer(4 * 1024 * 1024) + /// .await?; + /// # Ok(()) + /// # } + /// ``` + /// + /// ## `if_match` + /// + /// Set `if_match` for this `read` request. + /// + /// This feature can be used to check if the file's `ETag` matches the given `ETag`. + /// + /// If file exists and it's etag doesn't match, an error with kind [`ErrorKind::ConditionNotMatch`] + /// will be returned. + /// + /// ```no_run + /// # use opendal::Result; + /// use opendal::Operator; + /// # #[tokio::main] + /// # async fn test(op: Operator, etag: &str) -> Result<()> { + /// let mut metadata = op.reader_with("path/to/file").if_match(etag).await?; + /// # Ok(()) + /// # } + /// ``` + /// + /// ## `if_none_match` + /// + /// Set `if_none_match` for this `read` request. + /// + /// This feature can be used to check if the file's `ETag` doesn't match the given `ETag`. 
+ /// + /// If file exists and it's etag match, an error with kind [`ErrorKind::ConditionNotMatch`] + /// will be returned. + /// + /// ```no_run + /// # use opendal::Result; + /// use opendal::Operator; + /// # #[tokio::main] + /// # async fn test(op: Operator, etag: &str) -> Result<()> { + /// let mut metadata = op.reader_with("path/to/file").if_none_match(etag).await?; + /// # Ok(()) + /// # } + /// ``` + /// /// # Examples /// /// ```no_run @@ -473,7 +679,7 @@ impl Operator { /// # use opendal::Scheme; /// # #[tokio::main] /// # async fn test(op: Operator) -> Result<()> { - /// let r = op.reader_with("path/to/file").range((0..10)).await?; + /// let r = op.reader_with("path/to/file").range(0..10).await?; /// # Ok(()) /// # } /// ``` @@ -503,7 +709,16 @@ impl Operator { /// /// # Notes /// - /// - Write will make sure all bytes has been written, or an error will be returned. + /// ## Extra Options + /// + /// [`Operator::write`] is a wrapper of [`Operator::write_with`] without any options. To use + /// extra options like `content_type` and `cache_control`, please use [`Operator::write_with`] + /// instead. + /// + /// ## Streaming Write + /// + /// This function will write all bytes at once. For more precise memory control or + /// writing data lazily, please use [`Operator::writer`]. /// /// # Examples /// @@ -644,7 +859,13 @@ impl Operator { /// Write multiple bytes into path. /// - /// Refer to [`Writer`] for more details. + /// # Notes + /// + /// ## Extra Options + /// + /// [`Operator::write`] is a wrapper of [`Operator::write_with`] without any options. To use + /// extra options like `content_type` and `cache_control`, please use [`Operator::write_with`] + /// instead. /// /// # Examples /// @@ -670,7 +891,176 @@ impl Operator { /// Write multiple bytes into path with extra options. /// - /// Refer to [`Writer`] for more details. + /// # Options + /// + /// ## `append` + /// + /// Set `append` for this `write` request. 
+ /// + /// `write` by default to overwrite existing files. To append to the end of file instead, + /// please set `append` to true. + /// + /// The following example will append data to existing file instead. + /// + /// ``` + /// # use std::io::Result; + /// # use opendal::Operator; + /// # use futures::StreamExt; + /// # use futures::SinkExt; + /// use bytes::Bytes; + /// + /// # #[tokio::main] + /// # async fn test(op: Operator) -> Result<()> { + /// let mut w = op.writer_with("path/to/file").append(true).await?; + /// w.write(vec![0; 4096]).await?; + /// w.write(vec![1; 4096]).await?; + /// w.close().await?; + /// # Ok(()) + /// # } + /// ``` + /// + /// ## `buffer` + /// + /// Set `buffer` for the writer. + /// + /// OpenDAL by default to write file without buffer. This is not efficient for cases when users + /// write small chunks of data. Some storage services like `s3` could even return hard errors + /// like `EntityTooSmall`. To improve performance, we can set a buffer. + /// + /// Besides, cloud storage services will cost more money if we write data in small chunks. Set + /// a good buffer size might reduce the API calls and save money. + /// + /// The following example will set the writer buffer to 8MiB. Only one API call will be sent at + /// `close` instead. + /// + /// ``` + /// # use std::io::Result; + /// # use opendal::Operator; + /// # use futures::StreamExt; + /// # use futures::SinkExt; + /// use bytes::Bytes; + /// + /// # #[tokio::main] + /// # async fn test(op: Operator) -> Result<()> { + /// let mut w = op + /// .writer_with("path/to/file") + /// .buffer(8 * 1024 * 1024) + /// .await?; + /// w.write(vec![0; 4096]).await?; + /// w.write(vec![1; 4096]).await?; + /// w.close().await?; + /// # Ok(()) + /// # } + /// ``` + /// + /// ## `concurrent` + /// + /// Set `concurrent` for the writer. + /// + /// OpenDAL by default to write file without concurrent. This is not efficient for cases when users + /// write large chunks of data. 
By setting `concurrent`, opendal will write files concurrently
+ /// on supported storage services.
+ ///
+ /// The following example will set the writer concurrent to 8.
+ ///
+ /// - The first write will start and return immediately.
+ /// - The second write will start and return immediately.
+ /// - The close will make sure all writes are done in order and return result.
+ ///
+ /// ```
+ /// # use std::io::Result;
+ /// # use opendal::Operator;
+ /// # use futures::StreamExt;
+ /// # use futures::SinkExt;
+ /// use bytes::Bytes;
+ ///
+ /// # #[tokio::main]
+ /// # async fn test(op: Operator) -> Result<()> {
+ /// let mut w = op.writer_with("path/to/file").concurrent(8).await?;
+ /// w.write(vec![0; 4096]).await?; // Start the first write
+ /// w.write(vec![1; 4096]).await?; // Second write will be concurrent without wait
+ /// w.close().await?; // Close will make sure all writes are done and success
+ /// # Ok(())
+ /// # }
+ /// ```
+ ///
+ /// ## `cache_control`
+ ///
+ /// Set the `cache_control` for this `write` request.
+ ///
+ /// Some storage services support setting `cache_control` as system metadata.
+ ///
+ /// ```
+ /// # use std::io::Result;
+ /// # use opendal::Operator;
+ /// # use futures::StreamExt;
+ /// # use futures::SinkExt;
+ /// use bytes::Bytes;
+ ///
+ /// # #[tokio::main]
+ /// # async fn test(op: Operator) -> Result<()> {
+ /// let mut w = op
+ /// .writer_with("path/to/file")
+ /// .cache_control("max-age=604800")
+ /// .await?;
+ /// w.write(vec![0; 4096]).await?;
+ /// w.write(vec![1; 4096]).await?;
+ /// w.close().await?;
+ /// # Ok(())
+ /// # }
+ /// ```
+ ///
+ /// ## `content_type`
+ ///
+ /// Set the `content_type` for this `write` request.
+ ///
+ /// Some storage services support setting `content_type` as system metadata.
+ /// + /// ``` + /// # use std::io::Result; + /// # use opendal::Operator; + /// # use futures::StreamExt; + /// # use futures::SinkExt; + /// use bytes::Bytes; + /// + /// # #[tokio::main] + /// # async fn test(op: Operator) -> Result<()> { + /// let mut w = op + /// .writer_with("path/to/file") + /// .content_type("text/plain") + /// .await?; + /// w.write(vec![0; 4096]).await?; + /// w.write(vec![1; 4096]).await?; + /// w.close().await?; + /// # Ok(()) + /// # } + /// ``` + /// + /// ## `content_disposition` + /// + /// Set the `content_disposition` for this `write` request. + /// + /// Some storage services support setting `content_disposition` as system metadata. + /// + /// ``` + /// # use std::io::Result; + /// # use opendal::Operator; + /// # use futures::StreamExt; + /// # use futures::SinkExt; + /// use bytes::Bytes; + /// + /// # #[tokio::main] + /// # async fn test(op: Operator) -> Result<()> { + /// let mut w = op + /// .writer_with("path/to/file") + /// .content_disposition("attachment; filename=\"filename.jpg\"") + /// .await?; + /// w.write(vec![0; 4096]).await?; + /// w.write(vec![1; 4096]).await?; + /// w.close().await?; + /// # Ok(()) + /// # } + /// ``` /// /// # Examples /// @@ -719,11 +1109,104 @@ impl Operator { /// /// # Notes /// - /// - Write will make sure all bytes has been written, or an error will be returned. + /// ## Streaming Write + /// + /// This function will write all bytes at once. For more precise memory control or + /// writing data lazily, please use [`Operator::writer_with`]. + /// + /// # Options + /// + /// ## `append` + /// + /// Set `append` for this `write` request. + /// + /// `write` by default to overwrite existing files. To append to the end of file instead, + /// please set `append` to true. + /// + /// The following example will append data to existing file instead. 
+ /// + /// ``` + /// # use std::io::Result; + /// # use opendal::Operator; + /// use bytes::Bytes; + /// + /// # #[tokio::main] + /// # async fn test(op: Operator) -> Result<()> { + /// let bs = b"hello, world!".to_vec(); + /// let _ = op.write_with("path/to/file", bs).append(true).await?; + /// # Ok(()) + /// # } + /// ``` + /// + /// ## `cache_control` + /// + /// Set the `cache_control` for this `write` request. + /// + /// Some storage services support setting `cache_control` as system metadata. + /// + /// ``` + /// # use std::io::Result; + /// # use opendal::Operator; + /// use bytes::Bytes; + /// + /// # #[tokio::main] + /// # async fn test(op: Operator) -> Result<()> { + /// let bs = b"hello, world!".to_vec(); + /// let _ = op + /// .write_with("path/to/file", bs) + /// .cache_control("max-age=604800") + /// .await?; + /// # Ok(()) + /// # } + /// ``` + /// + /// ## `content_type` + /// + /// Set the `content_type` for this `write` request. + /// + /// Some storage services support setting `content_type` as system metadata. + /// + /// ``` + /// # use std::io::Result; + /// # use opendal::Operator; + /// use bytes::Bytes; + /// + /// # #[tokio::main] + /// # async fn test(op: Operator) -> Result<()> { + /// let bs = b"hello, world!".to_vec(); + /// let _ = op + /// .write_with("path/to/file", bs) + /// .content_type("text/plain") + /// .await?; + /// # Ok(()) + /// # } + /// ``` + /// + /// ## `content_disposition` + /// + /// Set the `content_disposition` for this `write` request. + /// + /// Some storage services support setting `content_disposition` as system metadata. 
+ ///
+ /// ```
+ /// # use std::io::Result;
+ /// # use opendal::Operator;
+ /// use bytes::Bytes;
+ ///
+ /// # #[tokio::main]
+ /// # async fn test(op: Operator) -> Result<()> {
+ /// let bs = b"hello, world!".to_vec();
+ /// let _ = op
+ /// .write_with("path/to/file", bs)
+ /// .content_disposition("attachment; filename=\"filename.jpg\"")
+ /// .await?;
+ /// # Ok(())
+ /// # }
+ /// ```
 ///
 /// # Examples
 ///
- /// ```no_run
+ /// ```
 /// # use std::io::Result;
 /// # use opendal::Operator;
 /// use bytes::Bytes;
@@ -985,13 +1468,13 @@ impl Operator {
 ///
 /// # Notes
 ///
- /// ## Recursively list
+ /// ## Recursively List
 ///
 /// This function only read the children of the given directory. To read
 /// all entries recursively, use `Operator::list_with("path").recursive(true)`
 /// instead.
 ///
- /// ## Streaming list
+ /// ## Streaming List
 ///
 /// This function will read all entries in the given directory. It could
 /// take very long time and consume a lot of memory if the directory
@@ -1239,8 +1722,8 @@ impl Operator {
 /// List entries that starts with given `path` in parent dir.
 ///
- /// This function will create a new [`Lister`] to list entries. Users can stop listing via
- /// dropping this [`Lister`].
+ /// This function will create a new [`Lister`] to list entries. Users can stop
+ /// listing via dropping this [`Lister`].
 ///
 /// # Notes
 ///
@@ -1287,8 +1770,8 @@ impl Operator {
 /// List entries that starts with given `path` in parent dir with options.
 ///
- /// This function will create a new [`Lister`] to list entries. Users can stop listing via
- /// dropping this [`Lister`].
+ /// This function will create a new [`Lister`] to list entries. Users can stop listing via
+ /// dropping this [`Lister`].
/// /// # Options /// diff --git a/core/src/types/operator/operator_futures.rs b/core/src/types/operator/operator_futures.rs index 15ce16e0c61c..2822db84ccac 100644 --- a/core/src/types/operator/operator_futures.rs +++ b/core/src/types/operator/operator_futures.rs @@ -213,6 +213,21 @@ impl FutureRead { self.map(|args| args.with_range(range.into())) } + /// Set the buffer capability to enable buffer for reader. + pub fn buffer(self, v: usize) -> Self { + self.map(|args| args.with_buffer(v)) + } + + /// Set the If-Match for this operation. + pub fn if_match(self, v: &str) -> Self { + self.map(|args| args.with_if_match(v)) + } + + /// Set the If-None-Match for this operation. + pub fn if_none_match(self, v: &str) -> Self { + self.map(|args| args.with_if_none_match(v)) + } + /// Sets the content-disposition header that should be send back by the remote read operation. pub fn override_content_disposition(self, v: &str) -> Self { self.map(|args| args.with_override_content_disposition(v)) @@ -228,25 +243,10 @@ impl FutureRead { self.map(|args| args.with_override_content_type(v)) } - /// Set the If-Match for this operation. - pub fn if_match(self, v: &str) -> Self { - self.map(|args| args.with_if_match(v)) - } - - /// Set the If-None-Match for this operation. - pub fn if_none_match(self, v: &str) -> Self { - self.map(|args| args.with_if_none_match(v)) - } - /// Set the version for this operation. pub fn version(self, v: &str) -> Self { self.map(|args| args.with_version(v)) } - - /// Set the buffer capability to enable buffer for reader. - pub fn buffer(self, v: usize) -> Self { - self.map(|args| args.with_buffer(v)) - } } /// Future that generated by [`Operator::write_with`]. @@ -278,6 +278,16 @@ impl FutureWrite { self.map(|(args, bs)| (args.with_buffer(v), bs)) } + /// Set the maximum concurrent write task amount. 
+ pub fn concurrent(self, v: usize) -> Self {
+ self.map(|(args, bs)| (args.with_concurrent(v), bs))
+ }
+
+ /// Set the cache control of option
+ pub fn cache_control(self, v: &str) -> Self {
+ self.map(|(args, bs)| (args.with_cache_control(v), bs))
+ }
+
 /// Set the content type of option
 pub fn content_type(self, v: &str) -> Self {
 self.map(|(args, bs)| (args.with_content_type(v), bs))
@@ -287,16 +297,6 @@ impl FutureWrite {
 pub fn content_disposition(self, v: &str) -> Self {
 self.map(|(args, bs)| (args.with_content_disposition(v), bs))
 }
-
- /// Set the content type of option
- pub fn cache_control(self, v: &str) -> Self {
- self.map(|(args, bs)| (args.with_cache_control(v), bs))
- }
-
- /// Set the maximum concurrent write task amount.
- pub fn concurrent(self, v: usize) -> Self {
- self.map(|(args, bs)| (args.with_buffer(v), bs))
- }
 }

 /// Future that generated by [`Operator::writer_with`].
@@ -335,6 +335,16 @@ impl FutureWriter {
 self.map(|args| args.with_buffer(v))
 }

+ /// Set the maximum concurrent write task amount.
+ pub fn concurrent(self, v: usize) -> Self {
+ self.map(|args| args.with_concurrent(v))
+ }
+
+ /// Set the cache control of option
+ pub fn cache_control(self, v: &str) -> Self {
+ self.map(|args| args.with_cache_control(v))
+ }
+
 /// Set the content type of option
 pub fn content_type(self, v: &str) -> Self {
 self.map(|args| args.with_content_type(v))
@@ -344,16 +354,6 @@ impl FutureWriter {
 pub fn content_disposition(self, v: &str) -> Self {
 self.map(|args| args.with_content_disposition(v))
 }
-
- /// Set the content type of option
- pub fn cache_control(self, v: &str) -> Self {
- self.map(|args| args.with_cache_control(v))
- }
-
- /// Set the maximum concurrent write task amount.
- pub fn concurrent(self, v: usize) -> Self {
- self.map(|args| args.with_concurrent(v))
- }
}

/// Future that generated by [`Operator::delete_with`].