From c866d4bbae50f863b22ec2728011e80b633177a8 Mon Sep 17 00:00:00 2001 From: tison Date: Fri, 3 Jan 2025 21:37:41 +0800 Subject: [PATCH] feat: generate java configs (#5503) --- bindings/java/src/lib.rs | 4 +- .../org/apache/opendal/AsyncOperator.java | 34 +- .../java/org/apache/opendal/Operator.java | 17 +- .../org/apache/opendal/ServiceConfig.java | 3572 +++++++++++++++++ .../opendal/test/AsyncExecutorTest.java | 9 +- .../org/apache/opendal/test/LayerTest.java | 15 +- .../org/apache/opendal/test/MetadataTest.java | 15 +- .../apache/opendal/test/OperatorInfoTest.java | 15 +- .../test/OperatorInputOutputStreamTest.java | 9 +- .../opendal/test/OperatorUtf8DecodeTest.java | 9 +- dev/Cargo.lock | 1 + dev/Cargo.toml | 1 + dev/src/generate/java.j2 | 72 + dev/src/generate/java.rs | 122 + dev/src/generate/mod.rs | 2 + dev/src/generate/parser.rs | 15 + dev/src/generate/python.j2 | 2 +- dev/src/generate/python.rs | 21 +- 18 files changed, 3866 insertions(+), 69 deletions(-) create mode 100644 bindings/java/src/main/java/org/apache/opendal/ServiceConfig.java create mode 100644 dev/src/generate/java.j2 create mode 100644 dev/src/generate/java.rs diff --git a/bindings/java/src/lib.rs b/bindings/java/src/lib.rs index eb317f2640a7..45a0554edb08 100644 --- a/bindings/java/src/lib.rs +++ b/bindings/java/src/lib.rs @@ -70,7 +70,7 @@ fn make_presigned_request<'a>(env: &mut JNIEnv<'a>, req: PresignedRequest) -> Re } fn make_operator_info<'a>(env: &mut JNIEnv<'a>, info: OperatorInfo) -> Result> { - let schema = env.new_string(info.scheme().to_string())?; + let scheme = env.new_string(info.scheme().to_string())?; let root = env.new_string(info.root().to_string())?; let name = env.new_string(info.name().to_string())?; let full_capability_obj = make_capability(env, info.full_capability())?; @@ -81,7 +81,7 @@ fn make_operator_info<'a>(env: &mut JNIEnv<'a>, info: OperatorInfo) -> Result CompletableFuture take(long requestId) { private final long executorHandle; + /** + * Construct an OpenDAL operator. + * + * @param config the config of the underneath service to access data from. + */ + public static AsyncOperator of(ServiceConfig config) { + return of(config, null); + } + + /** + * Construct an OpenDAL operator. + * + * @param executor the underneath executor to run async operations; {@code null} to use a default global executor. + */ + public static AsyncOperator of(ServiceConfig config, AsyncExecutor executor) { + final String scheme = config.scheme(); + final Map map = config.configMap(); + return of(scheme, map, executor); + } + /** * Construct an OpenDAL operator: * @@ -113,11 +133,11 @@ private static CompletableFuture take(long requestId) { * You can find all possible schemes here * and see what config options each service supports. * - * @param schema the name of the underneath service to access data from. + * @param scheme the name of the underneath service to access data from. * @param map a map of properties to construct the underneath operator. */ - public static AsyncOperator of(String schema, Map map) { - return of(schema, map, null); + public static AsyncOperator of(String scheme, Map map) { + return of(scheme, map, null); } /** @@ -127,13 +147,13 @@ public static AsyncOperator of(String schema, Map map) { * You can find all possible schemes here * and see what config options each service supports. * - * @param schema the name of the underneath service to access data from. + * @param scheme the name of the underneath service to access data from. 
* @param map a map of properties to construct the underneath operator. * @param executor the underneath executor to run async operations; {@code null} to use a default global executor. */ - public static AsyncOperator of(String schema, Map map, AsyncExecutor executor) { + public static AsyncOperator of(String scheme, Map map, AsyncExecutor executor) { final long executorHandle = executor != null ? executor.nativeHandle : 0; - final long nativeHandle = constructor(executorHandle, schema, map); + final long nativeHandle = constructor(executorHandle, scheme, map); final OperatorInfo info = makeOperatorInfo(nativeHandle); return new AsyncOperator(nativeHandle, executorHandle, info); } @@ -248,7 +268,7 @@ public CompletableFuture> list(String path) { private static native long duplicate(long nativeHandle); - private static native long constructor(long executorHandle, String schema, Map map); + private static native long constructor(long executorHandle, String scheme, Map map); private static native long read(long nativeHandle, long executorHandle, String path); diff --git a/bindings/java/src/main/java/org/apache/opendal/Operator.java b/bindings/java/src/main/java/org/apache/opendal/Operator.java index bb08c87a9aa2..be496d2cb70f 100644 --- a/bindings/java/src/main/java/org/apache/opendal/Operator.java +++ b/bindings/java/src/main/java/org/apache/opendal/Operator.java @@ -30,6 +30,17 @@ public class Operator extends NativeObject { public final OperatorInfo info; + /** + * Construct an OpenDAL blocking operator. + * + * @param config the config of the underneath service to access data from. + */ + public static Operator of(ServiceConfig config) { + try (final AsyncOperator operator = AsyncOperator.of(config)) { + return operator.blocking(); + } + } + /** * Construct an OpenDAL blocking operator: * @@ -37,11 +48,11 @@ public class Operator extends NativeObject { * You can find all possible schemes here * and see what config options each service supports. * - * @param schema the name of the underneath service to access data from. + * @param scheme the name of the underneath service to access data from. * @param map a map of properties to construct the underneath operator. */ - public static Operator of(String schema, Map map) { - try (final AsyncOperator operator = AsyncOperator.of(schema, map)) { + public static Operator of(String scheme, Map map) { + try (final AsyncOperator operator = AsyncOperator.of(scheme, map)) { return operator.blocking(); } } diff --git a/bindings/java/src/main/java/org/apache/opendal/ServiceConfig.java b/bindings/java/src/main/java/org/apache/opendal/ServiceConfig.java new file mode 100644 index 000000000000..0c56d9acc20a --- /dev/null +++ b/bindings/java/src/main/java/org/apache/opendal/ServiceConfig.java @@ -0,0 +1,3572 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +// DO NOT EDIT IT MANUALLY. This file is generated by opendal/dev/generate/java.rs. + +package org.apache.opendal; + +import java.time.Duration; +import java.util.HashMap; +import lombok.AccessLevel; +import lombok.Builder; +import lombok.Data; +import lombok.NonNull; +import lombok.RequiredArgsConstructor; + +/** + * Service configurations that are mapped from + * OpenDAL's services. + */ +@SuppressWarnings("unused") // intended to be used by users +public interface ServiceConfig { + String scheme(); + + HashMap configMap(); + + @Builder + @Data + @RequiredArgsConstructor(access = AccessLevel.PRIVATE) + class AliyunDrive implements ServiceConfig { + /** + * The drive_type of this backend. + *
<p>
+ * All operations will happen under this type of drive. + *
<p>
+ * Available values are `default`, `backup` and `resource`. + *
<p>
+ * Fallback to default if not set or no other drives can be found. + */ + private final @NonNull String driveType; + /** + * The Root of this backend. + *
<p>
+ * All operations will happen under this root. + *
<p>
+ * Default to `/` if not set. + */ + private final String root; + /** + * The access_token of this backend. + *
<p>
+ * Solution for client-only purpose. #4733 + *
<p>
+ * Required if no client_id, client_secret and refresh_token are provided. + */ + private final String accessToken; + /** + * The client_id of this backend. + *
<p>
+ * Required if no access_token is provided. + */ + private final String clientId; + /** + * The client_secret of this backend. + *
<p>
+ * Required if no access_token is provided. + */ + private final String clientSecret; + /** + * The refresh_token of this backend. + *
<p>
+ * Required if no access_token is provided. + */ + private final String refreshToken; + + @Override + public String scheme() { + return "aliyun_drive"; + } + + @Override + public HashMap configMap() { + final HashMap map = new HashMap<>(); + map.put("drive_type", driveType); + if (root != null) { + map.put("root", root); + } + if (accessToken != null) { + map.put("access_token", accessToken); + } + if (clientId != null) { + map.put("client_id", clientId); + } + if (clientSecret != null) { + map.put("client_secret", clientSecret); + } + if (refreshToken != null) { + map.put("refresh_token", refreshToken); + } + return map; + } + } + + @Builder + @Data + @RequiredArgsConstructor(access = AccessLevel.PRIVATE) + class Alluxio implements ServiceConfig { + /** + * root of this backend. + *
<p>
+ * All operations will happen under this root. + *
<p>
+ * default to `/` if not set. + */ + private final String root; + /** + * endpoint of this backend. + *
<p>
+ * Endpoint must be full uri, mostly like `http://127.0.0.1:39999`. + */ + private final String endpoint; + + @Override + public String scheme() { + return "alluxio"; + } + + @Override + public HashMap configMap() { + final HashMap map = new HashMap<>(); + if (root != null) { + map.put("root", root); + } + if (endpoint != null) { + map.put("endpoint", endpoint); + } + return map; + } + } + + @Builder + @Data + @RequiredArgsConstructor(access = AccessLevel.PRIVATE) + class Atomicserver implements ServiceConfig { + /** + * work dir of this backend + */ + private final String root; + /** + * endpoint of this backend + */ + private final String endpoint; + /** + * private_key of this backend + */ + private final String privateKey; + /** + * public_key of this backend + */ + private final String publicKey; + /** + * parent_resource_id of this backend + */ + private final String parentResourceId; + + @Override + public String scheme() { + return "atomicserver"; + } + + @Override + public HashMap configMap() { + final HashMap map = new HashMap<>(); + if (root != null) { + map.put("root", root); + } + if (endpoint != null) { + map.put("endpoint", endpoint); + } + if (privateKey != null) { + map.put("private_key", privateKey); + } + if (publicKey != null) { + map.put("public_key", publicKey); + } + if (parentResourceId != null) { + map.put("parent_resource_id", parentResourceId); + } + return map; + } + } + + @Builder + @Data + @RequiredArgsConstructor(access = AccessLevel.PRIVATE) + class Azblob implements ServiceConfig { + /** + * The container name of Azblob service backend. + */ + private final @NonNull String container; + /** + * The root of Azblob service backend. + *
<p>
+ * All operations will happen under this root. + */ + private final String root; + /** + * The endpoint of Azblob service backend. + *
<p>
+ * Endpoint must be full uri, e.g. + *
<p>
+ * - Azblob: `https://accountname.blob.core.windows.net` + * - Azurite: `http://127.0.0.1:10000/devstoreaccount1` + */ + private final String endpoint; + /** + * The account name of Azblob service backend. + */ + private final String accountName; + /** + * The account key of Azblob service backend. + */ + private final String accountKey; + /** + * The encryption key of Azblob service backend. + */ + private final String encryptionKey; + /** + * The encryption key sha256 of Azblob service backend. + */ + private final String encryptionKeySha256; + /** + * The encryption algorithm of Azblob service backend. + */ + private final String encryptionAlgorithm; + /** + * The sas token of Azblob service backend. + */ + private final String sasToken; + /** + * The maximum batch operations of Azblob service backend. + */ + private final Long batchMaxOperations; + + @Override + public String scheme() { + return "azblob"; + } + + @Override + public HashMap configMap() { + final HashMap map = new HashMap<>(); + map.put("container", container); + if (root != null) { + map.put("root", root); + } + if (endpoint != null) { + map.put("endpoint", endpoint); + } + if (accountName != null) { + map.put("account_name", accountName); + } + if (accountKey != null) { + map.put("account_key", accountKey); + } + if (encryptionKey != null) { + map.put("encryption_key", encryptionKey); + } + if (encryptionKeySha256 != null) { + map.put("encryption_key_sha256", encryptionKeySha256); + } + if (encryptionAlgorithm != null) { + map.put("encryption_algorithm", encryptionAlgorithm); + } + if (sasToken != null) { + map.put("sas_token", sasToken); + } + if (batchMaxOperations != null) { + map.put("batch_max_operations", String.valueOf(batchMaxOperations)); + } + return map; + } + } + + @Builder + @Data + @RequiredArgsConstructor(access = AccessLevel.PRIVATE) + class Azdls implements ServiceConfig { + /** + * Filesystem name of this backend. + */ + private final @NonNull String filesystem; + /** + * Root of this backend. + */ + private final String root; + /** + * Endpoint of this backend. + */ + private final String endpoint; + /** + * Account name of this backend. + */ + private final String accountName; + /** + * Account key of this backend. + */ + private final String accountKey; + + @Override + public String scheme() { + return "azdls"; + } + + @Override + public HashMap configMap() { + final HashMap map = new HashMap<>(); + map.put("filesystem", filesystem); + if (root != null) { + map.put("root", root); + } + if (endpoint != null) { + map.put("endpoint", endpoint); + } + if (accountName != null) { + map.put("account_name", accountName); + } + if (accountKey != null) { + map.put("account_key", accountKey); + } + return map; + } + } + + @Builder + @Data + @RequiredArgsConstructor(access = AccessLevel.PRIVATE) + class Azfile implements ServiceConfig { + /** + * The share name for azfile. + */ + private final @NonNull String shareName; + /** + * The root path for azfile. + */ + private final String root; + /** + * The endpoint for azfile. + */ + private final String endpoint; + /** + * The account name for azfile. + */ + private final String accountName; + /** + * The account key for azfile. + */ + private final String accountKey; + /** + * The sas token for azfile. 
+ */ + private final String sasToken; + + @Override + public String scheme() { + return "azfile"; + } + + @Override + public HashMap configMap() { + final HashMap map = new HashMap<>(); + map.put("share_name", shareName); + if (root != null) { + map.put("root", root); + } + if (endpoint != null) { + map.put("endpoint", endpoint); + } + if (accountName != null) { + map.put("account_name", accountName); + } + if (accountKey != null) { + map.put("account_key", accountKey); + } + if (sasToken != null) { + map.put("sas_token", sasToken); + } + return map; + } + } + + @Builder + @Data + @RequiredArgsConstructor(access = AccessLevel.PRIVATE) + class B2 implements ServiceConfig { + /** + * bucket of this backend. + *
<p>
+ * required. + */ + private final @NonNull String bucket; + /** + * bucket id of this backend. + *
<p>
+ * required. + */ + private final @NonNull String bucketId; + /** + * root of this backend. + *
<p>
+ * All operations will happen under this root. + */ + private final String root; + /** + * keyID of this backend. + *
<p>
+ * - If application_key_id is set, we will take user's input first. + * - If not, we will try to load it from environment. + */ + private final String applicationKeyId; + /** + * applicationKey of this backend. + *
<p>
+ * - If application_key is set, we will take user's input first. + * - If not, we will try to load it from environment. + */ + private final String applicationKey; + + @Override + public String scheme() { + return "b2"; + } + + @Override + public HashMap configMap() { + final HashMap map = new HashMap<>(); + map.put("bucket", bucket); + map.put("bucket_id", bucketId); + if (root != null) { + map.put("root", root); + } + if (applicationKeyId != null) { + map.put("application_key_id", applicationKeyId); + } + if (applicationKey != null) { + map.put("application_key", applicationKey); + } + return map; + } + } + + @Builder + @Data + @RequiredArgsConstructor(access = AccessLevel.PRIVATE) + class Cacache implements ServiceConfig { + /** + * That path to the cacache data directory. + */ + private final String datadir; + + @Override + public String scheme() { + return "cacache"; + } + + @Override + public HashMap configMap() { + final HashMap map = new HashMap<>(); + if (datadir != null) { + map.put("datadir", datadir); + } + return map; + } + } + + @Builder + @Data + @RequiredArgsConstructor(access = AccessLevel.PRIVATE) + class Chainsafe implements ServiceConfig { + /** + * bucket_id of this backend. + *
<p>
+ * required. + */ + private final @NonNull String bucketId; + /** + * root of this backend. + *
<p>
+ * All operations will happen under this root. + */ + private final String root; + /** + * api_key of this backend. + */ + private final String apiKey; + + @Override + public String scheme() { + return "chainsafe"; + } + + @Override + public HashMap configMap() { + final HashMap map = new HashMap<>(); + map.put("bucket_id", bucketId); + if (root != null) { + map.put("root", root); + } + if (apiKey != null) { + map.put("api_key", apiKey); + } + return map; + } + } + + @Builder + @Data + @RequiredArgsConstructor(access = AccessLevel.PRIVATE) + class CloudflareKv implements ServiceConfig { + /** + * The token used to authenticate with CloudFlare. + */ + private final String token; + /** + * The account ID used to authenticate with CloudFlare. Used as URI path parameter. + */ + private final String accountId; + /** + * The namespace ID. Used as URI path parameter. + */ + private final String namespaceId; + /** + * Root within this backend. + */ + private final String root; + + @Override + public String scheme() { + return "cloudflare_kv"; + } + + @Override + public HashMap configMap() { + final HashMap map = new HashMap<>(); + if (token != null) { + map.put("token", token); + } + if (accountId != null) { + map.put("account_id", accountId); + } + if (namespaceId != null) { + map.put("namespace_id", namespaceId); + } + if (root != null) { + map.put("root", root); + } + return map; + } + } + + @Builder + @Data + @RequiredArgsConstructor(access = AccessLevel.PRIVATE) + class Compfs implements ServiceConfig { + /** + * root of this backend. + *
<p>
+ * All operations will happen under this root. + */ + private final String root; + + @Override + public String scheme() { + return "compfs"; + } + + @Override + public HashMap configMap() { + final HashMap map = new HashMap<>(); + if (root != null) { + map.put("root", root); + } + return map; + } + } + + @Builder + @Data + @RequiredArgsConstructor(access = AccessLevel.PRIVATE) + class Cos implements ServiceConfig { + /** + * Root of this backend. + */ + private final String root; + /** + * Endpoint of this backend. + */ + private final String endpoint; + /** + * Secret ID of this backend. + */ + private final String secretId; + /** + * Secret key of this backend. + */ + private final String secretKey; + /** + * Bucket of this backend. + */ + private final String bucket; + /** + * Disable config load so that opendal will not load config from + */ + private final Boolean disableConfigLoad; + + @Override + public String scheme() { + return "cos"; + } + + @Override + public HashMap configMap() { + final HashMap map = new HashMap<>(); + if (root != null) { + map.put("root", root); + } + if (endpoint != null) { + map.put("endpoint", endpoint); + } + if (secretId != null) { + map.put("secret_id", secretId); + } + if (secretKey != null) { + map.put("secret_key", secretKey); + } + if (bucket != null) { + map.put("bucket", bucket); + } + if (disableConfigLoad != null) { + map.put("disable_config_load", String.valueOf(disableConfigLoad)); + } + return map; + } + } + + @Builder + @Data + @RequiredArgsConstructor(access = AccessLevel.PRIVATE) + class D1 implements ServiceConfig { + /** + * Set the token of cloudflare api. + */ + private final String token; + /** + * Set the account id of cloudflare api. + */ + private final String accountId; + /** + * Set the database id of cloudflare api. + */ + private final String databaseId; + /** + * Set the working directory of OpenDAL. + */ + private final String root; + /** + * Set the table of D1 Database. + */ + private final String table; + /** + * Set the key field of D1 Database. + */ + private final String keyField; + /** + * Set the value field of D1 Database. + */ + private final String valueField; + + @Override + public String scheme() { + return "d1"; + } + + @Override + public HashMap configMap() { + final HashMap map = new HashMap<>(); + if (token != null) { + map.put("token", token); + } + if (accountId != null) { + map.put("account_id", accountId); + } + if (databaseId != null) { + map.put("database_id", databaseId); + } + if (root != null) { + map.put("root", root); + } + if (table != null) { + map.put("table", table); + } + if (keyField != null) { + map.put("key_field", keyField); + } + if (valueField != null) { + map.put("value_field", valueField); + } + return map; + } + } + + @Builder + @Data + @RequiredArgsConstructor(access = AccessLevel.PRIVATE) + class Dashmap implements ServiceConfig { + /** + * The root path for dashmap. + */ + private final String root; + + @Override + public String scheme() { + return "dashmap"; + } + + @Override + public HashMap configMap() { + final HashMap map = new HashMap<>(); + if (root != null) { + map.put("root", root); + } + return map; + } + } + + @Builder + @Data + @RequiredArgsConstructor(access = AccessLevel.PRIVATE) + class Dbfs implements ServiceConfig { + /** + * The root for dbfs. + */ + private final String root; + /** + * The endpoint for dbfs. + */ + private final String endpoint; + /** + * The token for dbfs. 
+ */ + private final String token; + + @Override + public String scheme() { + return "dbfs"; + } + + @Override + public HashMap configMap() { + final HashMap map = new HashMap<>(); + if (root != null) { + map.put("root", root); + } + if (endpoint != null) { + map.put("endpoint", endpoint); + } + if (token != null) { + map.put("token", token); + } + return map; + } + } + + @Builder + @Data + @RequiredArgsConstructor(access = AccessLevel.PRIVATE) + class Dropbox implements ServiceConfig { + /** + * root path for dropbox. + */ + private final String root; + /** + * access token for dropbox. + */ + private final String accessToken; + /** + * refresh_token for dropbox. + */ + private final String refreshToken; + /** + * client_id for dropbox. + */ + private final String clientId; + /** + * client_secret for dropbox. + */ + private final String clientSecret; + + @Override + public String scheme() { + return "dropbox"; + } + + @Override + public HashMap configMap() { + final HashMap map = new HashMap<>(); + if (root != null) { + map.put("root", root); + } + if (accessToken != null) { + map.put("access_token", accessToken); + } + if (refreshToken != null) { + map.put("refresh_token", refreshToken); + } + if (clientId != null) { + map.put("client_id", clientId); + } + if (clientSecret != null) { + map.put("client_secret", clientSecret); + } + return map; + } + } + + @Builder + @Data + @RequiredArgsConstructor(access = AccessLevel.PRIVATE) + class Etcd implements ServiceConfig { + /** + * network address of the Etcd services. + * If use https, must set TLS options: `ca_path`, `cert_path`, `key_path`. + * e.g. "127.0.0.1:23790,127.0.0.1:23791,127.0.0.1:23792" or "http://127.0.0.1:23790,http://127.0.0.1:23791,http://127.0.0.1:23792" or "https://127.0.0.1:23790,https://127.0.0.1:23791,https://127.0.0.1:23792" + *
<p>
+ * default is "http://127.0.0.1:2379" + */ + private final String endpoints; + /** + * the username to connect etcd service. + *
<p>
+ * default is None + */ + private final String username; + /** + * the password for authentication + *
<p>
+ * default is None + */ + private final String password; + /** + * the working directory of the etcd service. Can be "/path/to/dir" + *
<p>
+ * default is "/" + */ + private final String root; + /** + * certificate authority file path + *
<p>
+ * default is None + */ + private final String caPath; + /** + * cert path + *
<p>
+ * default is None + */ + private final String certPath; + /** + * key path + *
<p>
+ * default is None + */ + private final String keyPath; + + @Override + public String scheme() { + return "etcd"; + } + + @Override + public HashMap configMap() { + final HashMap map = new HashMap<>(); + if (endpoints != null) { + map.put("endpoints", endpoints); + } + if (username != null) { + map.put("username", username); + } + if (password != null) { + map.put("password", password); + } + if (root != null) { + map.put("root", root); + } + if (caPath != null) { + map.put("ca_path", caPath); + } + if (certPath != null) { + map.put("cert_path", certPath); + } + if (keyPath != null) { + map.put("key_path", keyPath); + } + return map; + } + } + + @Builder + @Data + @RequiredArgsConstructor(access = AccessLevel.PRIVATE) + class Fs implements ServiceConfig { + /** + * root dir for backend + */ + private final String root; + /** + * tmp dir for atomic write + */ + private final String atomicWriteDir; + + @Override + public String scheme() { + return "fs"; + } + + @Override + public HashMap configMap() { + final HashMap map = new HashMap<>(); + if (root != null) { + map.put("root", root); + } + if (atomicWriteDir != null) { + map.put("atomic_write_dir", atomicWriteDir); + } + return map; + } + } + + @Builder + @Data + @RequiredArgsConstructor(access = AccessLevel.PRIVATE) + class Gcs implements ServiceConfig { + /** + * bucket name + */ + private final @NonNull String bucket; + /** + * root URI, all operations happens under `root` + */ + private final String root; + /** + * endpoint URI of GCS service, + * default is `https://storage.googleapis.com` + */ + private final String endpoint; + /** + * Scope for gcs. + */ + private final String scope; + /** + * Service Account for gcs. + */ + private final String serviceAccount; + /** + * Credentials string for GCS service OAuth2 authentication. + */ + private final String credential; + /** + * Local path to credentials file for GCS service OAuth2 authentication. + */ + private final String credentialPath; + /** + * The predefined acl for GCS. + */ + private final String predefinedAcl; + /** + * The default storage class used by gcs. + */ + private final String defaultStorageClass; + /** + * Allow opendal to send requests without signing when credentials are not + * loaded. + */ + private final Boolean allowAnonymous; + /** + * Disable attempting to load credentials from the GCE metadata server when + * running within Google Cloud. + */ + private final Boolean disableVmMetadata; + /** + * Disable loading configuration from the environment. + */ + private final Boolean disableConfigLoad; + /** + * A Google Cloud OAuth2 token. + *
<p>
+ * Takes precedence over `credential` and `credential_path`. + */ + private final String token; + + @Override + public String scheme() { + return "gcs"; + } + + @Override + public HashMap configMap() { + final HashMap map = new HashMap<>(); + map.put("bucket", bucket); + if (root != null) { + map.put("root", root); + } + if (endpoint != null) { + map.put("endpoint", endpoint); + } + if (scope != null) { + map.put("scope", scope); + } + if (serviceAccount != null) { + map.put("service_account", serviceAccount); + } + if (credential != null) { + map.put("credential", credential); + } + if (credentialPath != null) { + map.put("credential_path", credentialPath); + } + if (predefinedAcl != null) { + map.put("predefined_acl", predefinedAcl); + } + if (defaultStorageClass != null) { + map.put("default_storage_class", defaultStorageClass); + } + if (allowAnonymous != null) { + map.put("allow_anonymous", String.valueOf(allowAnonymous)); + } + if (disableVmMetadata != null) { + map.put("disable_vm_metadata", String.valueOf(disableVmMetadata)); + } + if (disableConfigLoad != null) { + map.put("disable_config_load", String.valueOf(disableConfigLoad)); + } + if (token != null) { + map.put("token", token); + } + return map; + } + } + + @Builder + @Data + @RequiredArgsConstructor(access = AccessLevel.PRIVATE) + class Gdrive implements ServiceConfig { + /** + * The root for gdrive + */ + private final String root; + /** + * Access token for gdrive. + */ + private final String accessToken; + /** + * Refresh token for gdrive. + */ + private final String refreshToken; + /** + * Client id for gdrive. + */ + private final String clientId; + /** + * Client secret for gdrive. + */ + private final String clientSecret; + + @Override + public String scheme() { + return "gdrive"; + } + + @Override + public HashMap configMap() { + final HashMap map = new HashMap<>(); + if (root != null) { + map.put("root", root); + } + if (accessToken != null) { + map.put("access_token", accessToken); + } + if (refreshToken != null) { + map.put("refresh_token", refreshToken); + } + if (clientId != null) { + map.put("client_id", clientId); + } + if (clientSecret != null) { + map.put("client_secret", clientSecret); + } + return map; + } + } + + @Builder + @Data + @RequiredArgsConstructor(access = AccessLevel.PRIVATE) + class Ghac implements ServiceConfig { + /** + * The root path for ghac. + */ + private final String root; + /** + * The version that used by cache. + */ + private final String version; + /** + * The endpoint for ghac service. + */ + private final String endpoint; + /** + * The runtime token for ghac service. + */ + private final String runtimeToken; + + @Override + public String scheme() { + return "ghac"; + } + + @Override + public HashMap configMap() { + final HashMap map = new HashMap<>(); + if (root != null) { + map.put("root", root); + } + if (version != null) { + map.put("version", version); + } + if (endpoint != null) { + map.put("endpoint", endpoint); + } + if (runtimeToken != null) { + map.put("runtime_token", runtimeToken); + } + return map; + } + } + + @Builder + @Data + @RequiredArgsConstructor(access = AccessLevel.PRIVATE) + class Github implements ServiceConfig { + /** + * GitHub repo owner. + *
<p>
+ * required. + */ + private final @NonNull String owner; + /** + * GitHub repo name. + *
<p>
+ * required. + */ + private final @NonNull String repo; + /** + * root of this backend. + *
<p>
+ * All operations will happen under this root. + */ + private final String root; + /** + * GitHub access_token. + *
<p>
+ * optional. + * If not provided, the backend will only support read operations for public repositories. + * And rate limit will be limited to 60 requests per hour. + */ + private final String token; + + @Override + public String scheme() { + return "github"; + } + + @Override + public HashMap configMap() { + final HashMap map = new HashMap<>(); + map.put("owner", owner); + map.put("repo", repo); + if (root != null) { + map.put("root", root); + } + if (token != null) { + map.put("token", token); + } + return map; + } + } + + @Builder + @Data + @RequiredArgsConstructor(access = AccessLevel.PRIVATE) + class Gridfs implements ServiceConfig { + /** + * The connection string of the MongoDB service. + */ + private final String connectionString; + /** + * The database name of the MongoDB GridFs service to read/write. + */ + private final String database; + /** + * The bucket name of the MongoDB GridFs service to read/write. + */ + private final String bucket; + /** + * The chunk size of the MongoDB GridFs service used to break the user file into chunks. + */ + private final Integer chunkSize; + /** + * The working directory, all operations will be performed under it. + */ + private final String root; + + @Override + public String scheme() { + return "gridfs"; + } + + @Override + public HashMap configMap() { + final HashMap map = new HashMap<>(); + if (connectionString != null) { + map.put("connection_string", connectionString); + } + if (database != null) { + map.put("database", database); + } + if (bucket != null) { + map.put("bucket", bucket); + } + if (chunkSize != null) { + map.put("chunk_size", String.valueOf(chunkSize)); + } + if (root != null) { + map.put("root", root); + } + return map; + } + } + + @Builder + @Data + @RequiredArgsConstructor(access = AccessLevel.PRIVATE) + class HdfsNative implements ServiceConfig { + /** + * work dir of this backend + */ + private final String root; + /** + * url of this backend + */ + private final String url; + /** + * enable the append capacity + */ + private final Boolean enableAppend; + + @Override + public String scheme() { + return "hdfs_native"; + } + + @Override + public HashMap configMap() { + final HashMap map = new HashMap<>(); + if (root != null) { + map.put("root", root); + } + if (url != null) { + map.put("url", url); + } + if (enableAppend != null) { + map.put("enable_append", String.valueOf(enableAppend)); + } + return map; + } + } + + @Builder + @Data + @RequiredArgsConstructor(access = AccessLevel.PRIVATE) + class Http implements ServiceConfig { + /** + * endpoint of this backend + */ + private final String endpoint; + /** + * username of this backend + */ + private final String username; + /** + * password of this backend + */ + private final String password; + /** + * token of this backend + */ + private final String token; + /** + * root of this backend + */ + private final String root; + + @Override + public String scheme() { + return "http"; + } + + @Override + public HashMap configMap() { + final HashMap map = new HashMap<>(); + if (endpoint != null) { + map.put("endpoint", endpoint); + } + if (username != null) { + map.put("username", username); + } + if (password != null) { + map.put("password", password); + } + if (token != null) { + map.put("token", token); + } + if (root != null) { + map.put("root", root); + } + return map; + } + } + + @Builder + @Data + @RequiredArgsConstructor(access = AccessLevel.PRIVATE) + class Huggingface implements ServiceConfig { + /** + * Repo type of this backend. Default is model. + *
<p>
+ * Available values: + * - model + * - dataset + */ + private final String repoType; + /** + * Repo id of this backend. + *
<p>
+ * This is required. + */ + private final String repoId; + /** + * Revision of this backend. + *
<p>
+ * Default is main. + */ + private final String revision; + /** + * Root of this backend. Can be "/path/to/dir". + *
<p>
+ * Default is "/". + */ + private final String root; + /** + * Token of this backend. + *
<p>
+ * This is optional. + */ + private final String token; + + @Override + public String scheme() { + return "huggingface"; + } + + @Override + public HashMap configMap() { + final HashMap map = new HashMap<>(); + if (repoType != null) { + map.put("repo_type", repoType); + } + if (repoId != null) { + map.put("repo_id", repoId); + } + if (revision != null) { + map.put("revision", revision); + } + if (root != null) { + map.put("root", root); + } + if (token != null) { + map.put("token", token); + } + return map; + } + } + + @Builder + @Data + @RequiredArgsConstructor(access = AccessLevel.PRIVATE) + class Icloud implements ServiceConfig { + /** + * root of this backend. + *
<p>
+ * All operations will happen under this root. + *
<p>
+ * default to `/` if not set. + */ + private final String root; + /** + * apple_id of this backend. + *
<p>
+ * apple_id must be full, mostly like `example@gmail.com`. + */ + private final String appleId; + /** + * password of this backend. + *
<p>
+ * password must be full. + */ + private final String password; + /** + * Session + *
<p>
+ * token must be valid. + */ + private final String trustToken; + /** + * ds_web_auth_token must be set in Session + */ + private final String dsWebAuthToken; + /** + * enable the china origin + * China region `origin` Header needs to be set to "https://www.icloud.com.cn". + *
<p>
+ * otherwise Apple server will return 302. + */ + private final Boolean isChinaMainland; + + @Override + public String scheme() { + return "icloud"; + } + + @Override + public HashMap configMap() { + final HashMap map = new HashMap<>(); + if (root != null) { + map.put("root", root); + } + if (appleId != null) { + map.put("apple_id", appleId); + } + if (password != null) { + map.put("password", password); + } + if (trustToken != null) { + map.put("trust_token", trustToken); + } + if (dsWebAuthToken != null) { + map.put("ds_web_auth_token", dsWebAuthToken); + } + if (isChinaMainland != null) { + map.put("is_china_mainland", String.valueOf(isChinaMainland)); + } + return map; + } + } + + @Builder + @Data + @RequiredArgsConstructor(access = AccessLevel.PRIVATE) + class Ipfs implements ServiceConfig { + /** + * IPFS gateway endpoint. + */ + private final String endpoint; + /** + * IPFS root. + */ + private final String root; + + @Override + public String scheme() { + return "ipfs"; + } + + @Override + public HashMap configMap() { + final HashMap map = new HashMap<>(); + if (endpoint != null) { + map.put("endpoint", endpoint); + } + if (root != null) { + map.put("root", root); + } + return map; + } + } + + @Builder + @Data + @RequiredArgsConstructor(access = AccessLevel.PRIVATE) + class Ipmfs implements ServiceConfig { + /** + * Root for ipfs. + */ + private final String root; + /** + * Endpoint for ipfs. + */ + private final String endpoint; + + @Override + public String scheme() { + return "ipmfs"; + } + + @Override + public HashMap configMap() { + final HashMap map = new HashMap<>(); + if (root != null) { + map.put("root", root); + } + if (endpoint != null) { + map.put("endpoint", endpoint); + } + return map; + } + } + + @Builder + @Data + @RequiredArgsConstructor(access = AccessLevel.PRIVATE) + class Koofr implements ServiceConfig { + /** + * Koofr endpoint. + */ + private final @NonNull String endpoint; + /** + * Koofr email. + */ + private final @NonNull String email; + /** + * root of this backend. + *
<p>
+ * All operations will happen under this root. + */ + private final String root; + /** + * password of this backend. (Must be the application password) + */ + private final String password; + + @Override + public String scheme() { + return "koofr"; + } + + @Override + public HashMap configMap() { + final HashMap map = new HashMap<>(); + map.put("endpoint", endpoint); + map.put("email", email); + if (root != null) { + map.put("root", root); + } + if (password != null) { + map.put("password", password); + } + return map; + } + } + + @Builder + @Data + @RequiredArgsConstructor(access = AccessLevel.PRIVATE) + class Lakefs implements ServiceConfig { + /** + * Base url. + *
<p>
+ * This is required. + */ + private final String endpoint; + /** + * Username for Lakefs basic authentication. + *
<p>
+ * This is required. + */ + private final String username; + /** + * Password for Lakefs basic authentication. + *
<p>
+ * This is required. + */ + private final String password; + /** + * Root of this backend. Can be "/path/to/dir". + *
<p>
+ * Default is "/". + */ + private final String root; + /** + * The repository name + *
<p>
+ * This is required. + */ + private final String repository; + /** + * Name of the branch or a commit ID. Default is main. + *
<p>
+ * This is optional. + */ + private final String branch; + + @Override + public String scheme() { + return "lakefs"; + } + + @Override + public HashMap configMap() { + final HashMap map = new HashMap<>(); + if (endpoint != null) { + map.put("endpoint", endpoint); + } + if (username != null) { + map.put("username", username); + } + if (password != null) { + map.put("password", password); + } + if (root != null) { + map.put("root", root); + } + if (repository != null) { + map.put("repository", repository); + } + if (branch != null) { + map.put("branch", branch); + } + return map; + } + } + + @Builder + @Data + @RequiredArgsConstructor(access = AccessLevel.PRIVATE) + class Libsql implements ServiceConfig { + /** + * Connection string for libsql service. + */ + private final String connectionString; + /** + * Authentication token for libsql service. + */ + private final String authToken; + /** + * Table name for libsql service. + */ + private final String table; + /** + * Key field name for libsql service. + */ + private final String keyField; + /** + * Value field name for libsql service. + */ + private final String valueField; + /** + * Root for libsql service. + */ + private final String root; + + @Override + public String scheme() { + return "libsql"; + } + + @Override + public HashMap configMap() { + final HashMap map = new HashMap<>(); + if (connectionString != null) { + map.put("connection_string", connectionString); + } + if (authToken != null) { + map.put("auth_token", authToken); + } + if (table != null) { + map.put("table", table); + } + if (keyField != null) { + map.put("key_field", keyField); + } + if (valueField != null) { + map.put("value_field", valueField); + } + if (root != null) { + map.put("root", root); + } + return map; + } + } + + @Builder + @Data + @RequiredArgsConstructor(access = AccessLevel.PRIVATE) + class Memcached implements ServiceConfig { + /** + * network address of the memcached service. + *
<p>
+ * For example: "tcp://localhost:11211" + */ + private final String endpoint; + /** + * the working directory of the service. Can be "/path/to/dir" + *
<p>
+ * default is "/" + */ + private final String root; + /** + * Memcached username, optional. + */ + private final String username; + /** + * Memcached password, optional. + */ + private final String password; + /** + * The default ttl for put operations. + */ + private final Duration defaultTtl; + + @Override + public String scheme() { + return "memcached"; + } + + @Override + public HashMap configMap() { + final HashMap map = new HashMap<>(); + if (endpoint != null) { + map.put("endpoint", endpoint); + } + if (root != null) { + map.put("root", root); + } + if (username != null) { + map.put("username", username); + } + if (password != null) { + map.put("password", password); + } + if (defaultTtl != null) { + map.put("default_ttl", defaultTtl.toString()); + } + return map; + } + } + + @Builder + @Data + @RequiredArgsConstructor(access = AccessLevel.PRIVATE) + class Memory implements ServiceConfig { + /** + * root of the backend. + */ + private final String root; + + @Override + public String scheme() { + return "memory"; + } + + @Override + public HashMap configMap() { + final HashMap map = new HashMap<>(); + if (root != null) { + map.put("root", root); + } + return map; + } + } + + @Builder + @Data + @RequiredArgsConstructor(access = AccessLevel.PRIVATE) + class MiniMoka implements ServiceConfig { + /** + * Sets the max capacity of the cache. + *
<p>
+ * Refer to [`mini-moka::sync::CacheBuilder::max_capacity`](https://docs.rs/mini-moka/latest/mini_moka/sync/struct.CacheBuilder.html#method.max_capacity) + */ + private final Long maxCapacity; + /** + * Sets the time to live of the cache. + *
<p>
+ * Refer to [`mini-moka::sync::CacheBuilder::time_to_live`](https://docs.rs/mini-moka/latest/mini_moka/sync/struct.CacheBuilder.html#method.time_to_live) + */ + private final Duration timeToLive; + /** + * Sets the time to idle of the cache. + *
<p>
+ * Refer to [`mini-moka::sync::CacheBuilder::time_to_idle`](https://docs.rs/mini-moka/latest/mini_moka/sync/struct.CacheBuilder.html#method.time_to_idle) + */ + private final Duration timeToIdle; + /** + * root path of this backend + */ + private final String root; + + @Override + public String scheme() { + return "mini_moka"; + } + + @Override + public HashMap configMap() { + final HashMap map = new HashMap<>(); + if (maxCapacity != null) { + map.put("max_capacity", String.valueOf(maxCapacity)); + } + if (timeToLive != null) { + map.put("time_to_live", timeToLive.toString()); + } + if (timeToIdle != null) { + map.put("time_to_idle", timeToIdle.toString()); + } + if (root != null) { + map.put("root", root); + } + return map; + } + } + + @Builder + @Data + @RequiredArgsConstructor(access = AccessLevel.PRIVATE) + class Moka implements ServiceConfig { + /** + * Name for this cache instance. + */ + private final String name; + /** + * Sets the max capacity of the cache. + *
<p>
+ * Refer to [`moka::sync::CacheBuilder::max_capacity`](https://docs.rs/moka/latest/moka/sync/struct.CacheBuilder.html#method.max_capacity) + */ + private final Long maxCapacity; + /** + * Sets the time to live of the cache. + *
<p>
+ * Refer to [`moka::sync::CacheBuilder::time_to_live`](https://docs.rs/moka/latest/moka/sync/struct.CacheBuilder.html#method.time_to_live) + */ + private final Duration timeToLive; + /** + * Sets the time to idle of the cache. + *
<p>
+ * Refer to [`moka::sync::CacheBuilder::time_to_idle`](https://docs.rs/moka/latest/moka/sync/struct.CacheBuilder.html#method.time_to_idle) + */ + private final Duration timeToIdle; + /** + * Sets the segments number of the cache. + *
<p>
+ * Refer to [`moka::sync::CacheBuilder::segments`](https://docs.rs/moka/latest/moka/sync/struct.CacheBuilder.html#method.segments) + */ + private final Long numSegments; + /** + * root path of this backend + */ + private final String root; + + @Override + public String scheme() { + return "moka"; + } + + @Override + public HashMap configMap() { + final HashMap map = new HashMap<>(); + if (name != null) { + map.put("name", name); + } + if (maxCapacity != null) { + map.put("max_capacity", String.valueOf(maxCapacity)); + } + if (timeToLive != null) { + map.put("time_to_live", timeToLive.toString()); + } + if (timeToIdle != null) { + map.put("time_to_idle", timeToIdle.toString()); + } + if (numSegments != null) { + map.put("num_segments", String.valueOf(numSegments)); + } + if (root != null) { + map.put("root", root); + } + return map; + } + } + + @Builder + @Data + @RequiredArgsConstructor(access = AccessLevel.PRIVATE) + class Mongodb implements ServiceConfig { + /** + * connection string of this backend + */ + private final String connectionString; + /** + * database of this backend + */ + private final String database; + /** + * collection of this backend + */ + private final String collection; + /** + * root of this backend + */ + private final String root; + /** + * key field of this backend + */ + private final String keyField; + /** + * value field of this backend + */ + private final String valueField; + + @Override + public String scheme() { + return "mongodb"; + } + + @Override + public HashMap configMap() { + final HashMap map = new HashMap<>(); + if (connectionString != null) { + map.put("connection_string", connectionString); + } + if (database != null) { + map.put("database", database); + } + if (collection != null) { + map.put("collection", collection); + } + if (root != null) { + map.put("root", root); + } + if (keyField != null) { + map.put("key_field", keyField); + } + if (valueField != null) { + map.put("value_field", valueField); + } + return map; + } + } + + @Builder + @Data + @RequiredArgsConstructor(access = AccessLevel.PRIVATE) + class Monoiofs implements ServiceConfig { + /** + * The Root of this backend. + *
<p>
+ * All operations will happen under this root. + *
<p>
+ * Builder::build will return error if not set. + */ + private final String root; + + @Override + public String scheme() { + return "monoiofs"; + } + + @Override + public HashMap configMap() { + final HashMap map = new HashMap<>(); + if (root != null) { + map.put("root", root); + } + return map; + } + } + + @Builder + @Data + @RequiredArgsConstructor(access = AccessLevel.PRIVATE) + class Mysql implements ServiceConfig { + /** + * This connection string is used to connect to the mysql service. There are url based formats: + *
<p>
+ * ## Url + *
<p>
+ * This format resembles the url format of the mysql client. The format is: `[scheme://][user[:[password]]@]host[:port][/schema][?attribute1=value1&attribute2=value2...]` + *
<p>
+ * - `mysql://user@localhost` + * - `mysql://user:password@localhost` + * - `mysql://user:password@localhost:3306` + * - `mysql://user:password@localhost:3306/db` + *
<p>
+ * For more information, please refer to . + */ + private final String connectionString; + /** + * The table name for mysql. + */ + private final String table; + /** + * The key field name for mysql. + */ + private final String keyField; + /** + * The value field name for mysql. + */ + private final String valueField; + /** + * The root for mysql. + */ + private final String root; + + @Override + public String scheme() { + return "mysql"; + } + + @Override + public HashMap configMap() { + final HashMap map = new HashMap<>(); + if (connectionString != null) { + map.put("connection_string", connectionString); + } + if (table != null) { + map.put("table", table); + } + if (keyField != null) { + map.put("key_field", keyField); + } + if (valueField != null) { + map.put("value_field", valueField); + } + if (root != null) { + map.put("root", root); + } + return map; + } + } + + @Builder + @Data + @RequiredArgsConstructor(access = AccessLevel.PRIVATE) + class NebulaGraph implements ServiceConfig { + /** + * The host addr of nebulagraph's graphd server + */ + private final String host; + /** + * The host port of nebulagraph's graphd server + */ + private final Integer port; + /** + * The username of nebulagraph's graphd server + */ + private final String username; + /** + * The password of nebulagraph's graphd server + */ + private final String password; + /** + * The space name of nebulagraph's graphd server + */ + private final String space; + /** + * The tag name of nebulagraph's graphd server + */ + private final String tag; + /** + * The key field name of the NebulaGraph service to read/write. + */ + private final String keyField; + /** + * The value field name of the NebulaGraph service to read/write. + */ + private final String valueField; + /** + * The root for NebulaGraph + */ + private final String root; + + @Override + public String scheme() { + return "nebula_graph"; + } + + @Override + public HashMap configMap() { + final HashMap map = new HashMap<>(); + if (host != null) { + map.put("host", host); + } + if (port != null) { + map.put("port", String.valueOf(port)); + } + if (username != null) { + map.put("username", username); + } + if (password != null) { + map.put("password", password); + } + if (space != null) { + map.put("space", space); + } + if (tag != null) { + map.put("tag", tag); + } + if (keyField != null) { + map.put("key_field", keyField); + } + if (valueField != null) { + map.put("value_field", valueField); + } + if (root != null) { + map.put("root", root); + } + return map; + } + } + + @Builder + @Data + @RequiredArgsConstructor(access = AccessLevel.PRIVATE) + class Obs implements ServiceConfig { + /** + * Root for obs. + */ + private final String root; + /** + * Endpoint for obs. + */ + private final String endpoint; + /** + * Access key id for obs. + */ + private final String accessKeyId; + /** + * Secret access key for obs. + */ + private final String secretAccessKey; + /** + * Bucket for obs. 
+ */ + private final String bucket; + + @Override + public String scheme() { + return "obs"; + } + + @Override + public HashMap configMap() { + final HashMap map = new HashMap<>(); + if (root != null) { + map.put("root", root); + } + if (endpoint != null) { + map.put("endpoint", endpoint); + } + if (accessKeyId != null) { + map.put("access_key_id", accessKeyId); + } + if (secretAccessKey != null) { + map.put("secret_access_key", secretAccessKey); + } + if (bucket != null) { + map.put("bucket", bucket); + } + return map; + } + } + + @Builder + @Data + @RequiredArgsConstructor(access = AccessLevel.PRIVATE) + class Onedrive implements ServiceConfig { + /** + * bearer access token for OneDrive + */ + private final String accessToken; + /** + * root path of OneDrive folder. + */ + private final String root; + + @Override + public String scheme() { + return "onedrive"; + } + + @Override + public HashMap configMap() { + final HashMap map = new HashMap<>(); + if (accessToken != null) { + map.put("access_token", accessToken); + } + if (root != null) { + map.put("root", root); + } + return map; + } + } + + @Builder + @Data + @RequiredArgsConstructor(access = AccessLevel.PRIVATE) + class Oss implements ServiceConfig { + /** + * Bucket for oss. + */ + private final @NonNull String bucket; + /** + * Root for oss. + */ + private final String root; + /** + * Endpoint for oss. + */ + private final String endpoint; + /** + * Presign endpoint for oss. + */ + private final String presignEndpoint; + /** + * Server side encryption for oss. + */ + private final String serverSideEncryption; + /** + * Server side encryption key id for oss. + */ + private final String serverSideEncryptionKeyId; + /** + * Allow anonymous for oss. + */ + private final Boolean allowAnonymous; + /** + * Access key id for oss. + */ + private final String accessKeyId; + /** + * Access key secret for oss. + */ + private final String accessKeySecret; + /** + * The size of max batch operations. + * + * @deprecated Please use `delete_max_size` instead of `batch_max_operations` + */ + private final Long batchMaxOperations; + /** + * The size of max delete operations. + */ + private final Long deleteMaxSize; + /** + * If `role_arn` is set, we will use already known config as source + * credential to assume role with `role_arn`. + */ + private final String roleArn; + /** + * role_session_name for this backend. + */ + private final String roleSessionName; + /** + * `oidc_provider_arn` will be loaded from + *
<p>
+ * - this field if it's `is_some` + * - env value: [`ALIBABA_CLOUD_OIDC_PROVIDER_ARN`] + */ + private final String oidcProviderArn; + /** + * `oidc_token_file` will be loaded from + *
<p>
+ * - this field if it's `is_some` + * - env value: [`ALIBABA_CLOUD_OIDC_TOKEN_FILE`] + */ + private final String oidcTokenFile; + /** + * `sts_endpoint` will be loaded from + *
<p>
+ * - this field if it's `is_some` + * - env value: [`ALIBABA_CLOUD_STS_ENDPOINT`] + */ + private final String stsEndpoint; + + @Override + public String scheme() { + return "oss"; + } + + @Override + public HashMap configMap() { + final HashMap map = new HashMap<>(); + map.put("bucket", bucket); + if (root != null) { + map.put("root", root); + } + if (endpoint != null) { + map.put("endpoint", endpoint); + } + if (presignEndpoint != null) { + map.put("presign_endpoint", presignEndpoint); + } + if (serverSideEncryption != null) { + map.put("server_side_encryption", serverSideEncryption); + } + if (serverSideEncryptionKeyId != null) { + map.put("server_side_encryption_key_id", serverSideEncryptionKeyId); + } + if (allowAnonymous != null) { + map.put("allow_anonymous", String.valueOf(allowAnonymous)); + } + if (accessKeyId != null) { + map.put("access_key_id", accessKeyId); + } + if (accessKeySecret != null) { + map.put("access_key_secret", accessKeySecret); + } + if (batchMaxOperations != null) { + map.put("batch_max_operations", String.valueOf(batchMaxOperations)); + } + if (deleteMaxSize != null) { + map.put("delete_max_size", String.valueOf(deleteMaxSize)); + } + if (roleArn != null) { + map.put("role_arn", roleArn); + } + if (roleSessionName != null) { + map.put("role_session_name", roleSessionName); + } + if (oidcProviderArn != null) { + map.put("oidc_provider_arn", oidcProviderArn); + } + if (oidcTokenFile != null) { + map.put("oidc_token_file", oidcTokenFile); + } + if (stsEndpoint != null) { + map.put("sts_endpoint", stsEndpoint); + } + return map; + } + } + + @Builder + @Data + @RequiredArgsConstructor(access = AccessLevel.PRIVATE) + class Pcloud implements ServiceConfig { + /** + * pCloud endpoint address. + */ + private final @NonNull String endpoint; + /** + * root of this backend. + *
<p>
+ * All operations will happen under this root. + */ + private final String root; + /** + * pCloud username. + */ + private final String username; + /** + * pCloud password. + */ + private final String password; + + @Override + public String scheme() { + return "pcloud"; + } + + @Override + public HashMap configMap() { + final HashMap map = new HashMap<>(); + map.put("endpoint", endpoint); + if (root != null) { + map.put("root", root); + } + if (username != null) { + map.put("username", username); + } + if (password != null) { + map.put("password", password); + } + return map; + } + } + + @Builder + @Data + @RequiredArgsConstructor(access = AccessLevel.PRIVATE) + class Persy implements ServiceConfig { + /** + * That path to the persy data file. The directory in the path must already exist. + */ + private final String datafile; + /** + * That name of the persy segment. + */ + private final String segment; + /** + * That name of the persy index. + */ + private final String index; + + @Override + public String scheme() { + return "persy"; + } + + @Override + public HashMap configMap() { + final HashMap map = new HashMap<>(); + if (datafile != null) { + map.put("datafile", datafile); + } + if (segment != null) { + map.put("segment", segment); + } + if (index != null) { + map.put("index", index); + } + return map; + } + } + + @Builder + @Data + @RequiredArgsConstructor(access = AccessLevel.PRIVATE) + class Postgresql implements ServiceConfig { + /** + * Root of this backend. + *

+ * All operations will happen under this root. + *
+ * Default to `/` if not set. + */ + private final String root; + /** + * The URL should use a scheme of either `postgres://` or `postgresql://`. + *
+ * - `postgresql://user@localhost` + * - `postgresql://user:password@%2Fvar%2Flib%2Fpostgresql/mydb?connect_timeout=10` + * - `postgresql://user@host1:1234,host2,host3:5678?target_session_attrs=read-write` + * - `postgresql:///mydb?user=user&host=/var/lib/postgresql` + *
+ * For more information, please visit . + */ + private final String connectionString; + /** + * the table of postgresql + */ + private final String table; + /** + * the key field of postgresql + */ + private final String keyField; + /** + * the value field of postgresql + */ + private final String valueField; + + @Override + public String scheme() { + return "postgresql"; + } + + @Override + public HashMap configMap() { + final HashMap map = new HashMap<>(); + if (root != null) { + map.put("root", root); + } + if (connectionString != null) { + map.put("connection_string", connectionString); + } + if (table != null) { + map.put("table", table); + } + if (keyField != null) { + map.put("key_field", keyField); + } + if (valueField != null) { + map.put("value_field", valueField); + } + return map; + } + } + + @Builder + @Data + @RequiredArgsConstructor(access = AccessLevel.PRIVATE) + class Redb implements ServiceConfig { + /** + * path to the redb data directory. + */ + private final String datadir; + /** + * The root for redb. + */ + private final String root; + /** + * The table name for redb. + */ + private final String table; + + @Override + public String scheme() { + return "redb"; + } + + @Override + public HashMap configMap() { + final HashMap map = new HashMap<>(); + if (datadir != null) { + map.put("datadir", datadir); + } + if (root != null) { + map.put("root", root); + } + if (table != null) { + map.put("table", table); + } + return map; + } + } + + @Builder + @Data + @RequiredArgsConstructor(access = AccessLevel.PRIVATE) + class Redis implements ServiceConfig { + /** + * the number of DBs redis can take is unlimited + *

+ * default is db 0 + */ + private final long db; + /** + * network address of the Redis service, e.g. "tcp://127.0.0.1:6379". + *
+ * default is "tcp://127.0.0.1:6379" + */ + private final String endpoint; + /** + * network address of the Redis cluster service. Can be "tcp://127.0.0.1:6379,tcp://127.0.0.1:6380,tcp://127.0.0.1:6381", e.g. + *

+ * default is None + */ + private final String clusterEndpoints; + /** + * the username to connect to the redis service. + *
+ * default is None + */ + private final String username; + /** + * the password for authentication + *
+ * default is None + */ + private final String password; + /** + * the working directory of the Redis service, e.g. "/path/to/dir" + *
+ * default is "/" + */ + private final String root; + /** + * The default ttl for put operations. + */ + private final Duration defaultTtl; + + @Override + public String scheme() { + return "redis"; + } + + @Override + public HashMap configMap() { + final HashMap map = new HashMap<>(); + map.put("db", String.valueOf(db)); + if (endpoint != null) { + map.put("endpoint", endpoint); + } + if (clusterEndpoints != null) { + map.put("cluster_endpoints", clusterEndpoints); + } + if (username != null) { + map.put("username", username); + } + if (password != null) { + map.put("password", password); + } + if (root != null) { + map.put("root", root); + } + if (defaultTtl != null) { + map.put("default_ttl", defaultTtl.toString()); + } + return map; + } + } + + @Builder + @Data + @RequiredArgsConstructor(access = AccessLevel.PRIVATE) + class S3 implements ServiceConfig { + /** + * bucket name of this backend. + *

+ * required. + */ + private final @NonNull String bucket; + /** + * root of this backend. + *
+ * All operations will happen under this root. + *
+ * default to `/` if not set. + */ + private final String root; + /** + * whether bucket versioning is enabled for this bucket + */ + private final Boolean enableVersioning; + /** + * endpoint of this backend. + *
+ * Endpoint must be a full URI, e.g. + *
+ * - AWS S3: `https://s3.amazonaws.com` or `https://s3.{region}.amazonaws.com` + * - Cloudflare R2: `https://{account_id}.r2.cloudflarestorage.com` + * - Aliyun OSS: `https://{region}.aliyuncs.com` + * - Tencent COS: `https://cos.{region}.myqcloud.com` + * - Minio: `http://127.0.0.1:9000` + *
+ * If the user inputs an endpoint without a scheme, like "s3.amazonaws.com", we + * will prepend "https://" to it. + *
+ * - If endpoint is set, we will take user's input first. + * - If not, we will try to load it from environment. + * - If still not set, default to `https://s3.amazonaws.com`. + */ + private final String endpoint; + /** + * Region represents the signing region of this endpoint. This is required + * if you are using the default AWS S3 endpoint. + *
+ * If using a custom endpoint, + * - If region is set, we will take user's input first. + * - If not, we will try to load it from environment. + */ + private final String region; + /** + * access_key_id of this backend. + *
+ * - If access_key_id is set, we will take user's input first. + * - If not, we will try to load it from environment. + */ + private final String accessKeyId; + /** + * secret_access_key of this backend. + *
+ * - If secret_access_key is set, we will take user's input first. + * - If not, we will try to load it from environment. + */ + private final String secretAccessKey; + /** + * session_token (a.k.a. security token) of this backend. + *
+ * This token will expire after some time; it's recommended to set session_token + * by hand. + */ + private final String sessionToken; + /** + * role_arn for this backend. + *
+ * If `role_arn` is set, we will use the already known config as the source + * credential to assume the role with `role_arn`. + */ + private final String roleArn; + /** + * external_id for this backend. + */ + private final String externalId; + /** + * role_session_name for this backend. + */ + private final String roleSessionName; + /** + * Disable config load so that opendal will not load config from + * environment. + *
+ * For example: + *
+ * - envs like `AWS_ACCESS_KEY_ID` + * - files like `~/.aws/config` + */ + private final Boolean disableConfigLoad; + /** + * Disable loading credentials from ec2 metadata. + *
+ * This option is used to disable the default behavior of opendal + * to load credentials from ec2 metadata, a.k.a. IMDSv2 + */ + private final Boolean disableEc2Metadata; + /** + * Allow anonymous will allow opendal to send requests without signing + * when credential is not loaded. + */ + private final Boolean allowAnonymous; + /** + * server_side_encryption for this backend. + *
+ * Available values: `AES256`, `aws:kms`. + */ + private final String serverSideEncryption; + /** + * server_side_encryption_aws_kms_key_id for this backend + *
+ * - If `server_side_encryption` is set to `aws:kms`, and `server_side_encryption_aws_kms_key_id` + * is not set, S3 will use an aws managed kms key to encrypt data. + * - If `server_side_encryption` is set to `aws:kms`, and `server_side_encryption_aws_kms_key_id` + * is a valid kms key id, S3 will use the provided kms key to encrypt data. + * - If the `server_side_encryption_aws_kms_key_id` is invalid or not found, an error will be + * returned. + * - If `server_side_encryption` is not `aws:kms`, setting `server_side_encryption_aws_kms_key_id` + * is a noop. + */ + private final String serverSideEncryptionAwsKmsKeyId; + /** + * server_side_encryption_customer_algorithm for this backend. + *
+ * Available values: `AES256`. + */ + private final String serverSideEncryptionCustomerAlgorithm; + /** + * server_side_encryption_customer_key for this backend. + *
+ * # Value + *
+ * base64 encoded key that matches the algorithm specified in + * `server_side_encryption_customer_algorithm`. + */ + private final String serverSideEncryptionCustomerKey; + /** + * Set server_side_encryption_customer_key_md5 for this backend. + *
+ * # Value + *
+ * MD5 digest of the key specified in `server_side_encryption_customer_key`. + */ + private final String serverSideEncryptionCustomerKeyMd5; + /** + * default storage_class for this backend. + *
+ * Available values: + * - `DEEP_ARCHIVE` + * - `GLACIER` + * - `GLACIER_IR` + * - `INTELLIGENT_TIERING` + * - `ONEZONE_IA` + * - `OUTPOSTS` + * - `REDUCED_REDUNDANCY` + * - `STANDARD` + * - `STANDARD_IA` + *
+ * S3-compatible services don't support all of them. + */ + private final String defaultStorageClass; + /** + * Enable virtual host style so that opendal will send API requests + * in virtual host style instead of path style. + *
+ * - By default, opendal will send API requests to `https://s3.us-east-1.amazonaws.com/bucket_name` + * - When enabled, opendal will send API requests to `https://bucket_name.s3.us-east-1.amazonaws.com` + */ + private final Boolean enableVirtualHostStyle; + /** + * Set the maximum number of batch operations of this backend. + *
+ * Some compatible services have a limit on the number of operations in a batch request. + * For example, R2 could return `Internal Error` when batch deleting 1000 files. + *
+ * Please tune this value based on the service's documentation. + * + * @deprecated Please use `delete_max_size` instead of `batch_max_operations` + */ + private final Long batchMaxOperations; + /** + * Set the maximum delete size of this backend. + *
+ * Some compatible services have a limit on the number of operations in a batch request. + * For example, R2 could return `Internal Error` when batch deleting 1000 files. + *
+ * Please tune this value based on the service's documentation. + */ + private final Long deleteMaxSize; + /** + * Disable stat with override so that opendal will not send stat requests with override queries. + *
+ * For example, R2 doesn't support stat with the `response_content_type` query. + */ + private final Boolean disableStatWithOverride; + /** + * Checksum algorithm to use when sending checksums in HTTP headers. + * This is necessary, for example, when writing to AWS S3 buckets with Object Lock enabled. + *
+ * Available options: + * - "crc32c" + */ + private final String checksumAlgorithm; + /** + * Disable write with if match so that opendal will not send write requests with if match headers. + *
+ * For example, Ceph RADOS S3 doesn't support write with if match. + */ + private final Boolean disableWriteWithIfMatch; + + @Override + public String scheme() { + return "s3"; + } + + @Override + public HashMap configMap() { + final HashMap map = new HashMap<>(); + map.put("bucket", bucket); + if (root != null) { + map.put("root", root); + } + if (enableVersioning != null) { + map.put("enable_versioning", String.valueOf(enableVersioning)); + } + if (endpoint != null) { + map.put("endpoint", endpoint); + } + if (region != null) { + map.put("region", region); + } + if (accessKeyId != null) { + map.put("access_key_id", accessKeyId); + } + if (secretAccessKey != null) { + map.put("secret_access_key", secretAccessKey); + } + if (sessionToken != null) { + map.put("session_token", sessionToken); + } + if (roleArn != null) { + map.put("role_arn", roleArn); + } + if (externalId != null) { + map.put("external_id", externalId); + } + if (roleSessionName != null) { + map.put("role_session_name", roleSessionName); + } + if (disableConfigLoad != null) { + map.put("disable_config_load", String.valueOf(disableConfigLoad)); + } + if (disableEc2Metadata != null) { + map.put("disable_ec2_metadata", String.valueOf(disableEc2Metadata)); + } + if (allowAnonymous != null) { + map.put("allow_anonymous", String.valueOf(allowAnonymous)); + } + if (serverSideEncryption != null) { + map.put("server_side_encryption", serverSideEncryption); + } + if (serverSideEncryptionAwsKmsKeyId != null) { + map.put("server_side_encryption_aws_kms_key_id", serverSideEncryptionAwsKmsKeyId); + } + if (serverSideEncryptionCustomerAlgorithm != null) { + map.put("server_side_encryption_customer_algorithm", serverSideEncryptionCustomerAlgorithm); + } + if (serverSideEncryptionCustomerKey != null) { + map.put("server_side_encryption_customer_key", serverSideEncryptionCustomerKey); + } + if (serverSideEncryptionCustomerKeyMd5 != null) { + map.put("server_side_encryption_customer_key_md5", serverSideEncryptionCustomerKeyMd5); + } + if (defaultStorageClass != null) { + map.put("default_storage_class", defaultStorageClass); + } + if (enableVirtualHostStyle != null) { + map.put("enable_virtual_host_style", String.valueOf(enableVirtualHostStyle)); + } + if (batchMaxOperations != null) { + map.put("batch_max_operations", String.valueOf(batchMaxOperations)); + } + if (deleteMaxSize != null) { + map.put("delete_max_size", String.valueOf(deleteMaxSize)); + } + if (disableStatWithOverride != null) { + map.put("disable_stat_with_override", String.valueOf(disableStatWithOverride)); + } + if (checksumAlgorithm != null) { + map.put("checksum_algorithm", checksumAlgorithm); + } + if (disableWriteWithIfMatch != null) { + map.put("disable_write_with_if_match", String.valueOf(disableWriteWithIfMatch)); + } + return map; + } + } + + @Builder + @Data + @RequiredArgsConstructor(access = AccessLevel.PRIVATE) + class Seafile implements ServiceConfig { + /** + * repo_name of this backend. + *

+ * required. + */ + private final @NonNull String repoName; + /** + * root of this backend. + *
+ * All operations will happen under this root. + */ + private final String root; + /** + * endpoint address of this backend. + */ + private final String endpoint; + /** + * username of this backend. + */ + private final String username; + /** + * password of this backend. + */ + private final String password; + + @Override + public String scheme() { + return "seafile"; + } + + @Override + public HashMap configMap() { + final HashMap map = new HashMap<>(); + map.put("repo_name", repoName); + if (root != null) { + map.put("root", root); + } + if (endpoint != null) { + map.put("endpoint", endpoint); + } + if (username != null) { + map.put("username", username); + } + if (password != null) { + map.put("password", password); + } + return map; + } + } + + @Builder + @Data + @RequiredArgsConstructor(access = AccessLevel.PRIVATE) + class Sftp implements ServiceConfig { + /** + * endpoint of this backend + */ + private final String endpoint; + /** + * root of this backend + */ + private final String root; + /** + * user of this backend + */ + private final String user; + /** + * key of this backend + */ + private final String key; + /** + * known_hosts_strategy of this backend + */ + private final String knownHostsStrategy; + /** + * enable_copy of this backend + */ + private final Boolean enableCopy; + + @Override + public String scheme() { + return "sftp"; + } + + @Override + public HashMap configMap() { + final HashMap map = new HashMap<>(); + if (endpoint != null) { + map.put("endpoint", endpoint); + } + if (root != null) { + map.put("root", root); + } + if (user != null) { + map.put("user", user); + } + if (key != null) { + map.put("key", key); + } + if (knownHostsStrategy != null) { + map.put("known_hosts_strategy", knownHostsStrategy); + } + if (enableCopy != null) { + map.put("enable_copy", String.valueOf(enableCopy)); + } + return map; + } + } + + @Builder + @Data + @RequiredArgsConstructor(access = AccessLevel.PRIVATE) + class Sled implements ServiceConfig { + /** + * That path to the sled data directory. + */ + private final String datadir; + /** + * The root for sled. + */ + private final String root; + /** + * The tree for sled. + */ + private final String tree; + + @Override + public String scheme() { + return "sled"; + } + + @Override + public HashMap configMap() { + final HashMap map = new HashMap<>(); + if (datadir != null) { + map.put("datadir", datadir); + } + if (root != null) { + map.put("root", root); + } + if (tree != null) { + map.put("tree", tree); + } + return map; + } + } + + @Builder + @Data + @RequiredArgsConstructor(access = AccessLevel.PRIVATE) + class Sqlite implements ServiceConfig { + /** + * Set the connection_string of the sqlite service. + *

+ * This connection string is used to connect to the sqlite service. There are URL-based formats: + *
+ * ## Url + *
+ * This format resembles the URL format of the sqlite client: + *
+ * - `sqlite::memory:` + * - `sqlite:data.db` + * - `sqlite://data.db` + *
+ * For more information, please visit . + */ + private final String connectionString; + /** + * Set the table name of the sqlite service to read/write. + */ + private final String table; + /** + * Set the key field name of the sqlite service to read/write. + *

+ * Default to `key` if not specified. + */ + private final String keyField; + /** + * Set the value field name of the sqlite service to read/write. + *
+ * Default to `value` if not specified. + */ + private final String valueField; + /** + * Set the working directory; all operations will be performed under it. + *
+ * default: "/" + */ + private final String root; + + @Override + public String scheme() { + return "sqlite"; + } + + @Override + public HashMap configMap() { + final HashMap map = new HashMap<>(); + if (connectionString != null) { + map.put("connection_string", connectionString); + } + if (table != null) { + map.put("table", table); + } + if (keyField != null) { + map.put("key_field", keyField); + } + if (valueField != null) { + map.put("value_field", valueField); + } + if (root != null) { + map.put("root", root); + } + return map; + } + } + + @Builder + @Data + @RequiredArgsConstructor(access = AccessLevel.PRIVATE) + class Supabase implements ServiceConfig { + /** + * The bucket for supabase service. + */ + private final @NonNull String bucket; + /** + * The root for supabase service. + */ + private final String root; + /** + * The endpoint for supabase service. + */ + private final String endpoint; + /** + * The key for supabase service. + */ + private final String key; + + @Override + public String scheme() { + return "supabase"; + } + + @Override + public HashMap configMap() { + final HashMap map = new HashMap<>(); + map.put("bucket", bucket); + if (root != null) { + map.put("root", root); + } + if (endpoint != null) { + map.put("endpoint", endpoint); + } + if (key != null) { + map.put("key", key); + } + return map; + } + } + + @Builder + @Data + @RequiredArgsConstructor(access = AccessLevel.PRIVATE) + class Surrealdb implements ServiceConfig { + /** + * The connection string for surrealdb. + */ + private final String connectionString; + /** + * The username for surrealdb. + */ + private final String username; + /** + * The password for surrealdb. + */ + private final String password; + /** + * The namespace for surrealdb. + */ + private final String namespace; + /** + * The database for surrealdb. + */ + private final String database; + /** + * The table for surrealdb. + */ + private final String table; + /** + * The key field for surrealdb. + */ + private final String keyField; + /** + * The value field for surrealdb. + */ + private final String valueField; + /** + * The root for surrealdb. + */ + private final String root; + + @Override + public String scheme() { + return "surrealdb"; + } + + @Override + public HashMap configMap() { + final HashMap map = new HashMap<>(); + if (connectionString != null) { + map.put("connection_string", connectionString); + } + if (username != null) { + map.put("username", username); + } + if (password != null) { + map.put("password", password); + } + if (namespace != null) { + map.put("namespace", namespace); + } + if (database != null) { + map.put("database", database); + } + if (table != null) { + map.put("table", table); + } + if (keyField != null) { + map.put("key_field", keyField); + } + if (valueField != null) { + map.put("value_field", valueField); + } + if (root != null) { + map.put("root", root); + } + return map; + } + } + + @Builder + @Data + @RequiredArgsConstructor(access = AccessLevel.PRIVATE) + class Swift implements ServiceConfig { + /** + * The endpoint for Swift. + */ + private final String endpoint; + /** + * The container for Swift. + */ + private final String container; + /** + * The root for Swift. + */ + private final String root; + /** + * The token for Swift. 
+ */ + private final String token; + + @Override + public String scheme() { + return "swift"; + } + + @Override + public HashMap configMap() { + final HashMap map = new HashMap<>(); + if (endpoint != null) { + map.put("endpoint", endpoint); + } + if (container != null) { + map.put("container", container); + } + if (root != null) { + map.put("root", root); + } + if (token != null) { + map.put("token", token); + } + return map; + } + } + + @Builder + @Data + @RequiredArgsConstructor(access = AccessLevel.PRIVATE) + class Upyun implements ServiceConfig { + /** + * bucket address of this backend. + */ + private final @NonNull String bucket; + /** + * root of this backend. + *

+ * All operations will happen under this root. + */ + private final String root; + /** + * username of this backend. + */ + private final String operator; + /** + * password of this backend. + */ + private final String password; + + @Override + public String scheme() { + return "upyun"; + } + + @Override + public HashMap configMap() { + final HashMap map = new HashMap<>(); + map.put("bucket", bucket); + if (root != null) { + map.put("root", root); + } + if (operator != null) { + map.put("operator", operator); + } + if (password != null) { + map.put("password", password); + } + return map; + } + } + + @Builder + @Data + @RequiredArgsConstructor(access = AccessLevel.PRIVATE) + class VercelArtifacts implements ServiceConfig { + /** + * The access token for Vercel. + */ + private final String accessToken; + + @Override + public String scheme() { + return "vercel_artifacts"; + } + + @Override + public HashMap configMap() { + final HashMap map = new HashMap<>(); + if (accessToken != null) { + map.put("access_token", accessToken); + } + return map; + } + } + + @Builder + @Data + @RequiredArgsConstructor(access = AccessLevel.PRIVATE) + class VercelBlob implements ServiceConfig { + /** + * root of this backend. + *

+ * All operations will happen under this root. + */ + private final String root; + /** + * vercel blob token. + */ + private final String token; + + @Override + public String scheme() { + return "vercel_blob"; + } + + @Override + public HashMap configMap() { + final HashMap map = new HashMap<>(); + if (root != null) { + map.put("root", root); + } + if (token != null) { + map.put("token", token); + } + return map; + } + } + + @Builder + @Data + @RequiredArgsConstructor(access = AccessLevel.PRIVATE) + class Webdav implements ServiceConfig { + /** + * endpoint of this backend + */ + private final String endpoint; + /** + * username of this backend + */ + private final String username; + /** + * password of this backend + */ + private final String password; + /** + * token of this backend + */ + private final String token; + /** + * root of this backend + */ + private final String root; + /** + * WebDAV Service doesn't support copy. + */ + private final Boolean disableCopy; + + @Override + public String scheme() { + return "webdav"; + } + + @Override + public HashMap configMap() { + final HashMap map = new HashMap<>(); + if (endpoint != null) { + map.put("endpoint", endpoint); + } + if (username != null) { + map.put("username", username); + } + if (password != null) { + map.put("password", password); + } + if (token != null) { + map.put("token", token); + } + if (root != null) { + map.put("root", root); + } + if (disableCopy != null) { + map.put("disable_copy", String.valueOf(disableCopy)); + } + return map; + } + } + + @Builder + @Data + @RequiredArgsConstructor(access = AccessLevel.PRIVATE) + class Webhdfs implements ServiceConfig { + /** + * Root for webhdfs. + */ + private final String root; + /** + * Endpoint for webhdfs. + */ + private final String endpoint; + /** + * Delegation token for webhdfs. + */ + private final String delegation; + /** + * Disable batch listing + */ + private final Boolean disableListBatch; + /** + * atomic_write_dir of this backend + */ + private final String atomicWriteDir; + + @Override + public String scheme() { + return "webhdfs"; + } + + @Override + public HashMap configMap() { + final HashMap map = new HashMap<>(); + if (root != null) { + map.put("root", root); + } + if (endpoint != null) { + map.put("endpoint", endpoint); + } + if (delegation != null) { + map.put("delegation", delegation); + } + if (disableListBatch != null) { + map.put("disable_list_batch", String.valueOf(disableListBatch)); + } + if (atomicWriteDir != null) { + map.put("atomic_write_dir", atomicWriteDir); + } + return map; + } + } + + @Builder + @Data + @RequiredArgsConstructor(access = AccessLevel.PRIVATE) + class YandexDisk implements ServiceConfig { + /** + * yandex disk oauth access_token. + */ + private final @NonNull String accessToken; + /** + * root of this backend. + *

+ * All operations will happen under this root. + */ + private final String root; + + @Override + public String scheme() { + return "yandex_disk"; + } + + @Override + public HashMap configMap() { + final HashMap map = new HashMap<>(); + map.put("access_token", accessToken); + if (root != null) { + map.put("root", root); + } + return map; + } + } +} diff --git a/bindings/java/src/test/java/org/apache/opendal/test/AsyncExecutorTest.java b/bindings/java/src/test/java/org/apache/opendal/test/AsyncExecutorTest.java index c776e8b754a7..23241c868588 100644 --- a/bindings/java/src/test/java/org/apache/opendal/test/AsyncExecutorTest.java +++ b/bindings/java/src/test/java/org/apache/opendal/test/AsyncExecutorTest.java @@ -21,21 +21,20 @@ import static org.assertj.core.api.Assertions.assertThat; import java.nio.charset.StandardCharsets; -import java.util.HashMap; -import java.util.Map; import lombok.Cleanup; import org.apache.opendal.AsyncExecutor; import org.apache.opendal.AsyncOperator; +import org.apache.opendal.ServiceConfig; import org.junit.jupiter.api.Test; public class AsyncExecutorTest { @Test void testDedicatedTokioExecutor() { - final Map conf = new HashMap<>(); - conf.put("root", "/opendal/"); + final ServiceConfig.Memory memory = + ServiceConfig.Memory.builder().root("/opendal/").build(); final int cores = Runtime.getRuntime().availableProcessors(); @Cleanup final AsyncExecutor executor = AsyncExecutor.createTokioExecutor(cores); - @Cleanup final AsyncOperator op = AsyncOperator.of("memory", conf, executor); + @Cleanup final AsyncOperator op = AsyncOperator.of(memory, executor); assertThat(op.info).isNotNull(); final String key = "key"; diff --git a/bindings/java/src/test/java/org/apache/opendal/test/LayerTest.java b/bindings/java/src/test/java/org/apache/opendal/test/LayerTest.java index ed2ab4a1d264..98b7ff1a9b6c 100644 --- a/bindings/java/src/test/java/org/apache/opendal/test/LayerTest.java +++ b/bindings/java/src/test/java/org/apache/opendal/test/LayerTest.java @@ -20,11 +20,10 @@ package org.apache.opendal.test; import static org.assertj.core.api.Assertions.assertThat; -import java.util.HashMap; -import java.util.Map; import lombok.Cleanup; import org.apache.opendal.AsyncOperator; import org.apache.opendal.Layer; +import org.apache.opendal.ServiceConfig; import org.apache.opendal.layer.ConcurrentLimitLayer; import org.apache.opendal.layer.RetryLayer; import org.junit.jupiter.api.Test; @@ -32,20 +31,20 @@ public class LayerTest { @Test void testOperatorWithRetryLayer() { - final Map conf = new HashMap<>(); - conf.put("root", "/opendal/"); + final ServiceConfig.Memory memory = + ServiceConfig.Memory.builder().root("/opendal/").build(); final Layer retryLayer = RetryLayer.builder().build(); - @Cleanup final AsyncOperator op = AsyncOperator.of("memory", conf); + @Cleanup final AsyncOperator op = AsyncOperator.of(memory); @Cleanup final AsyncOperator layeredOp = op.layer(retryLayer); assertThat(layeredOp.info).isNotNull(); } @Test void testOperatorWithConcurrentLimitLayer() { - final Map conf = new HashMap<>(); - conf.put("root", "/opendal/"); + final ServiceConfig.Memory memory = + ServiceConfig.Memory.builder().root("/opendal/").build(); final Layer concurrentLimitLayer = new ConcurrentLimitLayer(1024); - @Cleanup final AsyncOperator op = AsyncOperator.of("memory", conf); + @Cleanup final AsyncOperator op = AsyncOperator.of(memory); @Cleanup final AsyncOperator layeredOp = op.layer(concurrentLimitLayer); assertThat(layeredOp.info).isNotNull(); } diff --git 
a/bindings/java/src/test/java/org/apache/opendal/test/MetadataTest.java b/bindings/java/src/test/java/org/apache/opendal/test/MetadataTest.java index 821390a12c46..06c0009cd627 100644 --- a/bindings/java/src/test/java/org/apache/opendal/test/MetadataTest.java +++ b/bindings/java/src/test/java/org/apache/opendal/test/MetadataTest.java @@ -23,12 +23,11 @@ import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertTrue; import java.nio.file.Path; -import java.util.HashMap; -import java.util.Map; import java.util.UUID; import org.apache.opendal.AsyncOperator; import org.apache.opendal.Metadata; import org.apache.opendal.Operator; +import org.apache.opendal.ServiceConfig; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.io.TempDir; @@ -38,10 +37,10 @@ public class MetadataTest { @Test public void testAsyncMetadata() { - final Map conf = new HashMap<>(); - conf.put("root", tempDir.toString()); + final ServiceConfig.Fs fs = + ServiceConfig.Fs.builder().root(tempDir.toString()).build(); - try (final AsyncOperator op = AsyncOperator.of("fs", conf)) { + try (final AsyncOperator op = AsyncOperator.of(fs)) { final String dir = UUID.randomUUID() + "/"; op.createDir(dir).join(); final Metadata dirMetadata = op.stat(dir).join(); @@ -69,10 +68,10 @@ public void testAsyncMetadata() { @Test public void testBlockingMetadata() { - final Map conf = new HashMap<>(); - conf.put("root", tempDir.toString()); + final ServiceConfig.Fs fs = + ServiceConfig.Fs.builder().root(tempDir.toString()).build(); - try (final Operator op = Operator.of("fs", conf)) { + try (final Operator op = Operator.of(fs)) { final String dir = UUID.randomUUID() + "/"; op.createDir(dir); final Metadata dirMetadata = op.stat(dir); diff --git a/bindings/java/src/test/java/org/apache/opendal/test/OperatorInfoTest.java b/bindings/java/src/test/java/org/apache/opendal/test/OperatorInfoTest.java index db48d22dad8d..511c998b40b0 100644 --- a/bindings/java/src/test/java/org/apache/opendal/test/OperatorInfoTest.java +++ b/bindings/java/src/test/java/org/apache/opendal/test/OperatorInfoTest.java @@ -21,11 +21,10 @@ import static org.assertj.core.api.Assertions.assertThat; import java.nio.file.Path; -import java.util.HashMap; -import java.util.Map; import org.apache.opendal.AsyncOperator; import org.apache.opendal.Operator; import org.apache.opendal.OperatorInfo; +import org.apache.opendal.ServiceConfig; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.io.TempDir; @@ -35,10 +34,10 @@ public class OperatorInfoTest { @Test public void testBlockingOperatorInfo() { - final Map conf = new HashMap<>(); - conf.put("root", tempDir.toString()); + final ServiceConfig.Fs fs = + ServiceConfig.Fs.builder().root(tempDir.toString()).build(); - try (final Operator op = Operator.of("fs", conf)) { + try (final Operator op = Operator.of(fs)) { final OperatorInfo info = op.info; assertThat(info).isNotNull(); assertThat(info.scheme).isEqualTo("fs"); @@ -57,9 +56,9 @@ public void testBlockingOperatorInfo() { @Test public void testOperatorInfo() { - final Map conf = new HashMap<>(); - conf.put("root", "/opendal/"); - try (final AsyncOperator op = AsyncOperator.of("memory", conf)) { + final ServiceConfig.Memory memory = + ServiceConfig.Memory.builder().root("/opendal/").build(); + try (final AsyncOperator op = AsyncOperator.of(memory)) { final OperatorInfo info = op.info; assertThat(info).isNotNull(); assertThat(info.scheme).isEqualTo("memory"); diff --git 
a/bindings/java/src/test/java/org/apache/opendal/test/OperatorInputOutputStreamTest.java b/bindings/java/src/test/java/org/apache/opendal/test/OperatorInputOutputStreamTest.java index 13de860caf9c..bfbbd2244d87 100644 --- a/bindings/java/src/test/java/org/apache/opendal/test/OperatorInputOutputStreamTest.java +++ b/bindings/java/src/test/java/org/apache/opendal/test/OperatorInputOutputStreamTest.java @@ -23,13 +23,12 @@ import java.io.BufferedReader; import java.io.InputStreamReader; import java.nio.file.Path; -import java.util.HashMap; -import java.util.Map; import java.util.concurrent.atomic.AtomicLong; import java.util.stream.Stream; import org.apache.opendal.Operator; import org.apache.opendal.OperatorInputStream; import org.apache.opendal.OperatorOutputStream; +import org.apache.opendal.ServiceConfig; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.io.TempDir; @@ -39,10 +38,10 @@ public class OperatorInputOutputStreamTest { @Test void testReadWriteWithStream() throws Exception { - final Map conf = new HashMap<>(); - conf.put("root", tempDir.toString()); + final ServiceConfig.Fs fs = + ServiceConfig.Fs.builder().root(tempDir.toString()).build(); - try (final Operator op = Operator.of("fs", conf)) { + try (final Operator op = Operator.of(fs)) { final String path = "OperatorInputOutputStreamTest.txt"; final long multi = 1024 * 1024; diff --git a/bindings/java/src/test/java/org/apache/opendal/test/OperatorUtf8DecodeTest.java b/bindings/java/src/test/java/org/apache/opendal/test/OperatorUtf8DecodeTest.java index 3c7d6fbbfd15..031b411a7ab6 100644 --- a/bindings/java/src/test/java/org/apache/opendal/test/OperatorUtf8DecodeTest.java +++ b/bindings/java/src/test/java/org/apache/opendal/test/OperatorUtf8DecodeTest.java @@ -21,10 +21,9 @@ import static org.assertj.core.api.Assertions.assertThat; import java.nio.file.Path; -import java.util.HashMap; -import java.util.Map; import org.apache.opendal.Metadata; import org.apache.opendal.Operator; +import org.apache.opendal.ServiceConfig; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.io.TempDir; @@ -39,10 +38,10 @@ public class OperatorUtf8DecodeTest { */ @Test public void testWriteFileWithNonAsciiName() { - final Map conf = new HashMap<>(); - conf.put("root", tempDir.toString()); + final ServiceConfig.Fs fs = + ServiceConfig.Fs.builder().root(tempDir.toString()).build(); - try (final Operator op = Operator.of("fs", conf)) { + try (final Operator op = Operator.of(fs)) { final String path = "βŒπŸ˜±δΈ­ζ–‡.test"; final byte[] content = "βŒπŸ˜±δΈ­ζ–‡".getBytes(); op.write(path, content); diff --git a/dev/Cargo.lock b/dev/Cargo.lock index 9557672df84b..ec2ffb62d9b4 100644 --- a/dev/Cargo.lock +++ b/dev/Cargo.lock @@ -187,6 +187,7 @@ dependencies = [ "anyhow", "clap", "env_logger", + "heck", "log", "minijinja", "pretty_assertions", diff --git a/dev/Cargo.toml b/dev/Cargo.toml index 2acaeecae7ca..1ff7ab62b77c 100644 --- a/dev/Cargo.toml +++ b/dev/Cargo.toml @@ -32,6 +32,7 @@ anyhow = { version = "1.0.95" } clap = { version = "4.5.23", features = ["derive"] } env_logger = { version = "0.11.5" } log = { version = "0.4.22" } +heck = { version = "0.5.0" } minijinja = { version = "2.5.0" } serde = { version = "1.0.216", features = ["derive"] } syn = { version = "2.0.91", features = ["visit", "full", "extra-traits"] } diff --git a/dev/src/generate/java.j2 b/dev/src/generate/java.j2 new file mode 100644 index 000000000000..7c98469a26b0 --- /dev/null +++ b/dev/src/generate/java.j2 @@ -0,0 +1,72 @@ +/* + * Licensed to the Apache 
Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +// DO NOT EDIT IT MANUALLY. This file is generated by opendal/dev/generate/java.rs. + +package org.apache.opendal; + +import java.time.Duration; +import java.util.HashMap; +import lombok.AccessLevel; +import lombok.Builder; +import lombok.Data; +import lombok.NonNull; +import lombok.RequiredArgsConstructor; + +/** + * Service configurations that are mapped from + * OpenDAL's services. + */ +@SuppressWarnings("unused") // intended to be used by users +public interface ServiceConfig { + String scheme(); + + HashMap configMap(); + + {% for srv in srvs %} + @Builder + @Data + @RequiredArgsConstructor(access = AccessLevel.PRIVATE) + class {{srv | case_java_class_name}} implements ServiceConfig { + {%- for field in srvs[srv].config %} + /** + * {{field.comments}} + {%- if field.deprecated %} + * @deprecated {{field.deprecated["note"]}} + {%- endif %} + */ + {{make_field(field)}} + {%- endfor %} + + @Override + public String scheme() { + return "{{srv}}"; + } + + @Override + public HashMap configMap() { + final HashMap map = new HashMap<>(); + {% for field in srvs[srv].config -%} + {{make_populate_map(field)}} + {% endfor -%} + return map; + } + } + {% endfor %} +} diff --git a/dev/src/generate/java.rs b/dev/src/generate/java.rs new file mode 100644 index 000000000000..2abd51760a1b --- /dev/null +++ b/dev/src/generate/java.rs @@ -0,0 +1,122 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
See the License for the +// specific language governing permissions and limitations +// under the License.
+ +use crate::generate::parser::{sorted_services, Config, ConfigType, Services}; +use anyhow::Result; +use minijinja::value::ViaDeserialize; +use minijinja::{context, Environment}; +use std::fs; +use std::path::PathBuf; + +fn enabled_service(srv: &str) -> bool { + match srv { + // not enabled in bindings/java/Cargo.toml + "foundationdb" | "ftp" | "hdfs" | "rocksdb" | "tikv" => false, + _ => true, + } +} + +pub fn generate(workspace_dir: PathBuf, services: Services) -> Result<()> { + let srvs = sorted_services(services, enabled_service); + let mut env = Environment::new(); + env.add_template("java", include_str!("java.j2"))?; + env.add_function("make_field", make_field); + env.add_function("make_populate_map", make_populate_map); + env.add_filter("case_java_class_name", case_java_class_name); + let tmpl = env.get_template("java")?; + + let output = + workspace_dir.join("bindings/java/src/main/java/org/apache/opendal/ServiceConfig.java"); + fs::write(output, tmpl.render(context! { srvs => srvs })?)?; + Ok(()) +} + +fn case_java_class_name(s: String) -> String { + heck::AsUpperCamelCase(s).to_string() +} + +fn case_java_field_name(s: String) -> String { + heck::AsLowerCamelCase(s).to_string() +} + +fn make_field(field: ViaDeserialize) -> Result { + let field_type = if field.optional { + match field.value { + ConfigType::Bool => "Boolean", + ConfigType::String => "String", + ConfigType::Duration => "Duration", + ConfigType::Usize | ConfigType::U64 | ConfigType::I64 => "Long", + ConfigType::U32 | ConfigType::U16 => "Integer", + ConfigType::Vec => "List", + } + } else { + match field.value { + ConfigType::Bool => "boolean", + ConfigType::String => "@NonNull String", + ConfigType::Duration => "@NonNull Duration", + ConfigType::Usize | ConfigType::U64 | ConfigType::I64 => "long", + ConfigType::U32 | ConfigType::U16 => "int", + ConfigType::Vec => "@NonNull List", + } + }; + + Ok(format!( + "private final {} {};", + field_type, + case_java_field_name(field.name.clone()) + )) +} + +fn make_populate_map(field: ViaDeserialize) -> Result { + let populate = match field.value { + ConfigType::Usize + | ConfigType::U64 + | ConfigType::I64 + | ConfigType::Bool + | ConfigType::U32 + | ConfigType::U16 => format!( + "map.put(\"{}\", String.valueOf({}));", + field.name, + case_java_field_name(field.name.clone()) + ), + ConfigType::String => format!( + "map.put(\"{}\", {});", + field.name, + case_java_field_name(field.name.clone()) + ), + ConfigType::Duration => format!( + "map.put(\"{}\", {}.toString());", + field.name, + case_java_field_name(field.name.clone()) + ), + ConfigType::Vec => format!( + "map.put(\"{}\", String.join(\",\", {}));", + field.name, + case_java_field_name(field.name.clone()) + ), + }; + + if field.optional { + Ok(format!( + "if ({} != null) {{\n {}\n}}", + case_java_field_name(field.name.clone()), + populate + )) + } else { + Ok(populate) + } +} diff --git a/dev/src/generate/mod.rs b/dev/src/generate/mod.rs index 780f19a7406a..2a36ce765a1d 100644 --- a/dev/src/generate/mod.rs +++ b/dev/src/generate/mod.rs @@ -15,6 +15,7 @@ // specific language governing permissions and limitations // under the License. 
+mod java; mod parser; mod python; @@ -27,6 +28,7 @@ pub fn run(language: &str) -> Result<()> { let services = parser::parse(&services_path.to_string_lossy())?; match language { + "java" => java::generate(workspace_dir, services), "python" | "py" => python::generate(workspace_dir, services), _ => anyhow::bail!("unsupported language: {}", language), } diff --git a/dev/src/generate/parser.rs b/dev/src/generate/parser.rs index 086d7be5a37b..ee4bc4d4c9c4 100644 --- a/dev/src/generate/parser.rs +++ b/dev/src/generate/parser.rs @@ -29,6 +29,21 @@ use syn::{ pub type Services = HashMap; +pub fn sorted_services(services: Services, test: fn(&str) -> bool) -> Services { + let mut srvs = Services::new(); + for (k, srv) in services.into_iter() { + if !test(k.as_str()) { + continue; + } + + let mut sorted = srv.config.into_iter().enumerate().collect::>(); + sorted.sort_by_key(|(i, v)| (v.optional, *i)); + let config = sorted.into_iter().map(|(_, v)| v).collect(); + srvs.insert(k, Service { config }); + } + srvs +} + /// Service represents a service supported by opendal core, like `s3` and `fs` #[derive(Debug, Clone, Eq, PartialEq, Ord, PartialOrd, Serialize, Deserialize)] pub struct Service { diff --git a/dev/src/generate/python.j2 b/dev/src/generate/python.j2 index 772407d52975..49036d278166 100644 --- a/dev/src/generate/python.j2 +++ b/dev/src/generate/python.j2 @@ -54,7 +54,7 @@ class _Base: {% if field.deprecated %} # deprecated: {{field.deprecated["note"]}} {%- endif %} - {{field.name}}: {{make_pytype(field.value)}}{% if field.optional %} = ...{% endif %}, + {{field.name}}: {{make_python_type(field.value)}}{% if field.optional %} = ...{% endif %}, {%- endfor %} ) -> None: ... {% endfor %} diff --git a/dev/src/generate/python.rs b/dev/src/generate/python.rs index 268a6e89d501..0e852ed0ca17 100644 --- a/dev/src/generate/python.rs +++ b/dev/src/generate/python.rs @@ -15,14 +15,13 @@ // specific language governing permissions and limitations // under the License. -use crate::generate::parser::{ConfigType, Service, Services}; +use crate::generate::parser::{sorted_services, ConfigType, Services}; use anyhow::Result; use minijinja::value::ViaDeserialize; use minijinja::{context, Environment}; use std::fs; use std::path::PathBuf; -/// TODO: add a common utils to parse enabled features from cargo.toml fn enabled_service(srv: &str) -> bool { match srv { // not enabled in bindings/python/Cargo.toml @@ -32,30 +31,18 @@ fn enabled_service(srv: &str) -> bool { } pub fn generate(workspace_dir: PathBuf, services: Services) -> Result<()> { - let mut srvs = Services::new(); - for (k, srv) in services.into_iter() { - if !enabled_service(k.as_str()) { - continue; - } - - let mut sorted = srv.config.into_iter().enumerate().collect::>(); - sorted.sort_by_key(|(i, v)| (v.optional, *i)); - let config = sorted.into_iter().map(|(_, v)| v).collect(); - srvs.insert(k, Service { config }); - } - + let srvs = sorted_services(services, enabled_service); let mut env = Environment::new(); env.add_template("python", include_str!("python.j2"))?; - env.add_function("make_pytype", make_pytype); + env.add_function("make_python_type", make_python_type); let tmpl = env.get_template("python")?; let output = workspace_dir.join("bindings/python/python/opendal/__base.pyi"); fs::write(output, tmpl.render(context! { srvs => srvs })?)?; - Ok(()) } -fn make_pytype(ty: ViaDeserialize) -> Result { +fn make_python_type(ty: ViaDeserialize) -> Result { Ok(match ty.0 { ConfigType::Bool => "_bool", ConfigType::Duration => "_duration",
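For readers of this patch, a minimal usage sketch of the generated API. The bucket name, region, and path below are placeholder values; the builder, scheme(), configMap(), and the AsyncOperator.of(ServiceConfig) factory are the pieces added in this change, and the write call mirrors the async tests touched above.

    // Build a typed config; optional fields left unset stay out of the config map.
    ServiceConfig.S3 config = ServiceConfig.S3.builder()
            .bucket("example-bucket") // required: @NonNull, always written into the map
            .region("us-east-1")      // optional: null fields are omitted from the map
            .build();
    // of(config) delegates to the existing constructor(scheme, map) path.
    try (AsyncOperator op = AsyncOperator.of(config)) {
        op.write("hello.txt", "Hello, OpenDAL!".getBytes()).join();
    }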