diff --git a/.github/workflows/build-proxy.yml b/.github/workflows/build-proxy.yml deleted file mode 100644 index 47ee21021..000000000 --- a/.github/workflows/build-proxy.yml +++ /dev/null @@ -1,25 +0,0 @@ -name: Build and upload proxy client to JFrog - -on: - push: - branches: - - stage - # TODO: snapshots_private has been removed from base parent pom.xml. Need to add workflow code to write snapshots_private to local pipeline pom.xml (this workflow will not work until that is done) - -jobs: - build: - runs-on: ubuntu-latest - steps: - - name: Checkout Java client - uses: actions/checkout@v2 - - - name: Set up settings.xml for Maven - uses: s4u/maven-settings-action@v2.8.0 - with: - servers: '[{"id": "snapshots_private", "username": "${{ secrets.JFROG_USERNAME }}", "password": "${{ secrets.JFROG_MAVEN_TOKEN }}"}]' - - - name: Build Java client - run: mvn install - - - name: Upload to JFrog - run: mvn deploy diff --git a/README.md b/README.md index 379248195..240e354ac 100644 --- a/README.md +++ b/README.md @@ -4,7 +4,6 @@ Aerospike Java Client Package Aerospike Java client. This package contains full source code for these projects. * client: Java native client library. -* proxy: Java proxy client library for dbaas (database as a service). * examples: Java client examples. * benchmarks: Java client benchmarks. * test: Java client unit tests. diff --git a/benchmarks/pom.xml b/benchmarks/pom.xml index 95d0ba097..e1d4033c8 100644 --- a/benchmarks/pom.xml +++ b/benchmarks/pom.xml @@ -6,7 +6,7 @@ com.aerospike aerospike-parent - 8.1.4 + 9.0.0 aerospike-benchmarks jar @@ -18,11 +18,6 @@ aerospike-client-jdk21 - - com.aerospike - aerospike-proxy-client - - io.netty netty-transport @@ -64,46 +59,36 @@ org.apache.maven.plugins maven-compiler-plugin - - 21 - 21 - - org.apache.maven.plugins - maven-shade-plugin - 3.4.1 + maven-assembly-plugin + + + jar-with-dependencies + + + + com.aerospike.benchmarks.Main + + + + make-my-jar-with-dependencies + package - shade + single - - - - *:* - - META-INF/*.SF - META-INF/*.DSA - META-INF/*.RSA - - - - true - jar-with-dependencies - - - - - com.aerospike.benchmarks.Main - - - - + + + resources + true + + diff --git a/benchmarks/src/com/aerospike/benchmarks/Main.java b/benchmarks/src/com/aerospike/benchmarks/Main.java index dd5e35c63..75f538478 100644 --- a/benchmarks/src/com/aerospike/benchmarks/Main.java +++ b/benchmarks/src/com/aerospike/benchmarks/Main.java @@ -37,6 +37,7 @@ import org.apache.commons.cli.Options; import org.apache.commons.cli.ParseException; +import com.aerospike.client.AerospikeClient; import com.aerospike.client.Host; import com.aerospike.client.IAerospikeClient; import com.aerospike.client.Key; @@ -62,11 +63,9 @@ import com.aerospike.client.policy.Replica; import com.aerospike.client.policy.TlsPolicy; import com.aerospike.client.policy.WritePolicy; -import com.aerospike.client.proxy.AerospikeClientFactory; import com.aerospike.client.util.Util; import io.netty.channel.EventLoopGroup; -import io.netty.channel.epoll.Epoll; import io.netty.channel.epoll.EpollEventLoopGroup; import io.netty.channel.kqueue.KQueueEventLoopGroup; import io.netty.channel.nio.NioEventLoopGroup; @@ -105,7 +104,6 @@ public static void main(String[] args) { private int nThreads; private int asyncMaxCommands = 100; private int eventLoopSize = 1; - private boolean useProxyClient; private boolean useVirtualThreads; private boolean asyncEnabled; private boolean initialize; @@ -349,8 +347,6 @@ public Main(String[] commandLineArgs) throws Exception { "Value: DIRECT_NIO | 
NETTY_NIO | NETTY_EPOLL | NETTY_KQUEUE | NETTY_IOURING" ); - options.addOption("proxy", false, "Use proxy client."); - options.addOption("upn", "udfPackageName", true, "Specify the package name where the udf function is located"); options.addOption("ufn", "udfFunctionName", true, "Specify the udf function name that must be used in the udf benchmarks"); options.addOption("ufv","udfFunctionValues",true, "The udf argument values comma separated"); @@ -382,10 +378,6 @@ public Main(String[] commandLineArgs) throws Exception { this.asyncEnabled = true; } - if (line.hasOption("proxy")) { - this.useProxyClient = true; - } - args.readPolicy = clientPolicy.readPolicyDefault; args.writePolicy = clientPolicy.writePolicyDefault; args.batchPolicy = clientPolicy.batchPolicyDefault; @@ -412,13 +404,6 @@ public Main(String[] commandLineArgs) throws Exception { this.port = 3000; } - // If the Aerospike server's default port (3000) is used and the proxy client is used, - // Reset the port to the proxy server's default port (4000). - if (port == 3000 && useProxyClient) { - System.out.println("Change proxy server port to 4000"); - port = 4000; - } - if (line.hasOption("hosts")) { this.hosts = Host.parseHosts(line.getOptionValue("hosts"), this.port); } @@ -1183,16 +1168,6 @@ public void runBenchmarks() throws Exception { eventPolicy.minTimeout = args.writePolicy.socketTimeout; } - if (this.useProxyClient && this.eventLoopType == EventLoopType.DIRECT_NIO) { - // Proxy client requires netty event loops. - if (Epoll.isAvailable()) { - this.eventLoopType = EventLoopType.NETTY_EPOLL; - } - else { - this.eventLoopType = EventLoopType.NETTY_NIO; - } - } - switch (this.eventLoopType) { default: case DIRECT_NIO: { @@ -1232,7 +1207,7 @@ public void runBenchmarks() throws Exception { clientPolicy.asyncMaxConnsPerNode = this.asyncMaxCommands; } - IAerospikeClient client = AerospikeClientFactory.getClient(clientPolicy, useProxyClient, hosts); + IAerospikeClient client = new AerospikeClient(clientPolicy, hosts); try { if (initialize) { @@ -1252,7 +1227,7 @@ public void runBenchmarks() throws Exception { } } else { - IAerospikeClient client = AerospikeClientFactory.getClient(clientPolicy, useProxyClient, hosts); + IAerospikeClient client = new AerospikeClient(clientPolicy, hosts); try { if (initialize) { diff --git a/client/pom.xml b/client/pom.xml index 9df36062a..914fcc4c0 100644 --- a/client/pom.xml +++ b/client/pom.xml @@ -6,7 +6,7 @@ com.aerospike aerospike-parent - 8.1.4 + 9.0.0 aerospike-client-jdk21 jar diff --git a/client/src/com/aerospike/client/AbortStatus.java b/client/src/com/aerospike/client/AbortStatus.java new file mode 100644 index 000000000..c55f59b66 --- /dev/null +++ b/client/src/com/aerospike/client/AbortStatus.java @@ -0,0 +1,34 @@ +/* + * Copyright 2012-2024 Aerospike, Inc. + * + * Portions may be licensed to Aerospike, Inc. under one or more contributor + * license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. 
+ */ +package com.aerospike.client; + +/** + * Multi-record transaction (MRT) abort status code. + */ +public enum AbortStatus { + OK("Abort succeeded"), + ALREADY_COMMITTED("Already committed"), + ALREADY_ABORTED("Already aborted"), + ROLL_BACK_ABANDONED("MRT client roll back abandoned. Server will eventually abort the MRT."), + CLOSE_ABANDONED("MRT has been rolled back, but MRT client close was abandoned. Server will eventually close the MRT."); + + public final String str; + + AbortStatus(String str) { + this.str = str; + } +} diff --git a/client/src/com/aerospike/client/AerospikeClient.java b/client/src/com/aerospike/client/AerospikeClient.java index df7c8dbd2..c22a1a925 100644 --- a/client/src/com/aerospike/client/AerospikeClient.java +++ b/client/src/com/aerospike/client/AerospikeClient.java @@ -37,13 +37,16 @@ import com.aerospike.client.async.AsyncExists; import com.aerospike.client.async.AsyncIndexTask; import com.aerospike.client.async.AsyncInfoCommand; -import com.aerospike.client.async.AsyncOperate; +import com.aerospike.client.async.AsyncOperateRead; +import com.aerospike.client.async.AsyncOperateWrite; import com.aerospike.client.async.AsyncQueryExecutor; import com.aerospike.client.async.AsyncQueryPartitionExecutor; import com.aerospike.client.async.AsyncRead; import com.aerospike.client.async.AsyncReadHeader; import com.aerospike.client.async.AsyncScanPartitionExecutor; import com.aerospike.client.async.AsyncTouch; +import com.aerospike.client.async.AsyncTxnMonitor; +import com.aerospike.client.async.AsyncTxnRoll; import com.aerospike.client.async.AsyncWrite; import com.aerospike.client.async.EventLoop; import com.aerospike.client.cdt.CTX; @@ -66,12 +69,15 @@ import com.aerospike.client.command.ExistsCommand; import com.aerospike.client.command.IBatchCommand; import com.aerospike.client.command.OperateArgs; -import com.aerospike.client.command.OperateCommand; +import com.aerospike.client.command.OperateCommandRead; +import com.aerospike.client.command.OperateCommandWrite; import com.aerospike.client.command.ReadCommand; import com.aerospike.client.command.ReadHeaderCommand; import com.aerospike.client.command.RegisterCommand; import com.aerospike.client.command.ScanExecutor; import com.aerospike.client.command.TouchCommand; +import com.aerospike.client.command.TxnMonitor; +import com.aerospike.client.command.TxnRoll; import com.aerospike.client.command.WriteCommand; import com.aerospike.client.exp.Expression; import com.aerospike.client.listener.BatchListListener; @@ -90,6 +96,8 @@ import com.aerospike.client.listener.RecordArrayListener; import com.aerospike.client.listener.RecordListener; import com.aerospike.client.listener.RecordSequenceListener; +import com.aerospike.client.listener.AbortListener; +import com.aerospike.client.listener.CommitListener; import com.aerospike.client.listener.WriteListener; import com.aerospike.client.metrics.MetricsPolicy; import com.aerospike.client.policy.AdminPolicy; @@ -102,6 +110,8 @@ import com.aerospike.client.policy.Policy; import com.aerospike.client.policy.QueryPolicy; import com.aerospike.client.policy.ScanPolicy; +import com.aerospike.client.policy.TxnRollPolicy; +import com.aerospike.client.policy.TxnVerifyPolicy; import com.aerospike.client.policy.WritePolicy; import com.aerospike.client.query.IndexCollectionType; import com.aerospike.client.query.IndexType; @@ -187,7 +197,7 @@ public class AerospikeClient implements IAerospikeClient, Closeable { public final BatchDeletePolicy batchDeletePolicyDefault; /** - * Default user 
defined function policy used in batch UDF excecute commands. + * Default user defined function policy used in batch UDF execute commands. */ public final BatchUDFPolicy batchUDFPolicyDefault; @@ -196,6 +206,17 @@ public class AerospikeClient implements IAerospikeClient, Closeable { */ public final InfoPolicy infoPolicyDefault; + /** + * Default multi-record transaction (MRT) policy when verifying record versions in a batch on a commit. + */ + public final TxnVerifyPolicy txnVerifyPolicyDefault; + + /** + * Default multi-record transaction (MRT) policy when rolling the transaction records forward (commit) + * or back (abort) in a batch. + */ + public final TxnRollPolicy txnRollPolicyDefault; + private final WritePolicy operatePolicyReadDefault; //------------------------------------------------------- @@ -294,6 +315,8 @@ public AerospikeClient(ClientPolicy policy, Host... hosts) this.batchDeletePolicyDefault = policy.batchDeletePolicyDefault; this.batchUDFPolicyDefault = policy.batchUDFPolicyDefault; this.infoPolicyDefault = policy.infoPolicyDefault; + this.txnVerifyPolicyDefault = policy.txnVerifyPolicyDefault; + this.txnRollPolicyDefault = policy.txnRollPolicyDefault; this.operatePolicyReadDefault = new WritePolicy(this.readPolicyDefault); cluster = new Cluster(this, policy, hosts); @@ -318,6 +341,8 @@ protected AerospikeClient(ClientPolicy policy) { this.batchDeletePolicyDefault = policy.batchDeletePolicyDefault; this.batchUDFPolicyDefault = policy.batchUDFPolicyDefault; this.infoPolicyDefault = policy.infoPolicyDefault; + this.txnVerifyPolicyDefault = policy.txnVerifyPolicyDefault; + this.txnRollPolicyDefault = policy.txnRollPolicyDefault; this.operatePolicyReadDefault = new WritePolicy(this.readPolicyDefault); } else { @@ -331,6 +356,8 @@ protected AerospikeClient(ClientPolicy policy) { this.batchDeletePolicyDefault = new BatchDeletePolicy(); this.batchUDFPolicyDefault = new BatchUDFPolicy(); this.infoPolicyDefault = new InfoPolicy(); + this.txnVerifyPolicyDefault = new TxnVerifyPolicy(); + this.txnRollPolicyDefault = new TxnRollPolicy(); this.operatePolicyReadDefault = new WritePolicy(this.readPolicyDefault); } } @@ -340,145 +367,159 @@ protected AerospikeClient(ClientPolicy policy) { //------------------------------------------------------- /** - * Return read policy default. Use when the policy will not be modified. + * Copy read policy default to avoid problems if this shared instance is later modified. */ public final Policy getReadPolicyDefault() { - return readPolicyDefault; + return new Policy(readPolicyDefault); } /** - * Copy read policy default. Use when the policy will be modified for use in a specific transaction. + * Copy read policy default. */ public final Policy copyReadPolicyDefault() { return new Policy(readPolicyDefault); } /** - * Return write policy default. Use when the policy will not be modified. + * Copy write policy default to avoid problems if this shared instance is later modified. */ public final WritePolicy getWritePolicyDefault() { - return writePolicyDefault; + return new WritePolicy(writePolicyDefault); } /** - * Copy write policy default. Use when the policy will be modified for use in a specific transaction. + * Copy write policy default. */ public final WritePolicy copyWritePolicyDefault() { return new WritePolicy(writePolicyDefault); } /** - * Return scan policy default. Use when the policy will not be modified. + * Copy scan policy default to avoid problems if this shared instance is later modified. 
*/ public final ScanPolicy getScanPolicyDefault() { - return scanPolicyDefault; + return new ScanPolicy(scanPolicyDefault); } /** - * Copy scan policy default. Use when the policy will be modified for use in a specific transaction. + * Copy scan policy default. */ public final ScanPolicy copyScanPolicyDefault() { return new ScanPolicy(scanPolicyDefault); } /** - * Return query policy default. Use when the policy will not be modified. + * Copy query policy default to avoid problems if this shared instance is later modified. */ public final QueryPolicy getQueryPolicyDefault() { - return queryPolicyDefault; + return new QueryPolicy(queryPolicyDefault); } /** - * Copy query policy default. Use when the policy will be modified for use in a specific transaction. + * Copy query policy default. */ public final QueryPolicy copyQueryPolicyDefault() { return new QueryPolicy(queryPolicyDefault); } /** - * Return batch header read policy default. Use when the policy will not be modified. + * Copy batch header read policy default to avoid problems if this shared instance is later modified. */ public final BatchPolicy getBatchPolicyDefault() { - return batchPolicyDefault; + return new BatchPolicy(batchPolicyDefault); } /** - * Copy batch header read policy default. Use when the policy will be modified for use in a specific transaction. + * Copy batch header read policy default. */ public final BatchPolicy copyBatchPolicyDefault() { return new BatchPolicy(batchPolicyDefault); } /** - * Return batch header write policy default. Use when the policy will not be modified. + * Copy batch header write policy default to avoid problems if this shared instance is later modified. */ public final BatchPolicy getBatchParentPolicyWriteDefault() { - return batchParentPolicyWriteDefault; + return new BatchPolicy(batchParentPolicyWriteDefault); } /** - * Copy batch header write policy default. Use when the policy will be modified for use in a specific transaction. + * Copy batch header write policy default. */ public final BatchPolicy copyBatchParentPolicyWriteDefault() { return new BatchPolicy(batchParentPolicyWriteDefault); } /** - * Return batch detail write policy default. Use when the policy will not be modified. + * Copy batch detail write policy default to avoid problems if this shared instance is later modified. */ public final BatchWritePolicy getBatchWritePolicyDefault() { - return batchWritePolicyDefault; + return new BatchWritePolicy(batchWritePolicyDefault); } /** - * Copy batch detail write policy default. Use when the policy will be modified for use in a specific transaction. + * Copy batch detail write policy default. */ public final BatchWritePolicy copyBatchWritePolicyDefault() { return new BatchWritePolicy(batchWritePolicyDefault); } /** - * Return batch detail delete policy default. Use when the policy will not be modified. + * Copy batch detail delete policy default to avoid problems if this shared instance is later modified. */ public final BatchDeletePolicy getBatchDeletePolicyDefault() { - return batchDeletePolicyDefault; + return new BatchDeletePolicy(batchDeletePolicyDefault); } /** - * Copy batch detail delete policy default. Use when the policy will be modified for use in a specific transaction. + * Copy batch detail delete policy default. */ public final BatchDeletePolicy copyBatchDeletePolicyDefault() { return new BatchDeletePolicy(batchDeletePolicyDefault); } /** - * Return batch detail UDF policy default. Use when the policy will not be modified. 
+ * Copy batch detail UDF policy default to avoid problems if this shared instance is later modified. */ public final BatchUDFPolicy getBatchUDFPolicyDefault() { - return batchUDFPolicyDefault; + return new BatchUDFPolicy(batchUDFPolicyDefault); } /** - * Copy batch detail UDF policy default. Use when the policy will be modified for use in a specific transaction. + * Copy batch detail UDF policy default. */ public final BatchUDFPolicy copyBatchUDFPolicyDefault() { return new BatchUDFPolicy(batchUDFPolicyDefault); } /** - * Return info command policy default. Use when the policy will not be modified. + * Copy info command policy default to avoid problems if this shared instance is later modified. */ public final InfoPolicy getInfoPolicyDefault() { - return infoPolicyDefault; + return new InfoPolicy(infoPolicyDefault); } /** - * Copy info command policy default. Use when the policy will be modified for use in a specific transaction. + * Copy info command policy default. */ public final InfoPolicy copyInfoPolicyDefault() { return new InfoPolicy(infoPolicyDefault); } + /** + * Copy MRT record version verify policy default. + */ + public final TxnVerifyPolicy copyTxnVerifyPolicyDefault() { + return new TxnVerifyPolicy(txnVerifyPolicyDefault); + } + + /** + * Copy MRT roll forward/back policy default. + */ + public final TxnRollPolicy copyTxnRollPolicyDefault() { + return new TxnRollPolicy(txnRollPolicyDefault); + } + //------------------------------------------------------- // Cluster Connection Management //------------------------------------------------------- @@ -574,13 +615,160 @@ public final Cluster getCluster() { return cluster; } + //------------------------------------------------------- + // Multi-Record Transactions + //------------------------------------------------------- + + /** + * Attempt to commit the given multi-record transaction. First, the expected record versions are + * sent to the server nodes for verification. If all nodes return success, the transaction is + * committed. Otherwise, the transaction is aborted. + *
<p>
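Earlier in this hunk the get*PolicyDefault() accessors were changed to hand back copies of the shared defaults instead of the live instances. A short sketch of the practical effect (hypothetical namespace/set/bin names, existing client assumed):

    WritePolicy wp = client.getWritePolicyDefault();   // now a copy, per the updated javadoc
    wp.sendKey = true;                                 // affects only this copy, not the client-wide default
    client.put(wp, new Key("test", "demo", 1), new Bin("name", "value"));

Callers that previously relied on mutating the returned object to change the client-wide default would now need to configure the desired values on ClientPolicy before constructing the client.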
+ * Requires server version 8.0+ + * + * @param txn multi-record transaction + * @return status of the commit on success + * @throws AerospikeException.Commit if verify commit fails + */ + public final CommitStatus commit(Txn txn) + throws AerospikeException.Commit { + + TxnRoll tr = new TxnRoll(cluster, txn); + + switch (txn.getState()) { + default: + case OPEN: + tr.verify(txnVerifyPolicyDefault, txnRollPolicyDefault); + return tr.commit(txnRollPolicyDefault); + + case VERIFIED: + return tr.commit(txnRollPolicyDefault); + + case COMMITTED: + return CommitStatus.ALREADY_COMMITTED; + + case ABORTED: + return CommitStatus.ALREADY_ABORTED; + } + } + + /** + * Asynchronously attempt to commit the given multi-record transaction. First, the expected + * record versions are sent to the server nodes for verification. If all nodes return success, + * the transaction is committed. Otherwise, the transaction is aborted. + *
<p>
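A minimal end-to-end sketch of the synchronous flow defined above: reads and writes join the transaction through the policy's txn field, commit() verifies record versions and rolls forward, and abort() is the failure path. The no-arg Txn constructor, the com.aerospike.client package location of Txn, and the public Policy.txn field are inferred from context here rather than shown in this hunk, and the namespace/bin names are made up:

    import com.aerospike.client.AerospikeException;
    import com.aerospike.client.Bin;
    import com.aerospike.client.CommitStatus;
    import com.aerospike.client.IAerospikeClient;
    import com.aerospike.client.Key;
    import com.aerospike.client.Record;
    import com.aerospike.client.Txn;
    import com.aerospike.client.policy.Policy;
    import com.aerospike.client.policy.WritePolicy;

    public final class TxnSketch {
        // Read-modify-write one record inside an MRT, then commit; roll back on any other failure.
        static CommitStatus adjustBalance(IAerospikeClient client, Key key, int delta) {
            Txn txn = new Txn();                               // assumed no-arg constructor

            Policy rp = client.copyReadPolicyDefault();
            rp.txn = txn;                                      // read captures the record version for commit-time verify

            WritePolicy wp = client.copyWritePolicyDefault();
            wp.txn = txn;                                      // write is tracked by the MRT monitor

            try {
                Record rec = client.get(rp, key);
                int balance = (rec != null) ? rec.getInt("bal") : 0;
                client.put(wp, key, new Bin("bal", balance + delta));
                return client.commit(txn);                     // verify versions, then roll forward
            }
            catch (AerospikeException.Commit ce) {
                throw ce;                                      // verify failed; commit() already aborted the MRT
            }
            catch (AerospikeException ae) {
                client.abort(txn);                             // roll back anything written under txn
                throw ae;
            }
        }
    }

When verification fails, the MRT has already been aborted by commit() (per the CommitError messages added in this diff), which is why the Commit catch block above does not call abort() again.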
+ * This method registers the command with an event loop and returns. + * The event loop thread will process the command and send the results to the listener. + *
<p>
+ * Requires server version 8.0+ + * + * @param eventLoop event loop that will process the command. If NULL, the event + * loop will be chosen by round-robin. + * @param listener where to send results + * @param txn multi-record transaction + * @throws AerospikeException if event loop registration fails + */ + public final void commit(EventLoop eventLoop, CommitListener listener, Txn txn) + throws AerospikeException { + if (eventLoop == null) { + eventLoop = cluster.eventLoops.next(); + } + + AsyncTxnRoll atr = new AsyncTxnRoll( + cluster, eventLoop, txnVerifyPolicyDefault, txnRollPolicyDefault, txn + ); + + switch (txn.getState()) { + default: + case OPEN: + atr.verify(listener); + break; + + case VERIFIED: + atr.commit(listener); + break; + + case COMMITTED: + listener.onSuccess(CommitStatus.ALREADY_COMMITTED); + break; + + case ABORTED: + listener.onSuccess(CommitStatus.ALREADY_ABORTED); + break; + } + } + + /** + * Abort and rollback the given multi-record transaction. + *
<p>
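For the asynchronous commit path above, the outcome arrives on a listener instead of as a return value. Only onSuccess(CommitStatus) is visible in this hunk; the onFailure(AerospikeException.Commit) callback is assumed to mirror the other async listeners. Sketch (existing client and txn, client configured with event loops):

    client.commit(null, new CommitListener() {                 // null event loop => chosen by round-robin
        @Override
        public void onSuccess(CommitStatus status) {
            System.out.println("commit: " + status.str);
        }

        @Override
        public void onFailure(AerospikeException.Commit ce) {  // assumed failure callback signature
            System.err.println("commit failed: " + ce.getMessage());
        }
    }, txn);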
+ * Requires server version 8.0+ + * + * @param txn multi-record transaction + * @return status of the abort + */ + public final AbortStatus abort(Txn txn) { + TxnRoll tr = new TxnRoll(cluster, txn); + + switch (txn.getState()) { + default: + case OPEN: + case VERIFIED: + return tr.abort(txnRollPolicyDefault); + + case COMMITTED: + return AbortStatus.ALREADY_COMMITTED; + + case ABORTED: + return AbortStatus.ALREADY_ABORTED; + } + } + + /** + * Asynchronously abort and rollback the given multi-record transaction. + *
<p>
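The synchronous abort above is safe to call regardless of where the transaction stopped: OPEN and VERIFIED transactions are rolled back, while already-finished ones simply report their terminal state. A short sketch of handling the returned AbortStatus (existing client and txn assumed):

    AbortStatus status = client.abort(txn);
    switch (status) {
        case OK:
            System.out.println("rolled back");
            break;
        case ALREADY_COMMITTED:
        case ALREADY_ABORTED:
            System.out.println("nothing to roll back: " + status.str);
            break;
        default:
            // ROLL_BACK_ABANDONED / CLOSE_ABANDONED: the server finishes cleanup eventually.
            System.out.println(status.str);
            break;
    }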
+ * This method registers the command with an event loop and returns. + * The event loop thread will process the command and send the results to the listener. + *
<p>
+ * Requires server version 8.0+ + * + * @param eventLoop event loop that will process the command. If NULL, the event + * loop will be chosen by round-robin. + * @param listener where to send results + * @param txn multi-record transaction + * @throws AerospikeException if event loop registration fails + */ + public final void abort(EventLoop eventLoop, AbortListener listener, Txn txn) + throws AerospikeException { + if (eventLoop == null) { + eventLoop = cluster.eventLoops.next(); + } + + AsyncTxnRoll atr = new AsyncTxnRoll(cluster, eventLoop, null, txnRollPolicyDefault, txn); + + switch (txn.getState()) { + default: + case OPEN: + case VERIFIED: + atr.abort(listener); + break; + + case COMMITTED: + listener.onSuccess(AbortStatus.ALREADY_COMMITTED); + break; + + case ABORTED: + listener.onSuccess(AbortStatus.ALREADY_ABORTED); + break; + } + } + //------------------------------------------------------- // Write Record Operations //------------------------------------------------------- /** * Write record bin(s). - * The policy specifies the transaction timeout, record expiration and how the transaction is + * The policy specifies the command timeouts, record expiration and how the command is * handled when the record already exists. * * @param policy write configuration parameters, pass in null for defaults @@ -593,6 +781,11 @@ public final void put(WritePolicy policy, Key key, Bin... bins) if (policy == null) { policy = writePolicyDefault; } + + if (policy.txn != null) { + TxnMonitor.addKey(cluster, policy, key); + } + WriteCommand command = new WriteCommand(cluster, policy, key, bins, Operation.Type.WRITE); command.execute(); } @@ -602,7 +795,7 @@ public final void put(WritePolicy policy, Key key, Bin... bins) * This method registers the command with an event loop and returns. * The event loop thread will process the command and send the results to the listener. *
<p>
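Writes participate in an MRT simply by setting the policy's txn field: the synchronous put() above registers the key with TxnMonitor, and the asynchronous variants now route through AsyncTxnMonitor.execute(). A sketch of the async case, using the existing WriteListener interface (callback signatures assumed; existing client/txn and an event-loop-enabled ClientPolicy also assumed):

    WritePolicy wp = client.copyWritePolicyDefault();
    wp.txn = txn;                                              // this write becomes part of the MRT

    client.put(null, new WriteListener() {                     // null event loop => chosen by round-robin
        @Override
        public void onSuccess(Key key) {
            System.out.println("MRT write applied to " + key);
        }

        @Override
        public void onFailure(AerospikeException ae) {
            System.err.println("MRT write failed: " + ae.getMessage());
        }
    }, wp, new Key("test", "demo", 2), new Bin("bal", 50));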
- * The policy specifies the transaction timeout, record expiration and how the transaction is + * The policy specifies the command timeout, record expiration and how the command is * handled when the record already exists. * * @param eventLoop event loop that will process the command. If NULL, the event @@ -622,8 +815,9 @@ public final void put(EventLoop eventLoop, WriteListener listener, WritePolicy p if (policy == null) { policy = writePolicyDefault; } + AsyncWrite command = new AsyncWrite(cluster, listener, policy, key, bins, Operation.Type.WRITE); - eventLoop.execute(cluster, command); + AsyncTxnMonitor.execute(eventLoop, cluster, policy, command); } //------------------------------------------------------- @@ -632,7 +826,7 @@ public final void put(EventLoop eventLoop, WriteListener listener, WritePolicy p /** * Append bin string values to existing record bin values. - * The policy specifies the transaction timeout, record expiration and how the transaction is + * The policy specifies the command timeout, record expiration and how the command is * handled when the record already exists. * This call only works for string values. * @@ -646,6 +840,11 @@ public final void append(WritePolicy policy, Key key, Bin... bins) if (policy == null) { policy = writePolicyDefault; } + + if (policy.txn != null) { + TxnMonitor.addKey(cluster, policy, key); + } + WriteCommand command = new WriteCommand(cluster, policy, key, bins, Operation.Type.APPEND); command.execute(); } @@ -655,7 +854,7 @@ public final void append(WritePolicy policy, Key key, Bin... bins) * This method registers the command with an event loop and returns. * The event loop thread will process the command and send the results to the listener. *
<p>
- * The policy specifies the transaction timeout, record expiration and how the transaction is + * The policy specifies the command timeout, record expiration and how the command is * handled when the record already exists. * This call only works for string values. * @@ -676,13 +875,14 @@ public final void append(EventLoop eventLoop, WriteListener listener, WritePolic if (policy == null) { policy = writePolicyDefault; } + AsyncWrite command = new AsyncWrite(cluster, listener, policy, key, bins, Operation.Type.APPEND); - eventLoop.execute(cluster, command); + AsyncTxnMonitor.execute(eventLoop, cluster, policy, command); } /** * Prepend bin string values to existing record bin values. - * The policy specifies the transaction timeout, record expiration and how the transaction is + * The policy specifies the command timeout, record expiration and how the command is * handled when the record already exists. * This call works only for string values. * @@ -696,6 +896,11 @@ public final void prepend(WritePolicy policy, Key key, Bin... bins) if (policy == null) { policy = writePolicyDefault; } + + if (policy.txn != null) { + TxnMonitor.addKey(cluster, policy, key); + } + WriteCommand command = new WriteCommand(cluster, policy, key, bins, Operation.Type.PREPEND); command.execute(); } @@ -705,7 +910,7 @@ public final void prepend(WritePolicy policy, Key key, Bin... bins) * This method registers the command with an event loop and returns. * The event loop thread will process the command and send the results to the listener. *
<p>
- * The policy specifies the transaction timeout, record expiration and how the transaction is + * The policy specifies the command timeout, record expiration and how the command is * handled when the record already exists. * This call only works for string values. * @@ -726,8 +931,9 @@ public final void prepend(EventLoop eventLoop, WriteListener listener, WritePoli if (policy == null) { policy = writePolicyDefault; } + AsyncWrite command = new AsyncWrite(cluster, listener, policy, key, bins, Operation.Type.PREPEND); - eventLoop.execute(cluster, command); + AsyncTxnMonitor.execute(eventLoop, cluster, policy, command); } //------------------------------------------------------- @@ -737,7 +943,7 @@ public final void prepend(EventLoop eventLoop, WriteListener listener, WritePoli /** * Add integer/double bin values to record bin values. If the record or bin does not exist, the * record/bin will be created by default with the value to be added. The policy specifies the - * transaction timeout, record expiration and how the transaction is handled when the record + * command timeout, record expiration and how the command is handled when the record * already exists. * * @param policy write configuration parameters, pass in null for defaults @@ -750,6 +956,11 @@ public final void add(WritePolicy policy, Key key, Bin... bins) if (policy == null) { policy = writePolicyDefault; } + + if (policy.txn != null) { + TxnMonitor.addKey(cluster, policy, key); + } + WriteCommand command = new WriteCommand(cluster, policy, key, bins, Operation.Type.ADD); command.execute(); } @@ -757,7 +968,7 @@ public final void add(WritePolicy policy, Key key, Bin... bins) /** * Asynchronously add integer/double bin values to record bin values. If the record or bin does * not exist, the record/bin will be created by default with the value to be added. The policy - * specifies the transaction timeout, record expiration and how the transaction is handled when + * specifies the command timeout, record expiration and how the command is handled when * the record already exists. *
<p>
* This method registers the command with an event loop and returns. @@ -780,8 +991,9 @@ public final void add(EventLoop eventLoop, WriteListener listener, WritePolicy p if (policy == null) { policy = writePolicyDefault; } + AsyncWrite command = new AsyncWrite(cluster, listener, policy, key, bins, Operation.Type.ADD); - eventLoop.execute(cluster, command); + AsyncTxnMonitor.execute(eventLoop, cluster, policy, command); } //------------------------------------------------------- @@ -790,7 +1002,7 @@ public final void add(EventLoop eventLoop, WriteListener listener, WritePolicy p /** * Delete record for specified key. - * The policy specifies the transaction timeout. + * The policy specifies the command timeout. * * @param policy delete configuration parameters, pass in null for defaults * @param key unique record identifier @@ -802,6 +1014,11 @@ public final boolean delete(WritePolicy policy, Key key) if (policy == null) { policy = writePolicyDefault; } + + if (policy.txn != null) { + TxnMonitor.addKey(cluster, policy, key); + } + DeleteCommand command = new DeleteCommand(cluster, policy, key); command.execute(); return command.existed(); @@ -812,7 +1029,7 @@ public final boolean delete(WritePolicy policy, Key key) * This method registers the command with an event loop and returns. * The event loop thread will process the command and send the results to the listener. *
<p>
- * The policy specifies the transaction timeout. + * The policy specifies the command timeout. * * @param eventLoop event loop that will process the command. If NULL, the event * loop will be chosen by round-robin. @@ -830,8 +1047,9 @@ public final void delete(EventLoop eventLoop, DeleteListener listener, WritePoli if (policy == null) { policy = writePolicyDefault; } + AsyncDelete command = new AsyncDelete(cluster, listener, policy, key); - eventLoop.execute(cluster, command); + AsyncTxnMonitor.execute(eventLoop, cluster, policy, command); } /** @@ -859,6 +1077,10 @@ public final BatchResults delete(BatchPolicy batchPolicy, BatchDeletePolicy dele deletePolicy = batchDeletePolicyDefault; } + if (batchPolicy.txn != null) { + TxnMonitor.addKeys(cluster, batchPolicy, keys); + } + BatchAttr attr = new BatchAttr(); attr.setDelete(deletePolicy); @@ -962,7 +1184,7 @@ public final void delete( executor, bn, batchPolicy, keys, null, records, attr); } } - executor.execute(commands); + AsyncTxnMonitor.executeBatch(batchPolicy, executor, commands, keys); } /** @@ -1029,7 +1251,7 @@ public final void delete( executor, bn, batchPolicy, keys, null, sent, listener, attr); } } - executor.execute(commands); + AsyncTxnMonitor.executeBatch(batchPolicy, executor, commands, keys); } /** @@ -1102,6 +1324,11 @@ public final void touch(WritePolicy policy, Key key) if (policy == null) { policy = writePolicyDefault; } + + if (policy.txn != null) { + TxnMonitor.addKey(cluster, policy, key); + } + TouchCommand command = new TouchCommand(cluster, policy, key); command.execute(); } @@ -1129,8 +1356,9 @@ public final void touch(EventLoop eventLoop, WriteListener listener, WritePolicy if (policy == null) { policy = writePolicyDefault; } + AsyncTouch command = new AsyncTouch(cluster, listener, policy, key); - eventLoop.execute(cluster, command); + AsyncTxnMonitor.execute(eventLoop, cluster, policy, command); } //------------------------------------------------------- @@ -1151,6 +1379,11 @@ public final boolean exists(Policy policy, Key key) if (policy == null) { policy = readPolicyDefault; } + + if (policy.txn != null) { + policy.txn.prepareRead(key.namespace); + } + ExistsCommand command = new ExistsCommand(cluster, policy, key); command.execute(); return command.exists(); @@ -1179,6 +1412,11 @@ public final void exists(EventLoop eventLoop, ExistsListener listener, Policy po if (policy == null) { policy = readPolicyDefault; } + + if (policy.txn != null) { + policy.txn.prepareRead(key.namespace); + } + AsyncExists command = new AsyncExists(cluster, listener, policy, key); eventLoop.execute(cluster, command); } @@ -1202,6 +1440,10 @@ public final boolean[] exists(BatchPolicy policy, Key[] keys) policy = batchPolicyDefault; } + if (policy.txn != null) { + policy.txn.prepareRead(keys); + } + boolean[] existsArray = new boolean[keys.length]; try { @@ -1258,6 +1500,10 @@ public final void exists(EventLoop eventLoop, ExistsArrayListener listener, Batc policy = batchPolicyDefault; } + if (policy.txn != null) { + policy.txn.prepareRead(keys); + } + boolean[] existsArray = new boolean[keys.length]; AsyncBatchExecutor.ExistsArray executor = new AsyncBatchExecutor.ExistsArray( eventLoop, cluster, listener, keys, existsArray); @@ -1308,6 +1554,10 @@ public final void exists(EventLoop eventLoop, ExistsSequenceListener listener, B policy = batchPolicyDefault; } + if (policy.txn != null) { + policy.txn.prepareRead(keys); + } + AsyncBatchExecutor.ExistsSequence executor = new AsyncBatchExecutor.ExistsSequence( eventLoop, cluster, 
listener); List bns = BatchNodeList.generate(cluster, policy, keys, null, false, executor); @@ -1346,6 +1596,11 @@ public final Record get(Policy policy, Key key) if (policy == null) { policy = readPolicyDefault; } + + if (policy.txn != null) { + policy.txn.prepareRead(key.namespace); + } + ReadCommand command = new ReadCommand(cluster, policy, key); command.execute(); return command.getRecord(); @@ -1374,6 +1629,11 @@ public final void get(EventLoop eventLoop, RecordListener listener, Policy polic if (policy == null) { policy = readPolicyDefault; } + + if (policy.txn != null) { + policy.txn.prepareRead(key.namespace); + } + AsyncRead command = new AsyncRead(cluster, listener, policy, key, null); eventLoop.execute(cluster, command); } @@ -1393,6 +1653,11 @@ public final Record get(Policy policy, Key key, String... binNames) if (policy == null) { policy = readPolicyDefault; } + + if (policy.txn != null) { + policy.txn.prepareRead(key.namespace); + } + ReadCommand command = new ReadCommand(cluster, policy, key, binNames); command.execute(); return command.getRecord(); @@ -1422,6 +1687,11 @@ public final void get(EventLoop eventLoop, RecordListener listener, Policy polic if (policy == null) { policy = readPolicyDefault; } + + if (policy.txn != null) { + policy.txn.prepareRead(key.namespace); + } + AsyncRead command = new AsyncRead(cluster, listener, policy, key, binNames); eventLoop.execute(cluster, command); } @@ -1440,6 +1710,11 @@ public final Record getHeader(Policy policy, Key key) if (policy == null) { policy = readPolicyDefault; } + + if (policy.txn != null) { + policy.txn.prepareRead(key.namespace); + } + ReadHeaderCommand command = new ReadHeaderCommand(cluster, policy, key); command.execute(); return command.getRecord(); @@ -1468,6 +1743,11 @@ public final void getHeader(EventLoop eventLoop, RecordListener listener, Policy if (policy == null) { policy = readPolicyDefault; } + + if (policy.txn != null) { + policy.txn.prepareRead(key.namespace); + } + AsyncReadHeader command = new AsyncReadHeader(cluster, listener, policy, key); eventLoop.execute(cluster, command); } @@ -1498,6 +1778,10 @@ public final boolean get(BatchPolicy policy, List records) policy = batchPolicyDefault; } + if (policy.txn != null) { + policy.txn.prepareRead(records); + } + BatchStatus status = new BatchStatus(true); List bns = BatchNodeList.generate(cluster, policy, records, status); IBatchCommand[] commands = new IBatchCommand[bns.size()]; @@ -1548,6 +1832,10 @@ public final void get(EventLoop eventLoop, BatchListListener listener, BatchPoli policy = batchPolicyDefault; } + if (policy.txn != null) { + policy.txn.prepareRead(records); + } + AsyncBatchExecutor.ReadList executor = new AsyncBatchExecutor.ReadList(eventLoop, cluster, listener, records); List bns = BatchNodeList.generate(cluster, policy, records, executor); AsyncCommand[] commands = new AsyncCommand[bns.size()]; @@ -1597,6 +1885,10 @@ public final void get(EventLoop eventLoop, BatchSequenceListener listener, Batch policy = batchPolicyDefault; } + if (policy.txn != null) { + policy.txn.prepareRead(records); + } + AsyncBatchExecutor.ReadSequence executor = new AsyncBatchExecutor.ReadSequence(eventLoop, cluster, listener); List bns = BatchNodeList.generate(cluster, policy, records, executor); AsyncCommand[] commands = new AsyncCommand[bns.size()]; @@ -1636,6 +1928,10 @@ public final Record[] get(BatchPolicy policy, Key[] keys) policy = batchPolicyDefault; } + if (policy.txn != null) { + policy.txn.prepareRead(keys); + } + Record[] records = new 
Record[keys.length]; try { @@ -1694,6 +1990,10 @@ public final void get(EventLoop eventLoop, RecordArrayListener listener, BatchPo policy = batchPolicyDefault; } + if (policy.txn != null) { + policy.txn.prepareRead(keys); + } + Record[] records = new Record[keys.length]; AsyncBatchExecutor.GetArray executor = new AsyncBatchExecutor.GetArray( eventLoop, cluster, listener, keys, records); @@ -1745,6 +2045,10 @@ public final void get(EventLoop eventLoop, RecordSequenceListener listener, Batc policy = batchPolicyDefault; } + if (policy.txn != null) { + policy.txn.prepareRead(keys); + } + AsyncBatchExecutor.GetSequence executor = new AsyncBatchExecutor.GetSequence(eventLoop, cluster, listener); List bns = BatchNodeList.generate(cluster, policy, keys, null, false, executor); AsyncCommand[] commands = new AsyncCommand[bns.size()]; @@ -1786,6 +2090,10 @@ public final Record[] get(BatchPolicy policy, Key[] keys, String... binNames) policy = batchPolicyDefault; } + if (policy.txn != null) { + policy.txn.prepareRead(keys); + } + int readAttr = (binNames == null || binNames.length == 0)? Command.INFO1_READ | Command.INFO1_GET_ALL : Command.INFO1_READ; @@ -1847,6 +2155,10 @@ public final void get(EventLoop eventLoop, RecordArrayListener listener, BatchPo policy = batchPolicyDefault; } + if (policy.txn != null) { + policy.txn.prepareRead(keys); + } + int readAttr = (binNames == null || binNames.length == 0)? Command.INFO1_READ | Command.INFO1_GET_ALL : Command.INFO1_READ; @@ -1902,6 +2214,10 @@ public final void get(EventLoop eventLoop, RecordSequenceListener listener, Batc policy = batchPolicyDefault; } + if (policy.txn != null) { + policy.txn.prepareRead(keys); + } + int readAttr = (binNames == null || binNames.length == 0)? Command.INFO1_READ | Command.INFO1_GET_ALL : Command.INFO1_READ; @@ -1945,6 +2261,10 @@ public final Record[] get(BatchPolicy policy, Key[] keys, Operation... 
ops) policy = batchPolicyDefault; } + if (policy.txn != null) { + policy.txn.prepareRead(keys); + } + Record[] records = new Record[keys.length]; try { @@ -2003,6 +2323,10 @@ public final void get(EventLoop eventLoop, RecordArrayListener listener, BatchPo policy = batchPolicyDefault; } + if (policy.txn != null) { + policy.txn.prepareRead(keys); + } + Record[] records = new Record[keys.length]; AsyncBatchExecutor.GetArray executor = new AsyncBatchExecutor.GetArray( eventLoop, cluster, listener, keys, records); @@ -2055,6 +2379,10 @@ public final void get(EventLoop eventLoop, RecordSequenceListener listener, Batc policy = batchPolicyDefault; } + if (policy.txn != null) { + policy.txn.prepareRead(keys); + } + AsyncBatchExecutor.GetSequence executor = new AsyncBatchExecutor.GetSequence(eventLoop, cluster, listener); List bns = BatchNodeList.generate(cluster, policy, keys, null, false, executor); AsyncCommand[] commands = new AsyncCommand[bns.size()]; @@ -2094,6 +2422,10 @@ public final Record[] getHeader(BatchPolicy policy, Key[] keys) policy = batchPolicyDefault; } + if (policy.txn != null) { + policy.txn.prepareRead(keys); + } + Record[] records = new Record[keys.length]; try { @@ -2152,6 +2484,10 @@ public final void getHeader(EventLoop eventLoop, RecordArrayListener listener, B policy = batchPolicyDefault; } + if (policy.txn != null) { + policy.txn.prepareRead(keys); + } + Record[] records = new Record[keys.length]; AsyncBatchExecutor.GetArray executor = new AsyncBatchExecutor.GetArray( eventLoop, cluster, listener, keys, records); @@ -2204,6 +2540,10 @@ public final void getHeader(EventLoop eventLoop, RecordSequenceListener listener policy = batchPolicyDefault; } + if (policy.txn != null) { + policy.txn.prepareRead(keys); + } + AsyncBatchExecutor.GetSequence executor = new AsyncBatchExecutor.GetSequence(eventLoop, cluster, listener); List bns = BatchNodeList.generate(cluster, policy, keys, null, false, executor); AsyncCommand[] commands = new AsyncCommand[bns.size()]; @@ -2249,9 +2589,26 @@ public final void getHeader(EventLoop eventLoop, RecordSequenceListener listener public final Record operate(WritePolicy policy, Key key, Operation... 
operations) throws AerospikeException { OperateArgs args = new OperateArgs(policy, writePolicyDefault, operatePolicyReadDefault, operations); - OperateCommand command = new OperateCommand(cluster, key, args); - command.execute(); - return command.getRecord(); + policy = args.writePolicy; + + if (args.hasWrite) { + if (policy.txn != null) { + TxnMonitor.addKey(cluster, policy, key); + } + + OperateCommandWrite command = new OperateCommandWrite(cluster, key, args); + command.execute(); + return command.getRecord(); + } + else { + if (policy.txn != null) { + policy.txn.prepareRead(key.namespace); + } + + OperateCommandRead command = new OperateCommandRead(cluster, key, args); + command.execute(); + return command.getRecord(); + } } /** @@ -2284,8 +2641,20 @@ public final void operate(EventLoop eventLoop, RecordListener listener, WritePol } OperateArgs args = new OperateArgs(policy, writePolicyDefault, operatePolicyReadDefault, operations); - AsyncOperate command = new AsyncOperate(cluster, listener, key, args); - eventLoop.execute(cluster, command); + policy = args.writePolicy; + + if (args.hasWrite) { + AsyncOperateWrite command = new AsyncOperateWrite(cluster, listener, key, args); + AsyncTxnMonitor.execute(eventLoop, cluster, args.writePolicy, command); + } + else { + if (policy.txn != null) { + policy.txn.prepareRead(key.namespace); + } + + AsyncOperateRead command = new AsyncOperateRead(cluster, listener, key, args); + eventLoop.execute(cluster, command); + } } //------------------------------------------------------- @@ -2317,6 +2686,10 @@ public final boolean operate(BatchPolicy policy, List records) policy = batchParentPolicyWriteDefault; } + if (policy.txn != null) { + TxnMonitor.addKeys(cluster, policy, records); + } + BatchStatus status = new BatchStatus(true); List bns = BatchNodeList.generate(cluster, policy, records, status); IBatchCommand[] commands = new IBatchCommand[bns.size()]; @@ -2481,7 +2854,7 @@ public final void operate( commands[count++] = new AsyncBatch.OperateListCommand(executor, bn, policy, records); } } - executor.execute(commands); + AsyncTxnMonitor.executeBatch(policy, executor, commands, records); } /** @@ -2586,7 +2959,7 @@ public final void operate( commands[count++] = new AsyncBatch.OperateSequenceCommand(executor, bn, policy, listener, records); } } - executor.execute(commands); + AsyncTxnMonitor.executeBatch(policy, executor, commands, records); } /** @@ -2622,6 +2995,10 @@ public final BatchResults operate( writePolicy = batchWritePolicyDefault; } + if (batchPolicy.txn != null) { + TxnMonitor.addKeys(cluster, batchPolicy, keys); + } + BatchAttr attr = new BatchAttr(batchPolicy, writePolicy, ops); BatchRecord[] records = new BatchRecord[keys.length]; @@ -2731,7 +3108,7 @@ public final void operate( executor, bn, batchPolicy, keys, ops, records, attr); } } - executor.execute(commands); + AsyncTxnMonitor.executeBatch(batchPolicy, executor, commands, keys); } /** @@ -2801,7 +3178,7 @@ public final void operate( executor, bn, batchPolicy, keys, ops, sent, listener, attr); } } - executor.execute(commands); + AsyncTxnMonitor.executeBatch(batchPolicy, executor, commands, keys); } //------------------------------------------------------- @@ -3095,13 +3472,18 @@ public final void removeUdf(InfoPolicy policy, String serverPath) * @param functionName user defined function * @param functionArgs arguments passed in to user defined function * @return return value of user defined function - * @throws AerospikeException if transaction fails + * @throws AerospikeException 
if command fails */ public final Object execute(WritePolicy policy, Key key, String packageName, String functionName, Value... functionArgs) throws AerospikeException { if (policy == null) { policy = writePolicyDefault; } + + if (policy.txn != null) { + TxnMonitor.addKey(cluster, policy, key); + } + ExecuteCommand command = new ExecuteCommand(cluster, policy, key, packageName, functionName, functionArgs); command.execute(); @@ -3168,8 +3550,9 @@ public final void execute( if (policy == null) { policy = writePolicyDefault; } + AsyncExecute command = new AsyncExecute(cluster, listener, policy, key, packageName, functionName, functionArgs); - eventLoop.execute(cluster, command); + AsyncTxnMonitor.execute(eventLoop, cluster, policy, command); } /** @@ -3208,6 +3591,10 @@ public final BatchResults execute( udfPolicy = batchUDFPolicyDefault; } + if (batchPolicy.txn != null) { + TxnMonitor.addKeys(cluster, batchPolicy, keys); + } + byte[] argBytes = Packer.pack(functionArgs); BatchAttr attr = new BatchAttr(); @@ -3323,7 +3710,7 @@ public final void execute( executor, bn, batchPolicy, keys, packageName, functionName, argBytes, records, attr); } } - executor.execute(commands); + AsyncTxnMonitor.executeBatch(batchPolicy, executor, commands, keys); } /** @@ -3399,7 +3786,7 @@ public final void execute( executor, bn, batchPolicy, keys, packageName, functionName, argBytes, sent, listener, attr); } } - executor.execute(commands); + AsyncTxnMonitor.executeBatch(batchPolicy, executor, commands, keys); } //---------------------------------------------------------- @@ -3432,7 +3819,7 @@ public final ExecuteTask execute( } statement.setAggregateFunction(packageName, functionName, functionArgs); - cluster.addTran(); + cluster.addCommandCount(); long taskId = statement.prepareTaskId(); Node[] nodes = cluster.validateNodes(); @@ -3471,7 +3858,7 @@ public final ExecuteTask execute( statement.setOperations(operations); } - cluster.addTran(); + cluster.addCommandCount(); long taskId = statement.prepareTaskId(); Node[] nodes = cluster.validateNodes(); diff --git a/client/src/com/aerospike/client/AerospikeException.java b/client/src/com/aerospike/client/AerospikeException.java index db1c77420..2f1153cb9 100644 --- a/client/src/com/aerospike/client/AerospikeException.java +++ b/client/src/com/aerospike/client/AerospikeException.java @@ -150,14 +150,14 @@ public final void setNode(Node node) { } /** - * Get transaction policy. Will be null for non-transaction exceptions. + * Get command policy. Will be null for non-command exceptions. */ public final Policy getPolicy() { return policy; } /** - * Set transaction policy. + * Set command policy. */ public final void setPolicy(Policy policy) { this.policy = policy; @@ -199,14 +199,14 @@ public final void setIteration(int iteration) { } /** - * Is it possible that write transaction may have completed. + * Is it possible that write command may have completed. */ public final boolean getInDoubt() { return inDoubt; } /** - * Set whether it is possible that the write transaction may have completed + * Set whether it is possible that the write command may have completed * even though this exception was generated. This may be the case when a * client error occurs (like timeout) after the command was sent to the server. 
*/ @@ -434,6 +434,11 @@ public BatchRecordArray(BatchRecord[] records, Throwable e) { super(ResultCode.BATCH_FAILED, "Batch failed", e); this.records = records; } + + public BatchRecordArray(BatchRecord[] records, String message, Throwable e) { + super(ResultCode.BATCH_FAILED, message, e); + this.records = records; + } } /** @@ -489,4 +494,83 @@ public Backoff(int resultCode) { super(resultCode); } } + + /** + * Exception thrown when a multi-record transaction commit fails. + */ + public static final class Commit extends AerospikeException { + private static final long serialVersionUID = 1L; + + /** + * Error status of the attempted commit. + */ + public final CommitError error; + + /** + * Verify result for each read key in the MRT. May be null if failure occurred before verify. + */ + public final BatchRecord[] verifyRecords; + + /** + * Roll forward/backward result for each write key in the MRT. May be null if failure occurred before + * roll forward/backward. + */ + public final BatchRecord[] rollRecords; + + public Commit(CommitError error, BatchRecord[] verifyRecords, BatchRecord[] rollRecords) { + super(ResultCode.TXN_FAILED, error.str); + this.error = error; + this.verifyRecords = verifyRecords; + this.rollRecords = rollRecords; + } + + public Commit(CommitError error, BatchRecord[] verifyRecords, BatchRecord[] rollRecords, Throwable cause) { + super(ResultCode.TXN_FAILED, error.str, cause); + this.error = error; + this.verifyRecords = verifyRecords; + this.rollRecords = rollRecords; + } + + @Override + public String getMessage() { + String msg = super.getMessage(); + StringBuilder sb = new StringBuilder(1024); + recordsToString(sb, "verify errors:", verifyRecords); + recordsToString(sb, "roll errors:", rollRecords); + return msg + sb.toString(); + } + } + + private static void recordsToString(StringBuilder sb, String title, BatchRecord[] records) { + if (records == null) { + return; + } + + int count = 0; + + for (BatchRecord br : records) { + // Only show results with an error response. + if (!(br.resultCode == ResultCode.OK || br.resultCode == ResultCode.NO_RESPONSE)) { + // Only show first 3 errors. + if (count >= 3) { + sb.append(System.lineSeparator()); + sb.append("..."); + break; + } + + if (count == 0) { + sb.append(System.lineSeparator()); + sb.append(title); + } + + sb.append(System.lineSeparator()); + sb.append(br.key); + sb.append(','); + sb.append(br.resultCode); + sb.append(','); + sb.append(br.inDoubt); + count++; + } + } + } } diff --git a/client/src/com/aerospike/client/BatchRecord.java b/client/src/com/aerospike/client/BatchRecord.java index 33a7b94ea..cdb9dcd45 100644 --- a/client/src/com/aerospike/client/BatchRecord.java +++ b/client/src/com/aerospike/client/BatchRecord.java @@ -1,5 +1,5 @@ /* - * Copyright 2012-2022 Aerospike, Inc. + * Copyright 2012-2024 Aerospike, Inc. * * Portions may be licensed to Aerospike, Inc. under one or more contributor * license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0. @@ -40,7 +40,7 @@ public class BatchRecord { public int resultCode; /** - * Is it possible that the write transaction may have completed even though an error + * Is it possible that the write command may have completed even though an error * occurred for this record. This may be the case when a client error occurs (like timeout) * after the command was sent to the server. 
*/ diff --git a/client/src/com/aerospike/client/CommitError.java b/client/src/com/aerospike/client/CommitError.java new file mode 100644 index 000000000..05d6ddb34 --- /dev/null +++ b/client/src/com/aerospike/client/CommitError.java @@ -0,0 +1,33 @@ +/* + * Copyright 2012-2024 Aerospike, Inc. + * + * Portions may be licensed to Aerospike, Inc. under one or more contributor + * license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ +package com.aerospike.client; + +/** + * Multi-record transaction (MRT) error status. + */ +public enum CommitError { + VERIFY_FAIL("MRT verify failed. MRT aborted."), + VERIFY_FAIL_CLOSE_ABANDONED("MRT verify failed. MRT aborted. MRT client close abandoned. Server will eventually close the MRT."), + VERIFY_FAIL_ABORT_ABANDONED("MRT verify failed. MRT client abort abandoned. Server will eventually abort the MRT."), + MARK_ROLL_FORWARD_ABANDONED("MRT client mark roll forward abandoned. Server will eventually abort the MRT."); + + public final String str; + + CommitError(String str) { + this.str = str; + } +} diff --git a/client/src/com/aerospike/client/CommitStatus.java b/client/src/com/aerospike/client/CommitStatus.java new file mode 100644 index 000000000..7a973ae45 --- /dev/null +++ b/client/src/com/aerospike/client/CommitStatus.java @@ -0,0 +1,34 @@ +/* + * Copyright 2012-2024 Aerospike, Inc. + * + * Portions may be licensed to Aerospike, Inc. under one or more contributor + * license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ +package com.aerospike.client; + +/** + * Multi-record transaction (MRT) commit status code. + */ +public enum CommitStatus { + OK("Commit succeeded"), + ALREADY_COMMITTED("Already committed"), + ALREADY_ABORTED("Already aborted"), + ROLL_FORWARD_ABANDONED("MRT client roll forward abandoned. Server will eventually commit the MRT."), + CLOSE_ABANDONED("MRT has been rolled forward, but MRT client close was abandoned. 
Server will eventually close the MRT."); + + public final String str; + + CommitStatus(String str) { + this.str = str; + } +} diff --git a/client/src/com/aerospike/client/IAerospikeClient.java b/client/src/com/aerospike/client/IAerospikeClient.java index 161ea58bb..bf3af145f 100644 --- a/client/src/com/aerospike/client/IAerospikeClient.java +++ b/client/src/com/aerospike/client/IAerospikeClient.java @@ -45,6 +45,8 @@ import com.aerospike.client.listener.RecordArrayListener; import com.aerospike.client.listener.RecordListener; import com.aerospike.client.listener.RecordSequenceListener; +import com.aerospike.client.listener.AbortListener; +import com.aerospike.client.listener.CommitListener; import com.aerospike.client.listener.WriteListener; import com.aerospike.client.metrics.MetricsPolicy; import com.aerospike.client.policy.AdminPolicy; @@ -56,6 +58,8 @@ import com.aerospike.client.policy.Policy; import com.aerospike.client.policy.QueryPolicy; import com.aerospike.client.policy.ScanPolicy; +import com.aerospike.client.policy.TxnRollPolicy; +import com.aerospike.client.policy.TxnVerifyPolicy; import com.aerospike.client.policy.WritePolicy; import com.aerospike.client.query.IndexCollectionType; import com.aerospike.client.query.IndexType; @@ -78,105 +82,115 @@ public interface IAerospikeClient extends Closeable { //------------------------------------------------------- /** - * Return read policy default. Use when the policy will not be modified. + * Copy read policy default to avoid problems if this shared instance is later modified. */ public Policy getReadPolicyDefault(); /** - * Copy read policy default. Use when the policy will be modified for use in a specific transaction. + * Copy read policy default. */ public Policy copyReadPolicyDefault(); /** - * Return write policy default. Use when the policy will not be modified. + * Copy write policy default to avoid problems if this shared instance is later modified. */ public WritePolicy getWritePolicyDefault(); /** - * Copy write policy default. Use when the policy will be modified for use in a specific transaction. + * Copy write policy default. */ public WritePolicy copyWritePolicyDefault(); /** - * Return scan policy default. Use when the policy will not be modified. + * Copy scan policy default to avoid problems if this shared instance is later modified. */ public ScanPolicy getScanPolicyDefault(); /** - * Copy scan policy default. Use when the policy will be modified for use in a specific transaction. + * Copy scan policy default. */ public ScanPolicy copyScanPolicyDefault(); /** - * Return query policy default. Use when the policy will not be modified. + * Copy query policy default to avoid problems if this shared instance is later modified. */ public QueryPolicy getQueryPolicyDefault(); /** - * Copy query policy default. Use when the policy will be modified for use in a specific transaction. + * Copy query policy default. */ public QueryPolicy copyQueryPolicyDefault(); /** - * Return batch header read policy default. Use when the policy will not be modified. + * Copy batch header read policy default to avoid problems if this shared instance is later modified. */ public BatchPolicy getBatchPolicyDefault(); /** - * Copy batch header read policy default. Use when the policy will be modified for use in a specific transaction. + * Copy batch header read policy default. */ public BatchPolicy copyBatchPolicyDefault(); /** - * Return batch header write policy default. Use when the policy will not be modified. 
+ * Copy batch header write policy default to avoid problems if this shared instance is later modified. */ public BatchPolicy getBatchParentPolicyWriteDefault(); /** - * Copy batch header write policy default. Use when the policy will be modified for use in a specific transaction. + * Copy batch header write policy default. */ public BatchPolicy copyBatchParentPolicyWriteDefault(); /** - * Return batch detail write policy default. Use when the policy will not be modified. + * Copy batch detail write policy default to avoid problems if this shared instance is later modified. */ public BatchWritePolicy getBatchWritePolicyDefault(); /** - * Copy batch detail write policy default. Use when the policy will be modified for use in a specific transaction. + * Copy batch detail write policy default. */ public BatchWritePolicy copyBatchWritePolicyDefault(); /** - * Return batch detail delete policy default. Use when the policy will not be modified. + * Copy batch detail delete policy default to avoid problems if this shared instance is later modified. */ public BatchDeletePolicy getBatchDeletePolicyDefault(); /** - * Copy batch detail delete policy default. Use when the policy will be modified for use in a specific transaction. + * Copy batch detail delete policy default. */ public BatchDeletePolicy copyBatchDeletePolicyDefault(); /** - * Return batch detail UDF policy default. Use when the policy will not be modified. + * Copy batch detail UDF policy default to avoid problems if this shared instance is later modified. */ public BatchUDFPolicy getBatchUDFPolicyDefault(); /** - * Copy batch detail UDF policy default. Use when the policy will be modified for use in a specific transaction. + * Copy batch detail UDF policy default. */ public BatchUDFPolicy copyBatchUDFPolicyDefault(); /** - * Return info command policy default. Use when the policy will not be modified. + * Copy info command policy default to avoid problems if this shared instance is later modified. */ public InfoPolicy getInfoPolicyDefault(); /** - * Copy info command policy default. Use when the policy will be modified for use in a specific transaction. + * Copy info command policy default. */ public InfoPolicy copyInfoPolicyDefault(); + /** + * Copy MRT record version verify policy default. + */ + public TxnVerifyPolicy copyTxnVerifyPolicyDefault(); + + /** + * Copy MRT roll forward/back policy default. + */ + public TxnRollPolicy copyTxnRollPolicyDefault(); + //------------------------------------------------------- // Cluster Connection Management //------------------------------------------------------- @@ -246,13 +260,77 @@ public Node getNode(String nodeName) */ public Cluster getCluster(); + //------------------------------------------------------- + // Multi-Record Transactions + //------------------------------------------------------- + + /** + * Attempt to commit the given multi-record transaction. First, the expected record versions are + * sent to the server nodes for verification. If all nodes return success, the transaction is + * committed. Otherwise, the transaction is aborted. + *

+ * Requires server version 8.0+ + * + * @param txn multi-record transaction + * @return status of the commit on success + * @throws AerospikeException.Commit if verify commit fails + */ + CommitStatus commit(Txn txn) + throws AerospikeException.Commit; + + /** + * Asynchronously attempt to commit the given multi-record transaction. First, the expected + * record versions are sent to the server nodes for verification. If all nodes return success, + * the transaction is committed. Otherwise, the transaction is aborted. + *

+ * This method registers the command with an event loop and returns. + * The event loop thread will process the command and send the results to the listener. + *

+ * Requires server version 8.0+ + * + * @param eventLoop event loop that will process the command. If NULL, the event + * loop will be chosen by round-robin. + * @param listener where to send results + * @param txn multi-record transaction + * @throws AerospikeException if event loop registration fails + */ + void commit(EventLoop eventLoop, CommitListener listener, Txn txn) + throws AerospikeException; + + /** + * Abort and rollback the given multi-record transaction. + *
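As a usage illustration for the two commit overloads above, here is a minimal sketch of the synchronous path. It assumes an existing IAerospikeClient named client, placeholder namespace/set/key names ("test", "demo", "k1"), and that a command is attached to the transaction through the txn policy field referenced as policy.txn elsewhere in this diff; none of that is part of the interface change itself.

    import com.aerospike.client.Bin;
    import com.aerospike.client.CommitStatus;
    import com.aerospike.client.IAerospikeClient;
    import com.aerospike.client.Key;
    import com.aerospike.client.Txn;
    import com.aerospike.client.policy.WritePolicy;

    public final class MrtCommitSketch {
        // Write two records inside one MRT, then commit it.
        public static CommitStatus writeAndCommit(IAerospikeClient client) {
            Txn txn = new Txn();

            WritePolicy wp = client.copyWritePolicyDefault();
            wp.txn = txn; // assumption: the write policy exposes the txn field used as policy.txn elsewhere in this diff

            client.put(wp, new Key("test", "demo", "k1"), new Bin("a", 1));
            client.put(wp, new Key("test", "demo", "k2"), new Bin("a", 2));

            // Throws AerospikeException.Commit if record version verification fails.
            CommitStatus status = client.commit(txn);
            System.out.println(status.str); // e.g. "Commit succeeded"
            return status;
        }
    }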

+ * Requires server version 8.0+ + * + * @param txn multi-record transaction + * @return status of the abort + */ + AbortStatus abort(Txn txn); + + /** + * Asynchronously abort and rollback the given multi-record transaction. + *
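For the synchronous abort(Txn) declared above, a short hedged sketch of rolling back when a command inside the transaction fails, reusing the client, wp and txn variables from the previous sketch (the AbortStatus values themselves are defined in AbortStatus.java, outside this excerpt):

    try {
        client.put(wp, new Key("test", "demo", "k3"), new Bin("a", 3));
    }
    catch (AerospikeException ae) {
        // Roll back everything the MRT has done so far, then rethrow.
        AbortStatus status = client.abort(txn);
        System.out.println(status);
        throw ae;
    }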

+ * This method registers the command with an event loop and returns. + * The event loop thread will process the command and send the results to the listener. + *

+ * Requires server version 8.0+ + * + * @param eventLoop event loop that will process the command. If NULL, the event + * loop will be chosen by round-robin. + * @param listener where to send results + * @param txn multi-record transaction + * @throws AerospikeException if event loop registration fails + */ + void abort(EventLoop eventLoop, AbortListener listener, Txn txn) + throws AerospikeException; + //------------------------------------------------------- // Write Record Operations //------------------------------------------------------- /** * Write record bin(s). - * The policy specifies the transaction timeout, record expiration and how the transaction is + * The policy specifies the command timeout, record expiration and how the command is * handled when the record already exists. * * @param policy write configuration parameters, pass in null for defaults @@ -268,7 +346,7 @@ public void put(WritePolicy policy, Key key, Bin... bins) * This method registers the command with an event loop and returns. * The event loop thread will process the command and send the results to the listener. *
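Returning to the asynchronous commit declared above: it takes a CommitListener and an optional event loop (null requests round-robin selection). The callback names used below, onSuccess(CommitStatus) and onFailure(AerospikeException.Commit), are an assumption for illustration, since the listener interfaces are imported but not defined in this excerpt.

    client.commit(null, new CommitListener() {
        @Override
        public void onSuccess(CommitStatus status) {
            System.out.println("commit: " + status.str);
        }

        @Override
        public void onFailure(AerospikeException.Commit ae) {
            System.out.println("commit failed: " + ae.getMessage());
        }
    }, txn);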

- * The policy specifies the transaction timeout, record expiration and how the transaction is + * The policy specifies the command timeout, record expiration and how the command is * handled when the record already exists. * * @param eventLoop event loop that will process the command. If NULL, the event @@ -288,7 +366,7 @@ public void put(EventLoop eventLoop, WriteListener listener, WritePolicy policy, /** * Append bin string values to existing record bin values. - * The policy specifies the transaction timeout, record expiration and how the transaction is + * The policy specifies the command timeout, record expiration and how the command is * handled when the record already exists. * This call only works for string values. * @@ -305,7 +383,7 @@ public void append(WritePolicy policy, Key key, Bin... bins) * This method registers the command with an event loop and returns. * The event loop thread will process the command and send the results to the listener. *

- * The policy specifies the transaction timeout, record expiration and how the transaction is + * The policy specifies the command timeout, record expiration and how the command is * handled when the record already exists. * This call only works for string values. * @@ -322,7 +400,7 @@ public void append(EventLoop eventLoop, WriteListener listener, WritePolicy poli /** * Prepend bin string values to existing record bin values. - * The policy specifies the transaction timeout, record expiration and how the transaction is + * The policy specifies the command timeout, record expiration and how the command is * handled when the record already exists. * This call works only for string values. * @@ -339,7 +417,7 @@ public void prepend(WritePolicy policy, Key key, Bin... bins) * This method registers the command with an event loop and returns. * The event loop thread will process the command and send the results to the listener. *

- * The policy specifies the transaction timeout, record expiration and how the transaction is + * The policy specifies the command timeout, record expiration and how the command is * handled when the record already exists. * This call only works for string values. * @@ -360,7 +438,7 @@ public void prepend(EventLoop eventLoop, WriteListener listener, WritePolicy pol /** * Add integer bin values to existing record bin values. - * The policy specifies the transaction timeout, record expiration and how the transaction is + * The policy specifies the command timeout, record expiration and how the command is * handled when the record already exists. * This call only works for integer values. * @@ -377,7 +455,7 @@ public void add(WritePolicy policy, Key key, Bin... bins) * This method registers the command with an event loop and returns. * The event loop thread will process the command and send the results to the listener. *

- * The policy specifies the transaction timeout, record expiration and how the transaction is + * The policy specifies the command timeout, record expiration and how the command is * handled when the record already exists. * This call only works for integer values. * @@ -398,7 +476,7 @@ public void add(EventLoop eventLoop, WriteListener listener, WritePolicy policy, /** * Delete record for specified key. - * The policy specifies the transaction timeout. + * The policy specifies the command timeout. * * @param policy delete configuration parameters, pass in null for defaults * @param key unique record identifier @@ -413,7 +491,7 @@ public boolean delete(WritePolicy policy, Key key) * This method registers the command with an event loop and returns. * The event loop thread will process the command and send the results to the listener. *

- * The policy specifies the transaction timeout. + * The policy specifies the command timeout. * * @param eventLoop event loop that will process the command. If NULL, the event * loop will be chosen by round-robin. @@ -1379,7 +1457,7 @@ public void removeUdf(InfoPolicy policy, String serverPath) * @param functionName user defined function * @param args arguments passed in to user defined function * @return return value of user defined function - * @throws AerospikeException if transaction fails + * @throws AerospikeException if command fails */ public Object execute(WritePolicy policy, Key key, String packageName, String functionName, Value... args) throws AerospikeException; diff --git a/client/src/com/aerospike/client/Key.java b/client/src/com/aerospike/client/Key.java index 242ebcb0a..f03e098c0 100644 --- a/client/src/com/aerospike/client/Key.java +++ b/client/src/com/aerospike/client/Key.java @@ -1,5 +1,5 @@ /* - * Copyright 2012-2021 Aerospike, Inc. + * Copyright 2012-2024 Aerospike, Inc. * * Portions may be licensed to Aerospike, Inc. under one or more contributor * license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0. @@ -276,9 +276,10 @@ public Key(String namespace, byte[] digest, String setName, Value userKey) { */ @Override public int hashCode() { - final int prime = 31; - int result = prime + Arrays.hashCode(digest); - return prime * result + namespace.hashCode(); + // The digest is already a hash, so pick 4 bytes from the 20 byte digest at a + // random offset (in this case 8). + final int result = Buffer.littleBytesToInt(digest, 8) + 31; + return result * 31 + namespace.hashCode(); } /** @@ -289,14 +290,16 @@ public boolean equals(Object obj) { if (this == obj) { return true; } + if (obj == null || getClass() != obj.getClass()) { return false; } + Key other = (Key) obj; - if (! Arrays.equals(digest, other.digest)) + if (! Arrays.equals(digest, other.digest)) { return false; - + } return namespace.equals(other.namespace); } diff --git a/client/src/com/aerospike/client/ResultCode.java b/client/src/com/aerospike/client/ResultCode.java index 54a3ce096..5d454f80b 100644 --- a/client/src/com/aerospike/client/ResultCode.java +++ b/client/src/com/aerospike/client/ResultCode.java @@ -21,6 +21,11 @@ * side file proto.h. */ public final class ResultCode { + /** + * Multi-record transaction failed + */ + public static final int TXN_FAILED = -17; + /** * One or more keys failed in a batch. */ @@ -224,7 +229,7 @@ public final class ResultCode { public static final int OP_NOT_APPLICABLE = 26; /** - * The transaction was not performed because the filter was false. + * The command was not performed because the filter was false. */ public static final int FILTERED_OUT = 27; @@ -233,11 +238,37 @@ public final class ResultCode { */ public static final int LOST_CONFLICT = 28; + /** + * MRT record blocked by a different transaction. + */ + public static final int MRT_BLOCKED = 29; + + /** + * MRT read version mismatch identified during commit. + * Some other command changed the record outside of the transaction. + */ + public static final int MRT_VERSION_MISMATCH = 30; + + /** + * MRT deadline reached without a successful commit or abort. + */ + public static final int MRT_EXPIRED = 31; + /** * Write can't complete until XDR finishes shipping. */ public static final int XDR_KEY_BUSY = 32; + /** + * MRT was already committed. + */ + public static final int MRT_COMMITTED = 33; + + /** + * MRT was already aborted. 
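The MRT result codes above reach application code through AerospikeException. A hedged sketch of routing on them follows; getResultCode() is a long-standing accessor on AerospikeException, not something added in this diff, and wp/txn are the variables from the earlier sketches.

    try {
        client.put(wp, new Key("test", "demo", "k4"), new Bin("a", 4));
    }
    catch (AerospikeException ae) {
        switch (ae.getResultCode()) {
            case ResultCode.MRT_BLOCKED:          // record locked by a different transaction
            case ResultCode.MRT_VERSION_MISMATCH: // record changed outside this transaction
            case ResultCode.MRT_EXPIRED:          // deadline passed; the server will roll the MRT back
                client.abort(txn);
                break;
            default:
                throw ae;
        }
    }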
+ */ + public static final int MRT_ABORTED = 34; + /** * There are no more records left for query. */ @@ -460,6 +491,8 @@ public static boolean keepConnection(int resultCode) { */ public static String getResultString(int resultCode) { switch (resultCode) { + case TXN_FAILED: + return "Multi-record transaction failed"; case BATCH_FAILED: return "One or more keys failed in a batch"; @@ -582,14 +615,29 @@ public static String getResultString(int resultCode) { return "Operation not applicable"; case FILTERED_OUT: - return "Transaction filtered out"; + return "Command filtered out"; case LOST_CONFLICT: - return "Transaction failed due to conflict with XDR"; + return "Command failed due to conflict with XDR"; + + case MRT_BLOCKED: + return "MRT record blocked by a different transaction"; + + case MRT_VERSION_MISMATCH: + return "MRT version mismatch"; + + case MRT_EXPIRED: + return "MRT expired"; case XDR_KEY_BUSY: return "Write can't complete until XDR finishes shipping"; + case MRT_COMMITTED: + return "MRT already committed"; + + case MRT_ABORTED: + return "MRT already aborted"; + case QUERY_END: return "Query end"; diff --git a/client/src/com/aerospike/client/Txn.java b/client/src/com/aerospike/client/Txn.java new file mode 100644 index 000000000..d8e0e0330 --- /dev/null +++ b/client/src/com/aerospike/client/Txn.java @@ -0,0 +1,326 @@ +/* + * Copyright 2012-2024 Aerospike, Inc. + * + * Portions may be licensed to Aerospike, Inc. under one or more contributor + * license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ +package com.aerospike.client; + +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.atomic.AtomicLong; + +/** + * Multi-record transaction (MRT). Each command in the MRT must use the same namespace. + */ +public final class Txn { + /** + * MRT state. + */ + public static enum State { + OPEN, + VERIFIED, + COMMITTED, + ABORTED; + } + + private static AtomicLong randomState = new AtomicLong(System.nanoTime()); + + private final long id; + private final ConcurrentHashMap reads; + private final Set writes; + private Txn.State state; + private String namespace; + private int timeout; + private int deadline; + private boolean monitorInDoubt; + private boolean inDoubt; + + /** + * Create MRT, assign random transaction id and initialize reads/writes hashmaps with default + * capacities. The default MRT timeout is 10 seconds. + */ + public Txn() { + id = createId(); + reads = new ConcurrentHashMap<>(); + writes = ConcurrentHashMap.newKeySet(); + state = Txn.State.OPEN; + timeout = 10; // seconds + } + + /** + * Create MRT, assign random transaction id and initialize reads/writes hashmaps with given + * capacities. The default MRT timeout is 10 seconds. + * + * @param readsCapacity expected number of record reads in the MRT. Minimum value is 16. + * @param writesCapacity expected number of record writes in the MRT. 
Minimum value is 16. + */ + public Txn(int readsCapacity, int writesCapacity) { + if (readsCapacity < 16) { + readsCapacity = 16; + } + + if (writesCapacity < 16) { + writesCapacity = 16; + } + + id = createId(); + reads = new ConcurrentHashMap<>(readsCapacity); + writes = ConcurrentHashMap.newKeySet(writesCapacity); + state = Txn.State.OPEN; + timeout = 10; // seconds + } + + private static long createId() { + // xorshift64* doesn't generate zeroes. + long oldState; + long newState; + + do { + oldState = randomState.get(); + newState = oldState; + newState ^= newState >>> 12; + newState ^= newState << 25; + newState ^= newState >>> 27; + } while (!randomState.compareAndSet(oldState, newState)); + + return newState * 0x2545f4914f6cdd1dl; + } + + /** + * Return MRT ID. + */ + public long getId() { + return id; + } + + /** + * Set MRT timeout in seconds. The timer starts when the MRT monitor record is created. + * This occurs when the first command in the MRT is executed. If the timeout is reached before + * a commit or abort is called, the server will expire and rollback the MRT. + */ + public void setTimeout(int timeout) { + this.timeout = timeout; + } + + /** + * Return MRT timeout in seconds. + */ + public int getTimeout() { + return timeout; + } + + /** + * Verify current MRT state and namespace for a future read command. + */ + void prepareRead(String ns) { + verifyCommand(); + setNamespace(ns); + } + + /** + * Verify current MRT state and namespaces for a future batch read command. + */ + void prepareRead(Key[] keys) { + verifyCommand(); + setNamespace(keys); + } + + /** + * Verify current MRT state and namespaces for a future batch read command. + */ + void prepareRead(List records) { + verifyCommand(); + setNamespace(records); + } + + /** + * Verify that the MRT state allows future commands. + */ + public void verifyCommand() { + if (state != Txn.State.OPEN) { + throw new AerospikeException("Command not allowed in current MRT state: " + state); + } + } + + /** + * Process the results of a record read. For internal use only. + */ + public void onRead(Key key, Long version) { + if (version != null) { + reads.put(key, version); + } + } + + /** + * Get record version for a given key. + */ + public Long getReadVersion(Key key) { + return reads.get(key); + } + + /** + * Get all read keys and their versions. + */ + public Set> getReads() { + return reads.entrySet(); + } + + /** + * Process the results of a record write. For internal use only. + */ + public void onWrite(Key key, Long version, int resultCode) { + if (version != null) { + reads.put(key, version); + } + else { + if (resultCode == ResultCode.OK) { + reads.remove(key); + writes.add(key); + } + } + } + + /** + * Add key to write hash when write command is in doubt (usually caused by timeout). + */ + public void onWriteInDoubt(Key key) { + reads.remove(key); + writes.add(key); + } + + /** + * Get all write keys and their versions. + */ + public Set getWrites() { + return writes; + } + + /** + * Set MRT namespace only if doesn't already exist. + * If namespace already exists, verify new namespace is the same. + */ + public void setNamespace(String ns) { + if (namespace == null) { + namespace = ns; + } + else if (! namespace.equals(ns)) { + throw new AerospikeException("Namespace must be the same for all commands in the MRT. orig: " + + namespace + " new: " + ns); + } + } + + /** + * Set MRT namespaces for each key only if doesn't already exist. + * If namespace already exists, verify new namespace is the same. 
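A small usage note for the constructors and setTimeout shown above (a sketch, not taken from the diff itself): a caller that knows roughly how many records the MRT will touch can pre-size the tracking maps and shorten the server-side timeout.

    Txn txn = new Txn(256, 64); // expected reads/writes; values below 16 are raised to 16
    txn.setTimeout(5);          // seconds; if no commit or abort arrives in time, the server expires and rolls back the MRT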
+ */ + private void setNamespace(Key[] keys) { + for (Key key : keys) { + setNamespace(key.namespace); + } + } + + /** + * Set MRT namespaces for each key only if doesn't already exist. + * If namespace already exists, verify new namespace is the same. + */ + private void setNamespace(List records) { + for (BatchRead br : records) { + setNamespace(br.key.namespace); + } + } + + /** + * Return MRT namespace. + */ + public String getNamespace() { + return namespace; + } + + /** + * Set MRT deadline. The deadline is a wall clock time calculated by the server from the + * MRT timeout that is sent by the client when creating the MRT monitor record. This deadline + * is used to avoid client/server clock skew issues. For internal use only. + */ + public void setDeadline(int deadline) { + this.deadline = deadline; + } + + /** + * Get MRT deadline. For internal use only. + */ + public int getDeadline() { + return deadline; + } + + /** + * Set that the MRT monitor existence is in doubt. For internal use only. + */ + public void setMonitorInDoubt() { + this.monitorInDoubt = true; + } + + /** + * Does MRT monitor record exist or is in doubt. + */ + public boolean monitorMightExist() { + return deadline != 0 || monitorInDoubt; + } + + /** + * Does MRT monitor record exist. + */ + public boolean monitorExists() { + return deadline != 0; + } + + /** + * Set MRT state. For internal use only. + */ + public void setState(Txn.State state) { + this.state = state; + } + + /** + * Return MRT state. + */ + public Txn.State getState() { + return state; + } + + /** + * Set MRT inDoubt flag. For internal use only. + */ + public void setInDoubt(boolean inDoubt) { + this.inDoubt = inDoubt; + } + + /** + * Return if MRT is inDoubt. + */ + public boolean getInDoubt() { + return inDoubt; + } + + /** + * Clear MRT. Remove all tracked keys. + */ + public void clear() { + namespace = null; + deadline = 0; + reads.clear(); + writes.clear(); + } +} diff --git a/client/src/com/aerospike/client/admin/Role.java b/client/src/com/aerospike/client/admin/Role.java index f521643b3..c1b8639a7 100644 --- a/client/src/com/aerospike/client/admin/Role.java +++ b/client/src/com/aerospike/client/admin/Role.java @@ -1,5 +1,5 @@ /* - * Copyright 2012-2022 Aerospike, Inc. + * Copyright 2012-2024 Aerospike, Inc. * * Portions may be licensed to Aerospike, Inc. under one or more contributor * license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0. @@ -48,22 +48,22 @@ public final class Role { public static final String SIndexAdmin = "sindex-admin"; /** - * Allow read transactions. + * Allow read commands. */ public static final String Read = "read"; /** - * Allow read and write transactions. + * Allow read and write commands. */ public static final String ReadWrite = "read-write"; /** - * Allow read and write transactions within user defined functions. + * Allow read and write commands within user defined functions. */ public static final String ReadWriteUdf = "read-write-udf"; /** - * Allow write transactions. + * Allow write commands. 
*/ public static final String Write = "write"; @@ -110,18 +110,23 @@ public int hashCode() { } public boolean equals(Object obj) { - if (this == obj) + if (this == obj) { return true; - if (obj == null) + } + if (obj == null) { return false; - if (getClass() != obj.getClass()) + } + if (getClass() != obj.getClass()) { return false; + } Role other = (Role) obj; if (name == null) { - if (other.name != null) + if (other.name != null) { return false; - } else if (!name.equals(other.name)) + } + } else if (!name.equals(other.name)) { return false; + } return true; } } diff --git a/client/src/com/aerospike/client/async/AsyncBatch.java b/client/src/com/aerospike/client/async/AsyncBatch.java index 22d4fecf6..ac9a834b6 100644 --- a/client/src/com/aerospike/client/async/AsyncBatch.java +++ b/client/src/com/aerospike/client/async/AsyncBatch.java @@ -27,6 +27,7 @@ import com.aerospike.client.Operation; import com.aerospike.client.Record; import com.aerospike.client.ResultCode; +import com.aerospike.client.Txn; import com.aerospike.client.command.BatchAttr; import com.aerospike.client.command.BatchNode; import com.aerospike.client.command.BatchNodeList; @@ -71,10 +72,10 @@ protected void writeBuffer() { @Override protected void parseRow() { - skipKey(fieldCount); - BatchRead record = records.get(batchIndex); + parseFieldsRead(record.key); + if (resultCode == 0) { record.setRecord(parseRecord()); } @@ -126,10 +127,10 @@ protected void writeBuffer() { @Override protected void parseRow() { - skipKey(fieldCount); - BatchRead record = records.get(batchIndex); + parseFieldsRead(record.key); + if (resultCode == 0) { record.setRecord(parseRecord()); } @@ -193,7 +194,7 @@ protected void writeBuffer() { @Override protected void parseRow() { - skipKey(fieldCount); + parseFieldsRead(keys[batchIndex]); if (resultCode == 0) { records[batchIndex] = parseRecord(); @@ -254,10 +255,10 @@ protected void writeBuffer() { @Override protected void parseRow() { - skipKey(fieldCount); - Key keyOrig = keys[batchIndex]; + parseFieldsRead(keyOrig); + if (resultCode == 0) { Record record = parseRecord(); listener.onRecord(keyOrig, record); @@ -311,12 +312,7 @@ protected void writeBuffer() { @Override protected void parseRow() { - skipKey(fieldCount); - - if (opCount > 0) { - throw new AerospikeException.Parse("Received bins that were not requested!"); - } - + parseFieldsRead(keys[batchIndex]); existsArray[batchIndex] = resultCode == 0; } @@ -364,13 +360,8 @@ protected void writeBuffer() { @Override protected void parseRow() { - skipKey(fieldCount); - - if (opCount > 0) { - throw new AerospikeException.Parse("Received bins that were not requested!"); - } - Key keyOrig = keys[batchIndex]; + parseFieldsRead(keyOrig); listener.onExists(keyOrig, resultCode == 0); } @@ -418,10 +409,10 @@ protected void writeBuffer() { @Override protected void parseRow() { - skipKey(fieldCount); - BatchRecord record = records.get(batchIndex); + parseFields(record.key, record.hasWrite); + if (resultCode == 0) { record.setRecord(parseRecord()); return; @@ -456,6 +447,10 @@ protected void setInDoubt(boolean inDoubt) { if (record.resultCode == ResultCode.NO_RESPONSE) { record.inDoubt = record.hasWrite; + + if (record.inDoubt && policy.txn != null) { + policy.txn.onWriteInDoubt(record.key); + } } } } @@ -507,10 +502,10 @@ protected void writeBuffer() { @Override protected void parseRow() { - skipKey(fieldCount); - BatchRecord record = records.get(batchIndex); + parseFields(record.key, record.hasWrite); + if (resultCode == 0) { record.setRecord(parseRecord()); 
} @@ -547,6 +542,10 @@ protected void setInDoubt(boolean inDoubt) { // Set inDoubt, but do not call onRecord() because user already has access to full // BatchRecord list and can examine each record for inDoubt when the exception occurs. record.inDoubt = record.hasWrite; + + if (record.inDoubt && policy.txn != null) { + policy.txn.onWriteInDoubt(record.key); + } } } } @@ -600,10 +599,10 @@ protected void writeBuffer() { @Override protected void parseRow() { - skipKey(fieldCount); - BatchRecord record = records[batchIndex]; + parseFields(record.key, record.hasWrite); + if (resultCode == 0) { record.setRecord(parseRecord()); } @@ -623,7 +622,11 @@ protected void setInDoubt(boolean inDoubt) { BatchRecord record = records[index]; if (record.resultCode == ResultCode.NO_RESPONSE) { - record.inDoubt = inDoubt; + record.inDoubt = true; + + if (policy.txn != null) { + policy.txn.onWriteInDoubt(record.key); + } } } } @@ -680,9 +683,10 @@ protected void writeBuffer() { @Override protected void parseRow() { - skipKey(fieldCount); - Key keyOrig = keys[batchIndex]; + + parseFields(keyOrig, attr.hasWrite); + BatchRecord record; if (resultCode == 0) { @@ -691,6 +695,7 @@ record = new BatchRecord(keyOrig, parseRecord(), attr.hasWrite); else { record = new BatchRecord(keyOrig, null, resultCode, Command.batchInDoubt(attr.hasWrite, commandSentCounter), attr.hasWrite); } + sent[batchIndex] = true; AsyncBatch.onRecord(listener, record, batchIndex); } @@ -703,6 +708,11 @@ protected void setInDoubt(boolean inDoubt) { Key key = keys[index]; BatchRecord record = new BatchRecord(key, null, ResultCode.NO_RESPONSE, attr.hasWrite && inDoubt, attr.hasWrite); sent[index] = true; + + if (record.inDoubt && policy.txn != null) { + policy.txn.onWriteInDoubt(key); + } + AsyncBatch.onRecord(listener, record, index); } } @@ -763,10 +773,10 @@ protected void writeBuffer() { @Override protected void parseRow() { - skipKey(fieldCount); - BatchRecord record = records[batchIndex]; + parseFields(record.key, record.hasWrite); + if (resultCode == 0) { record.setRecord(parseRecord()); return; @@ -800,7 +810,11 @@ protected void setInDoubt(boolean inDoubt) { BatchRecord record = records[index]; if (record.resultCode == ResultCode.NO_RESPONSE) { - record.inDoubt = inDoubt; + record.inDoubt = true; + + if (policy.txn != null) { + policy.txn.onWriteInDoubt(record.key); + } } } } @@ -863,9 +877,10 @@ protected void writeBuffer() { @Override protected void parseRow() { - skipKey(fieldCount); - Key keyOrig = keys[batchIndex]; + + parseFields(keyOrig, attr.hasWrite); + BatchRecord record; if (resultCode == 0) { @@ -886,6 +901,7 @@ record = new BatchRecord(keyOrig, null, resultCode, Command.batchInDoubt(attr.ha else { record = new BatchRecord(keyOrig, null, resultCode, Command.batchInDoubt(attr.hasWrite, commandSentCounter), attr.hasWrite); } + sent[batchIndex] = true; AsyncBatch.onRecord(listener, record, batchIndex); } @@ -898,6 +914,10 @@ protected void setInDoubt(boolean inDoubt) { Key key = keys[index]; BatchRecord record = new BatchRecord(key, null, ResultCode.NO_RESPONSE, attr.hasWrite && inDoubt, attr.hasWrite); sent[index] = true; + + if (record.inDoubt && policy.txn != null) { + policy.txn.onWriteInDoubt(record.key); + } AsyncBatch.onRecord(listener, record, index); } } @@ -914,6 +934,113 @@ protected List generateBatchNodes() { } } + //------------------------------------------------------- + // MRT + //------------------------------------------------------- + + public static final class TxnVerify extends AsyncBatchCommand { + private 
final Key[] keys; + private final Long[] versions; + private final BatchRecord[] records; + + public TxnVerify( + AsyncBatchExecutor parent, + BatchNode batch, + BatchPolicy batchPolicy, + Key[] keys, + Long[] versions, + BatchRecord[] records + ) { + super(parent, batch, batchPolicy, false); + this.keys = keys; + this.versions = versions; + this.records = records; + } + + @Override + protected void writeBuffer() { + setBatchTxnVerify(batchPolicy, keys, versions, batch); + } + + @Override + protected void parseRow() { + skipKey(fieldCount); + + BatchRecord record = records[batchIndex]; + + if (resultCode == ResultCode.OK) { + record.resultCode = resultCode; + } + else { + record.setError(resultCode, false); + parent.setRowError(); + } + } + + @Override + protected AsyncBatchCommand createCommand(BatchNode batchNode) { + return new TxnVerify(parent, batchNode, batchPolicy, keys, versions, records); + } + + @Override + protected List generateBatchNodes() { + return BatchNodeList.generate(parent.cluster, batchPolicy, keys, sequenceAP, sequenceSC, batch, false, parent); + } + } + + public static final class TxnRoll extends AsyncBatchCommand { + private final Txn txn; + private final Key[] keys; + private final BatchRecord[] records; + private final BatchAttr attr; + + public TxnRoll( + AsyncBatchExecutor parent, + BatchNode batch, + BatchPolicy batchPolicy, + Txn txn, + Key[] keys, + BatchRecord[] records, + BatchAttr attr + ) { + super(parent, batch, batchPolicy, false); + this.txn = txn; + this.keys = keys; + this.records = records; + this.attr = attr; + } + + @Override + protected void writeBuffer() { + setBatchTxnRoll(batchPolicy, txn, keys, batch, attr); + } + + @Override + protected void parseRow() { + skipKey(fieldCount); + + BatchRecord record = records[batchIndex]; + + if (resultCode == ResultCode.OK) { + record.resultCode = resultCode; + } + else { + record.setError(resultCode, Command.batchInDoubt(attr.hasWrite, commandSentCounter)); + parent.setRowError(); + } + } + + @Override + protected AsyncBatchCommand createCommand(BatchNode batchNode) { + return new TxnRoll(parent, batchNode, batchPolicy, txn, keys, records, attr); + } + + @Override + protected List generateBatchNodes() { + return BatchNodeList.generate(parent.cluster, batchPolicy, keys, sequenceAP, sequenceSC, batch, true, parent); + } + } + //------------------------------------------------------- // Batch Base Command //------------------------------------------------------- @@ -942,6 +1069,32 @@ void addSubException(AerospikeException ae) { parent.addSubException(ae); } + final void parseFieldsRead(Key key) { + if (policy.txn != null) { + Long version = parseVersion(fieldCount); + policy.txn.onRead(key, version); + } + else { + skipKey(fieldCount); + } + } + + final void parseFields(Key key, boolean hasWrite) { + if (policy.txn != null) { + Long version = parseVersion(fieldCount); + + if (hasWrite) { + policy.txn.onWrite(key, version, resultCode); + } + else { + policy.txn.onRead(key, version); + } + } + else { + skipKey(fieldCount); + } + } + @Override protected boolean prepareRetry(boolean timeout) { if (parent.done || ! 
(policy.replica == Replica.SEQUENCE || policy.replica == Replica.PREFER_RACK)) { diff --git a/client/src/com/aerospike/client/async/AsyncBatchExecutor.java b/client/src/com/aerospike/client/async/AsyncBatchExecutor.java index 9d5d7cca1..6feb85409 100644 --- a/client/src/com/aerospike/client/async/AsyncBatchExecutor.java +++ b/client/src/com/aerospike/client/async/AsyncBatchExecutor.java @@ -314,7 +314,7 @@ protected AsyncBatchExecutor(EventLoop eventLoop, Cluster cluster, boolean hasRe this.eventLoop = eventLoop; this.cluster = cluster; this.hasResultCode = hasResultCode; - cluster.addTran(); + cluster.addCommandCount(); } public void execute(AsyncCommand[] cmds) { diff --git a/client/src/com/aerospike/client/async/AsyncBatchSingle.java b/client/src/com/aerospike/client/async/AsyncBatchSingle.java index 59e9c379e..e5ff3a07e 100644 --- a/client/src/com/aerospike/client/async/AsyncBatchSingle.java +++ b/client/src/com/aerospike/client/async/AsyncBatchSingle.java @@ -26,6 +26,7 @@ import com.aerospike.client.Operation; import com.aerospike.client.Record; import com.aerospike.client.ResultCode; +import com.aerospike.client.Txn; import com.aerospike.client.async.AsyncBatchExecutor.BatchRecordSequence; import com.aerospike.client.cluster.Cluster; import com.aerospike.client.cluster.Node; @@ -63,8 +64,8 @@ public ReadGetSequence( } @Override - protected final boolean parseResult() { - super.parseResult(); + protected void parseResult(RecordParser rp) { + super.parseResult(rp); try { listener.onRecord(record); @@ -72,7 +73,6 @@ protected final boolean parseResult() { catch (Throwable e) { Log.error("Unexpected exception from onRecord(): " + Util.getErrorMessage(e)); } - return true; } } @@ -95,10 +95,9 @@ public ReadSequence( } @Override - protected boolean parseResult() { - super.parseResult(); + protected void parseResult(RecordParser rp) { + super.parseResult(rp); AsyncBatch.onRecord(listener, record, index); - return true; } } @@ -122,9 +121,7 @@ protected void writeBuffer() { } @Override - protected boolean parseResult() { - RecordParser rp = new RecordParser(dataBuffer, dataOffset, receiveSize); - + protected void parseResult(RecordParser rp) { if (rp.resultCode == ResultCode.OK) { record.setRecord(rp.parseRecord(record.ops != null)); } @@ -132,7 +129,6 @@ protected boolean parseResult() { record.setError(rp.resultCode, false); executor.setRowError(); } - return true; } } @@ -193,13 +189,10 @@ protected void writeBuffer() { } @Override - protected final boolean parseResult() { - RecordParser rp = new RecordParser(dataBuffer, dataOffset, receiveSize); - + protected void parseResult(RecordParser rp) { if (rp.resultCode == ResultCode.OK) { records[index] = rp.parseRecord(isOperation); } - return true; } } @@ -252,15 +245,13 @@ protected void writeBuffer() { } @Override - protected final boolean parseResult() { - RecordParser rp = new RecordParser(dataBuffer, dataOffset, receiveSize); + protected void parseResult(RecordParser rp) { Record record = null; if (rp.resultCode == ResultCode.OK) { record = rp.parseRecord(isOperation); } AsyncBatch.onRecord(listener, key, record); - return true; } } @@ -289,15 +280,13 @@ protected void writeBuffer() { } @Override - protected final boolean parseResult() { - RecordParser rp = new RecordParser(dataBuffer, dataOffset, receiveSize); + protected void parseResult(RecordParser rp) { Record record = null; if (rp.resultCode == ResultCode.OK) { record = rp.parseRecord(false); } AsyncBatch.onRecord(listener, key, record); - return true; } } @@ -325,13 +314,10 @@ 
protected void writeBuffer() { } @Override - protected final boolean parseResult() { - RecordParser rp = new RecordParser(dataBuffer, dataOffset, receiveSize); - + protected void parseResult(RecordParser rp) { if (rp.resultCode == ResultCode.OK) { records[index] = rp.parseRecord(false); } - return true; } } @@ -360,20 +346,13 @@ protected void writeBuffer() { } @Override - protected boolean parseResult() { - RecordParser rp = new RecordParser(dataBuffer, dataOffset, receiveSize); - - if (rp.opCount > 0) { - throw new AerospikeException.Parse("Received bins that were not requested!"); - } - + protected void parseResult(RecordParser rp) { try { listener.onExists(key, rp.resultCode == 0); } catch (Throwable e) { Log.error("Unexpected exception from onExists(): " + Util.getErrorMessage(e)); } - return true; } } @@ -401,15 +380,8 @@ protected void writeBuffer() { } @Override - protected boolean parseResult() { - RecordParser rp = new RecordParser(dataBuffer, dataOffset, receiveSize); - - if (rp.opCount > 0) { - throw new AerospikeException.Parse("Received bins that were not requested!"); - } - + protected void parseResult(RecordParser rp) { existsArray[index] = rp.resultCode == 0; - return true; } } @@ -449,8 +421,7 @@ protected void writeBuffer() { } @Override - protected boolean parseResult() { - RecordParser rp = new RecordParser(dataBuffer, dataOffset, receiveSize); + protected void parseResult(RecordParser rp) { BatchRecord record; if (rp.resultCode == 0) { @@ -463,7 +434,6 @@ record = new BatchRecord(key, null, rp.resultCode, } parent.setSent(index); AsyncBatch.onRecord(listener, record, index); - return true; } @Override @@ -501,9 +471,7 @@ protected void writeBuffer() { } @Override - protected boolean parseResult() { - RecordParser rp = new RecordParser(dataBuffer, dataOffset, receiveSize); - + protected void parseResult(RecordParser rp) { if (rp.resultCode == ResultCode.OK) { record.setRecord(rp.parseRecord(true)); } @@ -511,7 +479,6 @@ protected boolean parseResult() { record.setError(rp.resultCode, Command.batchInDoubt(attr.hasWrite, commandSentCounter)); executor.setRowError(); } - return true; } @Override @@ -546,10 +513,9 @@ public WriteSequence( } @Override - protected boolean parseResult() { - super.parseResult(); + protected void parseResult(RecordParser rp) { + super.parseResult(rp); AsyncBatch.onRecord(listener, record, index); - return true; } // setInDoubt() is not overridden to call onRecord() because user already has access to full @@ -579,9 +545,7 @@ protected void writeBuffer() { } @Override - protected boolean parseResult() { - RecordParser rp = new RecordParser(dataBuffer, dataOffset, receiveSize); - + protected void parseResult(RecordParser rp) { if (rp.resultCode == ResultCode.OK) { record.setRecord(rp.parseRecord(true)); } @@ -589,7 +553,6 @@ protected boolean parseResult() { record.setError(rp.resultCode, Command.batchInDoubt(true, commandSentCounter)); executor.setRowError(); } - return true; } @Override @@ -624,10 +587,9 @@ public UDFSequence( } @Override - protected boolean parseResult() { - super.parseResult(); + protected void parseResult(RecordParser rp) { + super.parseResult(rp); AsyncBatch.onRecord(listener, record, index); - return true; } // setInDoubt() is not overridden to call onRecord() because user already has access to full @@ -657,9 +619,7 @@ protected void writeBuffer() { } @Override - protected boolean parseResult() { - RecordParser rp = new RecordParser(dataBuffer, dataOffset, receiveSize); - + protected void parseResult(RecordParser rp) { if 
(rp.resultCode == ResultCode.OK) { record.setRecord(rp.parseRecord(false)); } @@ -679,7 +639,6 @@ else if (rp.resultCode == ResultCode.UDF_BAD_RESPONSE) { record.setError(rp.resultCode, Command.batchInDoubt(true, commandSentCounter)); executor.setRowError(); } - return true; } @Override @@ -728,8 +687,7 @@ protected void writeBuffer() { } @Override - protected boolean parseResult() { - RecordParser rp = new RecordParser(dataBuffer, dataOffset, receiveSize); + protected void parseResult(RecordParser rp) { BatchRecord record; if (rp.resultCode == ResultCode.OK) { @@ -754,7 +712,6 @@ record = new BatchRecord(key, null, rp.resultCode, Command.batchInDoubt(true, co } parent.setSent(index); AsyncBatch.onRecord(listener, record, index); - return true; } @Override @@ -798,9 +755,7 @@ protected void writeBuffer() { } @Override - protected boolean parseResult() { - RecordParser rp = new RecordParser(dataBuffer, dataOffset, receiveSize); - + protected void parseResult(RecordParser rp) { if (rp.resultCode == ResultCode.OK) { record.setRecord(rp.parseRecord(false)); } @@ -820,7 +775,6 @@ else if (rp.resultCode == ResultCode.UDF_BAD_RESPONSE) { record.setError(rp.resultCode, Command.batchInDoubt(true, commandSentCounter)); executor.setRowError(); } - return true; } @Override @@ -864,8 +818,7 @@ protected void writeBuffer() { } @Override - protected boolean parseResult() { - RecordParser rp = new RecordParser(dataBuffer, dataOffset, receiveSize); + protected void parseResult(RecordParser rp) { BatchRecord record; if (rp.resultCode == 0) { @@ -877,7 +830,6 @@ record = new BatchRecord(key, null, rp.resultCode, Command.batchInDoubt(true, co } parent.setSent(index); AsyncBatch.onRecord(listener, record, index); - return true; } @Override @@ -909,10 +861,9 @@ public DeleteSequence( } @Override - protected boolean parseResult() { - super.parseResult(); + protected void parseResult(RecordParser rp) { + super.parseResult(rp); AsyncBatch.onRecord(listener, record, index); - return true; } // setInDoubt() is not overridden to call onRecord() because user already has access to full @@ -942,9 +893,7 @@ protected void writeBuffer() { } @Override - protected boolean parseResult() { - RecordParser rp = new RecordParser(dataBuffer, dataOffset, receiveSize); - + protected void parseResult(RecordParser rp) { if (rp.resultCode == 0) { record.setRecord(new Record(null, rp.generation, rp.expiration)); } @@ -952,7 +901,6 @@ protected boolean parseResult() { record.setError(rp.resultCode, Command.batchInDoubt(true, commandSentCounter)); executor.setRowError(); } - return true; } @Override @@ -963,6 +911,95 @@ public void setInDoubt() { } } + //------------------------------------------------------- + // MRT + //------------------------------------------------------- + + public static class TxnVerify extends AsyncBaseCommand { + private final long version; + private final BatchRecord record; + + public TxnVerify( + AsyncBatchExecutor executor, + Cluster cluster, + BatchPolicy policy, + long version, + BatchRecord record, + Node node + ) { + super(executor, cluster, policy, record.key, node, false); + this.version = version; + this.record = record; + } + + @Override + protected void writeBuffer() { + setTxnVerify(record.key, version); + } + + @Override + protected boolean parseResult() { + RecordParser rp = new RecordParser(dataBuffer, dataOffset, receiveSize); + + if (rp.resultCode == ResultCode.OK) { + record.resultCode = rp.resultCode; + } + else { + record.setError(rp.resultCode, false); + executor.setRowError(); + } + 
return true; + } + + @Override + protected void parseResult(RecordParser rp) { + } + } + + public static class TxnRoll extends AsyncBaseCommand { + private final Txn txn; + private final BatchRecord record; + private final int attr; + + public TxnRoll( + AsyncBatchExecutor executor, + Cluster cluster, + BatchPolicy policy, + Txn txn, + BatchRecord record, + Node node, + int attr + ) { + super(executor, cluster, policy, record.key, node, true); + this.txn = txn; + this.record = record; + this.attr = attr; + } + + @Override + protected void writeBuffer() { + setTxnRoll(record.key, txn, attr); + } + + @Override + protected boolean parseResult() { + RecordParser rp = new RecordParser(dataBuffer, dataOffset, receiveSize); + + if (rp.resultCode == ResultCode.OK) { + record.resultCode = rp.resultCode; + } + else { + record.setError(rp.resultCode, Command.batchInDoubt(true, commandSentCounter)); + executor.setRowError(); + } + return true; + } + + @Override + protected void parseResult(RecordParser rp) { + } + } + //------------------------------------------------------- // Async Batch Base Command //------------------------------------------------------- @@ -1011,6 +1048,16 @@ void addSubException(AerospikeException ae) { executor.addSubException(ae); } + @Override + protected boolean parseResult() { + RecordParser rp = new RecordParser(dataBuffer, dataOffset, receiveSize); + rp.parseFields(policy.txn, key, hasWrite); + parseResult(rp); + return true; + } + + protected abstract void parseResult(RecordParser rp); + @Override protected boolean prepareRetry(boolean timeout) { if (hasWrite) { diff --git a/client/src/com/aerospike/client/async/AsyncCommand.java b/client/src/com/aerospike/client/async/AsyncCommand.java index 90dc63f08..ace22ec28 100644 --- a/client/src/com/aerospike/client/async/AsyncCommand.java +++ b/client/src/com/aerospike/client/async/AsyncCommand.java @@ -139,12 +139,6 @@ protected void putBuffer(byte[] buffer) { } } - final void validateHeaderSize() { - if (receiveSize < Command.MSG_REMAINING_HEADER_SIZE) { - throw new AerospikeException.Parse("Invalid receive size: " + receiveSize); - } - } - boolean parseCommandResult() { if (compressed) { int usize = (int)Buffer.bytesToLong(dataBuffer, 0); @@ -183,6 +177,14 @@ final void stop() { valid = false; } + final void onRetryException(Node node, int iteration, AerospikeException ae) { + ae.setNode(node); + ae.setPolicy(policy); + ae.setIteration(iteration); + ae.setInDoubt(isWrite(), commandSentCounter); + addSubException(ae); + } + void addSubException(AerospikeException ae) { if (subExceptions == null) { subExceptions = new ArrayList(policy.maxRetries); @@ -190,11 +192,30 @@ void addSubException(AerospikeException ae) { subExceptions.add(ae); } + final void onFinalException(Node node, int iteration, AerospikeException ae) { + ae.setNode(node); + ae.setPolicy(policy); + ae.setIteration(iteration); + ae.setInDoubt(isWrite(), commandSentCounter); + ae.setSubExceptions(subExceptions); + + if (ae.getInDoubt()) { + onInDoubt(); + } + + onFailure(ae); + } + + void onInDoubt() { + // Write commands will override this method. + } + boolean retryBatch(Runnable command, long deadline) { // Override this method in batch to regenerate node assignments. return false; } + // TODD: Make abstract. 
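The new onRetryException/onFinalException/onInDoubt hooks above centralize exception decoration; onInDoubt() is a no-op here so that write commands can override it. The override below is only a sketch of what such a subclass might do, modeled on the policy.txn.onWriteInDoubt(...) calls in the batch code earlier in this diff; the actual AsyncWriteBase body is not part of this excerpt.

    // Hypothetical override in a single-record write command (illustration only).
    @Override
    void onInDoubt() {
        if (policy.txn != null) {
            policy.txn.onWriteInDoubt(key);
        }
    }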
boolean isWrite() { return false; } diff --git a/client/src/com/aerospike/client/async/AsyncDelete.java b/client/src/com/aerospike/client/async/AsyncDelete.java index 45278c7d5..862afc109 100644 --- a/client/src/com/aerospike/client/async/AsyncDelete.java +++ b/client/src/com/aerospike/client/async/AsyncDelete.java @@ -1,5 +1,5 @@ /* - * Copyright 2012-2023 Aerospike, Inc. + * Copyright 2012-2024 Aerospike, Inc. * * Portions may be licensed to Aerospike, Inc. under one or more contributor * license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0. @@ -20,41 +20,16 @@ import com.aerospike.client.Key; import com.aerospike.client.ResultCode; import com.aerospike.client.cluster.Cluster; -import com.aerospike.client.cluster.Node; -import com.aerospike.client.cluster.Partition; import com.aerospike.client.listener.DeleteListener; -import com.aerospike.client.metrics.LatencyType; import com.aerospike.client.policy.WritePolicy; -public final class AsyncDelete extends AsyncCommand { +public final class AsyncDelete extends AsyncWriteBase { private final DeleteListener listener; - private final WritePolicy writePolicy; - private final Key key; - private final Partition partition; private boolean existed; public AsyncDelete(Cluster cluster, DeleteListener listener, WritePolicy writePolicy, Key key) { - super(writePolicy, true); + super(cluster, writePolicy, key); this.listener = listener; - this.writePolicy = writePolicy; - this.key = key; - this.partition = Partition.write(cluster, writePolicy, key); - cluster.addTran(); - } - - @Override - boolean isWrite() { - return true; - } - - @Override - protected Node getNode(Cluster cluster) { - return partition.getNodeWrite(cluster); - } - - @Override - protected LatencyType getLatencyType() { - return LatencyType.WRITE; } @Override @@ -64,11 +39,9 @@ protected void writeBuffer() { @Override protected boolean parseResult() { - validateHeaderSize(); - - int resultCode = dataBuffer[5] & 0xFF; + int resultCode = parseHeader(); - if (resultCode == 0) { + if (resultCode == ResultCode.OK) { existed = true; return true; } @@ -89,12 +62,6 @@ protected boolean parseResult() { throw new AerospikeException(resultCode); } - @Override - protected boolean prepareRetry(boolean timeout) { - partition.prepareRetryWrite(timeout); - return true; - } - @Override protected void onSuccess() { if (listener != null) { diff --git a/client/src/com/aerospike/client/async/AsyncExecute.java b/client/src/com/aerospike/client/async/AsyncExecute.java index d0cc414cd..36183747f 100644 --- a/client/src/com/aerospike/client/async/AsyncExecute.java +++ b/client/src/com/aerospike/client/async/AsyncExecute.java @@ -1,5 +1,5 @@ /* - * Copyright 2012-2023 Aerospike, Inc. + * Copyright 2012-2024 Aerospike, Inc. * * Portions may be licensed to Aerospike, Inc. under one or more contributor * license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0. 
@@ -20,20 +20,20 @@ import com.aerospike.client.AerospikeException; import com.aerospike.client.Key; +import com.aerospike.client.Record; +import com.aerospike.client.ResultCode; import com.aerospike.client.Value; import com.aerospike.client.cluster.Cluster; -import com.aerospike.client.cluster.Node; -import com.aerospike.client.cluster.Partition; +import com.aerospike.client.command.RecordParser; import com.aerospike.client.listener.ExecuteListener; -import com.aerospike.client.metrics.LatencyType; import com.aerospike.client.policy.WritePolicy; -public final class AsyncExecute extends AsyncRead { +public final class AsyncExecute extends AsyncWriteBase { private final ExecuteListener executeListener; - private final WritePolicy writePolicy; private final String packageName; private final String functionName; private final Value[] args; + private Record record; public AsyncExecute( Cluster cluster, @@ -44,43 +44,65 @@ public AsyncExecute( String functionName, Value[] args ) { - super(cluster, null, writePolicy, key, Partition.write(cluster, writePolicy, key), false); + super(cluster, writePolicy, key); this.executeListener = listener; - this.writePolicy = writePolicy; this.packageName = packageName; this.functionName = functionName; this.args = args; } @Override - boolean isWrite() { - return true; + protected void writeBuffer() { + setUdf(writePolicy, key, packageName, functionName, args); } @Override - protected Node getNode(Cluster cluster) { - return partition.getNodeWrite(cluster); - } + protected boolean parseResult() { + RecordParser rp = new RecordParser(dataBuffer, dataOffset, receiveSize); + rp.parseFields(policy.txn, key, true); - @Override - protected LatencyType getLatencyType() { - return LatencyType.WRITE; - } + if (rp.resultCode == ResultCode.OK) { + record = rp.parseRecord(false); + return true; + } - @Override - protected void writeBuffer() throws AerospikeException { - setUdf(writePolicy, key, packageName, functionName, args); - } + if (rp.resultCode == ResultCode.UDF_BAD_RESPONSE) { + record = rp.parseRecord(false); + handleUdfError(rp.resultCode); + return true; + } - @Override - protected void handleNotFound(int resultCode) { - throw new AerospikeException(resultCode); + if (rp.resultCode == ResultCode.FILTERED_OUT) { + if (policy.failOnFilteredOut) { + throw new AerospikeException(rp.resultCode); + } + return true; + } + + throw new AerospikeException(rp.resultCode); } - @Override - protected boolean prepareRetry(boolean timeout) { - partition.prepareRetryWrite(timeout); - return true; + private void handleUdfError(int resultCode) { + String ret = (String)record.bins.get("FAILURE"); + + if (ret == null) { + throw new AerospikeException(resultCode); + } + + String message; + int code; + + try { + String[] list = ret.split(":"); + code = Integer.parseInt(list[2].trim()); + message = list[0] + ':' + list[1] + ' ' + list[3]; + } + catch (Throwable e) { + // Use generic exception if parse error occurs. + throw new AerospikeException(resultCode, ret); + } + + throw new AerospikeException(code, message); } @Override diff --git a/client/src/com/aerospike/client/async/AsyncExists.java b/client/src/com/aerospike/client/async/AsyncExists.java index 812b7de9e..81171902e 100644 --- a/client/src/com/aerospike/client/async/AsyncExists.java +++ b/client/src/com/aerospike/client/async/AsyncExists.java @@ -1,5 +1,5 @@ /* - * Copyright 2012-2023 Aerospike, Inc. + * Copyright 2012-2024 Aerospike, Inc. * * Portions may be licensed to Aerospike, Inc. 
under one or more contributor * license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0. @@ -20,34 +20,17 @@ import com.aerospike.client.Key; import com.aerospike.client.ResultCode; import com.aerospike.client.cluster.Cluster; -import com.aerospike.client.cluster.Node; -import com.aerospike.client.cluster.Partition; +import com.aerospike.client.command.RecordParser; import com.aerospike.client.listener.ExistsListener; -import com.aerospike.client.metrics.LatencyType; import com.aerospike.client.policy.Policy; -public final class AsyncExists extends AsyncCommand { +public final class AsyncExists extends AsyncReadBase { private final ExistsListener listener; - private final Key key; - private final Partition partition; private boolean exists; public AsyncExists(Cluster cluster, ExistsListener listener, Policy policy, Key key) { - super(policy, true); + super(cluster, policy, key); this.listener = listener; - this.key = key; - this.partition = Partition.read(cluster, policy, key); - cluster.addTran(); - } - - @Override - Node getNode(Cluster cluster) { - return partition.getNodeRead(cluster); - } - - @Override - protected LatencyType getLatencyType() { - return LatencyType.READ; } @Override @@ -57,35 +40,28 @@ protected void writeBuffer() { @Override protected boolean parseResult() { - validateHeaderSize(); - - int resultCode = dataBuffer[5] & 0xFF; + RecordParser rp = new RecordParser(dataBuffer, dataOffset, receiveSize); + rp.parseFields(policy.txn, key, false); - if (resultCode == 0) { + if (rp.resultCode == ResultCode.OK) { exists = true; return true; } - if (resultCode == ResultCode.KEY_NOT_FOUND_ERROR) { + if (rp.resultCode == ResultCode.KEY_NOT_FOUND_ERROR) { exists = false; return true; } - if (resultCode == ResultCode.FILTERED_OUT) { + if (rp.resultCode == ResultCode.FILTERED_OUT) { if (policy.failOnFilteredOut) { - throw new AerospikeException(resultCode); + throw new AerospikeException(rp.resultCode); } exists = true; return true; } - throw new AerospikeException(resultCode); - } - - @Override - protected boolean prepareRetry(boolean timeout) { - partition.prepareRetryRead(timeout); - return true; + throw new AerospikeException(rp.resultCode); } @Override diff --git a/proxy/src/com/aerospike/client/proxy/RecordProxy.java b/client/src/com/aerospike/client/async/AsyncOperateRead.java similarity index 54% rename from proxy/src/com/aerospike/client/proxy/RecordProxy.java rename to client/src/com/aerospike/client/async/AsyncOperateRead.java index 2e5d5ea78..5dd629693 100644 --- a/proxy/src/com/aerospike/client/proxy/RecordProxy.java +++ b/client/src/com/aerospike/client/async/AsyncOperateRead.java @@ -1,5 +1,5 @@ /* - * Copyright 2012-2023 Aerospike, Inc. + * Copyright 2012-2024 Aerospike, Inc. * * Portions may be licensed to Aerospike, Inc. under one or more contributor * license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0. @@ -14,38 +14,23 @@ * License for the specific language governing permissions and limitations under * the License. */ - -package com.aerospike.client.proxy; +package com.aerospike.client.async; import com.aerospike.client.Key; -import com.aerospike.client.Record; -import com.aerospike.client.query.BVal; - -public class RecordProxy { - /** - * Optional Key. - */ - public final Key key; +import com.aerospike.client.cluster.Cluster; +import com.aerospike.client.command.OperateArgs; +import com.aerospike.client.listener.RecordListener; - /** - * Optional Record result after command has completed. 
- */ - public final Record record; +public final class AsyncOperateRead extends AsyncRead { + private final OperateArgs args; - /** - * Optional bVal. - */ - public final BVal bVal; - - /** - * The result code from proxy server. - */ - public final int resultCode; + public AsyncOperateRead(Cluster cluster, RecordListener listener, Key key, OperateArgs args) { + super(cluster, listener, args.writePolicy, key, true); + this.args = args; + } - public RecordProxy(int resultCode, Key key, Record record, BVal bVal) { - this.resultCode = resultCode; - this.key = key; - this.record = record; - this.bVal = bVal; + @Override + protected void writeBuffer() { + setOperate(args.writePolicy, key, args); } } diff --git a/client/src/com/aerospike/client/async/AsyncOperate.java b/client/src/com/aerospike/client/async/AsyncOperateWrite.java similarity index 51% rename from client/src/com/aerospike/client/async/AsyncOperate.java rename to client/src/com/aerospike/client/async/AsyncOperateWrite.java index 29d009171..4d0326d59 100644 --- a/client/src/com/aerospike/client/async/AsyncOperate.java +++ b/client/src/com/aerospike/client/async/AsyncOperateWrite.java @@ -1,5 +1,5 @@ /* - * Copyright 2012-2023 Aerospike, Inc. + * Copyright 2012-2024 Aerospike, Inc. * * Portions may be licensed to Aerospike, Inc. under one or more contributor * license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0. @@ -18,57 +18,60 @@ import com.aerospike.client.AerospikeException; import com.aerospike.client.Key; +import com.aerospike.client.Record; +import com.aerospike.client.ResultCode; import com.aerospike.client.cluster.Cluster; -import com.aerospike.client.cluster.Node; import com.aerospike.client.command.OperateArgs; +import com.aerospike.client.command.RecordParser; import com.aerospike.client.listener.RecordListener; -import com.aerospike.client.metrics.LatencyType; -public final class AsyncOperate extends AsyncRead { +public final class AsyncOperateWrite extends AsyncWriteBase { + private final RecordListener listener; private final OperateArgs args; + private Record record; - public AsyncOperate(Cluster cluster, RecordListener listener, Key key, OperateArgs args) { - super(cluster, listener, args.writePolicy, key, args.getPartition(cluster, key), true); + public AsyncOperateWrite(Cluster cluster, RecordListener listener, Key key, OperateArgs args) { + super(cluster, args.writePolicy, key); + this.listener = listener; this.args = args; } @Override - boolean isWrite() { - return args.hasWrite; + protected void writeBuffer() { + setOperate(args.writePolicy, key, args); } @Override - protected Node getNode(Cluster cluster) { - return args.hasWrite ? partition.getNodeWrite(cluster) : partition.getNodeRead(cluster); - } + protected boolean parseResult() { + RecordParser rp = new RecordParser(dataBuffer, dataOffset, receiveSize); + rp.parseFields(policy.txn, key, true); - @Override - protected LatencyType getLatencyType() { - return args.hasWrite ? LatencyType.WRITE : LatencyType.READ; - } + if (rp.resultCode == ResultCode.OK) { + record = rp.parseRecord(true); + return true; + } - @Override - protected void writeBuffer() { - setOperate(args.writePolicy, key, args); + if (rp.resultCode == ResultCode.FILTERED_OUT) { + if (policy.failOnFilteredOut) { + throw new AerospikeException(rp.resultCode); + } + return true; + } + + throw new AerospikeException(rp.resultCode); } @Override - protected void handleNotFound(int resultCode) { - // Only throw not found exception for command with write operations. 
- // Read-only command operations return a null record. - if (args.hasWrite) { - throw new AerospikeException(resultCode); + protected void onSuccess() { + if (listener != null) { + listener.onSuccess(key, record); } } @Override - protected boolean prepareRetry(boolean timeout) { - if (args.hasWrite) { - partition.prepareRetryWrite(timeout); - } - else { - partition.prepareRetryRead(timeout); + protected void onFailure(AerospikeException e) { + if (listener != null) { + listener.onFailure(e); } - return true; } } diff --git a/client/src/com/aerospike/client/async/AsyncQueryPartitionExecutor.java b/client/src/com/aerospike/client/async/AsyncQueryPartitionExecutor.java index 9a7efdb4f..df0878f5e 100644 --- a/client/src/com/aerospike/client/async/AsyncQueryPartitionExecutor.java +++ b/client/src/com/aerospike/client/async/AsyncQueryPartitionExecutor.java @@ -50,7 +50,7 @@ public AsyncQueryPartitionExecutor( this.statement = statement; this.tracker = tracker; - cluster.addTran(); + cluster.addCommandCount(); task = new TaskGen(statement); taskId = task.getId(); tracker.setSleepBetweenRetries(0); diff --git a/client/src/com/aerospike/client/async/AsyncRead.java b/client/src/com/aerospike/client/async/AsyncRead.java index 2b2959ffe..d95c8b378 100644 --- a/client/src/com/aerospike/client/async/AsyncRead.java +++ b/client/src/com/aerospike/client/async/AsyncRead.java @@ -1,5 +1,5 @@ /* - * Copyright 2012-2023 Aerospike, Inc. + * Copyright 2012-2024 Aerospike, Inc. * * Portions may be licensed to Aerospike, Inc. under one or more contributor * license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0. @@ -21,50 +21,28 @@ import com.aerospike.client.Record; import com.aerospike.client.ResultCode; import com.aerospike.client.cluster.Cluster; -import com.aerospike.client.cluster.Node; -import com.aerospike.client.cluster.Partition; -import com.aerospike.client.command.Buffer; -import com.aerospike.client.command.Command; +import com.aerospike.client.command.RecordParser; import com.aerospike.client.listener.RecordListener; -import com.aerospike.client.metrics.LatencyType; import com.aerospike.client.policy.Policy; -public class AsyncRead extends AsyncCommand { +public class AsyncRead extends AsyncReadBase { private final RecordListener listener; - protected final Key key; private final String[] binNames; private final boolean isOperation; - protected final Partition partition; protected Record record; public AsyncRead(Cluster cluster, RecordListener listener, Policy policy, Key key, String[] binNames) { - super(policy, true); + super(cluster, policy, key); this.listener = listener; - this.key = key; this.binNames = binNames; this.isOperation = false; - this.partition = Partition.read(cluster, policy, key); - cluster.addTran(); } - public AsyncRead(Cluster cluster, RecordListener listener, Policy policy, Key key, Partition partition, boolean isOperation) { - super(policy, true); + public AsyncRead(Cluster cluster, RecordListener listener, Policy policy, Key key, boolean isOperation) { + super(cluster, policy, key); this.listener = listener; - this.key = key; this.binNames = null; this.isOperation = isOperation; - this.partition = partition; - cluster.addTran(); - } - - @Override - Node getNode(Cluster cluster) { - return partition.getNodeRead(cluster); - } - - @Override - protected LatencyType getLatencyType() { - return LatencyType.READ; } @Override @@ -74,79 +52,26 @@ protected void writeBuffer() { @Override protected final boolean parseResult() { - validateHeaderSize(); - - int 
resultCode = dataBuffer[dataOffset + 5] & 0xFF; - int generation = Buffer.bytesToInt(dataBuffer, dataOffset + 6); - int expiration = Buffer.bytesToInt(dataBuffer, dataOffset + 10); - int fieldCount = Buffer.bytesToShort(dataBuffer, dataOffset + 18); - int opCount = Buffer.bytesToShort(dataBuffer, dataOffset + 20); - dataOffset += Command.MSG_REMAINING_HEADER_SIZE; + RecordParser rp = new RecordParser(dataBuffer, dataOffset, receiveSize); + rp.parseFields(policy.txn, key, false); - if (resultCode == 0) { - if (opCount == 0) { - // Bin data was not returned. - record = new Record(null, generation, expiration); - return true; - } - skipKey(fieldCount); - record = parseRecord(opCount, generation, expiration, isOperation); + if (rp.resultCode == ResultCode.OK) { + this.record = rp.parseRecord(isOperation); return true; } - if (resultCode == ResultCode.KEY_NOT_FOUND_ERROR) { - handleNotFound(resultCode); + if (rp.resultCode == ResultCode.KEY_NOT_FOUND_ERROR) { return true; } - if (resultCode == ResultCode.FILTERED_OUT) { + if (rp.resultCode == ResultCode.FILTERED_OUT) { if (policy.failOnFilteredOut) { - throw new AerospikeException(resultCode); + throw new AerospikeException(rp.resultCode); } return true; } - if (resultCode == ResultCode.UDF_BAD_RESPONSE) { - skipKey(fieldCount); - record = parseRecord(opCount, generation, expiration, isOperation); - handleUdfError(resultCode); - return true; - } - - throw new AerospikeException(resultCode); - } - - @Override - protected boolean prepareRetry(boolean timeout) { - partition.prepareRetryRead(timeout); - return true; - } - - protected void handleNotFound(int resultCode) { - // Do nothing in default case. Record will be null. - } - - private final void handleUdfError(int resultCode) { - String ret = (String)record.bins.get("FAILURE"); - - if (ret == null) { - throw new AerospikeException(resultCode); - } - - String message; - int code; - - try { - String[] list = ret.split(":"); - code = Integer.parseInt(list[2].trim()); - message = list[0] + ':' + list[1] + ' ' + list[3]; - } - catch (Throwable e) { - // Use generic exception if parse error occurs. - throw new AerospikeException(resultCode, ret); - } - - throw new AerospikeException(code, message); + throw new AerospikeException(rp.resultCode); } @Override diff --git a/client/src/com/aerospike/client/async/AsyncReadBase.java b/client/src/com/aerospike/client/async/AsyncReadBase.java new file mode 100644 index 000000000..58b6d4b3a --- /dev/null +++ b/client/src/com/aerospike/client/async/AsyncReadBase.java @@ -0,0 +1,57 @@ +/* + * Copyright 2012-2024 Aerospike, Inc. + * + * Portions may be licensed to Aerospike, Inc. under one or more contributor + * license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. 
+ */ +package com.aerospike.client.async; + +import com.aerospike.client.Key; +import com.aerospike.client.cluster.Cluster; +import com.aerospike.client.cluster.Node; +import com.aerospike.client.cluster.Partition; +import com.aerospike.client.metrics.LatencyType; +import com.aerospike.client.policy.Policy; + +public abstract class AsyncReadBase extends AsyncCommand { + protected final Key key; + protected final Partition partition; + + public AsyncReadBase(Cluster cluster, Policy policy, Key key) { + super(policy, true); + this.key = key; + this.partition = Partition.read(cluster, policy, key); + cluster.addCommandCount(); + } + + @Override + boolean isWrite() { + return false; + } + + @Override + Node getNode(Cluster cluster) { + return partition.getNodeRead(cluster); + } + + @Override + protected LatencyType getLatencyType() { + return LatencyType.READ; + } + + @Override + protected boolean prepareRetry(boolean timeout) { + partition.prepareRetryRead(timeout); + return true; + } +} diff --git a/client/src/com/aerospike/client/async/AsyncReadHeader.java b/client/src/com/aerospike/client/async/AsyncReadHeader.java index 654b027dc..7ab259aab 100644 --- a/client/src/com/aerospike/client/async/AsyncReadHeader.java +++ b/client/src/com/aerospike/client/async/AsyncReadHeader.java @@ -1,5 +1,5 @@ /* - * Copyright 2012-2023 Aerospike, Inc. + * Copyright 2012-2024 Aerospike, Inc. * * Portions may be licensed to Aerospike, Inc. under one or more contributor * license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0. @@ -21,35 +21,17 @@ import com.aerospike.client.Record; import com.aerospike.client.ResultCode; import com.aerospike.client.cluster.Cluster; -import com.aerospike.client.cluster.Node; -import com.aerospike.client.cluster.Partition; -import com.aerospike.client.command.Buffer; +import com.aerospike.client.command.RecordParser; import com.aerospike.client.listener.RecordListener; -import com.aerospike.client.metrics.LatencyType; import com.aerospike.client.policy.Policy; -public final class AsyncReadHeader extends AsyncCommand { +public final class AsyncReadHeader extends AsyncReadBase { private final RecordListener listener; - private final Key key; - private final Partition partition; private Record record; public AsyncReadHeader(Cluster cluster, RecordListener listener, Policy policy, Key key) { - super(policy, true); + super(cluster, policy, key); this.listener = listener; - this.key = key; - this.partition = Partition.read(cluster, policy, key); - cluster.addTran(); - } - - @Override - Node getNode(Cluster cluster) { - return partition.getNodeRead(cluster); - } - - @Override - protected LatencyType getLatencyType() { - return LatencyType.READ; } @Override @@ -59,35 +41,26 @@ protected void writeBuffer() { @Override protected boolean parseResult() { - validateHeaderSize(); - - int resultCode = dataBuffer[5] & 0xFF; + RecordParser rp = new RecordParser(dataBuffer, dataOffset, receiveSize); + rp.parseFields(policy.txn, key, false); - if (resultCode == 0) { - int generation = Buffer.bytesToInt(dataBuffer, 6); - int expiration = Buffer.bytesToInt(dataBuffer, 10); - record = new Record(null, generation, expiration); + if (rp.resultCode == ResultCode.OK) { + record = new Record(null, rp.generation, rp.expiration); return true; } - if (resultCode == ResultCode.KEY_NOT_FOUND_ERROR) { + if (rp.resultCode == ResultCode.KEY_NOT_FOUND_ERROR) { return true; } - if (resultCode == ResultCode.FILTERED_OUT) { + if (rp.resultCode == ResultCode.FILTERED_OUT) { if 
(policy.failOnFilteredOut) { - throw new AerospikeException(resultCode); + throw new AerospikeException(rp.resultCode); } return true; } - throw new AerospikeException(resultCode); - } - - @Override - protected boolean prepareRetry(boolean timeout) { - partition.prepareRetryRead(timeout); - return true; + throw new AerospikeException(rp.resultCode); } @Override diff --git a/client/src/com/aerospike/client/async/AsyncScanPartitionExecutor.java b/client/src/com/aerospike/client/async/AsyncScanPartitionExecutor.java index adf5dff7d..5ebd624df 100644 --- a/client/src/com/aerospike/client/async/AsyncScanPartitionExecutor.java +++ b/client/src/com/aerospike/client/async/AsyncScanPartitionExecutor.java @@ -55,7 +55,7 @@ public AsyncScanPartitionExecutor( this.tracker = tracker; this.random = new RandomShift(); - cluster.addTran(); + cluster.addCommandCount(); tracker.setSleepBetweenRetries(0); scanPartitions(); } diff --git a/client/src/com/aerospike/client/async/AsyncTouch.java b/client/src/com/aerospike/client/async/AsyncTouch.java index e4f988133..ff24608a3 100644 --- a/client/src/com/aerospike/client/async/AsyncTouch.java +++ b/client/src/com/aerospike/client/async/AsyncTouch.java @@ -1,5 +1,5 @@ /* - * Copyright 2012-2023 Aerospike, Inc. + * Copyright 2012-2024 Aerospike, Inc. * * Portions may be licensed to Aerospike, Inc. under one or more contributor * license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0. @@ -20,40 +20,15 @@ import com.aerospike.client.Key; import com.aerospike.client.ResultCode; import com.aerospike.client.cluster.Cluster; -import com.aerospike.client.cluster.Node; -import com.aerospike.client.cluster.Partition; import com.aerospike.client.listener.WriteListener; -import com.aerospike.client.metrics.LatencyType; import com.aerospike.client.policy.WritePolicy; -public final class AsyncTouch extends AsyncCommand { +public final class AsyncTouch extends AsyncWriteBase { private final WriteListener listener; - private final WritePolicy writePolicy; - private final Key key; - private final Partition partition; public AsyncTouch(Cluster cluster, WriteListener listener, WritePolicy writePolicy, Key key) { - super(writePolicy, true); + super(cluster, writePolicy, key); this.listener = listener; - this.writePolicy = writePolicy; - this.key = key; - this.partition = Partition.write(cluster, writePolicy, key); - cluster.addTran(); - } - - @Override - boolean isWrite() { - return true; - } - - @Override - Node getNode(Cluster cluster) { - return partition.getNodeWrite(cluster); - } - - @Override - protected LatencyType getLatencyType() { - return LatencyType.WRITE; } @Override @@ -63,11 +38,9 @@ protected void writeBuffer() { @Override protected boolean parseResult() { - validateHeaderSize(); - - int resultCode = dataBuffer[5] & 0xFF; + int resultCode = parseHeader(); - if (resultCode == 0) { + if (resultCode == ResultCode.OK) { return true; } @@ -81,12 +54,6 @@ protected boolean parseResult() { throw new AerospikeException(resultCode); } - @Override - boolean prepareRetry(boolean timeout) { - partition.prepareRetryWrite(timeout); - return true; - } - @Override protected void onSuccess() { if (listener != null) { diff --git a/client/src/com/aerospike/client/async/AsyncTxnAddKeys.java b/client/src/com/aerospike/client/async/AsyncTxnAddKeys.java new file mode 100644 index 000000000..3ac91732c --- /dev/null +++ b/client/src/com/aerospike/client/async/AsyncTxnAddKeys.java @@ -0,0 +1,73 @@ +/* + * Copyright 2012-2024 Aerospike, Inc. 
+ * + * Portions may be licensed to Aerospike, Inc. under one or more contributor + * license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ +package com.aerospike.client.async; + +import com.aerospike.client.AerospikeException; +import com.aerospike.client.Key; +import com.aerospike.client.ResultCode; +import com.aerospike.client.cluster.Cluster; +import com.aerospike.client.command.OperateArgs; +import com.aerospike.client.command.RecordParser; +import com.aerospike.client.listener.RecordListener; + +public final class AsyncTxnAddKeys extends AsyncWriteBase { + private final RecordListener listener; + private final OperateArgs args; + + public AsyncTxnAddKeys(Cluster cluster, RecordListener listener, Key key, OperateArgs args) { + super(cluster, args.writePolicy, key); + this.listener = listener; + this.args = args; + } + + @Override + protected void writeBuffer() { + setTxnAddKeys(args.writePolicy, key, args); + } + + @Override + protected boolean parseResult() { + RecordParser rp = new RecordParser(dataBuffer, dataOffset, receiveSize); + rp.parseTranDeadline(policy.txn); + + if (rp.resultCode == ResultCode.OK) { + return true; + } + + throw new AerospikeException(rp.resultCode); + } + + @Override + void onInDoubt() { + // The MRT monitor record might exist if AsyncTxnAddKeys command is inDoubt. + policy.txn.setMonitorInDoubt(); + } + + @Override + protected void onSuccess() { + if (listener != null) { + listener.onSuccess(key, null); + } + } + + @Override + protected void onFailure(AerospikeException e) { + if (listener != null) { + listener.onFailure(e); + } + } +} diff --git a/client/src/com/aerospike/client/async/AsyncTxnClose.java b/client/src/com/aerospike/client/async/AsyncTxnClose.java new file mode 100644 index 000000000..c06df23b6 --- /dev/null +++ b/client/src/com/aerospike/client/async/AsyncTxnClose.java @@ -0,0 +1,72 @@ +/* + * Copyright 2012-2024 Aerospike, Inc. + * + * Portions may be licensed to Aerospike, Inc. under one or more contributor + * license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. 
+ */ +package com.aerospike.client.async; + +import com.aerospike.client.AerospikeException; +import com.aerospike.client.Key; +import com.aerospike.client.ResultCode; +import com.aerospike.client.Txn; +import com.aerospike.client.cluster.Cluster; +import com.aerospike.client.listener.DeleteListener; +import com.aerospike.client.policy.WritePolicy; + +public final class AsyncTxnClose extends AsyncWriteBase { + private final Txn txn; + private final DeleteListener listener; + + public AsyncTxnClose( + Cluster cluster, + Txn txn, + DeleteListener listener, + WritePolicy writePolicy, + Key key + ) { + super(cluster, writePolicy, key); + this.txn = txn; + this.listener = listener; + } + + @Override + protected void writeBuffer() { + setTxnClose(txn, key); + } + + @Override + protected boolean parseResult() { + int resultCode = parseHeader(); + + if (resultCode == ResultCode.OK || resultCode == ResultCode.KEY_NOT_FOUND_ERROR) { + return true; + } + + throw new AerospikeException(resultCode); + } + + @Override + void onInDoubt() { + } + + @Override + protected void onSuccess() { + listener.onSuccess(key, true); + } + + @Override + protected void onFailure(AerospikeException e) { + listener.onFailure(e); + } +} diff --git a/proxy/src/com/aerospike/client/proxy/TouchCommandProxy.java b/client/src/com/aerospike/client/async/AsyncTxnMarkRollForward.java similarity index 50% rename from proxy/src/com/aerospike/client/proxy/TouchCommandProxy.java rename to client/src/com/aerospike/client/async/AsyncTxnMarkRollForward.java index b75fd4c74..8e640aa77 100644 --- a/proxy/src/com/aerospike/client/proxy/TouchCommandProxy.java +++ b/client/src/com/aerospike/client/async/AsyncTxnMarkRollForward.java @@ -1,5 +1,5 @@ /* - * Copyright 2012-2023 Aerospike, Inc. + * Copyright 2012-2024 Aerospike, Inc. * * Portions may be licensed to Aerospike, Inc. under one or more contributor * license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0. @@ -14,67 +14,57 @@ * License for the specific language governing permissions and limitations under * the License. 
*/ -package com.aerospike.client.proxy; +package com.aerospike.client.async; import com.aerospike.client.AerospikeException; import com.aerospike.client.Key; import com.aerospike.client.ResultCode; -import com.aerospike.client.command.Command; +import com.aerospike.client.cluster.Cluster; import com.aerospike.client.listener.WriteListener; import com.aerospike.client.policy.WritePolicy; -import com.aerospike.client.proxy.grpc.GrpcCallExecutor; -import com.aerospike.proxy.client.KVSGrpc; -public final class TouchCommandProxy extends SingleCommandProxy { +public final class AsyncTxnMarkRollForward extends AsyncWriteBase { private final WriteListener listener; - private final WritePolicy writePolicy; - private final Key key; - public TouchCommandProxy( - GrpcCallExecutor executor, + public AsyncTxnMarkRollForward( + Cluster cluster, WriteListener listener, WritePolicy writePolicy, Key key ) { - super(KVSGrpc.getTouchStreamingMethod(), executor, writePolicy); + super(cluster, writePolicy, key); this.listener = listener; - this.writePolicy = writePolicy; - this.key = key; } @Override - void writeCommand(Command command) { - command.setTouch(writePolicy, key); + protected void writeBuffer() { + setTxnMarkRollForward(key); } @Override - void parseResult(Parser parser) { - int resultCode = parser.parseResultCode(); + protected boolean parseResult() { + int resultCode = parseHeader(); - switch (resultCode) { - case ResultCode.OK: - break; + // MRT_COMMITTED is considered a success because it means a previous attempt already + // succeeded in notifying the server that the MRT will be rolled forward. + if (resultCode == ResultCode.OK || resultCode == ResultCode.MRT_COMMITTED) { + return true; + } - case ResultCode.FILTERED_OUT: - if (policy.failOnFilteredOut) { - throw new AerospikeException(resultCode); - } - break; + throw new AerospikeException(resultCode); + } - default: - throw new AerospikeException(resultCode); - } + @Override + void onInDoubt() { + } - try { - listener.onSuccess(key); - } - catch (Throwable t) { - logOnSuccessError(t); - } + @Override + protected void onSuccess() { + listener.onSuccess(key); } @Override - void onFailure(AerospikeException ae) { - listener.onFailure(ae); + protected void onFailure(AerospikeException e) { + listener.onFailure(e); } } diff --git a/client/src/com/aerospike/client/async/AsyncTxnMonitor.java b/client/src/com/aerospike/client/async/AsyncTxnMonitor.java new file mode 100644 index 000000000..332e51b64 --- /dev/null +++ b/client/src/com/aerospike/client/async/AsyncTxnMonitor.java @@ -0,0 +1,193 @@ +/* + * Copyright 2012-2024 Aerospike, Inc. + * + * Portions may be licensed to Aerospike, Inc. under one or more contributor + * license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. 
+ */ +package com.aerospike.client.async; + +import com.aerospike.client.AerospikeException; +import com.aerospike.client.BatchRecord; +import com.aerospike.client.Key; +import com.aerospike.client.Log; +import com.aerospike.client.Operation; +import com.aerospike.client.Record; +import com.aerospike.client.ResultCode; +import com.aerospike.client.Txn; +import com.aerospike.client.cluster.Cluster; +import com.aerospike.client.command.OperateArgs; +import com.aerospike.client.command.TxnMonitor; +import com.aerospike.client.listener.RecordListener; +import com.aerospike.client.policy.BatchPolicy; +import com.aerospike.client.policy.Policy; +import com.aerospike.client.policy.WritePolicy; +import com.aerospike.client.util.Util; +import java.util.List; + +public abstract class AsyncTxnMonitor { + public static void execute(EventLoop eventLoop, Cluster cluster, WritePolicy policy, AsyncWriteBase command) { + if (policy.txn == null) { + // Command is not run under a MRT monitor. Run original command. + eventLoop.execute(cluster, command); + return; + } + + Txn txn = policy.txn; + Key cmdKey = command.key; + + if (txn.getWrites().contains(cmdKey)) { + // MRT monitor already contains this key. Run original command. + eventLoop.execute(cluster, command); + return; + } + + // Add key to MRT monitor and then run original command. + Operation[] ops = TxnMonitor.getTranOps(txn, cmdKey); + AsyncTxnMonitor.Single ate = new AsyncTxnMonitor.Single(eventLoop, cluster, command); + ate.execute(policy, ops); + } + + public static void executeBatch( + BatchPolicy policy, + AsyncBatchExecutor executor, + AsyncCommand[] commands, + Key[] keys + ) { + if (policy.txn == null) { + // Command is not run under a MRT monitor. Run original command. + executor.execute(commands); + return; + } + + // Add write keys to MRT monitor and then run original command. + Operation[] ops = TxnMonitor.getTranOps(policy.txn, keys); + AsyncTxnMonitor.Batch ate = new AsyncTxnMonitor.Batch(executor, commands); + ate.execute(policy, ops); + } + + public static void executeBatch( + BatchPolicy policy, + AsyncBatchExecutor executor, + AsyncCommand[] commands, + List<BatchRecord> records + ) { + if (policy.txn == null) { + // Command is not run under a MRT monitor. Run original command. + executor.execute(commands); + return; + } + + // Add write keys to MRT monitor and then run original command. + Operation[] ops = TxnMonitor.getTranOps(policy.txn, records); + + if (ops == null) { + // Readonly batch does not need to add key digests. Run original command.
+ executor.execute(commands); + return; + } + + AsyncTxnMonitor.Batch ate = new AsyncTxnMonitor.Batch(executor, commands); + ate.execute(policy, ops); + } + + private static class Single extends AsyncTxnMonitor { + private final AsyncWriteBase command; + + private Single(EventLoop eventLoop, Cluster cluster, AsyncWriteBase command) { + super(eventLoop, cluster); + this.command = command; + } + + @Override + void runCommand() { + eventLoop.execute(cluster, command); + } + + @Override + void onFailure(AerospikeException ae) { + command.onFailure(ae); + } + } + + private static class Batch extends AsyncTxnMonitor { + private final AsyncBatchExecutor executor; + private final AsyncCommand[] commands; + + private Batch(AsyncBatchExecutor executor, AsyncCommand[] commands) { + super(executor.eventLoop, executor.cluster); + this.executor = executor; + this.commands = commands; + } + + @Override + void runCommand() { + executor.execute(commands); + } + + @Override + void onFailure(AerospikeException ae) { + executor.onFailure(ae); + } + } + + final EventLoop eventLoop; + final Cluster cluster; + + private AsyncTxnMonitor(EventLoop eventLoop, Cluster cluster) { + this.eventLoop = eventLoop; + this.cluster = cluster; + } + + void execute(Policy policy, Operation[] ops) { + Key tranKey = TxnMonitor.getTxnMonitorKey(policy.txn); + WritePolicy wp = TxnMonitor.copyTimeoutPolicy(policy); + + RecordListener tranListener = new RecordListener() { + @Override + public void onSuccess(Key key, Record record) { + try { + // Run original command. + runCommand(); + } + catch (AerospikeException ae) { + notifyFailure(ae); + } + catch (Throwable t) { + notifyFailure(new AerospikeException(t)); + } + } + + @Override + public void onFailure(AerospikeException ae) { + notifyFailure(new AerospikeException(ResultCode.TXN_FAILED, "Failed to add key(s) to MRT monitor", ae)); + } + }; + + // Add write key(s) to MRT monitor. + OperateArgs args = new OperateArgs(wp, null, null, ops); + AsyncTxnAddKeys tranCommand = new AsyncTxnAddKeys(cluster, tranListener, tranKey, args); + eventLoop.execute(cluster, tranCommand); + } + + private void notifyFailure(AerospikeException ae) { + try { + onFailure(ae); + } + catch (Throwable t) { + Log.error("notifyCommandFailure onFailure() failed: " + Util.getStackTrace(t)); + } + } + + abstract void onFailure(AerospikeException ae); + abstract void runCommand(); +} diff --git a/client/src/com/aerospike/client/async/AsyncTxnRoll.java b/client/src/com/aerospike/client/async/AsyncTxnRoll.java new file mode 100644 index 000000000..0e0f20686 --- /dev/null +++ b/client/src/com/aerospike/client/async/AsyncTxnRoll.java @@ -0,0 +1,484 @@ +/* + * Copyright 2012-2024 Aerospike, Inc. + * + * Portions may be licensed to Aerospike, Inc. under one or more contributor + * license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. 
+ */ +package com.aerospike.client.async; + +import java.util.List; +import java.util.Map; +import java.util.Set; + +import com.aerospike.client.AbortStatus; +import com.aerospike.client.AerospikeException; +import com.aerospike.client.BatchRecord; +import com.aerospike.client.CommitError; +import com.aerospike.client.CommitStatus; +import com.aerospike.client.Key; +import com.aerospike.client.Log; +import com.aerospike.client.ResultCode; +import com.aerospike.client.Txn; +import com.aerospike.client.cluster.Cluster; +import com.aerospike.client.command.BatchAttr; +import com.aerospike.client.command.BatchNode; +import com.aerospike.client.command.BatchNodeList; +import com.aerospike.client.command.Command; +import com.aerospike.client.command.TxnMonitor; +import com.aerospike.client.listener.AbortListener; +import com.aerospike.client.listener.BatchRecordArrayListener; +import com.aerospike.client.listener.CommitListener; +import com.aerospike.client.listener.DeleteListener; +import com.aerospike.client.listener.WriteListener; +import com.aerospike.client.policy.BatchPolicy; +import com.aerospike.client.policy.WritePolicy; +import com.aerospike.client.util.Util; + +public final class AsyncTxnRoll { + private final Cluster cluster; + private final EventLoop eventLoop; + private final BatchPolicy verifyPolicy; + private final BatchPolicy rollPolicy; + private final WritePolicy writePolicy; + private final Txn txn; + private final Key tranKey; + private CommitListener commitListener; + private AbortListener abortListener; + private BatchRecord[] verifyRecords; + private BatchRecord[] rollRecords; + private AerospikeException verifyException; + + public AsyncTxnRoll( + Cluster cluster, + EventLoop eventLoop, + BatchPolicy verifyPolicy, + BatchPolicy rollPolicy, + Txn txn + ) { + this.cluster = cluster; + this.eventLoop = eventLoop; + this.verifyPolicy = verifyPolicy; + this.rollPolicy = rollPolicy; + this.writePolicy = new WritePolicy(rollPolicy); + this.txn = txn; + this.tranKey = TxnMonitor.getTxnMonitorKey(txn); + } + + public void verify(CommitListener listener) { + commitListener = listener; + + BatchRecordArrayListener verifyListener = new BatchRecordArrayListener() { + @Override + public void onSuccess(BatchRecord[] records, boolean status) { + verifyRecords = records; + + if (status) { + txn.setState(Txn.State.VERIFIED); + commit(); + } + else { + txn.setState(Txn.State.ABORTED); + rollBack(); + } + } + + @Override + public void onFailure(BatchRecord[] records, AerospikeException ae) { + verifyRecords = records; + verifyException = ae; + txn.setState(Txn.State.ABORTED); + rollBack(); + } + }; + + verify(verifyListener); + } + + public void commit(CommitListener listener) { + commitListener = listener; + commit(); + } + + private void commit() { + if (txn.monitorExists()) { + markRollForward(); + } + else { + // There is nothing to roll-forward. 
+ txn.setState(Txn.State.COMMITTED); + closeOnCommit(true); + } + } + + public void abort(AbortListener listener) { + abortListener = listener; + txn.setState(Txn.State.ABORTED); + + BatchRecordArrayListener rollListener = new BatchRecordArrayListener() { + @Override + public void onSuccess(BatchRecord[] records, boolean status) { + rollRecords = records; + + if (status) { + closeOnAbort(); + } + else { + notifyAbortSuccess(AbortStatus.ROLL_BACK_ABANDONED); + } + } + + @Override + public void onFailure(BatchRecord[] records, AerospikeException ae) { + rollRecords = records; + notifyAbortSuccess(AbortStatus.ROLL_BACK_ABANDONED); + } + }; + + roll(rollListener, Command.INFO4_MRT_ROLL_BACK); + } + + private void verify(BatchRecordArrayListener verifyListener) { + // Validate record versions in a batch. + Set<Map.Entry<Key, Long>> reads = txn.getReads(); + int max = reads.size(); + + if (max == 0) { + verifyListener.onSuccess(new BatchRecord[0], true); + return; + } + + BatchRecord[] records = new BatchRecord[max]; + Key[] keys = new Key[max]; + Long[] versions = new Long[max]; + int count = 0; + + for (Map.Entry<Key, Long> entry : reads) { + Key key = entry.getKey(); + keys[count] = key; + records[count] = new BatchRecord(key, false); + versions[count] = entry.getValue(); + count++; + } + + AsyncBatchExecutor.BatchRecordArray executor = new AsyncBatchExecutor.BatchRecordArray( + eventLoop, cluster, verifyListener, records); + + List<BatchNode> bns = BatchNodeList.generate(cluster, verifyPolicy, keys, records, false, executor); + AsyncCommand[] commands = new AsyncCommand[bns.size()]; + + count = 0; + + for (BatchNode bn : bns) { + if (bn.offsetsSize == 1) { + int i = bn.offsets[0]; + commands[count++] = new AsyncBatchSingle.TxnVerify( + executor, cluster, verifyPolicy, versions[i], records[i], bn.node); + } + else { + commands[count++] = new AsyncBatch.TxnVerify( + executor, bn, verifyPolicy, keys, versions, records); + } + } + executor.execute(commands); + } + + private void markRollForward() { + // Tell MRT monitor that a roll-forward will commence.
+ try { + WriteListener writeListener = new WriteListener() { + @Override + public void onSuccess(Key key) { + txn.setState(Txn.State.COMMITTED); + txn.setInDoubt(false); + rollForward(); + } + + @Override + public void onFailure(AerospikeException ae) { + notifyMarkRollForwardFailure(CommitError.MARK_ROLL_FORWARD_ABANDONED, ae); + } + }; + + AsyncTxnMarkRollForward command = new AsyncTxnMarkRollForward(cluster, writeListener, writePolicy, tranKey); + eventLoop.execute(cluster, command); + } + catch (Throwable t) { + notifyMarkRollForwardFailure(CommitError.MARK_ROLL_FORWARD_ABANDONED, t); + } + } + + private void rollForward() { + try { + BatchRecordArrayListener rollListener = new BatchRecordArrayListener() { + @Override + public void onSuccess(BatchRecord[] records, boolean status) { + rollRecords = records; + + if (status) { + closeOnCommit(true); + } + else { + notifyCommitSuccess(CommitStatus.ROLL_FORWARD_ABANDONED); + } + } + + @Override + public void onFailure(BatchRecord[] records, AerospikeException ae) { + rollRecords = records; + notifyCommitSuccess(CommitStatus.ROLL_FORWARD_ABANDONED); + } + }; + + roll(rollListener, Command.INFO4_MRT_ROLL_FORWARD); + } + catch (Throwable t) { + notifyCommitSuccess(CommitStatus.ROLL_FORWARD_ABANDONED); + } + } + + private void rollBack() { + try { + BatchRecordArrayListener rollListener = new BatchRecordArrayListener() { + @Override + public void onSuccess(BatchRecord[] records, boolean status) { + rollRecords = records; + + if (status) { + closeOnCommit(false); + } + else { + notifyCommitFailure(CommitError.VERIFY_FAIL_ABORT_ABANDONED, null); + } + } + + @Override + public void onFailure(BatchRecord[] records, AerospikeException ae) { + rollRecords = records; + notifyCommitFailure(CommitError.VERIFY_FAIL_ABORT_ABANDONED, ae); + } + }; + + roll(rollListener, Command.INFO4_MRT_ROLL_BACK); + } + catch (Throwable t) { + notifyCommitFailure(CommitError.VERIFY_FAIL_ABORT_ABANDONED, t); + } + } + + private void roll(BatchRecordArrayListener rollListener, int txnAttr) { + Set<Key> keySet = txn.getWrites(); + + if (keySet.isEmpty()) { + rollListener.onSuccess(new BatchRecord[0], true); + return; + } + + Key[] keys = keySet.toArray(new Key[keySet.size()]); + BatchRecord[] records = new BatchRecord[keys.length]; + + for (int i = 0; i < keys.length; i++) { + records[i] = new BatchRecord(keys[i], true); + } + + BatchAttr attr = new BatchAttr(); + attr.setTxn(txnAttr); + + AsyncBatchExecutor.BatchRecordArray executor = new AsyncBatchExecutor.BatchRecordArray( + eventLoop, cluster, rollListener, records); + + List<BatchNode> bns = BatchNodeList.generate(cluster, rollPolicy, keys, records, true, executor); + AsyncCommand[] commands = new AsyncCommand[bns.size()]; + int count = 0; + + for (BatchNode bn : bns) { + if (bn.offsetsSize == 1) { + int i = bn.offsets[0]; + commands[count++] = new AsyncBatchSingle.TxnRoll( + executor, cluster, rollPolicy, txn, records[i], bn.node, txnAttr); + } + else { + commands[count++] = new AsyncBatch.TxnRoll( + executor, bn, rollPolicy, txn, keys, records, attr); + } + } + executor.execute(commands); + } + + private void closeOnCommit(boolean verified) { + if (! txn.monitorMightExist()) { + // There is no MRT monitor to remove. + if (verified) { + notifyCommitSuccess(CommitStatus.OK); + } + else { + // Record verification failed and MRT was aborted.
+ notifyCommitFailure(CommitError.VERIFY_FAIL, null); + } + return; + } + + try { + DeleteListener deleteListener = new DeleteListener() { + @Override + public void onSuccess(Key key, boolean existed) { + if (verified) { + notifyCommitSuccess(CommitStatus.OK); + } + else { + // Record verification failed and MRT was aborted. + notifyCommitFailure(CommitError.VERIFY_FAIL, null); + } + } + + @Override + public void onFailure(AerospikeException ae) { + if (verified) { + notifyCommitSuccess(CommitStatus.CLOSE_ABANDONED); + } + else { + notifyCommitFailure(CommitError.VERIFY_FAIL_CLOSE_ABANDONED, ae); + } + } + }; + + AsyncTxnClose command = new AsyncTxnClose(cluster, txn, deleteListener, writePolicy, tranKey); + eventLoop.execute(cluster, command); + } + catch (Throwable t) { + if (verified) { + notifyCommitSuccess(CommitStatus.CLOSE_ABANDONED); + } + else { + notifyCommitFailure(CommitError.VERIFY_FAIL_CLOSE_ABANDONED, t); + } + } + } + + private void closeOnAbort() { + if (! txn.monitorMightExist()) { + // There is no MRT monitor record to remove. + notifyAbortSuccess(AbortStatus.OK); + return; + } + + try { + DeleteListener deleteListener = new DeleteListener() { + @Override + public void onSuccess(Key key, boolean existed) { + notifyAbortSuccess(AbortStatus.OK); + } + + @Override + public void onFailure(AerospikeException ae) { + notifyAbortSuccess(AbortStatus.CLOSE_ABANDONED); + } + }; + + AsyncTxnClose command = new AsyncTxnClose(cluster, txn, deleteListener, writePolicy, tranKey); + eventLoop.execute(cluster, command); + } + catch (Throwable t) { + notifyAbortSuccess(AbortStatus.CLOSE_ABANDONED); + } + } + + private void notifyCommitSuccess(CommitStatus status) { + txn.clear(); + + try { + commitListener.onSuccess(status); + } + catch (Throwable t) { + Log.error("CommitListener onSuccess() failed: " + Util.getStackTrace(t)); + } + } + + private void notifyCommitFailure(CommitError error, Throwable cause) { + AerospikeException.Commit aec = createCommitException(error, cause); + + if (verifyException != null) { + aec.addSuppressed(verifyException); + } + + notifyCommitFailure(aec); + } + + private void notifyMarkRollForwardFailure(CommitError error, Throwable cause) { + AerospikeException.Commit aec = createCommitException(error, cause); + + if (cause instanceof AerospikeException) { + AerospikeException ae = (AerospikeException)cause; + + if (ae.getResultCode() == ResultCode.MRT_ABORTED) { + aec.setInDoubt(false); + txn.setInDoubt(false); + txn.setState(Txn.State.ABORTED); + } + else if (txn.getInDoubt()) { + // The transaction was already inDoubt and just failed again, + // so the new exception should also be inDoubt. + aec.setInDoubt(true); + } + else if (ae.getInDoubt()){ + // The current exception is inDoubt. 
+ aec.setInDoubt(true); + txn.setInDoubt(true); + } + } + else { + if (txn.getInDoubt()) { + aec.setInDoubt(true); + } + } + + notifyCommitFailure(aec); + } + + private AerospikeException.Commit createCommitException(CommitError error, Throwable cause) { + if (cause != null) { + AerospikeException.Commit aec = new AerospikeException.Commit(error, verifyRecords, rollRecords, cause); + + if (cause instanceof AerospikeException) { + AerospikeException src = (AerospikeException)cause; + aec.setNode(src.getNode()); + aec.setPolicy(src.getPolicy()); + aec.setIteration(src.getIteration()); + aec.setInDoubt(src.getInDoubt()); + } + return aec; + } + else { + return new AerospikeException.Commit(error, verifyRecords, rollRecords); + } + } + + private void notifyCommitFailure(AerospikeException.Commit aec) { + try { + commitListener.onFailure(aec); + } + catch (Throwable t) { + Log.error("CommitListener onFailure() failed: " + Util.getStackTrace(t)); + } + } + + private void notifyAbortSuccess(AbortStatus status) { + txn.clear(); + + try { + abortListener.onSuccess(status); + } + catch (Throwable t) { + Log.error("AbortListener onSuccess() failed: " + Util.getStackTrace(t)); + } + } +} diff --git a/client/src/com/aerospike/client/async/AsyncWrite.java b/client/src/com/aerospike/client/async/AsyncWrite.java index fd81048dd..a2f2883d2 100644 --- a/client/src/com/aerospike/client/async/AsyncWrite.java +++ b/client/src/com/aerospike/client/async/AsyncWrite.java @@ -1,5 +1,5 @@ /* - * Copyright 2012-2023 Aerospike, Inc. + * Copyright 2012-2024 Aerospike, Inc. * * Portions may be licensed to Aerospike, Inc. under one or more contributor * license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0. @@ -22,17 +22,11 @@ import com.aerospike.client.Operation; import com.aerospike.client.ResultCode; import com.aerospike.client.cluster.Cluster; -import com.aerospike.client.cluster.Node; -import com.aerospike.client.cluster.Partition; import com.aerospike.client.listener.WriteListener; -import com.aerospike.client.metrics.LatencyType; import com.aerospike.client.policy.WritePolicy; -public final class AsyncWrite extends AsyncCommand { +public final class AsyncWrite extends AsyncWriteBase { private final WriteListener listener; - private final WritePolicy writePolicy; - private final Key key; - private final Partition partition; private final Bin[] bins; private final Operation.Type operation; @@ -44,29 +38,10 @@ public AsyncWrite( Bin[] bins, Operation.Type operation ) { - super(writePolicy, true); + super(cluster, writePolicy, key); this.listener = listener; - this.writePolicy = writePolicy; - this.key = key; - this.partition = Partition.write(cluster, writePolicy, key); this.bins = bins; this.operation = operation; - cluster.addTran(); - } - - @Override - boolean isWrite() { - return true; - } - - @Override - Node getNode(Cluster cluster) { - return partition.getNodeWrite(cluster); - } - - @Override - protected LatencyType getLatencyType() { - return LatencyType.WRITE; } @Override @@ -76,11 +51,9 @@ protected void writeBuffer() { @Override protected boolean parseResult() { - validateHeaderSize(); - - int resultCode = dataBuffer[5] & 0xFF; + int resultCode = parseHeader(); - if (resultCode == 0) { + if (resultCode == ResultCode.OK) { return true; } @@ -94,12 +67,6 @@ protected boolean parseResult() { throw new AerospikeException(resultCode); } - @Override - boolean prepareRetry(boolean timeout) { - partition.prepareRetryWrite(timeout); - return true; - } - @Override protected void onSuccess() 
{ if (listener != null) { diff --git a/client/src/com/aerospike/client/async/AsyncWriteBase.java b/client/src/com/aerospike/client/async/AsyncWriteBase.java new file mode 100644 index 000000000..54e1c63ba --- /dev/null +++ b/client/src/com/aerospike/client/async/AsyncWriteBase.java @@ -0,0 +1,73 @@ +/* + * Copyright 2012-2024 Aerospike, Inc. + * + * Portions may be licensed to Aerospike, Inc. under one or more contributor + * license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ +package com.aerospike.client.async; + +import com.aerospike.client.Key; +import com.aerospike.client.cluster.Cluster; +import com.aerospike.client.cluster.Node; +import com.aerospike.client.cluster.Partition; +import com.aerospike.client.command.RecordParser; +import com.aerospike.client.metrics.LatencyType; +import com.aerospike.client.policy.WritePolicy; + +public abstract class AsyncWriteBase extends AsyncCommand { + final WritePolicy writePolicy; + final Key key; + final Partition partition; + + public AsyncWriteBase(Cluster cluster, WritePolicy writePolicy, Key key) { + super(writePolicy, true); + this.writePolicy = writePolicy; + this.key = key; + this.partition = Partition.write(cluster, writePolicy, key); + cluster.addCommandCount(); + } + + @Override + boolean isWrite() { + return true; + } + + @Override + Node getNode(Cluster cluster) { + return partition.getNodeWrite(cluster); + } + + @Override + protected LatencyType getLatencyType() { + return LatencyType.WRITE; + } + + @Override + boolean prepareRetry(boolean timeout) { + partition.prepareRetryWrite(timeout); + return true; + } + + @Override + void onInDoubt() { + if (writePolicy.txn != null) { + writePolicy.txn.onWriteInDoubt(key); + } + } + + protected int parseHeader() { + RecordParser rp = new RecordParser(dataBuffer, dataOffset, receiveSize); + rp.parseFields(policy.txn, key, true); + return rp.resultCode; + } +} diff --git a/client/src/com/aerospike/client/async/NettyCommand.java b/client/src/com/aerospike/client/async/NettyCommand.java index 4f1ff82d7..00d06bb9a 100644 --- a/client/src/com/aerospike/client/async/NettyCommand.java +++ b/client/src/com/aerospike/client/async/NettyCommand.java @@ -593,7 +593,7 @@ private void parseAuthBody() { if (resultCode != 0 && resultCode != ResultCode.SECURITY_NOT_ENABLED) { // Authentication failed. Session token probably expired. // Signal tend thread to perform node login, so future - // transactions do not fail. + // commands do not fail. node.signalLogin(); // This is a rare event because the client tracks session @@ -1041,11 +1041,7 @@ public void run() { } private void retry(AerospikeException ae, long deadline) { - ae.setNode(node); - ae.setPolicy(command.policy); - ae.setIteration(iteration); - ae.setInDoubt(command.isWrite(), command.commandSentCounter); - command.addSubException(ae); + command.onRetryException(node, iteration, ae); if (! 
command.prepareRetry(ae.getResultCode() != ResultCode.SERVER_NOT_AVAILABLE)) { // Batch may be retried in separate commands. @@ -1093,15 +1089,10 @@ private void onFatalError(AerospikeException ae) { private void notifyFailure(AerospikeException ae) { try { - ae.setNode(node); - ae.setPolicy(command.policy); - ae.setIteration(iteration); - ae.setInDoubt(command.isWrite(), command.commandSentCounter); - ae.setSubExceptions(command.subExceptions); - command.onFailure(ae); + command.onFinalException(node, iteration, ae); } catch (Throwable e) { - logError("onFailure() error", e); + logError("onFinalException() error", e); } } diff --git a/client/src/com/aerospike/client/async/NettyConnection.java b/client/src/com/aerospike/client/async/NettyConnection.java index 49923061e..068854449 100644 --- a/client/src/com/aerospike/client/async/NettyConnection.java +++ b/client/src/com/aerospike/client/async/NettyConnection.java @@ -1,5 +1,5 @@ /* - * Copyright 2012-2021 Aerospike, Inc. + * Copyright 2012-2024 Aerospike, Inc. * * Portions may be licensed to Aerospike, Inc. under one or more contributor * license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0. @@ -39,7 +39,7 @@ public NettyConnection(SocketChannel channel) { } /** - * Validate connection in a transaction. + * Validate connection in a command. */ @Override public boolean isValid(ByteBuffer notUsed) { diff --git a/client/src/com/aerospike/client/async/NettyConnector.java b/client/src/com/aerospike/client/async/NettyConnector.java index 6a77a213d..fa57b8288 100644 --- a/client/src/com/aerospike/client/async/NettyConnector.java +++ b/client/src/com/aerospike/client/async/NettyConnector.java @@ -1,5 +1,5 @@ /* - * Copyright 2012-2022 Aerospike, Inc. + * Copyright 2012-2024 Aerospike, Inc. * * Portions may be licensed to Aerospike, Inc. under one or more contributor * license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0. @@ -209,7 +209,7 @@ private void parseAuthBody() { if (resultCode != 0 && resultCode != ResultCode.SECURITY_NOT_ENABLED) { // Authentication failed. Session token probably expired. // Signal tend thread to perform node login, so future - // transactions do not fail. + // commands do not fail. node.signalLogin(); // This is a rare event because the client tracks session diff --git a/client/src/com/aerospike/client/async/NettyRecover.java b/client/src/com/aerospike/client/async/NettyRecover.java index 895fe229b..0409bfe2b 100644 --- a/client/src/com/aerospike/client/async/NettyRecover.java +++ b/client/src/com/aerospike/client/async/NettyRecover.java @@ -1,5 +1,5 @@ /* - * Copyright 2012-2021 Aerospike, Inc. + * Copyright 2012-2024 Aerospike, Inc. * * Portions may be licensed to Aerospike, Inc. under one or more contributor * license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0. @@ -105,7 +105,7 @@ public NettyRecover(NettyCommand cmd) { public final void timeout() { //System.out.println("" + tranId + " timeout expired. close connection"); - // Transaction has been delayed long enough. + // Command has been delayed long enough. // User has already been notified. // timeoutTask has already been removed, so avoid cancel. 
abort(false); diff --git a/client/src/com/aerospike/client/async/NioCommand.java b/client/src/com/aerospike/client/async/NioCommand.java index d25be5ac4..d06b1c5e4 100644 --- a/client/src/com/aerospike/client/async/NioCommand.java +++ b/client/src/com/aerospike/client/async/NioCommand.java @@ -524,7 +524,7 @@ private final void readAuthBody() { if (resultCode != 0 && resultCode != ResultCode.SECURITY_NOT_ENABLED) { // Authentication failed. Session token probably expired. // Signal tend thread to perform node login, so future - // transactions do not fail. + // commands do not fail. node.signalLogin(); // This is a rare event because the client tracks session @@ -935,11 +935,7 @@ public void run() { } private final void retry(AerospikeException ae, long deadline) { - ae.setNode(node); - ae.setPolicy(command.policy); - ae.setIteration(iteration); - ae.setInDoubt(command.isWrite(), command.commandSentCounter); - command.addSubException(ae); + command.onRetryException(node, iteration, ae); if (! command.prepareRetry(ae.getResultCode() != ResultCode.SERVER_NOT_AVAILABLE)) { // Batch may be retried in separate commands. @@ -976,15 +972,10 @@ protected final void onApplicationError(AerospikeException ae) { private final void notifyFailure(AerospikeException ae) { try { - ae.setNode(node); - ae.setPolicy(command.policy); - ae.setIteration(iteration); - ae.setInDoubt(command.isWrite(), command.commandSentCounter); - ae.setSubExceptions(command.subExceptions); - command.onFailure(ae); + command.onFinalException(node, iteration, ae); } catch (Throwable e) { - Log.error("onFailure() error: " + Util.getErrorMessage(e)); + Log.error("onFinalException() error: " + Util.getErrorMessage(e)); } } diff --git a/client/src/com/aerospike/client/async/NioConnection.java b/client/src/com/aerospike/client/async/NioConnection.java index a06c89581..487b80385 100644 --- a/client/src/com/aerospike/client/async/NioConnection.java +++ b/client/src/com/aerospike/client/async/NioConnection.java @@ -1,5 +1,5 @@ /* - * Copyright 2012-2023 Aerospike, Inc. + * Copyright 2012-2024 Aerospike, Inc. * * Portions may be licensed to Aerospike, Inc. under one or more contributor * license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0. @@ -116,7 +116,7 @@ public boolean read(ByteBuffer byteBuffer) throws IOException { } /** - * Validate connection in a transaction. Return true if socket is connected and + * Validate connection in a command. Return true if socket is connected and * has no data in it's buffer. Return false, if not connected, socket read error * or has data in it's buffer. */ diff --git a/client/src/com/aerospike/client/async/NioConnector.java b/client/src/com/aerospike/client/async/NioConnector.java index 623ce0806..acbbc7c0a 100644 --- a/client/src/com/aerospike/client/async/NioConnector.java +++ b/client/src/com/aerospike/client/async/NioConnector.java @@ -1,5 +1,5 @@ /* - * Copyright 2012-2023 Aerospike, Inc. + * Copyright 2012-2024 Aerospike, Inc. * * Portions may be licensed to Aerospike, Inc. under one or more contributor * license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0. @@ -159,7 +159,7 @@ private final void readAuthBody() { if (resultCode != 0 && resultCode != ResultCode.SECURITY_NOT_ENABLED) { // Authentication failed. Session token probably expired. // Signal tend thread to perform node login, so future - // transactions do not fail. + // commands do not fail. 
node.signalLogin(); // This is a rare event because the client tracks session diff --git a/client/src/com/aerospike/client/async/NioRecover.java b/client/src/com/aerospike/client/async/NioRecover.java index f640713b5..ff59812f7 100644 --- a/client/src/com/aerospike/client/async/NioRecover.java +++ b/client/src/com/aerospike/client/async/NioRecover.java @@ -1,5 +1,5 @@ /* - * Copyright 2012-2023 Aerospike, Inc. + * Copyright 2012-2024 Aerospike, Inc. * * Portions may be licensed to Aerospike, Inc. under one or more contributor * license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0. @@ -118,7 +118,7 @@ public NioRecover(NioCommand cmd) { public final void timeout() { //System.out.println("" + tranId + " timeout expired. close connection"); - // Transaction has been delayed long enough. + // Command has been delayed long enough. // User has already been notified. // timeoutTask has already been removed, so avoid cancel. abort(false); diff --git a/client/src/com/aerospike/client/cluster/Cluster.java b/client/src/com/aerospike/client/cluster/Cluster.java index b9c3a4ef2..740e42679 100644 --- a/client/src/com/aerospike/client/cluster/Cluster.java +++ b/client/src/com/aerospike/client/cluster/Cluster.java @@ -126,7 +126,7 @@ public class Cluster implements Runnable, Closeable { // Extra event loop state for this cluster. public final EventState[] eventState; - // Maximum socket idle to validate connections in transactions. + // Maximum socket idle to validate connections in command. private final long maxSocketIdleNanosTran; // Maximum socket idle to trim peak connections to min connections. @@ -202,7 +202,7 @@ public class Cluster implements Runnable, Closeable { MetricsPolicy metricsPolicy; private volatile MetricsListener metricsListener; private final AtomicLong retryCount = new AtomicLong(); - private final AtomicLong tranCount = new AtomicLong(); + private final AtomicLong commandCount = new AtomicLong(); private final AtomicLong delayQueueTimeoutCount = new AtomicLong(); public Cluster(AerospikeClient client, ClientPolicy policy, Host[] hosts) { @@ -1373,37 +1373,45 @@ public final boolean isActive() { } /** - * Increment transaction count when metrics are enabled. + * Increment command count when metrics are enabled. */ - public final void addTran() { + public final void addCommandCount() { if (metricsEnabled) { - tranCount.getAndIncrement(); + commandCount.getAndIncrement(); } } /** - * Return transaction count. The value is cumulative and not reset per metrics interval. + * Return command count. The value is cumulative and not reset per metrics interval. + */ + public final long getCommandCount() { + return commandCount.get(); + } + + /** + * Return command count. The value is cumulative and not reset per metrics interval. + * This function is left for backwards compatibility. Use {@link #getCommandCount()} instead. */ public final long getTranCount() { - return tranCount.get(); + return commandCount.get(); } /** - * Increment transaction retry count. There can be multiple retries for a single transaction. + * Increment command retry count. There can be multiple retries for a single command. */ public final void addRetry() { retryCount.getAndIncrement(); } /** - * Add transaction retry count. There can be multiple retries for a single transaction. + * Add command retry count. There can be multiple retries for a single command. */ public final void addRetries(int count) { retryCount.getAndAdd(count); } /** - * Return transaction retry count. 
The value is cumulative and not reset per metrics interval. + * Return command retry count. The value is cumulative and not reset per metrics interval. */ public final long getRetryCount() { return retryCount.get(); diff --git a/client/src/com/aerospike/client/cluster/ClusterStats.java b/client/src/com/aerospike/client/cluster/ClusterStats.java index 213ef223a..60f2af237 100644 --- a/client/src/com/aerospike/client/cluster/ClusterStats.java +++ b/client/src/com/aerospike/client/cluster/ClusterStats.java @@ -44,7 +44,7 @@ public final class ClusterStats { public final int invalidNodeCount; /** - * Count of transaction retries since the client was started. + * Count of command retries since the client was started. */ public final long retryCount; diff --git a/client/src/com/aerospike/client/cluster/Node.java b/client/src/com/aerospike/client/cluster/Node.java index 8fa6be5a7..61730040b 100644 --- a/client/src/com/aerospike/client/cluster/Node.java +++ b/client/src/com/aerospike/client/cluster/Node.java @@ -955,7 +955,7 @@ public final boolean putAsyncConnection(AsyncConnection conn, int index) { // This should not happen since connection slots are reserved in advance // and total connections should never exceed maxSize. If it does happen, // it's highly likely that total count was decremented twice for the same - // transaction, causing the connection balancer to create more connections + // command, causing the connection balancer to create more connections // than necessary. Attempt to correct situation by not decrementing total // when this excess connection is closed. conn.close(); diff --git a/client/src/com/aerospike/client/cluster/NodeStats.java b/client/src/com/aerospike/client/cluster/NodeStats.java index 8cf78da92..042874e9c 100644 --- a/client/src/com/aerospike/client/cluster/NodeStats.java +++ b/client/src/com/aerospike/client/cluster/NodeStats.java @@ -36,14 +36,14 @@ public final class NodeStats { public ConnectionStats async; /** - * Transaction error count since node was initialized. If the error is retryable, multiple errors per - * transaction may occur. + * Command error count since node was initialized. If the error is retryable, multiple errors per + * command may occur. */ public final long errorCount; /** - * Transaction timeout count since node was initialized. If the timeout is retryable (ie socketTimeout), - * multiple timeouts per transaction may occur. + * Command timeout count since node was initialized. If the timeout is retryable (ie socketTimeout), + * multiple timeouts per command may occur. 
*/ public final long timeoutCount; diff --git a/client/src/com/aerospike/client/command/Batch.java b/client/src/com/aerospike/client/command/Batch.java index a5168c061..53761aae3 100644 --- a/client/src/com/aerospike/client/command/Batch.java +++ b/client/src/com/aerospike/client/command/Batch.java @@ -28,6 +28,7 @@ import com.aerospike.client.Operation; import com.aerospike.client.Record; import com.aerospike.client.ResultCode; +import com.aerospike.client.Txn; import com.aerospike.client.cluster.Cluster; import com.aerospike.client.metrics.LatencyType; import com.aerospike.client.policy.BatchPolicy; @@ -65,10 +66,10 @@ protected void writeBuffer() { @Override protected boolean parseRow() { - skipKey(fieldCount); - BatchRead record = records.get(batchIndex); + parseFieldsRead(record.key); + if (resultCode == 0) { record.setRecord(parseRecord()); } @@ -134,7 +135,7 @@ protected void writeBuffer() { @Override protected boolean parseRow() { - skipKey(fieldCount); + parseFieldsRead(keys[batchIndex]); if (resultCode == 0) { records[batchIndex] = parseRecord(); @@ -187,12 +188,7 @@ protected void writeBuffer() { @Override protected boolean parseRow() { - skipKey(fieldCount); - - if (opCount > 0) { - throw new AerospikeException.Parse("Received bins that were not requested!"); - } - + parseFieldsRead(keys[batchIndex]); existsArray[batchIndex] = resultCode == 0; return true; } @@ -242,10 +238,10 @@ protected void writeBuffer() { @Override protected boolean parseRow() { - skipKey(fieldCount); - BatchRecord record = records.get(batchIndex); + parseFields(record); + if (resultCode == 0) { record.setRecord(parseRecord()); return true; @@ -277,6 +273,10 @@ protected void inDoubt() { if (record.resultCode == ResultCode.NO_RESPONSE) { record.inDoubt = record.hasWrite; + + if (record.inDoubt && policy.txn != null) { + policy.txn.onWriteInDoubt(record.key); + } } } } @@ -331,10 +331,10 @@ protected void writeBuffer() { @Override protected boolean parseRow() { - skipKey(fieldCount); - BatchRecord record = records[batchIndex]; + parseFields(record); + if (resultCode == 0) { record.setRecord(parseRecord()); } @@ -356,6 +356,10 @@ protected void inDoubt() { if (record.resultCode == ResultCode.NO_RESPONSE) { record.inDoubt = true; + + if (policy.txn != null) { + policy.txn.onWriteInDoubt(record.key); + } } } } @@ -416,10 +420,10 @@ protected void writeBuffer() { @Override protected boolean parseRow() { - skipKey(fieldCount); - BatchRecord record = records[batchIndex]; + parseFields(record); + if (resultCode == 0) { record.setRecord(parseRecord()); return true; @@ -455,6 +459,10 @@ protected void inDoubt() { if (record.resultCode == ResultCode.NO_RESPONSE) { record.inDoubt = true; + + if (policy.txn != null) { + policy.txn.onWriteInDoubt(record.key); + } } } } @@ -470,6 +478,142 @@ protected List generateBatchNodes() { } } + //------------------------------------------------------- + // MRT + //------------------------------------------------------- + + public static final class TxnVerify extends BatchCommand { + private final Key[] keys; + private final Long[] versions; + private final BatchRecord[] records; + + public TxnVerify( + Cluster cluster, + BatchNode batch, + BatchPolicy batchPolicy, + Key[] keys, + Long[] versions, + BatchRecord[] records, + BatchStatus status + ) { + super(cluster, batch, batchPolicy, status, false); + this.keys = keys; + this.versions = versions; + this.records = records; + } + + @Override + protected boolean isWrite() { + return false; + } + + @Override + protected void 
writeBuffer() { + setBatchTxnVerify(batchPolicy, keys, versions, batch); + } + + @Override + protected boolean parseRow() { + skipKey(fieldCount); + + BatchRecord record = records[batchIndex]; + + if (resultCode == 0) { + record.resultCode = resultCode; + } + else { + record.setError(resultCode, false); + status.setRowError(); + } + return true; + } + + @Override + protected BatchCommand createCommand(BatchNode batchNode) { + return new TxnVerify(cluster, batchNode, batchPolicy, keys, versions, records, status); + } + + @Override + protected List generateBatchNodes() { + return BatchNodeList.generate(cluster, batchPolicy, keys, records, sequenceAP, sequenceSC, batch, false, status); + } + } + + public static final class TxnRoll extends BatchCommand { + private final Txn txn; + private final Key[] keys; + private final BatchRecord[] records; + private final BatchAttr attr; + + public TxnRoll( + Cluster cluster, + BatchNode batch, + BatchPolicy batchPolicy, + Txn txn, + Key[] keys, + BatchRecord[] records, + BatchAttr attr, + BatchStatus status + ) { + super(cluster, batch, batchPolicy, status, false); + this.txn = txn; + this.keys = keys; + this.records = records; + this.attr = attr; + } + + @Override + protected boolean isWrite() { + return attr.hasWrite; + } + + @Override + protected void writeBuffer() { + setBatchTxnRoll(batchPolicy, txn, keys, batch, attr); + } + + @Override + protected boolean parseRow() { + skipKey(fieldCount); + + BatchRecord record = records[batchIndex]; + + if (resultCode == 0) { + record.resultCode = resultCode; + } + else { + record.setError(resultCode, Command.batchInDoubt(attr.hasWrite, commandSentCounter)); + status.setRowError(); + } + return true; + } + + @Override + protected void inDoubt() { + if (!attr.hasWrite) { + return; + } + + for (int index : batch.offsets) { + BatchRecord record = records[index]; + + if (record.resultCode == ResultCode.NO_RESPONSE) { + record.inDoubt = true; + } + } + } + + @Override + protected BatchCommand createCommand(BatchNode batchNode) { + return new TxnRoll(cluster, batchNode, batchPolicy, txn, keys, records, attr, status); + } + + @Override + protected List generateBatchNodes() { + return BatchNodeList.generate(cluster, batchPolicy, keys, records, sequenceAP, sequenceSC, batch, attr.hasWrite, status); + } + } + //------------------------------------------------------- // Batch Base Command //------------------------------------------------------- @@ -517,6 +661,32 @@ public void run() { } } + protected final void parseFieldsRead(Key key) { + if (policy.txn != null) { + Long version = parseVersion(fieldCount); + policy.txn.onRead(key, version); + } + else { + skipKey(fieldCount); + } + } + + protected final void parseFields(BatchRecord br) { + if (policy.txn != null) { + Long version = parseVersion(fieldCount); + + if (br.hasWrite) { + policy.txn.onWrite(br.key, version, resultCode); + } + else { + policy.txn.onRead(br.key, version); + } + } + else { + skipKey(fieldCount); + } + } + @Override protected void addSubException(AerospikeException ae) { status.addSubException(ae); diff --git a/client/src/com/aerospike/client/command/BatchAttr.java b/client/src/com/aerospike/client/command/BatchAttr.java index 7f063537e..f589aae9d 100644 --- a/client/src/com/aerospike/client/command/BatchAttr.java +++ b/client/src/com/aerospike/client/command/BatchAttr.java @@ -32,6 +32,7 @@ public final class BatchAttr { public int readAttr; public int writeAttr; public int infoAttr; + public int txnAttr; public int expiration; public int opSize; 
public short generation; @@ -321,4 +322,16 @@ public void setOpSize(Operation[] ops) { } opSize = dataOffset; } + + public void setTxn(int attr) { + filterExp = null; + readAttr = 0; + writeAttr = Command.INFO2_WRITE | Command.INFO2_RESPOND_ALL_OPS | Command.INFO2_DURABLE_DELETE; + infoAttr = 0; + txnAttr = attr; + expiration = 0; + generation = 0; + hasWrite = true; + sendKey = false; + } } diff --git a/client/src/com/aerospike/client/command/BatchExecutor.java b/client/src/com/aerospike/client/command/BatchExecutor.java index dbd5f9f9d..534fc67ee 100644 --- a/client/src/com/aerospike/client/command/BatchExecutor.java +++ b/client/src/com/aerospike/client/command/BatchExecutor.java @@ -26,7 +26,7 @@ public final class BatchExecutor { public static void execute(Cluster cluster, BatchPolicy policy, IBatchCommand[] commands, BatchStatus status) { - cluster.addTran(); + cluster.addCommandCount(); if (commands.length <= 1) { // Run batch request in same thread. diff --git a/client/src/com/aerospike/client/command/BatchSingle.java b/client/src/com/aerospike/client/command/BatchSingle.java index 2d2860545..3d09b8b17 100644 --- a/client/src/com/aerospike/client/command/BatchSingle.java +++ b/client/src/com/aerospike/client/command/BatchSingle.java @@ -25,6 +25,7 @@ import com.aerospike.client.Operation; import com.aerospike.client.Record; import com.aerospike.client.ResultCode; +import com.aerospike.client.Txn; import com.aerospike.client.Value; import com.aerospike.client.cluster.Cluster; import com.aerospike.client.cluster.Connection; @@ -92,6 +93,7 @@ protected void writeBuffer() { @Override protected void parseResult(Connection conn) throws IOException { RecordParser rp = new RecordParser(conn, dataBuffer); + rp.parseFields(policy.txn, key, false); if (rp.resultCode == ResultCode.OK) { records[index] = rp.parseRecord(isOperation); @@ -127,6 +129,7 @@ protected void writeBuffer() { @Override protected void parseResult(Connection conn) throws IOException { RecordParser rp = new RecordParser(conn, dataBuffer); + rp.parseFields(policy.txn, key, false); if (rp.resultCode == 0) { records[index] = new Record(null, rp.generation, rp.expiration); @@ -156,6 +159,7 @@ protected void writeBuffer() { @Override protected void parseResult(Connection conn) throws IOException { RecordParser rp = new RecordParser(conn, dataBuffer); + rp.parseFields(policy.txn, key, false); if (rp.resultCode == ResultCode.OK) { record.setRecord(rp.parseRecord(true)); @@ -195,7 +199,7 @@ protected void writeBuffer() { @Override protected void parseResult(Connection conn) throws IOException { RecordParser rp = new RecordParser(conn, dataBuffer); - + rp.parseFields(policy.txn, key, false); existsArray[index] = rp.resultCode == 0; } } @@ -228,6 +232,7 @@ protected void writeBuffer() { @Override protected void parseResult(Connection conn) throws IOException { RecordParser rp = new RecordParser(conn, dataBuffer); + rp.parseFields(policy.txn, key, record.hasWrite); if (rp.resultCode == ResultCode.OK) { record.setRecord(rp.parseRecord(true)); @@ -271,6 +276,7 @@ protected void writeBuffer() { @Override protected void parseResult(Connection conn) throws IOException { RecordParser rp = new RecordParser(conn, dataBuffer); + rp.parseFields(policy.txn, key, true); if (rp.resultCode == ResultCode.OK || rp.resultCode == ResultCode.KEY_NOT_FOUND_ERROR) { record.setRecord(new Record(null, rp.generation, rp.expiration)); @@ -323,6 +329,7 @@ protected void writeBuffer() { @Override protected void parseResult(Connection conn) throws IOException { 
RecordParser rp = new RecordParser(conn, dataBuffer); + rp.parseFields(policy.txn, key, true); if (rp.resultCode == ResultCode.OK) { record.setRecord(rp.parseRecord(false)); @@ -353,6 +360,92 @@ public void setInDoubt() { } } + //------------------------------------------------------- + // MRT + //------------------------------------------------------- + + public static final class TxnVerify extends BaseCommand { + private final long version; + private final BatchRecord record; + + public TxnVerify( + Cluster cluster, + BatchPolicy policy, + long version, + BatchRecord record, + BatchStatus status, + Node node + ) { + super(cluster, policy, status, record.key, node, false); + this.version = version; + this.record = record; + } + + @Override + protected void writeBuffer() { + setTxnVerify(record.key, version); + } + + @Override + protected void parseResult(Connection conn) throws IOException { + RecordParser rp = new RecordParser(conn, dataBuffer); + + if (rp.resultCode == ResultCode.OK) { + record.resultCode = rp.resultCode; + } + else { + record.setError(rp.resultCode, false); + status.setRowError(); + } + } + } + + public static final class TxnRoll extends BaseCommand { + private final Txn txn; + private final BatchRecord record; + private final int attr; + + public TxnRoll( + Cluster cluster, + BatchPolicy policy, + Txn txn, + BatchRecord record, + BatchStatus status, + Node node, + int attr + ) { + super(cluster, policy, status, record.key, node, true); + this.txn = txn; + this.record = record; + this.attr = attr; + } + + @Override + protected void writeBuffer() { + setTxnRoll(record.key, txn, attr); + } + + @Override + protected void parseResult(Connection conn) throws IOException { + RecordParser rp = new RecordParser(conn, dataBuffer); + + if (rp.resultCode == ResultCode.OK) { + record.resultCode = rp.resultCode; + } + else { + record.setError(rp.resultCode, Command.batchInDoubt(true, commandSentCounter)); + status.setRowError(); + } + } + + @Override + public void setInDoubt() { + if (record.resultCode == ResultCode.NO_RESPONSE) { + record.inDoubt = true; + } + } + } + public static abstract class BaseCommand extends SyncCommand implements IBatchCommand { BatchStatus status; Key key; diff --git a/client/src/com/aerospike/client/command/Buffer.java b/client/src/com/aerospike/client/command/Buffer.java index 2845a6a1d..60722ea09 100644 --- a/client/src/com/aerospike/client/command/Buffer.java +++ b/client/src/com/aerospike/client/command/Buffer.java @@ -461,6 +461,38 @@ public static long littleBytesToLong(byte[] buf, int offset) { ); } + //------------------------------------------------------- + // Transaction version conversions. + //------------------------------------------------------- + + /** + * Convert long to a 7 byte record version for MRT. + */ + public static void longToVersionBytes(long v, byte[] buf, int offset) { + buf[offset++] = (byte)(v >>> 0); + buf[offset++] = (byte)(v >>> 8); + buf[offset++] = (byte)(v >>> 16); + buf[offset++] = (byte)(v >>> 24); + buf[offset++] = (byte)(v >>> 32); + buf[offset++] = (byte)(v >>> 40); + buf[offset] = (byte)(v >>> 48); + } + + /** + * Convert 7 byte record version to a long for MRT. 
+ */ + public static long versionBytesToLong(byte[] buf, int offset) { + return ( + ((long)(buf[offset] & 0xFF) << 0) | + ((long)(buf[offset+1] & 0xFF) << 8) | + ((long)(buf[offset+2] & 0xFF) << 16) | + ((long)(buf[offset+3] & 0xFF) << 24) | + ((long)(buf[offset+4] & 0xFF) << 32) | + ((long)(buf[offset+5] & 0xFF) << 40) | + ((long)(buf[offset+6] & 0xFF) << 48) + ); + } + //------------------------------------------------------- // 32 bit number conversions. //------------------------------------------------------- diff --git a/client/src/com/aerospike/client/command/Command.java b/client/src/com/aerospike/client/command/Command.java index 56130f26c..2a31a37ae 100644 --- a/client/src/com/aerospike/client/command/Command.java +++ b/client/src/com/aerospike/client/command/Command.java @@ -54,6 +54,7 @@ import com.aerospike.client.query.PartitionStatus; import com.aerospike.client.query.PartitionTracker.NodePartitions; import com.aerospike.client.query.Statement; +import com.aerospike.client.Txn; import com.aerospike.client.util.Packer; public class Command { @@ -70,7 +71,7 @@ public class Command { public static final int INFO2_DELETE = (1 << 1); // Fling a record into the belly of Moloch. public static final int INFO2_GENERATION = (1 << 2); // Update if expected generation == old. public static final int INFO2_GENERATION_GT = (1 << 3); // Update if new generation >= old, good for restore. - public static final int INFO2_DURABLE_DELETE = (1 << 4); // Transaction resulting in record deletion leaves tombstone (Enterprise only). + public static final int INFO2_DURABLE_DELETE = (1 << 4); // Command resulting in record deletion leaves tombstone (Enterprise only). public static final int INFO2_CREATE_ONLY = (1 << 5); // Create only. Fail if record already exists. public static final int INFO2_RELAX_AP_LONG_QUERY = (1 << 6); // Treat as long query, but relax read consistency. public static final int INFO2_RESPOND_ALL_OPS = (1 << 7); // Return a result for every operation. @@ -99,6 +100,10 @@ public class Command { // 1 0 allow replica // 1 1 allow unavailable + public static final int INFO4_MRT_VERIFY_READ = (1 << 0); // Send MRT version to the server to be verified. + public static final int INFO4_MRT_ROLL_FORWARD = (1 << 1); // Roll forward MRT. + public static final int INFO4_MRT_ROLL_BACK = (1 << 2); // Roll back MRT. 
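The Buffer helpers above pack an MRT record version into 7 little-endian bytes, so only the low 56 bits of the long survive a round trip. A quick standalone check (hypothetical snippet, not part of the patch):

    import com.aerospike.client.command.Buffer;

    public class VersionRoundTrip {
        public static void main(String[] args) {
            long version = 0xDEADBEEFCAFEL;   // fits comfortably in 56 bits
            byte[] buf = new byte[7];
            Buffer.longToVersionBytes(version, buf, 0);
            long decoded = Buffer.versionBytesToLong(buf, 0);
            // The encoding keeps bits 0-55; anything above bit 55 is dropped.
            System.out.println(decoded == (version & 0x00FFFFFFFFFFFFFFL));   // true
        }
    }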
+ public static final byte STATE_READ_AUTH_HEADER = 1; public static final byte STATE_READ_HEADER = 2; public static final byte STATE_READ_DETAIL = 3; @@ -109,12 +114,12 @@ public class Command { public static final byte BATCH_MSG_INFO = 0x2; public static final byte BATCH_MSG_GEN = 0x4; public static final byte BATCH_MSG_TTL = 0x8; + public static final byte BATCH_MSG_INFO4 = 0x10; public static final int MSG_TOTAL_HEADER_SIZE = 30; public static final int FIELD_HEADER_SIZE = 5; public static final int OPERATION_HEADER_SIZE = 8; public static final int MSG_REMAINING_HEADER_SIZE = 22; - public static final int DIGEST_SIZE = 20; public static final int COMPRESS_THRESHOLD = 128; public static final long CL_MSG_VERSION = 2L; public static final long AS_MSG_TYPE = 3L; @@ -126,6 +131,7 @@ public class Command { public final int serverTimeout; public int socketTimeout; public int totalTimeout; + public Long version; public Command(int socketTimeout, int totalTimeout, int maxRetries) { this.maxRetries = maxRetries; @@ -141,13 +147,328 @@ public Command(int socketTimeout, int totalTimeout, int maxRetries) { } } + //-------------------------------------------------- + // Multi-record Transactions + //-------------------------------------------------- + + public final void setTxnAddKeys(WritePolicy policy, Key key, OperateArgs args) { + begin(); + int fieldCount = estimateKeySize(key); + dataOffset += args.size; + + sizeBuffer(); + + dataBuffer[8] = MSG_REMAINING_HEADER_SIZE; + dataBuffer[9] = (byte)args.readAttr; + dataBuffer[10] = (byte)args.writeAttr; + dataBuffer[11] = (byte)0; + dataBuffer[12] = 0; + dataBuffer[13] = 0; + Buffer.intToBytes(0, dataBuffer, 14); + Buffer.intToBytes(policy.expiration, dataBuffer, 18); + Buffer.intToBytes(serverTimeout, dataBuffer, 22); + Buffer.shortToBytes(fieldCount, dataBuffer, 26); + Buffer.shortToBytes(args.operations.length, dataBuffer, 28); + dataOffset = MSG_TOTAL_HEADER_SIZE; + + writeKey(key); + + for (Operation operation : args.operations) { + writeOperation(operation); + } + end(); + compress(policy); + } + + public final void setTxnVerify(Key key, long ver) { + begin(); + int fieldCount = estimateKeySize(key); + + // Version field. + dataOffset += 7 + FIELD_HEADER_SIZE; + fieldCount++; + + sizeBuffer(); + dataBuffer[8] = MSG_REMAINING_HEADER_SIZE; + dataBuffer[9] = (byte)(Command.INFO1_READ | Command.INFO1_NOBINDATA); + dataBuffer[10] = (byte)0; + dataBuffer[11] = (byte)Command.INFO3_SC_READ_TYPE; + dataBuffer[12] = (byte)Command.INFO4_MRT_VERIFY_READ; + dataBuffer[13] = 0; + Buffer.intToBytes(0, dataBuffer, 14); + Buffer.intToBytes(0, dataBuffer, 18); + Buffer.intToBytes(serverTimeout, dataBuffer, 22); + Buffer.shortToBytes(fieldCount, dataBuffer, 26); + Buffer.shortToBytes(0, dataBuffer, 28); + dataOffset = MSG_TOTAL_HEADER_SIZE; + + writeKey(key); + writeFieldVersion(ver); + end(); + } + + public final void setBatchTxnVerify( + BatchPolicy policy, + Key[] keys, + Long[] versions, + BatchNode batch + ) { + // Estimate buffer size. + begin(); + + // Batch field + dataOffset += FIELD_HEADER_SIZE + 5; + + Key keyPrev = null; + Long verPrev = null; + int max = batch.offsetsSize; + + for (int i = 0; i < max; i++) { + int offset = batch.offsets[i]; + Key key = keys[offset]; + Long ver = versions[offset]; + + dataOffset += key.digest.length + 4; + + if (canRepeat(key, keyPrev, ver, verPrev)) { + // Can set repeat previous namespace/bin names to save space. + dataOffset++; + } + else { + // Write full header and namespace/set/bin names. 
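For sizing intuition: the itemized additions just below charge each non-repeated verify record for its digest and batch index, a 9-byte per-record header, the namespace and set name fields, and an optional version field. A hedged helper expressing the same arithmetic (illustrative name; assumes the usual 20-byte key digest and ASCII names):

    // Illustrative sizing sketch for one non-repeated record in setBatchTxnVerify.
    static int estimateVerifyRecordSize(String namespace, String setName, boolean hasVersion) {
        int size = 20 + 4;               // key digest + 4-byte batch index
        size += 9;                       // header(4) + info4(1) + fieldCount(2) + opCount(2)
        size += namespace.length() + 5;  // namespace field + FIELD_HEADER_SIZE
        size += setName.length() + 5;    // set name field + FIELD_HEADER_SIZE
        if (hasVersion) {
            size += 7 + 5;               // 7-byte record version + FIELD_HEADER_SIZE
        }
        return size;                     // a repeated record costs only index + digest + 1 repeat byte
    }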
+ dataOffset += 9; // header(4) + info4(1) + fieldCount(2) + opCount(2) = 9 + dataOffset += Buffer.estimateSizeUtf8(key.namespace) + FIELD_HEADER_SIZE; + dataOffset += Buffer.estimateSizeUtf8(key.setName) + FIELD_HEADER_SIZE; + + if (ver != null) { + dataOffset += 7 + FIELD_HEADER_SIZE; + } + keyPrev = key; + verPrev = ver; + } + } + + sizeBuffer(); + + writeBatchHeader(policy, totalTimeout, 1); + + int fieldSizeOffset = dataOffset; + writeFieldHeader(0, FieldType.BATCH_INDEX); // Need to update size at end + + Buffer.intToBytes(max, dataBuffer, dataOffset); + dataOffset += 4; + dataBuffer[dataOffset++] = getBatchFlags(policy); + keyPrev = null; + verPrev = null; + + for (int i = 0; i < max; i++) { + int offset = batch.offsets[i]; + Key key = keys[offset]; + Long ver = versions[offset]; + + Buffer.intToBytes(offset, dataBuffer, dataOffset); + dataOffset += 4; + + byte[] digest = key.digest; + System.arraycopy(digest, 0, dataBuffer, dataOffset, digest.length); + dataOffset += digest.length; + + if (canRepeat(key, keyPrev, ver, verPrev)) { + // Can set repeat previous namespace/bin names to save space. + dataBuffer[dataOffset++] = BATCH_MSG_REPEAT; + } + else { + // Write full message. + dataBuffer[dataOffset++] = (byte)(BATCH_MSG_INFO | BATCH_MSG_INFO4); + dataBuffer[dataOffset++] = (byte)(Command.INFO1_READ | Command.INFO1_NOBINDATA); + dataBuffer[dataOffset++] = (byte)0; + dataBuffer[dataOffset++] = (byte)Command.INFO3_SC_READ_TYPE; + dataBuffer[dataOffset++] = (byte)Command.INFO4_MRT_VERIFY_READ; + + int fieldCount = 0; + + if (ver != null) { + fieldCount++; + } + + writeBatchFields(key, fieldCount, 0); + + if (ver != null) { + writeFieldVersion(ver); + } + + keyPrev = key; + verPrev = ver; + } + } + + // Write real field size. + Buffer.intToBytes(dataOffset - MSG_TOTAL_HEADER_SIZE - 4, dataBuffer, fieldSizeOffset); + end(); + compress(policy); + } + + public final void setTxnMarkRollForward(Key key) { + Bin bin = new Bin("fwd", true); + + begin(); + int fieldCount = estimateKeySize(key); + estimateOperationSize(bin); + writeTxnMonitor(key, 0, Command.INFO2_WRITE, fieldCount, 1); + writeOperation(bin, Operation.Type.WRITE); + end(); + } + + public final void setTxnRoll(Key key, Txn txn, int txnAttr) { + begin(); + int fieldCount = estimateKeySize(key); + + fieldCount += sizeTxn(key, txn, false); + + sizeBuffer(); + dataBuffer[8] = MSG_REMAINING_HEADER_SIZE; + dataBuffer[9] = (byte)0; + dataBuffer[10] = (byte)Command.INFO2_WRITE | Command.INFO2_DURABLE_DELETE; + dataBuffer[11] = (byte)0; + dataBuffer[12] = (byte)txnAttr; + dataBuffer[13] = 0; // clear the result code + Buffer.intToBytes(0, dataBuffer, 14); + Buffer.intToBytes(0, dataBuffer, 18); + Buffer.intToBytes(serverTimeout, dataBuffer, 22); + Buffer.shortToBytes(fieldCount, dataBuffer, 26); + Buffer.shortToBytes(0, dataBuffer, 28); + dataOffset = MSG_TOTAL_HEADER_SIZE; + + writeKey(key); + writeTxn(txn, false); + end(); + } + + public final void setBatchTxnRoll( + BatchPolicy policy, + Txn txn, + Key[] keys, + BatchNode batch, + BatchAttr attr + ) { + // Estimate buffer size. 
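setTxnRoll() writes the caller-supplied roll attribute into the info4 byte of the header. Given the flag names introduced earlier in this file, the commit path presumably passes INFO4_MRT_ROLL_FORWARD and the abort path INFO4_MRT_ROLL_BACK; the calling executor is not part of this section, so the following is an assumption:

    // Hedged illustration: how the roll attribute is expected to be chosen by the caller.
    int txnAttr = committing ? Command.INFO4_MRT_ROLL_FORWARD : Command.INFO4_MRT_ROLL_BACK;
    command.setTxnRoll(key, txn, txnAttr);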
+ begin(); + int fieldCount = 1; + int max = batch.offsetsSize; + Long[] versions = new Long[max]; + + for (int i = 0; i < max; i++) { + int offset = batch.offsets[i]; + Key key = keys[offset]; + versions[i] = txn.getReadVersion(key); + } + + // Batch field + dataOffset += FIELD_HEADER_SIZE + 5; + + Key keyPrev = null; + Long verPrev = null; + + for (int i = 0; i < max; i++) { + int offset = batch.offsets[i]; + Key key = keys[offset]; + Long ver = versions[i]; + + dataOffset += key.digest.length + 4; + + if (canRepeat(key, keyPrev, ver, verPrev)) { + // Can set repeat previous namespace/bin names to save space. + dataOffset++; + } + else { + // Write full header and namespace/set/bin names. + dataOffset += 12; // header(4) + ttl(4) + fieldCount(2) + opCount(2) = 12 + dataOffset += Buffer.estimateSizeUtf8(key.namespace) + FIELD_HEADER_SIZE; + dataOffset += Buffer.estimateSizeUtf8(key.setName) + FIELD_HEADER_SIZE; + sizeTxnBatch(txn, ver, attr.hasWrite); + dataOffset += 2; // gen(2) = 2 + keyPrev = key; + verPrev = ver; + } + } + + sizeBuffer(); + + writeBatchHeader(policy, totalTimeout, fieldCount); + + int fieldSizeOffset = dataOffset; + writeFieldHeader(0, FieldType.BATCH_INDEX); // Need to update size at end + + Buffer.intToBytes(max, dataBuffer, dataOffset); + dataOffset += 4; + dataBuffer[dataOffset++] = getBatchFlags(policy); + keyPrev = null; + verPrev = null; + + for (int i = 0; i < max; i++) { + int offset = batch.offsets[i]; + Key key = keys[offset]; + Long ver = versions[i]; + + Buffer.intToBytes(offset, dataBuffer, dataOffset); + dataOffset += 4; + + byte[] digest = key.digest; + System.arraycopy(digest, 0, dataBuffer, dataOffset, digest.length); + dataOffset += digest.length; + + if (canRepeat(key, keyPrev, ver, verPrev)) { + // Can set repeat previous namespace/bin names to save space. + dataBuffer[dataOffset++] = BATCH_MSG_REPEAT; + } + else { + // Write full message. + writeBatchWrite(key, txn, ver, attr, null, 0, 0); + keyPrev = key; + verPrev = ver; + } + } + + // Write real field size. 
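Once the batch roll buffer is finalized just below, setTxnVerify/setBatchTxnVerify and setTxnRoll/setBatchTxnRoll provide the wire-level halves of commit and abort. At the application level the flow is expected to look roughly like this sketch; the commit/abort entry points are assumed from the wider 9.0.0 changeset and are not shown in this section:

    // Hedged usage sketch of a multi-record transaction (entry points assumed, not shown here).
    Txn txn = new Txn();

    WritePolicy wp = new WritePolicy(client.getWritePolicyDefault());
    wp.txn = txn;
    client.put(wp, new Key("test", "demo", 1), new Bin("a", 1));

    Policy rp = new Policy(client.getReadPolicyDefault());
    rp.txn = txn;
    Record rec = client.get(rp, new Key("test", "demo", 2));

    client.commit(txn);    // verify read versions, then roll writes forward
    // client.abort(txn);  // or roll everything back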
+ Buffer.intToBytes(dataOffset - MSG_TOTAL_HEADER_SIZE - 4, dataBuffer, fieldSizeOffset); + end(); + compress(policy); + } + + public void setTxnClose(Txn txn, Key key) { + begin(); + int fieldCount = estimateKeySize(key); + writeTxnMonitor(key, 0, Command.INFO2_WRITE | Command.INFO2_DELETE | Command.INFO2_DURABLE_DELETE, + fieldCount, 0); + end(); + } + + private void writeTxnMonitor(Key key, int readAttr, int writeAttr, int fieldCount, int opCount) { + sizeBuffer(); + + dataBuffer[8] = MSG_REMAINING_HEADER_SIZE; + dataBuffer[9] = (byte)readAttr; + dataBuffer[10] = (byte)writeAttr; + dataBuffer[11] = (byte)0; + dataBuffer[12] = 0; + dataBuffer[13] = 0; + Buffer.intToBytes(0, dataBuffer, 14); + Buffer.intToBytes(0, dataBuffer, 18); + Buffer.intToBytes(serverTimeout, dataBuffer, 22); + Buffer.shortToBytes(fieldCount, dataBuffer, 26); + Buffer.shortToBytes(opCount, dataBuffer, 28); + dataOffset = MSG_TOTAL_HEADER_SIZE; + + writeKey(key); + } + //-------------------------------------------------- // Writes //-------------------------------------------------- public final void setWrite(WritePolicy policy, Operation.Type operation, Key key, Bin[] bins) { begin(); - int fieldCount = estimateKeySize(policy, key); + int fieldCount = estimateKeySize(policy, key, true); if (policy.filterExp != null) { dataOffset += policy.filterExp.size(); @@ -159,7 +480,7 @@ public final void setWrite(WritePolicy policy, Operation.Type operation, Key key } sizeBuffer(); writeHeaderWrite(policy, Command.INFO2_WRITE, fieldCount, bins.length); - writeKey(policy, key); + writeKey(policy, key, true); if (policy.filterExp != null) { policy.filterExp.write(this); @@ -174,7 +495,7 @@ public final void setWrite(WritePolicy policy, Operation.Type operation, Key key public void setDelete(WritePolicy policy, Key key) { begin(); - int fieldCount = estimateKeySize(policy, key); + int fieldCount = estimateKeySize(policy, key, true); if (policy.filterExp != null) { dataOffset += policy.filterExp.size(); @@ -182,7 +503,7 @@ public void setDelete(WritePolicy policy, Key key) { } sizeBuffer(); writeHeaderWrite(policy, Command.INFO2_WRITE | Command.INFO2_DELETE, fieldCount, 0); - writeKey(policy, key); + writeKey(policy, key, true); if (policy.filterExp != null) { policy.filterExp.write(this); @@ -193,15 +514,15 @@ public void setDelete(WritePolicy policy, Key key) { public void setDelete(Policy policy, Key key, BatchAttr attr) { begin(); Expression exp = getBatchExpression(policy, attr); - int fieldCount = estimateKeyAttrSize(key, attr, exp); + int fieldCount = estimateKeyAttrSize(policy, key, attr, exp); sizeBuffer(); - writeKeyAttr(key, attr, exp, fieldCount, 0); + writeKeyAttr(policy, key, attr, exp, fieldCount, 0); end(); } public final void setTouch(WritePolicy policy, Key key) { begin(); - int fieldCount = estimateKeySize(policy, key); + int fieldCount = estimateKeySize(policy, key, true); if (policy.filterExp != null) { dataOffset += policy.filterExp.size(); @@ -210,7 +531,7 @@ public final void setTouch(WritePolicy policy, Key key) { estimateOperationSize(); sizeBuffer(); writeHeaderWrite(policy, Command.INFO2_WRITE, fieldCount, 1); - writeKey(policy, key); + writeKey(policy, key, true); if (policy.filterExp != null) { policy.filterExp.write(this); @@ -225,7 +546,7 @@ public final void setTouch(WritePolicy policy, Key key) { public final void setExists(Policy policy, Key key) { begin(); - int fieldCount = estimateKeySize(policy, key); + int fieldCount = estimateKeySize(policy, key, false); if (policy.filterExp != null) { 
dataOffset += policy.filterExp.size(); @@ -233,7 +554,7 @@ public final void setExists(Policy policy, Key key) { } sizeBuffer(); writeHeaderReadHeader(policy, Command.INFO1_READ | Command.INFO1_NOBINDATA, fieldCount, 0); - writeKey(policy, key); + writeKey(policy, key, false); if (policy.filterExp != null) { policy.filterExp.write(this); @@ -253,7 +574,7 @@ public final void setRead(Policy policy, Key key, String[] binNames) { } begin(); - int fieldCount = estimateKeySize(policy, key); + int fieldCount = estimateKeySize(policy, key, false); if (policy.filterExp != null) { dataOffset += policy.filterExp.size(); @@ -268,7 +589,7 @@ public final void setRead(Policy policy, Key key, String[] binNames) { sizeBuffer(); writeHeaderRead(policy, serverTimeout, readAttr, 0, 0, fieldCount, opCount); - writeKey(policy, key); + writeKey(policy, key, false); if (policy.filterExp != null) { policy.filterExp.write(this); @@ -322,10 +643,10 @@ else if (br.ops != null) { opCount = 0; } - int fieldCount = estimateKeyAttrSize(br.key, attr, exp); + int fieldCount = estimateKeyAttrSize(policy, br.key, attr, exp); sizeBuffer(); - writeKeyAttr(br.key, attr, exp, fieldCount, opCount); + writeKeyAttr(policy, br.key, attr, exp, fieldCount, opCount); if (br.binNames != null) { for (String binName : br.binNames) { @@ -347,7 +668,7 @@ public final void setRead(Policy policy, Key key, Operation[] ops) { attr.setRead(policy); attr.adjustRead(ops); - int fieldCount = estimateKeyAttrSize(key, attr, policy.filterExp); + int fieldCount = estimateKeyAttrSize(policy, key, attr, policy.filterExp); for (Operation op : ops) { if (op.type.isWrite) { @@ -357,7 +678,7 @@ public final void setRead(Policy policy, Key key, Operation[] ops) { } sizeBuffer(); - writeKeyAttr(key, attr, policy.filterExp, fieldCount, ops.length); + writeKeyAttr(policy, key, attr, policy.filterExp, fieldCount, ops.length); for (Operation op : ops) { writeOperation(op); @@ -367,16 +688,15 @@ public final void setRead(Policy policy, Key key, Operation[] ops) { public final void setReadHeader(Policy policy, Key key) { begin(); - int fieldCount = estimateKeySize(policy, key); + int fieldCount = estimateKeySize(policy, key, false); if (policy.filterExp != null) { dataOffset += policy.filterExp.size(); fieldCount++; } - estimateOperationSize((String)null); sizeBuffer(); writeHeaderReadHeader(policy, Command.INFO1_READ | Command.INFO1_NOBINDATA, fieldCount, 0); - writeKey(policy, key); + writeKey(policy, key, false); if (policy.filterExp != null) { policy.filterExp.write(this); @@ -390,7 +710,7 @@ public final void setReadHeader(Policy policy, Key key) { public final void setOperate(WritePolicy policy, Key key, OperateArgs args) { begin(); - int fieldCount = estimateKeySize(policy, key); + int fieldCount = estimateKeySize(policy, key, args.hasWrite); if (policy.filterExp != null) { dataOffset += policy.filterExp.size(); @@ -400,7 +720,7 @@ public final void setOperate(WritePolicy policy, Key key, OperateArgs args) { sizeBuffer(); writeHeaderReadWrite(policy, args, fieldCount); - writeKey(policy, key); + writeKey(policy, key, args.hasWrite); if (policy.filterExp != null) { policy.filterExp.write(this); @@ -416,11 +736,11 @@ public final void setOperate(WritePolicy policy, Key key, OperateArgs args) { public final void setOperate(Policy policy, BatchAttr attr, Key key, Operation[] ops) { begin(); Expression exp = getBatchExpression(policy, attr); - int fieldCount = estimateKeyAttrSize(key, attr, exp); + int fieldCount = estimateKeyAttrSize(policy, key, attr, exp); 
dataOffset += attr.opSize; sizeBuffer(); - writeKeyAttr(key, attr, exp, fieldCount, ops.length); + writeKeyAttr(policy, key, attr, exp, fieldCount, ops.length); for (Operation op : ops) { writeOperation(op); @@ -435,7 +755,7 @@ public final void setOperate(Policy policy, BatchAttr attr, Key key, Operation[] public final void setUdf(WritePolicy policy, Key key, String packageName, String functionName, Value[] args) { begin(); - int fieldCount = estimateKeySize(policy, key); + int fieldCount = estimateKeySize(policy, key, true); if (policy.filterExp != null) { dataOffset += policy.filterExp.size(); @@ -447,7 +767,7 @@ public final void setUdf(WritePolicy policy, Key key, String packageName, String sizeBuffer(); writeHeaderWrite(policy, Command.INFO2_WRITE, fieldCount, 0); - writeKey(policy, key); + writeKey(policy, key, true); if (policy.filterExp != null) { policy.filterExp.write(this); @@ -468,11 +788,11 @@ public final void setUdf(Policy policy, BatchAttr attr, Key key, String packageN public final void setUdf(Policy policy, BatchAttr attr, Key key, String packageName, String functionName, byte[] argBytes) { begin(); Expression exp = getBatchExpression(policy, attr); - int fieldCount = estimateKeyAttrSize(key, attr, exp); + int fieldCount = estimateKeyAttrSize(policy, key, attr, exp); fieldCount += estimateUdfSize(packageName, functionName, argBytes); sizeBuffer(); - writeKeyAttr(key, attr, exp, fieldCount, 0); + writeKeyAttr(policy, key, attr, exp, fieldCount, 0); writeField(packageName, FieldType.UDF_PACKAGE_NAME); writeField(functionName, FieldType.UDF_FUNCTION); writeField(argBytes, FieldType.UDF_ARGLIST); @@ -745,21 +1065,21 @@ public final void setBatchOperate( List records, BatchNode batch ) { - final BatchRecordIterNative iter = new BatchRecordIterNative(records, batch); - setBatchOperate(policy, writePolicy, udfPolicy, deletePolicy, iter); - } + begin(); + int max = batch.offsetsSize; + Txn txn = policy.txn; + Long[] versions = null; - public final void setBatchOperate( - BatchPolicy policy, - BatchWritePolicy writePolicy, - BatchUDFPolicy udfPolicy, - BatchDeletePolicy deletePolicy, - KeyIter iter - ) { - BatchRecord record; - BatchRecord prev = null; + if (txn != null) { + versions = new Long[max]; + + for (int i = 0; i < max; i++) { + int offset = batch.offsets[i]; + BatchRecord record = records.get(offset); + versions[i] = txn.getReadVersion(record.key); + } + } - begin(); int fieldCount = 1; if (policy.filterExp != null) { @@ -769,17 +1089,18 @@ public final void setBatchOperate( dataOffset += FIELD_HEADER_SIZE + 5; - while ((record = iter.next()) != null) { - final Key key = record.key; + BatchRecord prev = null; + Long verPrev = null; + + for (int i = 0; i < max; i++) { + int offset = batch.offsets[i]; + BatchRecord record = records.get(offset); + Key key = record.key; + Long ver = (versions != null)? versions[i] : null; dataOffset += key.digest.length + 4; - // Avoid relatively expensive full equality checks for performance reasons. - // Use reference equality only in hope that common namespaces/bin names are set from - // fixed variables. It's fine if equality not determined correctly because it just - // results in more space used. The batch will still be correct. - if (!policy.sendKey && prev != null && prev.key.namespace == key.namespace && prev.key.setName == key.setName && - record.equals(prev)) { + if (canRepeat(policy, key, record, prev, ver, verPrev)) { // Can set repeat previous namespace/bin names to save space. 
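canRepeat() deliberately uses reference equality on namespace, set name and version: a missed match only costs wire space, never correctness. Callers get the most benefit when keys share the same String instances, for example:

    // Illustrative: shared namespace/set references let consecutive batch entries collapse
    // into a 1-byte BATCH_MSG_REPEAT marker instead of a full per-record header.
    final String ns = "test";
    final String set = "demo";
    Key[] keys = new Key[1000];

    for (int i = 0; i < keys.length; i++) {
        keys[i] = new Key(ns, set, i);          // same references for every key
        // new Key(new String("test"), ...)     // distinct references: still correct,
    }                                           // just a larger request buffer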
dataOffset++; } @@ -788,8 +1109,10 @@ public final void setBatchOperate( dataOffset += 12; dataOffset += Buffer.estimateSizeUtf8(key.namespace) + FIELD_HEADER_SIZE; dataOffset += Buffer.estimateSizeUtf8(key.setName) + FIELD_HEADER_SIZE; + sizeTxnBatch(txn, ver, record.hasWrite); dataOffset += record.size(policy); prev = record; + verPrev = ver; } } sizeBuffer(); @@ -803,29 +1126,28 @@ public final void setBatchOperate( final int fieldSizeOffset = dataOffset; writeFieldHeader(0, FieldType.BATCH_INDEX); // Need to update size at end - Buffer.intToBytes(iter.size(), dataBuffer, dataOffset); + Buffer.intToBytes(max, dataBuffer, dataOffset); dataOffset += 4; dataBuffer[dataOffset++] = getBatchFlags(policy); BatchAttr attr = new BatchAttr(); prev = null; - iter.reset(); + verPrev = null; - while ((record = iter.next()) != null) { - Buffer.intToBytes(iter.offset(), dataBuffer, dataOffset); + for (int i = 0; i < max; i++) { + int offset = batch.offsets[i]; + BatchRecord record = records.get(offset); + Long ver = (versions != null)? versions[i] : null; + + Buffer.intToBytes(offset, dataBuffer, dataOffset); dataOffset += 4; - final Key key = record.key; + Key key = record.key; final byte[] digest = key.digest; System.arraycopy(digest, 0, dataBuffer, dataOffset, digest.length); dataOffset += digest.length; - // Avoid relatively expensive full equality checks for performance reasons. - // Use reference equality only in hope that common namespaces/bin names are set from - // fixed variables. It's fine if equality not determined correctly because it just - // results in more space used. The batch will still be correct. - if (!policy.sendKey && prev != null && prev.key.namespace == key.namespace && prev.key.setName == key.setName && - record.equals(prev)) { + if (canRepeat(policy, key, record, prev, ver, verPrev)) { // Can set repeat previous namespace/bin names to save space. dataBuffer[dataOffset++] = BATCH_MSG_REPEAT; } @@ -844,20 +1166,20 @@ public final void setBatchOperate( if (br.binNames != null) { if (br.binNames.length > 0) { - writeBatchBinNames(key, br.binNames, attr, attr.filterExp); + writeBatchBinNames(key, txn, ver, br.binNames, attr, attr.filterExp); } else { attr.adjustRead(true); - writeBatchRead(key, attr, attr.filterExp, 0); + writeBatchRead(key, txn, ver, attr, attr.filterExp, 0); } } else if (br.ops != null) { attr.adjustRead(br.ops); - writeBatchOperations(key, br.ops, attr, attr.filterExp); + writeBatchOperations(key, txn, ver, br.ops, attr, attr.filterExp); } else { attr.adjustRead(br.readAllBins); - writeBatchRead(key, attr, attr.filterExp, 0); + writeBatchRead(key, txn, ver, attr, attr.filterExp, 0); } break; } @@ -868,7 +1190,7 @@ else if (br.ops != null) { attr.setWrite(bwp); attr.adjustWrite(bw.ops); - writeBatchOperations(key, bw.ops, attr, attr.filterExp); + writeBatchOperations(key, txn, ver, bw.ops, attr, attr.filterExp); break; } @@ -877,7 +1199,7 @@ else if (br.ops != null) { BatchUDFPolicy bup = (bu.policy != null)? bu.policy : udfPolicy; attr.setUDF(bup); - writeBatchWrite(key, attr, attr.filterExp, 3, 0); + writeBatchWrite(key, txn, ver, attr, attr.filterExp, 3, 0); writeField(bu.packageName, FieldType.UDF_PACKAGE_NAME); writeField(bu.functionName, FieldType.UDF_FUNCTION); writeField(bu.argBytes, FieldType.UDF_ARGLIST); @@ -889,11 +1211,12 @@ else if (br.ops != null) { BatchDeletePolicy bdp = (bd.policy != null)? 
bd.policy : deletePolicy; attr.setDelete(bdp); - writeBatchWrite(key, attr, attr.filterExp, 0, 0); + writeBatchWrite(key, txn, ver, attr, attr.filterExp, 0, 0); break; } } prev = record; + verPrev = ver; } } @@ -910,22 +1233,25 @@ public final void setBatchOperate( String[] binNames, Operation[] ops, BatchAttr attr - ) { - final KeyIterNative iter = new KeyIterNative(keys, batch); - setBatchOperate(policy, iter, binNames, ops, attr); - } - - public final void setBatchOperate( - BatchPolicy policy, - KeyIter iter, - String[] binNames, - Operation[] ops, - BatchAttr attr ) { // Estimate buffer size. begin(); - int fieldCount = 1; + int max = batch.offsetsSize; + Txn txn = policy.txn; + Long[] versions = null; + + if (txn != null) { + versions = new Long[max]; + + for (int i = 0; i < max; i++) { + int offset = batch.offsets[i]; + Key key = keys[offset]; + versions[i] = txn.getReadVersion(key); + } + } + Expression exp = getBatchExpression(policy, attr); + int fieldCount = 1; if (exp != null) { dataOffset += exp.size(); @@ -934,22 +1260,26 @@ public final void setBatchOperate( dataOffset += FIELD_HEADER_SIZE + 5; - Key key; - Key prev = null; + Key keyPrev = null; + Long verPrev = null; + + for (int i = 0; i < max; i++) { + int offset = batch.offsets[i]; + Key key = keys[offset]; + Long ver = (versions != null)? versions[i] : null; - while ((key = iter.next()) != null) { dataOffset += key.digest.length + 4; - // Try reference equality in hope that namespace/set for all keys is set from fixed variables. - if (!attr.sendKey && prev != null && prev.namespace == key.namespace && prev.setName == key.setName) { + if (canRepeat(attr, key, keyPrev, ver, verPrev)) { // Can set repeat previous namespace/bin names to save space. dataOffset++; } else { // Write full header and namespace/set/bin names. - dataOffset += 12; // header(4) + ttl(4) + fielCount(2) + opCount(2) = 12 + dataOffset += 12; // header(4) + ttl(4) + fieldCount(2) + opCount(2) = 12 dataOffset += Buffer.estimateSizeUtf8(key.namespace) + FIELD_HEADER_SIZE; dataOffset += Buffer.estimateSizeUtf8(key.setName) + FIELD_HEADER_SIZE; + sizeTxnBatch(txn, ver, attr.hasWrite); if (attr.sendKey) { dataOffset += key.userKey.estimateSize() + FIELD_HEADER_SIZE + 1; @@ -974,7 +1304,8 @@ else if (ops != null) { else if ((attr.writeAttr & Command.INFO2_DELETE) != 0) { dataOffset += 2; // Extra write specific fields. } - prev = key; + keyPrev = key; + verPrev = ver; } } @@ -989,40 +1320,44 @@ else if ((attr.writeAttr & Command.INFO2_DELETE) != 0) { int fieldSizeOffset = dataOffset; writeFieldHeader(0, FieldType.BATCH_INDEX); // Need to update size at end - Buffer.intToBytes(iter.size(), dataBuffer, dataOffset); + Buffer.intToBytes(max, dataBuffer, dataOffset); dataOffset += 4; dataBuffer[dataOffset++] = getBatchFlags(policy); - prev = null; - iter.reset(); + keyPrev = null; + verPrev = null; + + for (int i = 0; i < max; i++) { + int offset = batch.offsets[i]; + Key key = keys[offset]; + Long ver = (versions != null)? versions[i] : null; - while ((key = iter.next()) != null) { - Buffer.intToBytes(iter.offset(), dataBuffer, dataOffset); + Buffer.intToBytes(offset, dataBuffer, dataOffset); dataOffset += 4; byte[] digest = key.digest; System.arraycopy(digest, 0, dataBuffer, dataOffset, digest.length); dataOffset += digest.length; - // Try reference equality in hope that namespace/set for all keys is set from fixed variables. 
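On the key-array batch paths, the per-key read versions are now resolved up front via txn.getReadVersion(key). From the caller's side this only requires the new policy field, e.g. (sketch, assuming an existing client and an open Txn):

    // Hedged sketch: attaching an open transaction to a batch read.
    BatchPolicy bp = new BatchPolicy(client.getBatchPolicyDefault());
    bp.txn = txn;
    Record[] records = client.get(bp, keys);   // read versions are captured in txn for later verify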
- if (!attr.sendKey && prev != null && prev.namespace == key.namespace && prev.setName == key.setName) { + if (canRepeat(attr, key, keyPrev, ver, verPrev)) { // Can set repeat previous namespace/bin names to save space. dataBuffer[dataOffset++] = BATCH_MSG_REPEAT; } else { // Write full message. if (binNames != null) { - writeBatchBinNames(key, binNames, attr, null); + writeBatchBinNames(key, txn, ver, binNames, attr, null); } else if (ops != null) { - writeBatchOperations(key, ops, attr, null); + writeBatchOperations(key, txn, ver, ops, attr, null); } else if ((attr.writeAttr & Command.INFO2_DELETE) != 0) { - writeBatchWrite(key, attr, null, 0, 0); + writeBatchWrite(key, txn, ver, attr, null, 0, 0); } else { - writeBatchRead(key, attr, null, 0); + writeBatchRead(key, txn, ver, attr, null, 0); } - prev = key; + keyPrev = key; + verPrev = ver; } } @@ -1040,23 +1375,25 @@ public final void setBatchUDF( String functionName, byte[] argBytes, BatchAttr attr - ) { - final KeyIterNative iter = new KeyIterNative(keys, batch); - setBatchUDF(policy, iter, packageName, functionName, argBytes, attr); - } - - public final void setBatchUDF( - BatchPolicy policy, - KeyIter iter, - String packageName, - String functionName, - byte[] argBytes, - BatchAttr attr ) { // Estimate buffer size. begin(); - int fieldCount = 1; + int max = batch.offsetsSize; + Txn txn = policy.txn; + Long[] versions = null; + + if (txn != null) { + versions = new Long[max]; + + for (int i = 0; i < max; i++) { + int offset = batch.offsets[i]; + Key key = keys[offset]; + versions[i] = txn.getReadVersion(key); + } + } + Expression exp = getBatchExpression(policy, attr); + int fieldCount = 1; if (exp != null) { dataOffset += exp.size(); @@ -1065,14 +1402,17 @@ public final void setBatchUDF( dataOffset += FIELD_HEADER_SIZE + 5; - Key key; - Key prev = null; + Key keyPrev = null; + Long verPrev = null; + + for (int i = 0; i < max; i++) { + int offset = batch.offsets[i]; + Key key = keys[offset]; + Long ver = (versions != null)? versions[i] : null; - while ((key = iter.next()) != null) { dataOffset += key.digest.length + 4; - // Try reference equality in hope that namespace/set for all keys is set from fixed variables. - if (!attr.sendKey && prev != null && prev.namespace == key.namespace && prev.setName == key.setName) { + if (canRepeat(attr, key, keyPrev, ver, verPrev)) { // Can set repeat previous namespace/bin names to save space. 
dataOffset++; } @@ -1081,13 +1421,15 @@ public final void setBatchUDF( dataOffset += 12; // header(4) + ttl(4) + fieldCount(2) + opCount(2) = 12 dataOffset += Buffer.estimateSizeUtf8(key.namespace) + FIELD_HEADER_SIZE; dataOffset += Buffer.estimateSizeUtf8(key.setName) + FIELD_HEADER_SIZE; + sizeTxnBatch(txn, ver, attr.hasWrite); if (attr.sendKey) { dataOffset += key.userKey.estimateSize() + FIELD_HEADER_SIZE + 1; } dataOffset += 2; // gen(2) = 2 estimateUdfSize(packageName, functionName, argBytes); - prev = key; + keyPrev = key; + verPrev = ver; } } @@ -1102,32 +1444,36 @@ public final void setBatchUDF( int fieldSizeOffset = dataOffset; writeFieldHeader(0, FieldType.BATCH_INDEX); // Need to update size at end - Buffer.intToBytes(iter.size(), dataBuffer, dataOffset); + Buffer.intToBytes(max, dataBuffer, dataOffset); dataOffset += 4; dataBuffer[dataOffset++] = getBatchFlags(policy); - prev = null; - iter.reset(); + keyPrev = null; + verPrev = null; - while ((key = iter.next()) != null) { - Buffer.intToBytes(iter.offset(), dataBuffer, dataOffset); + for (int i = 0; i < max; i++) { + int offset = batch.offsets[i]; + Key key = keys[offset]; + Long ver = (versions != null)? versions[i] : null; + + Buffer.intToBytes(offset, dataBuffer, dataOffset); dataOffset += 4; byte[] digest = key.digest; System.arraycopy(digest, 0, dataBuffer, dataOffset, digest.length); dataOffset += digest.length; - // Try reference equality in hope that namespace/set for all keys is set from fixed variables. - if (!attr.sendKey && prev != null && prev.namespace == key.namespace && prev.setName == key.setName) { + if (canRepeat(attr, key, keyPrev, ver, verPrev)) { // Can set repeat previous namespace/bin names to save space. dataBuffer[dataOffset++] = BATCH_MSG_REPEAT; } else { // Write full message. - writeBatchWrite(key, attr, null, 3, 0); + writeBatchWrite(key, txn, ver, attr, null, 3, 0); writeField(packageName, FieldType.UDF_PACKAGE_NAME); writeField(functionName, FieldType.UDF_FUNCTION); writeField(argBytes, FieldType.UDF_ARGLIST); - prev = key; + keyPrev = key; + verPrev = ver; } } @@ -1137,6 +1483,33 @@ public final void setBatchUDF( compress(policy); } + private static boolean canRepeat( + Policy policy, + Key key, + BatchRecord record, + BatchRecord prev, + Long ver, + Long verPrev + ) { + // Avoid relatively expensive full equality checks for performance reasons. + // Use reference equality only in hope that common namespaces/bin names are set from + // fixed variables. It's fine if equality not determined correctly because it just + // results in more space used. The batch will still be correct. + // Same goes for ver reference equality check. + return !policy.sendKey && verPrev == ver && prev != null && prev.key.namespace == key.namespace && + prev.key.setName == key.setName && record.equals(prev); + } + + private static boolean canRepeat(BatchAttr attr, Key key, Key keyPrev, Long ver, Long verPrev) { + return !attr.sendKey && verPrev == ver && keyPrev != null && keyPrev.namespace == key.namespace && + keyPrev.setName == key.setName; + } + + private static boolean canRepeat(Key key, Key keyPrev, Long ver, Long verPrev) { + return verPrev == ver && keyPrev != null && keyPrev.namespace == key.namespace && + keyPrev.setName == key.setName; + } + private static final Expression getBatchExpression(Policy policy, BatchAttr attr) { return (attr.filterExp != null) ? 
attr.filterExp : policy.filterExp; } @@ -1158,6 +1531,21 @@ private static byte getBatchFlags(BatchPolicy policy) { return flags; } + private void sizeTxnBatch(Txn txn, Long ver, boolean hasWrite) { + if (txn != null) { + dataOffset++; // Add info4 byte for MRT. + dataOffset += 8 + FIELD_HEADER_SIZE; + + if (ver != null) { + dataOffset += 7 + FIELD_HEADER_SIZE; + } + + if (hasWrite && txn.getDeadline() != 0) { + dataOffset += 4 + FIELD_HEADER_SIZE; + } + } + } + private void writeBatchHeader(Policy policy, int timeout, int fieldCount) { int readAttr = Command.INFO1_BATCH; @@ -1180,20 +1568,20 @@ private void writeBatchHeader(Policy policy, int timeout, int fieldCount) { dataOffset = MSG_TOTAL_HEADER_SIZE; } - private void writeBatchBinNames(Key key, String[] binNames, BatchAttr attr, Expression filter) { - writeBatchRead(key, attr, filter, binNames.length); + private void writeBatchBinNames(Key key, Txn txn, Long ver, String[] binNames, BatchAttr attr, Expression filter) { + writeBatchRead(key, txn, ver, attr, filter, binNames.length); for (String binName : binNames) { writeOperation(binName, Operation.Type.READ); } } - private void writeBatchOperations(Key key, Operation[] ops, BatchAttr attr, Expression filter) { + private void writeBatchOperations(Key key, Txn txn, Long ver, Operation[] ops, BatchAttr attr, Expression filter) { if (attr.hasWrite) { - writeBatchWrite(key, attr, filter, 0, ops.length); + writeBatchWrite(key, txn, ver, attr, filter, 0, ops.length); } else { - writeBatchRead(key, attr, filter, ops.length); + writeBatchRead(key, txn, ver, attr, filter, ops.length); } for (Operation op : ops) { @@ -1201,44 +1589,133 @@ private void writeBatchOperations(Key key, Operation[] ops, BatchAttr attr, Expr } } - private void writeBatchRead(Key key, BatchAttr attr, Expression filter, int opCount) { - dataBuffer[dataOffset++] = (byte)(BATCH_MSG_INFO | BATCH_MSG_TTL); - dataBuffer[dataOffset++] = (byte)attr.readAttr; - dataBuffer[dataOffset++] = (byte)attr.writeAttr; - dataBuffer[dataOffset++] = (byte)attr.infoAttr; - Buffer.intToBytes(attr.expiration, dataBuffer, dataOffset); - dataOffset += 4; - writeBatchFields(key, filter, 0, opCount); + private void writeBatchRead(Key key, Txn txn, Long ver, BatchAttr attr, Expression filter, int opCount) { + if (txn != null) { + dataBuffer[dataOffset++] = (byte)(BATCH_MSG_INFO | BATCH_MSG_INFO4 | BATCH_MSG_TTL); + dataBuffer[dataOffset++] = (byte)attr.readAttr; + dataBuffer[dataOffset++] = (byte)attr.writeAttr; + dataBuffer[dataOffset++] = (byte)attr.infoAttr; + dataBuffer[dataOffset++] = (byte)attr.txnAttr; + Buffer.intToBytes(attr.expiration, dataBuffer, dataOffset); + dataOffset += 4; + writeBatchFieldsTxn(key, txn, ver, attr, filter, 0, opCount); + } + else { + dataBuffer[dataOffset++] = (byte)(BATCH_MSG_INFO | BATCH_MSG_TTL); + dataBuffer[dataOffset++] = (byte)attr.readAttr; + dataBuffer[dataOffset++] = (byte)attr.writeAttr; + dataBuffer[dataOffset++] = (byte)attr.infoAttr; + Buffer.intToBytes(attr.expiration, dataBuffer, dataOffset); + dataOffset += 4; + writeBatchFieldsReg(key, attr, filter, 0, opCount); + } } - private void writeBatchWrite(Key key, BatchAttr attr, Expression filter, int fieldCount, int opCount) { - dataBuffer[dataOffset++] = (byte)(BATCH_MSG_INFO | BATCH_MSG_GEN | BATCH_MSG_TTL); - dataBuffer[dataOffset++] = (byte)attr.readAttr; - dataBuffer[dataOffset++] = (byte)attr.writeAttr; - dataBuffer[dataOffset++] = (byte)attr.infoAttr; - Buffer.shortToBytes(attr.generation, dataBuffer, dataOffset); - dataOffset += 2; - 
Buffer.intToBytes(attr.expiration, dataBuffer, dataOffset); - dataOffset += 4; + private void writeBatchWrite( + Key key, + Txn txn, + Long ver, + BatchAttr attr, + Expression filter, + int fieldCount, + int opCount + ) { + if (txn != null) { + dataBuffer[dataOffset++] = (byte)(BATCH_MSG_INFO | BATCH_MSG_INFO4 | BATCH_MSG_GEN | BATCH_MSG_TTL); + dataBuffer[dataOffset++] = (byte)attr.readAttr; + dataBuffer[dataOffset++] = (byte)attr.writeAttr; + dataBuffer[dataOffset++] = (byte)attr.infoAttr; + dataBuffer[dataOffset++] = (byte)attr.txnAttr; + Buffer.shortToBytes(attr.generation, dataBuffer, dataOffset); + dataOffset += 2; + Buffer.intToBytes(attr.expiration, dataBuffer, dataOffset); + dataOffset += 4; + writeBatchFieldsTxn(key, txn, ver, attr, filter, fieldCount, opCount); + } + else { + dataBuffer[dataOffset++] = (byte)(BATCH_MSG_INFO | BATCH_MSG_GEN | BATCH_MSG_TTL); + dataBuffer[dataOffset++] = (byte)attr.readAttr; + dataBuffer[dataOffset++] = (byte)attr.writeAttr; + dataBuffer[dataOffset++] = (byte)attr.infoAttr; + Buffer.shortToBytes(attr.generation, dataBuffer, dataOffset); + dataOffset += 2; + Buffer.intToBytes(attr.expiration, dataBuffer, dataOffset); + dataOffset += 4; + writeBatchFieldsReg(key, attr, filter, fieldCount, opCount); + } + } + + private void writeBatchFieldsTxn( + Key key, + Txn txn, + Long ver, + BatchAttr attr, + Expression filter, + int fieldCount, + int opCount + ) { + fieldCount++; + + if (ver != null) { + fieldCount++; + } + + if (attr.hasWrite && txn.getDeadline() != 0) { + fieldCount++; + } + + if (filter != null) { + fieldCount++; + } if (attr.sendKey) { fieldCount++; - writeBatchFields(key, filter, fieldCount, opCount); - writeField(key.userKey, FieldType.KEY); } - else { - writeBatchFields(key, filter, fieldCount, opCount); + + writeBatchFields(key, fieldCount, opCount); + + writeFieldLE(txn.getId(), FieldType.MRT_ID); + + if (ver != null) { + writeFieldVersion(ver); + } + + if (attr.hasWrite && txn.getDeadline() != 0) { + writeFieldLE(txn.getDeadline(), FieldType.MRT_DEADLINE); + } + + if (filter != null) { + filter.write(this); + } + + if (attr.sendKey) { + writeField(key.userKey, FieldType.KEY); } } - private void writeBatchFields(Key key, Expression filter, int fieldCount, int opCount) { + private void writeBatchFieldsReg( + Key key, + BatchAttr attr, + Expression filter, + int fieldCount, + int opCount + ) { if (filter != null) { fieldCount++; - writeBatchFields(key, fieldCount, opCount); + } + + if (attr.sendKey) { + fieldCount++; + } + + writeBatchFields(key, fieldCount, opCount); + + if (filter != null) { filter.write(this); } - else { - writeBatchFields(key, fieldCount, opCount); + + if (attr.sendKey) { + writeField(key.userKey, FieldType.KEY); } } @@ -1373,7 +1850,7 @@ public final void setScan( writeField(policy.socketTimeout, FieldType.SOCKET_TIMEOUT); // Write taskId field - writeField(taskId, FieldType.TRAN_ID); + writeField(taskId, FieldType.QUERY_ID); if (binNames != null) { for (String binName : binNames) { @@ -1601,7 +2078,7 @@ else if (qp.expectedDuration == QueryDuration.LONG_RELAX_AP) { writeField(policy.socketTimeout, FieldType.SOCKET_TIMEOUT); // Write taskId field - writeField(taskId, FieldType.TRAN_ID); + writeField(taskId, FieldType.QUERY_ID); if (filter != null) { IndexCollectionType type = filter.getCollectionType(); @@ -1698,13 +2175,8 @@ else if (binNames != null && (isNew || filter == null)) { // Command Sizing //-------------------------------------------------- - private final int estimateKeyAttrSize(Key key, BatchAttr attr, 
Expression filterExp) { - int fieldCount = estimateKeySize(key); - - if (attr.sendKey) { - dataOffset += key.userKey.estimateSize() + FIELD_HEADER_SIZE + 1; - fieldCount++; - } + private final int estimateKeyAttrSize(Policy policy, Key key, BatchAttr attr, Expression filterExp) { + int fieldCount = estimateKeySize(policy, key, attr.hasWrite); if (filterExp != null) { dataOffset += filterExp.size(); @@ -1713,9 +2185,11 @@ private final int estimateKeyAttrSize(Key key, BatchAttr attr, Expression filter return fieldCount; } - private final int estimateKeySize(Policy policy, Key key) { + private int estimateKeySize(Policy policy, Key key, boolean hasWrite) { int fieldCount = estimateKeySize(key); + fieldCount += sizeTxn(key, policy.txn, hasWrite); + if (policy.sendKey) { dataOffset += key.userKey.estimateSize() + FIELD_HEADER_SIZE + 1; fieldCount++; @@ -1834,7 +2308,7 @@ private final void writeHeaderWrite(WritePolicy policy, int writeAttr, int field dataBuffer[9] = (byte)readAttr; dataBuffer[10] = (byte)writeAttr; dataBuffer[11] = (byte)infoAttr; - dataBuffer[12] = 0; // unused + dataBuffer[12] = 0; dataBuffer[13] = 0; // clear the result code Buffer.intToBytes(generation, dataBuffer, 14); Buffer.intToBytes(policy.expiration, dataBuffer, 18); @@ -2032,7 +2506,14 @@ private final void writeHeaderReadHeader(Policy policy, int readAttr, int fieldC /** * Header write for batch single commands. */ - private final void writeKeyAttr(Key key, BatchAttr attr, Expression filterExp, int fieldCount, int operationCount) { + private void writeKeyAttr( + Policy policy, + Key key, + BatchAttr attr, + Expression filterExp, + int fieldCount, + int operationCount + ) { // Write all header data except total size which must be written last. dataBuffer[8] = MSG_REMAINING_HEADER_SIZE; // Message header length. 
dataBuffer[9] = (byte)attr.readAttr; @@ -2047,19 +2528,16 @@ private final void writeKeyAttr(Key key, BatchAttr attr, Expression filterExp, i Buffer.shortToBytes(operationCount, dataBuffer, 28); dataOffset = MSG_TOTAL_HEADER_SIZE; - writeKey(key); - - if (attr.sendKey) { - writeField(key.userKey, FieldType.KEY); - } + writeKey(policy, key, attr.hasWrite); if (filterExp != null) { filterExp.write(this); } } - private final void writeKey(Policy policy, Key key) { + private void writeKey(Policy policy, Key key, boolean sendDeadline) { writeKey(key); + writeTxn(policy.txn, sendDeadline); if (policy.sendKey) { writeField(key.userKey, FieldType.KEY); @@ -2156,7 +2634,49 @@ private final void writeOperation(Operation.Type operation) { dataBuffer[dataOffset++] = 0; } - private final void writeField(Value value, int type) { + private int sizeTxn(Key key, Txn txn, boolean hasWrite) { + int fieldCount = 0; + + if (txn != null) { + dataOffset += 8 + FIELD_HEADER_SIZE; + fieldCount++; + + version = txn.getReadVersion(key); + + if (version != null) { + dataOffset += 7 + FIELD_HEADER_SIZE; + fieldCount++; + } + + if (hasWrite && txn.getDeadline() != 0) { + dataOffset += 4 + FIELD_HEADER_SIZE; + fieldCount++; + } + } + return fieldCount; + } + + private void writeTxn(Txn txn, boolean sendDeadline) { + if (txn != null) { + writeFieldLE(txn.getId(), FieldType.MRT_ID); + + if (version != null) { + writeFieldVersion(version); + } + + if (sendDeadline && txn.getDeadline() != 0) { + writeFieldLE(txn.getDeadline(), FieldType.MRT_DEADLINE); + } + } + } + + private void writeFieldVersion(long ver) { + writeFieldHeader(7, FieldType.RECORD_VERSION); + Buffer.longToVersionBytes(ver, dataBuffer, dataOffset); + dataOffset += 7; + } + + private void writeField(Value value, int type) { int offset = dataOffset + FIELD_HEADER_SIZE; dataBuffer[offset++] = (byte)value.getType(); int len = value.write(dataBuffer, offset) + 1; @@ -2164,31 +2684,43 @@ private final void writeField(Value value, int type) { dataOffset += len; } - private final void writeField(String str, int type) { + private void writeField(String str, int type) { int len = Buffer.stringToUtf8(str, dataBuffer, dataOffset + FIELD_HEADER_SIZE); writeFieldHeader(len, type); dataOffset += len; } - private final void writeField(byte[] bytes, int type) { + private void writeField(byte[] bytes, int type) { System.arraycopy(bytes, 0, dataBuffer, dataOffset + FIELD_HEADER_SIZE, bytes.length); writeFieldHeader(bytes.length, type); dataOffset += bytes.length; } - private final void writeField(int val, int type) { + private void writeField(int val, int type) { writeFieldHeader(4, type); Buffer.intToBytes(val, dataBuffer, dataOffset); dataOffset += 4; } - private final void writeField(long val, int type) { + private void writeFieldLE(int val, int type) { + writeFieldHeader(4, type); + Buffer.intToLittleBytes(val, dataBuffer, dataOffset); + dataOffset += 4; + } + + private void writeField(long val, int type) { writeFieldHeader(8, type); Buffer.longToBytes(val, dataBuffer, dataOffset); dataOffset += 8; } - private final void writeFieldHeader(int size, int type) { + private void writeFieldLE(long val, int type) { + writeFieldHeader(8, type); + Buffer.longToLittleBytes(val, dataBuffer, dataOffset); + dataOffset += 8; + } + + private void writeFieldHeader(int size, int type) { Buffer.intToBytes(size+1, dataBuffer, dataOffset); dataOffset += 4; dataBuffer[dataOffset++] = (byte)type; @@ -2210,9 +2742,8 @@ protected final void end() { private final void compress(Policy policy) { if 
(policy.compress && dataOffset > COMPRESS_THRESHOLD) { - Deflater def = new Deflater(); + Deflater def = new Deflater(Deflater.BEST_SPEED); try { - def.setLevel(Deflater.BEST_SPEED); def.setInput(dataBuffer, 0, dataOffset); def.finish(); @@ -2291,6 +2822,24 @@ protected final Key parseKey(int fieldCount, BVal bval) { return new Key(namespace, digest, setName, userKey); } + public Long parseVersion(int fieldCount) { + Long version = null; + + for (int i = 0; i < fieldCount; i++) { + int len = Buffer.bytesToInt(dataBuffer, dataOffset); + dataOffset += 4; + + int type = dataBuffer[dataOffset++]; + int size = len - 1; + + if (type == FieldType.RECORD_VERSION && size == 7) { + version = Buffer.versionBytesToLong(dataBuffer, dataOffset); + } + dataOffset += size; + } + return version; + } + protected final Record parseRecord( int opCount, int generation, @@ -2346,77 +2895,4 @@ public static boolean batchInDoubt(boolean isWrite, int commandSentCounter) { public static class OpResults extends ArrayList { private static final long serialVersionUID = 1L; } - - public interface KeyIter { - int size(); - T next(); - int offset(); - void reset(); - } - - private static class BatchRecordIterNative extends BaseIterNative { - private final List records; - - public BatchRecordIterNative(List records, BatchNode batch) { - super(batch); - this.records = records; - } - - @Override - public BatchRecord get(int offset) { - return records.get(offset); - } - } - - private static class KeyIterNative extends BaseIterNative { - private final Key[] keys; - - public KeyIterNative(Key[] keys, BatchNode batch) { - super(batch); - this.keys = keys; - } - - @Override - public Key get(int offset) { - return keys[offset]; - } - } - - private static abstract class BaseIterNative implements KeyIter { - private final int size; - private final int[] offsets; - private int offset; - private int index; - - public BaseIterNative(BatchNode batch) { - this.size = batch.offsetsSize; - this.offsets = batch.offsets; - } - - @Override - public int size() { - return size; - } - - @Override - public T next() { - if (index >= size) { - return null; - } - offset = offsets[index++]; - return get(offset); - } - - abstract T get(int offset); - - @Override - public int offset() { - return offset; - } - - @Override - public void reset() { - index = 0; - } - } } diff --git a/client/src/com/aerospike/client/command/DeleteCommand.java b/client/src/com/aerospike/client/command/DeleteCommand.java index 62ea07fe4..31726eb3a 100644 --- a/client/src/com/aerospike/client/command/DeleteCommand.java +++ b/client/src/com/aerospike/client/command/DeleteCommand.java @@ -1,5 +1,5 @@ /* - * Copyright 2012-2023 Aerospike, Inc. + * Copyright 2012-2024 Aerospike, Inc. * * Portions may be licensed to Aerospike, Inc. under one or more contributor * license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0. 
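For orientation, a minimal standalone sketch of the bytes produced by the new writeFieldLE helper for the MRT id field (the local buffer, offset and id value are invented for illustration; Buffer and FieldType are the same classes used above):

    byte[] buf = new byte[13];
    int offset = 0;
    long txnId = 123456789L;                 // illustrative transaction id

    // Field header: 4-byte big-endian length = payload size + 1 type byte, then the type byte.
    Buffer.intToBytes(8 + 1, buf, offset);
    offset += 4;
    buf[offset++] = (byte)FieldType.MRT_ID;

    // Payload: the MRT id (and MRT deadline) fields are written little-endian,
    // unlike the big-endian encoding used by the regular writeField variants.
    Buffer.longToLittleBytes(txnId, buf, offset);
    offset += 8;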
@@ -16,45 +16,19 @@ */ package com.aerospike.client.command; -import java.io.IOException; - import com.aerospike.client.AerospikeException; import com.aerospike.client.Key; import com.aerospike.client.ResultCode; import com.aerospike.client.cluster.Cluster; import com.aerospike.client.cluster.Connection; -import com.aerospike.client.cluster.Node; -import com.aerospike.client.cluster.Partition; -import com.aerospike.client.metrics.LatencyType; import com.aerospike.client.policy.WritePolicy; +import java.io.IOException; -public final class DeleteCommand extends SyncCommand { - private final WritePolicy writePolicy; - private final Key key; - private final Partition partition; +public final class DeleteCommand extends SyncWriteCommand { private boolean existed; public DeleteCommand(Cluster cluster, WritePolicy writePolicy, Key key) { - super(cluster, writePolicy); - this.writePolicy = writePolicy; - this.key = key; - this.partition = Partition.write(cluster, writePolicy, key); - cluster.addTran(); - } - - @Override - protected boolean isWrite() { - return true; - } - - @Override - protected Node getNode() { - return partition.getNodeWrite(cluster); - } - - @Override - protected LatencyType getLatencyType() { - return LatencyType.WRITE; + super(cluster, writePolicy, key); } @Override @@ -64,33 +38,27 @@ protected void writeBuffer() { @Override protected void parseResult(Connection conn) throws IOException { - RecordParser rp = new RecordParser(conn, dataBuffer); + int resultCode = parseHeader(conn); - if (rp.resultCode == 0) { + if (resultCode == ResultCode.OK) { existed = true; return; } - if (rp.resultCode == ResultCode.KEY_NOT_FOUND_ERROR) { + if (resultCode == ResultCode.KEY_NOT_FOUND_ERROR) { existed = false; return; } - if (rp.resultCode == ResultCode.FILTERED_OUT) { + if (resultCode == ResultCode.FILTERED_OUT) { if (writePolicy.failOnFilteredOut) { - throw new AerospikeException(rp.resultCode); + throw new AerospikeException(resultCode); } existed = true; return; } - throw new AerospikeException(rp.resultCode); - } - - @Override - protected boolean prepareRetry(boolean timeout) { - partition.prepareRetryWrite(timeout); - return true; + throw new AerospikeException(resultCode); } public boolean existed() { diff --git a/client/src/com/aerospike/client/command/ExecuteCommand.java b/client/src/com/aerospike/client/command/ExecuteCommand.java index 9215f3f07..78a02653d 100644 --- a/client/src/com/aerospike/client/command/ExecuteCommand.java +++ b/client/src/com/aerospike/client/command/ExecuteCommand.java @@ -1,5 +1,5 @@ /* - * Copyright 2012-2023 Aerospike, Inc. + * Copyright 2012-2024 Aerospike, Inc. * * Portions may be licensed to Aerospike, Inc. under one or more contributor * license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0. 
@@ -18,18 +18,19 @@ import com.aerospike.client.AerospikeException; import com.aerospike.client.Key; +import com.aerospike.client.Record; +import com.aerospike.client.ResultCode; import com.aerospike.client.Value; import com.aerospike.client.cluster.Cluster; -import com.aerospike.client.cluster.Node; -import com.aerospike.client.cluster.Partition; -import com.aerospike.client.metrics.LatencyType; +import com.aerospike.client.cluster.Connection; import com.aerospike.client.policy.WritePolicy; +import java.io.IOException; -public final class ExecuteCommand extends ReadCommand { - private final WritePolicy writePolicy; +public final class ExecuteCommand extends SyncWriteCommand { private final String packageName; private final String functionName; private final Value[] args; + private Record record; public ExecuteCommand( Cluster cluster, @@ -39,41 +40,67 @@ public ExecuteCommand( String functionName, Value[] args ) { - super(cluster, writePolicy, key, Partition.write(cluster, writePolicy, key), false); - this.writePolicy = writePolicy; + super(cluster, writePolicy, key); this.packageName = packageName; this.functionName = functionName; this.args = args; } @Override - protected boolean isWrite() { - return true; + protected void writeBuffer() { + setUdf(writePolicy, key, packageName, functionName, args); } @Override - protected Node getNode() { - return partition.getNodeWrite(cluster); - } + protected void parseResult(Connection conn) throws IOException { + RecordParser rp = new RecordParser(conn, dataBuffer); + rp.parseFields(policy.txn, key, true); - @Override - protected LatencyType getLatencyType() { - return LatencyType.WRITE; - } + if (rp.resultCode == ResultCode.OK) { + record = rp.parseRecord(false); + return; + } - @Override - protected void writeBuffer() throws AerospikeException { - setUdf(writePolicy, key, packageName, functionName, args); + if (rp.resultCode == ResultCode.UDF_BAD_RESPONSE) { + record = rp.parseRecord(false); + handleUdfError(rp.resultCode); + return; + } + + if (rp.resultCode == ResultCode.FILTERED_OUT) { + if (policy.failOnFilteredOut) { + throw new AerospikeException(rp.resultCode); + } + return; + } + + throw new AerospikeException(rp.resultCode); } - @Override - protected void handleNotFound(int resultCode) { - throw new AerospikeException(resultCode); + private void handleUdfError(int resultCode) { + String ret = (String)record.bins.get("FAILURE"); + + if (ret == null) { + throw new AerospikeException(resultCode); + } + + String message; + int code; + + try { + String[] list = ret.split(":"); + code = Integer.parseInt(list[2].trim()); + message = list[0] + ':' + list[1] + ' ' + list[3]; + } + catch (Throwable e) { + // Use generic exception if parse error occurs. + throw new AerospikeException(resultCode, ret); + } + + throw new AerospikeException(code, message); } - @Override - protected boolean prepareRetry(boolean timeout) { - partition.prepareRetryWrite(timeout); - return true; + public Record getRecord() { + return record; } } diff --git a/client/src/com/aerospike/client/command/ExistsCommand.java b/client/src/com/aerospike/client/command/ExistsCommand.java index 26e0aee2e..ffb1c2a0e 100644 --- a/client/src/com/aerospike/client/command/ExistsCommand.java +++ b/client/src/com/aerospike/client/command/ExistsCommand.java @@ -1,5 +1,5 @@ /* - * Copyright 2012-2023 Aerospike, Inc. + * Copyright 2012-2024 Aerospike, Inc. * * Portions may be licensed to Aerospike, Inc. 
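To make the FAILURE-bin parsing in handleUdfError concrete, here is a hypothetical value shaped to match the split above (the real server-side format is not shown in this change):

    // Purely illustrative FAILURE bin value.
    String ret = "udf_fail.lua:42: 1402:assert failed";
    String[] list = ret.split(":");
    int code = Integer.parseInt(list[2].trim());              // 1402
    String message = list[0] + ':' + list[1] + ' ' + list[3]; // "udf_fail.lua:42 assert failed"

Anything that does not fit this shape falls back to the generic AerospikeException carrying the raw string.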
under one or more contributor * license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0. @@ -23,31 +23,13 @@ import com.aerospike.client.ResultCode; import com.aerospike.client.cluster.Cluster; import com.aerospike.client.cluster.Connection; -import com.aerospike.client.cluster.Node; -import com.aerospike.client.cluster.Partition; -import com.aerospike.client.metrics.LatencyType; import com.aerospike.client.policy.Policy; -public final class ExistsCommand extends SyncCommand { - private final Key key; - private final Partition partition; +public final class ExistsCommand extends SyncReadCommand { private boolean exists; public ExistsCommand(Cluster cluster, Policy policy, Key key) { - super(cluster, policy); - this.key = key; - this.partition = Partition.read(cluster, policy, key); - cluster.addTran(); - } - - @Override - protected Node getNode() { - return partition.getNodeRead(cluster); - } - - @Override - protected LatencyType getLatencyType() { - return LatencyType.READ; + super(cluster, policy, key); } @Override @@ -58,8 +40,9 @@ protected void writeBuffer() { @Override protected void parseResult(Connection conn) throws IOException { RecordParser rp = new RecordParser(conn, dataBuffer); + rp.parseFields(policy.txn, key, false); - if (rp.resultCode == 0) { + if (rp.resultCode == ResultCode.OK) { exists = true; return; } @@ -80,12 +63,6 @@ protected void parseResult(Connection conn) throws IOException { throw new AerospikeException(rp.resultCode); } - @Override - protected boolean prepareRetry(boolean timeout) { - partition.prepareRetryRead(timeout); - return true; - } - public boolean exists() { return exists; } diff --git a/client/src/com/aerospike/client/command/FieldType.java b/client/src/com/aerospike/client/command/FieldType.java index f078367be..a91424776 100644 --- a/client/src/com/aerospike/client/command/FieldType.java +++ b/client/src/com/aerospike/client/command/FieldType.java @@ -1,5 +1,5 @@ /* - * Copyright 2012-2022 Aerospike, Inc. + * Copyright 2012-2024 Aerospike, Inc. * * Portions may be licensed to Aerospike, Inc. under one or more contributor * license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0. @@ -20,8 +20,11 @@ public final class FieldType { public static final int NAMESPACE = 0; public static final int TABLE = 1; public static final int KEY = 2; + public static final int RECORD_VERSION = 3; public static final int DIGEST_RIPE = 4; - public final static int TRAN_ID = 7; // user supplied transaction id, which is simply passed back + public static final int MRT_ID = 5; + public final static int MRT_DEADLINE = 6; + public final static int QUERY_ID = 7; public final static int SOCKET_TIMEOUT = 9; public final static int RECORDS_PER_SECOND = 10; public final static int PID_ARRAY = 11; diff --git a/client/src/com/aerospike/client/command/OperateArgs.java b/client/src/com/aerospike/client/command/OperateArgs.java index c79f4e64e..3a15a5150 100644 --- a/client/src/com/aerospike/client/command/OperateArgs.java +++ b/client/src/com/aerospike/client/command/OperateArgs.java @@ -1,5 +1,5 @@ /* - * Copyright 2012-2023 Aerospike, Inc. + * Copyright 2012-2024 Aerospike, Inc. * * Portions may be licensed to Aerospike, Inc. under one or more contributor * license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0. 
@@ -16,10 +16,7 @@ */ package com.aerospike.client.command; -import com.aerospike.client.Key; import com.aerospike.client.Operation; -import com.aerospike.client.cluster.Cluster; -import com.aerospike.client.cluster.Partition; import com.aerospike.client.policy.WritePolicy; public final class OperateArgs { @@ -112,13 +109,4 @@ public OperateArgs( } writeAttr = wattr; } - - public Partition getPartition(Cluster cluster, Key key) { - if (hasWrite) { - return Partition.write(cluster, writePolicy, key); - } - else { - return Partition.read(cluster, writePolicy, key); - } - } } diff --git a/proxy/src/com/aerospike/client/proxy/grpc/GrpcChannelProvider.java b/client/src/com/aerospike/client/command/OperateCommandRead.java similarity index 59% rename from proxy/src/com/aerospike/client/proxy/grpc/GrpcChannelProvider.java rename to client/src/com/aerospike/client/command/OperateCommandRead.java index 7d6890f4c..da649d288 100644 --- a/proxy/src/com/aerospike/client/proxy/grpc/GrpcChannelProvider.java +++ b/client/src/com/aerospike/client/command/OperateCommandRead.java @@ -1,5 +1,5 @@ /* - * Copyright 2012-2023 Aerospike, Inc. + * Copyright 2012-2024 Aerospike, Inc. * * Portions may be licensed to Aerospike, Inc. under one or more contributor * license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0. @@ -14,24 +14,21 @@ * License for the specific language governing permissions and limitations under * the License. */ -package com.aerospike.client.proxy.grpc; +package com.aerospike.client.command; -import io.grpc.ManagedChannel; +import com.aerospike.client.Key; +import com.aerospike.client.cluster.Cluster; -public class GrpcChannelProvider { - private GrpcCallExecutor callExecutor; +public final class OperateCommandRead extends ReadCommand { + private final OperateArgs args; - /** - * @return a managed channel if available else null. - */ - public ManagedChannel getControlChannel() { - if (callExecutor == null) { - return null; - } - return callExecutor.getControlChannel(); + public OperateCommandRead(Cluster cluster, Key key, OperateArgs args) { + super(cluster, args.writePolicy, key, true); + this.args = args; } - public void setCallExecutor(GrpcCallExecutor callExecutor) { - this.callExecutor = callExecutor; + @Override + protected void writeBuffer() { + setOperate(args.writePolicy, key, args); } } diff --git a/client/src/com/aerospike/client/command/OperateCommandWrite.java b/client/src/com/aerospike/client/command/OperateCommandWrite.java new file mode 100644 index 000000000..989cd1792 --- /dev/null +++ b/client/src/com/aerospike/client/command/OperateCommandWrite.java @@ -0,0 +1,64 @@ +/* + * Copyright 2012-2024 Aerospike, Inc. + * + * Portions may be licensed to Aerospike, Inc. under one or more contributor + * license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. 
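With OperateCommand split into OperateCommandRead above and OperateCommandWrite introduced just below, the hasWrite branch moves from inside the command to the call site. A rough sketch of that dispatch, assuming cluster, key and args already exist and using execute() the same way the Txn commands later in this diff do:

    final Record result;

    if (args.hasWrite) {
        OperateCommandWrite cmd = new OperateCommandWrite(cluster, key, args);
        cmd.execute();
        result = cmd.getRecord();
    }
    else {
        OperateCommandRead cmd = new OperateCommandRead(cluster, key, args);
        cmd.execute();
        result = cmd.getRecord();
    }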
+ */ +package com.aerospike.client.command; + +import com.aerospike.client.AerospikeException; +import com.aerospike.client.Key; +import com.aerospike.client.Record; +import com.aerospike.client.ResultCode; +import com.aerospike.client.cluster.Cluster; +import com.aerospike.client.cluster.Connection; +import java.io.IOException; + +public final class OperateCommandWrite extends SyncWriteCommand { + private final OperateArgs args; + private Record record; + + public OperateCommandWrite(Cluster cluster, Key key, OperateArgs args) { + super(cluster, args.writePolicy, key); + this.args = args; + } + + @Override + protected void writeBuffer() { + setOperate(args.writePolicy, key, args); + } + + @Override + protected void parseResult(Connection conn) throws IOException { + RecordParser rp = new RecordParser(conn, dataBuffer); + rp.parseFields(policy.txn, key, true); + + if (rp.resultCode == ResultCode.OK) { + record = rp.parseRecord(true); + return; + } + + if (rp.resultCode == ResultCode.FILTERED_OUT) { + if (policy.failOnFilteredOut) { + throw new AerospikeException(rp.resultCode); + } + return; + } + + throw new AerospikeException(rp.resultCode); + } + + public Record getRecord() { + return record; + } +} diff --git a/client/src/com/aerospike/client/command/ReadCommand.java b/client/src/com/aerospike/client/command/ReadCommand.java index 0bbccc12f..da26668bc 100644 --- a/client/src/com/aerospike/client/command/ReadCommand.java +++ b/client/src/com/aerospike/client/command/ReadCommand.java @@ -1,5 +1,5 @@ /* - * Copyright 2012-2023 Aerospike, Inc. + * Copyright 2012-2024 Aerospike, Inc. * * Portions may be licensed to Aerospike, Inc. under one or more contributor * license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0. @@ -24,53 +24,29 @@ import com.aerospike.client.ResultCode; import com.aerospike.client.cluster.Cluster; import com.aerospike.client.cluster.Connection; -import com.aerospike.client.cluster.Node; -import com.aerospike.client.cluster.Partition; -import com.aerospike.client.metrics.LatencyType; import com.aerospike.client.policy.Policy; -public class ReadCommand extends SyncCommand { - protected final Key key; - protected final Partition partition; +public class ReadCommand extends SyncReadCommand { private final String[] binNames; private final boolean isOperation; private Record record; public ReadCommand(Cluster cluster, Policy policy, Key key) { - super(cluster, policy); - this.key = key; + super(cluster, policy, key); this.binNames = null; - this.partition = Partition.read(cluster, policy, key); this.isOperation = false; - cluster.addTran(); } public ReadCommand(Cluster cluster, Policy policy, Key key, String[] binNames) { - super(cluster, policy); - this.key = key; + super(cluster, policy, key); this.binNames = binNames; - this.partition = Partition.read(cluster, policy, key); this.isOperation = false; - cluster.addTran(); } - public ReadCommand(Cluster cluster, Policy policy, Key key, Partition partition, boolean isOperation) { - super(cluster, policy); - this.key = key; + public ReadCommand(Cluster cluster, Policy policy, Key key, boolean isOperation) { + super(cluster, policy, key); this.binNames = null; - this.partition = partition; this.isOperation = isOperation; - cluster.addTran(); - } - - @Override - protected Node getNode() { - return partition.getNodeRead(cluster); - } - - @Override - protected LatencyType getLatencyType() { - return LatencyType.READ; } @Override @@ -81,6 +57,7 @@ protected void writeBuffer() { @Override protected void 
parseResult(Connection conn) throws IOException { RecordParser rp = new RecordParser(conn, dataBuffer); + rp.parseFields(policy.txn, key, false); if (rp.resultCode == ResultCode.OK) { this.record = rp.parseRecord(isOperation); @@ -88,7 +65,6 @@ protected void parseResult(Connection conn) throws IOException { } if (rp.resultCode == ResultCode.KEY_NOT_FOUND_ERROR) { - handleNotFound(rp.resultCode); return; } @@ -99,48 +75,9 @@ protected void parseResult(Connection conn) throws IOException { return; } - if (rp.resultCode == ResultCode.UDF_BAD_RESPONSE) { - this.record = rp.parseRecord(isOperation); - handleUdfError(rp.resultCode); - return; - } - throw new AerospikeException(rp.resultCode); } - @Override - protected boolean prepareRetry(boolean timeout) { - partition.prepareRetryRead(timeout); - return true; - } - - protected void handleNotFound(int resultCode) { - // Do nothing in default case. Record will be null. - } - - private void handleUdfError(int resultCode) { - String ret = (String)record.bins.get("FAILURE"); - - if (ret == null) { - throw new AerospikeException(resultCode); - } - - String message; - int code; - - try { - String[] list = ret.split(":"); - code = Integer.parseInt(list[2].trim()); - message = list[0] + ':' + list[1] + ' ' + list[3]; - } - catch (Throwable e) { - // Use generic exception if parse error occurs. - throw new AerospikeException(resultCode, ret); - } - - throw new AerospikeException(code, message); - } - public Record getRecord() { return record; } diff --git a/client/src/com/aerospike/client/command/ReadHeaderCommand.java b/client/src/com/aerospike/client/command/ReadHeaderCommand.java index dca35c24e..b9c5c5c4c 100644 --- a/client/src/com/aerospike/client/command/ReadHeaderCommand.java +++ b/client/src/com/aerospike/client/command/ReadHeaderCommand.java @@ -1,5 +1,5 @@ /* - * Copyright 2012-2023 Aerospike, Inc. + * Copyright 2012-2024 Aerospike, Inc. * * Portions may be licensed to Aerospike, Inc. under one or more contributor * license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0. 
@@ -24,31 +24,13 @@ import com.aerospike.client.ResultCode; import com.aerospike.client.cluster.Cluster; import com.aerospike.client.cluster.Connection; -import com.aerospike.client.cluster.Node; -import com.aerospike.client.cluster.Partition; -import com.aerospike.client.metrics.LatencyType; import com.aerospike.client.policy.Policy; -public class ReadHeaderCommand extends SyncCommand { - private final Key key; - private final Partition partition; +public final class ReadHeaderCommand extends SyncReadCommand { private Record record; public ReadHeaderCommand(Cluster cluster, Policy policy, Key key) { - super(cluster, policy); - this.key = key; - this.partition = Partition.read(cluster, policy, key); - cluster.addTran(); - } - - @Override - protected Node getNode() { - return partition.getNodeRead(cluster); - } - - @Override - protected LatencyType getLatencyType() { - return LatencyType.READ; + super(cluster, policy, key); } @Override @@ -59,8 +41,9 @@ protected void writeBuffer() { @Override protected void parseResult(Connection conn) throws IOException { RecordParser rp = new RecordParser(conn, dataBuffer); + rp.parseFields(policy.txn, key, false); - if (rp.resultCode == 0) { + if (rp.resultCode == ResultCode.OK) { record = new Record(null, rp.generation, rp.expiration); return; } @@ -79,12 +62,6 @@ record = new Record(null, rp.generation, rp.expiration); throw new AerospikeException(rp.resultCode); } - @Override - protected boolean prepareRetry(boolean timeout) { - partition.prepareRetryRead(timeout); - return true; - } - public Record getRecord() { return record; } diff --git a/client/src/com/aerospike/client/command/RecordParser.java b/client/src/com/aerospike/client/command/RecordParser.java index 29945a8ba..409a17072 100644 --- a/client/src/com/aerospike/client/command/RecordParser.java +++ b/client/src/com/aerospike/client/command/RecordParser.java @@ -23,7 +23,9 @@ import java.util.zip.Inflater; import com.aerospike.client.AerospikeException; +import com.aerospike.client.Key; import com.aerospike.client.Record; +import com.aerospike.client.Txn; import com.aerospike.client.cluster.Connection; import com.aerospike.client.command.Command.OpResults; @@ -114,18 +116,18 @@ else if (type == Command.MSG_TYPE_COMPRESSED) { throw new AerospikeException("Invalid proto type: " + type + " Expected: " + Command.AS_MSG_TYPE); } - this.resultCode = buffer[offset] & 0xFF; + resultCode = buffer[offset] & 0xFF; offset++; - this.generation = Buffer.bytesToInt(buffer, offset); + generation = Buffer.bytesToInt(buffer, offset); offset += 4; - this.expiration = Buffer.bytesToInt(buffer, offset); + expiration = Buffer.bytesToInt(buffer, offset); offset += 8; - this.fieldCount = Buffer.bytesToShort(buffer, offset); + fieldCount = Buffer.bytesToShort(buffer, offset); offset += 2; - this.opCount = Buffer.bytesToShort(buffer, offset); + opCount = Buffer.bytesToShort(buffer, offset); offset += 2; - this.dataOffset = offset; - this.dataBuffer = buffer; + dataOffset = offset; + dataBuffer = buffer; } /** @@ -137,33 +139,84 @@ public RecordParser(byte[] buffer, int offset, int receiveSize) { } offset += 5; - this.resultCode = buffer[offset] & 0xFF; + resultCode = buffer[offset] & 0xFF; offset++; - this.generation = Buffer.bytesToInt(buffer, offset); + generation = Buffer.bytesToInt(buffer, offset); offset += 4; - this.expiration = Buffer.bytesToInt(buffer, offset); + expiration = Buffer.bytesToInt(buffer, offset); offset += 8; - this.fieldCount = Buffer.bytesToShort(buffer, offset); + fieldCount = 
Buffer.bytesToShort(buffer, offset); offset += 2; - this.opCount = Buffer.bytesToShort(buffer, offset); + opCount = Buffer.bytesToShort(buffer, offset); offset += 2; - this.dataOffset = offset; - this.dataBuffer = buffer; + dataOffset = offset; + dataBuffer = buffer; } - public Record parseRecord(boolean isOperation) { - if (opCount == 0) { - // Bin data was not returned. - return new Record(null, generation, expiration); + public void parseFields(Txn txn, Key key, boolean hasWrite) { + if (txn == null) { + skipFields(); + return; } - // Skip key. + Long version = null; + + for (int i = 0; i < fieldCount; i++) { + int len = Buffer.bytesToInt(dataBuffer, dataOffset); + dataOffset += 4; + + int type = dataBuffer[dataOffset++]; + int size = len - 1; + + if (type == FieldType.RECORD_VERSION) { + if (size == 7) { + version = Buffer.versionBytesToLong(dataBuffer, dataOffset); + } + else { + throw new AerospikeException("Record version field has invalid size: " + size); + } + } + dataOffset += size; + } + + if (hasWrite) { + txn.onWrite(key, version, resultCode); + } else { + txn.onRead(key, version); + } + } + + public void parseTranDeadline(Txn txn) { + for (int i = 0; i < fieldCount; i++) { + int len = Buffer.bytesToInt(dataBuffer, dataOffset); + dataOffset += 4; + + int type = dataBuffer[dataOffset++]; + int size = len - 1; + + if (type == FieldType.MRT_DEADLINE) { + int deadline = Buffer.littleBytesToInt(dataBuffer, dataOffset); + txn.setDeadline(deadline); + } + dataOffset += size; + } + } + + private void skipFields() { + // There can be fields in the response (setname etc). + // But for now, ignore them. Expose them to the API if needed in the future. for (int i = 0; i < fieldCount; i++) { int fieldlen = Buffer.bytesToInt(dataBuffer, dataOffset); dataOffset += 4 + fieldlen; } + } + + public Record parseRecord(boolean isOperation) { + if (opCount == 0) { + // Bin data was not returned. + return new Record(null, generation, expiration); + } - // Parse record. 
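As a rough illustration of the bookkeeping parseFields performs on a transaction (the Txn constructor, the key and the version literal are assumptions; onRead and onWrite are the calls made above):

    Txn txn = new Txn();                        // no-arg constructor assumed, not shown in this diff
    Key key = new Key("test", "demo", 1);
    Long version = 42L;                         // normally produced by Buffer.versionBytesToLong

    txn.onRead(key, version);                   // read path: remember the version for commit-time verify
    txn.onWrite(key, version, ResultCode.OK);   // write path: track the key in the transaction's write set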
Map bins = new LinkedHashMap<>(); for (int i = 0 ; i < opCount; i++) { diff --git a/client/src/com/aerospike/client/command/ScanExecutor.java b/client/src/com/aerospike/client/command/ScanExecutor.java index 8427a0ae9..ca97eab44 100644 --- a/client/src/com/aerospike/client/command/ScanExecutor.java +++ b/client/src/com/aerospike/client/command/ScanExecutor.java @@ -37,7 +37,7 @@ public static void scanPartitions( ScanCallback callback, PartitionTracker tracker ) { - cluster.addTran(); + cluster.addCommandCount(); RandomShift random = new RandomShift(); diff --git a/client/src/com/aerospike/client/command/SyncCommand.java b/client/src/com/aerospike/client/command/SyncCommand.java index 61c1ac1e1..50251bec4 100644 --- a/client/src/com/aerospike/client/command/SyncCommand.java +++ b/client/src/com/aerospike/client/command/SyncCommand.java @@ -82,7 +82,7 @@ public final void executeCommand() { catch (AerospikeException ae) { if (cluster.isActive()) { // Log.info("Throw AerospikeException: " + tranId + ',' + node + ',' + sequence + ',' + iteration + ',' + ae.getResultCode()); - setExceptionData(null, ae, subExceptions); + prepareException(null, ae, subExceptions); throw ae; } else { @@ -148,7 +148,7 @@ else if (ae.getResultCode() == ResultCode.DEVICE_OVERLOAD) { } else { node.addError(); - setExceptionData(node, ae, subExceptions); + prepareException(node, ae, subExceptions); throw ae; } } @@ -186,7 +186,7 @@ else if (ae.getResultCode() == ResultCode.DEVICE_OVERLOAD) { node.closeConnection(conn); node.addError(); AerospikeException ae = new AerospikeException(t); - setExceptionData(node, ae, subExceptions); + prepareException(node, ae, subExceptions); throw ae; } } @@ -213,13 +213,13 @@ else if (ae.getResultCode() == ResultCode.DEVICE_OVERLOAD) { catch (AerospikeException ae) { // Log.info("Throw AerospikeException: " + tranId + ',' + node + ',' + sequence + ',' + iteration + ',' + ae.getResultCode()); node.addError(); - setExceptionData(node, ae, subExceptions); + prepareException(node, ae, subExceptions); throw ae; } catch (Throwable t) { node.addError(); AerospikeException ae = new AerospikeException(t); - setExceptionData(node, ae, subExceptions); + prepareException(node, ae, subExceptions); throw ae; } @@ -253,7 +253,10 @@ else if (ae.getResultCode() == ResultCode.DEVICE_OVERLOAD) { Util.sleep(policy.sleepBetweenRetries); } - setExceptionData(node, exception, null); + exception.setNode(node); + exception.setPolicy(policy); + exception.setIteration(iteration); + exception.setInDoubt(isWrite(), commandSentCounter); addSubException(exception); iteration++; @@ -270,7 +273,7 @@ else if (ae.getResultCode() == ResultCode.DEVICE_OVERLOAD) { // Retries have been exhausted. Throw last exception. 
// Log.info("Runtime exception: " + tranId + ',' + sequence + ',' + iteration + ',' + exception.getMessage()); - setExceptionData(node, exception, subExceptions); + prepareException(node, exception, subExceptions); throw exception; } @@ -281,12 +284,20 @@ protected void addSubException(AerospikeException exception) { subExceptions.add(exception); } - private void setExceptionData(Node node, AerospikeException exception, List subExceptions) { - exception.setNode(node); - exception.setPolicy(policy); - exception.setIteration(iteration); - exception.setInDoubt(isWrite(), commandSentCounter); - exception.setSubExceptions(subExceptions); + private void prepareException(Node node, AerospikeException ae, List subExceptions) { + ae.setNode(node); + ae.setPolicy(policy); + ae.setIteration(iteration); + ae.setInDoubt(isWrite(), commandSentCounter); + ae.setSubExceptions(subExceptions); + + if (ae.getInDoubt()) { + onInDoubt(); + } + } + + protected void onInDoubt() { + // Write commands will override this method. } public void resetDeadline(long startTime) { diff --git a/client/src/com/aerospike/client/command/SyncReadCommand.java b/client/src/com/aerospike/client/command/SyncReadCommand.java new file mode 100644 index 000000000..4bfda0ce3 --- /dev/null +++ b/client/src/com/aerospike/client/command/SyncReadCommand.java @@ -0,0 +1,52 @@ +/* + * Copyright 2012-2024 Aerospike, Inc. + * + * Portions may be licensed to Aerospike, Inc. under one or more contributor + * license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. 
+ */ +package com.aerospike.client.command; + +import com.aerospike.client.Key; +import com.aerospike.client.cluster.Cluster; +import com.aerospike.client.cluster.Node; +import com.aerospike.client.cluster.Partition; +import com.aerospike.client.metrics.LatencyType; +import com.aerospike.client.policy.Policy; + +public abstract class SyncReadCommand extends SyncCommand { + final Key key; + final Partition partition; + + public SyncReadCommand(Cluster cluster, Policy policy, Key key) { + super(cluster, policy); + this.key = key; + this.partition = Partition.read(cluster, policy, key); + cluster.addCommandCount(); + } + + @Override + protected Node getNode() { + return partition.getNodeRead(cluster); + } + + @Override + protected LatencyType getLatencyType() { + return LatencyType.READ; + } + + @Override + protected boolean prepareRetry(boolean timeout) { + partition.prepareRetryRead(timeout); + return true; + } +} diff --git a/client/src/com/aerospike/client/command/OperateCommand.java b/client/src/com/aerospike/client/command/SyncWriteCommand.java similarity index 53% rename from client/src/com/aerospike/client/command/OperateCommand.java rename to client/src/com/aerospike/client/command/SyncWriteCommand.java index 0c5645dc8..80f5b3a31 100644 --- a/client/src/com/aerospike/client/command/OperateCommand.java +++ b/client/src/com/aerospike/client/command/SyncWriteCommand.java @@ -1,5 +1,5 @@ /* - * Copyright 2012-2023 Aerospike, Inc. + * Copyright 2012-2024 Aerospike, Inc. * * Portions may be licensed to Aerospike, Inc. under one or more contributor * license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0. @@ -16,57 +16,60 @@ */ package com.aerospike.client.command; -import com.aerospike.client.AerospikeException; +import java.io.IOException; + import com.aerospike.client.Key; import com.aerospike.client.cluster.Cluster; +import com.aerospike.client.cluster.Connection; import com.aerospike.client.cluster.Node; +import com.aerospike.client.cluster.Partition; import com.aerospike.client.metrics.LatencyType; +import com.aerospike.client.policy.WritePolicy; -public final class OperateCommand extends ReadCommand { - private final OperateArgs args; +public abstract class SyncWriteCommand extends SyncCommand { + final WritePolicy writePolicy; + final Key key; + final Partition partition; - public OperateCommand(Cluster cluster, Key key, OperateArgs args) { - super(cluster, args.writePolicy, key, args.getPartition(cluster, key), true); - this.args = args; + public SyncWriteCommand(Cluster cluster, WritePolicy writePolicy, Key key) { + super(cluster, writePolicy); + this.writePolicy = writePolicy; + this.key = key; + this.partition = Partition.write(cluster, writePolicy, key); + cluster.addCommandCount(); } @Override protected boolean isWrite() { - return args.hasWrite; + return true; } @Override protected Node getNode() { - return args.hasWrite ? partition.getNodeWrite(cluster) : partition.getNodeRead(cluster); + return partition.getNodeWrite(cluster); } @Override protected LatencyType getLatencyType() { - return args.hasWrite ? LatencyType.WRITE : LatencyType.READ; + return LatencyType.WRITE; } @Override - protected void writeBuffer() { - setOperate(args.writePolicy, key, args); + protected boolean prepareRetry(boolean timeout) { + partition.prepareRetryWrite(timeout); + return true; } @Override - protected void handleNotFound(int resultCode) { - // Only throw not found exception for command with write operations. - // Read-only command operations return a null record. 
- if (args.hasWrite) { - throw new AerospikeException(resultCode); + protected void onInDoubt() { + if (writePolicy.txn != null) { + writePolicy.txn.onWriteInDoubt(key); } } - @Override - protected boolean prepareRetry(boolean timeout) { - if (args.hasWrite) { - partition.prepareRetryWrite(timeout); - } - else { - partition.prepareRetryRead(timeout); - } - return true; + protected int parseHeader(Connection conn) throws IOException { + RecordParser rp = new RecordParser(conn, dataBuffer); + rp.parseFields(policy.txn, key, true); + return rp.resultCode; } } diff --git a/client/src/com/aerospike/client/command/TouchCommand.java b/client/src/com/aerospike/client/command/TouchCommand.java index f25680ebc..4432e5efa 100644 --- a/client/src/com/aerospike/client/command/TouchCommand.java +++ b/client/src/com/aerospike/client/command/TouchCommand.java @@ -1,5 +1,5 @@ /* - * Copyright 2012-2023 Aerospike, Inc. + * Copyright 2012-2024 Aerospike, Inc. * * Portions may be licensed to Aerospike, Inc. under one or more contributor * license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0. @@ -23,37 +23,11 @@ import com.aerospike.client.ResultCode; import com.aerospike.client.cluster.Cluster; import com.aerospike.client.cluster.Connection; -import com.aerospike.client.cluster.Node; -import com.aerospike.client.cluster.Partition; -import com.aerospike.client.metrics.LatencyType; import com.aerospike.client.policy.WritePolicy; -public final class TouchCommand extends SyncCommand { - private final WritePolicy writePolicy; - private final Key key; - private final Partition partition; - +public final class TouchCommand extends SyncWriteCommand { public TouchCommand(Cluster cluster, WritePolicy writePolicy, Key key) { - super(cluster, writePolicy); - this.writePolicy = writePolicy; - this.key = key; - this.partition = Partition.write(cluster, writePolicy, key); - cluster.addTran(); - } - - @Override - protected boolean isWrite() { - return true; - } - - @Override - protected Node getNode() { - return partition.getNodeWrite(cluster); - } - - @Override - protected LatencyType getLatencyType() { - return LatencyType.WRITE; + super(cluster, writePolicy, key); } @Override @@ -63,25 +37,19 @@ protected void writeBuffer() { @Override protected void parseResult(Connection conn) throws IOException { - RecordParser rp = new RecordParser(conn, dataBuffer); + int resultCode = parseHeader(conn); - if (rp.resultCode == 0) { + if (resultCode == ResultCode.OK) { return; } - if (rp.resultCode == ResultCode.FILTERED_OUT) { + if (resultCode == ResultCode.FILTERED_OUT) { if (writePolicy.failOnFilteredOut) { - throw new AerospikeException(rp.resultCode); + throw new AerospikeException(resultCode); } return; } - throw new AerospikeException(rp.resultCode); - } - - @Override - protected boolean prepareRetry(boolean timeout) { - partition.prepareRetryWrite(timeout); - return true; + throw new AerospikeException(resultCode); } } diff --git a/client/src/com/aerospike/client/command/TxnAddKeys.java b/client/src/com/aerospike/client/command/TxnAddKeys.java new file mode 100644 index 000000000..af155384e --- /dev/null +++ b/client/src/com/aerospike/client/command/TxnAddKeys.java @@ -0,0 +1,57 @@ +/* + * Copyright 2012-2024 Aerospike, Inc. + * + * Portions may be licensed to Aerospike, Inc. under one or more contributor + * license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0. 
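To show what the new base classes leave to a subclass, a hypothetical minimal read command in the same package might look like the sketch below (the class name is invented and setReadHeader is assumed from the existing ReadHeaderCommand; SyncReadCommand supplies node selection, READ latency tracking and retry handling):

    public final class GetGenerationCommand extends SyncReadCommand {
        private int generation;

        public GetGenerationCommand(Cluster cluster, Policy policy, Key key) {
            super(cluster, policy, key);
        }

        @Override
        protected void writeBuffer() {
            setReadHeader(policy, key);   // assumed Command setter, not shown in this diff
        }

        @Override
        protected void parseResult(Connection conn) throws IOException {
            RecordParser rp = new RecordParser(conn, dataBuffer);
            rp.parseFields(policy.txn, key, false);

            if (rp.resultCode == ResultCode.OK || rp.resultCode == ResultCode.KEY_NOT_FOUND_ERROR) {
                generation = rp.generation;
                return;
            }
            throw new AerospikeException(rp.resultCode);
        }

        public int getGeneration() {
            return generation;
        }
    }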
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ +package com.aerospike.client.command; + +import java.io.IOException; + +import com.aerospike.client.AerospikeException; +import com.aerospike.client.Key; +import com.aerospike.client.ResultCode; +import com.aerospike.client.cluster.Cluster; +import com.aerospike.client.cluster.Connection; + +public final class TxnAddKeys extends SyncWriteCommand { + private final OperateArgs args; + + public TxnAddKeys(Cluster cluster, Key key, OperateArgs args) { + super(cluster, args.writePolicy, key); + this.args = args; + } + + @Override + protected void writeBuffer() { + setTxnAddKeys(args.writePolicy, key, args); + } + + @Override + protected void parseResult(Connection conn) throws IOException { + RecordParser rp = new RecordParser(conn, dataBuffer); + rp.parseTranDeadline(policy.txn); + + if (rp.resultCode == ResultCode.OK) { + return; + } + + throw new AerospikeException(rp.resultCode); + } + + @Override + protected void onInDoubt() { + // The MRT monitor record might exist if TxnAddKeys command is inDoubt. + policy.txn.setMonitorInDoubt(); + } +} diff --git a/client/src/com/aerospike/client/command/TxnClose.java b/client/src/com/aerospike/client/command/TxnClose.java new file mode 100644 index 000000000..2639b1436 --- /dev/null +++ b/client/src/com/aerospike/client/command/TxnClose.java @@ -0,0 +1,55 @@ +/* + * Copyright 2012-2024 Aerospike, Inc. + * + * Portions may be licensed to Aerospike, Inc. under one or more contributor + * license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. 
+ */ +package com.aerospike.client.command; + +import com.aerospike.client.AerospikeException; +import com.aerospike.client.Key; +import com.aerospike.client.ResultCode; +import com.aerospike.client.Txn; +import com.aerospike.client.cluster.Cluster; +import com.aerospike.client.cluster.Connection; +import com.aerospike.client.policy.WritePolicy; +import java.io.IOException; + +public final class TxnClose extends SyncWriteCommand { + private final Txn txn; + + public TxnClose(Cluster cluster, Txn txn, WritePolicy writePolicy, Key key) { + super(cluster, writePolicy, key); + this.txn = txn; + } + + @Override + protected void writeBuffer() { + setTxnClose(txn, key); + } + + @Override + protected void parseResult(Connection conn) throws IOException { + int resultCode = parseHeader(conn); + + if (resultCode == ResultCode.OK || resultCode == ResultCode.KEY_NOT_FOUND_ERROR) { + return; + } + + throw new AerospikeException(resultCode); + } + + @Override + protected void onInDoubt() { + } +} diff --git a/client/src/com/aerospike/client/command/TxnMarkRollForward.java b/client/src/com/aerospike/client/command/TxnMarkRollForward.java new file mode 100644 index 000000000..2f7464f21 --- /dev/null +++ b/client/src/com/aerospike/client/command/TxnMarkRollForward.java @@ -0,0 +1,55 @@ +/* + * Copyright 2012-2024 Aerospike, Inc. + * + * Portions may be licensed to Aerospike, Inc. under one or more contributor + * license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ +package com.aerospike.client.command; + +import java.io.IOException; + +import com.aerospike.client.AerospikeException; +import com.aerospike.client.Key; +import com.aerospike.client.ResultCode; +import com.aerospike.client.cluster.Cluster; +import com.aerospike.client.cluster.Connection; +import com.aerospike.client.policy.WritePolicy; + +public final class TxnMarkRollForward extends SyncWriteCommand { + + public TxnMarkRollForward(Cluster cluster, WritePolicy writePolicy, Key key) { + super(cluster, writePolicy, key); + } + + @Override + protected void writeBuffer() { + setTxnMarkRollForward(key); + } + + @Override + protected void parseResult(Connection conn) throws IOException { + int resultCode = parseHeader(conn); + + // MRT_COMMITTED is considered a success because it means a previous attempt already + // succeeded in notifying the server that the MRT will be rolled forward. + if (resultCode == ResultCode.OK || resultCode == ResultCode.MRT_COMMITTED) { + return; + } + + throw new AerospikeException(resultCode); + } + + @Override + protected void onInDoubt() { + } +} diff --git a/client/src/com/aerospike/client/command/TxnMonitor.java b/client/src/com/aerospike/client/command/TxnMonitor.java new file mode 100644 index 000000000..c7a94e192 --- /dev/null +++ b/client/src/com/aerospike/client/command/TxnMonitor.java @@ -0,0 +1,162 @@ +/* + * Copyright 2012-2024 Aerospike, Inc. + * + * Portions may be licensed to Aerospike, Inc. 
under one or more contributor + * license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ +package com.aerospike.client.command; + +import com.aerospike.client.BatchRecord; +import com.aerospike.client.Bin; +import com.aerospike.client.Key; +import com.aerospike.client.Operation; +import com.aerospike.client.Txn; +import com.aerospike.client.Value; +import com.aerospike.client.cdt.ListOperation; +import com.aerospike.client.cdt.ListOrder; +import com.aerospike.client.cdt.ListPolicy; +import com.aerospike.client.cdt.ListWriteFlags; +import com.aerospike.client.cluster.Cluster; +import com.aerospike.client.policy.BatchPolicy; +import com.aerospike.client.policy.Policy; +import com.aerospike.client.policy.WritePolicy; +import java.util.ArrayList; +import java.util.List; + +public final class TxnMonitor { + private static final ListPolicy OrderedListPolicy = new ListPolicy(ListOrder.ORDERED, + ListWriteFlags.ADD_UNIQUE | ListWriteFlags.NO_FAIL | ListWriteFlags.PARTIAL); + + private static final String BinNameId = "id"; + private static final String BinNameDigests = "keyds"; + + public static void addKey(Cluster cluster, WritePolicy policy, Key cmdKey) { + Txn txn = policy.txn; + + if (txn.getWrites().contains(cmdKey)) { + // Transaction monitor already contains this key. + return; + } + + Operation[] ops = getTranOps(txn, cmdKey); + addWriteKeys(cluster, policy, ops); + } + + public static void addKeys(Cluster cluster, BatchPolicy policy, Key[] keys) { + Operation[] ops = getTranOps(policy.txn, keys); + addWriteKeys(cluster, policy, ops); + } + + public static void addKeys(Cluster cluster, BatchPolicy policy, List records) { + Operation[] ops = getTranOps(policy.txn, records); + + if (ops != null) { + addWriteKeys(cluster, policy, ops); + } + } + + public static Operation[] getTranOps(Txn txn, Key cmdKey) { + txn.verifyCommand(); + txn.setNamespace(cmdKey.namespace); + + if (txn.monitorExists()) { + return new Operation[] { + ListOperation.append(OrderedListPolicy, BinNameDigests, Value.get(cmdKey.digest)) + }; + } + else { + return new Operation[] { + Operation.put(new Bin(BinNameId, txn.getId())), + ListOperation.append(OrderedListPolicy, BinNameDigests, Value.get(cmdKey.digest)) + }; + } + } + + public static Operation[] getTranOps(Txn txn, Key[] keys) { + txn.verifyCommand(); + + ArrayList list = new ArrayList<>(keys.length); + + for (Key key : keys) { + txn.setNamespace(key.namespace); + list.add(Value.get(key.digest)); + } + return getTranOps(txn, list); + } + + public static Operation[] getTranOps(Txn txn, List records) { + txn.verifyCommand(); + + ArrayList list = new ArrayList<>(records.size()); + + for (BatchRecord br : records) { + txn.setNamespace(br.key.namespace); + + if (br.hasWrite) { + list.add(Value.get(br.key.digest)); + } + } + + if (list.size() == 0) { + // Readonly batch does not need to add key digests. 
+ return null; + } + return getTranOps(txn, list); + } + + private static Operation[] getTranOps(Txn txn, ArrayList list) { + if (txn.monitorExists()) { + return new Operation[] { + ListOperation.appendItems(OrderedListPolicy, BinNameDigests, list) + }; + } + else { + return new Operation[] { + Operation.put(new Bin(BinNameId, txn.getId())), + ListOperation.appendItems(OrderedListPolicy, BinNameDigests, list) + }; + } + } + + private static void addWriteKeys(Cluster cluster, Policy policy, Operation[] ops) { + Key txnKey = getTxnMonitorKey(policy.txn); + WritePolicy wp = copyTimeoutPolicy(policy); + OperateArgs args = new OperateArgs(wp, null, null, ops); + TxnAddKeys cmd = new TxnAddKeys(cluster, txnKey, args); + cmd.execute(); + } + + public static Key getTxnMonitorKey(Txn txn) { + return new Key(txn.getNamespace(), "> reads = txn.getReads(); + int max = reads.size(); + + if (max == 0) { + return; + } + + BatchRecord[] records = new BatchRecord[max]; + Key[] keys = new Key[max]; + Long[] versions = new Long[max]; + int count = 0; + + for (Map.Entry entry : reads) { + Key key = entry.getKey(); + keys[count] = key; + records[count] = new BatchRecord(key, false); + versions[count] = entry.getValue(); + count++; + } + + this.verifyRecords = records; + + BatchStatus status = new BatchStatus(true); + List bns = BatchNodeList.generate(cluster, verifyPolicy, keys, records, false, status); + IBatchCommand[] commands = new IBatchCommand[bns.size()]; + + count = 0; + + for (BatchNode bn : bns) { + if (bn.offsetsSize == 1) { + int i = bn.offsets[0]; + commands[count++] = new BatchSingle.TxnVerify( + cluster, verifyPolicy, versions[i], records[i], status, bn.node); + } + else { + commands[count++] = new Batch.TxnVerify( + cluster, bn, verifyPolicy, keys, versions, records, status); + } + } + + BatchExecutor.execute(cluster, verifyPolicy, commands, status); + + if (!status.getStatus()) { + throw new RuntimeException("Failed to verify one or more record versions"); + } + } + + private void markRollForward(WritePolicy writePolicy, Key txnKey) { + // Tell MRT monitor that a roll-forward will commence. + TxnMarkRollForward cmd = new TxnMarkRollForward(cluster, writePolicy, txnKey); + cmd.execute(); + } + + private void roll(BatchPolicy rollPolicy, int txnAttr) { + Set keySet = txn.getWrites(); + + if (keySet.isEmpty()) { + return; + } + + Key[] keys = keySet.toArray(new Key[keySet.size()]); + BatchRecord[] records = new BatchRecord[keys.length]; + + for (int i = 0; i < keys.length; i++) { + records[i] = new BatchRecord(keys[i], true); + } + + this.rollRecords = records; + + BatchAttr attr = new BatchAttr(); + attr.setTxn(txnAttr); + + BatchStatus status = new BatchStatus(true); + + List bns = BatchNodeList.generate(cluster, rollPolicy, keys, records, true, status); + IBatchCommand[] commands = new IBatchCommand[bns.size()]; + int count = 0; + + for (BatchNode bn : bns) { + if (bn.offsetsSize == 1) { + int i = bn.offsets[0]; + commands[count++] = new BatchSingle.TxnRoll( + cluster, rollPolicy, txn, records[i], status, bn.node, txnAttr); + } + else { + commands[count++] = new Batch.TxnRoll( + cluster, bn, rollPolicy, txn, keys, records, attr, status); + } + } + BatchExecutor.execute(cluster, rollPolicy, commands, status); + + if (!status.getStatus()) { + String rollString = txnAttr == Command.INFO4_MRT_ROLL_FORWARD? 
"commit" : "abort"; + throw new RuntimeException("Failed to " + rollString + " one or more records"); + } + } + + private void close(WritePolicy writePolicy, Key txnKey) { + // Delete MRT monitor on server. + TxnClose cmd = new TxnClose(cluster, txn, writePolicy, txnKey); + cmd.execute(); + + // Reset MRT on client. + txn.clear(); + } +} diff --git a/client/src/com/aerospike/client/command/WriteCommand.java b/client/src/com/aerospike/client/command/WriteCommand.java index d7901e2ea..d7091b7b7 100644 --- a/client/src/com/aerospike/client/command/WriteCommand.java +++ b/client/src/com/aerospike/client/command/WriteCommand.java @@ -1,5 +1,5 @@ /* - * Copyright 2012-2023 Aerospike, Inc. + * Copyright 2012-2024 Aerospike, Inc. * * Portions may be licensed to Aerospike, Inc. under one or more contributor * license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0. @@ -16,8 +16,6 @@ */ package com.aerospike.client.command; -import java.io.IOException; - import com.aerospike.client.AerospikeException; import com.aerospike.client.Bin; import com.aerospike.client.Key; @@ -25,41 +23,17 @@ import com.aerospike.client.ResultCode; import com.aerospike.client.cluster.Cluster; import com.aerospike.client.cluster.Connection; -import com.aerospike.client.cluster.Node; -import com.aerospike.client.cluster.Partition; -import com.aerospike.client.metrics.LatencyType; import com.aerospike.client.policy.WritePolicy; +import java.io.IOException; -public final class WriteCommand extends SyncCommand { - private final WritePolicy writePolicy; - private final Key key; - private final Partition partition; +public final class WriteCommand extends SyncWriteCommand { private final Bin[] bins; private final Operation.Type operation; public WriteCommand(Cluster cluster, WritePolicy writePolicy, Key key, Bin[] bins, Operation.Type operation) { - super(cluster, writePolicy); - this.writePolicy = writePolicy; - this.key = key; - this.partition = Partition.write(cluster, writePolicy, key); + super(cluster, writePolicy, key); this.bins = bins; this.operation = operation; - cluster.addTran(); - } - - @Override - protected boolean isWrite() { - return true; - } - - @Override - protected Node getNode() { - return partition.getNodeWrite(cluster); - } - - @Override - protected LatencyType getLatencyType() { - return LatencyType.WRITE; } @Override @@ -69,25 +43,19 @@ protected void writeBuffer() { @Override protected void parseResult(Connection conn) throws IOException { - RecordParser rp = new RecordParser(conn, dataBuffer); + int resultCode = parseHeader(conn); - if (rp.resultCode == 0) { + if (resultCode == ResultCode.OK) { return; } - if (rp.resultCode == ResultCode.FILTERED_OUT) { + if (resultCode == ResultCode.FILTERED_OUT) { if (writePolicy.failOnFilteredOut) { - throw new AerospikeException(rp.resultCode); + throw new AerospikeException(resultCode); } return; } - throw new AerospikeException(rp.resultCode); - } - - @Override - protected boolean prepareRetry(boolean timeout) { - partition.prepareRetryWrite(timeout); - return true; + throw new AerospikeException(resultCode); } } diff --git a/proxy/src/com/aerospike/client/proxy/grpc/GrpcChannelSelector.java b/client/src/com/aerospike/client/listener/AbortListener.java similarity index 60% rename from proxy/src/com/aerospike/client/proxy/grpc/GrpcChannelSelector.java rename to client/src/com/aerospike/client/listener/AbortListener.java index 6384147d2..b2369971e 100644 --- a/proxy/src/com/aerospike/client/proxy/grpc/GrpcChannelSelector.java +++ 
b/client/src/com/aerospike/client/listener/AbortListener.java @@ -1,5 +1,5 @@ /* - * Copyright 2012-2023 Aerospike, Inc. + * Copyright 2012-2024 Aerospike, Inc. * * Portions may be licensed to Aerospike, Inc. under one or more contributor * license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0. @@ -14,20 +14,16 @@ * License for the specific language governing permissions and limitations under * the License. */ -package com.aerospike.client.proxy.grpc; +package com.aerospike.client.listener; -import java.util.List; +import com.aerospike.client.AbortStatus; /** - * A selector of channels to execute Aerospike proxy gRPC calls. + * Asynchronous result notifications for multi-record transaction (MRT) aborts. */ -public interface GrpcChannelSelector { +public interface AbortListener { /** - * Select a channel for the gRPC method. - * - * @param channels channels to select from. - * @param call the streaming call to be executed. - * @return the selected channel. + * This method is called when the abort succeeded or will succeed. */ - GrpcChannelExecutor select(List channels, GrpcStreamingCall call); + void onSuccess(AbortStatus status); } diff --git a/client/src/com/aerospike/client/listener/CommitListener.java b/client/src/com/aerospike/client/listener/CommitListener.java new file mode 100644 index 000000000..38b3494dc --- /dev/null +++ b/client/src/com/aerospike/client/listener/CommitListener.java @@ -0,0 +1,35 @@ +/* + * Copyright 2012-2024 Aerospike, Inc. + * + * Portions may be licensed to Aerospike, Inc. under one or more contributor + * license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ +package com.aerospike.client.listener; + +import com.aerospike.client.AerospikeException; +import com.aerospike.client.CommitStatus; + +/** + * Asynchronous result notifications for multi-record transaction (MRT) commits. + */ +public interface CommitListener { + /** + * This method is called when the records are verified and the commit succeeded or will succeed. + */ + void onSuccess(CommitStatus status); + + /** + * This method is called when the commit fails. + */ + void onFailure(AerospikeException.Commit ae); +} diff --git a/client/src/com/aerospike/client/metrics/LatencyBuckets.java b/client/src/com/aerospike/client/metrics/LatencyBuckets.java index ca1b9df3e..0a7ad49c3 100644 --- a/client/src/com/aerospike/client/metrics/LatencyBuckets.java +++ b/client/src/com/aerospike/client/metrics/LatencyBuckets.java @@ -19,7 +19,7 @@ import java.util.concurrent.atomic.AtomicLong; /** - * Latency buckets for a transaction group (See {@link com.aerospike.client.metrics.LatencyType}). + * Latency buckets for a command group (See {@link com.aerospike.client.metrics.LatencyType}). * Latency bucket counts are cumulative and not reset on each metrics snapshot interval. 
*/ public final class LatencyBuckets { diff --git a/client/src/com/aerospike/client/metrics/MetricsWriter.java b/client/src/com/aerospike/client/metrics/MetricsWriter.java index 007290393..cac8e0b9b 100644 --- a/client/src/com/aerospike/client/metrics/MetricsWriter.java +++ b/client/src/com/aerospike/client/metrics/MetricsWriter.java @@ -142,7 +142,7 @@ private void open() throws IOException { sb.setLength(0); sb.append(now.format(TimestampFormat)); sb.append(" header(1)"); - sb.append(" cluster[name,cpu,mem,recoverQueueSize,invalidNodeCount,tranCount,retryCount,delayQueueTimeoutCount,eventloop[],node[]]"); + sb.append(" cluster[name,cpu,mem,recoverQueueSize,invalidNodeCount,commandCount,retryCount,delayQueueTimeoutCount,eventloop[],node[]]"); sb.append(" eventloop[processSize,queueSize]"); sb.append(" node[name,address,port,syncConn,asyncConn,errors,timeouts,latency[]]"); sb.append(" conn[inUse,inPool,opened,closed]"); @@ -178,7 +178,7 @@ private void writeCluster(Cluster cluster) { sb.append(','); sb.append(cluster.getInvalidNodeCount()); // Cumulative. Not reset on each interval. sb.append(','); - sb.append(cluster.getTranCount()); // Cumulative. Not reset on each interval. + sb.append(cluster.getCommandCount()); // Cumulative. Not reset on each interval. sb.append(','); sb.append(cluster.getRetryCount()); // Cumulative. Not reset on each interval. sb.append(','); diff --git a/client/src/com/aerospike/client/policy/BatchDeletePolicy.java b/client/src/com/aerospike/client/policy/BatchDeletePolicy.java index 155534e06..b99cc7107 100644 --- a/client/src/com/aerospike/client/policy/BatchDeletePolicy.java +++ b/client/src/com/aerospike/client/policy/BatchDeletePolicy.java @@ -36,7 +36,7 @@ public final class BatchDeletePolicy { public Expression filterExp; /** - * Desired consistency guarantee when committing a transaction on the server. The default + * Desired consistency guarantee when committing a command on the server. The default * (COMMIT_ALL) indicates that the server should wait for master and all replica commits to * be successful before returning success to the client. *

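The AbortListener and CommitListener interfaces introduced above are the async counterparts of the new commit/abort calls. As a rough sketch of how an application might wire them, assuming the client.commit(eventLoop, listener, txn) and client.abort(eventLoop, listener, txn) overloads that the AsyncTransaction example later in this diff uses (the class name and log output are illustrative):

    import com.aerospike.client.AbortStatus;
    import com.aerospike.client.AerospikeException;
    import com.aerospike.client.CommitStatus;
    import com.aerospike.client.IAerospikeClient;
    import com.aerospike.client.Txn;
    import com.aerospike.client.async.EventLoop;
    import com.aerospike.client.listener.AbortListener;
    import com.aerospike.client.listener.CommitListener;

    public final class TxnCallbacks {
        // Finish the MRT asynchronously; the listener fires when the commit has
        // succeeded (or will succeed) or has failed.
        public static void commitAsync(IAerospikeClient client, EventLoop eventLoop, Txn txn) {
            client.commit(eventLoop, new CommitListener() {
                public void onSuccess(CommitStatus status) {
                    System.out.println("Txn " + txn.getId() + " committed: " + status);
                }

                public void onFailure(AerospikeException.Commit ae) {
                    System.out.println("Txn " + txn.getId() + " commit failed: " + ae.getMessage());
                }
            }, txn);
        }

        // Roll the MRT back asynchronously; AbortListener only reports success.
        public static void abortAsync(IAerospikeClient client, EventLoop eventLoop, Txn txn) {
            client.abort(eventLoop, new AbortListener() {
                public void onSuccess(AbortStatus status) {
                    System.out.println("Txn " + txn.getId() + " aborted: " + status);
                }
            }, txn);
        }
    }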
@@ -62,7 +62,7 @@ public final class BatchDeletePolicy { public int generation; /** - * If the transaction results in a record deletion, leave a tombstone for the record. + * If the command results in a record deletion, leave a tombstone for the record. * This prevents deleted records from reappearing after node failures. * Valid for Aerospike Server Enterprise Edition only. *

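BatchDeletePolicy keeps its durableDelete flag, and the MRT examples added later in this diff set durableDelete = true whenever a delete runs inside a transaction. A hedged sketch of a batch delete joining an MRT through the BatchPolicy.txn field added by this change, assuming the existing client.delete(batchPolicy, deletePolicy, keys) batch overload (the class name is illustrative):

    import com.aerospike.client.BatchResults;
    import com.aerospike.client.IAerospikeClient;
    import com.aerospike.client.Key;
    import com.aerospike.client.Txn;
    import com.aerospike.client.policy.BatchDeletePolicy;
    import com.aerospike.client.policy.BatchPolicy;

    public final class BatchDeleteInTxn {
        public static void deleteAll(IAerospikeClient client, Txn txn, Key[] keys) {
            // The MRT rides on the batch parent policy (BatchPolicy extends Policy).
            BatchPolicy bp = new BatchPolicy();
            bp.txn = txn;

            // Per-record delete policy. Durable deletes leave a tombstone and are
            // marked as required for deletes inside an MRT in the examples of this change.
            BatchDeletePolicy dp = new BatchDeletePolicy();
            dp.durableDelete = true;

            BatchResults br = client.delete(bp, dp, keys);

            if (!br.status) {
                // Inspect br.records[i].resultCode for the per-key outcome.
                throw new IllegalStateException("One or more batch deletes failed");
            }
        }
    }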
diff --git a/client/src/com/aerospike/client/policy/BatchPolicy.java b/client/src/com/aerospike/client/policy/BatchPolicy.java index 1d921b578..587ad2ad0 100644 --- a/client/src/com/aerospike/client/policy/BatchPolicy.java +++ b/client/src/com/aerospike/client/policy/BatchPolicy.java @@ -19,7 +19,7 @@ /** * Batch parent policy. */ -public final class BatchPolicy extends Policy { +public class BatchPolicy extends Policy { /** * This field is ignored and deprecated. Sync batch node commands are now always issued using * virtual threads in parallel. Async batch node commands always ignored this field. This field @@ -33,7 +33,7 @@ public final class BatchPolicy extends Policy { * Allow batch to be processed immediately in the server's receiving thread for in-memory * namespaces. If false, the batch will always be processed in separate service threads. *

- * For batch transactions with smaller sized records (<= 1K per record), inline + * For batch commands with smaller sized records (<= 1K per record), inline * processing will be significantly faster on in-memory namespaces. *

* Inline processing can introduce the possibility of unfairness because the server diff --git a/client/src/com/aerospike/client/policy/BatchUDFPolicy.java b/client/src/com/aerospike/client/policy/BatchUDFPolicy.java index a4ffb78f0..f0dc82d4d 100644 --- a/client/src/com/aerospike/client/policy/BatchUDFPolicy.java +++ b/client/src/com/aerospike/client/policy/BatchUDFPolicy.java @@ -36,7 +36,7 @@ public final class BatchUDFPolicy { public Expression filterExp; /** - * Desired consistency guarantee when committing a transaction on the server. The default + * Desired consistency guarantee when committing a command on the server. The default * (COMMIT_ALL) indicates that the server should wait for master and all replica commits to * be successful before returning success to the client. *

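The commitLevel reworded above is the per-record durability knob for batch UDF writes. A small sketch, assuming the existing batch UDF overload client.execute(batchPolicy, udfPolicy, keys, packageName, functionName, args...); the package, function, and class names are placeholders:

    import com.aerospike.client.BatchResults;
    import com.aerospike.client.IAerospikeClient;
    import com.aerospike.client.Key;
    import com.aerospike.client.Value;
    import com.aerospike.client.policy.BatchPolicy;
    import com.aerospike.client.policy.BatchUDFPolicy;
    import com.aerospike.client.policy.CommitLevel;

    public final class BatchUdfCommitLevel {
        public static BatchResults applyUdf(IAerospikeClient client, Key[] keys) {
            BatchPolicy bp = new BatchPolicy();

            BatchUDFPolicy up = new BatchUDFPolicy();
            // Only wait for the master copy before the server reports success.
            up.commitLevel = CommitLevel.COMMIT_MASTER;
            // Expire the written records after one hour (seconds).
            up.expiration = 3600;

            // "example_pkg"/"touch_record" are placeholder UDF names.
            return client.execute(bp, up, keys, "example_pkg", "touch_record", Value.get(1));
        }
    }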
@@ -61,7 +61,7 @@ public final class BatchUDFPolicy { public int expiration; /** - * If the transaction results in a record deletion, leave a tombstone for the record. + * If the command results in a record deletion, leave a tombstone for the record. * This prevents deleted records from reappearing after node failures. * Valid for Aerospike Server Enterprise Edition only. *

diff --git a/client/src/com/aerospike/client/policy/BatchWritePolicy.java b/client/src/com/aerospike/client/policy/BatchWritePolicy.java index 3107a9f12..c804ead4d 100644 --- a/client/src/com/aerospike/client/policy/BatchWritePolicy.java +++ b/client/src/com/aerospike/client/policy/BatchWritePolicy.java @@ -43,7 +43,7 @@ public final class BatchWritePolicy { public RecordExistsAction recordExistsAction = RecordExistsAction.UPDATE; /** - * Desired consistency guarantee when committing a transaction on the server. The default + * Desired consistency guarantee when committing a command on the server. The default * (COMMIT_ALL) indicates that the server should wait for master and all replica commits to * be successful before returning success to the client. *

@@ -92,7 +92,7 @@ public final class BatchWritePolicy { public int expiration; /** - * If the transaction results in a record deletion, leave a tombstone for the record. + * If the command results in a record deletion, leave a tombstone for the record. * This prevents deleted records from reappearing after node failures. * Valid for Aerospike Server Enterprise Edition only. *

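BatchWritePolicy carries the same per-record knobs (commitLevel, expiration, durableDelete) for batch writes. A sketch of a batch write enlisted in an MRT via BatchPolicy.txn, assuming the existing client.operate(batchPolicy, writePolicy, keys, ops...) overload; bin and class names are illustrative:

    import com.aerospike.client.BatchResults;
    import com.aerospike.client.Bin;
    import com.aerospike.client.IAerospikeClient;
    import com.aerospike.client.Key;
    import com.aerospike.client.Operation;
    import com.aerospike.client.Txn;
    import com.aerospike.client.policy.BatchPolicy;
    import com.aerospike.client.policy.BatchWritePolicy;

    public final class BatchWriteInTxn {
        public static BatchResults writeAll(IAerospikeClient client, Txn txn, Key[] keys) {
            BatchPolicy bp = new BatchPolicy();
            bp.txn = txn;                      // Enlist every write in the MRT.

            BatchWritePolicy wp = new BatchWritePolicy();
            wp.expiration = 0;                 // Use the namespace default TTL.

            return client.operate(bp, wp, keys,
                Operation.put(new Bin("status", "queued")),
                Operation.add(new Bin("attempts", 1)));
        }
    }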
diff --git a/client/src/com/aerospike/client/policy/ClientPolicy.java b/client/src/com/aerospike/client/policy/ClientPolicy.java index 051b52706..37443ae9a 100644 --- a/client/src/com/aerospike/client/policy/ClientPolicy.java +++ b/client/src/com/aerospike/client/policy/ClientPolicy.java @@ -110,7 +110,7 @@ public class ClientPolicy { public int minConnsPerNode; /** - * Maximum number of synchronous connections allowed per server node. Transactions will go + * Maximum number of synchronous connections allowed per server node. Commands will go * through retry logic and potentially fail with "ResultCode.NO_MORE_CONNECTIONS" if the maximum * number of connections would be exceeded. *

@@ -136,7 +136,7 @@ public class ClientPolicy { public int asyncMinConnsPerNode; /** - * Maximum number of asynchronous connections allowed per server node. Transactions will go + * Maximum number of asynchronous connections allowed per server node. Commands will go * through retry logic and potentially fail with "ResultCode.NO_MORE_CONNECTIONS" if the maximum * number of connections would be exceeded. *

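The two hunks above only replace "Transactions" with "Commands", but the fields they document remain the main connection-sizing knobs. A small configuration sketch; the numbers are illustrative, not recommendations:

    import com.aerospike.client.AerospikeClient;
    import com.aerospike.client.Host;
    import com.aerospike.client.IAerospikeClient;
    import com.aerospike.client.policy.ClientPolicy;

    public final class ConnectionLimits {
        public static IAerospikeClient connect(String host) {
            ClientPolicy cp = new ClientPolicy();
            cp.minConnsPerNode = 10;        // Keep a warm pool of sync connections per node.
            cp.maxConnsPerNode = 300;       // Commands may fail with NO_MORE_CONNECTIONS past this.
            cp.asyncMinConnsPerNode = 0;
            cp.asyncMaxConnsPerNode = 300;  // Async connection limit per node.

            return new AerospikeClient(cp, new Host(host, 3000));
        }
    }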
@@ -174,7 +174,7 @@ public class ClientPolicy { * attempt to use a socket that has already been reaped by the server. *

* If server's proto-fd-idle-ms is zero (no reap), then maxSocketIdle should also be zero. - * Connections retrieved from a pool in transactions will not be checked for maxSocketIdle + * Connections retrieved from a pool in commands will not be checked for maxSocketIdle * when maxSocketIdle is zero. Idle connections will still be trimmed down from peak * connections to min connections (minConnsPerNode and asyncMinConnsPerNode) using a * hard-coded 55 second limit in the cluster tend thread. @@ -285,6 +285,17 @@ public class ClientPolicy { */ public BatchUDFPolicy batchUDFPolicyDefault = new BatchUDFPolicy(); + /** + * Default multi-record transaction (MRT) policy when verifying record versions in a batch. + */ + public TxnVerifyPolicy txnVerifyPolicyDefault = new TxnVerifyPolicy(); + + /** + * Default multi-record transaction (MRT) policy when rolling the transaction records forward (commit) + * or back (abort) in a batch. + */ + public TxnRollPolicy txnRollPolicyDefault = new TxnRollPolicy(); + /** * Default info policy that is used when info command's policy is null. */ @@ -420,6 +431,8 @@ public ClientPolicy(ClientPolicy other) { this.batchWritePolicyDefault = new BatchWritePolicy(other.batchWritePolicyDefault); this.batchDeletePolicyDefault = new BatchDeletePolicy(other.batchDeletePolicyDefault); this.batchUDFPolicyDefault = new BatchUDFPolicy(other.batchUDFPolicyDefault); + this.txnVerifyPolicyDefault = new TxnVerifyPolicy(other.txnVerifyPolicyDefault); + this.txnRollPolicyDefault = new TxnRollPolicy(other.txnRollPolicyDefault); this.infoPolicyDefault = new InfoPolicy(other.infoPolicyDefault); this.tlsPolicy = (other.tlsPolicy != null)? new TlsPolicy(other.tlsPolicy) : null; this.keepAlive = (other.keepAlive != null)? new TCPKeepAlive(other.keepAlive) : null; @@ -553,6 +566,14 @@ public void setBatchUDFPolicyDefault(BatchUDFPolicy batchUDFPolicyDefault) { this.batchUDFPolicyDefault = batchUDFPolicyDefault; } + public void setTxnVerifyPolicyDefault(TxnVerifyPolicy txnVerifyPolicyDefault) { + this.txnVerifyPolicyDefault = txnVerifyPolicyDefault; + } + + public void setTxnRollPolicyDefault(TxnRollPolicy txnRollPolicyDefault) { + this.txnRollPolicyDefault = txnRollPolicyDefault; + } + public void setInfoPolicyDefault(InfoPolicy infoPolicyDefault) { this.infoPolicyDefault = infoPolicyDefault; } diff --git a/client/src/com/aerospike/client/policy/CommitLevel.java b/client/src/com/aerospike/client/policy/CommitLevel.java index 663786a6b..72ffdc391 100644 --- a/client/src/com/aerospike/client/policy/CommitLevel.java +++ b/client/src/com/aerospike/client/policy/CommitLevel.java @@ -1,5 +1,5 @@ /* - * Copyright 2012-2021 Aerospike, Inc. + * Copyright 2012-2024 Aerospike, Inc. * * Portions may be licensed to Aerospike, Inc. under one or more contributor * license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0. @@ -17,7 +17,7 @@ package com.aerospike.client.policy; /** - * Desired consistency guarantee when committing a transaction on the server. + * Desired consistency guarantee when committing a command on the server. 
*/ public enum CommitLevel { /** diff --git a/client/src/com/aerospike/client/policy/Policy.java b/client/src/com/aerospike/client/policy/Policy.java index 03f3565a4..b4e0d8427 100644 --- a/client/src/com/aerospike/client/policy/Policy.java +++ b/client/src/com/aerospike/client/policy/Policy.java @@ -17,12 +17,21 @@ package com.aerospike.client.policy; import com.aerospike.client.exp.Expression; +import com.aerospike.client.Txn; import java.util.Objects; /** - * Transaction policy attributes used in all database commands. + * Command policy attributes used in all database commands. */ public class Policy { + /** + * Multi-record transaction identifier (MRT). If this field is populated, the corresponding + * command will be included in the MRT. This field is ignored for scan/query. + *

+ * Default: null + */ + public Txn txn; + /** * Read policy for AP (availability) namespaces. *

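The new Policy.txn field is what ties an individual command to an MRT; the full synchronous walk-through is the examples/Transaction.java file added later in this diff. A shorter sketch of the same flow, with illustrative namespace, set, and key names:

    import com.aerospike.client.Bin;
    import com.aerospike.client.IAerospikeClient;
    import com.aerospike.client.Key;
    import com.aerospike.client.Txn;
    import com.aerospike.client.policy.WritePolicy;

    public final class TxnSketch {
        public static void transfer(IAerospikeClient client, String ns, String set) {
            Txn txn = new Txn();

            try {
                WritePolicy wp = client.copyWritePolicyDefault();
                wp.txn = txn;   // Every command carrying this policy joins the MRT.

                client.put(wp, new Key(ns, set, "acct-1"), new Bin("bal", 90));
                client.put(wp, new Key(ns, set, "acct-2"), new Bin("bal", 110));
            }
            catch (Throwable t) {
                client.abort(txn);   // Roll the MRT back on any error.
                throw t;
            }

            client.commit(txn);      // Verify reads and roll the writes forward.
        }
    }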
@@ -47,7 +56,7 @@ public class Policy { /** * Optional expression filter. If filterExp exists and evaluates to false, the - * transaction is ignored. + * command is ignored. *

* Default: null *

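With the rewording above, a false filterExp means the command (not "transaction") is ignored, and failOnFilteredOut decides whether that surfaces as a FILTERED_OUT error or a silent no-op. A small sketch using the existing expression API; bin names are illustrative:

    import com.aerospike.client.Bin;
    import com.aerospike.client.IAerospikeClient;
    import com.aerospike.client.Key;
    import com.aerospike.client.exp.Exp;
    import com.aerospike.client.policy.WritePolicy;

    public final class FilteredWrite {
        public static void markOverdue(IAerospikeClient client, Key key) {
            WritePolicy wp = client.copyWritePolicyDefault();

            // Only apply the write when the record's "days" bin is greater than 30.
            wp.filterExp = Exp.build(Exp.gt(Exp.intBin("days"), Exp.val(30)));

            // true  -> filtered-out commands throw AerospikeException(FILTERED_OUT).
            // false -> filtered-out commands return quietly (default).
            wp.failOnFilteredOut = false;

            client.put(wp, key, new Bin("overdue", true));
        }
    }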
@@ -84,7 +93,7 @@ public class Policy { *

* If socketTimeout is non-zero and the socket has been idle for at least socketTimeout, * both maxRetries and totalTimeout are checked. If maxRetries and totalTimeout are not - * exceeded, the transaction is retried. + * exceeded, the command is retried. *

* For synchronous methods, socketTimeout is the socket timeout (SO_TIMEOUT). * For asynchronous methods, the socketTimeout is implemented using a HashedWheelTimer. @@ -94,14 +103,14 @@ public class Policy { public int socketTimeout = 30000; /** - * Total transaction timeout in milliseconds. + * Total command timeout in milliseconds. *

* The totalTimeout is tracked on the client and sent to the server along with - * the transaction in the wire protocol. The client will most likely timeout - * first, but the server also has the capability to timeout the transaction. + * the command in the wire protocol. The client will most likely timeout + * first, but the server also has the capability to timeout the command. *

- * If totalTimeout is not zero and totalTimeout is reached before the transaction - * completes, the transaction will abort with + * If totalTimeout is not zero and totalTimeout is reached before the command + * completes, the command will abort with * {@link com.aerospike.client.AerospikeException.Timeout}. *

* If totalTimeout is zero, there will be no total time limit. @@ -114,11 +123,11 @@ public class Policy { /** * Delay milliseconds after socket read timeout in an attempt to recover the socket - * in the background. Processing continues on the original transaction and the user - * is still notified at the original transaction timeout. + * in the background. Processing continues on the original command and the user + * is still notified at the original command timeout. *

- * When a transaction is stopped prematurely, the socket must be drained of all incoming - * data or closed to prevent unread socket data from corrupting the next transaction + * When a command is stopped prematurely, the socket must be drained of all incoming + * data or closed to prevent unread socket data from corrupting the next command * that would use that socket. *

* If a socket read timeout occurs and timeoutDelay is greater than zero, the socket @@ -135,7 +144,7 @@ public class Policy { * can be avoided on these cloud providers. *

* The disadvantage of enabling timeoutDelay is that extra memory/processing is required - * to drain sockets and additional connections may still be needed for transaction retries. + * to drain sockets and additional connections may still be needed for command retries. *

* If timeoutDelay were to be enabled, 3000ms would be a reasonable value. *

@@ -144,15 +153,15 @@ public class Policy { public int timeoutDelay; /** - * Maximum number of retries before aborting the current transaction. + * Maximum number of retries before aborting the current command. * The initial attempt is not counted as a retry. *

- * If maxRetries is exceeded, the transaction will abort with + * If maxRetries is exceeded, the command will abort with * {@link com.aerospike.client.AerospikeException.Timeout}. *

* WARNING: Database writes that are not idempotent (such as add()) * should not be retried because the write operation may be performed - * multiple times if the client timed out previous transaction attempts. + * multiple times if the client timed out previous command attempts. * It's important to use a distinct WritePolicy for non-idempotent * writes which sets maxRetries = 0; *

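The WARNING above, keep maxRetries = 0 for non-idempotent writes such as add(), is easiest to honor with a dedicated policy object. A sketch of the two-policy split the javadoc suggests; timeout values are illustrative:

    import com.aerospike.client.Bin;
    import com.aerospike.client.IAerospikeClient;
    import com.aerospike.client.Key;
    import com.aerospike.client.policy.Policy;
    import com.aerospike.client.policy.WritePolicy;

    public final class RetryPolicies {
        // Reads are idempotent: allow retries within the total timeout budget.
        public static Policy readPolicy(IAerospikeClient client) {
            Policy p = client.copyReadPolicyDefault();
            p.socketTimeout = 1000;        // Per-attempt socket timeout (ms).
            p.totalTimeout = 5000;         // Overall budget across retries (ms).
            p.maxRetries = 2;
            p.sleepBetweenRetries = 100;
            return p;
        }

        // add() is not idempotent: never retry, or the increment may apply twice.
        public static void increment(IAerospikeClient client, Key key) {
            WritePolicy wp = client.copyWritePolicyDefault();
            wp.maxRetries = 0;
            client.add(wp, key, new Bin("counter", 1));
        }
    }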
@@ -236,7 +245,7 @@ public class Policy { /** * Throw exception if {@link #filterExp} is defined and that filter evaluates - * to false (transaction ignored). The {@link com.aerospike.client.AerospikeException} + * to false (command ignored). The {@link com.aerospike.client.AerospikeException} * will contain result code {@link com.aerospike.client.ResultCode#FILTERED_OUT}. *

* This field is not applicable to batch, scan or query commands. @@ -249,6 +258,7 @@ public class Policy { * Copy policy from another policy. */ public Policy(Policy other) { + this.txn = other.txn; this.readModeAP = other.readModeAP; this.readModeSC = other.readModeSC; this.replica = other.replica; @@ -296,6 +306,10 @@ public final void setTimeouts(int socketTimeout, int totalTimeout) { // Include setters to facilitate Spring's ConfigurationProperties. + public void setTxn(Txn txn) { + this.txn = txn; + } + public void setReadModeAP(ReadModeAP readModeAP) { this.readModeAP = readModeAP; } @@ -357,26 +371,11 @@ public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; Policy policy = (Policy) o; - return connectTimeout == policy.connectTimeout && - socketTimeout == policy.socketTimeout && - totalTimeout == policy.totalTimeout && - timeoutDelay == policy.timeoutDelay && - maxRetries == policy.maxRetries && - sleepBetweenRetries == policy.sleepBetweenRetries && - readTouchTtlPercent == policy.readTouchTtlPercent && - sendKey == policy.sendKey && - compress == policy.compress && - failOnFilteredOut == policy.failOnFilteredOut && - readModeAP == policy.readModeAP && - readModeSC == policy.readModeSC && - replica == policy.replica && - Objects.equals(filterExp, policy.filterExp); + return connectTimeout == policy.connectTimeout && socketTimeout == policy.socketTimeout && totalTimeout == policy.totalTimeout && timeoutDelay == policy.timeoutDelay && maxRetries == policy.maxRetries && sleepBetweenRetries == policy.sleepBetweenRetries && readTouchTtlPercent == policy.readTouchTtlPercent && sendKey == policy.sendKey && compress == policy.compress && failOnFilteredOut == policy.failOnFilteredOut && Objects.equals(txn, policy.txn) && readModeAP == policy.readModeAP && readModeSC == policy.readModeSC && replica == policy.replica && Objects.equals(filterExp, policy.filterExp); } @Override public int hashCode() { - return Objects.hash(readModeAP, readModeSC, replica, filterExp, connectTimeout, socketTimeout, totalTimeout, - timeoutDelay, maxRetries, sleepBetweenRetries, readTouchTtlPercent, sendKey, compress, - failOnFilteredOut); + return Objects.hash(txn, readModeAP, readModeSC, replica, filterExp, connectTimeout, socketTimeout, totalTimeout, timeoutDelay, maxRetries, sleepBetweenRetries, readTouchTtlPercent, sendKey, compress, failOnFilteredOut); } } diff --git a/client/src/com/aerospike/client/policy/QueryPolicy.java b/client/src/com/aerospike/client/policy/QueryPolicy.java index 68dcfdcaa..b6a410315 100644 --- a/client/src/com/aerospike/client/policy/QueryPolicy.java +++ b/client/src/com/aerospike/client/policy/QueryPolicy.java @@ -18,6 +18,9 @@ /** * Container object for policy attributes used in query operations. + *

+ * Inherited Policy fields {@link Policy#txn} and {@link Policy#failOnFilteredOut} are ignored + * in query commands. */ public class QueryPolicy extends Policy { /** diff --git a/client/src/com/aerospike/client/policy/ScanPolicy.java b/client/src/com/aerospike/client/policy/ScanPolicy.java index 6e32a9b56..1ce36c39b 100644 --- a/client/src/com/aerospike/client/policy/ScanPolicy.java +++ b/client/src/com/aerospike/client/policy/ScanPolicy.java @@ -18,6 +18,9 @@ /** * Container object for optional parameters used in scan operations. + *

+ * Inherited Policy fields {@link Policy#txn} and {@link Policy#failOnFilteredOut} are ignored in + * scan commands. */ public final class ScanPolicy extends Policy { /** diff --git a/client/src/com/aerospike/client/policy/TxnRollPolicy.java b/client/src/com/aerospike/client/policy/TxnRollPolicy.java new file mode 100644 index 000000000..9aa2eb696 --- /dev/null +++ b/client/src/com/aerospike/client/policy/TxnRollPolicy.java @@ -0,0 +1,41 @@ +/* + * Copyright 2012-2024 Aerospike, Inc. + * + * Portions may be licensed to Aerospike, Inc. under one or more contributor + * license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ +package com.aerospike.client.policy; + +/** + * Multi-record transaction (MRT) policy fields used to batch roll forward/backward records on + * commit or abort. Used a placeholder for now as there are no additional fields beyond BatchPolicy. + */ +public class TxnRollPolicy extends BatchPolicy { + /** + * Copy policy from another policy. + */ + public TxnRollPolicy(TxnRollPolicy other) { + super(other); + } + + /** + * Default constructor. + */ + public TxnRollPolicy() { + replica = Replica.MASTER; + maxRetries = 5; + socketTimeout = 3000; + totalTimeout = 10000; + sleepBetweenRetries = 1000; + } +} diff --git a/client/src/com/aerospike/client/policy/TxnVerifyPolicy.java b/client/src/com/aerospike/client/policy/TxnVerifyPolicy.java new file mode 100644 index 000000000..83d227967 --- /dev/null +++ b/client/src/com/aerospike/client/policy/TxnVerifyPolicy.java @@ -0,0 +1,42 @@ +/* + * Copyright 2012-2024 Aerospike, Inc. + * + * Portions may be licensed to Aerospike, Inc. under one or more contributor + * license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ +package com.aerospike.client.policy; + +/** + * Multi-record transaction (MRT) policy fields used to batch verify record versions on commit. + * Used a placeholder for now as there are no additional fields beyond BatchPolicy. + */ +public class TxnVerifyPolicy extends BatchPolicy { + /** + * Copy policy from another policy. + */ + public TxnVerifyPolicy(TxnVerifyPolicy other) { + super(other); + } + + /** + * Default constructor. 
+ */ + public TxnVerifyPolicy() { + readModeSC = ReadModeSC.LINEARIZE; + replica = Replica.MASTER; + maxRetries = 5; + socketTimeout = 3000; + totalTimeout = 10000; + sleepBetweenRetries = 1000; + } +} diff --git a/client/src/com/aerospike/client/policy/WritePolicy.java b/client/src/com/aerospike/client/policy/WritePolicy.java index 33dcc577f..9fb53f74b 100644 --- a/client/src/com/aerospike/client/policy/WritePolicy.java +++ b/client/src/com/aerospike/client/policy/WritePolicy.java @@ -40,7 +40,7 @@ public final class WritePolicy extends Policy { public GenerationPolicy generationPolicy = GenerationPolicy.NONE; /** - * Desired consistency guarantee when committing a transaction on the server. The default + * Desired consistency guarantee when committing a command on the server. The default * (COMMIT_ALL) indicates that the server should wait for master and all replica commits to * be successful before returning success to the client. *

@@ -93,7 +93,7 @@ public final class WritePolicy extends Policy { public boolean respondAllOps; /** - * If the transaction results in a record deletion, leave a tombstone for the record. + * If the command results in a record deletion, leave a tombstone for the record. * This prevents deleted records from reappearing after node failures. * Valid for Aerospike Server Enterprise Edition 3.10+ only. *

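The TxnVerifyPolicy and TxnRollPolicy classes introduced a little earlier are plain BatchPolicy subclasses whose constructors pick MRT-oriented defaults (MASTER replica, 5 retries, 3s/10s timeouts, linearized reads for verify). When those defaults need adjusting, the ClientPolicy fields added in this diff, txnVerifyPolicyDefault and txnRollPolicyDefault, are the place to do it; a sketch with illustrative values:

    import com.aerospike.client.AerospikeClient;
    import com.aerospike.client.Host;
    import com.aerospike.client.IAerospikeClient;
    import com.aerospike.client.policy.ClientPolicy;
    import com.aerospike.client.policy.TxnRollPolicy;
    import com.aerospike.client.policy.TxnVerifyPolicy;

    public final class TxnPolicyDefaults {
        public static IAerospikeClient connect(String host) {
            ClientPolicy cp = new ClientPolicy();

            // Loosen the commit-time verify step.
            TxnVerifyPolicy verify = new TxnVerifyPolicy();
            verify.totalTimeout = 20000;
            cp.txnVerifyPolicyDefault = verify;

            // Roll-forward/roll-back batches reuse the same BatchPolicy knobs.
            TxnRollPolicy roll = new TxnRollPolicy();
            roll.maxRetries = 10;
            cp.txnRollPolicyDefault = roll;

            return new AerospikeClient(cp, new Host(host, 3000));
        }
    }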
diff --git a/client/src/com/aerospike/client/query/QueryExecutor.java b/client/src/com/aerospike/client/query/QueryExecutor.java index a50693429..64859f8f1 100644 --- a/client/src/com/aerospike/client/query/QueryExecutor.java +++ b/client/src/com/aerospike/client/query/QueryExecutor.java @@ -50,7 +50,7 @@ public QueryExecutor(Cluster cluster, QueryPolicy policy, Statement statement, N // Initialize maximum number of nodes to query in parallel. this.maxConcurrentNodes = (policy.maxConcurrentNodes == 0 || policy.maxConcurrentNodes >= threads.length) ? threads.length : policy.maxConcurrentNodes; - cluster.addTran(); + cluster.addCommandCount(); } protected final void initializeThreads() { diff --git a/client/src/com/aerospike/client/query/QueryListenerExecutor.java b/client/src/com/aerospike/client/query/QueryListenerExecutor.java index 2662de646..ce8c96e23 100644 --- a/client/src/com/aerospike/client/query/QueryListenerExecutor.java +++ b/client/src/com/aerospike/client/query/QueryListenerExecutor.java @@ -33,7 +33,7 @@ public static void execute( QueryListener listener, PartitionTracker tracker ) { - cluster.addTran(); + cluster.addCommandCount(); TaskGen task = new TaskGen(statement); long taskId = task.getId(); diff --git a/client/src/com/aerospike/client/query/QueryPartitionExecutor.java b/client/src/com/aerospike/client/query/QueryPartitionExecutor.java index 322373d3a..7182ab7b5 100644 --- a/client/src/com/aerospike/client/query/QueryPartitionExecutor.java +++ b/client/src/com/aerospike/client/query/QueryPartitionExecutor.java @@ -59,7 +59,7 @@ public QueryPartitionExecutor( this.completedCount = new AtomicInteger(); this.done = new AtomicBoolean(); - cluster.addTran(); + cluster.addCommandCount(); cluster.threadFactory.newThread(this).start(); } diff --git a/client/src/com/aerospike/client/query/RecordSet.java b/client/src/com/aerospike/client/query/RecordSet.java index 7796db2b9..11cc6a568 100644 --- a/client/src/com/aerospike/client/query/RecordSet.java +++ b/client/src/com/aerospike/client/query/RecordSet.java @@ -177,7 +177,7 @@ protected void abort() { valid = false; queue.clear(); - // Send end command to transaction thread. + // Send end command to command thread. // It's critical that the end offer succeeds. while (! queue.offer(END)) { // Queue must be full. Remove one item to make room. diff --git a/client/src/com/aerospike/client/query/ResultSet.java b/client/src/com/aerospike/client/query/ResultSet.java index 842e13721..b8d1b5ace 100644 --- a/client/src/com/aerospike/client/query/ResultSet.java +++ b/client/src/com/aerospike/client/query/ResultSet.java @@ -158,7 +158,7 @@ protected void abort() { valid = false; queue.clear(); - // Send end command to transaction thread. + // Send end command to command thread. // It's critical that the end offer succeeds. while (!queue.offer(END)) { // Queue must be full. Remove one item to make room. 
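The query and scan executors above only pick up the addTran() to addCommandCount() rename; per the QueryPolicy and ScanPolicy notes added earlier in this diff, Policy.txn is ignored for scan and query. A read that must be versioned by the MRT therefore goes through get() or a batch read instead; a hedged sketch, assuming batch reads join the MRT the same way single-record reads do:

    import com.aerospike.client.IAerospikeClient;
    import com.aerospike.client.Key;
    import com.aerospike.client.Record;
    import com.aerospike.client.Txn;
    import com.aerospike.client.policy.BatchPolicy;
    import com.aerospike.client.policy.Policy;

    public final class TxnReads {
        // Single-record read that is tracked (and later verified) by the MRT.
        public static Record readOne(IAerospikeClient client, Txn txn, Key key) {
            Policy p = client.copyReadPolicyDefault();
            p.txn = txn;
            return client.get(p, key);
        }

        // Batch read variant; the txn rides on the batch parent policy.
        public static Record[] readMany(IAerospikeClient client, Txn txn, Key[] keys) {
            BatchPolicy bp = new BatchPolicy();
            bp.txn = txn;
            return client.get(bp, keys);
        }
    }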
diff --git a/examples/pom.xml b/examples/pom.xml index 058345680..8d50d104a 100644 --- a/examples/pom.xml +++ b/examples/pom.xml @@ -6,7 +6,7 @@ com.aerospike aerospike-parent - 8.1.4 + 9.0.0 aerospike-examples jar @@ -19,11 +19,6 @@ aerospike-client-jdk21 - - com.aerospike - aerospike-proxy-client - - io.netty netty-transport @@ -65,42 +60,26 @@ org.apache.maven.plugins maven-compiler-plugin - - 21 - 21 - - org.apache.maven.plugins - maven-shade-plugin - 3.4.1 + maven-assembly-plugin + + + jar-with-dependencies + + + + com.aerospike.examples.Main + + + - - shade - - - - - *:* - - META-INF/*.SF - META-INF/*.DSA - META-INF/*.RSA - - - - true - jar-with-dependencies - - - - - com.aerospike.examples.Main - - - - + make-my-jar-with-dependencies + package + + single + diff --git a/examples/src/com/aerospike/examples/AsyncExample.java b/examples/src/com/aerospike/examples/AsyncExample.java index 366669953..3b7bf4900 100644 --- a/examples/src/com/aerospike/examples/AsyncExample.java +++ b/examples/src/com/aerospike/examples/AsyncExample.java @@ -1,5 +1,5 @@ /* - * Copyright 2012-2023 Aerospike, Inc. + * Copyright 2012-2024 Aerospike, Inc. * * Portions may be licensed to Aerospike, Inc. under one or more contributor * license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0. @@ -19,10 +19,10 @@ import java.lang.reflect.Constructor; import java.util.List; +import com.aerospike.client.AerospikeClient; import com.aerospike.client.Host; import com.aerospike.client.IAerospikeClient; import com.aerospike.client.async.EventLoop; -import com.aerospike.client.async.EventLoopType; import com.aerospike.client.async.EventLoops; import com.aerospike.client.async.EventPolicy; import com.aerospike.client.async.NettyEventLoops; @@ -30,11 +30,9 @@ import com.aerospike.client.policy.ClientPolicy; import com.aerospike.client.policy.Policy; import com.aerospike.client.policy.WritePolicy; -import com.aerospike.client.proxy.AerospikeClientFactory; import com.aerospike.client.util.Util; import io.netty.channel.EventLoopGroup; -import io.netty.channel.epoll.Epoll; import io.netty.channel.epoll.EpollEventLoopGroup; import io.netty.channel.kqueue.KQueueEventLoopGroup; import io.netty.channel.nio.NioEventLoopGroup; @@ -49,16 +47,6 @@ public static void runExamples(Console console, Parameters params, List eventPolicy.maxCommandsInProcess = params.maxCommandsInProcess; eventPolicy.maxCommandsInQueue = params.maxCommandsInQueue; - if (params.useProxyClient && params.eventLoopType == EventLoopType.DIRECT_NIO) { - // Proxy client requires netty event loops. - if (Epoll.isAvailable()) { - params.eventLoopType = EventLoopType.NETTY_EPOLL; - } - else { - params.eventLoopType = EventLoopType.NETTY_NIO; - } - } - EventLoops eventLoops; switch (params.eventLoopType) { @@ -106,7 +94,7 @@ public static void runExamples(Console console, Parameters params, List Host[] hosts = Host.parseHosts(params.host, params.port); - IAerospikeClient client = AerospikeClientFactory.getClient(policy, params.useProxyClient, hosts); + IAerospikeClient client = new AerospikeClient(policy, hosts); try { EventLoop eventLoop = eventLoops.get(0); diff --git a/examples/src/com/aerospike/examples/AsyncQuery.java b/examples/src/com/aerospike/examples/AsyncQuery.java index 9cfa2e9a8..a5a47a0e2 100644 --- a/examples/src/com/aerospike/examples/AsyncQuery.java +++ b/examples/src/com/aerospike/examples/AsyncQuery.java @@ -1,5 +1,5 @@ /* - * Copyright 2012-2023 Aerospike, Inc. + * Copyright 2012-2024 Aerospike, Inc. 
* * Portions may be licensed to Aerospike, Inc. under one or more contributor * license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0. @@ -46,18 +46,13 @@ public void runExample(IAerospikeClient client, EventLoop eventLoop) { String binName = "asqbin"; int size = 50; - // Proxy client does not support createIndex(), so must assume - // index already created to run this test. - if (! params.useProxyClient) { - createIndex(client, indexName, binName); - } + createIndex(client, indexName, binName); runQueryExample(client, eventLoop, keyPrefix, binName, size); // Wait until query finishes before dropping index. waitTillComplete(); - // Do not drop index because after native client tests run, the proxy - // client tests need the index to exist. + // Do not drop index because after native client tests run. //client.dropIndex(policy, params.namespace, params.set, indexName); } diff --git a/examples/src/com/aerospike/examples/AsyncTransaction.java b/examples/src/com/aerospike/examples/AsyncTransaction.java new file mode 100644 index 000000000..fe43254a3 --- /dev/null +++ b/examples/src/com/aerospike/examples/AsyncTransaction.java @@ -0,0 +1,168 @@ +/* + * Copyright 2012-2024 Aerospike, Inc. + * + * Portions may be licensed to Aerospike, Inc. under one or more contributor + * license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ +package com.aerospike.examples; + +import com.aerospike.client.AbortStatus; +import com.aerospike.client.AerospikeException; +import com.aerospike.client.Bin; +import com.aerospike.client.CommitStatus; +import com.aerospike.client.IAerospikeClient; +import com.aerospike.client.Key; +import com.aerospike.client.Record; +import com.aerospike.client.Txn; +import com.aerospike.client.async.EventLoop; +import com.aerospike.client.listener.AbortListener; +import com.aerospike.client.listener.CommitListener; +import com.aerospike.client.listener.DeleteListener; +import com.aerospike.client.listener.RecordListener; +import com.aerospike.client.listener.WriteListener; +import com.aerospike.client.policy.Policy; +import com.aerospike.client.policy.WritePolicy; + +/** + * Asynchronous multi-record transaction example. 
+ */ +public class AsyncTransaction extends AsyncExample { + @Override + public void runExample(IAerospikeClient client, EventLoop eventLoop) { + Txn txn = new Txn(); + + console.info("Begin txn: " + txn.getId()); + put(client, txn, eventLoop); + } + + public void put(IAerospikeClient client, Txn txn, EventLoop eventLoop) { + console.info("Run put"); + + WritePolicy wp = client.copyWritePolicyDefault(); + wp.txn = txn; + + Key key = new Key(params.namespace, params.set, 1); + + WriteListener wl = new WriteListener() { + public void onSuccess(final Key key) { + putAnother(client, txn, eventLoop); + } + + public void onFailure(AerospikeException e) { + console.error("Failed to write: namespace=%s set=%s key=%s exception=%s", + key.namespace, key.setName, key.userKey, e.getMessage()); + abort(client, txn, eventLoop); + } + }; + + client.put(eventLoop, wl, wp, key, new Bin("a", "val1")); + } + + public void putAnother(IAerospikeClient client, Txn txn, EventLoop eventLoop) { + console.info("Run another put"); + + WritePolicy wp = client.copyWritePolicyDefault(); + wp.txn = txn; + + Key key = new Key(params.namespace, params.set, 2); + + WriteListener wl = new WriteListener() { + public void onSuccess(final Key key) { + get(client, txn, eventLoop); + } + + public void onFailure(AerospikeException e) { + console.error("Failed to write: namespace=%s set=%s key=%s exception=%s", + key.namespace, key.setName, key.userKey, e.getMessage()); + abort(client, txn, eventLoop); + } + }; + + client.put(eventLoop, wl, wp, key, new Bin("b", "val2")); + } + + public void get(IAerospikeClient client, Txn txn, EventLoop eventLoop) { + console.info("Run get"); + + Policy p = client.copyReadPolicyDefault(); + p.txn = txn; + + Key key = new Key(params.namespace, params.set, 3); + + RecordListener rl = new RecordListener() { + public void onSuccess(Key key, Record record) { + delete(client, txn, eventLoop); + } + + public void onFailure(AerospikeException e) { + console.error("Failed to read: namespace=%s set=%s key=%s exception=%s", + key.namespace, key.setName, key.userKey, e.getMessage()); + abort(client, txn, eventLoop); + } + }; + + client.get(eventLoop, rl, p, key); + } + + public void delete(IAerospikeClient client, Txn txn, EventLoop eventLoop) { + console.info("Run delete"); + + WritePolicy dp = client.copyWritePolicyDefault(); + dp.txn = txn; + dp.durableDelete = true; // Required when running delete in a MRT. 
+ + Key key = new Key(params.namespace, params.set, 3); + + DeleteListener dl = new DeleteListener() { + public void onSuccess(final Key key, boolean existed) { + commit(client, txn, eventLoop); + } + + public void onFailure(AerospikeException e) { + console.error("Failed to delete: namespace=%s set=%s key=%s exception=%s", + key.namespace, key.setName, key.userKey, e.getMessage()); + abort(client, txn, eventLoop); + } + }; + + client.delete(eventLoop, dl, dp, key); + } + + public void commit(IAerospikeClient client, Txn txn, EventLoop eventLoop) { + console.info("Run commit"); + + CommitListener tcl = new CommitListener() { + public void onSuccess(CommitStatus status) { + console.info("Txn committed: " + txn.getId()); + } + + public void onFailure(AerospikeException.Commit ae) { + console.error("Txn commit failed: " + txn.getId()); + } + }; + + client.commit(eventLoop, tcl, txn); + } + + public void abort(IAerospikeClient client, Txn txn, EventLoop eventLoop) { + console.info("Run abort"); + + AbortListener tal = new AbortListener() { + public void onSuccess(AbortStatus status) { + console.error("Txn aborted: " + txn.getId()); + } + }; + + client.abort(eventLoop, tal, txn); + } +} diff --git a/examples/src/com/aerospike/examples/AsyncUserDefinedFunction.java b/examples/src/com/aerospike/examples/AsyncUserDefinedFunction.java index c8a48b548..d9c5e21f7 100644 --- a/examples/src/com/aerospike/examples/AsyncUserDefinedFunction.java +++ b/examples/src/com/aerospike/examples/AsyncUserDefinedFunction.java @@ -1,5 +1,5 @@ /* - * Copyright 2012-2023 Aerospike, Inc. + * Copyright 2012-2024 Aerospike, Inc. * * Portions may be licensed to Aerospike, Inc. under one or more contributor * license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0. @@ -32,11 +32,7 @@ public class AsyncUserDefinedFunction extends AsyncExample { */ @Override public void runExample(IAerospikeClient client, EventLoop eventLoop) { - // Register is not supported in the proxy client. To run this example with the proxy client, - // first run example with native client (which supports register) and then run proxy client. - if (! params.useProxyClient) { - register(client); - } + register(client); writeUsingUdfAsync(client, eventLoop); } diff --git a/examples/src/com/aerospike/examples/Example.java b/examples/src/com/aerospike/examples/Example.java index 8a93e6fe4..dedb0ab77 100644 --- a/examples/src/com/aerospike/examples/Example.java +++ b/examples/src/com/aerospike/examples/Example.java @@ -1,5 +1,5 @@ /* - * Copyright 2012-2023 Aerospike, Inc. + * Copyright 2012-2024 Aerospike, Inc. * * Portions may be licensed to Aerospike, Inc. under one or more contributor * license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0. 
@@ -19,10 +19,10 @@ import java.lang.reflect.Constructor; import java.util.List; +import com.aerospike.client.AerospikeClient; import com.aerospike.client.Host; import com.aerospike.client.IAerospikeClient; import com.aerospike.client.policy.ClientPolicy; -import com.aerospike.client.proxy.AerospikeClientFactory; public abstract class Example { @@ -43,7 +43,7 @@ public static void runExamples(Console console, Parameters params, List Host[] hosts = Host.parseHosts(params.host, params.port); - IAerospikeClient client = AerospikeClientFactory.getClient(policy, params.useProxyClient, hosts); + IAerospikeClient client = new AerospikeClient(policy, hosts); try { //params.setServerSpecific(client); diff --git a/examples/src/com/aerospike/examples/Main.java b/examples/src/com/aerospike/examples/Main.java index fd2c4d739..05285052b 100644 --- a/examples/src/com/aerospike/examples/Main.java +++ b/examples/src/com/aerospike/examples/Main.java @@ -1,5 +1,5 @@ /* - * Copyright 2012-2023 Aerospike, Inc. + * Copyright 2012-2024 Aerospike, Inc. * * Portions may be licensed to Aerospike, Inc. under one or more contributor * license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0. @@ -134,7 +134,6 @@ public static void main(String[] args) { "Value: DIRECT_NIO | NETTY_NIO | NETTY_EPOLL | NETTY_KQUEUE | NETTY_IOURING" ); - options.addOption("proxy", false, "Use proxy client."); options.addOption("g", "gui", false, "Invoke GUI to selectively run tests."); options.addOption("d", "debug", false, "Run in debug mode."); options.addOption("u", "usage", false, "Print usage."); @@ -174,10 +173,6 @@ public static void main(String[] args) { params.eventLoopType = EventLoopType.valueOf(cl.getOptionValue("eventLoopType", "").toUpperCase()); } - if (cl.hasOption("proxy")) { - params.useProxyClient = true; - } - if (cl.hasOption("d")) { Log.setLevel(Level.DEBUG); } @@ -187,14 +182,6 @@ public static void main(String[] args) { } else { Console console = new Console(); - - // If the Aerospike server's default port (3000) is used and the proxy client is used, - // Reset the port to the proxy server's default port (4000). - if (params.port == 3000 && params.useProxyClient) { - console.info("Change proxy server port to 4000"); - params.port = 4000; - } - runExamples(console, params, exampleNames); } } diff --git a/examples/src/com/aerospike/examples/Parameters.java b/examples/src/com/aerospike/examples/Parameters.java index 210d44bfb..121d5014d 100644 --- a/examples/src/com/aerospike/examples/Parameters.java +++ b/examples/src/com/aerospike/examples/Parameters.java @@ -1,5 +1,5 @@ /* - * Copyright 2012-2023 Aerospike, Inc. + * Copyright 2012-2024 Aerospike, Inc. * * Portions may be licensed to Aerospike, Inc. under one or more contributor * license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0. @@ -39,7 +39,6 @@ public class Parameters { EventLoopType eventLoopType = EventLoopType.DIRECT_NIO; int maxCommandsInProcess; int maxCommandsInQueue; - boolean useProxyClient; protected Parameters(TlsPolicy policy, String host, int port, String user, String password, AuthMode authMode, String namespace, String set) { this.host = host; diff --git a/examples/src/com/aerospike/examples/QueryAverage.java b/examples/src/com/aerospike/examples/QueryAverage.java index 940b72f50..5f958191c 100644 --- a/examples/src/com/aerospike/examples/QueryAverage.java +++ b/examples/src/com/aerospike/examples/QueryAverage.java @@ -1,5 +1,5 @@ /* - * Copyright 2012-2023 Aerospike, Inc. 
+ * Copyright 2012-2024 Aerospike, Inc. * * Portions may be licensed to Aerospike, Inc. under one or more contributor * license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0. @@ -48,17 +48,12 @@ public void runExample(IAerospikeClient client, Parameters params) throws Except String binName = "l2"; int size = 10; - // Proxy client does not support register() nor createIndex(), so must assume - // there are already created to run this test. - if (! params.useProxyClient) { - register(client, params); - createIndex(client, params, indexName, binName); - } + register(client, params); + createIndex(client, params, indexName, binName); writeRecords(client, params, keyPrefix, size); runQuery(client, params, indexName, binName); - // Do not drop index because after native client tests run, the proxy - // client tests need the index to exist. + // Do not drop index because after native client tests run. //client.dropIndex(params.policy, params.namespace, params.set, indexName); } diff --git a/examples/src/com/aerospike/examples/QueryExp.java b/examples/src/com/aerospike/examples/QueryExp.java index f0895dfec..07abac909 100644 --- a/examples/src/com/aerospike/examples/QueryExp.java +++ b/examples/src/com/aerospike/examples/QueryExp.java @@ -50,10 +50,7 @@ public void runExample(IAerospikeClient client, Parameters params) throws Except String binName = "idxbin"; int size = 50; - if (!params.useProxyClient) { - createIndex(client, params, indexName, binName); - } - + createIndex(client, params, indexName, binName); writeRecords(client, params, binName, size); runQuery1(client, params, binName); runQuery2(client, params, binName); diff --git a/examples/src/com/aerospike/examples/QueryResume.java b/examples/src/com/aerospike/examples/QueryResume.java index efc281d2f..2425aef61 100644 --- a/examples/src/com/aerospike/examples/QueryResume.java +++ b/examples/src/com/aerospike/examples/QueryResume.java @@ -1,5 +1,5 @@ /* - * Copyright 2012-2023 Aerospike, Inc. + * Copyright 2012-2024 Aerospike, Inc. * * Portions may be licensed to Aerospike, Inc. under one or more contributor * license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0. @@ -47,9 +47,7 @@ public void runExample(IAerospikeClient client, Parameters params) throws Except String binName = "bin"; String setName = "qr"; - if (!params.useProxyClient) { - createIndex(client, params, setName, indexName, binName); - } + createIndex(client, params, setName, indexName, binName); writeRecords(client, params, setName, binName, 200); Statement stmt = new Statement(); diff --git a/examples/src/com/aerospike/examples/Transaction.java b/examples/src/com/aerospike/examples/Transaction.java new file mode 100644 index 000000000..8d22991c1 --- /dev/null +++ b/examples/src/com/aerospike/examples/Transaction.java @@ -0,0 +1,78 @@ +/* + * Copyright 2012-2024 Aerospike, Inc. + * + * Portions may be licensed to Aerospike, Inc. under one or more contributor + * license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the + * License for the specific language governing permissions and limitations under + * the License. + */ +package com.aerospike.examples; + +import com.aerospike.client.Bin; +import com.aerospike.client.IAerospikeClient; +import com.aerospike.client.Key; +import com.aerospike.client.Txn; +import com.aerospike.client.policy.Policy; +import com.aerospike.client.policy.WritePolicy; + +public class Transaction extends Example { + public Transaction(Console console) { + super(console); + } + + /** + * Multi-record transaction. + */ + @Override + public void runExample(IAerospikeClient client, Parameters params) throws Exception { + txnReadWrite(client, params); + } + + private void txnReadWrite(IAerospikeClient client, Parameters params) { + Txn txn = new Txn(); + console.info("Begin txn: " + txn.getId()); + + try { + WritePolicy wp = client.copyWritePolicyDefault(); + wp.txn = txn; + + console.info("Run put"); + Key key1 = new Key(params.namespace, params.set, 1); + client.put(wp, key1, new Bin("a", "val1")); + + console.info("Run another put"); + Key key2 = new Key(params.namespace, params.set, 2); + client.put(wp, key2, new Bin("b", "val2")); + + console.info("Run get"); + Policy p = client.copyReadPolicyDefault(); + p.txn = txn; + + Key key3 = new Key(params.namespace, params.set, 3); + client.get(p, key3); + + console.info("Run delete"); + WritePolicy dp = client.copyWritePolicyDefault(); + dp.txn = txn; + dp.durableDelete = true; // Required when running delete in a MRT. + client.delete(dp, key3); + } + catch (Throwable t) { + // Abort and rollback MRT (multi-record transaction) if any errors occur. + console.info("Abort txn: " + txn.getId()); + client.abort(txn); + throw t; + } + + console.info("Commit txn: " + txn.getId()); + client.commit(txn); + } +} diff --git a/examples/src/com/aerospike/examples/UserDefinedFunction.java b/examples/src/com/aerospike/examples/UserDefinedFunction.java index 724608410..afd54c18a 100644 --- a/examples/src/com/aerospike/examples/UserDefinedFunction.java +++ b/examples/src/com/aerospike/examples/UserDefinedFunction.java @@ -1,5 +1,5 @@ /* - * Copyright 2012-2023 Aerospike, Inc. + * Copyright 2012-2024 Aerospike, Inc. * * Portions may be licensed to Aerospike, Inc. under one or more contributor * license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0. @@ -42,12 +42,7 @@ public UserDefinedFunction(Console console) { */ @Override public void runExample(IAerospikeClient client, Parameters params) throws Exception { - // Register is not supported in the proxy client. To run this example with the proxy client, - // first run example with native client (which supports register) and then run proxy client. - if (! 
params.useProxyClient) { - register(client, params); - } - + register(client, params); writeUsingUdf(client, params); writeIfGenerationNotChanged(client, params); writeIfNotExists(client, params); diff --git a/pom.xml b/pom.xml index b70f2e9fa..5f17a6604 100644 --- a/pom.xml +++ b/pom.xml @@ -5,7 +5,7 @@ com.aerospike aerospike-parent aerospike-parent - 8.1.4 + 9.0.0 pom https://github.com/aerospike/aerospike-client-java @@ -23,7 +23,6 @@ client - proxy examples benchmarks test @@ -39,9 +38,7 @@ 2.18.1 3.2.0 - 4.1.112.Final - 2.0.62.Final - 1.65.1 + 4.1.114.Final 3.0.1 0.4 1.9.0 @@ -56,12 +53,6 @@ ${project.version} - - com.aerospike - aerospike-proxy-client - ${project.version} - - io.netty netty-transport @@ -95,12 +86,6 @@ ${netty.version} - - io.netty - netty-tcnative-boringssl-static - ${netty.tcnative.version} - - org.luaj luaj-jse @@ -125,13 +110,6 @@ ${junit.version} test - - - io.grpc - grpc-netty - ${grpc.version} - - @@ -159,15 +137,4 @@ - - - diff --git a/proxy/README.md b/proxy/README.md deleted file mode 100644 index 12a812520..000000000 --- a/proxy/README.md +++ /dev/null @@ -1,18 +0,0 @@ -Aerospike Java Proxy Client Library -=================================== - -The proxy client is designed to communicate with a proxy server in dbaas -(database as a service) applications. The communication is performed via GRPC -and HTTP/2. The proxy server relays the database commands to the Aerospike -server. The proxy client does not have knowledge of Aerospike server nodes. -The proxy server communicates directly with Aerospike server nodes. - -The proxy client's AerospikeClientProxy implements the same IAerospikeClient -interface as the native client's AerospikeClient. AerospikeClientProxy supports -single record, batch and most scan/query commands. AerospikeClientProxy does -not support info and user admin commands nor scan/query commands that are -directed to a single node. - -The source code can be imported into your IDE and/or built using Maven. - - mvn install diff --git a/proxy/pom.xml b/proxy/pom.xml deleted file mode 100644 index a2ef44f8c..000000000 --- a/proxy/pom.xml +++ /dev/null @@ -1,134 +0,0 @@ - - 4.0.0 - - - com.aerospike - aerospike-parent - 8.1.4 - - aerospike-proxy-client - jar - - aerospike-proxy-client - - - - com.aerospike - aerospike-client-jdk21 - - - - com.aerospike - aerospike-proxy-stub - 1.0.1 - - - - io.grpc - grpc-netty - - - - io.netty - netty-transport - - - - io.netty - netty-transport-native-epoll - linux-x86_64 - - - - io.netty - netty-tcnative-boringssl-static - - - - io.netty - netty-handler - - - - com.auth0 - java-jwt - 4.2.1 - - - - org.jctools - jctools-core - 4.0.1 - - - - com.fasterxml.jackson.core - jackson-databind - 2.17.2 - - - - - ${project.basedir}/src - - - resources - true - - - - - - org.apache.maven.plugins - maven-compiler-plugin - - 21 - 21 - - - - - maven-assembly-plugin - - - jar-with-dependencies - - - - - make-my-jar-with-dependencies - package - - single - - - - - - - org.apache.maven.plugins - maven-javadoc-plugin - 3.6.3 - - none - ${basedir} - Aerospike Java Proxy Client - public - true - Copyright © 2012–{currentYear} Aerospike, Inc. All rights reserved. 
- ${basedir}/src - - com/aerospike/client/proxy/AerospikeClientFactory.java - com/aerospike/client/proxy/AerospikeClientProxy.java - com/aerospike/client/proxy/RecordProxy.java - com/aerospike/client/proxy/RecordSequenceRecordSet.java - com/aerospike/client/proxy/ResultSetProxy.java - - - - - - - diff --git a/proxy/resources/project.properties b/proxy/resources/project.properties deleted file mode 100644 index de55c14db..000000000 --- a/proxy/resources/project.properties +++ /dev/null @@ -1,2 +0,0 @@ -name=${project.name} -version=${project.version} diff --git a/proxy/src/com/aerospike/client/proxy/AerospikeClientFactory.java b/proxy/src/com/aerospike/client/proxy/AerospikeClientFactory.java deleted file mode 100644 index cbc7ff82d..000000000 --- a/proxy/src/com/aerospike/client/proxy/AerospikeClientFactory.java +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Copyright 2012-2023 Aerospike, Inc. - * - * Portions may be licensed to Aerospike, Inc. under one or more contributor - * license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package com.aerospike.client.proxy; - -import com.aerospike.client.AerospikeClient; -import com.aerospike.client.Host; -import com.aerospike.client.IAerospikeClient; -import com.aerospike.client.policy.ClientPolicy; - -/** - * Factory class AerospikeClientFactory will generate either a native client or a proxy client, - * based on whether isProxy is true or false. This allows an application to work with either - * Aerospike native servers or proxy servers used in the database-as-a-service offering (dbaas). - */ -public class AerospikeClientFactory { - /** - * Return either a native Aerospike client or a proxy client, based on isProxy. - * - * @param clientPolicy client configuration parameters, pass in null for defaults - * @param isProxy if true, return AerospikeClientProxy, otherwise return AerospikeClient - * @param hosts array of server hosts that the client can connect - * @return IAerospikeClient - */ - public static IAerospikeClient getClient(ClientPolicy clientPolicy, boolean isProxy, Host... hosts) { - if (isProxy) { - return new AerospikeClientProxy(clientPolicy, hosts); - } - else { - return new AerospikeClient(clientPolicy, hosts); - } - } -} diff --git a/proxy/src/com/aerospike/client/proxy/AerospikeClientProxy.java b/proxy/src/com/aerospike/client/proxy/AerospikeClientProxy.java deleted file mode 100644 index eca2fe93a..000000000 --- a/proxy/src/com/aerospike/client/proxy/AerospikeClientProxy.java +++ /dev/null @@ -1,2864 +0,0 @@ -/* - * Copyright 2012-2024 Aerospike, Inc. - * - * Portions may be licensed to Aerospike, Inc. under one or more contributor - * license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. 
You may obtain a copy of - * the License at http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package com.aerospike.client.proxy; - -import java.io.Closeable; -import java.util.ArrayList; -import java.util.Calendar; -import java.util.List; -import java.util.Properties; -import java.util.concurrent.*; - -import com.aerospike.client.AerospikeException; -import com.aerospike.client.BatchDelete; -import com.aerospike.client.BatchRead; -import com.aerospike.client.BatchRecord; -import com.aerospike.client.BatchResults; -import com.aerospike.client.BatchUDF; -import com.aerospike.client.BatchWrite; -import com.aerospike.client.Bin; -import com.aerospike.client.Host; -import com.aerospike.client.IAerospikeClient; -import com.aerospike.client.Key; -import com.aerospike.client.Language; -import com.aerospike.client.Log; -import com.aerospike.client.Operation; -import com.aerospike.client.Record; -import com.aerospike.client.ResultCode; -import com.aerospike.client.ScanCallback; -import com.aerospike.client.Value; -import com.aerospike.client.admin.Privilege; -import com.aerospike.client.admin.Role; -import com.aerospike.client.admin.User; -import com.aerospike.client.async.EventLoop; -import com.aerospike.client.async.NettyEventLoop; -import com.aerospike.client.async.NettyEventLoops; -import com.aerospike.client.cdt.CTX; -import com.aerospike.client.cluster.Cluster; -import com.aerospike.client.cluster.ClusterStats; -import com.aerospike.client.cluster.Node; -import com.aerospike.client.command.BatchAttr; -import com.aerospike.client.command.Command; -import com.aerospike.client.command.OperateArgs; -import com.aerospike.client.exp.Expression; -import com.aerospike.client.listener.BatchListListener; -import com.aerospike.client.listener.BatchOperateListListener; -import com.aerospike.client.listener.BatchRecordArrayListener; -import com.aerospike.client.listener.BatchRecordSequenceListener; -import com.aerospike.client.listener.BatchSequenceListener; -import com.aerospike.client.listener.ClusterStatsListener; -import com.aerospike.client.listener.DeleteListener; -import com.aerospike.client.listener.ExecuteListener; -import com.aerospike.client.listener.ExistsArrayListener; -import com.aerospike.client.listener.ExistsListener; -import com.aerospike.client.listener.ExistsSequenceListener; -import com.aerospike.client.listener.IndexListener; -import com.aerospike.client.listener.InfoListener; -import com.aerospike.client.listener.RecordArrayListener; -import com.aerospike.client.listener.RecordListener; -import com.aerospike.client.listener.RecordSequenceListener; -import com.aerospike.client.listener.WriteListener; -import com.aerospike.client.metrics.MetricsPolicy; -import com.aerospike.client.policy.AdminPolicy; -import com.aerospike.client.policy.BatchDeletePolicy; -import com.aerospike.client.policy.BatchPolicy; -import com.aerospike.client.policy.BatchUDFPolicy; -import com.aerospike.client.policy.BatchWritePolicy; -import com.aerospike.client.policy.ClientPolicy; -import com.aerospike.client.policy.InfoPolicy; -import com.aerospike.client.policy.Policy; -import com.aerospike.client.policy.QueryPolicy; -import com.aerospike.client.policy.ScanPolicy; -import 
com.aerospike.client.policy.WritePolicy; -import com.aerospike.client.proxy.BatchProxy.BatchListListenerSync; -import com.aerospike.client.proxy.auth.AuthTokenManager; -import com.aerospike.client.proxy.grpc.GrpcCallExecutor; -import com.aerospike.client.proxy.grpc.GrpcChannelProvider; -import com.aerospike.client.proxy.grpc.GrpcClientPolicy; -import com.aerospike.client.query.IndexCollectionType; -import com.aerospike.client.query.IndexType; -import com.aerospike.client.query.PartitionFilter; -import com.aerospike.client.query.PartitionTracker; -import com.aerospike.client.query.QueryListener; -import com.aerospike.client.query.RecordSet; -import com.aerospike.client.query.ResultSet; -import com.aerospike.client.query.Statement; -import com.aerospike.client.task.ExecuteTask; -import com.aerospike.client.task.IndexTask; -import com.aerospike.client.task.RegisterTask; -import com.aerospike.client.util.Packer; -import com.aerospike.client.util.Util; - -import io.netty.channel.Channel; - -/** - * Aerospike proxy client based implementation of {@link IAerospikeClient}. The proxy client - * communicates with a proxy server via GRPC and HTTP/2. The proxy server relays the database - * commands to the Aerospike server. The proxy client does not have knowledge of Aerospike - * server nodes. Only the proxy server can communicate directly with Aerospike server nodes. - * - * GRPC is an async framework, so an Aerospike sync command schedules the corresponding - * async command and then waits for the async command to complete before returning the data - * to the user. - * - * The async methods` eventLoop argument is ignored in the proxy client. Instead, the - * commands are pipelined into blocks which are then executed via one of multiple GRPC channels. - * Since the eventLoop thread is not chosen, results can be returned from different threads. - * If data is shared between multiple async command listeners, that data must be accessed in - * a thread-safe manner. - */ -public class AerospikeClientProxy implements IAerospikeClient, Closeable { - //------------------------------------------------------- - // Static variables. - //------------------------------------------------------- - - /** - * Proxy client version - */ - public static String Version = getVersion(); - - /** - * Lower limit of proxy server connection. - */ - private static final int MIN_CONNECTIONS = 1; - - // Thread factory used in synchronous batch, scan and query commands. - public final ThreadFactory threadFactory; - - /** - * Upper limit of proxy server connection. - */ - private static final int MAX_CONNECTIONS = 8; - - private static final String NotSupported = "Method not supported in proxy client: "; - - //------------------------------------------------------- - // Member variables. - //------------------------------------------------------- - - /** - * Default read policy that is used when read command policy is null. - */ - public final Policy readPolicyDefault; - - /** - * Default write policy that is used when write command policy is null. - */ - public final WritePolicy writePolicyDefault; - - /** - * Default scan policy that is used when scan command policy is null. - */ - public final ScanPolicy scanPolicyDefault; - - /** - * Default query policy that is used when query command policy is null. - */ - public final QueryPolicy queryPolicyDefault; - - /** - * Default parent policy used in batch read commands. Parent policy fields - * include socketTimeout, totalTimeout, maxRetries, etc... 
- */ - public final BatchPolicy batchPolicyDefault; - - /** - * Default parent policy used in batch write commands. Parent policy fields - * include socketTimeout, totalTimeout, maxRetries, etc... - */ - public final BatchPolicy batchParentPolicyWriteDefault; - - /** - * Default write policy used in batch operate commands. - * Write policy fields include generation, expiration, durableDelete, etc... - */ - public final BatchWritePolicy batchWritePolicyDefault; - - /** - * Default delete policy used in batch delete commands. - */ - public final BatchDeletePolicy batchDeletePolicyDefault; - - /** - * Default user defined function policy used in batch UDF execute commands. - */ - public final BatchUDFPolicy batchUDFPolicyDefault; - - /** - * Default info policy that is used when info command policy is null. - */ - public final InfoPolicy infoPolicyDefault; - - private final WritePolicy operatePolicyReadDefault; - private final AuthTokenManager authTokenManager; - private final GrpcCallExecutor executor; - - //------------------------------------------------------- - // Constructors - //------------------------------------------------------- - - /** - * Initialize proxy client with suitable hosts to seed the cluster map. - * The client policy is used to set defaults and size internal data structures. - *
- * In most cases, only one host is necessary to seed the cluster. The remaining hosts - * are added as future seeds in case of a complete network failure. - * - * @param policy client configuration parameters, pass in null for defaults - * @param hosts array of potential hosts to seed the cluster - * @throws AerospikeException if all host connections fail - */ - public AerospikeClientProxy(ClientPolicy policy, Host... hosts) { - if (policy == null) { - policy = new ClientPolicy(); - policy.minConnsPerNode = 1; - policy.maxConnsPerNode = 8; - policy.asyncMaxConnsPerNode = 8; - policy.timeout = 5000; - } - - this.threadFactory = Thread.ofVirtual().name("Aerospike-", 0L).factory(); - this.readPolicyDefault = policy.readPolicyDefault; - this.writePolicyDefault = policy.writePolicyDefault; - this.scanPolicyDefault = policy.scanPolicyDefault; - this.queryPolicyDefault = policy.queryPolicyDefault; - this.batchPolicyDefault = policy.batchPolicyDefault; - this.batchParentPolicyWriteDefault = policy.batchParentPolicyWriteDefault; - this.batchWritePolicyDefault = policy.batchWritePolicyDefault; - this.batchDeletePolicyDefault = policy.batchDeletePolicyDefault; - this.batchUDFPolicyDefault = policy.batchUDFPolicyDefault; - this.infoPolicyDefault = policy.infoPolicyDefault; - this.operatePolicyReadDefault = new WritePolicy(this.readPolicyDefault); - - GrpcChannelProvider channelProvider = new GrpcChannelProvider(); - - if (policy.user != null || policy.password != null) { - authTokenManager = new AuthTokenManager(policy, channelProvider); - } - else { - authTokenManager = null; - } - - try { - // The gRPC client policy transformed from the client policy. - GrpcClientPolicy grpcClientPolicy = toGrpcClientPolicy(policy); - executor = new GrpcCallExecutor(grpcClientPolicy, authTokenManager, hosts); - channelProvider.setCallExecutor(executor); - - // Warmup after the call executor in the channel provider has - // been set. The channel provider is used to fetch auth tokens - // required for the warm up calls. - executor.warmupChannels(); - } - catch (Throwable e) { - if(authTokenManager != null) { - authTokenManager.close(); - } - throw e; - } - } - - /** - * Return client version string. - */ - private static String getVersion() { - final Properties properties = new Properties(); - String version = null; - - try { - properties.load(AerospikeClientProxy.class.getClassLoader().getResourceAsStream("project.properties")); - version = properties.getProperty("version"); - } - catch (Exception e) { - Log.warn("Failed to retrieve client version: " + Util.getErrorMessage(e)); - } - return version; - } - - //------------------------------------------------------- - // Default Policies - //------------------------------------------------------- - - /** - * Return read policy default. Use when the policy will not be modified. - */ - public final Policy getReadPolicyDefault() { - return readPolicyDefault; - } - - /** - * Copy read policy default. Use when the policy will be modified for use in a specific transaction. - */ - public final Policy copyReadPolicyDefault() { - return new Policy(readPolicyDefault); - } - - /** - * Return write policy default. Use when the policy will not be modified. - */ - public final WritePolicy getWritePolicyDefault() { - return writePolicyDefault; - } - - /** - * Copy write policy default. Use when the policy will be modified for use in a specific transaction. 
- */ - public final WritePolicy copyWritePolicyDefault() { - return new WritePolicy(writePolicyDefault); - } - - /** - * Return scan policy default. Use when the policy will not be modified. - */ - public final ScanPolicy getScanPolicyDefault() { - return scanPolicyDefault; - } - - /** - * Copy scan policy default. Use when the policy will be modified for use in a specific transaction. - */ - public final ScanPolicy copyScanPolicyDefault() { - return new ScanPolicy(scanPolicyDefault); - } - - /** - * Return query policy default. Use when the policy will not be modified. - */ - public final QueryPolicy getQueryPolicyDefault() { - return queryPolicyDefault; - } - - /** - * Copy query policy default. Use when the policy will be modified for use in a specific transaction. - */ - public final QueryPolicy copyQueryPolicyDefault() { - return new QueryPolicy(queryPolicyDefault); - } - - /** - * Return batch header read policy default. Use when the policy will not be modified. - */ - public final BatchPolicy getBatchPolicyDefault() { - return batchPolicyDefault; - } - - /** - * Copy batch header read policy default. Use when the policy will be modified for use in a specific transaction. - */ - public final BatchPolicy copyBatchPolicyDefault() { - return new BatchPolicy(batchPolicyDefault); - } - - /** - * Return batch header write policy default. Use when the policy will not be modified. - */ - public final BatchPolicy getBatchParentPolicyWriteDefault() { - return batchParentPolicyWriteDefault; - } - - /** - * Copy batch header write policy default. Use when the policy will be modified for use in a specific transaction. - */ - public final BatchPolicy copyBatchParentPolicyWriteDefault() { - return new BatchPolicy(batchParentPolicyWriteDefault); - } - - /** - * Return batch detail write policy default. Use when the policy will not be modified. - */ - public final BatchWritePolicy getBatchWritePolicyDefault() { - return batchWritePolicyDefault; - } - - /** - * Copy batch detail write policy default. Use when the policy will be modified for use in a specific transaction. - */ - public final BatchWritePolicy copyBatchWritePolicyDefault() { - return new BatchWritePolicy(batchWritePolicyDefault); - } - - /** - * Return batch detail delete policy default. Use when the policy will not be modified. - */ - public final BatchDeletePolicy getBatchDeletePolicyDefault() { - return batchDeletePolicyDefault; - } - - /** - * Copy batch detail delete policy default. Use when the policy will be modified for use in a specific transaction. - */ - public final BatchDeletePolicy copyBatchDeletePolicyDefault() { - return new BatchDeletePolicy(batchDeletePolicyDefault); - } - - /** - * Return batch detail UDF policy default. Use when the policy will not be modified. - */ - public final BatchUDFPolicy getBatchUDFPolicyDefault() { - return batchUDFPolicyDefault; - } - - /** - * Copy batch detail UDF policy default. Use when the policy will be modified for use in a specific transaction. - */ - public final BatchUDFPolicy copyBatchUDFPolicyDefault() { - return new BatchUDFPolicy(batchUDFPolicyDefault); - } - - /** - * Return info command policy default. Use when the policy will not be modified. - */ - public final InfoPolicy getInfoPolicyDefault() { - return infoPolicyDefault; - } - - /** - * Copy info command policy default. Use when the policy will be modified for use in a specific transaction. 
- */ - public final InfoPolicy copyInfoPolicyDefault() { - return new InfoPolicy(infoPolicyDefault); - } - - //------------------------------------------------------- - // Client Management - //------------------------------------------------------- - - /** - * Close GRPC executor and associated resources. The client instance should not - * be used after this call. - */ - @Override - public void close() { - try { - executor.close(); - } - catch (Throwable e) { - Log.warn("Failed to close grpcCallExecutor: " + Util.getErrorMessage(e)); - } - - try { - if (authTokenManager != null) { - authTokenManager.close(); - } - } - catch (Throwable e) { - Log.warn("Failed to close authTokenManager: " + Util.getErrorMessage(e)); - } - } - - /** - * This method will always return true in the proxy client. - */ - @Override - public boolean isConnected() { - return executor != null; - } - - /** - * Not supported in proxy client. - */ - @Override - public Node[] getNodes() { - throw new AerospikeException(NotSupported + "getNodes"); - } - - /** - * Not supported in proxy client. - */ - @Override - public List getNodeNames() { - throw new AerospikeException(NotSupported + "getNodeNames"); - } - - /** - * Not supported in proxy client. - */ - @Override - public Node getNode(String nodeName) { - throw new AerospikeException(NotSupported + "getNode"); - } - - /** - * Not supported in proxy client. - */ - public final void enableMetrics(MetricsPolicy policy) { - throw new AerospikeException(NotSupported + "enableMetrics"); - } - - /** - * Not supported in proxy client. - */ - public final void disableMetrics() { - throw new AerospikeException(NotSupported + "disableMetrics"); - } - - /** - * Not supported in proxy client. - */ - @Override - public ClusterStats getClusterStats() { - throw new AerospikeException(NotSupported + "getClusterStats"); - } - - /** - * Not supported in proxy client. - */ - public final void getClusterStats(ClusterStatsListener listener) { - throw new AerospikeException(NotSupported + "getClusterStats"); - } - - /** - * Not supported in proxy client. - */ - @Override - public Cluster getCluster() { - throw new AerospikeException(NotSupported + "getCluster"); - } - - //------------------------------------------------------- - // Write Record Operations - //------------------------------------------------------- - - /** - * Write record bin(s). - * - * @param policy write configuration parameters, pass in null for defaults - * @param key unique record identifier - * @param bins array of bin name/value pairs - * @throws AerospikeException if write fails - */ - @Override - public void put(WritePolicy policy, Key key, Bin... bins) { - CompletableFuture future = new CompletableFuture<>(); - WriteListener listener = prepareWriteListener(future); - put(null, listener, policy, key, bins); - getFuture(future); - } - - /** - * Asynchronously write record bin(s). - * - * @param eventLoop ignored, pass in null - * @param listener where to send results, pass in null for fire and forget - * @param policy write configuration parameters, pass in null for defaults - * @param key unique record identifier - * @param bins array of bin name/value pairs - * @throws AerospikeException if event loop registration fails - */ - @Override - public void put(EventLoop eventLoop, WriteListener listener, WritePolicy policy, Key key, Bin... 
bins) { - if (policy == null) { - policy = writePolicyDefault; - } - WriteCommandProxy command = new WriteCommandProxy(executor, listener, policy, key, bins, Operation.Type.WRITE); - command.execute(); - } - - //------------------------------------------------------- - // String Operations - //------------------------------------------------------- - - /** - * Append bin string values to existing record bin values. - * This call only works for string values. - * - * @param policy write configuration parameters, pass in null for defaults - * @param key unique record identifier - * @param bins array of bin name/value pairs - * @throws AerospikeException if append fails - */ - @Override - public void append(WritePolicy policy, Key key, Bin... bins) { - CompletableFuture future = new CompletableFuture<>(); - WriteListener listener = prepareWriteListener(future); - append(null, listener, policy, key, bins); - getFuture(future); - } - - /** - * Asynchronously append bin string values to existing record bin values. - * This call only works for string values. - * - * @param eventLoop ignored, pass in null - * @param listener where to send results, pass in null for fire and forget - * @param policy write configuration parameters, pass in null for defaults - * @param key unique record identifier - * @param bins array of bin name/value pairs - * @throws AerospikeException if event loop registration fails - */ - @Override - public void append(EventLoop eventLoop, WriteListener listener, WritePolicy policy, Key key, Bin... bins) { - if (policy == null) { - policy = writePolicyDefault; - } - WriteCommandProxy command = new WriteCommandProxy(executor, listener, policy, key, bins, Operation.Type.APPEND); - command.execute(); - } - - /** - * Prepend bin string values to existing record bin values. - * This call works only for string values. - * - * @param policy write configuration parameters, pass in null for defaults - * @param key unique record identifier - * @param bins array of bin name/value pairs - * @throws AerospikeException if prepend fails - */ - @Override - public void prepend(WritePolicy policy, Key key, Bin... bins) { - CompletableFuture future = new CompletableFuture<>(); - WriteListener listener = prepareWriteListener(future); - prepend(null, listener, policy, key, bins); - getFuture(future); - } - - /** - * Asynchronously prepend bin string values to existing record bin values. - * This call only works for string values. - * - * @param eventLoop ignored, pass in null - * @param listener where to send results, pass in null for fire and forget - * @param policy write configuration parameters, pass in null for defaults - * @param key unique record identifier - * @param bins array of bin name/value pairs - * @throws AerospikeException if event loop registration fails - */ - @Override - public void prepend(EventLoop eventLoop, WriteListener listener, WritePolicy policy, Key key, Bin... bins) { - if (policy == null) { - policy = writePolicyDefault; - } - WriteCommandProxy command = new WriteCommandProxy(executor, listener, policy, key, bins, Operation.Type.PREPEND); - command.execute(); - } - - //------------------------------------------------------- - // Arithmetic Operations - //------------------------------------------------------- - - /** - * Add integer/double bin values to existing record bin values. 
- * - * @param policy write configuration parameters, pass in null for defaults - * @param key unique record identifier - * @param bins array of bin name/value pairs - * @throws AerospikeException if add fails - */ - @Override - public void add(WritePolicy policy, Key key, Bin... bins) { - CompletableFuture future = new CompletableFuture<>(); - WriteListener listener = prepareWriteListener(future); - add(null, listener, policy, key, bins); - getFuture(future); - } - - /** - * Asynchronously add integer/double bin values to existing record bin values. - * - * @param eventLoop ignored, pass in null - * @param listener where to send results, pass in null for fire and forget - * @param policy write configuration parameters, pass in null for defaults - * @param key unique record identifier - * @param bins array of bin name/value pairs - * @throws AerospikeException if event loop registration fails - */ - @Override - public void add(EventLoop eventLoop, WriteListener listener, WritePolicy policy, Key key, Bin... bins) { - if (policy == null) { - policy = writePolicyDefault; - } - WriteCommandProxy command = new WriteCommandProxy(executor, listener, policy, key, bins, Operation.Type.ADD); - command.execute(); - } - - //------------------------------------------------------- - // Delete Operations - //------------------------------------------------------- - - /** - * Delete record for specified key. - * - * @param policy delete configuration parameters, pass in null for defaults - * @param key unique record identifier - * @return whether record existed on server before deletion - * @throws AerospikeException if delete fails - */ - @Override - public boolean delete(WritePolicy policy, Key key) { - CompletableFuture future = new CompletableFuture<>(); - DeleteListener listener = prepareDeleteListener(future); - delete(null, listener, policy, key); - return getFuture(future); - } - - /** - * Asynchronously delete record for specified key. - * - * @param eventLoop ignored, pass in null - * @param listener where to send results, pass in null for fire and forget - * @param policy write configuration parameters, pass in null for defaults - * @param key unique record identifier - * @throws AerospikeException if event loop registration fails - */ - @Override - public void delete(EventLoop eventLoop, DeleteListener listener, WritePolicy policy, Key key) { - if (policy == null) { - policy = writePolicyDefault; - } - DeleteCommandProxy command = new DeleteCommandProxy(executor, listener, policy, key); - command.execute(); - } - - /** - * Delete records for specified keys. If a key is not found, the corresponding result - * {@link BatchRecord#resultCode} will be {@link ResultCode#KEY_NOT_FOUND_ERROR}. - * - * @param batchPolicy batch configuration parameters, pass in null for defaults - * @param deletePolicy delete configuration parameters, pass in null for defaults - * @param keys array of unique record identifiers - * @throws AerospikeException.BatchRecordArray which contains results for keys that did complete - */ - @Override - public BatchResults delete(BatchPolicy batchPolicy, BatchDeletePolicy deletePolicy, Key[] keys) { - CompletableFuture future = new CompletableFuture<>(); - BatchRecordArrayListener listener = prepareBatchRecordArrayListener(future); - delete(null, listener, batchPolicy, deletePolicy, keys); - return getFuture(future); - } - - /** - * Asynchronously delete records for specified keys. - *
- * If a key is not found, the corresponding result {@link BatchRecord#resultCode} will be - * {@link ResultCode#KEY_NOT_FOUND_ERROR}. - * - * @param eventLoop ignored, pass in null - * @param listener where to send results - * @param batchPolicy batch configuration parameters, pass in null for defaults - * @param deletePolicy delete configuration parameters, pass in null for defaults - * @param keys array of unique record identifiers - * @throws AerospikeException if event loop registration fails - */ - @Override - public void delete( - EventLoop eventLoop, - BatchRecordArrayListener listener, - BatchPolicy batchPolicy, - BatchDeletePolicy deletePolicy, - Key[] keys - ) { - if (keys.length == 0) { - listener.onSuccess(new BatchRecord[0], true); - return; - } - - if (batchPolicy == null) { - batchPolicy = batchParentPolicyWriteDefault; - } - - if (deletePolicy == null) { - deletePolicy = batchDeletePolicyDefault; - } - - BatchAttr attr = new BatchAttr(); - attr.setDelete(deletePolicy); - - CommandProxy command = new BatchProxy.OperateRecordArrayCommand(executor, - batchPolicy, keys, null, listener, attr); - - command.execute(); - } - - /** - * Asynchronously delete records for specified keys. - *
- * Each record result is returned in separate onRecord() calls. - * If a key is not found, the corresponding result {@link BatchRecord#resultCode} will be - * {@link ResultCode#KEY_NOT_FOUND_ERROR}. - * - * @param eventLoop ignored, pass in null - * @param listener where to send results - * @param batchPolicy batch configuration parameters, pass in null for defaults - * @param deletePolicy delete configuration parameters, pass in null for defaults - * @param keys array of unique record identifiers - * @throws AerospikeException if event loop registration fails - */ - @Override - public void delete( - EventLoop eventLoop, - BatchRecordSequenceListener listener, - BatchPolicy batchPolicy, - BatchDeletePolicy deletePolicy, - Key[] keys - ) { - if (keys.length == 0) { - listener.onSuccess(); - return; - } - - if (batchPolicy == null) { - batchPolicy = batchParentPolicyWriteDefault; - } - - if (deletePolicy == null) { - deletePolicy = batchDeletePolicyDefault; - } - - BatchAttr attr = new BatchAttr(); - attr.setDelete(deletePolicy); - - CommandProxy command = new BatchProxy.OperateRecordSequenceCommand(executor, - batchPolicy, keys, null, listener, attr); - - command.execute(); - } - - /** - * Not supported in proxy client. - */ - @Override - public void truncate(InfoPolicy policy, String ns, String set, Calendar beforeLastUpdate) { - throw new AerospikeException(NotSupported + "truncate"); - } - - //------------------------------------------------------- - // Touch Operations - //------------------------------------------------------- - - /** - * Reset record's time to expiration using the policy's expiration. - * Fail if the record does not exist. - * - * @param policy write configuration parameters, pass in null for defaults - * @param key unique record identifier - * @throws AerospikeException if touch fails - */ - @Override - public void touch(WritePolicy policy, Key key) { - CompletableFuture future = new CompletableFuture<>(); - WriteListener listener = prepareWriteListener(future); - touch(null, listener, policy, key); - getFuture(future); - } - - /** - * Asynchronously reset record's time to expiration using the policy's expiration. - * Fail if the record does not exist. - * - * @param eventLoop ignored, pass in null - * @param listener where to send results, pass in null for fire and forget - * @param policy write configuration parameters, pass in null for defaults - * @param key unique record identifier - * @throws AerospikeException if event loop registration fails - */ - @Override - public void touch(EventLoop eventLoop, WriteListener listener, WritePolicy policy, Key key) { - if (policy == null) { - policy = writePolicyDefault; - } - TouchCommandProxy command = new TouchCommandProxy(executor, listener, policy, key); - command.execute(); - } - - //------------------------------------------------------- - // Existence-Check Operations - //------------------------------------------------------- - - /** - * Determine if a record key exists. - * - * @param policy generic configuration parameters, pass in null for defaults - * @param key unique record identifier - * @return whether record exists or not - * @throws AerospikeException if command fails - */ - @Override - public boolean exists(Policy policy, Key key) { - CompletableFuture future = new CompletableFuture<>(); - ExistsListener listener = prepareExistsListener(future); - exists(null, listener, policy, key); - return getFuture(future); - } - - /** - * Asynchronously determine if a record key exists. 
- * - * @param eventLoop ignored, pass in null - * @param listener where to send results - * @param policy generic configuration parameters, pass in null for defaults - * @param key unique record identifier - * @throws AerospikeException if event loop registration fails - */ - @Override - public void exists(EventLoop eventLoop, ExistsListener listener, Policy policy, Key key) { - if (policy == null) { - policy = readPolicyDefault; - } - ExistsCommandProxy command = new ExistsCommandProxy(executor, listener, policy, key); - command.execute(); - } - - /** - * Check if multiple record keys exist in one batch call. - * The returned boolean array is in positional order with the original key array order. - * - * @param policy batch configuration parameters, pass in null for defaults - * @param keys array of unique record identifiers - * @return array key/existence status pairs - * @throws AerospikeException.BatchExists which contains results for keys that did complete - */ - @Override - public boolean[] exists(BatchPolicy policy, Key[] keys) { - CompletableFuture future = new CompletableFuture<>(); - ExistsArrayListener listener = prepareExistsArrayListener(future); - exists(null, listener, policy, keys); - return getFuture(future); - } - - /** - * Asynchronously check if multiple record keys exist in one batch call. - *
- * The returned boolean array is in positional order with the original key array order. - * - * @param eventLoop ignored, pass in null - * @param listener where to send results - * @param policy batch configuration parameters, pass in null for defaults - * @param keys unique record identifiers - * @throws AerospikeException if event loop registration fails - */ - @Override - public void exists(EventLoop eventLoop, ExistsArrayListener listener, BatchPolicy policy, Key[] keys) { - if (keys.length == 0) { - listener.onSuccess(keys, new boolean[0]); - return; - } - - if (policy == null) { - policy = batchPolicyDefault; - } - - CommandProxy command = new BatchProxy.ExistsArrayCommand(executor, policy, listener, keys); - command.execute(); - } - - /** - * Asynchronously check if multiple record keys exist in one batch call. - *
- * Each key's result is returned in separate onExists() calls. - * - * @param eventLoop ignored, pass in null - * @param listener where to send results - * @param policy batch configuration parameters, pass in null for defaults - * @param keys unique record identifiers - * @throws AerospikeException if event loop registration fails - */ - @Override - public void exists(EventLoop eventLoop, ExistsSequenceListener listener, BatchPolicy policy, Key[] keys) { - if (keys.length == 0) { - listener.onSuccess(); - return; - } - - if (policy == null) { - policy = batchPolicyDefault; - } - - CommandProxy command = new BatchProxy.ExistsSequenceCommand(executor, policy, listener, keys); - command.execute(); - } - - //------------------------------------------------------- - // Read Record Operations - //------------------------------------------------------- - - /** - * Read entire record for specified key. - * - * @param policy generic configuration parameters, pass in null for defaults - * @param key unique record identifier - * @return if found, return record instance. If not found, return null. - * @throws AerospikeException if read fails - */ - @Override - public Record get(Policy policy, Key key) { - return get(policy, key, (String[])null); - } - - /** - * Asynchronously read entire record for specified key. - * - * @param eventLoop ignored, pass in null - * @param listener where to send results - * @param policy generic configuration parameters, pass in null for defaults - * @param key unique record identifier - * @throws AerospikeException if event loop registration fails - */ - @Override - public void get(EventLoop eventLoop, RecordListener listener, Policy policy, Key key) { - get(eventLoop, listener, policy, key, (String[])null); - } - - /** - * Read record header and bins for specified key. - * - * @param policy generic configuration parameters, pass in null for defaults - * @param key unique record identifier - * @param binNames bins to retrieve - * @return if found, return record instance. If not found, return null. - * @throws AerospikeException if read fails - */ - @Override - public Record get(Policy policy, Key key, String... binNames) { - CompletableFuture future = new CompletableFuture<>(); - RecordListener listener = prepareRecordListener(future); - get(null, listener, policy, key, binNames); - return getFuture(future); - } - - /** - * Asynchronously read record header and bins for specified key. - * - * @param eventLoop ignored, pass in null - * @param listener where to send results - * @param policy generic configuration parameters, pass in null for defaults - * @param key unique record identifier - * @param binNames bins to retrieve - * @throws AerospikeException if event loop registration fails - */ - @Override - public void get(EventLoop eventLoop, RecordListener listener, Policy policy, Key key, String... binNames) { - if (policy == null) { - policy = readPolicyDefault; - } - ReadCommandProxy command = new ReadCommandProxy(executor, listener, policy, key, binNames); - command.execute(); - } - - /** - * Read record generation and expiration only for specified key. Bins are not read. - * - * @param policy generic configuration parameters, pass in null for defaults - * @param key unique record identifier - * @return if found, return record instance. If not found, return null. 
- * @throws AerospikeException if read fails - */ - @Override - public Record getHeader(Policy policy, Key key) { - CompletableFuture future = new CompletableFuture<>(); - RecordListener listener = prepareRecordListener(future); - getHeader(null, listener, policy, key); - return getFuture(future); - } - - /** - * Asynchronously read record generation and expiration only for specified key. Bins are not read. - * - * @param eventLoop ignored, pass in null - * @param listener where to send results - * @param policy generic configuration parameters, pass in null for defaults - * @param key unique record identifier - * @throws AerospikeException if event loop registration fails - */ - @Override - public void getHeader(EventLoop eventLoop, RecordListener listener, Policy policy, Key key) { - if (policy == null) { - policy = readPolicyDefault; - } - ReadHeaderCommandProxy command = new ReadHeaderCommandProxy(executor, listener, policy, key); - command.execute(); - } - - //------------------------------------------------------- - // Batch Read Operations - //------------------------------------------------------- - - /** - * Read multiple records for specified batch keys in one batch call. - * This method allows different namespaces/bins to be requested for each key in the batch. - * The returned records are located in the same list. - * If the BatchRead key field is not found, the corresponding record field will be null. - * - * @param policy batch configuration parameters, pass in null for defaults - * @param records list of unique record identifiers and the bins to retrieve. - * The returned records are located in the same list. - * @return true if all batch key requests succeeded - * @throws AerospikeException if read fails - */ - @Override - public boolean get(BatchPolicy policy, List records) { - if (records.size() == 0) { - return true; - } - - if (policy == null) { - policy = batchPolicyDefault; - } - - CompletableFuture future = new CompletableFuture<>(); - BatchListListenerSync listener = prepareBatchListListenerSync(future); - - CommandProxy command = new BatchProxy.ReadListCommandSync(executor, policy, listener, records); - command.execute(); - - return getFuture(future); - } - - /** - * Asynchronously read multiple records for specified batch keys in one batch call. - *
- * This method allows different namespaces/bins to be requested for each key in the batch. - * The returned records are located in the same list. - * If the BatchRead key field is not found, the corresponding record field will be null. - * - * @param eventLoop ignored, pass in null - * @param listener where to send results - * @param policy batch configuration parameters, pass in null for defaults - * @param records list of unique record identifiers and the bins to retrieve. - * The returned records are located in the same list. - * @throws AerospikeException if event loop registration fails - */ - @Override - public void get(EventLoop eventLoop, BatchListListener listener, BatchPolicy policy, List records) { - if (records.size() == 0) { - listener.onSuccess(records); - return; - } - - if (policy == null) { - policy = batchPolicyDefault; - } - CommandProxy command = new BatchProxy.ReadListCommand(executor, policy, listener, records); - command.execute(); - } - - /** - * Asynchronously read multiple records for specified batch keys in one batch call. - *
- * This method allows different namespaces/bins to be requested for each key in the batch. - * Each record result is returned in separate onRecord() calls. - * If the BatchRead key field is not found, the corresponding record field will be null. - * - * @param eventLoop ignored, pass in null - * @param listener where to send results - * @param policy batch configuration parameters, pass in null for defaults - * @param records list of unique record identifiers and the bins to retrieve. - * The returned records are located in the same list. - * @throws AerospikeException if event loop registration fails - */ - @Override - public void get(EventLoop eventLoop, BatchSequenceListener listener, BatchPolicy policy, List records) { - if (records.size() == 0) { - listener.onSuccess(); - return; - } - - if (policy == null) { - policy = batchPolicyDefault; - } - - CommandProxy command = new BatchProxy.ReadSequenceCommand(executor, policy, listener, records); - command.execute(); - } - - /** - * Read multiple records for specified keys in one batch call. - * The returned records are in positional order with the original key array order. - * If a key is not found, the positional record will be null. - * - * @param policy batch configuration parameters, pass in null for defaults - * @param keys array of unique record identifiers - * @return array of records - * @throws AerospikeException.BatchRecords which contains results for keys that did complete - */ - @Override - public Record[] get(BatchPolicy policy, Key[] keys) { - CompletableFuture future = new CompletableFuture<>(); - RecordArrayListener listener = prepareRecordArrayListener(future); - get(null, listener, policy, keys); - return getFuture(future); - } - - /** - * Asynchronously read multiple records for specified keys in one batch call. - *
- * The returned records are in positional order with the original key array order. - * If a key is not found, the positional record will be null. - * - * @param eventLoop ignored, pass in null - * @param listener where to send results - * @param policy batch configuration parameters, pass in null for defaults - * @param keys array of unique record identifiers - * @throws AerospikeException if event loop registration fails - */ - @Override - public void get(EventLoop eventLoop, RecordArrayListener listener, BatchPolicy policy, Key[] keys) { - if (keys.length == 0) { - listener.onSuccess(keys, new Record[0]); - return; - } - - if (policy == null) { - policy = batchPolicyDefault; - } - - CommandProxy command = new BatchProxy.GetArrayCommand(executor, policy, listener, keys, null, null, Command.INFO1_READ | Command.INFO1_GET_ALL, false); - command.execute(); - } - - /** - * Asynchronously read multiple records for specified keys in one batch call. - *
- * Each record result is returned in separate onRecord() calls. - * If a key is not found, the record will be null. - * - * @param eventLoop ignored, pass in null - * @param listener where to send results - * @param policy batch configuration parameters, pass in null for defaults - * @param keys array of unique record identifiers - * @throws AerospikeException if event loop registration fails - */ - @Override - public void get(EventLoop eventLoop, RecordSequenceListener listener, BatchPolicy policy, Key[] keys) { - if (keys.length == 0) { - listener.onSuccess(); - return; - } - - if (policy == null) { - policy = batchPolicyDefault; - } - - CommandProxy command = new BatchProxy.GetSequenceCommand(executor, policy, listener, keys, null, null, Command.INFO1_READ | Command.INFO1_GET_ALL, false); - command.execute(); - } - - /** - * Read multiple record headers and bins for specified keys in one batch call. - * The returned records are in positional order with the original key array order. - * If a key is not found, the positional record will be null. - * - * @param policy batch configuration parameters, pass in null for defaults - * @param keys array of unique record identifiers - * @param binNames array of bins to retrieve - * @return array of records - * @throws AerospikeException.BatchRecords which contains results for keys that did complete - */ - @Override - public Record[] get(BatchPolicy policy, Key[] keys, String... binNames) { - CompletableFuture future = new CompletableFuture<>(); - RecordArrayListener listener = prepareRecordArrayListener(future); - get(null, listener, policy, keys, binNames); - return getFuture(future); - } - - /** - * Asynchronously read multiple record headers and bins for specified keys in one batch call. - *
- * The returned records are in positional order with the original key array order. - * If a key is not found, the positional record will be null. - * - * @param eventLoop ignored, pass in null - * @param listener where to send results - * @param policy batch configuration parameters, pass in null for defaults - * @param keys array of unique record identifiers - * @param binNames array of bins to retrieve - * @throws AerospikeException if event loop registration fails - */ - @Override - public void get(EventLoop eventLoop, RecordArrayListener listener, BatchPolicy policy, Key[] keys, String... binNames) { - if (keys.length == 0) { - listener.onSuccess(keys, new Record[0]); - return; - } - - if (policy == null) { - policy = batchPolicyDefault; - } - - int readAttr = (binNames == null || binNames.length == 0)? Command.INFO1_READ | Command.INFO1_GET_ALL : Command.INFO1_READ; - - CommandProxy command = new BatchProxy.GetArrayCommand(executor, policy, listener, keys, binNames, null, readAttr, false); - command.execute(); - } - - /** - * Asynchronously read multiple record headers and bins for specified keys in one batch call. - *
- * Each record result is returned in separate onRecord() calls. - * If a key is not found, the record will be null. - * - * @param eventLoop ignored, pass in null - * @param listener where to send results - * @param policy batch configuration parameters, pass in null for defaults - * @param keys array of unique record identifiers - * @param binNames array of bins to retrieve - * @throws AerospikeException if event loop registration fails - */ - @Override - public void get(EventLoop eventLoop, RecordSequenceListener listener, BatchPolicy policy, Key[] keys, String... binNames) { - if (keys.length == 0) { - listener.onSuccess(); - return; - } - - if (policy == null) { - policy = batchPolicyDefault; - } - - int readAttr = (binNames == null || binNames.length == 0)? Command.INFO1_READ | Command.INFO1_GET_ALL : Command.INFO1_READ; - - CommandProxy command = new BatchProxy.GetSequenceCommand(executor, policy, listener, keys, binNames, null, readAttr, false); - command.execute(); - } - - /** - * Read multiple records for specified keys using read operations in one batch call. - * The returned records are in positional order with the original key array order. - * If a key is not found, the positional record will be null. - * - * @param policy batch configuration parameters, pass in null for defaults - * @param keys array of unique record identifiers - * @param ops array of read operations on record - * @return array of records - * @throws AerospikeException.BatchRecords which contains results for keys that did complete - */ - @Override - public Record[] get(BatchPolicy policy, Key[] keys, Operation... ops) { - CompletableFuture future = new CompletableFuture<>(); - RecordArrayListener listener = prepareRecordArrayListener(future); - get(null, listener, policy, keys, ops); - return getFuture(future); - } - - /** - * Asynchronously read multiple records for specified keys using read operations in one batch call. - *
- * The returned records are in positional order with the original key array order. - * If a key is not found, the positional record will be null. - * - * @param eventLoop ignored, pass in null - * @param listener where to send results - * @param policy batch configuration parameters, pass in null for defaults - * @param keys array of unique record identifiers - * @param ops array of read operations on record - * @throws AerospikeException if event loop registration fails - */ - @Override - public void get(EventLoop eventLoop, RecordArrayListener listener, BatchPolicy policy, Key[] keys, Operation... ops) { - if (keys.length == 0) { - listener.onSuccess(keys, new Record[0]); - return; - } - - if (policy == null) { - policy = batchPolicyDefault; - } - - CommandProxy command = new BatchProxy.GetArrayCommand(executor, policy, listener, keys, null, ops, Command.INFO1_READ, true); - command.execute(); - } - - /** - * Asynchronously read multiple records for specified keys using read operations in one batch call. - *
- * Each record result is returned in separate onRecord() calls. - * If a key is not found, the record will be null. - * - * @param eventLoop ignored, pass in null - * @param listener where to send results - * @param policy batch configuration parameters, pass in null for defaults - * @param keys array of unique record identifiers - * @param ops array of read operations on record - * @throws AerospikeException if event loop registration fails - */ - @Override - public void get(EventLoop eventLoop, RecordSequenceListener listener, BatchPolicy policy, Key[] keys, Operation... ops) { - if (keys.length == 0) { - listener.onSuccess(); - return; - } - - if (policy == null) { - policy = batchPolicyDefault; - } - - CommandProxy command = new BatchProxy.GetSequenceCommand(executor, policy, listener, keys, null, ops, Command.INFO1_READ, true); - command.execute(); - } - - /** - * Read multiple record header data for specified keys in one batch call. - * The returned records are in positional order with the original key array order. - * If a key is not found, the positional record will be null. - * - * @param policy batch configuration parameters, pass in null for defaults - * @param keys array of unique record identifiers - * @return array of records - * @throws AerospikeException.BatchRecords which contains results for keys that did complete - */ - @Override - public Record[] getHeader(BatchPolicy policy, Key[] keys) { - CompletableFuture future = new CompletableFuture<>(); - RecordArrayListener listener = prepareRecordArrayListener(future); - getHeader(null, listener, policy, keys); - return getFuture(future); - } - - /** - * Asynchronously read multiple record header data for specified keys in one batch call. - *
- * The returned records are in positional order with the original key array order. - * If a key is not found, the positional record will be null. - * - * @param eventLoop ignored, pass in null - * @param listener where to send results - * @param policy batch configuration parameters, pass in null for defaults - * @param keys array of unique record identifiers - * @throws AerospikeException if event loop registration fails - */ - @Override - public void getHeader(EventLoop eventLoop, RecordArrayListener listener, BatchPolicy policy, Key[] keys) { - if (keys.length == 0) { - listener.onSuccess(keys, new Record[0]); - return; - } - - if (policy == null) { - policy = batchPolicyDefault; - } - - CommandProxy command = new BatchProxy.GetArrayCommand(executor, policy, listener, keys, null, null, Command.INFO1_READ | Command.INFO1_NOBINDATA, false); - command.execute(); - } - - /** - * Asynchronously read multiple record header data for specified keys in one batch call. - *
- * Each record result is returned in separate onRecord() calls. - * If a key is not found, the record will be null. - * - * @param eventLoop ignored, pass in null - * @param listener where to send results - * @param policy batch configuration parameters, pass in null for defaults - * @param keys array of unique record identifiers - * @throws AerospikeException if event loop registration fails - */ - @Override - public void getHeader(EventLoop eventLoop, RecordSequenceListener listener, BatchPolicy policy, Key[] keys) { - if (keys.length == 0) { - listener.onSuccess(); - return; - } - - if (policy == null) { - policy = batchPolicyDefault; - } - - CommandProxy command = new BatchProxy.GetSequenceCommand(executor, policy, listener, keys, null, null, Command.INFO1_READ | Command.INFO1_NOBINDATA, false); - command.execute(); - } - - //------------------------------------------------------- - // Generic Database Operations - //------------------------------------------------------- - - /** - * Perform multiple read/write operations on a single key in one batch call. - * An example would be to add an integer value to an existing record and then - * read the result, all in one database call. - *
- * The server executes operations in the same order as the operations array. - * Both scalar bin operations (Operation) and CDT bin operations (ListOperation, - * MapOperation) can be performed in same call. - * - * @param policy write configuration parameters, pass in null for defaults - * @param key unique record identifier - * @param operations database operations to perform - * @return record if there is a read in the operations list - * @throws AerospikeException if command fails - */ - @Override - public Record operate(WritePolicy policy, Key key, Operation... operations) { - CompletableFuture future = new CompletableFuture<>(); - RecordListener listener = prepareRecordListener(future); - operate(null, listener, policy, key, operations); - return getFuture(future); - } - - /** - * Asynchronously perform multiple read/write operations on a single key in one batch call. - *
- * An example would be to add an integer value to an existing record and then - * read the result, all in one database call. - *
- * The server executes operations in the same order as the operations array. - * Both scalar bin operations (Operation) and CDT bin operations (ListOperation, - * MapOperation) can be performed in same call. - * - * @param eventLoop ignored, pass in null - * @param listener where to send results, pass in null for fire and forget - * @param policy write configuration parameters, pass in null for defaults - * @param key unique record identifier - * @param operations database operations to perform - * @throws AerospikeException if event loop registration fails - */ - @Override - public void operate(EventLoop eventLoop, RecordListener listener, WritePolicy policy, Key key, Operation... operations) { - OperateArgs args = new OperateArgs(policy, writePolicyDefault, operatePolicyReadDefault, operations); - OperateCommandProxy command = new OperateCommandProxy(executor, listener, args.writePolicy, key, args); - command.execute(); - } - - //------------------------------------------------------- - // Batch Read/Write Operations - //------------------------------------------------------- - - /** - * Read/Write multiple records for specified batch keys in one batch call. - * This method allows different namespaces/bins for each key in the batch. - * The returned records are located in the same list. - *

- * {@link BatchRecord} can be {@link BatchRead}, {@link BatchWrite}, {@link BatchDelete} or - * {@link BatchUDF}. - * - * @param policy batch configuration parameters, pass in null for defaults - * @param records list of unique record identifiers and read/write operations - * @return true if all batch sub-commands succeeded - * @throws AerospikeException if command fails - */ - @Override - public boolean operate(BatchPolicy policy, List records) { - CompletableFuture future = new CompletableFuture<>(); - BatchOperateListListener listener = prepareBatchOperateListListener(future); - operate(null, listener, policy, records); - return getFuture(future); - } - - /** - * Asynchronously read/write multiple records for specified batch keys in one batch call. - *

- * This method allows different namespaces/bins to be requested for each key in the batch. - * The returned records are located in the same list. - *
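A hedged sketch of the list form with mixed sub-commands (placeholder keys and bins; requires java.util.List/ArrayList and the com.aerospike.client batch classes):

    List<BatchRecord> records = new ArrayList<>();
    records.add(new BatchWrite(new Key("test", "demo", "w1"),
        Operation.array(Operation.put(new Bin("a", 1)))));
    records.add(new BatchRead(new Key("test", "demo", "r1"), true));   // read all bins
    records.add(new BatchDelete(new Key("test", "demo", "d1")));
    boolean allSucceeded = client.operate(null, records);              // null -> batch parent write default
    for (BatchRecord br : records) {
        System.out.println(br.resultCode + " " + br.record);           // per-sub-command outcome
    }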

- * {@link BatchRecord} can be {@link BatchRead}, {@link BatchWrite}, {@link BatchDelete} or - * {@link BatchUDF}. - * - * @param eventLoop ignored, pass in null - * @param listener where to send results - * @param policy batch configuration parameters, pass in null for defaults - * @param records list of unique record identifiers and read/write operations - * @throws AerospikeException if event loop registration fails - */ - @Override - public void operate( - EventLoop eventLoop, - BatchOperateListListener listener, - BatchPolicy policy, - List records - ) { - if (records.size() == 0) { - listener.onSuccess(records, true); - return; - } - - if (policy == null) { - policy = batchParentPolicyWriteDefault; - } - - CommandProxy command = new BatchProxy.OperateListCommand(this, executor, policy, listener, records); - command.execute(); - } - - /** - * Asynchronously read/write multiple records for specified batch keys in one batch call. - *

- * This method allows different namespaces/bins to be requested for each key in the batch. - * Each record result is returned in separate onRecord() calls. - *

- * {@link BatchRecord} can be {@link BatchRead}, {@link BatchWrite}, {@link BatchDelete} or - * {@link BatchUDF}. - * - * @param eventLoop ignored, pass in null - * @param listener where to send results - * @param policy batch configuration parameters, pass in null for defaults - * @param records list of unique record identifiers and read/write operations - * @throws AerospikeException if event loop registration fails - */ - @Override - public void operate( - EventLoop eventLoop, - BatchRecordSequenceListener listener, - BatchPolicy policy, - List records - ) { - if (records.size() == 0) { - listener.onSuccess(); - return; - } - - if (policy == null) { - policy = batchParentPolicyWriteDefault; - } - - CommandProxy command = new BatchProxy.OperateSequenceCommand(this, executor, policy, listener, records); - command.execute(); - } - - /** - * Perform read/write operations on multiple keys. If a key is not found, the corresponding result - * {@link BatchRecord#resultCode} will be {@link ResultCode#KEY_NOT_FOUND_ERROR}. - * - * @param batchPolicy batch configuration parameters, pass in null for defaults - * @param writePolicy write configuration parameters, pass in null for defaults - * @param keys array of unique record identifiers - * @param ops - * read/write operations to perform. {@link Operation#get()} is not allowed because it returns a - * variable number of bins and makes it difficult (sometimes impossible) to lineup operations - * with results. Instead, use {@link Operation#get(String)} for each bin name. - * @throws AerospikeException.BatchRecordArray which contains results for keys that did complete - */ - @Override - public BatchResults operate( - BatchPolicy batchPolicy, - BatchWritePolicy writePolicy, - Key[] keys, - Operation... ops - ) { - CompletableFuture future = new CompletableFuture<>(); - BatchRecordArrayListener listener = prepareBatchRecordArrayListener(future); - operate(null, listener, batchPolicy, writePolicy, keys, ops); - return getFuture(future); - } - - /** - * Asynchronously perform read/write operations on multiple keys. - *
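The synchronous key-array form just above might be exercised like this (placeholder data; note the per-bin Operation.get(String), since the no-argument Operation.get() is disallowed here):

    Key[] batchKeys = { new Key("test", "demo", 1), new Key("test", "demo", 2) };
    BatchResults results = client.operate(null, null, batchKeys,
        Operation.add(new Bin("count", 1)),
        Operation.get("count"));
    for (BatchRecord br : results.records) {
        System.out.println(br.resultCode + " " + br.record);
    }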

- * If a key is not found, the corresponding result {@link BatchRecord#resultCode} will be - * {@link ResultCode#KEY_NOT_FOUND_ERROR}. - * - * @param eventLoop ignored, pass in null - * @param listener where to send results - * @param batchPolicy batch configuration parameters, pass in null for defaults - * @param writePolicy write configuration parameters, pass in null for defaults - * @param keys array of unique record identifiers - * @param ops - * read/write operations to perform. {@link Operation#get()} is not allowed because it returns a - * variable number of bins and makes it difficult (sometimes impossible) to lineup operations - * with results. Instead, use {@link Operation#get(String)} for each bin name. - * @throws AerospikeException if event loop registration fails - */ - @Override - public void operate( - EventLoop eventLoop, - BatchRecordArrayListener listener, - BatchPolicy batchPolicy, - BatchWritePolicy writePolicy, - Key[] keys, - Operation... ops - ) { - if (keys.length == 0) { - listener.onSuccess(new BatchRecord[0], true); - return; - } - - if (batchPolicy == null) { - batchPolicy = batchParentPolicyWriteDefault; - } - - if (writePolicy == null) { - writePolicy = batchWritePolicyDefault; - } - - BatchAttr attr = new BatchAttr(batchPolicy, writePolicy, ops); - - CommandProxy command = new BatchProxy.OperateRecordArrayCommand(executor, - batchPolicy, keys, ops, listener, attr); - - command.execute(); - } - - /** - * Asynchronously perform read/write operations on multiple keys. - *

- * Each record result is returned in separate onRecord() calls. - * If a key is not found, the corresponding result {@link BatchRecord#resultCode} will be - * {@link ResultCode#KEY_NOT_FOUND_ERROR}. - * - * @param eventLoop ignored, pass in null - * @param listener where to send results - * @param batchPolicy batch configuration parameters, pass in null for defaults - * @param writePolicy write configuration parameters, pass in null for defaults - * @param keys array of unique record identifiers - * @param ops - * read/write operations to perform. {@link Operation#get()} is not allowed because it returns a - * variable number of bins and makes it difficult (sometimes impossible) to lineup operations - * with results. Instead, use {@link Operation#get(String)} for each bin name. - * @throws AerospikeException if event loop registration fails - */ - @Override - public void operate( - EventLoop eventLoop, - BatchRecordSequenceListener listener, - BatchPolicy batchPolicy, - BatchWritePolicy writePolicy, - Key[] keys, - Operation... ops - ) { - if (keys.length == 0) { - listener.onSuccess(); - return; - } - - if (batchPolicy == null) { - batchPolicy = batchParentPolicyWriteDefault; - } - - if (writePolicy == null) { - writePolicy = batchWritePolicyDefault; - } - - BatchAttr attr = new BatchAttr(batchPolicy, writePolicy, ops); - - CommandProxy command = new BatchProxy.OperateRecordSequenceCommand(executor, - batchPolicy, keys, ops, listener, attr); - - command.execute(); - } - - //------------------------------------------------------- - // Scan Operations - //------------------------------------------------------- - - /** - * Read all records in specified namespace and set. - *

- * This call will block until the scan is complete - callbacks are made - * within the scope of this call. - * - * @param policy scan configuration parameters, pass in null for defaults - * @param namespace namespace - equivalent to database name - * @param setName optional set name - equivalent to database table - * @param callback read callback method - called with record data - * @param binNames optional bin to retrieve. All bins will be returned if not specified. - * @throws AerospikeException if scan fails - */ - @Override - public void scanAll( - ScanPolicy policy, - String namespace, - String setName, - ScanCallback callback, - String... binNames - ) { - CompletableFuture future = new CompletableFuture<>(); - RecordSequenceListener listener = new RecordSequenceListenerToCallback(callback, future); - scanPartitions(null, listener, policy, null, namespace, setName, binNames); - getFuture(future); - } - - /** - * Asynchronously read all records in specified namespace and set. - * - * @param eventLoop ignored, pass in null - * @param listener where to send results - * @param policy scan configuration parameters, pass in null for defaults - * @param namespace namespace - equivalent to database name - * @param setName optional set name - equivalent to database table - * @param binNames optional bin to retrieve. All bins will be returned if not specified. - * @throws AerospikeException if event loop registration fails - */ - @Override - public void scanAll( - EventLoop eventLoop, - RecordSequenceListener listener, - ScanPolicy policy, - String namespace, - String setName, - String... binNames - ) { - scanPartitions(eventLoop, listener, policy, null, namespace, setName, binNames); - } - - /** - * Not supported in proxy client. - */ - @Override - public void scanNode( - ScanPolicy policy, - String nodeName, - String namespace, - String setName, - ScanCallback callback, - String... binNames - ) { - throw new AerospikeException(NotSupported + "scanNode"); - } - - /** - * Not supported in proxy client. - */ - @Override - public void scanNode( - ScanPolicy policy, - Node node, - String namespace, - String setName, - ScanCallback callback, - String... binNames - ) { - throw new AerospikeException(NotSupported + "scanNode"); - } - - /** - * Read records in specified namespace, set and partition filter. - *
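A minimal synchronous scan sketch (placeholder namespace, set, and bin names; the callback runs within the scope of the blocking call, as noted above):

    client.scanAll(null, "test", "demo", (key, record) -> {
        // Called once per record; only the requested bins are populated.
        System.out.println(key.userKey + " -> " + record.bins);
    }, "name", "age");

    // Or restrict the scan to a subset of partitions:
    client.scanPartitions(null, PartitionFilter.range(0, 1024), "test", "demo",
        (key, record) -> {}, "name");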

- * This call will block until the scan is complete - callbacks are made - * within the scope of this call. - * - * @param policy scan configuration parameters, pass in null for defaults - * @param partitionFilter filter on a subset of data partitions - * @param namespace namespace - equivalent to database name - * @param setName optional set name - equivalent to database table - * @param callback read callback method - called with record data - * @param binNames optional bin to retrieve. All bins will be returned if not specified - * @throws AerospikeException if scan fails - */ - @Override - public void scanPartitions( - ScanPolicy policy, - PartitionFilter partitionFilter, - String namespace, - String setName, - ScanCallback callback, - String... binNames - ) { - CompletableFuture future = new CompletableFuture<>(); - RecordSequenceListener listener = new RecordSequenceListenerToCallback(callback, future); - scanPartitions(null, listener, policy, partitionFilter, namespace, setName, binNames); - getFuture(future); - } - - /** - * Asynchronously read records in specified namespace, set and partition filter. - * - * @param eventLoop ignored, pass in null - * @param listener where to send results - * @param policy scan configuration parameters, pass in null for defaults - * @param partitionFilter filter on a subset of data partitions - * @param namespace namespace - equivalent to database name - * @param setName optional set name - equivalent to database table - * @param binNames optional bin to retrieve. All bins will be returned if not specified. - * @throws AerospikeException if event loop registration fails - */ - @Override - public void scanPartitions( - EventLoop eventLoop, - RecordSequenceListener listener, - ScanPolicy policy, - PartitionFilter partitionFilter, - String namespace, - String setName, - String... binNames - ) { - if (policy == null) { - policy = scanPolicyDefault; - } - - PartitionTracker tracker = null; - - if (partitionFilter != null) { - tracker = new PartitionTracker(policy, 1, partitionFilter); - } - - ScanCommandProxy command = new ScanCommandProxy(executor, policy, listener, namespace, - setName, binNames, partitionFilter, tracker); - command.execute(); - } - - //--------------------------------------------------------------- - // User defined functions - //--------------------------------------------------------------- - - /** - * Not supported in proxy client. - */ - @Override - public RegisterTask register(Policy policy, String clientPath, String serverPath, Language language) { - throw new AerospikeException(NotSupported + "register"); - } - - /** - * Not supported in proxy client. - */ - @Override - public RegisterTask register( - Policy policy, - ClassLoader resourceLoader, - String resourcePath, - String serverPath, - Language language - ) { - throw new AerospikeException(NotSupported + "register"); - } - - /** - * Not supported in proxy client. - */ - @Override - public RegisterTask registerUdfString(Policy policy, String code, String serverPath, Language language) { - throw new AerospikeException(NotSupported + "registerUdfString"); - } - - /** - * Not supported in proxy client. - */ - @Override - public void removeUdf(InfoPolicy policy, String serverPath) { - throw new AerospikeException(NotSupported + "removeUdf"); - } - - /** - * Execute user defined function on server and return results. - * The function operates on a single record. - * The package name is used to locate the udf file location: - *

- * {@code udf file = /.lua} - * - * @param policy write configuration parameters, pass in null for defaults - * @param key unique record identifier - * @param packageName server package name where user defined function resides - * @param functionName user defined function - * @param functionArgs arguments passed in to user defined function - * @return return value of user defined function - * @throws AerospikeException if transaction fails - */ - @Override - public Object execute(WritePolicy policy, Key key, String packageName, String functionName, Value... functionArgs) { - CompletableFuture future = new CompletableFuture<>(); - ExecuteListener listener = prepareExecuteListener(future); - execute(null, listener, policy, key, packageName, functionName, functionArgs); - return getFuture(future); - } - - /** - * Asynchronously execute user defined function on server. - *
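An illustrative single-record UDF call (package, function, and argument values are placeholders and assume a Lua module already registered on the server):

    Object result = client.execute(null, new Key("test", "demo", "udf-key"),
        "example_udfs",        // package name -> example_udfs.lua on the server
        "touch_and_report",    // function name inside that module
        Value.get(42));        // function argument
    // "result" is whatever the Lua function returns, or null.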

- * The function operates on a single record. - * The package name is used to locate the udf file location: - *

- * {@code udf file = /.lua} - * - * @param eventLoop ignored, pass in null - * @param listener where to send results, pass in null for fire and forget - * @param policy write configuration parameters, pass in null for defaults - * @param key unique record identifier - * @param packageName server package name where user defined function resides - * @param functionName user defined function - * @param functionArgs arguments passed in to user defined function - * @throws AerospikeException if event loop registration fails - */ - @Override - public void execute( - EventLoop eventLoop, - ExecuteListener listener, - WritePolicy policy, - Key key, - String packageName, - String functionName, - Value... functionArgs - ) { - if (policy == null) { - policy = writePolicyDefault; - } - ExecuteCommandProxy command = new ExecuteCommandProxy(executor, listener, policy, key, - packageName, functionName, functionArgs); - command.execute(); - } - - /** - * Execute user defined function on server for each key and return results. - * The package name is used to locate the udf file location: - *

- * {@code udf file = /.lua} - * - * @param batchPolicy batch configuration parameters, pass in null for defaults - * @param udfPolicy udf configuration parameters, pass in null for defaults - * @param keys array of unique record identifiers - * @param packageName server package name where user defined function resides - * @param functionName user defined function - * @param functionArgs arguments passed in to user defined function - * @throws AerospikeException.BatchRecordArray which contains results for keys that did complete - */ - @Override - public BatchResults execute( - BatchPolicy batchPolicy, - BatchUDFPolicy udfPolicy, - Key[] keys, - String packageName, - String functionName, - Value... functionArgs - ) { - CompletableFuture future = new CompletableFuture<>(); - BatchRecordArrayListener listener = prepareBatchRecordArrayListener(future); - execute(null, listener, batchPolicy, udfPolicy, keys, packageName, functionName, functionArgs); - return getFuture(future); - } - - /** - * Asynchronously execute user defined function on server for each key and return results. - *
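The batch UDF form above could be driven roughly like this (same placeholder Lua module as before; null policies fall back to the documented defaults):

    Key[] udfKeys = { new Key("test", "demo", 10), new Key("test", "demo", 11) };
    BatchResults udfResults = client.execute(null, null, udfKeys,
        "example_udfs", "touch_and_report", Value.get(42));
    for (BatchRecord br : udfResults.records) {
        System.out.println(br.resultCode + " " + br.record);   // UDF errors surface per record
    }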

- * The package name is used to locate the udf file location: - *

- * {@code udf file = /.lua} - * - * @param eventLoop ignored, pass in null - * @param listener where to send results - * @param batchPolicy batch configuration parameters, pass in null for defaults - * @param udfPolicy udf configuration parameters, pass in null for defaults - * @param keys array of unique record identifiers - * @param packageName server package name where user defined function resides - * @param functionName user defined function - * @param functionArgs arguments passed in to user defined function - * @throws AerospikeException if command fails - */ - @Override - public void execute( - EventLoop eventLoop, - BatchRecordArrayListener listener, - BatchPolicy batchPolicy, - BatchUDFPolicy udfPolicy, - Key[] keys, - String packageName, - String functionName, - Value... functionArgs - ) { - if (keys.length == 0) { - listener.onSuccess(new BatchRecord[0], true); - return; - } - - if (batchPolicy == null) { - batchPolicy = batchParentPolicyWriteDefault; - } - - if (udfPolicy == null) { - udfPolicy = batchUDFPolicyDefault; - } - - byte[] argBytes = Packer.pack(functionArgs); - - BatchAttr attr = new BatchAttr(); - attr.setUDF(udfPolicy); - - CommandProxy command = new BatchProxy.UDFArrayCommand(executor, batchPolicy, - listener, keys, packageName, functionName, argBytes, attr); - - command.execute(); - } - - /** - * Asynchronously execute user defined function on server for each key and return results. - * Each record result is returned in separate onRecord() calls. - *

- * The package name is used to locate the udf file location: - *

- * {@code udf file = /.lua} - * - * @param eventLoop ignored, pass in null - * @param listener where to send results - * @param batchPolicy batch configuration parameters, pass in null for defaults - * @param udfPolicy udf configuration parameters, pass in null for defaults - * @param keys array of unique record identifiers - * @param packageName server package name where user defined function resides - * @param functionName user defined function - * @param functionArgs arguments passed in to user defined function - * @throws AerospikeException if command fails - */ - @Override - public void execute( - EventLoop eventLoop, - BatchRecordSequenceListener listener, - BatchPolicy batchPolicy, - BatchUDFPolicy udfPolicy, - Key[] keys, - String packageName, - String functionName, - Value... functionArgs - ) { - if (keys.length == 0) { - listener.onSuccess(); - return; - } - - if (batchPolicy == null) { - batchPolicy = batchParentPolicyWriteDefault; - } - - if (udfPolicy == null) { - udfPolicy = batchUDFPolicyDefault; - } - - byte[] argBytes = Packer.pack(functionArgs); - - BatchAttr attr = new BatchAttr(); - attr.setUDF(udfPolicy); - - CommandProxy command = new BatchProxy.UDFSequenceCommand(executor, batchPolicy, - listener, keys, packageName, functionName, argBytes, attr); - - command.execute(); - } - - //---------------------------------------------------------- - // Query/Execute - //---------------------------------------------------------- - - /** - * Apply user defined function on records that match the background query statement filter. - * Records are not returned to the client. - * This asynchronous server call will return before the command is complete. - * The user can optionally wait for command completion by using the returned - * ExecuteTask instance. - * - * @param policy write configuration parameters, pass in null for defaults - * @param statement background query definition - * @param packageName server package where user defined function resides - * @param functionName function name - * @param functionArgs to pass to function name, if any - * @throws AerospikeException if command fails - */ - @Override - public ExecuteTask execute( - WritePolicy policy, - Statement statement, - String packageName, - String functionName, - Value... functionArgs - ) { - statement.setAggregateFunction(packageName, functionName, functionArgs); - return executeBackgroundTask(policy, statement); - } - - /** - * Apply operations on records that match the background query statement filter. - * Records are not returned to the client. - * This asynchronous server call will return before the command is complete. - * The user can optionally wait for command completion by using the returned - * ExecuteTask instance. - * - * @param policy write configuration parameters, pass in null for defaults - * @param statement background query definition - * @param operations list of operations to be performed on selected records - * @throws AerospikeException if command fails - */ - @Override - public ExecuteTask execute(WritePolicy policy, Statement statement, Operation... operations) { - if (operations.length > 0) { - statement.setOperations(operations); - } - return executeBackgroundTask(policy, statement); - } - - //-------------------------------------------------------- - // Query functions - //-------------------------------------------------------- - - /** - * Execute query on all server nodes and return record iterator. The query executor puts - * records on a queue in separate threads. 
The calling thread concurrently pops records off - * the queue through the record iterator. - *

- * This method is not recommended for paginated queries when the user does not iterate through - * all records in the RecordSet. In this case, there is a lag between when the client marks the - * last record retrieved from the server and when the record is retrieved from the RecordSet. - * For this case, use {@link #query(QueryPolicy, Statement, QueryListener)} which uses a listener - * callback (without a buffer) instead of a RecordSet. - * - * @param policy query configuration parameters, pass in null for defaults - * @param statement query definition - * @return record iterator - * @throws AerospikeException if query fails - */ - @Override - public RecordSet query(QueryPolicy policy, Statement statement) { - if (policy == null) { - policy = queryPolicyDefault; - } - - // @Ashish taskId will be zero by default here. - RecordSequenceRecordSet recordSet = new RecordSequenceRecordSet(statement.getTaskId(), policy.recordQueueSize); - query(null, recordSet, policy, statement); - return recordSet; - } - - /** - * Asynchronously execute query on all server nodes. - *
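A basic iterator-style query sketch (placeholder bin name; Filter.equal assumes a secondary index exists on that bin):

    Statement stmt = new Statement();
    stmt.setNamespace("test");
    stmt.setSetName("demo");
    stmt.setFilter(Filter.equal("age", 30));
    try (RecordSet rs = client.query(null, stmt)) {
        while (rs.next()) {
            // Consume each record as it is popped off the internal queue.
            System.out.println(rs.getKey() + " " + rs.getRecord());
        }
    }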

- * Each record result is returned in separate onRecord() calls. - * - * @param eventLoop ignored, pass in null - * @param listener where to send results - * @param policy query configuration parameters, pass in null for defaults - * @param statement query definition - * @throws AerospikeException if event loop registration fails - */ - @Override - public void query(EventLoop eventLoop, RecordSequenceListener listener, QueryPolicy policy, Statement statement) { - if (policy == null) { - policy = queryPolicyDefault; - } - - long taskId = statement.prepareTaskId(); - QueryCommandProxy command = new QueryCommandProxy(executor, listener, - policy, statement, taskId, null, null); - command.execute(); - } - - /** - * Execute query on all server nodes and return records via the listener. This method will - * block until the query is complete. Listener callbacks are made within the scope of this call. - *

- * If {@link com.aerospike.client.policy.QueryPolicy#maxConcurrentNodes} is not 1, the supplied - * listener must handle shared data in a thread-safe manner, because the listener will be called - * by multiple query threads (one thread per node) in parallel. - * - * @param policy query configuration parameters, pass in null for defaults - * @param statement query definition - * @param listener where to send results - * @throws AerospikeException if query fails - */ - @Override - public void query(QueryPolicy policy, Statement statement, QueryListener listener) { - CompletableFuture future = new CompletableFuture<>(); - RecordSequenceToQueryListener adaptor = new RecordSequenceToQueryListener(listener, future); - query(null, adaptor, policy, statement); - getFuture(future); - } - - /** - * Execute query for specified partitions and return records via the listener. This method will - * block until the query is complete. Listener callbacks are made within the scope of this call. - *

- * If {@link com.aerospike.client.policy.QueryPolicy#maxConcurrentNodes} is not 1, the supplied - * listener must handle shared data in a thread-safe manner, because the listener will be called - * by multiple query threads (one thread per node) in parallel. - *

- * The completion status of all partitions is stored in the partitionFilter when the query terminates. - * This partitionFilter can then be used to resume an incomplete query at a later time. - * This is the preferred method for query terminate/resume functionality. - * - * @param policy query configuration parameters, pass in null for defaults - * @param statement query definition - * @param partitionFilter data partition filter. Set to - * {@link com.aerospike.client.query.PartitionFilter#all()} for all partitions. - * @param listener where to send results - * @throws AerospikeException if query fails - */ - @Override - public void query(QueryPolicy policy, Statement statement, PartitionFilter partitionFilter, QueryListener listener) { - CompletableFuture future = new CompletableFuture<>(); - RecordSequenceToQueryListener adaptor = new RecordSequenceToQueryListener(listener, future); - queryPartitions(null, adaptor, policy, statement, partitionFilter); - getFuture(future); - } - - /** - * Not supported in proxy client. - */ - @Override - public RecordSet queryNode(QueryPolicy policy, Statement statement, Node node) { - throw new AerospikeException(NotSupported + "queryNode"); - } - - /** - * Execute query for specified partitions and return record iterator. The query executor puts - * records on a queue in separate threads. The calling thread concurrently pops records off - * the queue through the record iterator. - * - * @param policy query configuration parameters, pass in null for defaults - * @param statement query definition - * @param partitionFilter filter on a subset of data partitions - * @throws AerospikeException if query fails - */ - @Override - public RecordSet queryPartitions(QueryPolicy policy, Statement statement, PartitionFilter partitionFilter) { - if (policy == null) { - policy = queryPolicyDefault; - } - - // @Ashish taskId will be zero by default here. - RecordSequenceRecordSet recordSet = new RecordSequenceRecordSet(statement.getTaskId(), policy.recordQueueSize); - queryPartitions(null, recordSet, policy, statement, partitionFilter); - return recordSet; - } - - /** - * Asynchronously execute query for specified partitions. - *
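For the terminate/resume pattern described above, a hedged sketch (reuses the "stmt" from the previous sketch; AtomicLong is java.util.concurrent.atomic.AtomicLong, and any shared state must be thread-safe when maxConcurrentNodes is not 1):

    AtomicLong count = new AtomicLong();
    PartitionFilter filter = PartitionFilter.all();
    client.query(null, stmt, filter, (key, record) -> count.incrementAndGet());

    if (! filter.isDone()) {
        // The filter remembers which partitions completed; passing the same instance
        // into a later query(...) call resumes where this run stopped.
    }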

- * Each record result is returned in separate onRecord() calls. - * - * @param eventLoop ignored, pass in null - * @param listener where to send results - * @param policy query configuration parameters, pass in null for defaults - * @param statement query definition - * @param partitionFilter filter on a subset of data partitions - * @throws AerospikeException if query fails - */ - @Override - public void queryPartitions( - EventLoop eventLoop, - RecordSequenceListener listener, - QueryPolicy policy, - Statement statement, - PartitionFilter partitionFilter - ) { - if (policy == null) { - policy = queryPolicyDefault; - } - - long taskId = statement.prepareTaskId(); - PartitionTracker tracker = new PartitionTracker(policy, statement, 1, partitionFilter); - QueryCommandProxy command = new QueryCommandProxy(executor, listener, policy, - statement, taskId, partitionFilter, tracker); - command.execute(); - } - - /** - * Execute query, apply statement's aggregation function, and return result iterator. The query - * executor puts results on a queue in separate threads. The calling thread concurrently pops - * results off the queue through the result iterator. - *

- * The aggregation function is called on both server and client (final reduce). Therefore, - * the Lua script files must also reside on both server and client. - * The package name is used to locate the udf file location: - *

- * {@code udf file = /.lua} - * - * @param policy query configuration parameters, pass in null for defaults - * @param statement query definition - * @param packageName server package where user defined function resides - * @param functionName aggregation function name - * @param functionArgs arguments to pass to function name, if any - * @return result iterator - * @throws AerospikeException if query fails - */ - @Override - public ResultSet queryAggregate( - QueryPolicy policy, - Statement statement, - String packageName, - String functionName, - Value... functionArgs - ) { - statement.setAggregateFunction(packageName, functionName, functionArgs); - return queryAggregate(policy, statement); - } - - /** - * Execute query, apply statement's aggregation function, and return result iterator. - * The aggregation function should be initialized via the statement's setAggregateFunction() - * and should be located in a resource or a filesystem file. - *
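A rough aggregation sketch (placeholder Lua package and function, registered on the server and also available to the client as described above):

    Statement aggStmt = new Statement();
    aggStmt.setNamespace("test");
    aggStmt.setSetName("demo");
    aggStmt.setAggregateFunction("example_udfs", "sum_bin", Value.get("count"));
    try (ResultSet rs = client.queryAggregate(null, aggStmt)) {
        while (rs.next()) {
            Object row = rs.getObject();   // final reduced value from the Lua aggregation
            System.out.println(row);
        }
    }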

- * The query executor puts results on a queue in separate threads. The calling thread - * concurrently pops results off the queue through the ResultSet iterator. - * The aggregation function is called on both server and client (final reduce). - * Therefore, the Lua script file must also reside on both server and client. - * - * @param policy query configuration parameters, pass in null for defaults - * @param statement query definition - * @throws AerospikeException if query fails - */ - @Override - public ResultSet queryAggregate(QueryPolicy policy, Statement statement) { - if (policy == null) { - policy = queryPolicyDefault; - } - - long taskId = statement.prepareTaskId(); - QueryAggregateCommandProxy commandProxy = new QueryAggregateCommandProxy( - executor, threadFactory, policy, statement, taskId); - commandProxy.execute(); - return commandProxy.getResultSet(); - } - - /** - * Not supported in proxy client. - */ - @Override - public ResultSet queryAggregateNode(QueryPolicy policy, Statement statement, Node node) { - throw new AerospikeException(NotSupported + "queryAggregateNode"); - } - - //-------------------------------------------------------- - // Secondary Index functions - //-------------------------------------------------------- - - /** - * Not supported in proxy client. - */ - @Override - public IndexTask createIndex( - Policy policy, - String namespace, - String setName, - String indexName, - String binName, - IndexType indexType - ) { - throw new AerospikeException(NotSupported + "createIndex"); - } - - /** - * Not supported in proxy client. - */ - @Override - public IndexTask createIndex( - Policy policy, - String namespace, - String setName, - String indexName, - String binName, - IndexType indexType, - IndexCollectionType indexCollectionType, - CTX... ctx - ) { - throw new AerospikeException(NotSupported + "createIndex"); - } - - /** - * Not supported in proxy client. - */ - @Override - public void createIndex( - EventLoop eventLoop, - IndexListener listener, - Policy policy, - String namespace, - String setName, - String indexName, - String binName, - IndexType indexType, - IndexCollectionType indexCollectionType, - CTX... ctx - ) { - throw new AerospikeException(NotSupported + "createIndex"); - } - - /** - * Not supported in proxy client. - */ - @Override - public IndexTask dropIndex(Policy policy, String namespace, String setName, String indexName) { - throw new AerospikeException(NotSupported + "dropIndex"); - } - - /** - * Not supported in proxy client. - */ - @Override - public void dropIndex( - EventLoop eventLoop, - IndexListener listener, - Policy policy, - String namespace, - String setName, - String indexName - ) { - throw new AerospikeException(NotSupported + "dropIndex"); - } - - //----------------------------------------------------------------- - // Async Info functions (sync info functions located in Info class) - //----------------------------------------------------------------- - - /** - * Not supported in proxy client. - */ - @Override - public void info(EventLoop eventLoop, InfoListener listener, InfoPolicy policy, Node node, String... commands) { - throw new AerospikeException(NotSupported + "info"); - } - - //----------------------------------------------------------------- - // XDR - Cross datacenter replication - //----------------------------------------------------------------- - - /** - * Not supported in proxy client. 
- */ - @Override - public void setXDRFilter(InfoPolicy policy, String datacenter, String namespace, Expression filter) { - throw new AerospikeException(NotSupported + "setXDRFilter"); - } - - //------------------------------------------------------- - // User administration - //------------------------------------------------------- - - /** - * Not supported in proxy client. - */ - @Override - public void createUser(AdminPolicy policy, String user, String password, List roles) { - throw new AerospikeException(NotSupported + "createUser"); - } - - /** - * Not supported in proxy client. - */ - @Override - public void dropUser(AdminPolicy policy, String user) { - throw new AerospikeException(NotSupported + "dropUser"); - } - - /** - * Not supported in proxy client. - */ - @Override - public void changePassword(AdminPolicy policy, String user, String password) { - throw new AerospikeException(NotSupported + "changePassword"); - } - - /** - * Not supported in proxy client. - */ - @Override - public void grantRoles(AdminPolicy policy, String user, List roles) { - throw new AerospikeException(NotSupported + "grantRoles"); - } - - /** - * Not supported in proxy client. - */ - @Override - public void revokeRoles(AdminPolicy policy, String user, List roles) { - throw new AerospikeException(NotSupported + "revokeRoles"); - } - - /** - * Not supported in proxy client. - */ - @Override - public void createRole(AdminPolicy policy, String roleName, List privileges) { - throw new AerospikeException(NotSupported + "createRole"); - } - - /** - * Not supported in proxy client. - */ - @Override - public void createRole(AdminPolicy policy, String roleName, List privileges, List whitelist) { - throw new AerospikeException(NotSupported + "createRole"); - } - - /** - * Not supported in proxy client. - */ - @Override - public void createRole( - AdminPolicy policy, - String roleName, - List privileges, - List whitelist, - int readQuota, - int writeQuota - ) { - throw new AerospikeException(NotSupported + "createRole"); - } - - /** - * Not supported in proxy client. - */ - @Override - public void dropRole(AdminPolicy policy, String roleName) { - throw new AerospikeException(NotSupported + "dropRole"); - } - - /** - * Not supported in proxy client. - */ - @Override - public void grantPrivileges(AdminPolicy policy, String roleName, List privileges) { - throw new AerospikeException(NotSupported + "grantPrivileges"); - } - - /** - * Not supported in proxy client. - */ - @Override - public void revokePrivileges(AdminPolicy policy, String roleName, List privileges) { - throw new AerospikeException(NotSupported + "revokePrivileges"); - } - - /** - * Not supported in proxy client. - */ - @Override - public void setWhitelist(AdminPolicy policy, String roleName, List whitelist) { - throw new AerospikeException(NotSupported + "setWhitelist"); - } - - /** - * Not supported in proxy client. - */ - @Override - public void setQuotas(AdminPolicy policy, String roleName, int readQuota, int writeQuota) { - throw new AerospikeException(NotSupported + "setQuotas"); - } - - /** - * Not supported in proxy client. - */ - @Override - public User queryUser(AdminPolicy policy, String user) { - throw new AerospikeException(NotSupported + "queryUser"); - } - - /** - * Not supported in proxy client. - */ - @Override - public List queryUsers(AdminPolicy policy) { - throw new AerospikeException(NotSupported + "queryUsers"); - } - - /** - * Not supported in proxy client. 
- */ - @Override - public Role queryRole(AdminPolicy policy, String roleName) { - throw new AerospikeException(NotSupported + "queryRole"); - } - - /** - * Not supported in proxy client. - */ - @Override - public List queryRoles(AdminPolicy policy) { - throw new AerospikeException(NotSupported + "queryRoles"); - } - - //------------------------------------------------------- - // Internal Methods - //------------------------------------------------------- - - private static GrpcClientPolicy toGrpcClientPolicy(ClientPolicy policy) { - List eventLoops = null; - Class channelType = null; - - if (policy.eventLoops != null) { - if (! (policy.eventLoops instanceof NettyEventLoops)) { - throw new AerospikeException(ResultCode.PARAMETER_ERROR, - "Netty event loops are required in proxy client"); - } - - NettyEventLoops nettyLoops = (NettyEventLoops)policy.eventLoops; - NettyEventLoop[] array = nettyLoops.getArray(); - eventLoops = new ArrayList<>(array.length); - - for (NettyEventLoop loop : array) { - eventLoops.add(loop.get()); - } - - channelType = nettyLoops.getSocketChannelClass(); - } - - int maxConnections = Math.min(MAX_CONNECTIONS, Math.max(MIN_CONNECTIONS, - Math.max(policy.asyncMaxConnsPerNode, policy.maxConnsPerNode))); - - return GrpcClientPolicy.newBuilder(eventLoops, channelType) - .maxChannels(maxConnections) - .connectTimeoutMillis(policy.timeout) - .closeTimeout(policy.closeTimeout) - .tlsPolicy(policy.tlsPolicy) - .build(); - } - - private ExecuteTask executeBackgroundTask(WritePolicy policy, Statement statement) { - if (policy == null) { - policy = writePolicyDefault; - } - - CompletableFuture future = new CompletableFuture<>(); - long taskId = statement.prepareTaskId(); - - BackgroundExecuteCommandProxy command = new BackgroundExecuteCommandProxy(executor, policy, - statement, taskId, future); - command.execute(); - - // Check whether the background task started. 
- getFuture(future); - - return new ExecuteTaskProxy(executor, taskId, statement.isScan()); - } - - private static WriteListener prepareWriteListener(final CompletableFuture future) { - return new WriteListener() { - @Override - public void onSuccess(Key key) { - future.complete(null); - } - - @Override - public void onFailure(AerospikeException ae) { - future.completeExceptionally(ae); - } - }; - } - - private static DeleteListener prepareDeleteListener(final CompletableFuture future) { - return new DeleteListener() { - @Override - public void onSuccess(Key key, boolean existed) { - future.complete(existed); - } - - @Override - public void onFailure(AerospikeException ae) { - future.completeExceptionally(ae); - } - }; - } - - private static RecordListener prepareRecordListener(final CompletableFuture future) { - return new RecordListener() { - @Override - public void onSuccess(Key key, Record record) { - future.complete(record); - } - - @Override - public void onFailure(AerospikeException ae) { - future.completeExceptionally(ae); - } - }; - } - - private static ExistsListener prepareExistsListener(final CompletableFuture future) { - return new ExistsListener() { - @Override - public void onSuccess(Key key, boolean exists) { - future.complete(exists); - } - - @Override - public void onFailure(AerospikeException ae) { - future.completeExceptionally(ae); - } - }; - } - - private static ExecuteListener prepareExecuteListener(final CompletableFuture future) { - return new ExecuteListener() { - @Override - public void onSuccess(Key key, Object obj) { - future.complete(obj); - } - - @Override - public void onFailure(AerospikeException ae) { - future.completeExceptionally(ae); - } - }; - } - - private static ExistsArrayListener prepareExistsArrayListener(final CompletableFuture future) { - return new ExistsArrayListener() { - @Override - public void onSuccess(Key[] keys, boolean[] exists) { - future.complete(exists); - } - - @Override - public void onFailure(AerospikeException ae) { - future.completeExceptionally(ae); - } - }; - } - - private static RecordArrayListener prepareRecordArrayListener(final CompletableFuture future) { - return new RecordArrayListener() { - @Override - public void onSuccess(Key[] keys, Record[] records) { - future.complete(records); - } - - @Override - public void onFailure(AerospikeException ae) { - future.completeExceptionally(ae); - } - }; - } - - private static BatchListListenerSync prepareBatchListListenerSync(final CompletableFuture future) { - return new BatchListListenerSync() { - @Override - public void onSuccess(List records, boolean status) { - future.complete(status); - } - - @Override - public void onFailure(AerospikeException ae) { - future.completeExceptionally(ae); - } - }; - } - - private static BatchOperateListListener prepareBatchOperateListListener(final CompletableFuture future) { - return new BatchOperateListListener() { - @Override - public void onSuccess(List records, boolean status) { - future.complete(status); - } - - @Override - public void onFailure(AerospikeException ae) { - future.completeExceptionally(ae); - } - }; - } - - private static BatchRecordArrayListener prepareBatchRecordArrayListener(final CompletableFuture future) { - return new BatchRecordArrayListener() { - @Override - public void onSuccess(BatchRecord[] records, boolean status) { - future.complete(new BatchResults(records, status)); - } - - @Override - public void onFailure(BatchRecord[] records, AerospikeException ae) { - future.completeExceptionally(new 
AerospikeException.BatchRecordArray(records, ae)); - } - }; - } - - static T getFuture(final CompletableFuture future) { - try { - return future.get(); - } - catch (ExecutionException e) { - if (e.getCause() instanceof AerospikeException) { - throw (AerospikeException)e.getCause(); - } - throw new AerospikeException(e); - } - catch (InterruptedException e) { - Thread.currentThread().interrupt(); - throw new AerospikeException(e); - } - } -} diff --git a/proxy/src/com/aerospike/client/proxy/BackgroundExecuteCommandProxy.java b/proxy/src/com/aerospike/client/proxy/BackgroundExecuteCommandProxy.java deleted file mode 100644 index f8ae22bc9..000000000 --- a/proxy/src/com/aerospike/client/proxy/BackgroundExecuteCommandProxy.java +++ /dev/null @@ -1,87 +0,0 @@ -/* - * Copyright 2012-2023 Aerospike, Inc. - * - * Portions may be licensed to Aerospike, Inc. under one or more contributor - * license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package com.aerospike.client.proxy; - -import java.util.concurrent.CompletableFuture; - -import com.aerospike.client.AerospikeException; -import com.aerospike.client.ResultCode; -import com.aerospike.client.command.Command; -import com.aerospike.client.policy.WritePolicy; -import com.aerospike.client.proxy.grpc.GrpcCallExecutor; -import com.aerospike.client.proxy.grpc.GrpcConversions; -import com.aerospike.client.query.Statement; -import com.aerospike.proxy.client.Kvs; -import com.aerospike.proxy.client.QueryGrpc; - -/** - * Implements asynchronous background query execute for the proxy. - */ -public class BackgroundExecuteCommandProxy extends MultiCommandProxy { - private final Statement statement; - private final long taskId; - private final CompletableFuture future; - - public BackgroundExecuteCommandProxy( - GrpcCallExecutor executor, - WritePolicy writePolicy, - Statement statement, - long taskId, - CompletableFuture future - ) { - super(QueryGrpc.getBackgroundExecuteStreamingMethod(), executor, writePolicy); - this.statement = statement; - this.taskId = taskId; - this.future = future; - } - - @Override - void writeCommand(Command command) { - // Nothing to do since there is no Aerospike payload. - } - - @Override - void parseResult(Parser parser) { - RecordProxy recordProxy = parseRecordResult(parser, false, true, false); - - // Only on response is expected. - if (recordProxy.resultCode != ResultCode.OK) { - throw new AerospikeException(recordProxy.resultCode); - } - - future.complete(null); - } - - @Override - void onFailure(AerospikeException ae) { - future.completeExceptionally(ae); - } - - @Override - protected Kvs.AerospikeRequestPayload.Builder getRequestBuilder() { - // Set the query parameters in the Aerospike request payload. 
- Kvs.AerospikeRequestPayload.Builder builder = Kvs.AerospikeRequestPayload.newBuilder(); - Kvs.BackgroundExecuteRequest.Builder queryRequestBuilder = - Kvs.BackgroundExecuteRequest.newBuilder(); - - queryRequestBuilder.setWritePolicy(GrpcConversions.toGrpc((WritePolicy)policy)); - queryRequestBuilder.setStatement(GrpcConversions.toGrpc(statement, taskId, 0)); - builder.setBackgroundExecuteRequest(queryRequestBuilder.build()); - - return builder; - } -} diff --git a/proxy/src/com/aerospike/client/proxy/BatchProxy.java b/proxy/src/com/aerospike/client/proxy/BatchProxy.java deleted file mode 100644 index c4ce06f8d..000000000 --- a/proxy/src/com/aerospike/client/proxy/BatchProxy.java +++ /dev/null @@ -1,985 +0,0 @@ -/* - * Copyright 2012-2024 Aerospike, Inc. - * - * Portions may be licensed to Aerospike, Inc. under one or more contributor - * license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package com.aerospike.client.proxy; - -import java.util.List; - -import com.aerospike.client.AerospikeException; -import com.aerospike.client.BatchRead; -import com.aerospike.client.BatchRecord; -import com.aerospike.client.Key; -import com.aerospike.client.Operation; -import com.aerospike.client.Record; -import com.aerospike.client.ResultCode; -import com.aerospike.client.command.BatchAttr; -import com.aerospike.client.command.Command; -import com.aerospike.client.command.Command.KeyIter; -import com.aerospike.client.listener.BatchListListener; -import com.aerospike.client.listener.BatchOperateListListener; -import com.aerospike.client.listener.BatchRecordArrayListener; -import com.aerospike.client.listener.BatchRecordSequenceListener; -import com.aerospike.client.listener.BatchSequenceListener; -import com.aerospike.client.listener.ExistsArrayListener; -import com.aerospike.client.listener.ExistsSequenceListener; -import com.aerospike.client.listener.RecordArrayListener; -import com.aerospike.client.listener.RecordSequenceListener; -import com.aerospike.client.policy.BatchPolicy; -import com.aerospike.client.proxy.grpc.GrpcCallExecutor; -import com.aerospike.proxy.client.KVSGrpc; -import com.aerospike.proxy.client.Kvs; - -/** - * Batch proxy commands. 
- */ -public class BatchProxy { - //------------------------------------------------------- - // Batch Read Record List - //------------------------------------------------------- - - public interface BatchListListenerSync { - public void onSuccess(List records, boolean status); - public void onFailure(AerospikeException ae); - } - - public static final class ReadListCommandSync extends BaseCommand { - private final BatchListListenerSync listener; - private final List records; - private boolean status; - - public ReadListCommandSync( - GrpcCallExecutor executor, - BatchPolicy batchPolicy, - BatchListListenerSync listener, - List records - ) { - super(executor, batchPolicy, true, records.size()); - this.listener = listener; - this.records = records; - this.status = true; - } - - @Override - void writeCommand(Command command) { - BatchRecordIterProxy iter = new BatchRecordIterProxy(records); - command.setBatchOperate(batchPolicy, null, null, null, iter); - } - - @Override - void parse(Parser parser, int resultCode) { - BatchRead record = records.get(parser.batchIndex); - - if (resultCode == 0) { - record.setRecord(parseRecord(parser)); - } - else { - record.setError(resultCode, false); - status = false; - } - } - - @Override - void onSuccess() { - listener.onSuccess(records, status); - } - - @Override - void onFailure(AerospikeException ae) { - listener.onFailure(ae); - } - } - - public static final class ReadListCommand extends BaseCommand { - private final BatchListListener listener; - private final List records; - - public ReadListCommand( - GrpcCallExecutor executor, - BatchPolicy batchPolicy, - BatchListListener listener, - List records - ) { - super(executor, batchPolicy, true, records.size()); - this.listener = listener; - this.records = records; - } - - @Override - void writeCommand(Command command) { - BatchRecordIterProxy iter = new BatchRecordIterProxy(records); - command.setBatchOperate(batchPolicy, null, null, null, iter); - } - - @Override - void parse(Parser parser, int resultCode) { - BatchRead record = records.get(parser.batchIndex); - - if (resultCode == 0) { - record.setRecord(parseRecord(parser)); - } - else { - record.setError(resultCode, false); - } - } - - @Override - void onSuccess() { - listener.onSuccess(records); - } - - @Override - void onFailure(AerospikeException ae) { - listener.onFailure(ae); - } - } - - public static final class ReadSequenceCommand extends BaseCommand { - private final BatchSequenceListener listener; - private final List records; - - public ReadSequenceCommand( - GrpcCallExecutor executor, - BatchPolicy batchPolicy, - BatchSequenceListener listener, - List records - ) { - super(executor, batchPolicy, true, records.size()); - this.listener = listener; - this.records = records; - } - - @Override - void writeCommand(Command command) { - BatchRecordIterProxy iter = new BatchRecordIterProxy(records); - command.setBatchOperate(batchPolicy, null, null, null, iter); - } - - @Override - void parse(Parser parser, int resultCode) { - BatchRead record = records.get(parser.batchIndex); - - if (resultCode == ResultCode.OK) { - record.setRecord(parseRecord(parser)); - } - else { - record.setError(resultCode, false); - } - listener.onRecord(record); - } - - @Override - void onSuccess() { - listener.onSuccess(); - } - - @Override - void onFailure(AerospikeException ae) { - listener.onFailure(ae); - } - } - - //------------------------------------------------------- - // Batch Read Key Array - //------------------------------------------------------- - - 
public static final class GetArrayCommand extends BaseCommand { - private final RecordArrayListener listener; - private final Record[] records; - private final Key[] keys; - private final String[] binNames; - private final Operation[] ops; - private final int readAttr; - private Exception exc; - - public GetArrayCommand( - GrpcCallExecutor executor, - BatchPolicy batchPolicy, - RecordArrayListener listener, - Key[] keys, - String[] binNames, - Operation[] ops, - int readAttr, - boolean isOperation - ) { - super(executor, batchPolicy, isOperation, keys.length); - this.listener = listener; - this.keys = keys; - this.binNames = binNames; - this.ops = ops; - this.readAttr = readAttr; - this.records = new Record[keys.length]; - } - - @Override - void writeCommand(Command command) { - BatchAttr attr = new BatchAttr(batchPolicy, readAttr, ops); - KeyIterProxy iter = new KeyIterProxy(keys); - command.setBatchOperate(batchPolicy, iter, binNames, ops, attr); - } - - @Override - void parse(Parser parser, int resultCode) { - if (resultCode == ResultCode.OK) { - records[parser.batchIndex] = parseRecord(parser); - } - else if (resultCode == ResultCode.INVALID_NAMESPACE) { - exc = new AerospikeException.InvalidNamespace(keys[parser.batchIndex].namespace, 1); - } - } - - @Override - void onSuccess() { - if (exc == null) { - listener.onSuccess(keys, records); - } - else { - listener.onFailure(new AerospikeException.BatchRecords(records, exc)); - } - } - - @Override - void onFailure(AerospikeException ae) { - listener.onFailure(new AerospikeException.BatchRecords(records, ae)); - } - } - - public static final class GetSequenceCommand extends BaseCommand { - private final RecordSequenceListener listener; - private final Key[] keys; - private final String[] binNames; - private final Operation[] ops; - private final int readAttr; - - public GetSequenceCommand( - GrpcCallExecutor executor, - BatchPolicy batchPolicy, - RecordSequenceListener listener, - Key[] keys, - String[] binNames, - Operation[] ops, - int readAttr, - boolean isOperation - ) { - super(executor, batchPolicy, isOperation, keys.length); - this.listener = listener; - this.keys = keys; - this.binNames = binNames; - this.ops = ops; - this.readAttr = readAttr; - } - - @Override - void writeCommand(Command command) { - BatchAttr attr = new BatchAttr(batchPolicy, readAttr, ops); - KeyIterProxy iter = new KeyIterProxy(keys); - command.setBatchOperate(batchPolicy, iter, binNames, ops, attr); - } - - @Override - void parse(Parser parser, int resultCode) { - Key keyOrig = keys[parser.batchIndex]; - - if (resultCode == ResultCode.OK) { - Record record = parseRecord(parser); - listener.onRecord(keyOrig, record); - } - else { - listener.onRecord(keyOrig, null); - } - } - - @Override - void onSuccess() { - listener.onSuccess(); - } - - @Override - void onFailure(AerospikeException ae) { - listener.onFailure(ae); - } - } - - //------------------------------------------------------- - // Batch Exists - //------------------------------------------------------- - - public static final class ExistsArrayCommand extends BaseCommand { - private final ExistsArrayListener listener; - private final Key[] keys; - private final boolean[] existsArray; - private Exception exc; - - public ExistsArrayCommand( - GrpcCallExecutor executor, - BatchPolicy batchPolicy, - ExistsArrayListener listener, - Key[] keys - ) { - super(executor, batchPolicy, false, keys.length); - this.listener = listener; - this.keys = keys; - this.existsArray = new boolean[keys.length]; - } - - 
@Override - void writeCommand(Command command) { - BatchAttr attr = new BatchAttr(batchPolicy, Command.INFO1_READ | Command.INFO1_NOBINDATA); - KeyIterProxy iter = new KeyIterProxy(keys); - command.setBatchOperate(batchPolicy, iter, null, null, attr); - } - - @Override - void parse(Parser parser, int resultCode) { - if (parser.opCount > 0) { - throw new AerospikeException.Parse("Received bins that were not requested!"); - } - - if (resultCode == 0) { - existsArray[parser.batchIndex] = true; - } - else if (resultCode == ResultCode.INVALID_NAMESPACE) { - exc = new AerospikeException.InvalidNamespace(keys[parser.batchIndex].namespace, 1); - } - } - - @Override - void onSuccess() { - if (exc == null) { - listener.onSuccess(keys, existsArray); - } - else { - listener.onFailure(new AerospikeException.BatchExists(existsArray, exc)); - } - } - - @Override - void onFailure(AerospikeException ae) { - listener.onFailure(ae); - } - } - - public static final class ExistsSequenceCommand extends BaseCommand { - private final ExistsSequenceListener listener; - private final Key[] keys; - - public ExistsSequenceCommand( - GrpcCallExecutor executor, - BatchPolicy batchPolicy, - ExistsSequenceListener listener, - Key[] keys - ) { - super(executor, batchPolicy, false, keys.length); - this.listener = listener; - this.keys = keys; - } - - @Override - void writeCommand(Command command) { - BatchAttr attr = new BatchAttr(batchPolicy, Command.INFO1_READ | Command.INFO1_NOBINDATA); - KeyIterProxy iter = new KeyIterProxy(keys); - command.setBatchOperate(batchPolicy, iter, null, null, attr); - } - - @Override - void parse(Parser parser, int resultCode) { - if (parser.opCount > 0) { - throw new AerospikeException.Parse("Received bins that were not requested!"); - } - listener.onExists(keys[parser.batchIndex], resultCode == 0); - } - - @Override - void onSuccess() { - listener.onSuccess(); - } - - @Override - void onFailure(AerospikeException ae) { - listener.onFailure(ae); - } - } - - //------------------------------------------------------- - // Batch Operate Record List - //------------------------------------------------------- - - public static final class OperateListCommand extends BaseCommand { - private final AerospikeClientProxy client; - private final BatchOperateListListener listener; - private final List records; - private boolean status; - - public OperateListCommand( - AerospikeClientProxy client, - GrpcCallExecutor executor, - BatchPolicy batchPolicy, - BatchOperateListListener listener, - List records - ) { - super(executor, batchPolicy, true, records.size()); - this.client = client; - this.listener = listener; - this.records = records; - this.status = true; - } - - @Override - void writeCommand(Command command) { - BatchRecordIterProxy iter = new BatchRecordIterProxy(records); - command.setBatchOperate(batchPolicy, client.batchWritePolicyDefault, client.batchUDFPolicyDefault, - client.batchDeletePolicyDefault, iter); - } - - @Override - void parse(Parser parser, int resultCode) { - BatchRecord record = records.get(parser.batchIndex); - - if (resultCode == ResultCode.OK) { - record.setRecord(parseRecord(parser)); - return; - } - - if (resultCode == ResultCode.UDF_BAD_RESPONSE) { - Record r = parseRecord(parser); - String m = r.getString("FAILURE"); - - if (m != null) { - // Need to store record because failure bin contains an error message. 
- record.record = r; - record.resultCode = resultCode; - record.inDoubt = inDoubt; - status = false; - return; - } - } - - record.setError(resultCode, inDoubt); - status = false; - } - - @Override - void onSuccess() { - listener.onSuccess(records, status); - } - - @Override - void onFailure(AerospikeException ae) { - if (ae.getInDoubt()) { - for (BatchRecord record : records) { - if (record.resultCode == ResultCode.NO_RESPONSE) { - record.inDoubt = record.hasWrite; - } - } - } - listener.onFailure(ae); - } - } - - public static final class OperateSequenceCommand extends BaseCommand { - private final AerospikeClientProxy client; - private final BatchRecordSequenceListener listener; - private final List records; - - public OperateSequenceCommand( - AerospikeClientProxy client, - GrpcCallExecutor executor, - BatchPolicy batchPolicy, - BatchRecordSequenceListener listener, - List records - ) { - super(executor, batchPolicy, true, records.size()); - this.client = client; - this.listener = listener; - this.records = records; - } - - @Override - void writeCommand(Command command) { - BatchRecordIterProxy iter = new BatchRecordIterProxy(records); - command.setBatchOperate(batchPolicy, client.batchWritePolicyDefault, client.batchUDFPolicyDefault, - client.batchDeletePolicyDefault, iter); - } - - @Override - void parse(Parser parser, int resultCode) { - BatchRecord record = records.get(parser.batchIndex); - - if (resultCode == ResultCode.OK) { - record.setRecord(parseRecord(parser)); - } - else if (resultCode == ResultCode.UDF_BAD_RESPONSE) { - Record r = parseRecord(parser); - String m = r.getString("FAILURE"); - - if (m != null) { - // Need to store record because failure bin contains an error message. - record.record = r; - record.resultCode = resultCode; - record.inDoubt = inDoubt; - } - else { - record.setError(resultCode, inDoubt); - } - } - else { - record.setError(resultCode, inDoubt); - } - - listener.onRecord(record, parser.batchIndex); - } - - @Override - void onSuccess() { - listener.onSuccess(); - } - - @Override - void onFailure(AerospikeException ae) { - if (ae.getInDoubt()) { - for (BatchRecord record : records) { - if (record.resultCode == ResultCode.NO_RESPONSE) { - record.inDoubt = record.hasWrite; - } - } - } - listener.onFailure(ae); - } - } - - //------------------------------------------------------- - // Batch Operate Key Array - //------------------------------------------------------- - - public static final class OperateRecordArrayCommand extends BaseCommand { - private final BatchRecordArrayListener listener; - private final BatchRecord[] records; - private final Key[] keys; - private final Operation[] ops; - private final BatchAttr attr; - private boolean status; - - public OperateRecordArrayCommand( - GrpcCallExecutor executor, - BatchPolicy batchPolicy, - Key[] keys, - Operation[] ops, - BatchRecordArrayListener listener, - BatchAttr attr - ) { - super(executor, batchPolicy, ops != null, keys.length); - this.keys = keys; - this.ops = ops; - this.listener = listener; - this.attr = attr; - this.status = true; - this.records = new BatchRecord[keys.length]; - - for (int i = 0; i < keys.length; i++) { - this.records[i] = new BatchRecord(keys[i], attr.hasWrite); - } - } - - @Override - void writeCommand(Command command) { - KeyIterProxy iter = new KeyIterProxy(keys); - command.setBatchOperate(batchPolicy, iter, null, ops, attr); - } - - @Override - void parse(Parser parser, int resultCode) { - BatchRecord record = records[parser.batchIndex]; - - if (resultCode == 0) { - 
record.setRecord(parseRecord(parser)); - } - else { - record.setError(resultCode, inDoubt); - status = false; - } - } - - @Override - void onSuccess() { - listener.onSuccess(records, status); - } - - @Override - void onFailure(AerospikeException ae) { - if (ae.getInDoubt()) { - for (BatchRecord record : records) { - if (record.resultCode == ResultCode.NO_RESPONSE) { - record.inDoubt = record.hasWrite; - } - } - } - listener.onFailure(records, ae); - } - } - - public static final class OperateRecordSequenceCommand extends BaseCommand { - private final BatchRecordSequenceListener listener; - private final Key[] keys; - private final Operation[] ops; - private final BatchAttr attr; - - public OperateRecordSequenceCommand( - GrpcCallExecutor executor, - BatchPolicy batchPolicy, - Key[] keys, - Operation[] ops, - BatchRecordSequenceListener listener, - BatchAttr attr - ) { - super(executor, batchPolicy, ops != null, keys.length); - this.keys = keys; - this.ops = ops; - this.listener = listener; - this.attr = attr; - } - - @Override - void writeCommand(Command command) { - KeyIterProxy iter = new KeyIterProxy(keys); - command.setBatchOperate(batchPolicy, iter, null, ops, attr); - } - - @Override - void parse(Parser parser, int resultCode) { - Key keyOrig = keys[parser.batchIndex]; - BatchRecord record; - - if (resultCode == ResultCode.OK) { - record = new BatchRecord(keyOrig, parseRecord(parser), attr.hasWrite); - } - else { - record = new BatchRecord(keyOrig, null, resultCode, inDoubt, attr.hasWrite); - } - - listener.onRecord(record, parser.batchIndex); - } - - @Override - void onSuccess() { - listener.onSuccess(); - } - - @Override - void onFailure(AerospikeException ae) { - listener.onFailure(ae); - } - } - - //------------------------------------------------------- - // Batch UDF - //------------------------------------------------------- - - public static final class UDFArrayCommand extends BaseCommand { - private final BatchRecordArrayListener listener; - private final BatchRecord[] records; - private final Key[] keys; - private final String packageName; - private final String functionName; - private final byte[] argBytes; - private final BatchAttr attr; - private boolean status; - - public UDFArrayCommand( - GrpcCallExecutor executor, - BatchPolicy batchPolicy, - BatchRecordArrayListener listener, - Key[] keys, - String packageName, - String functionName, - byte[] argBytes, - BatchAttr attr - ) { - super(executor, batchPolicy, false, keys.length); - this.listener = listener; - this.keys = keys; - this.packageName = packageName; - this.functionName = functionName; - this.argBytes = argBytes; - this.attr = attr; - this.status = true; - - this.records = new BatchRecord[keys.length]; - - for (int i = 0; i < keys.length; i++) { - this.records[i] = new BatchRecord(keys[i], attr.hasWrite); - } - } - - @Override - void writeCommand(Command command) { - KeyIterProxy iter = new KeyIterProxy(keys); - command.setBatchUDF(batchPolicy, iter, packageName, functionName, argBytes, attr); - } - - @Override - void parse(Parser parser, int resultCode) { - BatchRecord record = records[parser.batchIndex]; - - if (resultCode == ResultCode.OK) { - record.setRecord(parseRecord(parser)); - return; - } - - if (resultCode == ResultCode.UDF_BAD_RESPONSE) { - Record r = parseRecord(parser); - String m = r.getString("FAILURE"); - - if (m != null) { - // Need to store record because failure bin contains an error message. 
- record.record = r; - record.resultCode = resultCode; - record.inDoubt = inDoubt; - status = false; - return; - } - } - - record.setError(resultCode, inDoubt); - status = false; - } - - @Override - void onSuccess() { - listener.onSuccess(records, status); - } - - @Override - void onFailure(AerospikeException ae) { - if (ae.getInDoubt()) { - for (BatchRecord record : records) { - if (record.resultCode == ResultCode.NO_RESPONSE) { - record.inDoubt = record.hasWrite; - } - } - } - listener.onFailure(records, ae); - } - } - - public static final class UDFSequenceCommand extends BaseCommand { - private final BatchRecordSequenceListener listener; - private final Key[] keys; - private final String packageName; - private final String functionName; - private final byte[] argBytes; - private final BatchAttr attr; - - public UDFSequenceCommand( - GrpcCallExecutor executor, - BatchPolicy batchPolicy, - BatchRecordSequenceListener listener, - Key[] keys, - String packageName, - String functionName, - byte[] argBytes, - BatchAttr attr - ) { - super(executor, batchPolicy, false, keys.length); - this.listener = listener; - this.keys = keys; - this.packageName = packageName; - this.functionName = functionName; - this.argBytes = argBytes; - this.attr = attr; - } - - @Override - void writeCommand(Command command) { - KeyIterProxy iter = new KeyIterProxy(keys); - command.setBatchUDF(batchPolicy, iter, packageName, functionName, argBytes, attr); - } - - @Override - void parse(Parser parser, int resultCode) { - Key keyOrig = keys[parser.batchIndex]; - BatchRecord record; - - if (resultCode == ResultCode.OK) { - record = new BatchRecord(keyOrig, parseRecord(parser), attr.hasWrite); - } - else if (resultCode == ResultCode.UDF_BAD_RESPONSE) { - Record r = parseRecord(parser); - String m = r.getString("FAILURE"); - - if (m != null) { - // Need to store record because failure bin contains an error message. - record = new BatchRecord(keyOrig, r, resultCode, inDoubt, attr.hasWrite); - } - else { - record = new BatchRecord(keyOrig, null, resultCode, inDoubt, attr.hasWrite); - } - } - else { - record = new BatchRecord(keyOrig, null, resultCode, inDoubt, attr.hasWrite); - } - listener.onRecord(record, parser.batchIndex); - } - - @Override - void onSuccess() { - listener.onSuccess(); - } - - @Override - void onFailure(AerospikeException ae) { - listener.onFailure(ae); - } - } - - //------------------------------------------------------- - // Batch Base - //------------------------------------------------------- - - private static abstract class BaseCommand extends CommandProxy { - final BatchPolicy batchPolicy; - final boolean isOperation; - - public BaseCommand( - GrpcCallExecutor executor, - BatchPolicy batchPolicy, - boolean isOperation, - int numExpectedResponses - ) { - super(KVSGrpc.getBatchOperateStreamingMethod(), executor, batchPolicy, numExpectedResponses); - this.batchPolicy = batchPolicy; - this.isOperation = isOperation; - } - - @Override - final void onResponse(Kvs.AerospikeResponsePayload response) { - // Check final response status for client errors (negative error codes). - int resultCode = response.getStatus(); - boolean hasNext = response.getHasNext(); - - if (resultCode != 0 && !hasNext) { - notifyFailure(new AerospikeException(resultCode)); - return; - } - - // Server errors are checked in response payload in Parser. 
- byte[] bytes = response.getPayload().toByteArray(); - Parser parser = new Parser(bytes); - parser.parseProto(); - int rc = parser.parseHeader(); - - if (hasNext) { - if (resultCode == 0) { - resultCode = rc; - } - parser.skipKey(); - parse(parser, resultCode); - return; - } - - if (rc == ResultCode.OK) { - try { - onSuccess(); - } - catch (Throwable t) { - logOnSuccessError(t); - } - } - else { - notifyFailure(new AerospikeException(rc)); - } - } - - final Record parseRecord(Parser parser) { - return parser.parseRecord(isOperation); - } - - abstract void parse(Parser parser, int resultCode); - abstract void onSuccess(); - } - - //------------------------------------------------------- - // Proxy Iterators - //------------------------------------------------------- - - private static class BatchRecordIterProxy implements KeyIter { - private final List records; - private final int size; - private int offset; - private int index; - - public BatchRecordIterProxy(List records) { - this.records = records; - this.size = records.size(); - } - - @Override - public int size() { - return size; - } - - @Override - public BatchRecord next() { - if (index >= size) { - return null; - } - offset = index++; - return records.get(offset); - } - - @Override - public int offset() { - return offset; - } - - @Override - public void reset() { - index = 0; - } - } - - private static class KeyIterProxy implements KeyIter { - private final Key[] keys; - private int offset; - private int index; - - public KeyIterProxy(Key[] keys) { - this.keys = keys; - } - - @Override - public int size() { - return keys.length; - } - - @Override - public Key next() { - if (index >= keys.length) { - return null; - } - offset = index++; - return keys[offset]; - } - - @Override - public int offset() { - return offset; - } - - @Override - public void reset() { - index = 0; - } - } -} diff --git a/proxy/src/com/aerospike/client/proxy/CommandProxy.java b/proxy/src/com/aerospike/client/proxy/CommandProxy.java deleted file mode 100644 index a722dca80..000000000 --- a/proxy/src/com/aerospike/client/proxy/CommandProxy.java +++ /dev/null @@ -1,199 +0,0 @@ -/* - * Copyright 2012-2024 Aerospike, Inc. - * - * Portions may be licensed to Aerospike, Inc. under one or more contributor - * license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. 
- */ -package com.aerospike.client.proxy; - -import java.util.concurrent.TimeUnit; - -import com.aerospike.client.AerospikeException; -import com.aerospike.client.Log; -import com.aerospike.client.ResultCode; -import com.aerospike.client.command.Command; -import com.aerospike.client.policy.Policy; -import com.aerospike.client.proxy.grpc.GrpcCallExecutor; -import com.aerospike.client.proxy.grpc.GrpcConversions; -import com.aerospike.client.proxy.grpc.GrpcStreamingCall; -import com.aerospike.client.util.Util; -import com.aerospike.proxy.client.Kvs; -import com.google.protobuf.ByteString; - -import io.grpc.MethodDescriptor; -import io.grpc.Status; -import io.grpc.StatusRuntimeException; -import io.grpc.stub.StreamObserver; - -public abstract class CommandProxy { - final Policy policy; - private final GrpcCallExecutor executor; - private final MethodDescriptor methodDescriptor; - private long deadlineNanos; - private int sendTimeoutMillis; - private int iteration = 1; - private final int numExpectedResponses; - boolean inDoubt; - - public CommandProxy( - MethodDescriptor methodDescriptor, - GrpcCallExecutor executor, - Policy policy, - int numExpectedResponses - ) { - this.methodDescriptor = methodDescriptor; - this.executor = executor; - this.policy = policy; - this.numExpectedResponses = numExpectedResponses; - } - - final void execute() { - if (policy.totalTimeout > 0) { - deadlineNanos = System.nanoTime() + TimeUnit.MILLISECONDS.toNanos(policy.totalTimeout); - sendTimeoutMillis = (policy.socketTimeout > 0 && policy.socketTimeout < policy.totalTimeout)? - policy.socketTimeout : policy.totalTimeout; - } - else { - deadlineNanos = 0; // No total deadline. - sendTimeoutMillis = Math.max(policy.socketTimeout, 0); - } - - executeCommand(); - } - - private void executeCommand() { - long sendDeadlineNanos = - (sendTimeoutMillis > 0) ? 
- System.nanoTime() + TimeUnit.MILLISECONDS.toNanos(sendTimeoutMillis) : 0; - - Kvs.AerospikeRequestPayload.Builder builder = getRequestBuilder(); - - executor.execute(new GrpcStreamingCall(methodDescriptor, builder, - policy, iteration, deadlineNanos, sendDeadlineNanos, numExpectedResponses, - new StreamObserver() { - @Override - public void onNext(Kvs.AerospikeResponsePayload response) { - try { - inDoubt |= response.getInDoubt(); - onResponse(response); - } - catch (Throwable t) { - onFailure(t); - // Re-throw to abort at the proxy/ - throw t; - } - } - - @Override - public void onError(Throwable t) { - inDoubt = true; - onFailure(t); - } - - @Override - public void onCompleted() { - } - })); - } - - boolean retry() { - if (iteration > policy.maxRetries) { - return false; - } - - if (policy.totalTimeout > 0) { - long remaining = deadlineNanos - System.nanoTime() - TimeUnit.MILLISECONDS.toNanos(policy.sleepBetweenRetries); - - if (remaining <= 0) { - return false; - } - } - - iteration++; - executor.getEventLoop().schedule(this::retryNow, policy.sleepBetweenRetries, TimeUnit.MILLISECONDS); - return true; - } - - private void retryNow() { - try { - executeCommand(); - } - catch (AerospikeException ae) { - notifyFailure(ae); - } - catch (Throwable t) { - notifyFailure(new AerospikeException(ResultCode.CLIENT_ERROR, t)); - } - } - - private void onFailure(Throwable t) { - AerospikeException ae; - - try { - if (t instanceof AerospikeException) { - ae = (AerospikeException)t; - ae.setPolicy(policy); - } - else if (t instanceof StatusRuntimeException) { - StatusRuntimeException sre = (StatusRuntimeException)t; - Status.Code code = sre.getStatus().getCode(); - - if (code == Status.Code.UNAVAILABLE) { - if (retry()) { - return; - } - } - ae = GrpcConversions.toAerospike(sre, policy, iteration); - } - else { - ae = new AerospikeException(ResultCode.CLIENT_ERROR, t); - } - } - catch (AerospikeException ae2) { - ae = ae2; - } - catch (Throwable t2) { - ae = new AerospikeException(ResultCode.CLIENT_ERROR, t2); - } - - notifyFailure(ae); - } - - final void notifyFailure(AerospikeException ae) { - try { - ae.setPolicy(policy); - ae.setIteration(iteration); - ae.setInDoubt(inDoubt); - onFailure(ae); - } - catch (Throwable t) { - Log.error("onFailure() error: " + Util.getStackTrace(t)); - } - } - - static void logOnSuccessError(Throwable t) { - Log.error("onSuccess() error: " + Util.getStackTrace(t)); - } - - Kvs.AerospikeRequestPayload.Builder getRequestBuilder() { - Command command = new Command(policy.socketTimeout, policy.totalTimeout, policy.maxRetries); - writeCommand(command); - - ByteString payload = ByteString.copyFrom(command.dataBuffer, 0, command.dataOffset); - return Kvs.AerospikeRequestPayload.newBuilder().setPayload(payload); - } - - abstract void writeCommand(Command command); - abstract void onResponse(Kvs.AerospikeResponsePayload response); - abstract void onFailure(AerospikeException ae); -} diff --git a/proxy/src/com/aerospike/client/proxy/DeleteCommandProxy.java b/proxy/src/com/aerospike/client/proxy/DeleteCommandProxy.java deleted file mode 100644 index bd0aefece..000000000 --- a/proxy/src/com/aerospike/client/proxy/DeleteCommandProxy.java +++ /dev/null @@ -1,87 +0,0 @@ -/* - * Copyright 2012-2023 Aerospike, Inc. - * - * Portions may be licensed to Aerospike, Inc. under one or more contributor - * license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package com.aerospike.client.proxy; - -import com.aerospike.client.AerospikeException; -import com.aerospike.client.Key; -import com.aerospike.client.ResultCode; -import com.aerospike.client.command.Command; -import com.aerospike.client.listener.DeleteListener; -import com.aerospike.client.policy.WritePolicy; -import com.aerospike.client.proxy.grpc.GrpcCallExecutor; -import com.aerospike.proxy.client.KVSGrpc; - -public final class DeleteCommandProxy extends SingleCommandProxy { - private final DeleteListener listener; - private final WritePolicy writePolicy; - private final Key key; - - public DeleteCommandProxy( - GrpcCallExecutor executor, - DeleteListener listener, - WritePolicy writePolicy, - Key key - ) { - super(KVSGrpc.getDeleteStreamingMethod(), executor, writePolicy); - this.listener = listener; - this.writePolicy = writePolicy; - this.key = key; - } - - @Override - void writeCommand(Command command) { - command.setDelete(writePolicy, key); - } - - @Override - void parseResult(Parser parser) { - int resultCode = parser.parseResultCode(); - boolean existed; - - switch (resultCode) { - case ResultCode.OK: - existed = true; - break; - - case ResultCode.KEY_NOT_FOUND_ERROR: - existed = false; - break; - - case ResultCode.FILTERED_OUT: - if (policy.failOnFilteredOut) { - throw new AerospikeException(resultCode); - } - existed = true; - break; - - default: - throw new AerospikeException(resultCode); - } - - try { - listener.onSuccess(key, existed); - } - catch (Throwable t) { - logOnSuccessError(t); - } - } - - @Override - void onFailure(AerospikeException ae) { - listener.onFailure(ae); - } -} diff --git a/proxy/src/com/aerospike/client/proxy/ExecuteCommandProxy.java b/proxy/src/com/aerospike/client/proxy/ExecuteCommandProxy.java deleted file mode 100644 index 4961a6c31..000000000 --- a/proxy/src/com/aerospike/client/proxy/ExecuteCommandProxy.java +++ /dev/null @@ -1,110 +0,0 @@ -/* - * Copyright 2012-2023 Aerospike, Inc. - * - * Portions may be licensed to Aerospike, Inc. under one or more contributor - * license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. 
- */ -package com.aerospike.client.proxy; - -import java.util.Map; - -import com.aerospike.client.AerospikeException; -import com.aerospike.client.Key; -import com.aerospike.client.Record; -import com.aerospike.client.Value; -import com.aerospike.client.command.Command; -import com.aerospike.client.listener.ExecuteListener; -import com.aerospike.client.policy.WritePolicy; -import com.aerospike.client.proxy.grpc.GrpcCallExecutor; -import com.aerospike.proxy.client.KVSGrpc; - -public final class ExecuteCommandProxy extends ReadCommandProxy { - private final ExecuteListener executeListener; - private final WritePolicy writePolicy; - private final Key key; - private final String packageName; - private final String functionName; - private final Value[] args; - - public ExecuteCommandProxy( - GrpcCallExecutor executor, - ExecuteListener executeListener, - WritePolicy writePolicy, - Key key, - String packageName, - String functionName, - Value[] args - ) { - super(KVSGrpc.getExecuteStreamingMethod(), executor, null, writePolicy, key, false); - this.executeListener = executeListener; - this.writePolicy = writePolicy; - this.key = key; - this.packageName = packageName; - this.functionName = functionName; - this.args = args; - } - - @Override - void writeCommand(Command command) { - command.setUdf(writePolicy, key, packageName, functionName, args); - } - - @Override - void parseResult(Parser parser) { - Record record = parseRecordResult(parser); - Object obj = parseEndResult(record); - - try { - executeListener.onSuccess(key, obj); - } - catch (Throwable t) { - logOnSuccessError(t); - } - } - - private static Object parseEndResult(Record record) { - if (record == null || record.bins == null) { - return null; - } - - Map map = record.bins; - - Object obj = map.get("SUCCESS"); - - if (obj != null) { - return obj; - } - - // User defined functions don't have to return a value. - if (map.containsKey("SUCCESS")) { - return null; - } - - obj = map.get("FAILURE"); - - if (obj != null) { - throw new AerospikeException(obj.toString()); - } - throw new AerospikeException("Invalid UDF return value"); - } - - @Override - protected void handleNotFound(int resultCode) { - throw new AerospikeException(resultCode); - } - - @Override - void onFailure(AerospikeException ae) { - executeListener.onFailure(ae); - } -} diff --git a/proxy/src/com/aerospike/client/proxy/ExecuteTaskProxy.java b/proxy/src/com/aerospike/client/proxy/ExecuteTaskProxy.java deleted file mode 100644 index b81598f0f..000000000 --- a/proxy/src/com/aerospike/client/proxy/ExecuteTaskProxy.java +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Copyright 2012-2023 Aerospike, Inc. - * - * Portions may be licensed to Aerospike, Inc. under one or more contributor - * license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. 
- */ - -package com.aerospike.client.proxy; - -import java.util.concurrent.CompletableFuture; - -import com.aerospike.client.AerospikeException; -import com.aerospike.client.policy.WritePolicy; -import com.aerospike.client.proxy.grpc.GrpcCallExecutor; -import com.aerospike.client.task.ExecuteTask; - -public class ExecuteTaskProxy extends ExecuteTask { - private final long taskId; - private final boolean scan; - private final GrpcCallExecutor callExecutor; - - /** - * Initialize task with fields needed to query server nodes. - */ - public ExecuteTaskProxy(GrpcCallExecutor executor, long taskId, boolean isScan) { - super(taskId, isScan); - this.callExecutor = executor; - this.taskId = taskId; - this.scan = isScan; - } - - /** - * Return task id. - */ - public long getTaskId() { - return taskId; - } - - /** - * Query all nodes for task completion status. - */ - @Override - public int queryStatus() throws AerospikeException { - CompletableFuture future = new CompletableFuture<>(); - ExecuteTaskStatusCommandProxy command = new ExecuteTaskStatusCommandProxy(callExecutor, - new WritePolicy(), taskId, scan, future); - command.execute(); - return AerospikeClientProxy.getFuture(future); - } -} \ No newline at end of file diff --git a/proxy/src/com/aerospike/client/proxy/ExecuteTaskStatusCommandProxy.java b/proxy/src/com/aerospike/client/proxy/ExecuteTaskStatusCommandProxy.java deleted file mode 100644 index 5a0ed3251..000000000 --- a/proxy/src/com/aerospike/client/proxy/ExecuteTaskStatusCommandProxy.java +++ /dev/null @@ -1,96 +0,0 @@ -/* - * Copyright 2012-2023 Aerospike, Inc. - * - * Portions may be licensed to Aerospike, Inc. under one or more contributor - * license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package com.aerospike.client.proxy; - -import java.util.concurrent.CompletableFuture; - -import com.aerospike.client.AerospikeException; -import com.aerospike.client.ResultCode; -import com.aerospike.client.command.Command; -import com.aerospike.client.policy.WritePolicy; -import com.aerospike.client.proxy.grpc.GrpcCallExecutor; -import com.aerospike.proxy.client.Kvs; -import com.aerospike.proxy.client.QueryGrpc; - -/** - * Fetch status for a background task. - */ -public class ExecuteTaskStatusCommandProxy extends MultiCommandProxy { - private final long taskId; - private final boolean isScan; - private final CompletableFuture future; - private int status; - - public ExecuteTaskStatusCommandProxy( - GrpcCallExecutor executor, - WritePolicy writePolicy, - long taskId, - boolean isScan, - CompletableFuture future - ) { - super(QueryGrpc.getBackgroundTaskStatusStreamingMethod(), executor, writePolicy); - this.taskId = taskId; - this.isScan = isScan; - this.future = future; - } - - @Override - protected void writeCommand(Command command) { - // Nothing to do since there is no Aerospike payload. 
- } - - @Override - protected void parseResult(Parser parser) { - RecordProxy recordProxy = parseRecordResult(parser, false, true, false); - - // Only on response is expected. - if (recordProxy.resultCode != ResultCode.OK) { - throw new AerospikeException(recordProxy.resultCode); - } - - // Status has been set in onResponse - future.complete(status); - } - - @Override - void onResponse(Kvs.AerospikeResponsePayload response) { - // Set the value but do not report the result until the resultcode - // is computed in parseResult - if (!response.hasField(response.getDescriptorForType().findFieldByName("backgroundTaskStatus"))) { - throw new AerospikeException.Parse("missing task status field"); - } - status = response.getBackgroundTaskStatusValue(); - super.onResponse(response); - } - - @Override - protected void onFailure(AerospikeException ae) { - future.completeExceptionally(ae); - } - - @Override - protected Kvs.AerospikeRequestPayload.Builder getRequestBuilder() { - // Set the query parameters in the Aerospike request payload. - Kvs.AerospikeRequestPayload.Builder builder = Kvs.AerospikeRequestPayload.newBuilder(); - Kvs.BackgroundTaskStatusRequest.Builder statusRequestBuilder = Kvs.BackgroundTaskStatusRequest.newBuilder(); - statusRequestBuilder.setTaskId(taskId); - statusRequestBuilder.setIsScan(isScan); - - builder.setBackgroundTaskStatusRequest(statusRequestBuilder.build()); - return builder; - } -} diff --git a/proxy/src/com/aerospike/client/proxy/ExistsCommandProxy.java b/proxy/src/com/aerospike/client/proxy/ExistsCommandProxy.java deleted file mode 100644 index fa9ff8de9..000000000 --- a/proxy/src/com/aerospike/client/proxy/ExistsCommandProxy.java +++ /dev/null @@ -1,85 +0,0 @@ -/* - * Copyright 2012-2023 Aerospike, Inc. - * - * Portions may be licensed to Aerospike, Inc. under one or more contributor - * license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. 
- */ -package com.aerospike.client.proxy; - -import com.aerospike.client.AerospikeException; -import com.aerospike.client.Key; -import com.aerospike.client.ResultCode; -import com.aerospike.client.command.Command; -import com.aerospike.client.listener.ExistsListener; -import com.aerospike.client.policy.Policy; -import com.aerospike.client.proxy.grpc.GrpcCallExecutor; -import com.aerospike.proxy.client.KVSGrpc; - -public final class ExistsCommandProxy extends SingleCommandProxy { - private final ExistsListener listener; - private final Key key; - - public ExistsCommandProxy( - GrpcCallExecutor executor, - ExistsListener listener, - Policy policy, - Key key - ) { - super(KVSGrpc.getExistsStreamingMethod(), executor, policy); - this.listener = listener; - this.key = key; - } - - @Override - void writeCommand(Command command) { - command.setExists(policy, key); - } - - @Override - void parseResult(Parser parser) { - int resultCode = parser.parseResultCode(); - boolean exists; - - switch (resultCode) { - case ResultCode.OK: - exists = true; - break; - - case ResultCode.KEY_NOT_FOUND_ERROR: - exists = false; - break; - - case ResultCode.FILTERED_OUT: - if (policy.failOnFilteredOut) { - throw new AerospikeException(resultCode); - } - exists = true; - break; - - default: - throw new AerospikeException(resultCode); - } - - try { - listener.onSuccess(key, exists); - } - catch (Throwable t) { - logOnSuccessError(t); - } - } - - @Override - void onFailure(AerospikeException ae) { - listener.onFailure(ae); - } -} diff --git a/proxy/src/com/aerospike/client/proxy/MultiCommandProxy.java b/proxy/src/com/aerospike/client/proxy/MultiCommandProxy.java deleted file mode 100644 index 4b637aedd..000000000 --- a/proxy/src/com/aerospike/client/proxy/MultiCommandProxy.java +++ /dev/null @@ -1,138 +0,0 @@ -/* - * Copyright 2012-2023 Aerospike, Inc. - * - * Portions may be licensed to Aerospike, Inc. under one or more contributor - * license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package com.aerospike.client.proxy; - -import com.aerospike.client.AerospikeException; -import com.aerospike.client.Key; -import com.aerospike.client.Record; -import com.aerospike.client.ResultCode; -import com.aerospike.client.policy.Policy; -import com.aerospike.client.proxy.grpc.GrpcCallExecutor; -import com.aerospike.client.query.BVal; -import com.aerospike.proxy.client.Kvs; - -import io.grpc.MethodDescriptor; - -public abstract class MultiCommandProxy extends CommandProxy { - boolean hasNext; - - public MultiCommandProxy( - MethodDescriptor methodDescriptor, - GrpcCallExecutor executor, - Policy policy - ) { - super(methodDescriptor, executor, policy, -1); - } - - @Override - void onResponse(Kvs.AerospikeResponsePayload response) { - // Check response status for client errors (negative error codes). - // Server errors are checked in response payload in Parser. 
- int status = response.getStatus(); - - if (status != 0) { - notifyFailure(new AerospikeException(status)); - return; - } - - hasNext = response.getHasNext(); - byte[] bytes = response.getPayload().toByteArray(); - Parser parser = new Parser(bytes); - parser.parseProto(); - parseResult(parser); - } - - final RecordProxy parseRecordResult( - Parser parser, - boolean isOperation, - boolean parseKey, - boolean parseBVal - ) { - Record record = null; - Key key = null; - BVal bVal = parseBVal ? new BVal() : null; - int resultCode = parser.parseHeader(); - - switch (resultCode) { - case ResultCode.OK: - if (parseKey) { - key = parser.parseKey(bVal); - } - else { - parser.skipKey(); - } - if (parser.opCount == 0) { - // Bin data was not returned. - record = new Record(null, parser.generation, parser.expiration); - } - else { - record = parser.parseRecord(isOperation); - } - break; - - case ResultCode.KEY_NOT_FOUND_ERROR: - handleNotFound(resultCode); - break; - - case ResultCode.FILTERED_OUT: - if (policy.failOnFilteredOut) { - throw new AerospikeException(resultCode); - } - break; - - case ResultCode.UDF_BAD_RESPONSE: - parser.skipKey(); - record = parser.parseRecord(isOperation); - handleUdfError(record, resultCode); - break; - - default: - throw new AerospikeException(resultCode); - } - - return new RecordProxy(resultCode, key, record, bVal); - } - - protected void handleNotFound(int resultCode) { - // Do nothing in default case. Record will be null. - } - - protected void handleUdfError(Record record, int resultCode) { - String ret = (String)record.bins.get("FAILURE"); - - if (ret == null) { - throw new AerospikeException(resultCode); - } - - String message; - int code; - - try { - String[] list = ret.split(":"); - code = Integer.parseInt(list[2].trim()); - message = list[0] + ':' + list[1] + ' ' + list[3]; - } - catch (Exception e) { - // Use generic exception if parse error occurs. - throw new AerospikeException(resultCode, ret); - } - - throw new AerospikeException(code, message); - } - - abstract void parseResult(Parser parser); -} diff --git a/proxy/src/com/aerospike/client/proxy/OperateCommandProxy.java b/proxy/src/com/aerospike/client/proxy/OperateCommandProxy.java deleted file mode 100644 index 683eb15a0..000000000 --- a/proxy/src/com/aerospike/client/proxy/OperateCommandProxy.java +++ /dev/null @@ -1,55 +0,0 @@ -/* - * Copyright 2012-2023 Aerospike, Inc. - * - * Portions may be licensed to Aerospike, Inc. under one or more contributor - * license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. 
- */ -package com.aerospike.client.proxy; - -import com.aerospike.client.AerospikeException; -import com.aerospike.client.Key; -import com.aerospike.client.command.Command; -import com.aerospike.client.command.OperateArgs; -import com.aerospike.client.listener.RecordListener; -import com.aerospike.client.policy.Policy; -import com.aerospike.client.proxy.grpc.GrpcCallExecutor; -import com.aerospike.proxy.client.KVSGrpc; - -public final class OperateCommandProxy extends ReadCommandProxy { - private final OperateArgs args; - - public OperateCommandProxy( - GrpcCallExecutor executor, - RecordListener listener, - Policy policy, - Key key, - OperateArgs args - ) { - super(KVSGrpc.getOperateStreamingMethod(), executor, listener, policy, key, true); - this.args = args; - } - - @Override - void writeCommand(Command command) { - command.setOperate(args.writePolicy, key, args); - } - - @Override - protected void handleNotFound(int resultCode) { - // Only throw not found exception for command with write operations. - // Read-only command operations return a null record. - if (args.hasWrite) { - throw new AerospikeException(resultCode); - } - } -} diff --git a/proxy/src/com/aerospike/client/proxy/Parser.java b/proxy/src/com/aerospike/client/proxy/Parser.java deleted file mode 100644 index 940c6f82d..000000000 --- a/proxy/src/com/aerospike/client/proxy/Parser.java +++ /dev/null @@ -1,245 +0,0 @@ -/* - * Copyright 2012-2023 Aerospike, Inc. - * - * Portions may be licensed to Aerospike, Inc. under one or more contributor - * license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. 
- */ -package com.aerospike.client.proxy; - -import java.util.LinkedHashMap; -import java.util.Map; -import java.util.zip.DataFormatException; -import java.util.zip.Inflater; - -import org.luaj.vm2.LuaValue; - -import com.aerospike.client.AerospikeException; -import com.aerospike.client.Key; -import com.aerospike.client.Record; -import com.aerospike.client.ResultCode; -import com.aerospike.client.Value; -import com.aerospike.client.command.Buffer; -import com.aerospike.client.command.Command; -import com.aerospike.client.command.Command.OpResults; -import com.aerospike.client.command.FieldType; -import com.aerospike.client.lua.LuaInstance; -import com.aerospike.client.query.BVal; - -public final class Parser { - private byte[] buffer; - private int offset; - private int receiveSize; - private int resultCode; - int generation; - int expiration; - int batchIndex; - int fieldCount; - int opCount; - int info3; - - public Parser(byte[] buffer) { - this.buffer = buffer; - } - - public void parseProto() { - long sz = Buffer.bytesToLong(buffer, offset); - receiveSize = (int)(sz & 0xFFFFFFFFFFFFL); - int totalSize = receiveSize + 8; - - if (totalSize != buffer.length) { - throw new AerospikeException("size " + totalSize + " != buffer length " + buffer.length); - } - - offset += 8; - long type = (sz >> 48) & 0xff; - - if (type == Command.AS_MSG_TYPE) { - offset += 5; - } - else if (type == Command.MSG_TYPE_COMPRESSED) { - int usize = (int)Buffer.bytesToLong(buffer, offset); - offset += 8; - - byte[] buf = new byte[usize]; - - Inflater inf = new Inflater(); - try { - inf.setInput(buffer, offset, receiveSize - 8); - int rsize; - - try { - rsize = inf.inflate(buf); - } - catch (DataFormatException dfe) { - throw new AerospikeException.Serialize(dfe); - } - - if (rsize != usize) { - throw new AerospikeException("Decompressed size " + rsize + " is not expected " + usize); - } - - buffer = buf; - offset = 13; - } - finally { - inf.end(); - } - } - else { - throw new AerospikeException("Invalid proto type: " + type + " Expected: " + Command.AS_MSG_TYPE); - } - info3 = buffer[offset - 2] & 0xFF; - } - - public int parseResultCode() { - return buffer[offset] & 0xFF; - } - - public int parseHeader() { - resultCode = parseResultCode(); - offset += 1; - generation = Buffer.bytesToInt(buffer, offset); - offset += 4; - expiration = Buffer.bytesToInt(buffer, offset); - offset += 4; - batchIndex = Buffer.bytesToInt(buffer, offset); - offset += 4; - fieldCount = Buffer.bytesToShort(buffer, offset); - offset += 2; - opCount = Buffer.bytesToShort(buffer, offset); - offset += 2; - return resultCode; - } - - public void skipKey() { - // There can be fields in the response (setname etc). - // But for now, ignore them. Expose them to the API if needed in the future. 
- for (int i = 0; i < fieldCount; i++) { - int fieldlen = Buffer.bytesToInt(buffer, offset); - offset += 4 + fieldlen; - } - } - - public Key parseKey(BVal bVal) { - byte[] digest = null; - String namespace = null; - String setName = null; - Value userKey = null; - - for (int i = 0; i < fieldCount; i++) { - int fieldlen = Buffer.bytesToInt(buffer, offset); - offset += 4; - - int fieldtype = buffer[offset++]; - int size = fieldlen - 1; - - switch (fieldtype) { - case FieldType.DIGEST_RIPE: - digest = new byte[size]; - System.arraycopy(buffer, offset, digest, 0, size); - break; - - case FieldType.NAMESPACE: - namespace = Buffer.utf8ToString(buffer, offset, size); - break; - - case FieldType.TABLE: - setName = Buffer.utf8ToString(buffer, offset, size); - break; - - case FieldType.KEY: - int type = buffer[offset++]; - size--; - userKey = Buffer.bytesToKeyValue(type, buffer, offset, size); - break; - - case FieldType.BVAL_ARRAY: - bVal.val = Buffer.littleBytesToLong(buffer, offset); - break; - } - offset += size; - } - return new Key(namespace, digest, setName, userKey); - } - - public Record parseRecord(boolean isOperation) { - Map bins = new LinkedHashMap<>(); - - for (int i = 0; i < opCount; i++) { - int opSize = Buffer.bytesToInt(buffer, offset); - byte particleType = buffer[offset + 5]; - byte nameSize = buffer[offset + 7]; - String name = Buffer.utf8ToString(buffer, offset + 8, nameSize); - offset += 4 + 4 + nameSize; - - int particleBytesSize = opSize - (4 + nameSize); - Object value = Buffer.bytesToParticle(particleType, buffer, offset, particleBytesSize); - offset += particleBytesSize; - - if (isOperation) { - if (bins.containsKey(name)) { - // Multiple values returned for the same bin. - Object prev = bins.get(name); - - if (prev instanceof Command.OpResults) { - // List already exists. Add to it. - Command.OpResults list = (Command.OpResults)prev; - list.add(value); - } - else { - // Make a list to store all values. - Command.OpResults list = new OpResults(); - list.add(prev); - list.add(value); - bins.put(name, list); - } - } - else { - bins.put(name, value); - } - } - else { - bins.put(name, value); - } - } - return new Record(bins, generation, expiration); - } - - public LuaValue getLuaAggregateValue(LuaInstance instance) { - // Parse aggregateValue. - int opSize = Buffer.bytesToInt(buffer, offset); - offset += 5; - byte particleType = buffer[offset]; - offset += 2; - byte nameSize = buffer[offset++]; - - String name = Buffer.utf8ToString(buffer, offset, nameSize); - offset += nameSize; - - int particleBytesSize = opSize - (4 + nameSize); - - if (!name.equals("SUCCESS")) { - if (name.equals("FAILURE")) { - Object value = Buffer.bytesToParticle(particleType, buffer, offset, particleBytesSize); - throw new AerospikeException(ResultCode.QUERY_GENERIC, value != null ? value.toString() : null); - } - else { - throw new AerospikeException(ResultCode.PARSE_ERROR, "Query aggregate expected bin name SUCCESS. Received " + name); - } - } - - LuaValue aggregateValue = instance.getLuaValue(particleType, buffer, offset, particleBytesSize); - offset += particleBytesSize; - return aggregateValue; - } -} diff --git a/proxy/src/com/aerospike/client/proxy/QueryAggregateCommandProxy.java b/proxy/src/com/aerospike/client/proxy/QueryAggregateCommandProxy.java deleted file mode 100644 index e084cfbd7..000000000 --- a/proxy/src/com/aerospike/client/proxy/QueryAggregateCommandProxy.java +++ /dev/null @@ -1,251 +0,0 @@ -/* - * Copyright 2012-2024 Aerospike, Inc. 
- * - * Portions may be licensed to Aerospike, Inc. under one or more contributor - * license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package com.aerospike.client.proxy; - -import java.util.concurrent.ArrayBlockingQueue; -import java.util.concurrent.BlockingQueue; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.ThreadFactory; -import java.util.concurrent.atomic.AtomicBoolean; - -import org.luaj.vm2.LuaInteger; -import org.luaj.vm2.LuaValue; - -import com.aerospike.client.AerospikeException; -import com.aerospike.client.Log; -import com.aerospike.client.ResultCode; -import com.aerospike.client.Value; -import com.aerospike.client.command.Command; -import com.aerospike.client.lua.LuaCache; -import com.aerospike.client.lua.LuaInputStream; -import com.aerospike.client.lua.LuaInstance; -import com.aerospike.client.lua.LuaOutputStream; -import com.aerospike.client.policy.QueryPolicy; -import com.aerospike.client.proxy.grpc.GrpcCallExecutor; -import com.aerospike.client.proxy.grpc.GrpcConversions; -import com.aerospike.client.query.ResultSet; -import com.aerospike.client.query.Statement; -import com.aerospike.proxy.client.Kvs; -import com.aerospike.proxy.client.QueryGrpc; - -/** - * Query aggregation command for the proxy. - */ -public final class QueryAggregateCommandProxy extends MultiCommandProxy implements Runnable { - private final BlockingQueue inputQueue; - private final ResultSetProxy resultSet; - private final LuaInstance lua; - private final Statement statement; - private final AtomicBoolean done; - private final long taskId; - private volatile Exception exception; - - public QueryAggregateCommandProxy( - GrpcCallExecutor executor, - ThreadFactory threadFactory, - QueryPolicy queryPolicy, - Statement statement, - long taskId - ) { - super(QueryGrpc.getQueryStreamingMethod(), executor, queryPolicy); - this.statement = statement; - this.taskId = taskId; - this.inputQueue = new ArrayBlockingQueue<>(500); - this.resultSet = new ResultSetProxy(this, queryPolicy.recordQueueSize); - this.done = new AtomicBoolean(); - - // Work around luaj LuaInteger static initialization bug. - // Calling LuaInteger.valueOf(long) is required because LuaValue.valueOf() does not have - // a method that takes in a long parameter. The problem is directly calling - // LuaInteger.valueOf(long) results in a static initialization error. - // - // If LuaValue.valueOf() is called before any luaj calls, then the static initializer in - // LuaInteger will be initialized properly. - LuaValue.valueOf(0); - - // Retrieve lua instance from cache. - lua = LuaCache.getInstance(); - - try { - // Start Lua virtual thread which reads from a queue, applies aggregate function and - // writes to a result set. - threadFactory.newThread(this).start(); - } - catch (RuntimeException re) { - // Put the lua instance back if thread creation fails. 
- LuaCache.putInstance(lua); - throw re; - } - } - - @Override - void writeCommand(Command command) { - // Nothing to do since there is no Aerospike payload. - } - - @Override - void parseResult(Parser parser) { - int resultCode = parser.parseHeader(); - parser.skipKey(); - - if (resultCode != 0) { - // Aggregation scans (with null query filter) will return KEY_NOT_FOUND_ERROR - // when the set does not exist on the target node. - if (resultCode == ResultCode.KEY_NOT_FOUND_ERROR) { - // Non-fatal error. - return; - } - throw new AerospikeException(resultCode); - } - - if (! super.hasNext) { - sendCompleted(); - return; - } - - if (parser.opCount != 1) { - throw new AerospikeException("Query aggregate expected exactly " + - "one bin. Received " + parser.opCount); - } - - LuaValue aggregateValue = parser.getLuaAggregateValue(lua); - - if (done.get()) { - throw new AerospikeException.QueryTerminated(); - } - - if (aggregateValue != null) { - try { - inputQueue.put(aggregateValue); - } - catch (InterruptedException ie) { - // Ignore - } - } - } - - @Override - void onFailure(AerospikeException ae) { - stop(ae); - } - - @Override - Kvs.AerospikeRequestPayload.Builder getRequestBuilder() { - // Set the query parameters in the Aerospike request payload. - Kvs.AerospikeRequestPayload.Builder builder = Kvs.AerospikeRequestPayload.newBuilder(); - Kvs.QueryRequest.Builder queryRequestBuilder = - Kvs.QueryRequest.newBuilder(); - - queryRequestBuilder.setQueryPolicy(GrpcConversions.toGrpc((QueryPolicy)policy)); - queryRequestBuilder.setStatement(GrpcConversions.toGrpc(statement, taskId, 0)); - builder.setQueryRequest(queryRequestBuilder.build()); - return builder; - } - - public void stop(Exception cause) { - // There is no need to stop threads if all threads have already completed. - if (done.compareAndSet(false, true)) { - exception = cause; - sendCancel(); - } - } - - private void sendCompleted() { - // Send end command to lua thread. - // It's critical that the end put succeeds. - // Loop through all interrupts. - while (true) { - try { - inputQueue.put(LuaValue.NIL); - break; - } - catch (InterruptedException ie) { - if (Log.debugEnabled()) { - Log.debug("Lua input queue " + taskId + " put " + - "interrupted"); - } - } - } - } - - private void sendCancel() { - // Clear lua input queue to ensure cancel is accepted. - inputQueue.clear(); - resultSet.abort(); - - // Send end command to lua input queue. - // It's critical that the end offer succeeds. - while (!inputQueue.offer(LuaValue.NIL)) { - // Queue must be full. Remove one item to make room. - if (inputQueue.poll() == null) { - // Can't offer or poll. Nothing further can be done. - if (Log.debugEnabled()) { - Log.debug("Lua input queue " + taskId + " both " + - "offer and poll failed on abort"); - } - break; - } - } - } - - public void checkForException() { - // Throw an exception if an error occurred. 
- if (exception != null) { - if (exception instanceof AerospikeException) { - throw (AerospikeException)exception; - } - else { - throw new AerospikeException(exception); - } - } - } - - public void run() { - try { - lua.loadPackage(statement); - - LuaValue[] args = new LuaValue[4 + statement.getFunctionArgs().length]; - args[0] = lua.getFunction(statement.getFunctionName()); - args[1] = LuaInteger.valueOf(2); - args[2] = new LuaInputStream(inputQueue); - args[3] = new LuaOutputStream(resultSet); - int count = 4; - - for (Value value : statement.getFunctionArgs()) { - args[count++] = value.getLuaValue(lua); - } - lua.call("apply_stream", args); - } - catch (Exception e) { - stop(e); - } - finally { - // Send end command to user's result set. - // If query was already cancelled, this put will be ignored. - resultSet.put(ResultSet.END); - LuaCache.putInstance(lua); - } - } - - long getTaskId() { - return taskId; - } - - public ResultSet getResultSet() { - return resultSet; - } -} diff --git a/proxy/src/com/aerospike/client/proxy/QueryCommandProxy.java b/proxy/src/com/aerospike/client/proxy/QueryCommandProxy.java deleted file mode 100644 index a8739e2a0..000000000 --- a/proxy/src/com/aerospike/client/proxy/QueryCommandProxy.java +++ /dev/null @@ -1,71 +0,0 @@ -/* - * Copyright 2012-2023 Aerospike, Inc. - * - * Portions may be licensed to Aerospike, Inc. under one or more contributor - * license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package com.aerospike.client.proxy; - -import com.aerospike.client.listener.RecordSequenceListener; -import com.aerospike.client.policy.QueryPolicy; -import com.aerospike.client.proxy.grpc.GrpcCallExecutor; -import com.aerospike.client.proxy.grpc.GrpcConversions; -import com.aerospike.client.query.PartitionFilter; -import com.aerospike.client.query.PartitionTracker; -import com.aerospike.client.query.Statement; -import com.aerospike.proxy.client.Kvs; -import com.aerospike.proxy.client.QueryGrpc; - -/** - * Implements asynchronous query for the proxy. - */ -public class QueryCommandProxy extends ScanQueryBaseCommandProxy { - private final Statement statement; - private final PartitionFilter partitionFilter; - private final long taskId; - private final long maxRecords; - - @SuppressWarnings("deprecation") - public QueryCommandProxy( - GrpcCallExecutor executor, - RecordSequenceListener listener, QueryPolicy queryPolicy, - Statement statement, - long taskId, - PartitionFilter partitionFilter, - PartitionTracker partitionTracker - ) { - super(false, QueryGrpc.getQueryStreamingMethod(), executor, queryPolicy, listener, partitionTracker); - this.statement = statement; - this.partitionFilter = partitionFilter; - this.taskId = taskId; - this.maxRecords = statement.getMaxRecords() > 0 ? statement.getMaxRecords() : queryPolicy.maxRecords; - } - - @Override - protected Kvs.AerospikeRequestPayload.Builder getRequestBuilder() { - // Set the query parameters in the Aerospike request payload. 
- Kvs.AerospikeRequestPayload.Builder builder = Kvs.AerospikeRequestPayload.newBuilder(); - Kvs.QueryRequest.Builder queryRequestBuilder = Kvs.QueryRequest.newBuilder(); - - queryRequestBuilder.setQueryPolicy(GrpcConversions.toGrpc((QueryPolicy)policy)); - - if (partitionFilter != null) { - queryRequestBuilder.setPartitionFilter(GrpcConversions.toGrpc(partitionFilter)); - } - - queryRequestBuilder.setStatement(GrpcConversions.toGrpc(statement, taskId, maxRecords)); - builder.setQueryRequest(queryRequestBuilder.build()); - return builder; - } -} diff --git a/proxy/src/com/aerospike/client/proxy/ReadCommandProxy.java b/proxy/src/com/aerospike/client/proxy/ReadCommandProxy.java deleted file mode 100644 index 4db4255c4..000000000 --- a/proxy/src/com/aerospike/client/proxy/ReadCommandProxy.java +++ /dev/null @@ -1,154 +0,0 @@ -/* - * Copyright 2012-2023 Aerospike, Inc. - * - * Portions may be licensed to Aerospike, Inc. under one or more contributor - * license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package com.aerospike.client.proxy; - -import com.aerospike.client.AerospikeException; -import com.aerospike.client.Key; -import com.aerospike.client.Record; -import com.aerospike.client.ResultCode; -import com.aerospike.client.command.Command; -import com.aerospike.client.listener.RecordListener; -import com.aerospike.client.policy.Policy; -import com.aerospike.client.proxy.grpc.GrpcCallExecutor; -import com.aerospike.proxy.client.KVSGrpc; -import com.aerospike.proxy.client.Kvs; - -import io.grpc.MethodDescriptor; - -public class ReadCommandProxy extends SingleCommandProxy { - private final RecordListener listener; - final Key key; - private final String[] binNames; - private final boolean isOperation; - - public ReadCommandProxy( - GrpcCallExecutor executor, - RecordListener listener, - Policy policy, - Key key, - String[] binNames - ) { - super(KVSGrpc.getReadStreamingMethod(), executor, policy); - this.listener = listener; - this.key = key; - this.binNames = binNames; - this.isOperation = false; - } - - public ReadCommandProxy( - MethodDescriptor methodDescriptor, - GrpcCallExecutor executor, - RecordListener listener, - Policy policy, - Key key, - boolean isOperation - ) { - super(methodDescriptor, executor, policy); - this.listener = listener; - this.key = key; - this.binNames = null; - this.isOperation = isOperation; - } - - @Override - void writeCommand(Command command) { - command.setRead(policy, key, binNames); - } - - @Override - void parseResult(Parser parser) { - Record record = parseRecordResult(parser); - - try { - listener.onSuccess(key, record); - } - catch (Throwable t) { - logOnSuccessError(t); - } - } - - protected final Record parseRecordResult(Parser parser) { - Record record = null; - int resultCode = parser.parseHeader(); - - switch (resultCode) { - case ResultCode.OK: - parser.skipKey(); - if (parser.opCount == 0) { - // Bin data was not returned. 
- record = new Record(null, parser.generation, parser.expiration); - } - else { - record = parser.parseRecord(isOperation); - } - break; - - case ResultCode.KEY_NOT_FOUND_ERROR: - handleNotFound(resultCode); - break; - - case ResultCode.FILTERED_OUT: - if (policy.failOnFilteredOut) { - throw new AerospikeException(resultCode); - } - break; - - case ResultCode.UDF_BAD_RESPONSE: - parser.skipKey(); - record = parser.parseRecord(isOperation); - handleUdfError(record, resultCode); - break; - - default: - throw new AerospikeException(resultCode); - } - - return record; - } - - protected void handleNotFound(int resultCode) { - // Do nothing in default case. Record will be null. - } - - private void handleUdfError(Record record, int resultCode) { - String ret = (String)record.bins.get("FAILURE"); - - if (ret == null) { - throw new AerospikeException(resultCode); - } - - String message; - int code; - - try { - String[] list = ret.split(":"); - code = Integer.parseInt(list[2].trim()); - message = list[0] + ':' + list[1] + ' ' + list[3]; - } - catch (Exception e) { - // Use generic exception if parse error occurs. - throw new AerospikeException(resultCode, ret); - } - - throw new AerospikeException(code, message); - } - - @Override - void onFailure(AerospikeException ae) { - listener.onFailure(ae); - } -} diff --git a/proxy/src/com/aerospike/client/proxy/ReadHeaderCommandProxy.java b/proxy/src/com/aerospike/client/proxy/ReadHeaderCommandProxy.java deleted file mode 100644 index 281382321..000000000 --- a/proxy/src/com/aerospike/client/proxy/ReadHeaderCommandProxy.java +++ /dev/null @@ -1,84 +0,0 @@ -/* - * Copyright 2012-2023 Aerospike, Inc. - * - * Portions may be licensed to Aerospike, Inc. under one or more contributor - * license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. 
- */ -package com.aerospike.client.proxy; - -import com.aerospike.client.AerospikeException; -import com.aerospike.client.Key; -import com.aerospike.client.Record; -import com.aerospike.client.ResultCode; -import com.aerospike.client.command.Command; -import com.aerospike.client.listener.RecordListener; -import com.aerospike.client.policy.Policy; -import com.aerospike.client.proxy.grpc.GrpcCallExecutor; -import com.aerospike.proxy.client.KVSGrpc; - -public final class ReadHeaderCommandProxy extends SingleCommandProxy { - private final RecordListener listener; - private final Key key; - - public ReadHeaderCommandProxy( - GrpcCallExecutor executor, - RecordListener listener, - Policy policy, - Key key - ) { - super(KVSGrpc.getGetHeaderStreamingMethod(), executor, policy); - this.listener = listener; - this.key = key; - } - - @Override - void writeCommand(Command command) { - command.setReadHeader(policy, key); - } - - @Override - void parseResult(Parser parser) { - Record record = null; - int resultCode = parser.parseHeader(); - - switch (resultCode) { - case ResultCode.OK: - record = new Record(null, parser.generation, parser.expiration); - break; - - case ResultCode.KEY_NOT_FOUND_ERROR: - break; - - case ResultCode.FILTERED_OUT: - if (policy.failOnFilteredOut) { - throw new AerospikeException(resultCode); - } - break; - - default: - throw new AerospikeException(resultCode); - } - - try { - listener.onSuccess(key, record); - } - catch (Throwable t) { - logOnSuccessError(t); - } - } - - @Override - void onFailure(AerospikeException ae) { - listener.onFailure(ae); - } -} diff --git a/proxy/src/com/aerospike/client/proxy/RecordSequenceListenerToCallback.java b/proxy/src/com/aerospike/client/proxy/RecordSequenceListenerToCallback.java deleted file mode 100644 index 3af4230ce..000000000 --- a/proxy/src/com/aerospike/client/proxy/RecordSequenceListenerToCallback.java +++ /dev/null @@ -1,50 +0,0 @@ -/* - * Copyright 2012-2023 Aerospike, Inc. - * - * Portions may be licensed to Aerospike, Inc. under one or more contributor - * license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. 
- */ -package com.aerospike.client.proxy; - -import java.util.concurrent.CompletableFuture; - -import com.aerospike.client.AerospikeException; -import com.aerospike.client.Key; -import com.aerospike.client.Record; -import com.aerospike.client.ScanCallback; -import com.aerospike.client.listener.RecordSequenceListener; - -class RecordSequenceListenerToCallback implements RecordSequenceListener { - private final ScanCallback callback; - private final CompletableFuture future; - - public RecordSequenceListenerToCallback(ScanCallback callback, CompletableFuture future) { - this.callback = callback; - this.future = future; - } - - @Override - public void onRecord(Key key, Record record) throws AerospikeException { - callback.scanCallback(key, record); - } - - @Override - public void onSuccess() { - future.complete(null); - } - - @Override - public void onFailure(AerospikeException ae) { - future.completeExceptionally(ae); - } -} \ No newline at end of file diff --git a/proxy/src/com/aerospike/client/proxy/RecordSequenceRecordSet.java b/proxy/src/com/aerospike/client/proxy/RecordSequenceRecordSet.java deleted file mode 100644 index b782cb447..000000000 --- a/proxy/src/com/aerospike/client/proxy/RecordSequenceRecordSet.java +++ /dev/null @@ -1,176 +0,0 @@ -/* - * Copyright 2012-2024 Aerospike, Inc. - * - * Portions may be licensed to Aerospike, Inc. under one or more contributor - * license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package com.aerospike.client.proxy; - -import java.util.concurrent.ArrayBlockingQueue; -import java.util.concurrent.BlockingQueue; - -import com.aerospike.client.AerospikeException; -import com.aerospike.client.Key; -import com.aerospike.client.Log; -import com.aerospike.client.Record; -import com.aerospike.client.listener.RecordSequenceListener; -import com.aerospike.client.query.KeyRecord; -import com.aerospike.client.query.RecordSet; - -/** - * A {@link RecordSequenceListener} that implements a {@link RecordSet}. - */ -public class RecordSequenceRecordSet extends RecordSet implements RecordSequenceListener { - private final long taskId; - private volatile boolean valid = true; - private final BlockingQueue queue; - protected volatile KeyRecord record; - private volatile AerospikeException exception; - - public RecordSequenceRecordSet(long taskId, int capacity) { - this.queue = new ArrayBlockingQueue<>(capacity); - this.taskId = taskId; - } - - /** - * Retrieve next record. This method will block until a record is retrieved - * or the query is cancelled. 
- * - * @return whether record exists - if false, no more records are available - */ - public boolean next() throws AerospikeException { - if (!valid) { - checkForException(); - return false; - } - - try { - record = queue.take(); - } - catch (InterruptedException ie) { - valid = false; - - if (Log.debugEnabled()) { - Log.debug("RecordSet " + taskId + " take " + - "interrupted"); - } - return false; - } - - if (record == END) { - valid = false; - checkForException(); - return false; - } - return true; - } - - private void checkForException() { - if (exception != null) { - abort(); - throw exception; - } - } - - protected void abort() { - valid = false; - queue.clear(); - - // Send end command to transaction thread. - // It's critical that the end offer succeeds. - while (!queue.offer(END)) { - // Queue must be full. Remove one item to make room. - if (queue.poll() == null) { - // Can't offer or poll. Nothing further can be done. - if (Log.debugEnabled()) { - Log.debug("RecordSet " + taskId + " both offer and poll failed on abort"); - } - break; - } - } - } - - public void close() { - valid = false; - } - - @Override - public Record getRecord() { - return record.record; - } - - @Override - public Key getKey() { - return record.key; - } - - @Override - public KeyRecord getKeyRecord() { - return record; - } - - @Override - public void onRecord(Key key, Record record) throws AerospikeException { - if (!valid) { - // Abort the query. - throw new AerospikeException.QueryTerminated(); - } - - try { - // This put will block if queue capacity is reached. - queue.put(new KeyRecord(key, record)); - } - catch (InterruptedException ie) { - if (Log.debugEnabled()) { - Log.debug("RecordSet " + taskId + " put interrupted"); - } - - // Valid may have changed. Check again. - if (valid) { - abort(); - } - - // Abort the query. - throw new AerospikeException.QueryTerminated(); - } - } - - @Override - public void onSuccess() { - if (!valid) { - return; - } - - try { - // This put will block if queue capacity is reached. - queue.put(END); - } - catch (InterruptedException ie) { - if (Log.debugEnabled()) { - Log.debug("RecordSet " + taskId + " put interrupted"); - } - - // Valid may have changed. Check again. - if (valid) { - abort(); - } - } - } - - @Override - public void onFailure(AerospikeException ae) { - exception = ae; - abort(); - } -} - diff --git a/proxy/src/com/aerospike/client/proxy/RecordSequenceToQueryListener.java b/proxy/src/com/aerospike/client/proxy/RecordSequenceToQueryListener.java deleted file mode 100644 index a301c2b87..000000000 --- a/proxy/src/com/aerospike/client/proxy/RecordSequenceToQueryListener.java +++ /dev/null @@ -1,50 +0,0 @@ -/* - * Copyright 2012-2023 Aerospike, Inc. - * - * Portions may be licensed to Aerospike, Inc. under one or more contributor - * license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. 
- */ -package com.aerospike.client.proxy; - -import java.util.concurrent.CompletableFuture; - -import com.aerospike.client.AerospikeException; -import com.aerospike.client.Key; -import com.aerospike.client.Record; -import com.aerospike.client.listener.RecordSequenceListener; -import com.aerospike.client.query.QueryListener; - -class RecordSequenceToQueryListener implements RecordSequenceListener { - private final QueryListener listener; - private final CompletableFuture future; - - public RecordSequenceToQueryListener(QueryListener listener, CompletableFuture future) { - this.listener = listener; - this.future = future; - } - - @Override - public void onRecord(Key key, Record record) throws AerospikeException { - listener.onRecord(key, record); - } - - @Override - public void onSuccess() { - future.complete(null); - } - - @Override - public void onFailure(AerospikeException ae) { - future.completeExceptionally(ae); - } -} \ No newline at end of file diff --git a/proxy/src/com/aerospike/client/proxy/ResultSetProxy.java b/proxy/src/com/aerospike/client/proxy/ResultSetProxy.java deleted file mode 100644 index 1052b3c99..000000000 --- a/proxy/src/com/aerospike/client/proxy/ResultSetProxy.java +++ /dev/null @@ -1,207 +0,0 @@ -/* - * Copyright 2012-2023 Aerospike, Inc. - * - * Portions may be licensed to Aerospike, Inc. under one or more contributor - * license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package com.aerospike.client.proxy; - -import java.io.Closeable; -import java.util.Iterator; -import java.util.concurrent.ArrayBlockingQueue; -import java.util.concurrent.BlockingQueue; - -import com.aerospike.client.AerospikeException; -import com.aerospike.client.Log; -import com.aerospike.client.query.ResultSet; - -/** - * This class manages result retrieval from queries. - * Multiple threads will retrieve results from the server nodes and put these results on the queue. - * The single user thread consumes these results from the queue. - */ -public class ResultSetProxy extends ResultSet { - private final QueryAggregateCommandProxy queryAggregateCommand; - - private final BlockingQueue queue; - - private volatile boolean valid = true; - - private Object row; - - /** - * Initialize result set with underlying producer/consumer queue. - */ - protected ResultSetProxy(QueryAggregateCommandProxy queryAggregateCommand, int capacity) { - this.queryAggregateCommand = queryAggregateCommand; - this.queue = new ArrayBlockingQueue<>(capacity); - } - - //------------------------------------------------------- - // Result traversal methods - //------------------------------------------------------- - - /** - * Retrieve next result. This method will block until a result is retrieved - * or the query is cancelled. 
- * - * @return whether result exists - if false, no more results are available - */ - public final boolean next() throws AerospikeException { - if (!valid) { - queryAggregateCommand.checkForException(); - return false; - } - - try { - row = queue.take(); - } - catch (InterruptedException ie) { - valid = false; - - if (Log.debugEnabled()) { - Log.debug("ResultSet " + queryAggregateCommand.getTaskId() + " take " + - "interrupted"); - } - return false; - } - - if (row == END) { - valid = false; - queryAggregateCommand.checkForException(); - return false; - } - return true; - } - - /** - * Close query. - */ - public final void close() { - valid = false; - - // Check if more results are available. - if (row != END && queue.poll() != END) { - // Some query threads may still be running. Stop these threads. - queryAggregateCommand.stop(new AerospikeException.QueryTerminated()); - } - } - - /** - * Provide Iterator for RecordSet. - */ - @Override - public Iterator iterator() { - return new ResultSetIterator(this); - } - - //------------------------------------------------------- - // Meta-data retrieval methods - //------------------------------------------------------- - - /** - * Get result. - */ - public final Object getObject() { - return row; - } - - //------------------------------------------------------- - // Methods for internal use only. - //------------------------------------------------------- - - /** - * Put object on the queue. - */ - public final boolean put(Object object) { - if (!valid) { - return false; - } - - try { - // This put will block if queue capacity is reached. - queue.put(object); - return true; - } - catch (InterruptedException ie) { - if (Log.debugEnabled()) { - Log.debug("ResultSet " + queryAggregateCommand.getTaskId() + " put " + - "interrupted"); - } - - // Valid may have changed. Check again. - if (valid) { - abort(); - } - return false; - } - } - - /** - * Abort retrieval with end token. - */ - public final void abort() { - valid = false; - queue.clear(); - - // Send end command to transaction thread. - // It's critical that the end offer succeeds. - while (!queue.offer(END)) { - // Queue must be full. Remove one item to make room. - if (queue.poll() == null) { - // Can't offer or poll. Nothing further can be done. - if (Log.debugEnabled()) { - Log.debug("ResultSet " + queryAggregateCommand.getTaskId() + " both" + - " " + - "offer and poll failed on abort"); - } - break; - } - } - } - - /** - * Support standard iteration interface for RecordSet. - */ - private static class ResultSetIterator implements Iterator, Closeable { - - private final ResultSetProxy resultSet; - private boolean more; - - ResultSetIterator(ResultSetProxy resultSet) { - this.resultSet = resultSet; - more = this.resultSet.next(); - } - - @Override - public boolean hasNext() { - return more; - } - - @Override - public Object next() { - Object obj = resultSet.row; - more = resultSet.next(); - return obj; - } - - @Override - public void remove() { - } - - @Override - public void close() { - resultSet.close(); - } - } -} diff --git a/proxy/src/com/aerospike/client/proxy/ScanCommandProxy.java b/proxy/src/com/aerospike/client/proxy/ScanCommandProxy.java deleted file mode 100644 index 250315275..000000000 --- a/proxy/src/com/aerospike/client/proxy/ScanCommandProxy.java +++ /dev/null @@ -1,80 +0,0 @@ -/* - * Copyright 2012-2023 Aerospike, Inc. - * - * Portions may be licensed to Aerospike, Inc. 
under one or more contributor - * license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package com.aerospike.client.proxy; - -import com.aerospike.client.listener.RecordSequenceListener; -import com.aerospike.client.policy.ScanPolicy; -import com.aerospike.client.proxy.grpc.GrpcCallExecutor; -import com.aerospike.client.proxy.grpc.GrpcConversions; -import com.aerospike.client.query.PartitionFilter; -import com.aerospike.client.query.PartitionTracker; -import com.aerospike.proxy.client.Kvs; -import com.aerospike.proxy.client.ScanGrpc; - -/** - * Implements asynchronous scan for the proxy. - */ -public class ScanCommandProxy extends ScanQueryBaseCommandProxy { - private final String namespace; - private final String setName; - private final String[] binNames; - private final PartitionFilter partitionFilter; - - public ScanCommandProxy( - GrpcCallExecutor executor, - ScanPolicy scanPolicy, - RecordSequenceListener listener, String namespace, - String setName, - String[] binNames, - PartitionFilter partitionFilter, - PartitionTracker partitionTracker - ) { - super(true, ScanGrpc.getScanStreamingMethod(), executor, scanPolicy, - listener, partitionTracker); - this.namespace = namespace; - this.setName = setName; - this.binNames = binNames; - this.partitionFilter = partitionFilter; - } - - @Override - Kvs.AerospikeRequestPayload.Builder getRequestBuilder() { - // Set the scan parameters in the Aerospike request payload. - Kvs.AerospikeRequestPayload.Builder builder = Kvs.AerospikeRequestPayload.newBuilder(); - Kvs.ScanRequest.Builder scanRequestBuilder = Kvs.ScanRequest.newBuilder(); - - scanRequestBuilder.setScanPolicy(GrpcConversions.toGrpc((ScanPolicy)policy)); - scanRequestBuilder.setNamespace(namespace); - - if (setName != null) { - scanRequestBuilder.setSetName(setName); - } - - if (binNames != null) { - for (String binName : binNames) { - scanRequestBuilder.addBinNames(binName); - } - } - - if (partitionFilter != null) { - scanRequestBuilder.setPartitionFilter(GrpcConversions.toGrpc(partitionFilter)); - } - - builder.setScanRequest(scanRequestBuilder.build()); - return builder; - } -} diff --git a/proxy/src/com/aerospike/client/proxy/ScanQueryBaseCommandProxy.java b/proxy/src/com/aerospike/client/proxy/ScanQueryBaseCommandProxy.java deleted file mode 100644 index 60d4fe38c..000000000 --- a/proxy/src/com/aerospike/client/proxy/ScanQueryBaseCommandProxy.java +++ /dev/null @@ -1,114 +0,0 @@ -/* - * Copyright 2012-2023 Aerospike, Inc. - * - * Portions may be licensed to Aerospike, Inc. under one or more contributor - * license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. 
You may obtain a copy of - * the License at http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package com.aerospike.client.proxy; - -import java.util.Collections; - -import com.aerospike.client.AerospikeException; -import com.aerospike.client.ResultCode; -import com.aerospike.client.cluster.Node; -import com.aerospike.client.command.Command; -import com.aerospike.client.listener.RecordSequenceListener; -import com.aerospike.client.policy.Policy; -import com.aerospike.client.proxy.grpc.GrpcCallExecutor; -import com.aerospike.client.query.PartitionTracker; -import com.aerospike.proxy.client.Kvs; - -import io.grpc.MethodDescriptor; - -/** - * Base class for Scan and Query. - */ -abstract class ScanQueryBaseCommandProxy extends MultiCommandProxy { - private final RecordSequenceListener listener; - private final PartitionTracker partitionTracker; - protected final PartitionTracker.NodePartitions dummyNodePartitions; - private final boolean isScan; - - public ScanQueryBaseCommandProxy( - boolean isScan, - MethodDescriptor methodDescriptor, - GrpcCallExecutor executor, - Policy policy, - RecordSequenceListener listener, - PartitionTracker partitionTracker - ) { - super(methodDescriptor, executor, policy); - this.isScan = isScan; - this.listener = listener; - this.partitionTracker = partitionTracker; - this.dummyNodePartitions = new PartitionTracker.NodePartitions(null, Node.PARTITIONS); - } - - @Override - protected void writeCommand(Command command) { - // Nothing to do since there is no Aerospike payload. - } - - @Override - void parseResult(Parser parser) { - RecordProxy recordProxy = parseRecordResult(parser, false, true, !isScan); - - if ((parser.info3 & Command.INFO3_PARTITION_DONE) != 0) { - // When an error code is received, mark partition as unavailable - // for the current round. Unavailable partitions will be retried - // in the next round. Generation is overloaded as partitionId. - if (partitionTracker != null && recordProxy.resultCode != ResultCode.OK) { - partitionTracker.partitionUnavailable(dummyNodePartitions, parser.generation); - } - return; - } - - if (recordProxy.resultCode == ResultCode.OK && !super.hasNext) { - if (partitionTracker != null && !partitionTracker.isComplete(false, policy, Collections.singletonList(dummyNodePartitions))) { - retry(); - return; - } - - // This is the end of scan marker record. - listener.onSuccess(); - return; - } - - if (recordProxy.resultCode != ResultCode.OK) { - throw new AerospikeException(recordProxy.resultCode); - } - - listener.onRecord(recordProxy.key, recordProxy.record); - - if (partitionTracker != null) { - if (isScan) { - partitionTracker.setDigest(dummyNodePartitions, recordProxy.key); - } - else { - partitionTracker.setLast(dummyNodePartitions, recordProxy.key, - recordProxy.bVal.val); - } - } - } - - @Override - void onFailure(AerospikeException ae) { - if (partitionTracker != null && partitionTracker.shouldRetry(dummyNodePartitions, ae)) { - if (retry()) { - return; - } - // Retry failed. Notify the listener. 
- } - listener.onFailure(ae); - } -} diff --git a/proxy/src/com/aerospike/client/proxy/SingleCommandProxy.java b/proxy/src/com/aerospike/client/proxy/SingleCommandProxy.java deleted file mode 100644 index 7c2e40e8b..000000000 --- a/proxy/src/com/aerospike/client/proxy/SingleCommandProxy.java +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Copyright 2012-2023 Aerospike, Inc. - * - * Portions may be licensed to Aerospike, Inc. under one or more contributor - * license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package com.aerospike.client.proxy; - -import com.aerospike.client.AerospikeException; -import com.aerospike.client.policy.Policy; -import com.aerospike.client.proxy.grpc.GrpcCallExecutor; -import com.aerospike.proxy.client.Kvs; - -import io.grpc.MethodDescriptor; - -public abstract class SingleCommandProxy extends CommandProxy { - - public SingleCommandProxy( - MethodDescriptor methodDescriptor, - GrpcCallExecutor executor, - Policy policy - ) { - super(methodDescriptor, executor, policy, 1); - } - - void onResponse(Kvs.AerospikeResponsePayload response) { - // Check response status for client errors (negative error codes). - // Server errors are checked in response payload in Parser. - int status = response.getStatus(); - - if (status != 0) { - notifyFailure(new AerospikeException(status)); - return; - } - - byte[] bytes = response.getPayload().toByteArray(); - Parser parser = new Parser(bytes); - parser.parseProto(); - parseResult(parser); - } - - abstract void parseResult(Parser parser); -} diff --git a/proxy/src/com/aerospike/client/proxy/WriteCommandProxy.java b/proxy/src/com/aerospike/client/proxy/WriteCommandProxy.java deleted file mode 100644 index 209cc5f0b..000000000 --- a/proxy/src/com/aerospike/client/proxy/WriteCommandProxy.java +++ /dev/null @@ -1,88 +0,0 @@ -/* - * Copyright 2012-2023 Aerospike, Inc. - * - * Portions may be licensed to Aerospike, Inc. under one or more contributor - * license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. 
- */ -package com.aerospike.client.proxy; - -import com.aerospike.client.AerospikeException; -import com.aerospike.client.Bin; -import com.aerospike.client.Key; -import com.aerospike.client.Operation; -import com.aerospike.client.ResultCode; -import com.aerospike.client.command.Command; -import com.aerospike.client.listener.WriteListener; -import com.aerospike.client.policy.WritePolicy; -import com.aerospike.client.proxy.grpc.GrpcCallExecutor; -import com.aerospike.proxy.client.KVSGrpc; - -public final class WriteCommandProxy extends SingleCommandProxy { - private final WriteListener listener; - private final WritePolicy writePolicy; - private final Key key; - private final Bin[] bins; - private final Operation.Type type; - - public WriteCommandProxy( - GrpcCallExecutor executor, - WriteListener listener, - WritePolicy writePolicy, - Key key, - Bin[] bins, - Operation.Type type - ) { - super(KVSGrpc.getWriteStreamingMethod(), executor, writePolicy); - this.listener = listener; - this.writePolicy = writePolicy; - this.key = key; - this.bins = bins; - this.type = type; - } - - @Override - void writeCommand(Command command) { - command.setWrite(writePolicy, type, key, bins); - } - - @Override - void parseResult(Parser parser) { - int resultCode = parser.parseResultCode(); - - switch (resultCode) { - case ResultCode.OK: - break; - - case ResultCode.FILTERED_OUT: - if (policy.failOnFilteredOut) { - throw new AerospikeException(resultCode); - } - break; - - default: - throw new AerospikeException(resultCode); - } - - try { - listener.onSuccess(key); - } - catch (Throwable t) { - logOnSuccessError(t); - } - } - - @Override - void onFailure(AerospikeException ae) { - listener.onFailure(ae); - } -} diff --git a/proxy/src/com/aerospike/client/proxy/auth/AuthTokenManager.java b/proxy/src/com/aerospike/client/proxy/auth/AuthTokenManager.java deleted file mode 100644 index d2a11ccbd..000000000 --- a/proxy/src/com/aerospike/client/proxy/auth/AuthTokenManager.java +++ /dev/null @@ -1,409 +0,0 @@ -/* - * Copyright 2012-2023 Aerospike, Inc. - * - * Portions may be licensed to Aerospike, Inc. under one or more contributor - * license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. 
- */ -package com.aerospike.client.proxy.auth; - -import java.io.Closeable; -import java.io.IOException; -import java.util.Base64; -import java.util.Map; -import java.util.concurrent.Executors; -import java.util.concurrent.ScheduledExecutorService; -import java.util.concurrent.ScheduledFuture; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.atomic.AtomicReference; - -import com.aerospike.client.AerospikeException; -import com.aerospike.client.Log; -import com.aerospike.client.ResultCode; -import com.aerospike.client.policy.ClientPolicy; -import com.aerospike.client.proxy.auth.credentials.BearerTokenCallCredentials; -import com.aerospike.client.proxy.grpc.GrpcChannelProvider; -import com.aerospike.client.proxy.grpc.GrpcConversions; -import com.aerospike.proxy.client.Auth; -import com.aerospike.proxy.client.AuthServiceGrpc; -import com.fasterxml.jackson.databind.ObjectMapper; -import com.google.common.util.concurrent.ThreadFactoryBuilder; - -import io.grpc.CallOptions; -import io.grpc.Deadline; -import io.grpc.ManagedChannel; -import io.grpc.stub.StreamObserver; - -/** - * An access token manager for Aerospike proxy. - */ -public class AuthTokenManager implements Closeable { - /** - * A conservative estimate of minimum amount of time in millis it takes for - * token refresh to complete. Auto refresh should be scheduled at least - * this amount before expiry, i.e, if remaining expiry time is less than - * this amount refresh should be scheduled immediately. - */ - private static final int refreshMinTime = 5000; - - /** - * A cap on refresh time in millis to throttle an auto refresh requests in - * case of token refresh failure. - */ - private static final int maxExponentialBackOff = 15000; - - /** - * Fraction of token expiry time to elapse before scheduling an auto - * refresh. - * - * @see AuthTokenManager#refreshMinTime - */ - private static final float refreshAfterFraction = 0.95f; - - /** - * An {@link ObjectMapper} to parse access token. - */ - private static final ObjectMapper objectMapper = new ObjectMapper(); - - private final ClientPolicy clientPolicy; - private final GrpcChannelProvider channelProvider; - private final ScheduledExecutorService executor; - private final AtomicBoolean isFetchingToken = new AtomicBoolean(false); - private final AtomicBoolean isClosed = new AtomicBoolean(false); - /** - * Count of consecutive errors while refreshing the token. - */ - private final AtomicInteger consecutiveRefreshErrors = new AtomicInteger(0); - - /** - * The error encountered when refreshing the token. It will be null when - * {@link #consecutiveRefreshErrors} is zero. - */ - private final AtomicReference refreshError = - new AtomicReference<>(null); - - private volatile AccessToken accessToken; - private volatile boolean fetchScheduled; - /** - * A {@link ScheduledFuture} holding reference to the next auto schedule task. - */ - private ScheduledFuture refreshFuture; - - public AuthTokenManager(ClientPolicy clientPolicy, GrpcChannelProvider grpcCallExecutor) { - this.clientPolicy = clientPolicy; - this.channelProvider = grpcCallExecutor; - this.executor = Executors.newSingleThreadScheduledExecutor( - new ThreadFactoryBuilder().setNameFormat("aerospike-auth-manager").build()); - this.accessToken = new AccessToken(System.currentTimeMillis(), 0, ""); - fetchToken(true); - } - - /** - * Fetch the new token if expired or scheduled for auto refresh. 
- * - * @param forceRefresh A boolean flag to refresh token forcefully. This is required for initialization and auto - * refresh. Auto refresh will get rejected as token won't be expired at that time, but we need - * to refresh it beforehand. If true, this function will run from the invoking thread, - * not from the scheduler. - */ - private void fetchToken(boolean forceRefresh) { - fetchScheduled = false; - if (isClosed.get() || !isTokenRequired() || isFetchingToken.get()) { - return; - } - if (shouldRefresh(forceRefresh)) { - try { - if (Log.debugEnabled()) { - Log.debug("Starting token refresh"); - } - Auth.AerospikeAuthRequest aerospikeAuthRequest = Auth.AerospikeAuthRequest.newBuilder() - .setUsername(clientPolicy.user).setPassword(clientPolicy.password).build(); - ManagedChannel channel = channelProvider.getControlChannel(); - if (channel == null) { - isFetchingToken.set(false); - // Channel is unavailable. Try again. - unsafeScheduleRefresh(10, true); - return; - } - - isFetchingToken.set(true); - AuthServiceGrpc.newStub(channel).withDeadline(Deadline.after(refreshMinTime, TimeUnit.MILLISECONDS)) - .get(aerospikeAuthRequest, new StreamObserver() { - @Override - public void onNext(Auth.AerospikeAuthResponse aerospikeAuthResponse) { - try { - accessToken = - parseToken(aerospikeAuthResponse.getToken()); - if (Log.debugEnabled()) { - Log.debug(String.format("Fetched token successfully " + - "with TTL %d", accessToken.ttl)); - } - unsafeScheduleNextRefresh(); - clearRefreshErrors(); - } - catch (Exception e) { - onFetchError(e); - } - } - - @Override - public void onError(Throwable t) { - onFetchError(t); - } - - @Override - public void onCompleted() { - isFetchingToken.set(false); - } - }); - - } - catch (Exception e) { - onFetchError(e); - } - } - } - - private void clearRefreshErrors() { - consecutiveRefreshErrors.set(0); - refreshError.set(null); - } - - private void updateRefreshErrors(Throwable t) { - consecutiveRefreshErrors.incrementAndGet(); - refreshError.set(t); - } - - private void onFetchError(Throwable t) { - updateRefreshErrors(t); - Exception e = new Exception("Error fetching access token", t); - Log.error(GrpcConversions.getDisplayMessage(e, GrpcConversions.MAX_ERR_MSG_LENGTH)); - unsafeScheduleNextRefresh(); - isFetchingToken.set(false); - } - - private boolean shouldRefresh(boolean forceRefresh) { - return forceRefresh || !isTokenValid(); - } - - private void unsafeScheduleNextRefresh() { - long ttl = accessToken.ttl; - long delay = (long)Math.floor(ttl * refreshAfterFraction); - - if (ttl - delay < refreshMinTime) { - // We need at least refreshMinTimeMillis to refresh, schedule - // immediately. - delay = ttl - refreshMinTime; - } - - if (!isTokenValid()) { - // Force immediate refresh. - delay = 0; - } - - if (delay == 0 && consecutiveRefreshErrors.get() > 0) { - // If we continue to fail then schedule will be too aggressive on fetching new token. Avoid that by increasing - // fetch delay. - - delay = (long)(Math.pow(2, consecutiveRefreshErrors.get()) * 1000); - if (delay > maxExponentialBackOff) { - delay = maxExponentialBackOff; - } - - // Handle wrap around. 
- if (delay < 0) { - delay = 0; - } - } - unsafeScheduleRefresh(delay, true); - } - - private void unsafeScheduleRefresh(long delay, boolean forceRefresh) { - if (isClosed.get() || !forceRefresh || fetchScheduled) { - return; - } - if (!executor.isShutdown()) { - //noinspection ConstantValue - refreshFuture = executor.schedule(() -> fetchToken(forceRefresh), delay, TimeUnit.MILLISECONDS); - fetchScheduled = true; - if (Log.debugEnabled()) { - Log.debug(String.format("Scheduled refresh after %d millis", delay)); - } - } - } - - private boolean isTokenRequired() { - return clientPolicy.user != null; - } - - private AccessToken parseToken(String token) throws IOException { - String claims = token.split("\\.")[1]; - byte[] decodedClaims = Base64.getUrlDecoder().decode(claims); - @SuppressWarnings("unchecked") - Map parsedClaims = objectMapper.readValue(decodedClaims, Map.class); - Object expiryToken = parsedClaims.get("exp"); - Object iat = parsedClaims.get("iat"); - if (expiryToken instanceof Integer && iat instanceof Integer) { - int ttl = ((Integer)expiryToken - (Integer)iat) * 1000; - if (ttl <= 0) { - throw new IllegalArgumentException("token 'iat' > 'exp'"); - } - // Set expiry based on local clock. - long expiry = System.currentTimeMillis() + ttl; - return new AccessToken(expiry, ttl, token); - } - else { - throw new IllegalArgumentException("Unsupported access token format"); - } - } - - public CallOptions setCallCredentials(CallOptions callOptions) { - if (isTokenRequired()) { - if (!isTokenValid()) { - if (Log.warnEnabled()) { - // TODO: This warns for evey call, spamming the output. - // Should be rate limited. Possibly once in a few seconds. - // This alerts that auto refresh didn't finish correctly. In normal scenario, this should never - // happen. - Log.warn("Trying to refresh token before setting into call"); - } - unsafeScheduleRefresh(0, false); - } - if (!isTokenValid()) { - throw new IllegalStateException("Access token has expired"); - } - return callOptions.withCallCredentials(new BearerTokenCallCredentials(accessToken.token)); - } - return callOptions; - } - - /** - * @return the minimum amount of time it takes for the token to refresh. - */ - public int getRefreshMinTime() { - return refreshMinTime; - } - - private boolean isTokenValid() { - AccessToken token = accessToken; - return !isTokenRequired() || (token != null && !token.hasExpired()); - } - - public TokenStatus getTokenStatus() { - if (isTokenValid()) { - return new TokenStatus(); - } - - Throwable error = refreshError.get(); - if (error != null) { - return new TokenStatus(error); - } - - AccessToken token = accessToken; - if (token != null && token.hasExpired()) { - return new TokenStatus(new AerospikeException(ResultCode.NOT_AUTHENTICATED, - "token has expired")); - } - - return new TokenStatus(new AerospikeException(ResultCode.NOT_AUTHENTICATED)); - } - - @Override - public void close() { - if (isClosed.getAndSet(true)) { - return; - } - - // TODO copied from java.util.concurrent.ExecutorService#close available from Java 19. 
- boolean terminated = executor.isTerminated(); - if (!terminated) { - if (refreshFuture != null) { - refreshFuture.cancel(true); - } - executor.shutdown(); - boolean interrupted = false; - while (!terminated) { - try { - terminated = executor.awaitTermination(1L, TimeUnit.DAYS); - } - catch (InterruptedException e) { - if (!interrupted) { - executor.shutdownNow(); - interrupted = true; - } - } - } - if (interrupted) { - Thread.currentThread().interrupt(); - } - } - } - - public static class TokenStatus { - private final Throwable error; - private final Boolean valid; - - public TokenStatus() { - this.valid = true; - this.error = null; - } - - public TokenStatus(Throwable error) { - this.valid = false; - this.error = error; - } - - /** - * @return true iff the token is valid. - */ - public Boolean isValid() { - return valid; - } - - /** - * Get the token fetch error. Should be used only when {@link #isValid()} - * returns false. - * - * @return the token fetch error. - */ - public Throwable getError() { - return error; - } - } - - private static class AccessToken { - /** - * Local token expiry timestamp in millis. - */ - private final long expiry; - /** - * Remaining time to live for the token in millis. - */ - private final long ttl; - /** - * An access token for Aerospike proxy. - */ - private final String token; - - public AccessToken(long expiry, long ttl, String token) { - this.expiry = expiry; - this.ttl = ttl; - this.token = token; - } - - public boolean hasExpired() { - return System.currentTimeMillis() > expiry; - } - } -} diff --git a/proxy/src/com/aerospike/client/proxy/auth/credentials/BearerTokenCallCredentials.java b/proxy/src/com/aerospike/client/proxy/auth/credentials/BearerTokenCallCredentials.java deleted file mode 100644 index 1bcadd4a2..000000000 --- a/proxy/src/com/aerospike/client/proxy/auth/credentials/BearerTokenCallCredentials.java +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Copyright 2012-2024 Aerospike, Inc. - * - * Portions may be licensed to Aerospike, Inc. under one or more contributor - * license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package com.aerospike.client.proxy.auth.credentials; - -import static io.grpc.Metadata.ASCII_STRING_MARSHALLER; - -import java.util.concurrent.Executor; - -import io.grpc.CallCredentials; -import io.grpc.Metadata; -import io.grpc.Status; - -/** - * A {@link CallCredentials} implementation to access Aerospike proxy. 
- */ -public class BearerTokenCallCredentials extends CallCredentials { - private static final String BEARER_TYPE = "Bearer"; - private static final Metadata.Key AUTHORIZATION_METADATA_KEY = Metadata.Key.of("Authorization", ASCII_STRING_MARSHALLER); - - private final String value; - - public BearerTokenCallCredentials(String value) { - this.value = value; - } - - @Override - public void applyRequestMetadata(RequestInfo requestInfo, Executor executor, MetadataApplier metadataApplier) { - executor.execute(() -> { - try { - Metadata headers = new Metadata(); - headers.put(AUTHORIZATION_METADATA_KEY, String.format("%s %s", BEARER_TYPE, value)); - metadataApplier.apply(headers); - } - catch (Throwable e) { - metadataApplier.fail(Status.UNAUTHENTICATED.withCause(e)); - } - }); - } - - @SuppressWarnings("deprecation") - @Override - public void thisUsesUnstableApi() { - // noop - } -} diff --git a/proxy/src/com/aerospike/client/proxy/grpc/DefaultGrpcChannelSelector.java b/proxy/src/com/aerospike/client/proxy/grpc/DefaultGrpcChannelSelector.java deleted file mode 100644 index ce6ed0047..000000000 --- a/proxy/src/com/aerospike/client/proxy/grpc/DefaultGrpcChannelSelector.java +++ /dev/null @@ -1,68 +0,0 @@ -/* - * Copyright 2012-2023 Aerospike, Inc. - * - * Portions may be licensed to Aerospike, Inc. under one or more contributor - * license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package com.aerospike.client.proxy.grpc; - -import java.util.ArrayList; -import java.util.Comparator; -import java.util.List; -import java.util.Random; - -/** - * A default gRPC stream selector which selects channel by the low and high - * water mark. - */ -public class DefaultGrpcChannelSelector implements GrpcChannelSelector { - private final int requestsLowWaterMark; - private final int requestsHighWaterMark; - private final Random random = new Random(); - - public DefaultGrpcChannelSelector(int requestsLowWaterMark, int requestsHighWaterMark) { - this.requestsLowWaterMark = requestsLowWaterMark; - this.requestsHighWaterMark = requestsHighWaterMark; - } - - @Override - public GrpcChannelExecutor select(List channels, GrpcStreamingCall call) { - // Sort by channel id. Leave original list as it is. - channels = new ArrayList<>(channels); - channels.sort(Comparator.comparingLong(GrpcChannelExecutor::getId)); - - // Select the first channel below the low watermark. - for (GrpcChannelExecutor channel : channels) { - if (channel.getOngoingRequests() < requestsLowWaterMark) { - return channel; - } - } - - // FIXME: it might be the case that the channel has opened - // maxConcurrentStreams but none of them are for this grpcCall. This - // also needs to be checked when selecting the channel. - - // All channels are above the low watermark, select the first channel - // below the high watermark. 
- for (GrpcChannelExecutor channel : channels) { - if (channel.getOngoingRequests() < requestsHighWaterMark) { - return channel; - } - } - - // TODO: maybe we should use in-flight bytes, number of streams, or - // some other parameter to select the channel. - // All channels are above the high water mark, select random channel. - return channels.get(random.nextInt(channels.size())); - } -} diff --git a/proxy/src/com/aerospike/client/proxy/grpc/DefaultGrpcStreamSelector.java b/proxy/src/com/aerospike/client/proxy/grpc/DefaultGrpcStreamSelector.java deleted file mode 100644 index 275f49ba9..000000000 --- a/proxy/src/com/aerospike/client/proxy/grpc/DefaultGrpcStreamSelector.java +++ /dev/null @@ -1,161 +0,0 @@ -/* - * Copyright 2012-2023 Aerospike, Inc. - * - * Portions may be licensed to Aerospike, Inc. under one or more contributor - * license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package com.aerospike.client.proxy.grpc; - -import java.util.Comparator; -import java.util.List; -import java.util.stream.Collectors; - -import com.aerospike.proxy.client.KVSGrpc; -import com.aerospike.proxy.client.Kvs; -import com.aerospike.proxy.client.QueryGrpc; -import com.aerospike.proxy.client.ScanGrpc; - -/** - * A default gRPC stream selector which selects a free stream. - */ -public class DefaultGrpcStreamSelector implements GrpcStreamSelector { - private final int maxConcurrentStreamsPerChannel; - private final int maxConcurrentRequestsPerStream; - private final int totalRequestsPerStream; - - /** - * Streaming calls with less than these many responses will be - * multiplexed on the same stream. - */ - private static final int LARGE_RESPONSE_CUTOFF = 10; - - public DefaultGrpcStreamSelector(int maxConcurrentStreamsPerChannel, int maxConcurrentRequestsPerStream, int totalRequestsPerStream) { - this.maxConcurrentStreamsPerChannel = maxConcurrentStreamsPerChannel; - this.maxConcurrentRequestsPerStream = maxConcurrentRequestsPerStream; - this.totalRequestsPerStream = totalRequestsPerStream; - } - - @Override - public SelectedStream select(List streams, GrpcStreamingCall call) { - final String fullMethodName = - call.getStreamingMethodDescriptor().getFullMethodName(); - - // Always use a non-multiplexed new stream for a scan, long query, and - // a large batch. - if (isScan(call) || isLongQuery(call) || isLargeBatch(call)) { - return new SelectedStream(1, 1); - } - - // Sort by stream id. Leave original list as it is. - List filteredStreams = streams.stream() - .filter(grpcStream -> - grpcStream.getMethodDescriptor().getFullMethodName() - .equals(fullMethodName) && grpcStream.canEnqueue() - ) - .sorted(Comparator.comparingInt(GrpcStream::getId)) - .collect(Collectors.toList()); - - // Select first stream with less than max concurrent requests. 
- for (GrpcStream stream : filteredStreams) { - if (stream.getOngoingRequests() < stream.getMaxConcurrentRequests()) { - return new SelectedStream(stream); - } - } - - if (streams.size() < maxConcurrentStreamsPerChannel) { - // Create new stream. - return new SelectedStream(maxConcurrentRequestsPerStream, totalRequestsPerStream); - } - - // TODO What is the probability of this occurring? Should some streams - // in a channel be reserved for rarely used API's? - if (filteredStreams.isEmpty()) { - // No slots to create a new stream. - return null; - } - - // Select stream with lowest percent of total requests executed. - GrpcStream selected = filteredStreams.get(0); - for (GrpcStream stream : filteredStreams) { - float executedPercent = - (float)stream.getExecutedRequests() / stream.getTotalRequestsToExecute(); - float selectedPercent = - (float)selected.getExecutedRequests() / stream.getTotalRequestsToExecute(); - if (executedPercent < selectedPercent) { - selected = stream; - } - } - return new SelectedStream(selected); - } - - private boolean isLargeBatch(GrpcStreamingCall call) { - String fullMethodName = - call.getStreamingMethodDescriptor().getFullMethodName(); - - String batchFullMethodName = - KVSGrpc.getBatchOperateMethod().getFullMethodName(); - String batchStreamingFullMethodName = - KVSGrpc.getBatchOperateStreamingMethod().getFullMethodName(); - - if(!batchFullMethodName.equals(fullMethodName) && - !batchStreamingFullMethodName.equals(fullMethodName)) { - return false; // Not a batch method. - } - - return call.getNumExpectedResponses() < LARGE_RESPONSE_CUTOFF; - } - - private boolean isScan(GrpcStreamingCall call) { - String fullMethodName = - call.getStreamingMethodDescriptor().getFullMethodName(); - String scanFullMethodName = - ScanGrpc.getScanMethod().getFullMethodName(); - String scanStreamingFullMethodName = - ScanGrpc.getScanStreamingMethod().getFullMethodName(); - return scanFullMethodName.equals(fullMethodName) || - scanStreamingFullMethodName.equals(fullMethodName); - } - - private boolean isLongQuery(GrpcStreamingCall call) { - String fullMethodName = - call.getStreamingMethodDescriptor().getFullMethodName(); - String queryFullMethodName = - QueryGrpc.getQueryMethod().getFullMethodName(); - String queryStreamingFullMethodName = - QueryGrpc.getQueryStreamingMethod().getFullMethodName(); - - if (!queryFullMethodName.equals(fullMethodName) && - !queryStreamingFullMethodName.equals(fullMethodName)) { - return false; // Not a query request. - } - - Kvs.QueryRequest queryRequest = call.getRequestBuilder().getQueryRequest(); - if (queryRequest.getBackground()) { - return false; // Background queries send back a single response. - } - - if (queryRequest.getStatement().getMaxRecords() < LARGE_RESPONSE_CUTOFF) { - return false; // Records returned in responses is small. - } - - if (!queryRequest.getStatement().getFunctionName().isEmpty()) { - return false; // Query is an aggregation statement. - } - - if (queryRequest.hasQueryPolicy() && queryRequest.getQueryPolicy().getShortQuery()) { - return false; // Query is a short query. - } - - return true; - } -} diff --git a/proxy/src/com/aerospike/client/proxy/grpc/GrpcCallExecutor.java b/proxy/src/com/aerospike/client/proxy/grpc/GrpcCallExecutor.java deleted file mode 100644 index 747942302..000000000 --- a/proxy/src/com/aerospike/client/proxy/grpc/GrpcCallExecutor.java +++ /dev/null @@ -1,292 +0,0 @@ -/* - * Copyright 2012-2023 Aerospike, Inc. - * - * Portions may be licensed to Aerospike, Inc. 
under one or more contributor - * license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package com.aerospike.client.proxy.grpc; - -import java.io.Closeable; -import java.util.Collections; -import java.util.List; -import java.util.Random; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.LongAdder; -import java.util.stream.Collectors; -import java.util.stream.IntStream; -import javax.annotation.Nullable; - -import com.aerospike.client.AerospikeException; -import com.aerospike.client.Host; -import com.aerospike.client.Log; -import com.aerospike.client.ResultCode; -import com.aerospike.client.proxy.auth.AuthTokenManager; -import com.aerospike.proxy.client.AboutGrpc; -import com.aerospike.proxy.client.Kvs; - -import io.grpc.Deadline; -import io.grpc.ManagedChannel; -import io.grpc.stub.StreamObserver; -import io.netty.channel.Channel; -import io.netty.channel.EventLoop; -import io.netty.channel.EventLoopGroup; -import io.netty.channel.epoll.Epoll; -import io.netty.channel.epoll.EpollEventLoopGroup; -import io.netty.channel.epoll.EpollSocketChannel; -import io.netty.channel.nio.NioEventLoopGroup; -import io.netty.channel.socket.nio.NioSocketChannel; -import io.netty.util.concurrent.DefaultThreadFactory; - -public class GrpcCallExecutor implements Closeable { - private static final int QUEUE_SIZE_UPPER_BOUND = 100 * 1024; - public static final int MIN_WARMUP_TIMEOUT = 5_000; - private final List channelExecutors; - private final List controlChannelExecutors; - private final GrpcClientPolicy grpcClientPolicy; - private final Random random = new Random(); - private final AtomicBoolean isClosed = new AtomicBoolean(false); - - /** - * Maximum allowed queue size. - */ - private final int maxQueueSize; - - private final LongAdder totalQueueSize = new LongAdder(); - private final GrpcChannelExecutor.ChannelTypeAndEventLoop controlChannelTypeAndEventLoop; - - public GrpcCallExecutor( - GrpcClientPolicy grpcClientPolicy, - @Nullable AuthTokenManager authTokenManager, - Host... 
hosts - ) { - if (hosts == null || hosts.length < 1) { - throw new AerospikeException(ResultCode.PARAMETER_ERROR, - "need at least one seed host"); - } - - this.grpcClientPolicy = grpcClientPolicy; - maxQueueSize = - Math.min(QUEUE_SIZE_UPPER_BOUND, - 5 * grpcClientPolicy.maxChannels - * grpcClientPolicy.maxConcurrentStreamsPerChannel - * grpcClientPolicy.maxConcurrentRequestsPerStream); - - this.controlChannelTypeAndEventLoop = getControlEventLoop(); - - try { - this.channelExecutors = - IntStream.range(0, grpcClientPolicy.maxChannels).mapToObj(value -> - new GrpcChannelExecutor(grpcClientPolicy, - new GrpcChannelExecutor.ChannelTypeAndEventLoop(grpcClientPolicy.channelType, - grpcClientPolicy.nextEventLoop()), - authTokenManager, hosts) - ).collect(Collectors.toList()); - this.controlChannelExecutors = - IntStream.range(0, 1).mapToObj(value -> - new GrpcChannelExecutor(grpcClientPolicy, - controlChannelTypeAndEventLoop, authTokenManager, - hosts) - ).collect(Collectors.toList()); - } - catch (Exception e) { - throw new AerospikeException(ResultCode.SERVER_ERROR, e); - } - } - - /** - * Warmup the channels with a call to the About gRPC endpoint. - */ - public void warmupChannels() { - final CountDownLatch doneSignal = - new CountDownLatch(channelExecutors.size()); - final int timeoutMillis = Math.max(MIN_WARMUP_TIMEOUT, - grpcClientPolicy.connectTimeoutMillis); - - channelExecutors.forEach(executor -> { - ManagedChannel channel = executor.getChannel(); - AboutGrpc.newStub(channel) - .withDeadline(Deadline.after(timeoutMillis, TimeUnit.MILLISECONDS)) - .get(Kvs.AboutRequest.newBuilder().build(), new StreamObserver() { - @Override - public void onNext(Kvs.AboutResponse value) { - doneSignal.countDown(); - } - - @Override - public void onError(Throwable t) { - Exception exception = new Exception("About call in warmup " + - "failed: ", t); - Log.debug(GrpcConversions.getDisplayMessage(exception - , GrpcConversions.MAX_ERR_MSG_LENGTH)); - doneSignal.countDown(); - } - - @Override - public void onCompleted() { - } - }); - }); - - try { - doneSignal.await(timeoutMillis, TimeUnit.MILLISECONDS); - } - catch (Throwable ignore) { - } - } - - public void execute(GrpcStreamingCall call) { - if (totalQueueSize.sum() > maxQueueSize) { - call.onError(new AerospikeException(ResultCode.NO_MORE_CONNECTIONS, - "Maximum queue " + maxQueueSize + " size exceeded")); - return; - } - - GrpcChannelExecutor executor = - grpcClientPolicy.grpcChannelSelector.select(channelExecutors, call); - - // TODO: In case of timeouts, lots of calls will end up filling the - // wait queues and timeout once removed for execution from the wait - // queue. Have a upper limit on the number of concurrent transactions - // per channel and reject this call if all the channels are full. - totalQueueSize.increment(); - - try { - executor.execute(new WrappedGrpcStreamingCall(call)); - } - catch (Exception e) { - // Call scheduling failed. 
- totalQueueSize.decrement(); - } - } - - public EventLoop getEventLoop() { - return channelExecutors.get(random.nextInt(channelExecutors.size())) - .getEventLoop(); - } - - public ManagedChannel getControlChannel() { - if (controlChannelExecutors.isEmpty()) { - return null; - } - return controlChannelExecutors.get(random.nextInt(controlChannelExecutors.size())) - .getChannel(); - } - - public ManagedChannel getChannel() { - if(channelExecutors.isEmpty()) { - return null; - } - return channelExecutors.get(random.nextInt(channelExecutors.size())) - .getChannel(); - } - - @Override - public void close() { - if (isClosed.getAndSet(true)) { - return; - } - - closeExecutors(channelExecutors); - closeExecutors(controlChannelExecutors); - - // Event loops should be closed after shutdown of channels. - closeEventLoops(); - } - - private GrpcChannelExecutor.ChannelTypeAndEventLoop getControlEventLoop() { - EventLoopGroup eventLoopGroup; - Class channelType; - DefaultThreadFactory tf = new DefaultThreadFactory("aerospike-proxy-control", true /*daemon*/); - - if (Epoll.isAvailable()) { - eventLoopGroup = new EpollEventLoopGroup(1, tf); - channelType = EpollSocketChannel.class; - } - else { - eventLoopGroup = new NioEventLoopGroup(1, tf); - channelType = NioSocketChannel.class; - } - - return new GrpcChannelExecutor.ChannelTypeAndEventLoop(channelType, (EventLoop)eventLoopGroup.iterator().next()); - } - - private void closeExecutors(List executors) { - for (GrpcChannelExecutor executor : executors) { - executor.shutdown(); - } - - // Wait for all executors to terminate. - while (true) { - boolean allTerminated = executors.stream() - .allMatch(GrpcChannelExecutor::isTerminated); - - if (allTerminated) { - return; - } - - Log.debug("Waiting for executors to shutdown with closeTimeout=" + grpcClientPolicy.closeTimeout); - try { - //noinspection BusyWait - Thread.sleep(1000); - } - catch (Throwable t) {/* Ignore*/} - } - } - - - private void closeEventLoops() { - if (grpcClientPolicy.closeEventLoops) { - closeEventLoops(grpcClientPolicy.eventLoops); - } - - // Close the control event loop. - closeEventLoops(Collections.singletonList(controlChannelTypeAndEventLoop.getEventLoop())); - } - - private void closeEventLoops(List eventLoops) { - eventLoops.stream() - .map(eventLoop -> - eventLoop.shutdownGracefully(0, grpcClientPolicy.terminationWaitMillis, TimeUnit.MILLISECONDS) - ).forEach(future -> { - try { - future.await(grpcClientPolicy.terminationWaitMillis); - } - catch (Exception e) { - // TODO log error? - } - } - ); - } - - private class WrappedGrpcStreamingCall extends GrpcStreamingCall { - WrappedGrpcStreamingCall(GrpcStreamingCall delegate) { - super(delegate); - } - - @Override - public void onNext(Kvs.AerospikeResponsePayload payload) { - if (!payload.getHasNext()) { - totalQueueSize.decrement(); - } - super.onNext(payload); - } - - @Override - public void onError(Throwable t) { - totalQueueSize.decrement(); - super.onError(t); - } - } -} diff --git a/proxy/src/com/aerospike/client/proxy/grpc/GrpcChannelExecutor.java b/proxy/src/com/aerospike/client/proxy/grpc/GrpcChannelExecutor.java deleted file mode 100644 index 7a7873a69..000000000 --- a/proxy/src/com/aerospike/client/proxy/grpc/GrpcChannelExecutor.java +++ /dev/null @@ -1,690 +0,0 @@ -/* - * Copyright 2012-2023 Aerospike, Inc. - * - * Portions may be licensed to Aerospike, Inc. under one or more contributor - * license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package com.aerospike.client.proxy.grpc; - -import java.io.FileInputStream; -import java.lang.reflect.Field; -import java.net.InetSocketAddress; -import java.net.SocketAddress; -import java.security.KeyStore; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.HashMap; -import java.util.LinkedList; -import java.util.List; -import java.util.Map; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.atomic.AtomicLong; -import java.util.concurrent.atomic.AtomicReference; -import java.util.stream.Collectors; -import javax.annotation.Nullable; -import javax.net.ssl.KeyManagerFactory; - -import com.aerospike.client.AerospikeException; -import com.aerospike.client.Host; -import com.aerospike.client.Log; -import com.aerospike.client.ResultCode; -import com.aerospike.client.policy.TlsPolicy; -import com.aerospike.client.proxy.AerospikeClientProxy; -import com.aerospike.client.proxy.auth.AuthTokenManager; -import com.aerospike.client.util.Util; -import com.aerospike.proxy.client.Kvs; - -import io.grpc.CallOptions; -import io.grpc.ManagedChannel; -import io.grpc.MethodDescriptor; -import io.grpc.NameResolver; -import io.grpc.netty.GrpcSslContexts; -import io.grpc.netty.NegotiationType; -import io.grpc.netty.NettyChannelBuilder; -import io.netty.channel.Channel; -import io.netty.channel.ChannelOption; -import io.netty.channel.EventLoop; -import io.netty.channel.WriteBufferWaterMark; -import io.netty.handler.ssl.ApplicationProtocolConfig; -import io.netty.handler.ssl.CipherSuiteFilter; -import io.netty.handler.ssl.ClientAuth; -import io.netty.handler.ssl.IdentityCipherSuiteFilter; -import io.netty.handler.ssl.JdkSslContext; -import io.netty.handler.ssl.SslContext; -import io.netty.handler.ssl.SslContextBuilder; -import io.netty.util.concurrent.ScheduledFuture; -import io.netty.util.internal.shaded.org.jctools.queues.MpscUnboundedArrayQueue; - -/** - * All gRPC requests on a HTTP/2 channel are handled by this class throughout - * the channel lifetime. - *

- * TODO: handle close of channel. - */ -public class GrpcChannelExecutor implements Runnable { - /** - * System property to configure gRPC override authority used as hostname - * in TLS verification of the proxy server. - */ - public static final String OVERRIDE_AUTHORITY = "com.aerospike.client" + - ".overrideAuthority"; - - private static final String AEROSPIKE_CLIENT_USER_AGENT = - "AerospikeClientJava/" + AerospikeClientProxy.Version; - - /** - * The delay between iterations of this executor. - *

- * TODO: how to select interval of execution? - */ - private static final long ITERATION_DELAY_MICROS = 250; - - /** - * Unique executor ids. - */ - private static final AtomicLong executorIdIndex = new AtomicLong(); - private static final AtomicInteger streamIdIndex = new AtomicInteger(); - - /** - * The HTTP/2 channel of this executor. - */ - private final ManagedChannel channel; - /** - * The Aerospike gRPC client policy. - */ - private final GrpcClientPolicy grpcClientPolicy; - /** - * The auth token manager. - */ - private final AuthTokenManager authTokenManager; - /** - * The event loop bound to the channel. All queued requests - * will be executed on this event loop. Some requests will be queued on - * this channel in the gRPC callback and some from the pending queue. - */ - private final EventLoop eventLoop; - /** - * Queued unary calls awaiting execution. - */ - private final MpscUnboundedArrayQueue pendingCalls = - new MpscUnboundedArrayQueue<>(32); - /** - * Queue of closed streams. - */ - private final List closedStreams = new ArrayList<>(32); - /** - * Map of stream id to streams. - */ - private final Map streams = new HashMap<>(); - /** - * Shutdown initiation time. - */ - private long shutdownStartTimeNanos; - /** - * Current state of the channel. - */ - private final AtomicReference channelState; - /** - * Unique id of the executor. - */ - private final long id; - // Statistics. - private final AtomicLong ongoingRequests = new AtomicLong(); - private final int drainLimit; - - /** - * The future to cancel the scheduled iteration of this executor. - */ - private ScheduledFuture iterateFuture; - - /** - * Time when the channel executor saw an invalid token. If this field is - * zero the token is valid. - *

- * Is not volatile because it is access from a single thread. - */ - private long tokenInvalidStartTime = 0; - - public GrpcChannelExecutor( - GrpcClientPolicy grpcClientPolicy, - ChannelTypeAndEventLoop channelTypeAndEventLoop, - @Nullable AuthTokenManager authTokenManager, - Host... hosts - ) { - if (grpcClientPolicy == null) { - throw new NullPointerException("grpcClientPolicy"); - } - if (hosts == null || hosts.length == 0) { - throw new IllegalArgumentException("hosts should be non-empty"); - } - - this.grpcClientPolicy = grpcClientPolicy; - this.drainLimit = - this.grpcClientPolicy.maxConcurrentStreamsPerChannel * grpcClientPolicy.maxConcurrentRequestsPerStream; - this.authTokenManager = authTokenManager; - this.id = executorIdIndex.getAndIncrement(); - - ChannelAndEventLoop channelAndEventLoop = - createGrpcChannel(channelTypeAndEventLoop.getEventLoop() - , channelTypeAndEventLoop.getChannelType(), hosts); - this.channel = channelAndEventLoop.managedChannel; - this.eventLoop = channelAndEventLoop.eventLoop; - - this.channelState = new AtomicReference<>(ChannelState.READY); - - this.iterateFuture = - channelAndEventLoop.eventLoop.scheduleAtFixedRate(this, 0, - ITERATION_DELAY_MICROS, TimeUnit.MICROSECONDS); - } - - private static SslContext getSslContext(TlsPolicy tlsPolicy) { - try { - SslContextBuilder sslContextBuilder = GrpcSslContexts.forClient(); - Field field = sslContextBuilder.getClass().getDeclaredField("apn"); - field.setAccessible(true); - ApplicationProtocolConfig applicationProtocolConfig = (ApplicationProtocolConfig)field.get(sslContextBuilder); - - if (tlsPolicy.context != null) { - CipherSuiteFilter csf = (tlsPolicy.ciphers != null) ? (iterable, list, set) -> { - if (tlsPolicy.ciphers != null) { - return tlsPolicy.ciphers; - } - return tlsPolicy.context.getSupportedSSLParameters().getCipherSuites(); - } : IdentityCipherSuiteFilter.INSTANCE; - - // Enforce ALPN in case NPN_AND_ALPN is the supported protocol. - // JdkSslContext fails with an exception when the protocol is - // NPN_AND_ALPN. - ApplicationProtocolConfig apn = applicationProtocolConfig; - if (applicationProtocolConfig.protocol() == ApplicationProtocolConfig.Protocol.NPN_AND_ALPN) { - // Constructor copied verbatim from package-private field - // io.grpc.netty.GrpcSslContexts.ALPN - apn = new ApplicationProtocolConfig( - ApplicationProtocolConfig.Protocol.ALPN, - ApplicationProtocolConfig.SelectorFailureBehavior.NO_ADVERTISE, - ApplicationProtocolConfig.SelectedListenerFailureBehavior.ACCEPT, - Collections.singletonList("h2")); - } - - return new JdkSslContext(tlsPolicy.context, true, null, csf, apn, ClientAuth.NONE, null, false); - } - - SslContextBuilder builder = SslContextBuilder.forClient(); - builder.applicationProtocolConfig(applicationProtocolConfig); - if (tlsPolicy.protocols != null) { - builder.protocols(tlsPolicy.protocols); - } - - if (tlsPolicy.ciphers != null) { - builder.ciphers(Arrays.asList(tlsPolicy.ciphers)); - } - - String keyStoreLocation = System.getProperty("javax.net.ssl.keyStore"); - - // Keystore is only required for mutual authentication. - if (keyStoreLocation != null) { - String keyStorePassword = System.getProperty("javax.net.ssl.keyStorePassword"); - char[] pass = (keyStorePassword != null) ? 
keyStorePassword.toCharArray() : null; - - KeyStore ks = KeyStore.getInstance(KeyStore.getDefaultType()); - - try (FileInputStream is = new FileInputStream(keyStoreLocation)) { - ks.load(is, pass); - } - - KeyManagerFactory kmf = KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm()); - kmf.init(ks, pass); - - builder.keyManager(kmf); - } - return builder.build(); - } - catch (Exception e) { - throw new AerospikeException("Failed to init netty TLS: " + Util.getErrorMessage(e)); - } - } - - /** - * Create a gRPC channel. - */ - @SuppressWarnings("deprecation") - private ChannelAndEventLoop createGrpcChannel(EventLoop eventLoop, Class channelType, Host[] hosts) { - NettyChannelBuilder builder; - - if (hosts.length == 1) { - builder = NettyChannelBuilder.forAddress(hosts[0].name, hosts[0].port); - } - else { - // Setup round-robin load balancing. - NameResolver.Factory nameResolverFactory = new MultiAddressNameResolverFactory( - Arrays.stream(hosts) - .map((host) -> new InetSocketAddress(host.name, host.port)) - .collect( - Collectors.toList())); - builder = NettyChannelBuilder.forTarget(String.format("%s:%d", - hosts[0].name, hosts[0].port)); - builder.nameResolverFactory(nameResolverFactory); - builder.defaultLoadBalancingPolicy("round_robin"); - } - - SingleEventLoopGroup eventLoopGroup = new SingleEventLoopGroup(eventLoop); - builder - .eventLoopGroup(eventLoopGroup) - .perRpcBufferLimit(128 * 1024 * 1024) - .channelType(channelType) - .negotiationType(NegotiationType.PLAINTEXT) - - // Have a very large limit because this response is coming from - // the proxy server. - .maxInboundMessageSize(128 * 1024 * 1024) - - // Execute callbacks in the assigned event loop. - // GrpcChannelExecutor.iterate and all of GrpcStream works on - // this assumption. - .directExecutor() - - // Retry logic is part of the client code. - .disableRetry() - - // Server and client flow control policy should be in sync. - .flowControlWindow(2 * 1024 * 1024) - - // TODO: is this beneficial? See https://github.com/grpc/grpc-java/issues/8260 - // for discussion. - // Enabling this feature create too many pings and the server - // sends GO_AWAY response. - // .initialFlowControlWindow(1024 * 1024) - - // TODO: Should these be part of GrpcClientPolicy? - .keepAliveWithoutCalls(true) - .keepAliveTime(25, TimeUnit.SECONDS) - .keepAliveTimeout(1, TimeUnit.MINUTES); - - if (grpcClientPolicy.tlsPolicy != null) { - builder.sslContext(getSslContext(grpcClientPolicy.tlsPolicy)); - builder.negotiationType(NegotiationType.TLS); - } - else { - builder.usePlaintext(); - } - - // For testing. Set this to force a hostname irrespective of the - // target IP for TLS verification. A simpler way than adding a DNS - // entry in the hosts file. 
- String authorityProperty = System.getProperty(OVERRIDE_AUTHORITY); - - if (authorityProperty != null && !authorityProperty.trim().isEmpty()) { - builder.overrideAuthority(authorityProperty); - } - - //setting buffer size can improve I/O - builder.withOption(ChannelOption.SO_SNDBUF, 1048576); - builder.withOption(ChannelOption.SO_RCVBUF, 1048576); - builder.withOption(ChannelOption.TCP_NODELAY, true); - builder.withOption(ChannelOption.CONNECT_TIMEOUT_MILLIS, - grpcClientPolicy.connectTimeoutMillis); - builder.userAgent(AEROSPIKE_CLIENT_USER_AGENT); - - // TODO: better to have a receive buffer predictor - //builder.withOption(ChannelOption.valueOf("receiveBufferSizePredictorFactory"), new AdaptiveReceiveBufferSizePredictorFactory(MIN_PACKET_SIZE, INITIAL_PACKET_SIZE, MAX_PACKET_SIZE)) - - //if the server is sending 1000 messages per sec, optimum write buffer watermarks will - //prevent unnecessary throttling, Check NioSocketChannelConfig doc - builder.withOption(ChannelOption.WRITE_BUFFER_WATER_MARK, - new WriteBufferWaterMark(32 * 1024, 64 * 1024)); - - ManagedChannel channel = builder.build(); - // TODO: ensure it is a single threaded event loop. - return new ChannelAndEventLoop(channel, eventLoop); - } - - public void execute(GrpcStreamingCall call) { - if (channelState.get() != ChannelState.READY) { - call.failIfNotComplete(ResultCode.CLIENT_ERROR); - return; - } - // TODO: add always succeeds? - ongoingRequests.getAndIncrement(); - pendingCalls.add(call); - } - - @Override - public void run() { - try { - iterate(); - } - catch (Exception e) { - // TODO: signal failure, close channel? - if (Log.debugEnabled()) { - Log.debug("Uncaught exception in " + this + ":" + e); - } - } - } - - /** - * Process a single iteration. - */ - private void iterate() { - switch (channelState.get()) { - case READY: - executeCalls(); - break; - - case SHUTTING_DOWN: - boolean allCallsCompleted = pendingCalls.isEmpty() && - streams.values().stream() - .allMatch(grpcStream -> grpcStream.getOngoingRequests() == 0); - - int closeTimeout = grpcClientPolicy.closeTimeout; - if (closeTimeout < 0) { - // Shutdown immediately. - shutdownNow(); - } - else if (closeTimeout == 0) { - // Wait for all pending calls to complete. - if (allCallsCompleted) { - shutdownNow(); - } - else { - Log.debug(this + " shutdown: awaiting completion of " + - "all calls for closeTimeout=0."); - executeCalls(); - } - } - else { - // Wait for all pending calls to complete or timeout. - long elapsedTimeMillis = - TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - shutdownStartTimeNanos); - if (allCallsCompleted || elapsedTimeMillis >= closeTimeout) { - shutdownNow(); - } - else { - Log.debug(this + " shutdown: awaiting closeTimeout=" - + closeTimeout + ", elapsed time=" + elapsedTimeMillis); - executeCalls(); - } - } - break; - - case SHUTDOWN: - Log.warn("Iterate being called after channel shutdown"); - break; - - default: - Log.error("Unknown channel state: " + channelState.get()); - break; - } - } - - private void executeCalls() { - if (authTokenManager != null) { - AuthTokenManager.TokenStatus tokenStatus = - authTokenManager.getTokenStatus(); - if (!tokenStatus.isValid()) { - expireOrDrainOnInvalidToken(tokenStatus.getError()); - return; - } - } - - // Schedule pending calls onto streams. - pendingCalls.drain(this::scheduleCalls, drainLimit); - - // Execute stream calls. - streams.values().forEach(GrpcStream::executePendingCalls); - - // Process closed streams. 
- closedStreams.forEach(this::processClosedStream); - closedStreams.clear(); - } - - /** - * Expire queued calls and drain queue if required when we have an invalid - * auth token. - */ - private void expireOrDrainOnInvalidToken(Throwable tokenError) { - assert authTokenManager != null; - - if (tokenInvalidStartTime == 0) { - tokenInvalidStartTime = System.currentTimeMillis(); - } - - // Token is invalid. This happens at the start before the first - // access token fetch or if the token expires and could not be - // refreshed. - pendingCalls.forEach(call -> { - if (!call.hasCompleted() && - (call.hasSendDeadlineExpired() || call.hasExpired())) { - call.onError(tokenError); - } - }); - - - long tokenWaitTimeout = tokenInvalidStartTime + authTokenManager.getRefreshMinTime() * 3L; - - if (tokenWaitTimeout < System.currentTimeMillis()) { - tokenInvalidStartTime = 0; - // It's been too long without a valid access token. Drain and - // report all queued calls as failed. - pendingCalls.drain(call -> call.failIfNotComplete(tokenError)); - } - } - - /** - * Schedule the call on a stream. - */ - private void scheduleCalls(GrpcStreamingCall call) { - if (call.hasCompleted()) { - // Most likely expired while in queue. - return; - } - - if (call.hasSendDeadlineExpired() || call.hasExpired()) { - call.onError(new AerospikeException.Timeout(call.getPolicy(), - call.getIteration())); - return; - } - - // The stream will be close by the selector. - GrpcStreamSelector.SelectedStream selectedStream = - grpcClientPolicy.grpcStreamSelector.select(new ArrayList<>(streams.values()), call); - - if (selectedStream == null) { - // Requeue - pendingCalls.add(call); - return; - } - - if (selectedStream.useExistingStream()) { - selectedStream.getStream().enqueue(call); - return; - } - - scheduleCallsOnNewStream(call.getStreamingMethodDescriptor(), call, - selectedStream.getMaxConcurrentRequestsPerStream(), - selectedStream.getTotalRequestsPerStream()); - } - - private void processClosedStream(GrpcStream grpcStream) { - if (streams.remove(grpcStream.getId()) == null) { - // Should never happen. - return; - } - - // Schedule each of the pending calls. - pendingCalls.addAll(grpcStream.getPendingCalls()); - } - - /** - * Schedule calls in pendingCalls on a new stream. - */ - private void scheduleCallsOnNewStream( - MethodDescriptor methodDescriptor, - GrpcStreamingCall call, - int maxConcurrentRequestsPerStream, int totalRequestsPerStream - ) { - if (maxConcurrentRequestsPerStream <= 0) { // Should never happen. - maxConcurrentRequestsPerStream = - grpcClientPolicy.maxConcurrentRequestsPerStream; - } - if (totalRequestsPerStream <= 0) { // Should never happen. - totalRequestsPerStream = grpcClientPolicy.totalRequestsPerStream; - } - - CallOptions options = grpcClientPolicy.callOptions; - if (authTokenManager != null) { - try { - options = authTokenManager.setCallCredentials(grpcClientPolicy.callOptions); - } - catch (Exception e) { - AerospikeException aerospikeException = - new AerospikeException(ResultCode.NOT_AUTHENTICATED, e); - call.onError(aerospikeException); - return; - } - } - - LinkedList streamPendingCalls = new LinkedList<>(); - streamPendingCalls.add(call); - GrpcStream stream = new GrpcStream(this, methodDescriptor, - streamPendingCalls, options, nextStreamId(), eventLoop, - maxConcurrentRequestsPerStream, totalRequestsPerStream); - - streams.put(stream.getId(), stream); - } - - /** - * Start the shutdown of this channel. Any new requests will be rejected. - * The shutdown respects the clientTimeout setting. 
Use - * {@link #isTerminated()} to see if shutdown is complete. - */ - public void shutdown() { - if (!channelState.compareAndSet(ChannelState.READY, ChannelState.SHUTTING_DOWN)) { - return; - } - - shutdownStartTimeNanos = System.nanoTime(); - - // If inside event loop thread, cannot wait for calls in this channel - // to complete without deadlocking, abort and shutdown now. - if (eventLoop.inEventLoop()) { - shutdownNow(); - } - } - - /** - * WARN This method should always be called from the [eventLoop] - * thread. - */ - private void shutdownNow() { - if (channelState.getAndSet(ChannelState.SHUTDOWN) == ChannelState.SHUTDOWN) { - return; - } - - closeAllPendingCalls(); - channel.shutdownNow(); - iterateFuture.cancel(false); - } - - private void closeAllPendingCalls() { - while (!pendingCalls.isEmpty()) { - pendingCalls.drain(call -> { - try { - call.failIfNotComplete(ResultCode.CLIENT_ERROR); - } - catch (Exception e) { - Log.error("Error on call close " + call + ": " + e.getMessage()); - } - }); - } - streams.values().forEach(stream -> { - try { - stream.closePendingCalls(); - } - catch (Exception e) { - Log.error("Error closing stream " + stream + ": " + e.getMessage()); - } - }); - streams.clear(); - } - - boolean isTerminated() { - return channelState.get() == ChannelState.SHUTDOWN && channel.isTerminated(); - } - - private int nextStreamId() { - return streamIdIndex.getAndIncrement(); - } - - @Override - public String toString() { - return "GrpcChannelExecutor{id=" + id + '}'; - } - - public long getId() { - return id; - } - - public long getOngoingRequests() { - return ongoingRequests.get(); - } - - void onRequestCompleted() { - ongoingRequests.getAndDecrement(); - } - - public void onStreamClosed(GrpcStream grpcStream) { - closedStreams.add(grpcStream); - } - - public ManagedChannel getChannel() { - return channel; - } - - public EventLoop getEventLoop() { - return eventLoop; - } - - private static class ChannelAndEventLoop { - final ManagedChannel managedChannel; - final EventLoop eventLoop; - - private ChannelAndEventLoop(ManagedChannel managedChannel, EventLoop eventLoop) { - this.managedChannel = managedChannel; - this.eventLoop = eventLoop; - } - } - - private enum ChannelState { - READY, SHUTTING_DOWN, SHUTDOWN - } - - public static class ChannelTypeAndEventLoop { - private final Class channelType; - private final EventLoop eventLoop; - - public ChannelTypeAndEventLoop(Class channelType, EventLoop eventLoop) { - this.channelType = channelType; - this.eventLoop = eventLoop; - } - - public Class getChannelType() { - return channelType; - } - - public EventLoop getEventLoop() { - return eventLoop; - } - } -} diff --git a/proxy/src/com/aerospike/client/proxy/grpc/GrpcClientPolicy.java b/proxy/src/com/aerospike/client/proxy/grpc/GrpcClientPolicy.java deleted file mode 100644 index cc1c24f7e..000000000 --- a/proxy/src/com/aerospike/client/proxy/grpc/GrpcClientPolicy.java +++ /dev/null @@ -1,382 +0,0 @@ -/* - * Copyright 2012-2023 Aerospike, Inc. - * - * Portions may be licensed to Aerospike, Inc. under one or more contributor - * license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. 
You may obtain a copy of - * the License at http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package com.aerospike.client.proxy.grpc; - -import java.util.List; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.stream.Collectors; -import java.util.stream.StreamSupport; - -import javax.annotation.Nullable; - -import com.aerospike.client.policy.ClientPolicy; -import com.aerospike.client.policy.TlsPolicy; - -import io.grpc.CallOptions; -import io.netty.channel.Channel; -import io.netty.channel.EventLoop; -import io.netty.channel.EventLoopGroup; -import io.netty.channel.epoll.Epoll; -import io.netty.channel.epoll.EpollEventLoopGroup; -import io.netty.channel.epoll.EpollSocketChannel; -import io.netty.channel.nio.NioEventLoopGroup; -import io.netty.channel.socket.nio.NioSocketChannel; -import io.netty.util.concurrent.DefaultThreadFactory; - -/** - * gRPC Aerospike proxy client policy. All the knobs and configs that - * affect the working of the gRPC proxy client are combined into this policy - * object. - */ -public class GrpcClientPolicy { - /** - * The event loops to process the gRPC HTTP/2 requests. - */ - public final List eventLoops; - - /** - * The type of the eventLoops. - */ - public final Class channelType; - - /** - * Should the event loops be closed in close. - */ - public final boolean closeEventLoops; - - /** - * Maximum number of HTTP/2 channels (connections) to open to the Aerospike - * gRPC proxy server. - *

- * Generally HTTP/2 based gRPC recommends a single channel to be - * sufficient for most purposes. In our performance experiments we have - * found that any number greater than 8 yields no extra - * performance gains. - */ - public final int maxChannels; - - /** - * Maximum number of concurrent HTTP/2 streams to have in-flight per HTTP/2 - * channel (connection). - *

- * Generally HTTP/2 servers restrict the number of concurrent HTTP/2 streams - * to about a 100 on a channel (connection). - */ - public final int maxConcurrentStreamsPerChannel; - - /** - * Maximum number of concurrent requests that are in-flight per streaming - * HTTP/2 call. - *

- * The Aerospike gRPC proxy server implements streaming for unary calls - * like Aerospike get, put, operate, etc to improve latency and throughput. - * maxConcurrentRequestsPerStream specifies the number of - * concurrent requests that can be sent on a single unary call based stream. - *

- * NOTE: This policy does not apply to queries, scans, etc. - */ - public final int maxConcurrentRequestsPerStream; - - /** - * Total number of HTTP/2 requests that are sent on a stream, after which - * the stream is closed. - *

- * The Aerospike gRPC proxy server implements streaming for unary calls - * like Aerospike get, put, operate, etc to improve latency and throughput. - * totalRequestsPerStream specifies the total number of - * requests that are sent on the stream, after which the stream is closed. - *

- * Requests to the Aerospike gRPC proxy server will be routed through a - * HTTP/2 load balancer over the public internet. HTTP/2 load balancer - * splits requests on a single HTTP/2 channel (connection) across the proxy - * servers, but it will send all requests on a HTTP/2 stream to a single - * gRPC Aerospike proxy server. This policy ensures that the requests are - * evenly load balanced across the gRPC Aerospike proxy servers. - *

- * NOTE: This policy does not apply to queries, scans, etc. - */ - public final int totalRequestsPerStream; - - /** - * The connection timeout in milliseconds when creating a new HTTP/2 - * channel (connection) to a gRPC Aerospike proxy server. - */ - public final int connectTimeoutMillis; - - /** - * See {@link ClientPolicy#closeTimeout}. - */ - public final int closeTimeout; - - /** - * Strategy to select a channel for a gRPC request. - */ - - public final GrpcChannelSelector grpcChannelSelector; - - /** - * Strategy to select a stream for a gRPC request. - */ - public final GrpcStreamSelector grpcStreamSelector; - - /** - * Call options. - */ - public final CallOptions callOptions; - - /** - * The TLS policy to connect to the gRPC Aerospike proxy server. - *

- * NOTE: The channel (connection) will be non-encrypted if this policy is null. - */ - @Nullable - public final TlsPolicy tlsPolicy; - - /** - * Milliseconds to wait for termination of the channels. Should be - * greater than the deadlines. The implementation is best-effort, its - * possible termination takes more time than this. - */ - public final long terminationWaitMillis; - - /** - * Index to get the next event loop. - */ - private final AtomicInteger eventLoopIndex = new AtomicInteger(0); - - private GrpcClientPolicy( - int maxChannels, - int maxConcurrentStreamsPerChannel, - int maxConcurrentRequestsPerStream, - int totalRequestsPerStream, - int connectTimeoutMillis, - long terminationWaitMillis, - int closeTimeout, GrpcChannelSelector grpcChannelSelector, - GrpcStreamSelector grpcStreamSelector, - CallOptions callOptions, - List eventLoops, - Class channelType, - boolean closeEventLoops, - @Nullable TlsPolicy tlsPolicy - ) { - this.maxChannels = maxChannels; - this.maxConcurrentStreamsPerChannel = maxConcurrentStreamsPerChannel; - this.maxConcurrentRequestsPerStream = maxConcurrentRequestsPerStream; - this.totalRequestsPerStream = totalRequestsPerStream; - this.connectTimeoutMillis = connectTimeoutMillis; - this.terminationWaitMillis = terminationWaitMillis; - this.closeTimeout = closeTimeout; - this.grpcChannelSelector = grpcChannelSelector; - this.grpcStreamSelector = grpcStreamSelector; - this.callOptions = callOptions; - this.eventLoops = eventLoops; - this.channelType = channelType; - this.closeEventLoops = closeEventLoops; - this.tlsPolicy = tlsPolicy; - } - - public static Builder newBuilder( - @Nullable List eventLoops, - @Nullable Class channelType - ) { - Builder builder = new Builder(); - - if (eventLoops == null || channelType == null) { - builder.closeEventLoops = true; - - DefaultThreadFactory tf = - new DefaultThreadFactory("aerospike-proxy", true /*daemon */); - - // TODO: select number of event loop threads? - EventLoopGroup eventLoopGroup; - - if (Epoll.isAvailable()) { - eventLoopGroup = new EpollEventLoopGroup(0, tf); - builder.channelType = EpollSocketChannel.class; - } - else { - eventLoopGroup = new NioEventLoopGroup(0, tf); - builder.channelType = NioSocketChannel.class; - } - - builder.eventLoops = StreamSupport.stream(eventLoopGroup.spliterator(), false) - .map(eventExecutor -> (EventLoop)eventExecutor) - .collect(Collectors.toList()); - } - else { - builder.channelType = channelType; - builder.eventLoops = eventLoops; - builder.closeEventLoops = false; - } - - // TODO: justify defaults. - builder.maxChannels = 8; - - // Multiple requests should be sent on a stream at once to enhance - // performance. So `maxConcurrentRequestsPerStream` should be the - // ideal batch size of the requests. - // TODO: maybe these parameters depend on the payload size? 
- builder.maxConcurrentStreamsPerChannel = 8; - builder.maxConcurrentRequestsPerStream = builder.totalRequestsPerStream = 128; - - builder.connectTimeoutMillis = 5000; - builder.terminationWaitMillis = 30000; - - builder.tlsPolicy = null; - return builder; - } - - public EventLoop nextEventLoop() { - int i = eventLoopIndex.getAndIncrement() % eventLoops.size(); - return eventLoops.get(i); - } - - public static class Builder { - private List eventLoops; - private Class channelType; - private boolean closeEventLoops; - private int maxChannels; - private int maxConcurrentStreamsPerChannel; - private int maxConcurrentRequestsPerStream; - private int totalRequestsPerStream; - private int connectTimeoutMillis; - @Nullable - private TlsPolicy tlsPolicy; - @Nullable - private GrpcChannelSelector grpcChannelSelector; - @Nullable - private GrpcStreamSelector grpcStreamSelector; - @Nullable - private CallOptions callOptions; - private long terminationWaitMillis; - private int closeTimeout; - - private Builder() { - } - - public GrpcClientPolicy build() { - if (grpcChannelSelector == null) { - // TODO: how should low and high water mark be selected? - int hwm = - maxConcurrentStreamsPerChannel * maxConcurrentRequestsPerStream; - int lwm = Math.max(16, (int)(0.8 * hwm)); - grpcChannelSelector = - new DefaultGrpcChannelSelector(lwm, hwm); - } - - if (grpcStreamSelector == null) { - grpcStreamSelector = - new DefaultGrpcStreamSelector(maxConcurrentStreamsPerChannel, maxConcurrentRequestsPerStream, totalRequestsPerStream); - } - - if (callOptions == null) { - callOptions = CallOptions.DEFAULT; - } - - return new GrpcClientPolicy(maxChannels, maxConcurrentStreamsPerChannel, - maxConcurrentRequestsPerStream, totalRequestsPerStream, - connectTimeoutMillis, terminationWaitMillis, closeTimeout, - grpcChannelSelector, grpcStreamSelector, callOptions, eventLoops, - channelType, closeEventLoops, tlsPolicy); - } - - public Builder maxChannels(int maxChannels) { - if (maxChannels < 1) { - throw new IllegalArgumentException(String.format( - "maxChannels=%d < 1", maxChannels - )); - } - this.maxChannels = maxChannels; - return this; - } - - public Builder maxConcurrentStreamsPerChannel(int maxConcurrentStreamsPerChannel) { - if (maxConcurrentStreamsPerChannel < 1) { - throw new IllegalArgumentException(String.format( - "maxConcurrentStreamsPerChannel=%d < 1", maxConcurrentStreamsPerChannel - )); - } - this.maxConcurrentStreamsPerChannel = maxConcurrentStreamsPerChannel; - return this; - } - - public Builder maxConcurrentRequestsPerStream(int maxConcurrentRequestsPerStream) { - if (maxConcurrentRequestsPerStream < 1) { - throw new IllegalArgumentException(String.format( - "maxConcurrentRequestsPerStream=%d < 1", maxConcurrentRequestsPerStream - )); - } - this.maxConcurrentRequestsPerStream = maxConcurrentRequestsPerStream; - return this; - } - - public Builder totalRequestsPerStream(int totalRequestsPerStream) { - if (totalRequestsPerStream < 0) { - throw new IllegalArgumentException(String.format( - "totalRequestsPerStream=%d < 0", totalRequestsPerStream - )); - } - this.totalRequestsPerStream = totalRequestsPerStream; - return this; - } - - public Builder connectTimeoutMillis(int connectTimeoutMillis) { - if (connectTimeoutMillis < 0) { - throw new IllegalArgumentException(String.format( - "connectTimeoutMillis=%d < 0", connectTimeoutMillis - )); - } - this.connectTimeoutMillis = connectTimeoutMillis; - return this; - } - - public Builder closeTimeout(int closeTimeout) { - this.closeTimeout = closeTimeout; - return 
this; - } - - public Builder tlsPolicy(@Nullable TlsPolicy tlsPolicy) { - this.tlsPolicy = tlsPolicy; - return this; - } - - public Builder grpcChannelSelector(GrpcChannelSelector grpcChannelSelector) { - this.grpcChannelSelector = grpcChannelSelector; - return this; - } - - public Builder grpcStreamSelector(GrpcStreamSelector grpcStreamSelector) { - this.grpcStreamSelector = grpcStreamSelector; - return this; - } - - public Builder callOptions(@Nullable CallOptions callOptions) { - this.callOptions = callOptions; - return this; - } - - public Builder terminationWaitMillis(long terminationWaitMillis) { - if (terminationWaitMillis < 0) { - throw new IllegalArgumentException(String.format( - "terminationWaitMillis=%d < 0", terminationWaitMillis - )); - } - this.terminationWaitMillis = terminationWaitMillis; - return this; - } - } -} diff --git a/proxy/src/com/aerospike/client/proxy/grpc/GrpcConversions.java b/proxy/src/com/aerospike/client/proxy/grpc/GrpcConversions.java deleted file mode 100644 index 69cad9969..000000000 --- a/proxy/src/com/aerospike/client/proxy/grpc/GrpcConversions.java +++ /dev/null @@ -1,424 +0,0 @@ -/* - * Copyright 2012-2024 Aerospike, Inc. - * - * Portions may be licensed to Aerospike, Inc. under one or more contributor - * license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package com.aerospike.client.proxy.grpc; - -import com.aerospike.client.AerospikeException; -import com.aerospike.client.Operation; -import com.aerospike.client.ResultCode; -import com.aerospike.client.Value; -import com.aerospike.client.policy.Policy; -import com.aerospike.client.policy.QueryDuration; -import com.aerospike.client.policy.QueryPolicy; -import com.aerospike.client.policy.ScanPolicy; -import com.aerospike.client.policy.WritePolicy; -import com.aerospike.client.query.Filter; -import com.aerospike.client.query.PartitionFilter; -import com.aerospike.client.query.PartitionStatus; -import com.aerospike.client.query.Statement; -import com.aerospike.client.util.Packer; -import com.aerospike.proxy.client.Kvs; -import com.google.protobuf.ByteString; - -import io.grpc.Status; -import io.grpc.StatusRuntimeException; - -/** - * Conversions from native client objects to Grpc objects. 
- */ -public class GrpcConversions { - private static final String ERROR_MESSAGE_SEPARATOR = " -> "; - public static final int MAX_ERR_MSG_LENGTH = 10 * 1024; - - public static void setRequestPolicy( - Policy policy, - Kvs.AerospikeRequestPayload.Builder requestBuilder - ) { - if (policy instanceof WritePolicy) { - Kvs.WritePolicy.Builder writePolicyBuilder = Kvs.WritePolicy.newBuilder(); - - Kvs.ReadModeAP readModeAP = Kvs.ReadModeAP.valueOf(policy.readModeAP.name()); - writePolicyBuilder.setReadModeAP(readModeAP); - - Kvs.ReadModeSC readModeSC = Kvs.ReadModeSC.valueOf(policy.readModeSC.name()); - writePolicyBuilder.setReadModeSC(readModeSC); - - Kvs.Replica replica = Kvs.Replica.valueOf(policy.replica.name()); - writePolicyBuilder.setReplica(replica); - - requestBuilder.setWritePolicy(writePolicyBuilder.build()); - } - else { - Kvs.ReadPolicy.Builder readPolicyBuilder = Kvs.ReadPolicy.newBuilder(); - - Kvs.ReadModeAP readModeAP = Kvs.ReadModeAP.valueOf(policy.readModeAP.name()); - readPolicyBuilder.setReadModeAP(readModeAP); - - Kvs.ReadModeSC readModeSC = Kvs.ReadModeSC.valueOf(policy.readModeSC.name()); - readPolicyBuilder.setReadModeSC(readModeSC); - - Kvs.Replica replica = Kvs.Replica.valueOf(policy.replica.name()); - readPolicyBuilder.setReplica(replica); - - requestBuilder.setReadPolicy(readPolicyBuilder.build()); - } - } - - public static Kvs.ScanPolicy toGrpc(ScanPolicy scanPolicy) { - // Base policy fields. - Kvs.ScanPolicy.Builder scanPolicyBuilder = Kvs.ScanPolicy.newBuilder(); - - Kvs.ReadModeAP readModeAP = Kvs.ReadModeAP.valueOf(scanPolicy.readModeAP.name()); - scanPolicyBuilder.setReadModeAP(readModeAP); - - Kvs.ReadModeSC readModeSC = Kvs.ReadModeSC.valueOf(scanPolicy.readModeSC.name()); - scanPolicyBuilder.setReadModeSC(readModeSC); - - Kvs.Replica replica = Kvs.Replica.valueOf(scanPolicy.replica.name()); - scanPolicyBuilder.setReplica(replica); - - if (scanPolicy.filterExp != null) { - scanPolicyBuilder.setExpression(ByteString.copyFrom(scanPolicy.filterExp.getBytes())); - } - - scanPolicyBuilder.setTotalTimeout(scanPolicy.totalTimeout); - scanPolicyBuilder.setCompress(scanPolicy.compress); - - // Scan policy specific fields - scanPolicyBuilder.setMaxRecords(scanPolicy.maxRecords); - scanPolicyBuilder.setRecordsPerSecond(scanPolicy.recordsPerSecond); - scanPolicyBuilder.setMaxConcurrentNodes(scanPolicy.maxConcurrentNodes); - scanPolicyBuilder.setConcurrentNodes(scanPolicy.concurrentNodes); - scanPolicyBuilder.setIncludeBinData(scanPolicy.includeBinData); - return scanPolicyBuilder.build(); - } - - @SuppressWarnings("deprecation") - public static Kvs.QueryPolicy toGrpc(QueryPolicy queryPolicy) { - // Base policy fields. 
- Kvs.QueryPolicy.Builder queryPolicyBuilder = Kvs.QueryPolicy.newBuilder(); - - Kvs.ReadModeAP readModeAP = Kvs.ReadModeAP.valueOf(queryPolicy.readModeAP.name()); - queryPolicyBuilder.setReadModeAP(readModeAP); - - Kvs.ReadModeSC readModeSC = Kvs.ReadModeSC.valueOf(queryPolicy.readModeSC.name()); - queryPolicyBuilder.setReadModeSC(readModeSC); - - Kvs.Replica replica = Kvs.Replica.valueOf(queryPolicy.replica.name()); - queryPolicyBuilder.setReplica(replica); - - if (queryPolicy.filterExp != null) { - queryPolicyBuilder.setExpression(ByteString.copyFrom(queryPolicy.filterExp.getBytes())); - } - - queryPolicyBuilder.setTotalTimeout(queryPolicy.totalTimeout); - queryPolicyBuilder.setCompress(queryPolicy.compress); - queryPolicyBuilder.setSendKey(queryPolicy.sendKey); - - // Query policy specific fields - queryPolicyBuilder.setMaxConcurrentNodes(queryPolicy.maxConcurrentNodes); - queryPolicyBuilder.setRecordQueueSize(queryPolicy.recordQueueSize); - queryPolicyBuilder.setInfoTimeout(queryPolicy.infoTimeout); - queryPolicyBuilder.setIncludeBinData(queryPolicy.includeBinData); - queryPolicyBuilder.setFailOnClusterChange(queryPolicy.failOnClusterChange); - // TODO: Proxy client protobuf query policy type need to support QueryDuration enum. - queryPolicyBuilder.setShortQuery(queryPolicy.shortQuery || queryPolicy.expectedDuration == QueryDuration.SHORT); - return queryPolicyBuilder.build(); - } - - /** - * Convert a value to packed bytes. - * - * @param value the value to pack - * @return the packed bytes. - */ - public static ByteString valueToByteString(Value value) { - Packer packer = new Packer(); - value.pack(packer); // Calculate buffer size. - packer.createBuffer(); - value.pack(packer); // Write to buffer. - return ByteString.copyFrom(packer.getBuffer()); - } - - public static Kvs.Filter toGrpc(Filter filter) { - Kvs.Filter.Builder builder = Kvs.Filter.newBuilder(); - - builder.setName(filter.getName()); - builder.setValType(filter.getValType()); - - if (filter.getBegin() != null) { - Packer packer = new Packer(); - filter.getBegin().pack(packer); - packer.createBuffer(); - filter.getBegin().pack(packer); - builder.setBegin(ByteString.copyFrom(packer.getBuffer())); - } - - if (filter.getBegin() != null) { - builder.setBegin(valueToByteString(filter.getBegin())); - } - - if (filter.getEnd() != null) { - builder.setEnd(valueToByteString(filter.getEnd())); - } - - if (filter.getPackedCtx() != null) { - builder.setPackedCtx(ByteString.copyFrom(filter.getPackedCtx())); - } - - builder.setColType(Kvs.IndexCollectionType.valueOf(filter.getColType().name())); - return builder.build(); - } - - public static Kvs.Operation toGrpc(Operation operation) { - Kvs.Operation.Builder builder = Kvs.Operation.newBuilder(); - builder.setType(Kvs.OperationType.valueOf(operation.type.name())); - - if (operation.binName != null) { - builder.setBinName(operation.binName); - } - - if (operation.value != null) { - builder.setValue(valueToByteString(operation.value)); - } - return builder.build(); - } - - /** - * @param statement Aerospike client statement - * @param taskId required non-zero taskId to use for the execution at the proxy - * @param maxRecords max records to return - * @return equivalent gRPC {@link com.aerospike.proxy.client.Kvs.Statement} - */ - public static Kvs.Statement toGrpc(Statement statement, long taskId, long maxRecords) { - Kvs.Statement.Builder statementBuilder = Kvs.Statement.newBuilder(); - statementBuilder.setNamespace(statement.getNamespace()); - - if (statement.getSetName() != null) { - 
statementBuilder.setSetName(statement.getSetName()); - } - - if (statement.getIndexName() != null) { - statementBuilder.setIndexName(statement.getIndexName()); - } - - if (statement.getBinNames() != null) { - for (String binName : statement.getBinNames()) { - statementBuilder.addBinNames(binName); - } - } - - if (statement.getFilter() != null) { - statementBuilder.setFilter(toGrpc(statement.getFilter())); - } - - - if (statement.getPackageName() != null) { - statementBuilder.setPackageName(statement.getPackageName()); - } - - if (statement.getFunctionName() != null) { - statementBuilder.setFunctionName(statement.getFunctionName()); - } - - if (statement.getFunctionArgs() != null) { - for (Value arg : statement.getFunctionArgs()) { - statementBuilder.addFunctionArgs(valueToByteString(arg)); - } - } - - if (statement.getOperations() != null) { - for (Operation operation : statement.getOperations()) { - statementBuilder.addOperations(toGrpc(operation)); - } - } - - statementBuilder.setTaskId(taskId); - - statementBuilder.setMaxRecords(maxRecords); - statementBuilder.setRecordsPerSecond(statement.getRecordsPerSecond()); - return statementBuilder.build(); - } - - public static Kvs.PartitionStatus toGrpc(PartitionStatus ps) { - Kvs.PartitionStatus.Builder builder = Kvs.PartitionStatus.newBuilder(); - builder.setId(ps.id); - builder.setBVal(ps.bval); - builder.setRetry(ps.retry); - if (ps.digest != null) { - builder.setDigest(ByteString.copyFrom(ps.digest)); - } - return builder.build(); - } - - public static Kvs.PartitionFilter toGrpc(PartitionFilter partitionFilter) { - Kvs.PartitionFilter.Builder builder = Kvs.PartitionFilter.newBuilder(); - builder.setBegin(partitionFilter.getBegin()); - builder.setCount(partitionFilter.getCount()); - builder.setRetry(partitionFilter.isRetry()); - - byte[] digest = partitionFilter.getDigest(); - if (digest != null && digest.length > 0) { - builder.setDigest(ByteString.copyFrom(digest)); - } - - if (partitionFilter.getPartitions() != null) { - for (PartitionStatus ps : partitionFilter.getPartitions()) { - builder.addPartitionStatuses(toGrpc(ps)); - } - } - return builder.build(); - } - - public static Kvs.BackgroundExecutePolicy toGrpc(WritePolicy writePolicy) { - // Base policy fields. 
- Kvs.BackgroundExecutePolicy.Builder queryPolicyBuilder = Kvs.BackgroundExecutePolicy.newBuilder(); - - Kvs.ReadModeAP readModeAP = Kvs.ReadModeAP.valueOf(writePolicy.readModeAP.name()); - queryPolicyBuilder.setReadModeAP(readModeAP); - - Kvs.ReadModeSC readModeSC = Kvs.ReadModeSC.valueOf(writePolicy.readModeSC.name()); - queryPolicyBuilder.setReadModeSC(readModeSC); - - Kvs.Replica replica = Kvs.Replica.valueOf(writePolicy.replica.name()); - queryPolicyBuilder.setReplica(replica); - - if (writePolicy.filterExp != null) { - queryPolicyBuilder.setExpression(ByteString.copyFrom(writePolicy.filterExp.getBytes())); - } - - queryPolicyBuilder.setTotalTimeout(writePolicy.totalTimeout); - queryPolicyBuilder.setCompress(writePolicy.compress); - queryPolicyBuilder.setSendKey(writePolicy.sendKey); - - // Query policy specific fields - queryPolicyBuilder.setRecordExistsAction(Kvs.RecordExistsAction.valueOf(writePolicy.recordExistsAction.name())); - queryPolicyBuilder.setGenerationPolicy(Kvs.GenerationPolicy.valueOf(writePolicy.generationPolicy.name())); - queryPolicyBuilder.setCommitLevel(Kvs.CommitLevel.valueOf(writePolicy.commitLevel.name())); - queryPolicyBuilder.setGeneration(writePolicy.generation); - queryPolicyBuilder.setExpiration(writePolicy.expiration); - queryPolicyBuilder.setRespondAllOps(writePolicy.respondAllOps); - queryPolicyBuilder.setDurableDelete(writePolicy.durableDelete); - queryPolicyBuilder.setXdr(writePolicy.xdr); - return queryPolicyBuilder.build(); - } - - public static AerospikeException toAerospike(StatusRuntimeException sre, Policy policy, int iteration) { - Status.Code code = sre.getStatus().getCode(); - int resultCode = ResultCode.CLIENT_ERROR; - switch (code) { - case CANCELLED: - case UNKNOWN: - case NOT_FOUND: - case ALREADY_EXISTS: - case FAILED_PRECONDITION: - case OUT_OF_RANGE: - case UNIMPLEMENTED: - case INTERNAL: - resultCode = ResultCode.CLIENT_ERROR; - break; - - case ABORTED: - case DATA_LOSS: - resultCode = ResultCode.SERVER_ERROR; - break; - - case INVALID_ARGUMENT: - resultCode = ResultCode.SERIALIZE_ERROR; - break; - - case DEADLINE_EXCEEDED: - return new AerospikeException.Timeout(policy, iteration); - - case PERMISSION_DENIED: - resultCode = ResultCode.FAIL_FORBIDDEN; - break; - - case RESOURCE_EXHAUSTED: - resultCode = ResultCode.QUOTA_EXCEEDED; - break; - - case UNAUTHENTICATED: - resultCode = ResultCode.NOT_AUTHENTICATED; - break; - - case UNAVAILABLE: - resultCode = ResultCode.SERVER_NOT_AVAILABLE; - break; - - case OK: - resultCode = ResultCode.OK; - break; - } - - return new AerospikeException(resultCode, getDisplayMessage(sre, MAX_ERR_MSG_LENGTH), sre); - } - - /** - * Get the error message to display restricting it to some length. - */ - public static String getDisplayMessage(Throwable e, int maxMsgLength) { - if (maxMsgLength <= 0) { - return ""; - } - - String errorMessage = getMessage(e); - Throwable rootCause = e.getCause(); - while (rootCause != null) { - String current = getMessage(rootCause); - errorMessage = (errorMessage.isEmpty()) ? current - : errorMessage + ERROR_MESSAGE_SEPARATOR + current; - rootCause = rootCause.getCause(); - } - - return take(errorMessage, maxMsgLength); - } - - /** - * Take at most first `n` characters from the string. - * - * @param s input string - * @param n number of characters to take. - * @return the string that is at most `n` characters in length. 
- */ - private static String take(String s, int n) { - int trimLength = Math.min(n, s.length()); - if (trimLength <= 0) { - return ""; - } - return s.substring(0, trimLength); - } - - /** - * Get error message for [e]. - */ - private static String getMessage(Throwable e) { - if (e == null) { - return ""; - } - - String errorMessage = e.getMessage() != null ? e.getMessage() : ""; - - errorMessage = errorMessage.split("\\r?\\n|\\r")[0]; - if (errorMessage.trim().isEmpty()) { - return e.getClass().getName(); - } - else { - return String.format("%s - %s", e.getClass().getName(), - errorMessage); - } - } -} diff --git a/proxy/src/com/aerospike/client/proxy/grpc/GrpcStream.java b/proxy/src/com/aerospike/client/proxy/grpc/GrpcStream.java deleted file mode 100644 index 2cdd80393..000000000 --- a/proxy/src/com/aerospike/client/proxy/grpc/GrpcStream.java +++ /dev/null @@ -1,472 +0,0 @@ -/* - * Copyright 2012-2023 Aerospike, Inc. - * - * Portions may be licensed to Aerospike, Inc. under one or more contributor - * license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package com.aerospike.client.proxy.grpc; - -import java.util.HashMap; -import java.util.Iterator; -import java.util.LinkedList; -import java.util.Map; -import java.util.concurrent.TimeUnit; - -import com.aerospike.client.AerospikeException; -import com.aerospike.client.Log; -import com.aerospike.client.ResultCode; -import com.aerospike.proxy.client.Kvs; - -import io.grpc.CallOptions; -import io.grpc.ClientCall; -import io.grpc.MethodDescriptor; -import io.grpc.stub.ClientCalls; -import io.grpc.stub.StreamObserver; -import io.netty.channel.EventLoop; - -/** - * This class executes a single Aerospike API method like get, put, etc. - * throughout its lifetime. It executes a maximum of `totalRequestsPerStream` - * before closing the stream. - *

- * NOTE All methods of the stream are executed within a single - * thread. This is implemented by - *

- *   - having the channel configured to use the direct executor
- *   - having the channel and streams associated with the channel be
- *     executed on a single event loop
- */ -public class GrpcStream implements StreamObserver { - /** - * Idle timeout after which the stream is closed, when no call are pending. - */ - private static final long IDLE_TIMEOUT = 30_000; - - /** - * Unique stream id in the channel. - */ - private final int id; - - /** - * The event loop within which all of GrpcStream calls are executed. - */ - private final EventLoop eventLoop; - - /** - * The request observer of the stream. - */ - private StreamObserver requestObserver; - - /** - * Maximum number of concurrent requests that can be in-flight. - */ - private final int maxConcurrentRequests; - - /** - * Total number of requests to process in this stream for its lifetime. - */ - private final int totalRequestsToExecute; - - /** - * The executor for this stream. - */ - private final GrpcChannelExecutor channelExecutor; - - /** - * The method processed by this stream. - */ - private final MethodDescriptor methodDescriptor; - - /** - * Queued calls pending execution. - *

- * WARN Ensure this is always accessed from the {@link #eventLoop} - * thread. - */ - private final LinkedList pendingCalls; - - /** - * Map of request id to the calls executing in this stream. - */ - private final Map executingCalls = new HashMap<>(); - - /** - * Is the stream closed. This variable is only accessed from the event - * loop thread assigned to this stream and its channel. - */ - private boolean isClosed = false; - - // Stream statistics. These are only updated from the event loop thread - // assigned to this stream and its channel. - - /** - * Number of requests sent on the gRPC stream. This is only updated from - * the event loop thread assigned to this stream and its channel. - */ - private volatile int requestsSent; - - /** - * Number of requests completed. This is only updated from - * the event loop thread assigned to this stream and its channel. - */ - private volatile int requestsCompleted; - - /** - * Timer started when this stream has no pending calls. - * There may still be calls executing (in-flight). - */ - private volatile long streamIdleStartTime; - - /** - * Indicates if the gRPC stream has been half closed from this side. - */ - private boolean streamHalfClosed; - - public GrpcStream( - GrpcChannelExecutor channelExecutor, - MethodDescriptor methodDescriptor, - LinkedList pendingCalls, - CallOptions callOptions, - int streamIndex, - EventLoop eventLoop, - int maxConcurrentRequests, - int totalRequestsToExecute - ) { - this.channelExecutor = channelExecutor; - this.methodDescriptor = methodDescriptor; - this.pendingCalls = pendingCalls; - this.id = streamIndex; - this.eventLoop = eventLoop; - this.maxConcurrentRequests = maxConcurrentRequests; - this.totalRequestsToExecute = totalRequestsToExecute; - ClientCall call = channelExecutor.getChannel() - .newCall(methodDescriptor, callOptions); - StreamObserver requestObserver = - ClientCalls.asyncBidiStreamingCall(call, this); - setRequestObserver(requestObserver); - } - - private void setRequestObserver(StreamObserver requestObserver) { - this.requestObserver = requestObserver; - } - - @Override - public void onNext(Kvs.AerospikeResponsePayload aerospikeResponsePayload) { - if (!eventLoop.inEventLoop()) { - // This call is not within the event loop thread. For some reason - // gRPC invokes some callbacks from a different thread. - eventLoop.schedule(() -> onNext(aerospikeResponsePayload), 0, - TimeUnit.NANOSECONDS); - return; - } - - // Invoke callback. - int callId = aerospikeResponsePayload.getId(); - GrpcStreamingCall call; - - if (aerospikeResponsePayload.getHasNext()) { - call = executingCalls.get(callId); - } - else { - call = executingCalls.remove(callId); - - // Update stats. - requestsCompleted++; - channelExecutor.onRequestCompleted(); - } - - // Call might have expired and been cancelled. - if (call != null && !call.isAborted()) { - try { - call.onNext(aerospikeResponsePayload); - } - catch (Throwable t) { - if (aerospikeResponsePayload.getHasNext()) { - abortCallAtServer(call, callId); - } - } - } - - executePendingCalls(); - } - - private void abortCallAtServer(GrpcStreamingCall call, int callId) { - call.markAborted(); - - // Let the proxy know that there has been a failure so that - // it can abort long-running jobs. 
- int requestId = requestsSent++; - Kvs.AerospikeRequestPayload.Builder builder = Kvs.AerospikeRequestPayload.newBuilder(); - builder.setId(requestId); - builder.setAbortRequest(Kvs.AbortRequest.newBuilder().setAbortId(callId)); - requestObserver.onNext(builder.build()); - } - - private void abortExecutingCalls(Throwable throwable) { - isClosed = true; - - for (GrpcStreamingCall call : executingCalls.values()) { - try { - call.onError(throwable); - } - catch (Exception e) { - Log.debug("Exception in invoking onError: " + e); - } - } - - markClosed(); - } - - /** - * Marks the stream as closed and moves pending calls nack to the channel - * executor. - */ - private void markClosed() { - isClosed = true; - - executingCalls.clear(); - - channelExecutor.onStreamClosed(this); - } - - @Override - public void onError(Throwable throwable) { - if (!eventLoop.inEventLoop()) { - // This call is not within the event loop thread. For some reason - // gRPC invokes error callback from a different thread. - eventLoop.schedule(() -> onError(throwable), 0, - TimeUnit.NANOSECONDS); - return; - } - - if (executingCalls.isEmpty()) { - // gRPC stream creation failed. - // Fail all pending calls, otherwise they will keep cycling - // through until a stream creation succeeds. - for (GrpcStreamingCall call : pendingCalls) { - try { - call.onError(throwable); - } - catch (Exception e) { - Log.debug("Exception in invoking onError: " + e); - } - } - pendingCalls.clear(); - } - - abortExecutingCalls(throwable); - } - - @Override - public void onCompleted() { - if (!eventLoop.inEventLoop()) { - eventLoop.schedule(this::onCompleted, 0, TimeUnit.NANOSECONDS); - return; - } - - abortExecutingCalls(new AerospikeException(ResultCode.SERVER_ERROR, - "stream completed before all responses have been received")); - } - - LinkedList getPendingCalls() { - return pendingCalls; - } - - MethodDescriptor getMethodDescriptor() { - return methodDescriptor; - } - - int getOngoingRequests() { - return executingCalls.size() + pendingCalls.size(); - } - - public int getId() { - return id; - } - - public int getRequestsCompleted() { - return requestsCompleted; - } - - int getMaxConcurrentRequests() { - return maxConcurrentRequests; - } - - int getTotalRequestsToExecute() { - return totalRequestsToExecute; - } - - @Override - public String toString() { - return "GrpcStream{id=" + id + ", channelExecutor=" + channelExecutor + '}'; - } - - public int getExecutedRequests() { - return getRequestsCompleted() + getOngoingRequests(); - } - - public void executePendingCalls() { - if (isClosed) { - return; - } - - if (pendingCalls.isEmpty()) { - if (streamIdleStartTime == 0) { - streamIdleStartTime = System.currentTimeMillis(); - } - - if (streamIdleStartTime + IDLE_TIMEOUT <= System.currentTimeMillis() && !streamHalfClosed) { - streamHalfClosed = true; - requestObserver.onCompleted(); - } - } - else if (streamIdleStartTime != 0) { - streamIdleStartTime = 0; - } - - if (streamHalfClosed) { - if (executingCalls.isEmpty()) { - // All executing calls are over. - markClosed(); - } - - // Should not push any new calls on this stream. - return; - } - - Iterator iterator = pendingCalls.iterator(); - while (iterator.hasNext()) { - GrpcStreamingCall call = iterator.next(); - - if (call.hasSendDeadlineExpired() || call.hasExpired()) { - call.onError(new AerospikeException.Timeout(call.getPolicy(), - call.getIteration())); - - iterator.remove(); // Remove from pending. 
- } - else if (executingCalls.size() < maxConcurrentRequests && requestsSent < totalRequestsToExecute) { - execute(call); - iterator.remove(); // Remove from pending. - } - else { - // Call remains in pending. - } - } - } - - private void execute(GrpcStreamingCall call) { - try { - if (call.hasExpired()) { - call.onError(new AerospikeException.Timeout(call.getPolicy(), - call.getIteration())); - return; - } - - Kvs.AerospikeRequestPayload.Builder requestBuilder = call.getRequestBuilder(); - - int requestId = requestsSent++; - requestBuilder - .setId(requestId) - .setIteration(call.getIteration()); - - GrpcConversions.setRequestPolicy(call.getPolicy(), requestBuilder); - Kvs.AerospikeRequestPayload requestPayload = requestBuilder - .build(); - executingCalls.put(requestId, call); - - requestObserver.onNext(requestPayload); - - if (requestsSent >= totalRequestsToExecute) { - // Complete this stream. - requestObserver.onCompleted(); - streamHalfClosed = true; - } - - if (call.hasExpiry()) { - // TODO: Is there a need for a more efficient implementation in - // terms of the call cancellation. - eventLoop.schedule(() -> onCallExpired(requestId), - call.nanosTillExpiry(), TimeUnit.NANOSECONDS); - } - } - catch (Exception e) { - // Failure in scheduling or delegating through request observer. - call.onError(e); - } - } - - private void onCallExpired(int callId) { - GrpcStreamingCall call = executingCalls.remove(callId); - - // Call has completed. - if (call == null) { - return; - } - - // Cancel call. - call.onError(new AerospikeException.Timeout(call.getPolicy(), call.getIteration())); - - // Abort long-running calls at server. - if (!call.isSingleResponse()) { - abortCallAtServer(call, callId); - } - } - - boolean canEnqueue() { - return !isClosed && !streamHalfClosed && requestsSent < totalRequestsToExecute; - } - - /** - * Enqueue the call to this stream. Should only be invoked if - * {@link #canEnqueue()} returned true. - */ - void enqueue(GrpcStreamingCall call) { - pendingCalls.add(call); - } - - public void closePendingCalls() { - pendingCalls.forEach(call -> { - try { - call.failIfNotComplete(ResultCode.CLIENT_ERROR); - } - catch (Exception e) { - Log.error("Error shutting down " + this.getClass() + ": " + e.getMessage()); - } - }); - - pendingCalls.clear(); - - executingCalls.values().forEach(call -> { - try { - call.failIfNotComplete(ResultCode.CLIENT_ERROR); - } - catch (Exception e) { - Log.error("Error shutting down " + this.getClass() + ": " + e.getMessage()); - } - }); - - executingCalls.clear(); - - // For hygiene complete the stream as well. - try { - requestObserver.onCompleted(); - streamHalfClosed =true; - } - catch (Throwable t) { - // Ignore. - } - } -} diff --git a/proxy/src/com/aerospike/client/proxy/grpc/GrpcStreamSelector.java b/proxy/src/com/aerospike/client/proxy/grpc/GrpcStreamSelector.java deleted file mode 100644 index 69e684fe5..000000000 --- a/proxy/src/com/aerospike/client/proxy/grpc/GrpcStreamSelector.java +++ /dev/null @@ -1,83 +0,0 @@ -/* - * Copyright 2012-2023 Aerospike, Inc. - * - * Portions may be licensed to Aerospike, Inc. under one or more contributor - * license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. 
You may obtain a copy of - * the License at http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package com.aerospike.client.proxy.grpc; - -import java.util.List; - -/** - * A selector of streams within a channel to execute Aerospike proxy gRPC calls. - */ -public interface GrpcStreamSelector { - /** - * Select a stream for the gRPC method. All streams created by the - * selector should be close when the selector is closed. - * - * @param streams streams to select from. - * @param call the streaming call to be executed. - * @return the selected stream, null when no stream is - * selected. - */ - SelectedStream select(List streams, GrpcStreamingCall call); - - - class SelectedStream { - /** - * Wil be non-null only when a current stream is selected. - */ - private final GrpcStream stream; - - // Following fields only applies when {@link #stream} is - // null - - private final int maxConcurrentRequestsPerStream; - private final int totalRequestsPerStream; - - /** - * Create a new stream with the supplied parameters. - */ - public SelectedStream(int maxConcurrentRequestsPerStream, int totalRequestsPerStream) { - this.stream = null; - this.maxConcurrentRequestsPerStream = maxConcurrentRequestsPerStream; - this.totalRequestsPerStream = totalRequestsPerStream; - } - - /** - * Use an existing stream. - */ - public SelectedStream(GrpcStream stream) { - this.stream = stream; - this.maxConcurrentRequestsPerStream = 0; - this.totalRequestsPerStream = 0; - } - - boolean useExistingStream() { - return stream != null; - } - - public GrpcStream getStream() { - return stream; - } - - public int getMaxConcurrentRequestsPerStream() { - return maxConcurrentRequestsPerStream; - } - - public int getTotalRequestsPerStream() { - return totalRequestsPerStream; - } - } -} diff --git a/proxy/src/com/aerospike/client/proxy/grpc/GrpcStreamingCall.java b/proxy/src/com/aerospike/client/proxy/grpc/GrpcStreamingCall.java deleted file mode 100644 index 9c7d14391..000000000 --- a/proxy/src/com/aerospike/client/proxy/grpc/GrpcStreamingCall.java +++ /dev/null @@ -1,217 +0,0 @@ -/* - * Copyright 2012-2023 Aerospike, Inc. - * - * Portions may be licensed to Aerospike, Inc. under one or more contributor - * license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package com.aerospike.client.proxy.grpc; - -import com.aerospike.client.AerospikeException; -import com.aerospike.client.policy.Policy; -import com.aerospike.proxy.client.Kvs; - -import io.grpc.MethodDescriptor; -import io.grpc.stub.StreamObserver; - -/** - * A gRPC call that is converted to a streaming call for performance. 
- */ -public class GrpcStreamingCall { - /** - * The streaming method to execute for this unary call. - */ - private final MethodDescriptor methodDescriptor; - - /** - * The request builder populated with command call specific parameters. - */ - private final Kvs.AerospikeRequestPayload.Builder requestBuilder; - - /** - * The stream response observer for the call. - */ - private final StreamObserver responseObserver; - - /** - * The deadline in nanoseconds w.r.t System.nanoTime() for this call to - * complete. A value of zero indicates that the call has no deadline. - */ - private final long deadlineNanos; - - /** - * The deadline in nanoseconds w.r.t System.nanoTime() for this call to - * be handed to the underlying gRPC sub system. A value of zero indicates - * that the call has no send deadline. - */ - private final long sendDeadlineNanos; - - /** - * Aerospike client policy for this request. - */ - private final Policy policy; - - /** - * Iteration number of this request. - */ - private final int iteration; - - /** - * Number of expected responses for this request. A negative value - * indicates that the number of responses is unknown. - */ - private final int numExpectedResponses; - - /** - * Indicates if this call completed (successfully or unsuccessfully). - */ - private volatile boolean completed; - - /** - * Indicates if this call aborted due to an application exception.. - */ - private volatile boolean aborted; - - protected GrpcStreamingCall(GrpcStreamingCall other) { - this(other.methodDescriptor, other.requestBuilder, other.getPolicy(), - other.iteration, other.deadlineNanos, other.sendDeadlineNanos, - other.numExpectedResponses, other.responseObserver); - - completed = other.completed; - aborted = other.aborted; - } - - public GrpcStreamingCall( - MethodDescriptor methodDescriptor, - Kvs.AerospikeRequestPayload.Builder requestBuilder, - Policy policy, - int iteration, - long deadlineNanos, - long sendDeadlineNanos, - int numExpectedResponses, - StreamObserver responseObserver - ) { - this.responseObserver = responseObserver; - this.methodDescriptor = methodDescriptor; - this.requestBuilder = requestBuilder; - this.iteration = iteration; - this.policy = policy; - this.deadlineNanos = deadlineNanos; - this.sendDeadlineNanos = sendDeadlineNanos; - this.numExpectedResponses = numExpectedResponses; - } - - public void onNext(Kvs.AerospikeResponsePayload payload) { - responseObserver.onNext(payload); - - if (!payload.getHasNext()) { - completed = true; - responseObserver.onCompleted(); - } - } - - public void onError(Throwable t) { - completed = true; - responseObserver.onError(t); - } - - /** - * Fail the call if it is not completed. - * - * @param resultCode aerospike error code. - */ - public void failIfNotComplete(int resultCode) { - if (!hasCompleted()) { - onError(new AerospikeException(resultCode)); - } - } - - /** - * Fail the call if it is not completed. - * - * @param throwable cause of failure. - */ - public void failIfNotComplete(Throwable throwable) { - if (!hasCompleted()) { - onError(throwable); - } - } - - /** - * @return true if this call has completed either because - * {@link #onNext(Kvs.AerospikeResponsePayload)} or - * {@link #onError(Throwable)} was invoked. - */ - public boolean hasCompleted() { - return completed; - } - - public MethodDescriptor getStreamingMethodDescriptor() { - return methodDescriptor; - } - - /** - * @return true if this call has expired. 
- */ - public boolean hasExpired() { - return hasExpiry() && (System.nanoTime() - deadlineNanos) >= 0; - } - - /** - * @return true if the send deadline has expired. - */ - public boolean hasSendDeadlineExpired() { - return sendDeadlineNanos > 0 && (System.nanoTime() - sendDeadlineNanos) >= 0; - } - - public boolean hasExpiry() { - return deadlineNanos != 0; - } - - public long nanosTillExpiry() { - if (!hasExpiry()) { - throw new IllegalStateException("call does not expire"); - } - long nanosTillExpiry = deadlineNanos - System.nanoTime(); - return nanosTillExpiry > 0 ? nanosTillExpiry : 0; - } - - public Kvs.AerospikeRequestPayload.Builder getRequestBuilder() { - return requestBuilder; - } - - public int getIteration() { - return iteration; - } - - public Policy getPolicy() { - return policy; - } - - public void markAborted() { - this.aborted = true; - this.completed = true; - } - - public boolean isAborted() { - return aborted; - } - - public boolean isSingleResponse() { - return numExpectedResponses == 1; - } - - public int getNumExpectedResponses() { - return numExpectedResponses; - } -} diff --git a/proxy/src/com/aerospike/client/proxy/grpc/MultiAddressNameResolverFactory.java b/proxy/src/com/aerospike/client/proxy/grpc/MultiAddressNameResolverFactory.java deleted file mode 100644 index 72603212e..000000000 --- a/proxy/src/com/aerospike/client/proxy/grpc/MultiAddressNameResolverFactory.java +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Copyright 2012-2023 Aerospike, Inc. - * - * Portions may be licensed to Aerospike, Inc. under one or more contributor - * license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. 
- */ -package com.aerospike.client.proxy.grpc; - -import java.net.SocketAddress; -import java.net.URI; -import java.util.List; -import java.util.stream.Collectors; - -import io.grpc.Attributes; -import io.grpc.EquivalentAddressGroup; -import io.grpc.NameResolver; - -class MultiAddressNameResolverFactory extends NameResolver.Factory { - - final List addresses; - - MultiAddressNameResolverFactory(List addresses) { - this.addresses = addresses.stream() - .map(EquivalentAddressGroup::new) - .collect(Collectors.toList()); - } - - public NameResolver newNameResolver(URI notUsedUri, NameResolver.Args args) { - return new NameResolver() { - @Override - public String getServiceAuthority() { - return "Authority"; - } - - public void start(Listener2 listener) { - listener.onResult(ResolutionResult.newBuilder().setAddresses(addresses) - .setAttributes(Attributes.EMPTY).build()); - } - - public void shutdown() { - } - }; - } - - @Override - public String getDefaultScheme() { - return "multiaddress"; - } -} \ No newline at end of file diff --git a/proxy/src/com/aerospike/client/proxy/grpc/SingleEventLoopGroup.java b/proxy/src/com/aerospike/client/proxy/grpc/SingleEventLoopGroup.java deleted file mode 100644 index 2832ec281..000000000 --- a/proxy/src/com/aerospike/client/proxy/grpc/SingleEventLoopGroup.java +++ /dev/null @@ -1,188 +0,0 @@ -/* - * Copyright 2012-2023 Aerospike, Inc. - * - * Portions may be licensed to Aerospike, Inc. under one or more contributor - * license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0. - * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package com.aerospike.client.proxy.grpc; - -import java.util.Collection; -import java.util.Iterator; -import java.util.List; -import java.util.concurrent.Callable; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; - -import javax.annotation.Nonnull; - -import org.checkerframework.checker.nullness.qual.NonNull; - -import com.fasterxml.jackson.databind.util.ArrayIterator; - -import io.netty.channel.ChannelFuture; -import io.netty.channel.ChannelPromise; -import io.netty.channel.EventLoop; -import io.netty.channel.EventLoopGroup; -import io.netty.util.concurrent.EventExecutor; -import io.netty.util.concurrent.Future; -import io.netty.util.concurrent.ScheduledFuture; - -/** - * An event loop group containing a single event loop. - *
- * TODO: verify it is correct to delegate to the singleton event loop. - */ -class SingleEventLoopGroup implements EventLoopGroup { - private final EventLoop eventLoop; - - SingleEventLoopGroup(EventLoop eventLoop) { - this.eventLoop = eventLoop; - } - - @Override - public boolean isShuttingDown() { - return eventLoop.isShuttingDown(); - } - - @Override - public Future shutdownGracefully() { - return eventLoop.shutdownGracefully(); - } - - @Override - public Future shutdownGracefully(long quietPeriod, long timeout, TimeUnit unit) { - return eventLoop.shutdownGracefully(quietPeriod, timeout, unit); - } - - @Override - public Future terminationFuture() { - return eventLoop.terminationFuture(); - } - - @SuppressWarnings("deprecation") - @Override - public void shutdown() { - eventLoop.shutdown(); - } - - @SuppressWarnings("deprecation") - @Override - public List shutdownNow() { - return eventLoop.shutdownNow(); - } - - @Override - public boolean isShutdown() { - return eventLoop.isShutdown(); - } - - @Override - public boolean isTerminated() { - return eventLoop.isShutdown(); - } - - @Override - public boolean awaitTermination(long timeout, @Nonnull TimeUnit unit) throws InterruptedException { - return eventLoop.awaitTermination(timeout, unit); - } - - @Override - public EventLoop next() { - return eventLoop; - } - - @Override - public Iterator iterator() { - return new ArrayIterator<>(new EventExecutor[]{eventLoop}); - } - - @Override - public Future submit(Runnable task) { - return eventLoop.submit(task); - } - - @NonNull - @Override - public List> invokeAll(@NonNull Collection> tasks) throws InterruptedException { - return eventLoop.invokeAll(tasks); - } - - @NonNull - @Override - public List> invokeAll(@NonNull Collection> tasks, long timeout, @NonNull TimeUnit unit) throws InterruptedException { - return eventLoop.invokeAll(tasks, timeout, unit); - } - - @NonNull - @Override - public T invokeAny(@NonNull Collection> tasks) throws InterruptedException, ExecutionException { - return eventLoop.invokeAny(tasks); - } - - @Override - public T invokeAny(@NonNull Collection> tasks, long timeout, @NonNull TimeUnit unit) throws InterruptedException, ExecutionException, TimeoutException { - return eventLoop.invokeAny(tasks, timeout, unit); - } - - @Override - public Future submit(Runnable task, T result) { - return eventLoop.submit(task, result); - } - - @Override - public Future submit(Callable task) { - return eventLoop.submit(task); - } - - @Override - public ScheduledFuture schedule(Runnable command, long delay, TimeUnit unit) { - return eventLoop.schedule(command, delay, unit); - } - - @Override - public ScheduledFuture schedule(Callable callable, long delay, TimeUnit unit) { - return eventLoop.schedule(callable, delay, unit); - } - - @Override - public ScheduledFuture scheduleAtFixedRate(Runnable command, long initialDelay, long period, TimeUnit unit) { - return eventLoop.scheduleAtFixedRate(command, initialDelay, period, unit); - } - - @Override - public ScheduledFuture scheduleWithFixedDelay(Runnable command, long initialDelay, long delay, TimeUnit unit) { - return eventLoop.scheduleWithFixedDelay(command, initialDelay, delay, unit); - } - - @Override - public ChannelFuture register(io.netty.channel.Channel channel) { - return eventLoop.register(channel); - } - - @Override - public ChannelFuture register(ChannelPromise promise) { - return eventLoop.register(promise); - } - - @SuppressWarnings("deprecation") - @Override - public ChannelFuture register(io.netty.channel.Channel channel, 
ChannelPromise promise) { - return eventLoop.register(channel, promise); - } - - @Override - public void execute(@NonNull Runnable command) { - eventLoop.execute(command); - } -} diff --git a/test/pom.xml b/test/pom.xml index 066e5f650..c843f6d8c 100644 --- a/test/pom.xml +++ b/test/pom.xml @@ -6,7 +6,7 @@ com.aerospike aerospike-parent - 8.1.4 + 9.0.0 aerospike-client-test jar @@ -24,11 +24,6 @@ aerospike-client-jdk21 - - com.aerospike - aerospike-proxy-client - - io.netty netty-transport diff --git a/test/src/com/aerospike/test/SuiteAsync.java b/test/src/com/aerospike/test/SuiteAsync.java index 8088dfee7..a8ccdf373 100644 --- a/test/src/com/aerospike/test/SuiteAsync.java +++ b/test/src/com/aerospike/test/SuiteAsync.java @@ -1,5 +1,5 @@ /* - * Copyright 2012-2023 Aerospike, Inc. + * Copyright 2012-2024 Aerospike, Inc. * * Portions may be licensed to Aerospike, Inc. under one or more contributor * license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0. @@ -21,27 +21,26 @@ import org.junit.runner.RunWith; import org.junit.runners.Suite; +import com.aerospike.client.AerospikeClient; import com.aerospike.client.Host; import com.aerospike.client.IAerospikeClient; import com.aerospike.client.Log; import com.aerospike.client.async.EventLoop; -import com.aerospike.client.async.EventLoopType; import com.aerospike.client.async.EventLoops; import com.aerospike.client.async.EventPolicy; import com.aerospike.client.async.NettyEventLoops; import com.aerospike.client.async.NioEventLoops; import com.aerospike.client.policy.ClientPolicy; -import com.aerospike.client.proxy.AerospikeClientFactory; import com.aerospike.test.async.TestAsyncBatch; import com.aerospike.test.async.TestAsyncOperate; import com.aerospike.test.async.TestAsyncPutGet; import com.aerospike.test.async.TestAsyncQuery; import com.aerospike.test.async.TestAsyncScan; +import com.aerospike.test.async.TestAsyncTxn; import com.aerospike.test.async.TestAsyncUDF; import com.aerospike.test.util.Args; import io.netty.channel.EventLoopGroup; -import io.netty.channel.epoll.Epoll; import io.netty.channel.epoll.EpollEventLoopGroup; import io.netty.channel.kqueue.KQueueEventLoopGroup; import io.netty.channel.nio.NioEventLoopGroup; @@ -54,6 +53,7 @@ TestAsyncOperate.class, TestAsyncScan.class, TestAsyncQuery.class, + TestAsyncTxn.class, TestAsyncUDF.class }) public class SuiteAsync { @@ -70,16 +70,6 @@ public static void init() { EventPolicy eventPolicy = new EventPolicy(); - if (args.useProxyClient && args.eventLoopType == EventLoopType.DIRECT_NIO) { - // Proxy client requires netty event loops. - if (Epoll.isAvailable()) { - args.eventLoopType = EventLoopType.NETTY_EPOLL; - } - else { - args.eventLoopType = EventLoopType.NETTY_NIO; - } - } - switch (args.eventLoopType) { default: case DIRECT_NIO: { @@ -121,7 +111,7 @@ public static void init() { eventLoop = eventLoops.get(0); - client = AerospikeClientFactory.getClient(policy, args.useProxyClient, hosts); + client = new AerospikeClient(policy, hosts); try { args.setServerSpecific(client); diff --git a/test/src/com/aerospike/test/SuiteProxy.java b/test/src/com/aerospike/test/SuiteProxy.java deleted file mode 100644 index 2e5657ea0..000000000 --- a/test/src/com/aerospike/test/SuiteProxy.java +++ /dev/null @@ -1,101 +0,0 @@ -/* - * Copyright 2012-2023 Aerospike, Inc. - * - * Portions may be licensed to Aerospike, Inc. under one or more contributor - * license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); you may not - * use this file except in compliance with the License. You may obtain a copy of - * the License at http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package com.aerospike.test; - -import org.junit.AfterClass; -import org.junit.BeforeClass; -import org.junit.runner.RunWith; -import org.junit.runners.Suite; - -import com.aerospike.test.async.TestAsyncBatch; -import com.aerospike.test.async.TestAsyncOperate; -import com.aerospike.test.async.TestAsyncPutGet; -import com.aerospike.test.async.TestAsyncScan; -import com.aerospike.test.sync.basic.TestAdd; -import com.aerospike.test.sync.basic.TestAppend; -import com.aerospike.test.sync.basic.TestBatch; -import com.aerospike.test.sync.basic.TestBitExp; -import com.aerospike.test.sync.basic.TestDeleteBin; -import com.aerospike.test.sync.basic.TestExpOperation; -import com.aerospike.test.sync.basic.TestExpire; -import com.aerospike.test.sync.basic.TestFilterExp; -import com.aerospike.test.sync.basic.TestGeneration; -import com.aerospike.test.sync.basic.TestHLLExp; -import com.aerospike.test.sync.basic.TestListExp; -import com.aerospike.test.sync.basic.TestListMap; -import com.aerospike.test.sync.basic.TestMapExp; -import com.aerospike.test.sync.basic.TestOperate; -import com.aerospike.test.sync.basic.TestOperateBit; -import com.aerospike.test.sync.basic.TestOperateHll; -import com.aerospike.test.sync.basic.TestOperateList; -import com.aerospike.test.sync.basic.TestOperateMap; -import com.aerospike.test.sync.basic.TestPutGet; -import com.aerospike.test.sync.basic.TestReplace; -import com.aerospike.test.sync.basic.TestScan; -import com.aerospike.test.sync.basic.TestTouch; -import com.aerospike.test.util.Args; - -@RunWith(Suite.class) -@Suite.SuiteClasses({ - TestAsyncPutGet.class, - TestAsyncBatch.class, - TestAsyncOperate.class, - TestAsyncScan.class, - TestAdd.class, - TestAppend.class, - TestBatch.class, - TestBitExp.class, - TestDeleteBin.class, - TestExpire.class, - TestExpOperation.class, - TestFilterExp.class, - TestGeneration.class, - TestHLLExp.class, - TestListExp.class, - TestListMap.class, - TestMapExp.class, - TestOperate.class, - TestOperateBit.class, - TestOperateHll.class, - TestOperateList.class, - TestOperateMap.class, - TestPutGet.class, - TestReplace.class, - TestScan.class, - TestTouch.class -}) -public class SuiteProxy { - @BeforeClass - public static void init() { - Args args = Args.Instance; - args.useProxyClient = true; - - if (args.port == 3000) { - System.out.println("Change proxy server port to 4000"); - args.port = 4000; - } - - SuiteSync.init(); - SuiteAsync.init(); - } - - @AfterClass - public static void destroy() { - SuiteSync.destroy(); - SuiteAsync.destroy(); - } -} diff --git a/test/src/com/aerospike/test/SuiteSync.java b/test/src/com/aerospike/test/SuiteSync.java index f115deffc..c65aa0def 100644 --- a/test/src/com/aerospike/test/SuiteSync.java +++ b/test/src/com/aerospike/test/SuiteSync.java @@ -1,5 +1,5 @@ /* - * Copyright 2012-2023 Aerospike, Inc. + * Copyright 2012-2024 Aerospike, Inc. * * Portions may be licensed to Aerospike, Inc. 
under one or more contributor * license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0. @@ -21,11 +21,11 @@ import org.junit.runner.RunWith; import org.junit.runners.Suite; +import com.aerospike.client.AerospikeClient; import com.aerospike.client.Host; import com.aerospike.client.IAerospikeClient; import com.aerospike.client.Log; import com.aerospike.client.policy.ClientPolicy; -import com.aerospike.client.proxy.AerospikeClientFactory; import com.aerospike.test.sync.basic.TestAdd; import com.aerospike.test.sync.basic.TestAppend; import com.aerospike.test.sync.basic.TestBatch; @@ -49,6 +49,7 @@ import com.aerospike.test.sync.basic.TestScan; import com.aerospike.test.sync.basic.TestServerInfo; import com.aerospike.test.sync.basic.TestTouch; +import com.aerospike.test.sync.basic.TestTxn; import com.aerospike.test.sync.basic.TestUDF; import com.aerospike.test.sync.query.TestIndex; import com.aerospike.test.sync.query.TestQueryAverage; @@ -92,6 +93,7 @@ TestScan.class, TestServerInfo.class, TestTouch.class, + TestTxn.class, TestUDF.class, TestIndex.class, TestQueryAverage.class, @@ -124,7 +126,7 @@ public static void init() { Host[] hosts = Host.parseHosts(args.host, args.port); - client = AerospikeClientFactory.getClient(policy, args.useProxyClient, hosts); + client = new AerospikeClient(policy, hosts); try { args.setServerSpecific(client); diff --git a/test/src/com/aerospike/test/async/AsyncMonitor.java b/test/src/com/aerospike/test/async/AsyncMonitor.java index 413d13581..0d791daea 100644 --- a/test/src/com/aerospike/test/async/AsyncMonitor.java +++ b/test/src/com/aerospike/test/async/AsyncMonitor.java @@ -1,5 +1,5 @@ /* - * Copyright 2012-2021 Aerospike, Inc. + * Copyright 2012-2024 Aerospike, Inc. * * Portions may be licensed to Aerospike, Inc. under one or more contributor * license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0. @@ -39,6 +39,11 @@ public synchronized void waitTillComplete() { } if (error != null) { + if (error instanceof RuntimeException) { + RuntimeException e = (RuntimeException)error; + throw e; + } + StringWriter out = new StringWriter(); error.printStackTrace(new PrintWriter(out)); fail(out.toString()); diff --git a/test/src/com/aerospike/test/async/TestAsync.java b/test/src/com/aerospike/test/async/TestAsync.java index 6b1e98f98..0a2e3494a 100644 --- a/test/src/com/aerospike/test/async/TestAsync.java +++ b/test/src/com/aerospike/test/async/TestAsync.java @@ -1,5 +1,5 @@ /* - * Copyright 2012-2023 Aerospike, Inc. + * Copyright 2012-2024 Aerospike, Inc. * * Portions may be licensed to Aerospike, Inc. under one or more contributor * license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0. @@ -103,6 +103,14 @@ public boolean assertRecordFound(Key key, Record record) { return true; } + public boolean assertRecordNotFound(Key key, Record record) { + if (record != null) { + monitor.setError(new Exception("Record should not exist: namespace=" + args.namespace + " set=" + args.set + " key=" + key.userKey)); + return false; + } + return true; + } + public boolean assertBetween(long begin, long end, long value) { if (! 
(value >= begin && value <= end)) { monitor.setError(new Exception("Range " + value + " not between " + begin + " and " + end)); @@ -175,8 +183,27 @@ public boolean assertTrue(boolean b) { return true; } - public void setError(Exception e) { - monitor.setError(e); + public boolean assertBatchEqual(Key[] keys, Record[] recs, String binName, int expected) { + for (int i = 0; i < keys.length; i++) { + Record rec = recs[i]; + + if (rec == null) { + monitor.setError(new Exception("recs[" + i + "] is null")); + return false; + } + + int received = rec.getInt(binName); + + if (expected != received) { + monitor.setError(new Exception("Data mismatch: Expected " + expected + ". Received[" + i + "] " + received)); + return false; + } + } + return true; + } + + public void setError(Throwable t) { + monitor.setError(t); } public void waitTillComplete() { diff --git a/test/src/com/aerospike/test/async/TestAsyncOperate.java b/test/src/com/aerospike/test/async/TestAsyncOperate.java index 8f0a86b86..658ad924c 100644 --- a/test/src/com/aerospike/test/async/TestAsyncOperate.java +++ b/test/src/com/aerospike/test/async/TestAsyncOperate.java @@ -100,10 +100,16 @@ public void onSuccess(Key key, boolean existed) { map.put(Value.get("b"), Value.get(2)); map.put(Value.get("c"), Value.get(3)); - client.operate(eventLoop, new MapHandler(), null, key, + try { + client.operate(eventLoop, new MapHandler(), null, key, MapOperation.putItems(MapPolicy.Default, binName, map), MapOperation.getByRankRange(binName, -1, 1, MapReturnType.KEY_VALUE) - ); + ); + } + catch (Throwable t) { + setError(t); + notifyComplete(); + } } public void onFailure(AerospikeException e) { diff --git a/test/src/com/aerospike/test/async/TestAsyncTxn.java b/test/src/com/aerospike/test/async/TestAsyncTxn.java new file mode 100644 index 000000000..468384dba --- /dev/null +++ b/test/src/com/aerospike/test/async/TestAsyncTxn.java @@ -0,0 +1,764 @@ +/* + * Copyright 2012-2024 Aerospike, Inc. + * + * Portions may be licensed to Aerospike, Inc. under one or more contributor + * license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. 
+ */ +package com.aerospike.test.async; + +import org.junit.Test; +import org.junit.BeforeClass; + +import com.aerospike.client.AbortStatus; +import com.aerospike.client.AerospikeException; +import com.aerospike.client.BatchRecord; +import com.aerospike.client.Bin; +import com.aerospike.client.CommitStatus; +import com.aerospike.client.Key; +import com.aerospike.client.Language; +import com.aerospike.client.Operation; +import com.aerospike.client.Record; +import com.aerospike.client.ResultCode; +import com.aerospike.client.Txn; +import com.aerospike.client.Value; +import com.aerospike.client.listener.BatchRecordArrayListener; +import com.aerospike.client.listener.DeleteListener; +import com.aerospike.client.listener.ExecuteListener; +import com.aerospike.client.listener.RecordArrayListener; +import com.aerospike.client.listener.RecordListener; +import com.aerospike.client.listener.AbortListener; +import com.aerospike.client.listener.CommitListener; +import com.aerospike.client.listener.WriteListener; +import com.aerospike.client.policy.BatchPolicy; +import com.aerospike.client.policy.Policy; +import com.aerospike.client.policy.WritePolicy; +import com.aerospike.client.task.RegisterTask; +import com.aerospike.test.sync.basic.TestUDF; + +public class TestAsyncTxn extends TestAsync { + public static final String binName = "bin"; + + @BeforeClass + public static void register() { + RegisterTask task = client.register(null, TestUDF.class.getClassLoader(), "udf/record_example.lua", "record_example.lua", Language.LUA); + task.waitTillComplete(); + } + + @Test + public void asyncTxnWrite() { + Key key = new Key(args.namespace, args.set, "asyncTxnWrite"); + Txn txn = new Txn(); + + Runner[] cmds = new Runner[] { + new Put(null, key, "val1"), + new Put(txn, key, "val2"), + new Commit(txn), + new GetExpect(null, key, "val2") + }; + + execute(cmds); + } + + @Test + public void asyncTxnWriteTwice() { + Key key = new Key(args.namespace, args.set, "asyncTxnWriteTwice"); + Txn txn = new Txn(); + + Runner[] cmds = new Runner[] { + new Put(txn, key, "val1"), + new Put(txn, key, "val2"), + new Commit(txn), + new GetExpect(null, key, "val2") + }; + + execute(cmds); + } + + @Test + public void asyncTxnWriteBlock() { + Key key = new Key(args.namespace, args.set, "asyncTxnWriteBlock"); + Txn txn = new Txn(); + + Runner[] cmds = new Runner[] { + new Put(null, key, "val1"), + new Put(txn, key, "val2"), + new Put(null, key, "val3"), // Should be blocked + new Commit(txn), + }; + + try { + execute(cmds); + throw new AerospikeException("Unexpected success"); + } + catch (AerospikeException ae) { + if (ae.getResultCode() != ResultCode.MRT_BLOCKED) { + throw ae; + } + } + } + + @Test + public void asyncTxnWriteRead() { + Key key = new Key(args.namespace, args.set, "asyncTxnWriteRead"); + Txn txn = new Txn(); + + Runner[] cmds = new Runner[] { + new Put(null, key, "val1"), + new Put(txn, key, "val2"), + new GetExpect(null, key, "val1"), + new Commit(txn), + new GetExpect(null, key, "val2") + }; + + execute(cmds); + } + + @Test + public void asyncTxnWriteAbort() { + Key key = new Key(args.namespace, args.set, "asyncTxnWriteAbort"); + Txn txn = new Txn(); + + Runner[] cmds = new Runner[] { + new Put(null, key, "val1"), + new Put(txn, key, "val2"), + new GetExpect(txn, key, "val2"), + new Abort(txn), + new GetExpect(null, key, "val1") + }; + + execute(cmds); + } + + @Test + public void asyncTxnDelete() { + Key key = new Key(args.namespace, args.set, "asyncTxnDelete"); + Txn txn = new Txn(); + + Runner[] cmds = new Runner[] 
{ + new Put(null, key, "val1"), + new Delete(txn, key), + new Commit(txn), + new GetExpect(null, key, null) + }; + + execute(cmds); + } + + @Test + public void asyncTxnDeleteAbort() { + Key key = new Key(args.namespace, args.set, "asyncTxnDeleteAbort"); + Txn txn = new Txn(); + + Runner[] cmds = new Runner[] { + new Put(null, key, "val1"), + new Delete(txn, key), + new Abort(txn), + new GetExpect(null, key, "val1") + }; + + execute(cmds); + } + + @Test + public void asyncTxnDeleteTwice() { + Key key = new Key(args.namespace, args.set, "asyncTxnDeleteTwice"); + Txn txn = new Txn(); + + Runner[] cmds = new Runner[] { + new Put(null, key, "val1"), + new Delete(txn, key), + new Delete(txn, key), + new Commit(txn), + new GetExpect(null, key, null) + }; + + execute(cmds); + } + + @Test + public void asyncTxnTouch() { + Key key = new Key(args.namespace, args.set, "asyncTxnTouch"); + Txn txn = new Txn(); + + Runner[] cmds = new Runner[] { + new Put(null, key, "val1"), + new Touch(txn, key), + new Commit(txn), + new GetExpect(null, key, "val1") + }; + + execute(cmds); + } + + @Test + public void asyncTxnTouchAbort() { + Key key = new Key(args.namespace, args.set, "asyncTxnTouchAbort"); + Txn txn = new Txn(); + + Runner[] cmds = new Runner[] { + new Put(null, key, "val1"), + new Touch(txn, key), + new Abort(txn), + new GetExpect(null, key, "val1") + }; + + execute(cmds); + } + + @Test + public void asyncTxnOperateWrite() { + Key key = new Key(args.namespace, args.set, "asyncTxnOperateWrite3"); + Txn txn = new Txn(); + Bin bin2 = new Bin("bin2", "bal1"); + + Runner[] cmds = new Runner[] { + new Put(null, key, new Bin(binName, "val1"), bin2), + new OperateExpect(txn, key, + bin2, + Operation.put(new Bin(binName, "val2")), + Operation.get(bin2.name) + ), + new Commit(txn), + new GetExpect(null, key, "val2") + }; + + execute(cmds); + } + + @Test + public void asyncTxnOperateWriteAbort() { + Key key = new Key(args.namespace, args.set, "asyncTxnOperateWriteAbort"); + Txn txn = new Txn(); + Bin bin2 = new Bin("bin2", "bal1"); + + Runner[] cmds = new Runner[] { + new Put(null, key, new Bin(binName, "val1"), bin2), + new OperateExpect(txn, key, + bin2, + Operation.put(new Bin(binName, "val2")), + Operation.get(bin2.name) + ), + new Abort(txn), + new GetExpect(null, key, "val1") + }; + + execute(cmds); + } + + @Test + public void asyncTxnUDF() { + Key key = new Key(args.namespace, args.set, "asyncTxnUDF"); + Txn txn = new Txn(); + Bin bin2 = new Bin("bin2", "bal1"); + + Runner[] cmds = new Runner[] { + new Put(null, key, new Bin(binName, "val1"), bin2), + new UDF(txn, key, "record_example", "writeBin", Value.get(binName), Value.get("val2")), + new Commit(txn), + new GetExpect(null, key, "val2") + }; + + execute(cmds); + } + + @Test + public void asyncTxnUDFAbort() { + Key key = new Key(args.namespace, args.set, "asyncTxnUDFAbort"); + Txn txn = new Txn(); + Bin bin2 = new Bin("bin2", "bal1"); + + Runner[] cmds = new Runner[] { + new Put(null, key, new Bin(binName, "val1"), bin2), + new UDF(txn, key, "record_example", "writeBin", Value.get(binName), Value.get("val2")), + new Abort(txn), + new GetExpect(null, key, "val1") + }; + + execute(cmds); + } + + @Test + public void asyncTxnBatch() { + Key[] keys = new Key[10]; + Bin bin = new Bin(binName, 1); + + for (int i = 0; i < keys.length; i++) { + Key key = new Key(args.namespace, args.set, "asyncTxnBatch" + i); + keys[i] = key; + client.put(null, key, bin); + } + + Txn txn = new Txn(); + bin = new Bin(binName, 2); + + Runner[] cmds = new Runner[] { + new 
BatchGetExpect(null, keys, 1), + new BatchOperate(txn, keys, Operation.put(bin)), + new Commit(txn), + new BatchGetExpect(null, keys, 2), + }; + + execute(cmds); + } + + @Test + public void asyncTxnBatchAbort() { + Key[] keys = new Key[10]; + Bin bin = new Bin(binName, 1); + + for (int i = 0; i < keys.length; i++) { + Key key = new Key(args.namespace, args.set, "asyncTxnBatch" + i); + keys[i] = key; + client.put(null, key, bin); + } + + Txn txn = new Txn(); + bin = new Bin(binName, 2); + + Runner[] cmds = new Runner[] { + new BatchGetExpect(null, keys, 1), + new BatchOperate(txn, keys, Operation.put(bin)), + new Abort(txn), + new BatchGetExpect(null, keys, 1), + }; + + execute(cmds); + } + + private void execute(Runner[] cmdArray) { + Cmds a = new Cmds(cmdArray); + a.runNext(); + waitTillComplete(); + } + + private void onError(Exception e) { + setError(e); + notifyComplete(); + } + + private void onError() { + // Error is located in monitor instance which is checked in waitTillComplete(); + notifyComplete(); + } + + private class Cmds implements Listener { + final Runner[] cmds; + int idx; + + private Cmds(Runner[] cmds) { + this.cmds = cmds; + this.idx = -1; + } + + private void runNext() { + if (++idx == cmds.length) { + notifyComplete(); + return; + } + + try { + cmds[idx].run(this); + } + catch (Exception e) { + onError(e); + } + } + + public void onSuccess() { + runNext(); + } + + public void onFailure() { + onError(); + } + + public void onFailure(Exception e) { + onError(e); + } + } + + private class Commit implements Runner { + private final Txn txn; + + private Commit(Txn txn) { + this.txn = txn; + } + + public void run(Listener listener) { + CommitListener tcl = new CommitListener() { + public void onSuccess(CommitStatus status) { + listener.onSuccess(); + } + + public void onFailure(AerospikeException.Commit ae) { + listener.onFailure(ae); + } + }; + + client.commit(eventLoop, tcl, txn); + } + } + + private class Abort implements Runner { + private final Txn txn; + + private Abort(Txn txn) { + this.txn = txn; + } + + public void run(Listener listener) { + AbortListener tal = new AbortListener() { + public void onSuccess(AbortStatus status) { + listener.onSuccess(); + } + }; + + client.abort(eventLoop, tal, txn); + } + } + + private class Put implements Runner { + private final Txn txn; + private final Key key; + private final Bin[] bins; + + private Put(Txn txn, Key key, String val) { + this.txn = txn; + this.key = key; + this.bins = new Bin[] {new Bin(binName, val)}; + } + + private Put(Txn txn, Key key, Bin... 
bins) { + this.txn = txn; + this.key = key; + this.bins = bins; + } + + public void run(Listener listener) { + WriteListener wl = new WriteListener() { + public void onSuccess(final Key key) { + listener.onSuccess(); + } + + public void onFailure(AerospikeException e) { + listener.onFailure(e); + } + }; + + WritePolicy wp = null; + + if (txn != null) { + wp = client.copyWritePolicyDefault(); + wp.txn = txn; + } + client.put(eventLoop, wl, wp, key, bins); + } + } + + private class GetExpect implements Runner { + private final Txn txn; + private final Key key; + private final String expect; + + private GetExpect(Txn txn, Key key, String expect) { + this.txn = txn; + this.key = key; + this.expect = expect; + } + + public void run(Listener listener) { + RecordListener rl = new RecordListener() { + public void onSuccess(Key key, Record record) { + if (expect != null) { + if (assertBinEqual(key, record, binName, expect)) { + listener.onSuccess(); + } + else { + listener.onFailure(); + } + } + else { + if (assertRecordNotFound(key, record)) { + listener.onSuccess(); + } + else { + listener.onFailure(); + } + } + } + + public void onFailure(AerospikeException e) { + listener.onFailure(e); + } + }; + + Policy p = null; + + if (txn != null) { + p = client.copyReadPolicyDefault(); + p.txn = txn; + } + client.get(eventLoop, rl, p, key); + } + } + + private class OperateExpect implements Runner { + private final Txn txn; + private final Key key; + private final Operation[] ops; + private final Bin expect; + + private OperateExpect(Txn txn, Key key, Bin expect, Operation... ops) { + this.txn = txn; + this.key = key; + this.expect = expect; + this.ops = ops; + } + + public void run(Listener listener) { + RecordListener rl = new RecordListener() { + public void onSuccess(Key key, Record record) { + if (expect != null) { + if (assertBinEqual(key, record, expect.name, expect.value.getObject())) { + listener.onSuccess(); + } + else { + listener.onFailure(); + } + } + else { + if (assertRecordNotFound(key, record)) { + listener.onSuccess(); + } + else { + listener.onFailure(); + } + } + } + + public void onFailure(AerospikeException e) { + listener.onFailure(e); + } + }; + + WritePolicy wp = null; + + if (txn != null) { + wp = client.copyWritePolicyDefault(); + wp.txn = txn; + } + client.operate(eventLoop, rl, wp, key, ops); + } + } + + private class UDF implements Runner { + private final Txn txn; + private final Key key; + private final String packageName; + private final String functionName; + private final Value[] functionArgs; + + private UDF( + Txn txn, + Key key, + String packageName, + String functionName, + Value... 
functionArgs + ) { + this.txn = txn; + this.key = key; + this.packageName = packageName; + this.functionName = functionName; + this.functionArgs = functionArgs; + } + + public void run(Listener listener) { + ExecuteListener el = new ExecuteListener() { + public void onSuccess(Key key, Object obj) { + listener.onSuccess(); + } + + public void onFailure(AerospikeException e) { + listener.onFailure(e); + } + }; + + WritePolicy wp = null; + + if (txn != null) { + wp = client.copyWritePolicyDefault(); + wp.txn = txn; + } + client.execute(eventLoop, el, wp, key, packageName, functionName, functionArgs); + } + } + + private class BatchGetExpect implements Runner { + private final Txn txn; + private final Key[] keys; + private final int expected; + + private BatchGetExpect(Txn txn, Key[] keys, int expected) { + this.txn = txn; + this.keys = keys; + this.expected = expected; + } + + public void run(Listener listener) { + RecordArrayListener ral = new RecordArrayListener() { + public void onSuccess(Key[] keys, Record[] records) { + if (assertBatchEqual(keys, records, binName, expected)) { + listener.onSuccess(); + } + else { + listener.onFailure(); + } + } + + public void onFailure(AerospikeException ae) { + listener.onFailure(ae); + } + }; + + BatchPolicy bp = null; + + if (txn != null) { + bp = client.copyBatchPolicyDefault(); + bp.txn = txn; + } + client.get(eventLoop, ral, bp, keys); + } + } + + private class BatchOperate implements Runner { + private final Txn txn; + private final Key[] keys; + private final Operation[] ops; + + private BatchOperate(Txn txn, Key[] keys, Operation... ops) { + this.txn = txn; + this.keys = keys; + this.ops = ops; + } + + public void run(Listener listener) { + BatchRecordArrayListener bral = new BatchRecordArrayListener() { + public void onSuccess(BatchRecord[] records, boolean status) { + if (status) { + listener.onSuccess(); + } + else { + StringBuilder sb = new StringBuilder(); + sb.append("Batch failed:"); + sb.append(System.lineSeparator()); + + for (BatchRecord br : records) { + if (br.resultCode == 0) { + sb.append("Record: " + br.record); + } + else { + sb.append("ResultCode: " + br.resultCode); + } + sb.append(System.lineSeparator()); + } + listener.onFailure(new AerospikeException(sb.toString())); + } + } + + public void onFailure(BatchRecord[] records, AerospikeException ae) { + listener.onFailure(ae); + } + }; + + + BatchPolicy bp = null; + + if (txn != null) { + bp = client.copyBatchParentPolicyWriteDefault(); + bp.txn = txn; + } + client.operate(eventLoop, bral, bp, null, keys, ops); + } + } + + private class Touch implements Runner { + private final Txn txn; + private final Key key; + + private Touch(Txn txn, Key key) { + this.txn = txn; + this.key = key; + } + + public void run(Listener listener) { + WriteListener wl = new WriteListener() { + public void onSuccess(final Key key) { + listener.onSuccess(); + } + + public void onFailure(AerospikeException e) { + listener.onFailure(e); + } + }; + + WritePolicy wp = null; + + if (txn != null) { + wp = client.copyWritePolicyDefault(); + wp.txn = txn; + } + client.touch(eventLoop, wl, wp, key); + } + } + + private class Delete implements Runner { + private final Txn txn; + private final Key key; + + private Delete(Txn txn, Key key) { + this.txn = txn; + this.key = key; + } + + public void run(Listener listener) { + DeleteListener dl = new DeleteListener() { + public void onSuccess(final Key key, boolean existed) { + listener.onSuccess(); + } + + public void onFailure(AerospikeException e) { + 
listener.onFailure(e); + } + }; + + WritePolicy wp = null; + + if (txn != null) { + wp = client.copyWritePolicyDefault(); + wp.txn = txn; + wp.durableDelete = true; + } + client.delete(eventLoop, dl, wp, key); + } + } + + private interface Runner { + void run(Listener listener); + } + + private interface Listener { + void onSuccess(); + void onFailure(); + void onFailure(Exception e); + } +} diff --git a/test/src/com/aerospike/test/async/TestAsyncUDF.java b/test/src/com/aerospike/test/async/TestAsyncUDF.java index 937cb8e9f..6c5e5dbf9 100644 --- a/test/src/com/aerospike/test/async/TestAsyncUDF.java +++ b/test/src/com/aerospike/test/async/TestAsyncUDF.java @@ -1,5 +1,5 @@ /* - * Copyright 2012-2023 Aerospike, Inc. + * Copyright 2012-2024 Aerospike, Inc. * * Portions may be licensed to Aerospike, Inc. under one or more contributor * license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0. @@ -41,10 +41,6 @@ public class TestAsyncUDF extends TestAsync { @BeforeClass public static void prepare() { - if (args.useProxyClient) { - System.out.println("Skip TestAsyncUDF.prepare"); - return; - } RegisterTask rtask = client.register(null, TestAsyncUDF.class.getClassLoader(), "udf/record_example.lua", "record_example.lua", Language.LUA); rtask.waitTillComplete(); } diff --git a/test/src/com/aerospike/test/sync/basic/TestAdd.java b/test/src/com/aerospike/test/sync/basic/TestAdd.java index 774489a58..6c28a9c42 100644 --- a/test/src/com/aerospike/test/sync/basic/TestAdd.java +++ b/test/src/com/aerospike/test/sync/basic/TestAdd.java @@ -1,5 +1,5 @@ /* - * Copyright 2012-2023 Aerospike, Inc. + * Copyright 2012-2024 Aerospike, Inc. * * Portions may be licensed to Aerospike, Inc. under one or more contributor * license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0. @@ -59,13 +59,11 @@ record = client.operate(null, key, Operation.add(bin), Operation.get(bin.name)); @Test public void addNullValue() { - if (! args.useProxyClient) { - Version version = Version.getServerVersion(client, null); + Version version = Version.getServerVersion(client, null); - // Do not run on servers < 3.6.1 - if (version.isLess(3, 6, 1)) { - return; - } + // Do not run on servers < 3.6.1 + if (version.isLess(3, 6, 1)) { + return; } Key key = new Key(args.namespace, args.set, "addkey"); diff --git a/test/src/com/aerospike/test/sync/basic/TestFilterExp.java b/test/src/com/aerospike/test/sync/basic/TestFilterExp.java index fdcadfa12..388ed55c9 100644 --- a/test/src/com/aerospike/test/sync/basic/TestFilterExp.java +++ b/test/src/com/aerospike/test/sync/basic/TestFilterExp.java @@ -1,5 +1,5 @@ /* - * Copyright 2012-2023 Aerospike, Inc. + * Copyright 2012-2024 Aerospike, Inc. * * Portions may be licensed to Aerospike, Inc. under one or more contributor * license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0. 
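// ---------------------------------------------------------------------------
// Illustrative sketch (not part of this patch): the synchronous counterpart of
// the transaction flow that the new TestAsyncTxn above exercises through
// listeners. It reuses APIs shown in this diff (Txn, copyWritePolicyDefault,
// copyReadPolicyDefault, the txn policy field) and assumes the synchronous
// commit(Txn)/abort(Txn) overloads introduced alongside Txn; host, namespace,
// set and bin names are placeholders.
import com.aerospike.client.AerospikeClient;
import com.aerospike.client.Bin;
import com.aerospike.client.Key;
import com.aerospike.client.Txn;
import com.aerospike.client.policy.ClientPolicy;
import com.aerospike.client.policy.Policy;
import com.aerospike.client.policy.WritePolicy;

public class TxnSketch {
	public static void main(String[] args) {
		AerospikeClient client = new AerospikeClient(new ClientPolicy(), "localhost", 3000);
		try {
			Key key = new Key("test", "demoset", "txnSketch");
			Txn txn = new Txn();

			// Write under the transaction: copy the default policy and attach the Txn.
			WritePolicy wp = client.copyWritePolicyDefault();
			wp.txn = txn;
			client.put(wp, key, new Bin("bin", "val2"));

			// A read that should observe the provisional value also carries the Txn.
			Policy rp = client.copyReadPolicyDefault();
			rp.txn = txn;
			System.out.println(client.get(rp, key));

			// Make the writes durable; client.abort(txn) would roll them back instead.
			client.commit(txn);
		}
		finally {
			client.close();
		}
	}
}
// ---------------------------------------------------------------------------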
@@ -63,10 +63,6 @@ public class TestFilterExp extends TestSync { @BeforeClass public static void register() { - if (args.useProxyClient) { - System.out.println("Skip TestFilterExp.register"); - return; - } RegisterTask task = client.register(null, TestUDF.class.getClassLoader(), "udf/record_example.lua", "record_example.lua", Language.LUA); @@ -313,10 +309,6 @@ public void run() { @Test public void udf() { - if (args.useProxyClient) { - System.out.println("Skip TestFilterExp.udf"); - return; - } WritePolicy policy = new WritePolicy(); policy.filterExp = Exp.build(Exp.eq(Exp.intBin(binA), Exp.val(1))); @@ -337,10 +329,6 @@ public void udf() { @Test public void udfExcept() { - if (args.useProxyClient) { - System.out.println("Skip TestFilterExp.udfExcept"); - return; - } WritePolicy policy = new WritePolicy(); policy.filterExp = Exp.build(Exp.eq(Exp.intBin(binA), Exp.val(1))); policy.failOnFilteredOut = true; diff --git a/test/src/com/aerospike/test/sync/basic/TestOperateBit.java b/test/src/com/aerospike/test/sync/basic/TestOperateBit.java index 3ffee323a..4671d6330 100644 --- a/test/src/com/aerospike/test/sync/basic/TestOperateBit.java +++ b/test/src/com/aerospike/test/sync/basic/TestOperateBit.java @@ -1,5 +1,5 @@ /* - * Copyright 2012-2023 Aerospike, Inc. + * Copyright 2012-2024 Aerospike, Inc. * * Portions may be licensed to Aerospike, Inc. under one or more contributor * license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0. @@ -710,11 +710,6 @@ public void assertBitModifyInsert(int bin_sz, int offset, int set_sz, @Test public void operateBitSetEx() { - if (args.useProxyClient) { - System.out.println("Skip TestOperateBit.operateBitSetEx"); - return; - } - BitPolicy policy = new BitPolicy(); int bin_sz = 15; int bin_bit_sz = bin_sz * 8; @@ -732,11 +727,6 @@ public void operateBitSetEx() { @Test public void operateBitLShiftEx() { - if (args.useProxyClient) { - System.out.println("Skip TestOperateBit.operateBitLShiftEx"); - return; - } - BitPolicy policy = new BitPolicy(); int bin_sz = 15; int bin_bit_sz = bin_sz * 8; @@ -768,11 +758,6 @@ public void operateBitLShiftEx() { @Test public void operateBitRShiftEx() { - if (args.useProxyClient) { - System.out.println("Skip TestOperateBit.operateBitRShiftEx"); - return; - } - BitPolicy policy = new BitPolicy(); BitPolicy partial_policy = new BitPolicy(BitWriteFlags.PARTIAL); int bin_sz = 15; @@ -821,11 +806,6 @@ public void operateBitRShiftEx() { @Test public void operateBitAndEx() { - if (args.useProxyClient) { - System.out.println("Skip TestOperateBit.operateBitAndEx"); - return; - } - BitPolicy policy = new BitPolicy(); int bin_sz = 15; int bin_bit_sz = bin_sz * 8; @@ -843,11 +823,6 @@ public void operateBitAndEx() { @Test public void operateBitNotEx() { - if (args.useProxyClient) { - System.out.println("Skip TestOperateBit.operateBitNotEx"); - return; - } - BitPolicy policy = new BitPolicy(); int bin_sz = 15; int bin_bit_sz = bin_sz * 8; @@ -879,11 +854,6 @@ public void operateBitInsertEx() { @Test public void operateBitAddEx() { - if (args.useProxyClient) { - System.out.println("Skip TestOperateBit.operateBitAddEx"); - return; - } - BitPolicy policy = new BitPolicy(); int bin_sz = 15; int bin_bit_sz = bin_sz * 8; @@ -901,11 +871,6 @@ public void operateBitAddEx() { @Test public void operateBitSubEx() { - if (args.useProxyClient) { - System.out.println("Skip TestOperateBit.operateBitSubEx"); - return; - } - BitPolicy policy = new BitPolicy(); int bin_sz = 15; int bin_bit_sz = bin_sz * 8; diff --git 
a/test/src/com/aerospike/test/sync/basic/TestOperateHll.java b/test/src/com/aerospike/test/sync/basic/TestOperateHll.java index bd35a0179..1ba453701 100644 --- a/test/src/com/aerospike/test/sync/basic/TestOperateHll.java +++ b/test/src/com/aerospike/test/sync/basic/TestOperateHll.java @@ -1,5 +1,5 @@ /* - * Copyright 2012-2023 Aerospike, Inc. + * Copyright 2012-2024 Aerospike, Inc. * * Portions may be licensed to Aerospike, Inc. under one or more contributor * license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0. @@ -217,11 +217,6 @@ public void assertInit(int nIndexBits, int nMinhashBits, boolean shouldPass) { @Test public void operateHLLInit() { - if (args.useProxyClient) { - System.out.println("Skip TestOperateHll.operateHLLInit"); - return; - } - client.delete(null, key); for (ArrayList desc : legalDescriptions) { @@ -335,11 +330,6 @@ public void assertAddInit(int nIndexBits, int nMinhashBits) { @Test public void operateHLLAddInit() { - if (args.useProxyClient) { - System.out.println("Skip TestOperateHll.operateHLLAddInit"); - return; - } - for (ArrayList desc : legalDescriptions) { assertAddInit(desc.get(0), desc.get(1)); } @@ -737,11 +727,6 @@ record = assertSuccess("", key, @Test public void getPut() { - if (args.useProxyClient) { - System.out.println("Skip TestOperateHll.getPut"); - return; - } - for (ArrayList desc : legalDescriptions) { int nIndexBits = desc.get(0); int nMinhashBits = desc.get(1); @@ -750,7 +735,7 @@ public void getPut() { Operation.delete(), HLLOperation.init(HLLPolicy.Default, binName, nIndexBits, nMinhashBits)); Record record = client.get(null, key); - HLLValue hll = (HLLValue)record.getHLLValue(binName); + HLLValue hll = record.getHLLValue(binName); client.delete(null, key); client.put(null, key, new Bin(binName, hll)); @@ -841,11 +826,6 @@ record = assertSuccess("similarity and intersectCount", key, @Test public void operateSimilarity() { - if (args.useProxyClient) { - System.out.println("Skip TestOperateHll.operateSimilarity"); - return; - } - double[] overlaps = new double[] {0.0001, 0.001, 0.01, 0.1, 0.5}; for (double overlap : overlaps) { @@ -884,11 +864,6 @@ public void operateSimilarity() { @Test public void operateEmptySimilarity() { - if (args.useProxyClient) { - System.out.println("Skip TestOperateHll.operateEmptySimilarity"); - return; - } - for (ArrayList desc : legalDescriptions) { int nIndexBits = desc.get(0); int nMinhashBits = desc.get(1); diff --git a/test/src/com/aerospike/test/sync/basic/TestScan.java b/test/src/com/aerospike/test/sync/basic/TestScan.java index c6a29289f..c3f028322 100644 --- a/test/src/com/aerospike/test/sync/basic/TestScan.java +++ b/test/src/com/aerospike/test/sync/basic/TestScan.java @@ -43,10 +43,6 @@ public void scanParallel() { @Test public void scanSeries() { - if (args.useProxyClient) { - System.out.println("Skip TestScan.scanSeries"); - return; - } ScanPolicy policy = new ScanPolicy(); List nodeList = client.getNodeNames(); diff --git a/test/src/com/aerospike/test/sync/basic/TestServerInfo.java b/test/src/com/aerospike/test/sync/basic/TestServerInfo.java index 2d39bf2bf..40164530c 100644 --- a/test/src/com/aerospike/test/sync/basic/TestServerInfo.java +++ b/test/src/com/aerospike/test/sync/basic/TestServerInfo.java @@ -31,10 +31,6 @@ public class TestServerInfo extends TestSync { @Test public void serverInfo() { - if (args.useProxyClient) { - System.out.println("Skip TestServerInfo.serverInfo"); - return; - } Node node = client.getNodes()[0]; GetServerConfig(node); GetNamespaceConfig(node); 
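
Note: the new TestTxn.java suite added below exercises the Txn API introduced in this changeset. As a rough orientation, every test follows the same pattern: create a Txn, attach it to the per-command policy via its txn field, issue commands, then commit or abort. A minimal sketch, using only calls that appear in the tests themselves (the key and bin values here are illustrative):

    // Sketch of the transaction flow used throughout TestTxn below.
    Txn txn = new Txn();

    WritePolicy wp = client.copyWritePolicyDefault();
    wp.txn = txn;                         // enroll this write in the transaction
    client.put(wp, key, new Bin("bin", "val2"));

    Policy rp = client.copyReadPolicyDefault();
    rp.txn = txn;                         // reads inside the txn observe its own writes
    Record record = client.get(rp, key);

    client.commit(txn);                   // or client.abort(txn) to roll back

    // Deletes inside a transaction also set wp.durableDelete = true, as in txnDelete below.
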
diff --git a/test/src/com/aerospike/test/sync/basic/TestTxn.java b/test/src/com/aerospike/test/sync/basic/TestTxn.java new file mode 100644 index 000000000..6c12dbbc9 --- /dev/null +++ b/test/src/com/aerospike/test/sync/basic/TestTxn.java @@ -0,0 +1,467 @@ +/* + * Copyright 2012-2024 Aerospike, Inc. + * + * Portions may be licensed to Aerospike, Inc. under one or more contributor + * license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0. + * + * Licensed under the Apache License, Version 2.0 (the "License"); you may not + * use this file except in compliance with the License. You may obtain a copy of + * the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ +package com.aerospike.test.sync.basic; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; + +import com.aerospike.test.sync.TestSync; +import org.junit.Test; +import org.junit.BeforeClass; + +import com.aerospike.client.AerospikeException; +import com.aerospike.client.BatchRecord; +import com.aerospike.client.BatchResults; +import com.aerospike.client.Bin; +import com.aerospike.client.Key; +import com.aerospike.client.Language; +import com.aerospike.client.Operation; +import com.aerospike.client.Record; +import com.aerospike.client.ResultCode; +import com.aerospike.client.Value; +import com.aerospike.client.policy.BatchPolicy; +import com.aerospike.client.policy.Policy; +import com.aerospike.client.policy.WritePolicy; +import com.aerospike.client.task.RegisterTask; +import com.aerospike.client.Txn; + +public class TestTxn extends TestSync { + public static final String binName = "bin"; + + @BeforeClass + public static void register() { + RegisterTask task = client.register(null, TestUDF.class.getClassLoader(), "udf/record_example.lua", "record_example.lua", Language.LUA); + task.waitTillComplete(); + } + + @Test + public void txnWrite() { + Key key = new Key(args.namespace, args.set, "mrtkey1"); + + client.put(null, key, new Bin(binName, "val1")); + + Txn txn = new Txn(); + + WritePolicy wp = client.copyWritePolicyDefault(); + wp.txn = txn; + client.put(wp, key, new Bin(binName, "val2")); + + client.commit(txn); + + Record record = client.get(null, key); + assertBinEqual(key, record, binName, "val2"); + } + + @Test + public void txnWriteTwice() { + Key key = new Key(args.namespace, args.set, "mrtkey2"); + + Txn txn = new Txn(); + + WritePolicy wp = client.copyWritePolicyDefault(); + wp.txn = txn; + client.put(wp, key, new Bin(binName, "val1")); + client.put(wp, key, new Bin(binName, "val2")); + + client.commit(txn); + + Record record = client.get(null, key); + assertBinEqual(key, record, binName, "val2"); + } + + @Test + public void txnWriteConflict() { + Key key = new Key(args.namespace, args.set, "mrtkey21"); + + Txn txn1 = new Txn(); + Txn txn2 = new Txn(); + + WritePolicy wp1 = client.copyWritePolicyDefault(); + WritePolicy wp2 = client.copyWritePolicyDefault(); + wp1.txn = txn1; + wp2.txn = txn2; + + client.put(wp1, key, new Bin(binName, "val1")); + + try { + client.put(wp2, key, new Bin(binName, "val2")); + } + catch (AerospikeException ae) { + if (ae.getResultCode() != 
ResultCode.MRT_BLOCKED) { + throw ae; + } + } + + client.commit(txn1); + client.commit(txn2); + + Record record = client.get(null, key); + assertBinEqual(key, record, binName, "val1"); + } + + @Test + public void txnWriteBlock() { + Key key = new Key(args.namespace, args.set, "mrtkey3"); + + client.put(null, key, new Bin(binName, "val1")); + + Txn txn = new Txn(); + + WritePolicy wp = client.copyWritePolicyDefault(); + wp.txn = txn; + client.put(wp, key, new Bin(binName, "val2")); + + try { + // This write should be blocked. + client.put(null, key, new Bin(binName, "val3")); + throw new AerospikeException("Unexpected success"); + } + catch (AerospikeException e) { + if (e.getResultCode() != ResultCode.MRT_BLOCKED) { + throw e; + } + } + + client.commit(txn); + } + + @Test + public void txnWriteRead() { + Key key = new Key(args.namespace, args.set, "mrtkey4"); + + client.put(null, key, new Bin(binName, "val1")); + + Txn txn = new Txn(); + + WritePolicy wp = client.copyWritePolicyDefault(); + wp.txn = txn; + client.put(wp, key, new Bin(binName, "val2")); + + Record record = client.get(null, key); + assertBinEqual(key, record, binName, "val1"); + + client.commit(txn); + + record = client.get(null, key); + assertBinEqual(key, record, binName, "val2"); + } + + @Test + public void txnWriteAbort() { + Key key = new Key(args.namespace, args.set, "mrtkey5"); + + client.put(null, key, new Bin(binName, "val1")); + + Txn txn = new Txn(); + + WritePolicy wp = client.copyWritePolicyDefault(); + wp.txn = txn; + client.put(wp, key, new Bin(binName, "val2")); + + Policy p = client.copyReadPolicyDefault(); + p.txn = txn; + Record record = client.get(p, key); + assertBinEqual(key, record, binName, "val2"); + + client.abort(txn); + + record = client.get(null, key); + assertBinEqual(key, record, binName, "val1"); + } + + @Test + public void txnDelete() { + Key key = new Key(args.namespace, args.set, "mrtkey6"); + + client.put(null, key, new Bin(binName, "val1")); + + Txn txn = new Txn(); + + WritePolicy wp = client.copyWritePolicyDefault(); + wp.txn = txn; + wp.durableDelete = true; + client.delete(wp, key); + + client.commit(txn); + + Record record = client.get(null, key); + assertNull(record); + } + + @Test + public void txnDeleteAbort() { + Key key = new Key(args.namespace, args.set, "mrtkey7"); + + client.put(null, key, new Bin(binName, "val1")); + + Txn txn = new Txn(); + + WritePolicy wp = client.copyWritePolicyDefault(); + wp.txn = txn; + wp.durableDelete = true; + client.delete(wp, key); + + client.abort(txn); + + Record record = client.get(null, key); + assertBinEqual(key, record, binName, "val1"); + } + + @Test + public void txnDeleteTwice() { + Key key = new Key(args.namespace, args.set, "mrtkey8"); + + Txn txn = new Txn(); + + client.put(null, key, new Bin(binName, "val1")); + + WritePolicy wp = client.copyWritePolicyDefault(); + wp.txn = txn; + wp.durableDelete = true; + client.delete(wp, key); + client.delete(wp, key); + + client.commit(txn); + + Record record = client.get(null, key); + assertNull(record); + } + + @Test + public void txnTouch() { + Key key = new Key(args.namespace, args.set, "mrtkey9"); + + client.put(null, key, new Bin(binName, "val1")); + + Txn txn = new Txn(); + + WritePolicy wp = client.copyWritePolicyDefault(); + wp.txn = txn; + client.touch(wp, key); + + client.commit(txn); + + Record record = client.get(null, key); + assertBinEqual(key, record, binName, "val1"); + } + + @Test + public void txnTouchAbort() { + Key key = new Key(args.namespace, args.set, "mrtkey10"); + + 
client.put(null, key, new Bin(binName, "val1")); + + Txn txn = new Txn(); + + WritePolicy wp = client.copyWritePolicyDefault(); + wp.txn = txn; + client.touch(wp, key); + + client.abort(txn); + + Record record = client.get(null, key); + assertBinEqual(key, record, binName, "val1"); + } + + @Test + public void txnOperateWrite() { + Key key = new Key(args.namespace, args.set, "mrtkey11"); + + client.put(null, key, new Bin(binName, "val1"), new Bin("bin2", "bal1")); + + Txn txn = new Txn(); + + WritePolicy wp = client.copyWritePolicyDefault(); + wp.txn = txn; + Record record = client.operate(wp, key, + Operation.put(new Bin(binName, "val2")), + Operation.get("bin2") + ); + assertBinEqual(key, record, "bin2", "bal1"); + + client.commit(txn); + + record = client.get(null, key); + assertBinEqual(key, record, binName, "val2"); + } + + @Test + public void txnOperateWriteAbort() { + Key key = new Key(args.namespace, args.set, "mrtkey12"); + + client.put(null, key, new Bin(binName, "val1"), new Bin("bin2", "bal1")); + + Txn txn = new Txn(); + + WritePolicy wp = client.copyWritePolicyDefault(); + wp.txn = txn; + Record record = client.operate(wp, key, + Operation.put(new Bin(binName, "val2")), + Operation.get("bin2") + ); + assertBinEqual(key, record, "bin2", "bal1"); + + client.abort(txn); + + record = client.get(null, key); + assertBinEqual(key, record, binName, "val1"); + } + + @Test + public void txnUDF() { + Key key = new Key(args.namespace, args.set, "mrtkey13"); + + client.put(null, key, new Bin(binName, "val1")); + + Txn txn = new Txn(); + + WritePolicy wp = client.copyWritePolicyDefault(); + wp.txn = txn; + client.execute(wp, key, "record_example", "writeBin", Value.get(binName), Value.get("val2")); + + client.commit(txn); + + Record record = client.get(null, key); + assertBinEqual(key, record, binName, "val2"); + } + + @Test + public void txnUDFAbort() { + Key key = new Key(args.namespace, args.set, "mrtkey14"); + + client.put(null, key, new Bin(binName, "val1")); + + Txn txn = new Txn(); + + WritePolicy wp = client.copyWritePolicyDefault(); + wp.txn = txn; + client.execute(wp, key, "record_example", "writeBin", Value.get(binName), Value.get("val2")); + + client.abort(txn); + + Record record = client.get(null, key); + assertBinEqual(key, record, binName, "val1"); + } + + @Test + public void txnBatch() { + Key[] keys = new Key[10]; + Bin bin = new Bin(binName, 1); + + for (int i = 0; i < keys.length; i++) { + Key key = new Key(args.namespace, args.set, i); + keys[i] = key; + + client.put(null, key, bin); + } + + Record[] recs = client.get(null, keys); + assertBatchEqual(keys, recs, 1); + + Txn txn = new Txn(); + + bin = new Bin(binName, 2); + + BatchPolicy bp = BatchPolicy.WriteDefault(); + bp.txn = txn; + + BatchResults bresults = client.operate(bp, null, keys, Operation.put(bin)); + + if (!bresults.status) { + StringBuilder sb = new StringBuilder(); + sb.append("Batch failed:"); + sb.append(System.lineSeparator()); + + for (BatchRecord br : bresults.records) { + if (br.resultCode == 0) { + sb.append("Record: " + br.record); + } + else { + sb.append("ResultCode: " + br.resultCode); + } + sb.append(System.lineSeparator()); + } + + throw new AerospikeException(sb.toString()); + } + + client.commit(txn); + + recs = client.get(null, keys); + assertBatchEqual(keys, recs, 2); + } + + @Test + public void txnBatchAbort() { + Key[] keys = new Key[10]; + Bin bin = new Bin(binName, 1); + + for (int i = 0; i < keys.length; i++) { + Key key = new Key(args.namespace, args.set, i); + keys[i] = key; + + 
client.put(null, key, bin); + } + + Record[] recs = client.get(null, keys); + assertBatchEqual(keys, recs, 1); + + Txn txn = new Txn(); + + bin = new Bin(binName, 2); + + BatchPolicy bp = BatchPolicy.WriteDefault(); + bp.txn = txn; + + BatchResults bresults = client.operate(bp, null, keys, Operation.put(bin)); + + if (!bresults.status) { + StringBuilder sb = new StringBuilder(); + sb.append("Batch failed:"); + sb.append(System.lineSeparator()); + + for (BatchRecord br : bresults.records) { + if (br.resultCode == 0) { + sb.append("Record: " + br.record); + } + else { + sb.append("ResultCode: " + br.resultCode); + } + sb.append(System.lineSeparator()); + } + + throw new AerospikeException(sb.toString()); + } + + client.abort(txn); + + recs = client.get(null, keys); + assertBatchEqual(keys, recs, 1); + } + + private void assertBatchEqual(Key[] keys, Record[] recs, int expected) { + for (int i = 0; i < keys.length; i++) { + Record rec = recs[i]; + + assertNotNull(rec); + + int received = rec.getInt(binName); + assertEquals(expected, received); + } + } +} diff --git a/test/src/com/aerospike/test/sync/basic/TestUDF.java b/test/src/com/aerospike/test/sync/basic/TestUDF.java index 9e4459fce..35ca22dfa 100644 --- a/test/src/com/aerospike/test/sync/basic/TestUDF.java +++ b/test/src/com/aerospike/test/sync/basic/TestUDF.java @@ -1,5 +1,5 @@ /* - * Copyright 2012-2023 Aerospike, Inc. + * Copyright 2012-2024 Aerospike, Inc. * * Portions may be licensed to Aerospike, Inc. under one or more contributor * license agreements WHICH ARE COMPATIBLE WITH THE APACHE LICENSE, VERSION 2.0. @@ -48,10 +48,6 @@ public class TestUDF extends TestSync { @BeforeClass public static void register() { - if (args.useProxyClient) { - System.out.println("Skip TestUDF.register"); - return; - } RegisterTask task = client.register(null, TestUDF.class.getClassLoader(), "udf/record_example.lua", "record_example.lua", Language.LUA); task.waitTillComplete(); } diff --git a/test/src/com/aerospike/test/util/Args.java b/test/src/com/aerospike/test/util/Args.java index b6965038a..23014ccb4 100644 --- a/test/src/com/aerospike/test/util/Args.java +++ b/test/src/com/aerospike/test/util/Args.java @@ -57,7 +57,6 @@ public class Args { public int totalTimeout = 1000; public boolean enterprise; public boolean hasTtl; - public boolean useProxyClient; public Args() { host = "127.0.0.1"; @@ -130,7 +129,6 @@ public Args() { "for single record and batch commands." ); - options.addOption("proxy", false, "Use proxy client."); options.addOption("d", "debug", false, "Run in debug mode."); options.addOption("u", "usage", false, "Print usage."); @@ -214,17 +212,6 @@ public Args() { totalTimeout = Integer.parseInt(cl.getOptionValue("totalTimeout"));; } - if (cl.hasOption("proxy")) { - useProxyClient = true; - } - - // If the Aerospike server's default port (3000) is used and the proxy client is used, - // Reset the port to the proxy server's default port (4000). - if (port == 3000 && useProxyClient) { - System.out.println("Change proxy server port to 4000"); - port = 4000; - } - if (cl.hasOption("d")) { Log.setLevel(Level.DEBUG); } @@ -262,11 +249,6 @@ private static void logUsage(Options options) { * Some database calls need to know how the server is configured. */ public void setServerSpecific(IAerospikeClient client) { - if (useProxyClient) { - // Proxy client does not support querying nodes directly for their configuration. 
- return; - } - Node node = client.getNodes()[0]; String editionFilter = "edition"; String namespaceFilter = "namespace/" + namespace;