\d+))?)?)?.*`
var vmeta = regexp.MustCompile(pattern)
- vs := info(nativeClient, "build")
+ vs := info(client, "build")
server := findNamedMatches(vmeta, vs)
req := findNamedMatches(vmeta, v)
@@ -471,7 +405,7 @@ func dropUser(
policy *as.AdminPolicy,
user string,
) {
- err := nativeClient.DropUser(policy, user)
+ err := client.DropUser(policy, user)
gm.Expect(err).ToNot(gm.HaveOccurred())
}
@@ -481,7 +415,7 @@ func dropIndex(
setName string,
indexName string,
) {
- gm.Expect(nativeClient.DropIndex(policy, namespace, setName, indexName)).ToNot(gm.HaveOccurred())
+ gm.Expect(client.DropIndex(policy, namespace, setName, indexName)).ToNot(gm.HaveOccurred())
// time.Sleep(time.Second)
}
@@ -494,7 +428,7 @@ func createIndex(
binName string,
indexType as.IndexType,
) {
- idxTask, err := nativeClient.CreateIndex(policy, namespace, setName, indexName, binName, indexType)
+ idxTask, err := client.CreateIndex(policy, namespace, setName, indexName, binName, indexType)
if err != nil {
if !err.Matches(ast.INDEX_FOUND) {
gm.Expect(err).ToNot(gm.HaveOccurred())
@@ -519,7 +453,7 @@ func createComplexIndex(
ctx ...*as.CDTContext,
) {
// queries only work on indices
- idxTask1, err := nativeClient.CreateComplexIndex(policy, namespace, setName, indexName, binName, indexType, indexCollectionType, ctx...)
+ idxTask1, err := client.CreateComplexIndex(policy, namespace, setName, indexName, binName, indexType, indexCollectionType, ctx...)
gm.Expect(err).ToNot(gm.HaveOccurred())
// wait until index is created
diff --git a/anonymous_fields_test.go b/anonymous_fields_test.go
index 4b889bb4..4d262927 100644
--- a/anonymous_fields_test.go
+++ b/anonymous_fields_test.go
@@ -17,7 +17,7 @@
package aerospike_test
import (
- as "github.com/aerospike/aerospike-client-go/v7"
+ as "github.com/aerospike/aerospike-client-go/v8"
gg "github.com/onsi/ginkgo/v2"
gm "github.com/onsi/gomega"
diff --git a/base_read_command.go b/base_read_command.go
new file mode 100644
index 00000000..10753620
--- /dev/null
+++ b/base_read_command.go
@@ -0,0 +1,100 @@
+// Copyright 2014-2022 Aerospike, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package aerospike
+
+import (
+ "reflect"
+
+ "github.com/aerospike/aerospike-client-go/v8/types"
+)
+
+type baseReadCommand struct {
+ singleCommand
+
+ policy *BasePolicy
+ binNames []string
+ record *Record
+
+ // pointer to the object that's going to be unmarshalled
+ object *reflect.Value
+
+ replicaSequence int
+}
+
+// this method uses reflection.
+// Will not be set if performance flag is passed for the build.
+var objectParser func(
+ brc *baseReadCommand,
+ opCount int,
+ fieldCount int,
+ generation uint32,
+ expiration uint32,
+) Error
+
+func newBaseReadCommand(cluster *Cluster, policy *BasePolicy, key *Key) (baseReadCommand, Error) {
+ var partition *Partition
+ var err Error
+ if cluster != nil {
+ partition, err = PartitionForRead(cluster, policy, key)
+ if err != nil {
+ return baseReadCommand{}, err
+ }
+ }
+
+ return baseReadCommand{
+ singleCommand: newSingleCommand(cluster, key, partition),
+ policy: policy,
+ }, nil
+}
+
+func (cmd *baseReadCommand) getPolicy(ifc command) Policy {
+ return cmd.policy
+}
+
+func (cmd *baseReadCommand) writeBuffer(ifc command) Error {
+ panic(unreachable)
+}
+
+func (cmd *baseReadCommand) getNode(ifc command) (*Node, Error) {
+ return cmd.partition.GetNodeRead(cmd.cluster)
+}
+
+func (cmd *baseReadCommand) prepareRetry(ifc command, isTimeout bool) bool {
+ cmd.partition.PrepareRetryRead(isTimeout)
+ return true
+}
+
+func (cmd *baseReadCommand) parseResult(ifc command, conn *Connection) Error {
+ panic(unreachable)
+}
+
+func (cmd *baseReadCommand) handleUdfError(resultCode types.ResultCode) Error {
+ if ret, exists := cmd.record.Bins["FAILURE"]; exists {
+ return newError(resultCode, ret.(string))
+ }
+ return newError(resultCode)
+}
+
+func (cmd *baseReadCommand) GetRecord() *Record {
+ return cmd.record
+}
+
+func (cmd *baseReadCommand) Execute() Error {
+ panic(unreachable)
+}
+
+func (cmd *baseReadCommand) commandType() commandType {
+ return ttGet
+}
diff --git a/base_write_command.go b/base_write_command.go
new file mode 100644
index 00000000..c0d78bfc
--- /dev/null
+++ b/base_write_command.go
@@ -0,0 +1,102 @@
+// Copyright 2014-2022 Aerospike, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package aerospike
+
+import "github.com/aerospike/aerospike-client-go/v8/types"
+
+// guarantee baseWriteCommand implements command interface
+var _ command = &baseWriteCommand{}
+
+type baseWriteCommand struct {
+ singleCommand
+
+ policy *WritePolicy
+}
+
+func newBaseWriteCommand(
+ cluster *Cluster,
+ policy *WritePolicy,
+ key *Key,
+) (baseWriteCommand, Error) {
+
+ var partition *Partition
+ var err Error
+ if cluster != nil {
+ partition, err = PartitionForWrite(cluster, &policy.BasePolicy, key)
+ if err != nil {
+ return baseWriteCommand{}, err
+ }
+ }
+
+ newBaseWriteCmd := baseWriteCommand{
+ singleCommand: newSingleCommand(cluster, key, partition),
+ policy: policy,
+ }
+
+ return newBaseWriteCmd, nil
+}
+
+func (cmd *baseWriteCommand) writeBuffer(ifc command) Error {
+ panic(unreachable)
+}
+
+func (cmd *baseWriteCommand) getPolicy(ifc command) Policy {
+ return cmd.policy
+}
+
+func (cmd *baseWriteCommand) getNode(ifc command) (*Node, Error) {
+ return cmd.partition.GetNodeWrite(cmd.cluster)
+}
+
+func (cmd *baseWriteCommand) prepareRetry(ifc command, isTimeout bool) bool {
+ cmd.partition.PrepareRetryWrite(isTimeout)
+ return true
+}
+
+func (cmd *baseWriteCommand) isRead() bool {
+ return false
+}
+
+func (cmd *baseWriteCommand) parseResult(ifc command, conn *Connection) Error {
+ panic(unreachable)
+}
+
+func (cmd *baseWriteCommand) Execute() Error {
+ panic(unreachable)
+}
+
+func (cmd *baseWriteCommand) onInDoubt() {
+ if cmd.policy.Txn != nil {
+ cmd.policy.Txn.OnWriteInDoubt(cmd.key)
+ }
+
+}
+
+func (cmd *baseWriteCommand) commandType() commandType {
+ return ttPut
+}
+
+func (cmd *baseWriteCommand) parseHeader() (types.ResultCode, Error) {
+ rp, err := newRecordParser(&cmd.baseCommand)
+ if err != nil {
+ return err.resultCode(), err
+ }
+
+ if err := rp.parseFields(cmd.policy.Txn, cmd.key, true); err != nil {
+ return err.resultCode(), err
+ }
+
+ return rp.resultCode, nil
+}
diff --git a/batch_attr.go b/batch_attr.go
index b5bc8503..9e3b127c 100644
--- a/batch_attr.go
+++ b/batch_attr.go
@@ -19,6 +19,7 @@ type batchAttr struct {
readAttr int
writeAttr int
infoAttr int
+ txnAttr int
expiration uint32
generation uint32
hasWrite bool
@@ -33,7 +34,18 @@ func newBatchAttr(policy *BatchPolicy, rattr int) *batchAttr {
return res
}
-func newBatchAttrOps(rp *BatchPolicy, wp *BatchWritePolicy, ops []*Operation) {
+func newBatchAttrOpsAttr(policy *BatchPolicy, rattr int, ops []*Operation) *batchAttr {
+ res := &batchAttr{}
+ res.setRead(policy)
+ res.readAttr = rattr
+ if len(ops) > 0 {
+ res.adjustRead(ops)
+ }
+ return res
+}
+
+// TODO: Check references
+func newBatchAttrOps(rp *BatchPolicy, wp *BatchWritePolicy, ops []*Operation) *batchAttr {
res := &batchAttr{}
readAllBins := false
readHeader := false
@@ -78,6 +90,8 @@ func newBatchAttrOps(rp *BatchPolicy, wp *BatchWritePolicy, ops []*Operation) {
res.readAttr |= _INFO1_NOBINDATA
}
}
+
+ return res
}
func (ba *batchAttr) setRead(rp *BatchPolicy) {
@@ -135,27 +149,16 @@ func (ba *batchAttr) setBatchRead(rp *BatchReadPolicy) {
}
func (ba *batchAttr) adjustRead(ops []*Operation) {
- readAllBins := false
- readHeader := false
-
for _, op := range ops {
switch op.opType {
- case _READ_HEADER:
- readHeader = true
- case _BIT_READ, _EXP_READ, _HLL_READ, _MAP_READ, _CDT_READ, _READ:
- // Read all bins if no bin is specified.
- if op.binName == "" {
- readAllBins = true
+ case _READ:
+ if len(op.binName) == 0 {
+ ba.readAttr |= _INFO1_GET_ALL
}
- default:
+ case _READ_HEADER:
+ ba.readAttr |= _INFO1_NOBINDATA
}
}
-
- if readAllBins {
- ba.readAttr |= _INFO1_GET_ALL
- } else if readHeader {
- ba.readAttr |= _INFO1_NOBINDATA
- }
}
func (ba *batchAttr) adjustReadForAllBins(readAllBins bool) {
@@ -291,3 +294,15 @@ func (ba *batchAttr) setBatchDelete(dp *BatchDeletePolicy) {
ba.infoAttr |= _INFO3_COMMIT_MASTER
}
}
+
+func (ba *batchAttr) setTxn(attr int) {
+ ba.filterExp = nil
+ ba.readAttr = 0
+ ba.writeAttr = _INFO2_WRITE | _INFO2_RESPOND_ALL_OPS | _INFO2_DURABLE_DELETE
+ ba.infoAttr = 0
+ ba.txnAttr = attr
+ ba.expiration = 0
+ ba.generation = 0
+ ba.hasWrite = true
+ ba.sendKey = false
+}
diff --git a/batch_command.go b/batch_command.go
index 1d2c55f2..3ad301dd 100644
--- a/batch_command.go
+++ b/batch_command.go
@@ -28,20 +28,15 @@ type batcher interface {
generateBatchNodes(*Cluster) ([]*batchNode, Error)
setSequence(int, int)
- executeSingle(clientIfc) Error
-}
-
-type clientIfc interface {
- ClientIfc
-
- operate(*WritePolicy, *Key, bool, ...*Operation) (*Record, Error)
- execute(policy *WritePolicy, key *Key, packageName string, functionName string, args ...Value) (*Record, Error)
+ // executeSingle(*Client) Error
+ setInDoubt(batcher)
+ inDoubt()
}
type batchCommand struct {
baseMultiCommand
- client clientIfc
+ client *Client
batch *batchNode
policy *BatchPolicy
sequenceAP int
@@ -52,6 +47,20 @@ type batchCommand struct {
filteredOutCnt int
}
+func (cmd *batchCommand) setInDoubt(ifc batcher) {
+ // Set error/inDoubt for keys associated with this batch command when
+ // the command was not retried and split. If a split retry occurred,
+ // those new subcommands have already set inDoubt on the affected
+ // subset of keys.
+ if !cmd.splitRetry {
+ ifc.inDoubt()
+ }
+}
+
+func (cmd *batchCommand) inDoubt() {
+ // do nothing by default
+}
+
func (cmd *batchCommand) prepareRetry(ifc command, isTimeout bool) bool {
if !(cmd.policy.ReplicaPolicy == SEQUENCE || cmd.policy.ReplicaPolicy == PREFER_RACK) {
// Perform regular retry to same node.
@@ -79,12 +88,14 @@ func (cmd *batchCommand) retryBatch(ifc batcher, cluster *Cluster, deadline time
return false, nil
}
+ cmd.splitRetry = true
+
// Run batch requests sequentially in same thread.
var ferr Error
for _, batchNode := range batchNodes {
command := ifc.cloneBatchCommand(batchNode)
command.setSequence(cmd.sequenceAP, cmd.sequenceSC)
- if err := command.executeAt(command, cmd.policy.GetBasePolicy(), deadline, iteration); err != nil {
+ if err := command.executeIter(command, iteration); err != nil {
ferr = chainErrors(err, ferr)
if !cmd.policy.AllowPartialResults {
return false, ferr
@@ -103,12 +114,16 @@ func (cmd *batchCommand) getPolicy(ifc command) Policy {
return cmd.policy
}
-func (cmd *batchCommand) transactionType() transactionType {
+func (cmd *batchCommand) commandType() commandType {
return ttNone
}
func (cmd *batchCommand) Execute() Error {
- return cmd.execute(cmd)
+ err := cmd.execute(cmd)
+ if err != nil {
+ cmd.setInDoubt(cmd)
+ }
+ return err
}
func (cmd *batchCommand) filteredOut() int {
diff --git a/batch_command_delete.go b/batch_command_delete.go
index 28bb0e87..da04409d 100644
--- a/batch_command_delete.go
+++ b/batch_command_delete.go
@@ -15,8 +15,8 @@
package aerospike
import (
- "github.com/aerospike/aerospike-client-go/v7/types"
- Buffer "github.com/aerospike/aerospike-client-go/v7/utils/buffer"
+ "github.com/aerospike/aerospike-client-go/v8/types"
+ Buffer "github.com/aerospike/aerospike-client-go/v8/utils/buffer"
)
type batchCommandDelete struct {
@@ -29,7 +29,7 @@ type batchCommandDelete struct {
}
func newBatchCommandDelete(
- client clientIfc,
+ client *Client,
batch *batchNode,
policy *BatchPolicy,
batchDeletePolicy *BatchDeletePolicy,
@@ -54,6 +54,7 @@ func newBatchCommandDelete(
records: records,
attr: attr,
}
+ res.txn = policy.Txn
return res
}
@@ -80,6 +81,15 @@ func (cmd *batchCommandDelete) parseRecordResults(ifc command, receiveSize int)
return false, err
}
resultCode := types.ResultCode(cmd.dataBuffer[5] & 0xFF)
+ generation := Buffer.BytesToUint32(cmd.dataBuffer, 6)
+ expiration := types.TTL(Buffer.BytesToUint32(cmd.dataBuffer, 10))
+ batchIndex := int(Buffer.BytesToUint32(cmd.dataBuffer, 14))
+ fieldCount := int(Buffer.BytesToUint16(cmd.dataBuffer, 18))
+ opCount := int(Buffer.BytesToUint16(cmd.dataBuffer, 20))
+ err := cmd.parseFieldsWrite(resultCode, fieldCount, cmd.keys[batchIndex])
+ if err != nil {
+ return false, err
+ }
// The only valid server return codes are "ok" and "not found" and "filtered out".
// If other return codes are received, then abort the batch.
@@ -102,16 +112,6 @@ func (cmd *batchCommandDelete) parseRecordResults(ifc command, receiveSize int)
return false, nil
}
- generation := Buffer.BytesToUint32(cmd.dataBuffer, 6)
- expiration := types.TTL(Buffer.BytesToUint32(cmd.dataBuffer, 10))
- batchIndex := int(Buffer.BytesToUint32(cmd.dataBuffer, 14))
- fieldCount := int(Buffer.BytesToUint16(cmd.dataBuffer, 18))
- opCount := int(Buffer.BytesToUint16(cmd.dataBuffer, 20))
- err := cmd.skipKey(fieldCount)
- if err != nil {
- return false, err
- }
-
if resultCode == 0 {
if err = cmd.parseRecord(cmd.records[batchIndex], cmd.keys[batchIndex], opCount, generation, expiration); err != nil {
return false, err
@@ -170,11 +170,11 @@ func (cmd *batchCommandDelete) parseRecord(rec *BatchRecord, key *Key, opCount i
return nil
}
-func (cmd *batchCommandDelete) transactionType() transactionType {
+func (cmd *batchCommandDelete) commandType() commandType {
return ttBatchWrite
}
-func (cmd *batchCommandDelete) executeSingle(client clientIfc) Error {
+func (cmd *batchCommandDelete) executeSingle(client *Client) Error {
policy := cmd.batchDeletePolicy.toWritePolicy(cmd.policy)
for i, key := range cmd.keys {
res, err := client.Operate(policy, key, DeleteOp())
diff --git a/batch_command_exists.go b/batch_command_exists.go
index faa7d957..6774b6aa 100644
--- a/batch_command_exists.go
+++ b/batch_command_exists.go
@@ -15,8 +15,8 @@
package aerospike
import (
- "github.com/aerospike/aerospike-client-go/v7/types"
- Buffer "github.com/aerospike/aerospike-client-go/v7/utils/buffer"
+ "github.com/aerospike/aerospike-client-go/v8/types"
+ Buffer "github.com/aerospike/aerospike-client-go/v8/utils/buffer"
)
type batchCommandExists struct {
@@ -27,7 +27,7 @@ type batchCommandExists struct {
}
func newBatchCommandExists(
- client clientIfc,
+ client *Client,
batch *batchNode,
policy *BatchPolicy,
keys []*Key,
@@ -60,6 +60,10 @@ func (cmd *batchCommandExists) cloneBatchCommand(batch *batchNode) batcher {
}
func (cmd *batchCommandExists) writeBuffer(ifc command) Error {
+ if cmd.batch.Node.SupportsBatchAny() {
+ attr := newBatchAttr(cmd.policy, _INFO1_READ|_INFO1_NOBINDATA)
+ return cmd.setBatchOperate(cmd.policy, cmd.keys, cmd.batch, nil, nil, attr)
+ }
return cmd.setBatchRead(cmd.policy, cmd.keys, cmd.batch, nil, nil, _INFO1_READ|_INFO1_NOBINDATA)
}
@@ -75,6 +79,17 @@ func (cmd *batchCommandExists) parseRecordResults(ifc command, receiveSize int)
}
resultCode := types.ResultCode(cmd.dataBuffer[5] & 0xFF)
+ // generation := Buffer.BytesToUint32(cmd.dataBuffer, 6)
+ // expiration := types.TTL(Buffer.BytesToUint32(cmd.dataBuffer, 10))
+ batchIndex := int(Buffer.BytesToUint32(cmd.dataBuffer, 14))
+ fieldCount := int(Buffer.BytesToUint16(cmd.dataBuffer, 18))
+ opCount := int(Buffer.BytesToUint16(cmd.dataBuffer, 20))
+ if len(cmd.keys) > batchIndex {
+ err := cmd.parseFieldsRead(fieldCount, cmd.keys[batchIndex])
+ if err != nil {
+ return false, err
+ }
+ }
// The only valid server return codes are "ok" and "not found".
// If other return codes are received, then abort the batch.
@@ -93,17 +108,15 @@ func (cmd *batchCommandExists) parseRecordResults(ifc command, receiveSize int)
return false, nil
}
- batchIndex := int(Buffer.BytesToUint32(cmd.dataBuffer, 14))
- fieldCount := int(Buffer.BytesToUint16(cmd.dataBuffer, 18))
- opCount := int(Buffer.BytesToUint16(cmd.dataBuffer, 20))
-
if opCount > 0 {
return false, newCustomNodeError(cmd.node, types.PARSE_ERROR, "Received bins that were not requested!")
}
- err := cmd.skipKey(fieldCount)
- if err != nil {
- return false, err
+ if len(cmd.keys) > batchIndex {
+ err := cmd.parseFieldsRead(fieldCount, cmd.keys[batchIndex])
+ if err != nil {
+ return false, err
+ }
}
// only set the results to true; as a result, no synchronization is needed
@@ -112,11 +125,11 @@ func (cmd *batchCommandExists) parseRecordResults(ifc command, receiveSize int)
return true, nil
}
-func (cmd *batchCommandExists) transactionType() transactionType {
+func (cmd *batchCommandExists) commandType() commandType {
return ttBatchRead
}
-func (cmd *batchCommandExists) executeSingle(client clientIfc) Error {
+func (cmd *batchCommandExists) executeSingle(client *Client) Error {
var err Error
for _, offset := range cmd.batch.offsets {
cmd.existsArray[offset], err = client.Exists(&cmd.policy.BasePolicy, cmd.keys[offset])
diff --git a/batch_command_get.go b/batch_command_get.go
index 1acd449c..dc6db7f4 100644
--- a/batch_command_get.go
+++ b/batch_command_get.go
@@ -17,8 +17,8 @@ package aerospike
import (
"reflect"
- "github.com/aerospike/aerospike-client-go/v7/types"
- Buffer "github.com/aerospike/aerospike-client-go/v7/utils/buffer"
+ "github.com/aerospike/aerospike-client-go/v8/types"
+ Buffer "github.com/aerospike/aerospike-client-go/v8/utils/buffer"
)
type batchCommandGet struct {
@@ -37,7 +37,7 @@ type batchCommandGet struct {
objectsFound []bool
}
-type batchObjectParsetIfc interface {
+type batchObjectParserIfc interface {
buf() []byte
readBytes(int) Error
object(int) *reflect.Value
@@ -46,7 +46,7 @@ type batchObjectParsetIfc interface {
// this method uses reflection.
// Will not be set if performance flag is passed for the build.
var batchObjectParser func(
- cmd batchObjectParsetIfc,
+ cmd batchObjectParserIfc,
offset int,
opCount int,
fieldCount int,
@@ -55,7 +55,7 @@ var batchObjectParser func(
) Error
func newBatchCommandGet(
- client clientIfc,
+ client *Client,
batch *batchNode,
policy *BatchPolicy,
keys []*Key,
@@ -83,6 +83,7 @@ func newBatchCommandGet(
records: records,
readAttr: readAttr,
}
+ res.txn = policy.Txn
return res
}
@@ -103,6 +104,10 @@ func (cmd *batchCommandGet) object(index int) *reflect.Value {
}
func (cmd *batchCommandGet) writeBuffer(ifc command) Error {
+ if cmd.batch.Node.SupportsBatchAny() {
+ attr := newBatchAttrOpsAttr(cmd.policy, cmd.readAttr, cmd.ops)
+ return cmd.setBatchOperate(cmd.policy, cmd.keys, cmd.batch, cmd.binNames, cmd.ops, attr)
+ }
return cmd.setBatchRead(cmd.policy, cmd.keys, cmd.batch, cmd.binNames, cmd.ops, cmd.readAttr)
}
@@ -117,6 +122,17 @@ func (cmd *batchCommandGet) parseRecordResults(ifc command, receiveSize int) (bo
return false, err
}
resultCode := types.ResultCode(cmd.dataBuffer[5] & 0xFF)
+ generation := Buffer.BytesToUint32(cmd.dataBuffer, 6)
+ expiration := types.TTL(Buffer.BytesToUint32(cmd.dataBuffer, 10))
+ batchIndex := int(Buffer.BytesToUint32(cmd.dataBuffer, 14))
+ fieldCount := int(Buffer.BytesToUint16(cmd.dataBuffer, 18))
+ opCount := int(Buffer.BytesToUint16(cmd.dataBuffer, 20))
+ if len(cmd.keys) > batchIndex {
+ err := cmd.parseFieldsRead(fieldCount, cmd.keys[batchIndex])
+ if err != nil {
+ return false, err
+ }
+ }
// The only valid server return codes are "ok" and "not found" and "filtered out".
// If other return codes are received, then abort the batch.
@@ -135,16 +151,7 @@ func (cmd *batchCommandGet) parseRecordResults(ifc command, receiveSize int) (bo
return false, nil
}
- generation := Buffer.BytesToUint32(cmd.dataBuffer, 6)
- expiration := types.TTL(Buffer.BytesToUint32(cmd.dataBuffer, 10))
- batchIndex := int(Buffer.BytesToUint32(cmd.dataBuffer, 14))
- fieldCount := int(Buffer.BytesToUint16(cmd.dataBuffer, 18))
- opCount := int(Buffer.BytesToUint16(cmd.dataBuffer, 20))
- err := cmd.skipKey(fieldCount)
- if err != nil {
- return false, err
- }
-
+ var err Error
if cmd.indexRecords != nil {
if len(cmd.indexRecords) > 0 {
if resultCode == 0 {
@@ -217,11 +224,11 @@ func (cmd *batchCommandGet) parseRecord(key *Key, opCount int, generation, expir
return newRecord(cmd.node, key, bins, generation, expiration), nil
}
-func (cmd *batchCommandGet) transactionType() transactionType {
+func (cmd *batchCommandGet) commandType() commandType {
return ttBatchRead
}
-func (cmd *batchCommandGet) executeSingle(client clientIfc) Error {
+func (cmd *batchCommandGet) executeSingle(client *Client) Error {
for _, offset := range cmd.batch.offsets {
var err Error
if len(cmd.ops) > 0 {
@@ -231,7 +238,7 @@ func (cmd *batchCommandGet) executeSingle(client clientIfc) Error {
return newError(types.PARAMETER_ERROR, "Write operations not allowed in batch read").setNode(cmd.node)
}
}
- cmd.records[offset], err = client.operate(cmd.policy.toWritePolicy(), cmd.keys[offset], true, cmd.ops...)
+ cmd.records[offset], err = client.Operate(cmd.policy.toWritePolicy(), cmd.keys[offset], cmd.ops...)
} else if (cmd.readAttr & _INFO1_NOBINDATA) == _INFO1_NOBINDATA {
cmd.records[offset], err = client.GetHeader(&cmd.policy.BasePolicy, cmd.keys[offset])
} else {
diff --git a/batch_command_get_reflect.go b/batch_command_get_reflect.go
index cee4a09c..515de5b0 100644
--- a/batch_command_get_reflect.go
+++ b/batch_command_get_reflect.go
@@ -19,7 +19,7 @@ package aerospike
import (
"reflect"
- Buffer "github.com/aerospike/aerospike-client-go/v7/utils/buffer"
+ Buffer "github.com/aerospike/aerospike-client-go/v8/utils/buffer"
)
// if this file is included in the build, it will include this method
@@ -28,7 +28,7 @@ func init() {
}
func parseBatchObject(
- cmd batchObjectParsetIfc,
+ cmd batchObjectParserIfc,
offset int,
opCount int,
fieldCount int,
diff --git a/batch_command_operate.go b/batch_command_operate.go
index b0502787..01f3d667 100644
--- a/batch_command_operate.go
+++ b/batch_command_operate.go
@@ -17,8 +17,8 @@ package aerospike
import (
"reflect"
- "github.com/aerospike/aerospike-client-go/v7/types"
- Buffer "github.com/aerospike/aerospike-client-go/v7/utils/buffer"
+ "github.com/aerospike/aerospike-client-go/v8/types"
+ Buffer "github.com/aerospike/aerospike-client-go/v8/utils/buffer"
)
type batchCommandOperate struct {
@@ -33,17 +33,17 @@ type batchCommandOperate struct {
}
func newBatchCommandOperate(
- client clientIfc,
+ client *Client,
batch *batchNode,
policy *BatchPolicy,
records []BatchRecordIfc,
-) *batchCommandOperate {
+) batchCommandOperate {
var node *Node
if batch != nil {
node = batch.Node
}
- res := &batchCommandOperate{
+ res := batchCommandOperate{
batchCommand: batchCommand{
client: client,
baseMultiCommand: *newMultiCommand(node, nil, true),
@@ -52,6 +52,8 @@ func newBatchCommandOperate(
},
records: records,
}
+ res.txn = policy.Txn
+
return res
}
@@ -105,7 +107,7 @@ func (cmd *batchCommandOperate) parseRecordResults(ifc command, receiveSize int)
fieldCount := int(Buffer.BytesToUint16(cmd.dataBuffer, 18))
opCount := int(Buffer.BytesToUint16(cmd.dataBuffer, 20))
- err := cmd.skipKey(fieldCount)
+ err := cmd.parseFieldsBatch(resultCode, fieldCount, cmd.records[batchIndex])
if err != nil {
return false, err
}
@@ -117,7 +119,7 @@ func (cmd *batchCommandOperate) parseRecordResults(ifc command, receiveSize int)
// If it looks like the error is on the first record and the message is marked as last part,
// the error is for the whole command and not just for the first batchIndex
- lastMessage := (info3&_INFO3_LAST) == _INFO3_LAST || cmd.grpcEOS
+ lastMessage := (info3 & _INFO3_LAST) == _INFO3_LAST
if resultCode != 0 && lastMessage && receiveSize == int(_MSG_REMAINING_HEADER_SIZE) {
return false, newError(resultCode).setNode(cmd.node)
}
@@ -130,7 +132,7 @@ func (cmd *batchCommandOperate) parseRecordResults(ifc command, receiveSize int)
}
// for UDF failures
- var msg interface{}
+ var msg any
if rec != nil {
msg = rec.Bins["FAILURE"]
}
@@ -160,9 +162,7 @@ func (cmd *batchCommandOperate) parseRecordResults(ifc command, receiveSize int)
continue
}
- // Do not process records after grpc stream has ended.
- // This is a special case due to proxy server shortcomings.
- if resultCode == 0 && !cmd.grpcEOS {
+ if resultCode == 0 {
if cmd.objects == nil {
rec, err := cmd.parseRecord(cmd.records[batchIndex].key(), opCount, generation, expiration)
if err != nil {
@@ -229,7 +229,7 @@ func (cmd *batchCommandOperate) parseRecord(key *Key, opCount int, generation, e
return newRecord(cmd.node, key, bins, generation, expiration), nil
}
-func (cmd *batchCommandOperate) executeSingle(client clientIfc) Error {
+func (cmd *batchCommandOperate) executeSingle(client *Client) Error {
var res *Record
var err Error
for _, br := range cmd.records {
@@ -246,14 +246,14 @@ func (cmd *batchCommandOperate) executeSingle(client clientIfc) Error {
} else if len(ops) == 0 {
ops = append(ops, GetOp())
}
- res, err = client.operate(cmd.client.getUsableBatchReadPolicy(br.Policy).toWritePolicy(cmd.policy), br.Key, true, ops...)
+ res, err = client.Operate(cmd.client.getUsableBatchReadPolicy(br.Policy).toWritePolicy(cmd.policy), br.Key, ops...)
case *BatchWrite:
policy := cmd.client.getUsableBatchWritePolicy(br.Policy).toWritePolicy(cmd.policy)
policy.RespondPerEachOp = true
- res, err = client.operate(policy, br.Key, true, br.Ops...)
+ res, err = client.Operate(policy, br.Key, br.Ops...)
case *BatchDelete:
policy := cmd.client.getUsableBatchDeletePolicy(br.Policy).toWritePolicy(cmd.policy)
- res, err = client.operate(policy, br.Key, true, DeleteOp())
+ res, err = client.Operate(policy, br.Key, DeleteOp())
case *BatchUDF:
policy := cmd.client.getUsableBatchUDFPolicy(br.Policy).toWritePolicy(cmd.policy)
policy.RespondPerEachOp = true
@@ -291,7 +291,7 @@ func (cmd *batchCommandOperate) Execute() Error {
return cmd.execute(cmd)
}
-func (cmd *batchCommandOperate) transactionType() transactionType {
+func (cmd *batchCommandOperate) commandType() commandType {
if cmd.isRead() {
return ttBatchRead
}
diff --git a/batch_command_reflect.go b/batch_command_reflect.go
index 9ce42a79..ec5b9c8d 100644
--- a/batch_command_reflect.go
+++ b/batch_command_reflect.go
@@ -19,7 +19,7 @@ package aerospike
import (
"reflect"
- Buffer "github.com/aerospike/aerospike-client-go/v7/utils/buffer"
+ Buffer "github.com/aerospike/aerospike-client-go/v8/utils/buffer"
)
// this function will only be set if the performance flag is not passed for build
diff --git a/batch_command_udf.go b/batch_command_udf.go
index b8b9c445..7e5a89c1 100644
--- a/batch_command_udf.go
+++ b/batch_command_udf.go
@@ -15,8 +15,8 @@
package aerospike
import (
- "github.com/aerospike/aerospike-client-go/v7/types"
- Buffer "github.com/aerospike/aerospike-client-go/v7/utils/buffer"
+ "github.com/aerospike/aerospike-client-go/v8/types"
+ Buffer "github.com/aerospike/aerospike-client-go/v8/utils/buffer"
)
type batchCommandUDF struct {
@@ -32,7 +32,7 @@ type batchCommandUDF struct {
}
func newBatchCommandUDF(
- client clientIfc,
+ client *Client,
batch *batchNode,
policy *BatchPolicy,
batchUDFPolicy *BatchUDFPolicy,
@@ -63,6 +63,7 @@ func newBatchCommandUDF(
args: args,
attr: attr,
}
+ res.txn = policy.Txn
return res
}
@@ -89,6 +90,15 @@ func (cmd *batchCommandUDF) parseRecordResults(ifc command, receiveSize int) (bo
return false, err
}
resultCode := types.ResultCode(cmd.dataBuffer[5] & 0xFF)
+ generation := Buffer.BytesToUint32(cmd.dataBuffer, 6)
+ expiration := types.TTL(Buffer.BytesToUint32(cmd.dataBuffer, 10))
+ batchIndex := int(Buffer.BytesToUint32(cmd.dataBuffer, 14))
+ fieldCount := int(Buffer.BytesToUint16(cmd.dataBuffer, 18))
+ opCount := int(Buffer.BytesToUint16(cmd.dataBuffer, 20))
+ err := cmd.parseFieldsWrite(resultCode, fieldCount, cmd.keys[batchIndex])
+ if err != nil {
+ return false, err
+ }
// The only valid server return codes are "ok" and "not found" and "filtered out".
// If other return codes are received, then abort the batch.
@@ -111,16 +121,6 @@ func (cmd *batchCommandUDF) parseRecordResults(ifc command, receiveSize int) (bo
return false, nil
}
- generation := Buffer.BytesToUint32(cmd.dataBuffer, 6)
- expiration := types.TTL(Buffer.BytesToUint32(cmd.dataBuffer, 10))
- batchIndex := int(Buffer.BytesToUint32(cmd.dataBuffer, 14))
- fieldCount := int(Buffer.BytesToUint16(cmd.dataBuffer, 18))
- opCount := int(Buffer.BytesToUint16(cmd.dataBuffer, 20))
- err := cmd.skipKey(fieldCount)
- if err != nil {
- return false, err
- }
-
if resultCode == 0 {
if err = cmd.parseRecord(cmd.records[batchIndex], cmd.keys[batchIndex], opCount, generation, expiration); err != nil {
return false, err
@@ -183,7 +183,7 @@ func (cmd *batchCommandUDF) isRead() bool {
return !cmd.attr.hasWrite
}
-func (cmd *batchCommandUDF) executeSingle(client clientIfc) Error {
+func (cmd *batchCommandUDF) executeSingle(client *Client) Error {
for i, key := range cmd.keys {
policy := cmd.batchUDFPolicy.toWritePolicy(cmd.policy)
policy.RespondPerEachOp = true
diff --git a/batch_delete.go b/batch_delete.go
index 6886d129..d058755f 100644
--- a/batch_delete.go
+++ b/batch_delete.go
@@ -78,14 +78,14 @@ func (bd *BatchDelete) size(parentPolicy *BasePolicy) (int, Error) {
}
}
- if bd.Policy.SendKey || parentPolicy.SendKey {
+ if (bd.Policy.SendKey || parentPolicy.SendKey) && bd.Key.hasValueToSend() {
if sz, err := bd.Key.userKey.EstimateSize(); err != nil {
return -1, err
} else {
size += sz + int(_FIELD_HEADER_SIZE) + 1
}
}
- } else if parentPolicy.SendKey {
+ } else if parentPolicy.SendKey && bd.Key.hasValueToSend() {
sz, err := bd.Key.userKey.EstimateSize()
if err != nil {
return -1, err
diff --git a/batch_delete_policy.go b/batch_delete_policy.go
index d73b10de..0e3d256e 100644
--- a/batch_delete_policy.go
+++ b/batch_delete_policy.go
@@ -21,7 +21,7 @@ type BatchDeletePolicy struct {
// Default: nil
FilterExpression *Expression
- // Desired consistency guarantee when committing a transaction on the server. The default
+ // Desired consistency guarantee when committing a command on the server. The default
// (COMMIT_ALL) indicates that the server should wait for master and all replica commits to
// be successful before returning success to the client.
// Default: CommitLevel.COMMIT_ALL
@@ -38,7 +38,7 @@ type BatchDeletePolicy struct {
// Default: 0
Generation uint32
- // If the transaction results in a record deletion, leave a tombstone for the record.
+ // If the command results in a record deletion, leave a tombstone for the record.
// This prevents deleted records from reappearing after node failures.
// Valid for Aerospike Server Enterprise Edition only.
// Default: false (do not tombstone deleted records).
diff --git a/batch_executer.go b/batch_executer.go
index 45d9c5a7..58ca1abe 100644
--- a/batch_executer.go
+++ b/batch_executer.go
@@ -42,3 +42,20 @@ func (clnt *Client) batchExecute(policy *BatchPolicy, batchNodes []*batchNode, c
return filteredOut, errs
}
+
+// batchExecuteSimple uses a weighted err group to run commands using multiple goroutines,
+// and waits for their return
+func (clnt *Client) batchExecuteSimple(policy *BatchPolicy, cmds []command) Error {
+ maxConcurrentNodes := policy.ConcurrentNodes
+ if maxConcurrentNodes <= 0 {
+ maxConcurrentNodes = len(cmds)
+ }
+
+ // we need this list to count the number of filtered out records
+ weg := newWeightedErrGroup(maxConcurrentNodes)
+ for _, cmd := range cmds {
+ weg.execute(cmd)
+ }
+
+ return weg.wait()
+}
diff --git a/batch_index_command_get.go b/batch_index_command_get.go
index f840b04a..e949b7b1 100644
--- a/batch_index_command_get.go
+++ b/batch_index_command_get.go
@@ -15,37 +15,32 @@
package aerospike
import (
- "github.com/aerospike/aerospike-client-go/v7/types"
+ "github.com/aerospike/aerospike-client-go/v8/types"
)
type batchIndexCommandGet struct {
- batchCommandGet
+ batchCommandOperate
+
+ indexRecords []*BatchRead
}
func newBatchIndexCommandGet(
- client clientIfc,
+ client *Client,
batch *batchNode,
policy *BatchPolicy,
records []*BatchRead,
isOperation bool,
-) *batchIndexCommandGet {
- var node *Node
- if batch != nil {
- node = batch.Node
+) batchIndexCommandGet {
+ recIfcs := make([]BatchRecordIfc, len(records))
+ for i := range records {
+ recIfcs[i] = records[i]
}
- res := &batchIndexCommandGet{
- batchCommandGet{
- batchCommand: batchCommand{
- client: client,
- baseMultiCommand: *newMultiCommand(node, nil, isOperation),
- policy: policy,
- batch: batch,
- },
- records: nil,
- indexRecords: records,
- },
+ res := batchIndexCommandGet{
+ batchCommandOperate: newBatchCommandOperate(client, batch, policy, recIfcs),
+ indexRecords: records,
}
+ res.txn = policy.Txn
return res
}
@@ -57,10 +52,6 @@ func (cmd *batchIndexCommandGet) cloneBatchCommand(batch *batchNode) batcher {
return &res
}
-func (cmd *batchIndexCommandGet) writeBuffer(ifc command) Error {
- return cmd.setBatchIndexRead(cmd.policy, cmd.indexRecords, cmd.batch)
-}
-
func (cmd *batchIndexCommandGet) Execute() Error {
if len(cmd.batch.offsets) == 1 {
return cmd.executeSingle(cmd.client)
@@ -68,8 +59,8 @@ func (cmd *batchIndexCommandGet) Execute() Error {
return cmd.execute(cmd)
}
-func (cmd *batchIndexCommandGet) executeSingle(client clientIfc) Error {
- for i, br := range cmd.indexRecords {
+func (cmd *batchIndexCommandGet) executeSingle(client *Client) Error {
+ for _, br := range cmd.indexRecords {
var ops []*Operation
if br.headerOnly() {
ops = []*Operation{GetHeaderOp()}
@@ -77,13 +68,15 @@ func (cmd *batchIndexCommandGet) executeSingle(client clientIfc) Error {
for i := range br.BinNames {
ops = append(ops, GetBinOp(br.BinNames[i]))
}
- } else {
+ } else if len(br.Ops) > 0 {
ops = br.Ops
+ } else {
+ ops = []*Operation{GetOp()}
}
- res, err := client.operate(cmd.policy.toWritePolicy(), br.Key, true, ops...)
- cmd.indexRecords[i].setRecord(res)
+ res, err := client.Operate(cmd.policy.toWritePolicy(), br.Key, ops...)
+ br.setRecord(res)
if err != nil {
- cmd.indexRecords[i].setRawError(err)
+ br.setRawError(err)
// Key not found is NOT an error for batch requests
if err.resultCode() == types.KEY_NOT_FOUND_ERROR {
@@ -103,7 +96,3 @@ func (cmd *batchIndexCommandGet) executeSingle(client clientIfc) Error {
}
return nil
}
-
-func (cmd *batchIndexCommandGet) generateBatchNodes(cluster *Cluster) ([]*batchNode, Error) {
- return newBatchNodeListRecords(cluster, cmd.policy, cmd.indexRecords, cmd.sequenceAP, cmd.sequenceSC, cmd.batch)
-}
diff --git a/batch_node_list.go b/batch_node_list.go
index 7ecd66a6..01fe7f82 100644
--- a/batch_node_list.go
+++ b/batch_node_list.go
@@ -14,7 +14,7 @@
package aerospike
-import "github.com/aerospike/aerospike-client-go/v7/types"
+import "github.com/aerospike/aerospike-client-go/v8/types"
func newBatchNodeList(cluster *Cluster, policy *BatchPolicy, keys []*Key, records []*BatchRecord, hasWrite bool) ([]*batchNode, Error) {
nodes := cluster.GetNodes()
@@ -295,18 +295,6 @@ func newBatchOperateNodeListIfc(cluster *Cluster, policy *BatchPolicy, records [
return batchNodes, errs
}
-func newGrpcBatchOperateListIfc(policy *BatchPolicy, records []BatchRecordIfc) (*batchNode, Error) {
- // Split keys by server node.
- batchNode := new(batchNode)
- for i := range records {
- b := records[i]
- b.prepare()
- batchNode.AddKey(i)
- }
-
- return batchNode, nil
-}
-
func findBatchNode(nodes []*batchNode, node *Node) *batchNode {
for i := range nodes {
// Note: using pointer equality for performance.
diff --git a/execute_task_native.go b/batch_offsets.go
similarity index 54%
rename from execute_task_native.go
rename to batch_offsets.go
index cbd62d7b..46df4fc8 100644
--- a/execute_task_native.go
+++ b/batch_offsets.go
@@ -1,6 +1,4 @@
-//go:build !as_proxy
-
-// Copyright 2014-2022 Aerospike, Inc.
+// Copyright 2014-2024 Aerospike, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -16,7 +14,28 @@
package aerospike
-func (etsk *ExecuteTask) grpcIsDone() (bool, Error) {
- // should not be called out of the grpc proxy server context
- panic("UNREACHABLE")
+type BatchOffsets interface {
+ size() int
+ get(int) int
+}
+
+// enforce the interface
+var _ BatchOffsets = &batchOffsetsNative{}
+
+type batchOffsetsNative struct {
+ offsets []int
+}
+
+func newBatchOffsetsNative(batch *batchNode) *batchOffsetsNative {
+ return &batchOffsetsNative{
+ offsets: batch.offsets,
+ }
+}
+
+func (bon *batchOffsetsNative) size() int {
+ return len(bon.offsets)
+}
+
+func (bon *batchOffsetsNative) get(i int) int {
+ return bon.offsets[i]
}
diff --git a/batch_policy.go b/batch_policy.go
index e07d8793..d20d3b99 100644
--- a/batch_policy.go
+++ b/batch_policy.go
@@ -26,7 +26,7 @@ type BatchPolicy struct {
//
// Values:
// 1: Issue batch requests sequentially. This mode has a performance advantage for small
- // to medium sized batch sizes because requests can be issued in the main transaction goroutine.
+ // to medium sized batch sizes because requests can be issued in the main command goroutine.
// This is the default.
// 0: Issue all batch requests in concurrent goroutines. This mode has a performance
// advantage for extremely large batch sizes because each node can process the request
@@ -40,7 +40,7 @@ type BatchPolicy struct {
// Allow batch to be processed immediately in the server's receiving thread when the server
// deems it to be appropriate. If false, the batch will always be processed in separate
- // transaction goroutines. This field is only relevant for the new batch index protocol.
+ // command goroutines. This field is only relevant for the new batch index protocol.
//
// For batch exists or batch reads of smaller sized records (<= 1K per record), inline
// processing will be significantly faster on "in memory" namespaces. The server disables
diff --git a/batch_read.go b/batch_read.go
index aac921f7..f638f87e 100644
--- a/batch_read.go
+++ b/batch_read.go
@@ -17,7 +17,7 @@ package aerospike
import (
"fmt"
- "github.com/aerospike/aerospike-client-go/v7/types"
+ "github.com/aerospike/aerospike-client-go/v8/types"
)
// BatchRead specifies the Key and bin names used in batch read commands
diff --git a/batch_record.go b/batch_record.go
index e08a32a4..42315b80 100644
--- a/batch_record.go
+++ b/batch_record.go
@@ -17,7 +17,7 @@ package aerospike
import (
"fmt"
- "github.com/aerospike/aerospike-client-go/v7/types"
+ "github.com/aerospike/aerospike-client-go/v8/types"
)
type batchRecordType byte
@@ -67,7 +67,7 @@ type BatchRecord struct {
// Err encapsulates the possible error chain for this key
Err Error
- // InDoubt signifies the possiblity that the write transaction may have completed even though an error
+ // InDoubt signifies the possibility that the write command may have completed even though an error
// occurred for this record. This may be the case when a client error occurs (like timeout)
// after the command was sent to the server.
InDoubt bool
diff --git a/batch_test.go b/batch_test.go
index f03b726f..b783cdc5 100644
--- a/batch_test.go
+++ b/batch_test.go
@@ -22,8 +22,8 @@ import (
"strings"
"time"
- as "github.com/aerospike/aerospike-client-go/v7"
- "github.com/aerospike/aerospike-client-go/v7/types"
+ as "github.com/aerospike/aerospike-client-go/v8"
+ "github.com/aerospike/aerospike-client-go/v8/types"
gg "github.com/onsi/ginkgo/v2"
gm "github.com/onsi/gomega"
@@ -216,10 +216,6 @@ var _ = gg.Describe("Aerospike", func() {
gg.Context("BatchOperate operations", func() {
gg.It("must return the result with same ordering", func() {
- if *dbaas {
- gg.Skip("Not supported in DBAAS environment")
- }
-
key1, _ := as.NewKey(ns, set, 1)
op1 := as.NewBatchWrite(nil, key1, as.PutOp(as.NewBin("bin1", "a")), as.PutOp(as.NewBin("bin2", "b")))
op3 := as.NewBatchRead(nil, key1, []string{"bin2"})
@@ -292,10 +288,6 @@ var _ = gg.Describe("Aerospike", func() {
})
gg.It("must successfully execute a BatchOperate for many keys", func() {
- if *dbaas {
- gg.Skip("Not supported in DBAAS environment")
- }
-
gm.Expect(err).ToNot(gm.HaveOccurred())
bwPolicy := as.NewBatchWritePolicy()
bdPolicy := as.NewBatchDeletePolicy()
@@ -329,10 +321,6 @@ var _ = gg.Describe("Aerospike", func() {
})
gg.It("must successfully execute a delete op", func() {
- if *dbaas {
- gg.Skip("Not supported in DBAAS environment")
- }
-
gm.Expect(err).ToNot(gm.HaveOccurred())
bwPolicy := as.NewBatchWritePolicy()
bdPolicy := as.NewBatchDeletePolicy()
@@ -440,13 +428,9 @@ var _ = gg.Describe("Aerospike", func() {
})
gg.It("Overall command error should be reflected in API call error and not BatchRecord error", func() {
- if *dbaas || *proxy {
- gg.Skip("Not supported in DBAAS or PROXY environments")
- }
-
var batchRecords []as.BatchRecordIfc
key, _ := as.NewKey(*namespace, set, 0)
- for i := 0; i < len(nativeClient.Cluster().GetNodes())*2000000; i++ {
+ for i := 0; i < len(client.Cluster().GetNodes())*2000000; i++ {
batchRecords = append(batchRecords, as.NewBatchReadHeader(nil, key))
}
@@ -477,17 +461,17 @@ var _ = gg.Describe("Aerospike", func() {
op5 := as.ListGetByValueRangeOp(binName, nil, as.NewValue(9), as.ListReturnTypeValue)
r, err := client.Operate(wpolicy, key, op1, op2, op3, op4, op5)
gm.Expect(err).ToNot(gm.HaveOccurred())
- gm.Expect(r.Bins[binName]).To(gm.Equal([]interface{}{[]interface{}{7, 8}, []interface{}{0, 3, 4, 5}, []interface{}{7, 8, 9, 10}, []interface{}{2, 3, 4, 5}, []interface{}{7, 6, 5, 8}}))
+ gm.Expect(r.Bins[binName]).To(gm.Equal(as.OpResults{[]any{7, 8}, []any{0, 3, 4, 5}, []any{7, 8, 9, 10}, []any{2, 3, 4, 5}, []any{7, 6, 5, 8}}))
// Remove
op6 := as.ListRemoveByValueRangeOp(binName, as.ListReturnTypeIndex, as.NewValue(7), nil)
r2, err2 := client.Operate(wpolicy, key, op6)
gm.Expect(err2).ToNot(gm.HaveOccurred())
- gm.Expect(r2.Bins[binName]).To(gm.Equal([]interface{}{0, 3, 4, 5}))
+ gm.Expect(r2.Bins[binName]).To(gm.Equal([]any{0, 3, 4, 5}))
r3, err3 := client.Get(nil, key)
gm.Expect(err3).ToNot(gm.HaveOccurred())
- gm.Expect(r3.Bins[binName]).To(gm.Equal([]interface{}{6, 5}))
+ gm.Expect(r3.Bins[binName]).To(gm.Equal([]any{6, 5}))
})
gg.It("must return the result with same ordering", func() {
@@ -538,10 +522,6 @@ var _ = gg.Describe("Aerospike", func() {
gg.Context("BatchRead operations with TTL", func() {
gg.BeforeEach(func() {
- if *dbaas {
- gg.Skip("Not supported in DBAAS environment")
- }
-
if serverIsOlderThan("7") {
gg.Skip("Not supported in server before v7.1")
}
@@ -641,12 +621,6 @@ var _ = gg.Describe("Aerospike", func() {
})
gg.Context("BatchUDF operations", func() {
- gg.BeforeEach(func() {
- if *dbaas {
- gg.Skip("Not supported in DBAAS environment")
- }
- })
-
gg.It("must return the results for single BatchUDF vs multiple", func() {
luaCode := `-- Create a record
function rec_create(rec, bins)
@@ -657,12 +631,12 @@ var _ = gg.Describe("Aerospike", func() {
registerUDF(luaCode, "test_ops.lua")
for _, keyCount := range []int{10, 1} {
- nativeClient.Truncate(nil, ns, set, nil)
+ client.Truncate(nil, ns, set, nil)
batchRecords := []as.BatchRecordIfc{}
for k := 0; k < keyCount; k++ {
key, _ := as.NewKey(ns, set, k)
- args := make(map[interface{}]interface{})
+ args := make(map[any]any)
args["bin1_str"] = "a"
batchRecords = append(batchRecords, as.NewBatchUDF(
nil,
@@ -679,7 +653,7 @@ var _ = gg.Describe("Aerospike", func() {
for i := 0; i < keyCount; i++ {
gm.Expect(batchRecords[i].BatchRec().Err).To(gm.BeNil())
gm.Expect(batchRecords[i].BatchRec().ResultCode).To(gm.Equal(types.OK))
- gm.Expect(batchRecords[i].BatchRec().Record.Bins).To(gm.Equal(as.BinMap{"SUCCESS": map[interface{}]interface{}{"bin1_str": "a"}}))
+ gm.Expect(batchRecords[i].BatchRec().Record.Bins).To(gm.Equal(as.BinMap{"SUCCESS": map[any]any{"bin1_str": "a"}}))
}
}
})
@@ -702,7 +676,7 @@ var _ = gg.Describe("Aerospike", func() {
batchRecords := []as.BatchRecordIfc{}
key1, _ := as.NewKey(randString(10), set, 1)
- args := make(map[interface{}]interface{})
+ args := make(map[any]any)
args["bin1_str"] = "a"
batchRecords = append(batchRecords, as.NewBatchUDF(
nil,
@@ -801,7 +775,7 @@ var _ = gg.Describe("Aerospike", func() {
gg.It("must return correct errors", func() {
- nativeClient.Truncate(nil, ns, set, nil)
+ client.Truncate(nil, ns, set, nil)
udf := `function wait_and_update(rec, bins, n)
info("WAIT_AND_WRITE BEGIN")
@@ -856,10 +830,6 @@ var _ = gg.Describe("Aerospike", func() {
}
if nsInfo(ns, "storage-engine") == "device" {
- if *dbaas {
- gg.Skip("Not supported in DBAAS environment")
- }
-
writeBlockSize := 1048576
bigBin := make(map[string]string, 0)
bigBin["big_bin"] = strings.Repeat("a", writeBlockSize)
@@ -925,7 +895,7 @@ var _ = gg.Describe("Aerospike", func() {
gm.Expect(rec.Err).ToNot(gm.HaveOccurred())
gm.Expect(rec.ResultCode).To(gm.Equal(types.OK))
gm.Expect(rec.InDoubt).To(gm.BeFalse())
- gm.Expect(rec.Record.Bins["SUCCESS"]).To(gm.Equal(map[interface{}]interface{}{"status": "OK"}))
+ gm.Expect(rec.Record.Bins["SUCCESS"]).To(gm.Equal(map[any]any{"status": "OK"}))
}
recs, err := client.BatchGet(nil, keys)
diff --git a/batch_udf.go b/batch_udf.go
index 6eced59e..73be3506 100644
--- a/batch_udf.go
+++ b/batch_udf.go
@@ -96,14 +96,14 @@ func (bu *BatchUDF) size(parentPolicy *BasePolicy) (int, Error) {
size += sz + int(_FIELD_HEADER_SIZE)
}
- if bu.Policy.SendKey || parentPolicy.SendKey {
+ if (bu.Policy.SendKey || parentPolicy.SendKey) && bu.Key.hasValueToSend() {
if sz, err := bu.Key.userKey.EstimateSize(); err != nil {
return -1, err
} else {
size += sz + int(_FIELD_HEADER_SIZE) + 1
}
}
- } else if parentPolicy.SendKey {
+ } else if parentPolicy.SendKey && bu.Key.hasValueToSend() {
sz, err := bu.Key.userKey.EstimateSize()
if err != nil {
return -1, err
diff --git a/batch_udf_policy.go b/batch_udf_policy.go
index f6c7ec8e..e43ba9a3 100644
--- a/batch_udf_policy.go
+++ b/batch_udf_policy.go
@@ -22,7 +22,7 @@ type BatchUDFPolicy struct {
// Default: nil
FilterExpression *Expression
- // Desired consistency guarantee when committing a transaction on the server. The default
+ // Desired consistency guarantee when committing a command on the server. The default
// (COMMIT_ALL) indicates that the server should wait for master and all replica commits to
// be successful before returning success to the client.
//
@@ -38,7 +38,7 @@ type BatchUDFPolicy struct {
// > 0: Actual expiration in seconds.
Expiration uint32
- // DurableDelete leaves a tombstone for the record if the transaction results in a record deletion.
+ // DurableDelete leaves a tombstone for the record if the command results in a record deletion.
// This prevents deleted records from reappearing after node failures.
// Valid for Aerospike Server Enterprise Edition 3.10+ only.
DurableDelete bool
diff --git a/batch_write.go b/batch_write.go
index cac051f9..d94143c3 100644
--- a/batch_write.go
+++ b/batch_write.go
@@ -14,7 +14,7 @@
package aerospike
-import "github.com/aerospike/aerospike-client-go/v7/types"
+import "github.com/aerospike/aerospike-client-go/v8/types"
var _ BatchRecordIfc = &BatchWrite{}
@@ -78,14 +78,14 @@ func (bw *BatchWrite) size(parentPolicy *BasePolicy) (int, Error) {
}
}
- if bw.Policy.SendKey || parentPolicy.SendKey {
+ if (bw.Policy.SendKey || parentPolicy.SendKey) && bw.Key.hasValueToSend() {
if sz, err := bw.Key.userKey.EstimateSize(); err != nil {
return -1, err
} else {
size += sz + int(_FIELD_HEADER_SIZE) + 1
}
}
- } else if parentPolicy.SendKey {
+ } else if parentPolicy.SendKey && bw.Key.hasValueToSend() {
sz, err := bw.Key.userKey.EstimateSize()
if err != nil {
return -1, err
diff --git a/batch_write_policy.go b/batch_write_policy.go
index 606f2d2c..5574ad0a 100644
--- a/batch_write_policy.go
+++ b/batch_write_policy.go
@@ -25,7 +25,7 @@ type BatchWritePolicy struct {
// RecordExistsAction qualifies how to handle writes where the record already exists.
RecordExistsAction RecordExistsAction //= RecordExistsAction.UPDATE;
- // Desired consistency guarantee when committing a transaction on the server. The default
+ // Desired consistency guarantee when committing a command on the server. The default
// (COMMIT_ALL) indicates that the server should wait for master and all replica commits to
// be successful before returning success to the client.
//
@@ -62,7 +62,7 @@ type BatchWritePolicy struct {
// > 0: Actual expiration in seconds.
Expiration uint32
- // DurableDelete leaves a tombstone for the record if the transaction results in a record deletion.
+ // DurableDelete leaves a tombstone for the record if the command results in a record deletion.
// This prevents deleted records from reappearing after node failures.
// Valid for Aerospike Server Enterprise Edition 3.10+ only.
DurableDelete bool
diff --git a/bench_batchget_test.go b/bench_batchget_test.go
index 83fbe865..18c97721 100644
--- a/bench_batchget_test.go
+++ b/bench_batchget_test.go
@@ -22,7 +22,7 @@ import (
_ "net/http/pprof"
- as "github.com/aerospike/aerospike-client-go/v7"
+ as "github.com/aerospike/aerospike-client-go/v8"
)
func makeDataForBatchGetBench(set string, bins []*as.Bin) {
diff --git a/bench_cdt_list_test.go b/bench_cdt_list_test.go
index d231256c..8966c4a5 100644
--- a/bench_cdt_list_test.go
+++ b/bench_cdt_list_test.go
@@ -21,7 +21,7 @@ import (
// "time"
_ "net/http/pprof"
- as "github.com/aerospike/aerospike-client-go/v7"
+ as "github.com/aerospike/aerospike-client-go/v8"
)
var list []as.Value
diff --git a/bench_get_test.go b/bench_get_test.go
index 66f2ca08..883a15be 100644
--- a/bench_get_test.go
+++ b/bench_get_test.go
@@ -22,7 +22,7 @@ import (
_ "net/http/pprof"
- as "github.com/aerospike/aerospike-client-go/v7"
+ as "github.com/aerospike/aerospike-client-go/v8"
)
func makeDataForGetBench(set string, bins []*as.Bin) {
diff --git a/bench_key_test.go b/bench_key_test.go
index f845ed07..5437d66c 100644
--- a/bench_key_test.go
+++ b/bench_key_test.go
@@ -18,9 +18,9 @@ import (
"strings"
"testing"
- as "github.com/aerospike/aerospike-client-go/v7"
- "github.com/aerospike/aerospike-client-go/v7/pkg/ripemd160"
- ParticleType "github.com/aerospike/aerospike-client-go/v7/types/particle_type"
+ as "github.com/aerospike/aerospike-client-go/v8"
+ "github.com/aerospike/aerospike-client-go/v8/pkg/ripemd160"
+ ParticleType "github.com/aerospike/aerospike-client-go/v8/types/particle_type"
)
var str = strings.Repeat("abcd", 128)
diff --git a/bench_rand_gen_test.go b/bench_rand_gen_test.go
index 85493b12..3384005a 100644
--- a/bench_rand_gen_test.go
+++ b/bench_rand_gen_test.go
@@ -19,7 +19,7 @@ import (
"testing"
"time"
- xor "github.com/aerospike/aerospike-client-go/v7/types/rand"
+ xor "github.com/aerospike/aerospike-client-go/v8/types/rand"
)
func Benchmark_math_rand(b *testing.B) {
diff --git a/bench_read_command_test.go b/bench_read_command_test.go
index d6ea9ce5..c49d6229 100644
--- a/bench_read_command_test.go
+++ b/bench_read_command_test.go
@@ -32,7 +32,7 @@ func doReadCommandWriteBuffer(set string, value interface{}, b *testing.B) {
key, _ := NewKey("test", set, 1000)
for i := 0; i < b.N; i++ {
- command, err := newReadCommand(nil, policy, key, binNames, nil)
+ command, err := newReadCommand(nil, policy, key, binNames)
if err != nil {
panic(err)
}
diff --git a/bench_recordset_test.go b/bench_recordset_test.go
index 153a9b3b..3a4a7e4e 100644
--- a/bench_recordset_test.go
+++ b/bench_recordset_test.go
@@ -21,7 +21,7 @@ import (
// "time"
_ "net/http/pprof"
- as "github.com/aerospike/aerospike-client-go/v7"
+ as "github.com/aerospike/aerospike-client-go/v8"
// _ "github.com/influxdata/influxdb/client"
)
diff --git a/buffered_connection.go b/buffered_connection.go
index ff4dfd85..0f217a45 100644
--- a/buffered_connection.go
+++ b/buffered_connection.go
@@ -17,8 +17,8 @@ package aerospike
import (
"fmt"
- "github.com/aerospike/aerospike-client-go/v7/logger"
- "github.com/aerospike/aerospike-client-go/v7/types"
+ "github.com/aerospike/aerospike-client-go/v8/logger"
+ "github.com/aerospike/aerospike-client-go/v8/types"
)
type bufferedConn struct {
@@ -115,10 +115,6 @@ func (bc *bufferedConn) read(length int) ([]byte, Error) {
}
func (bc *bufferedConn) drainConn() Error {
- if bc.conn.grpcReader != nil {
- return nil
- }
-
if !bc.conn.IsConnected() {
return nil
}
diff --git a/bytes_buffer.go b/bytes_buffer.go
index 2ff42d47..c10fab88 100644
--- a/bytes_buffer.go
+++ b/bytes_buffer.go
@@ -94,6 +94,12 @@ func (buf *bufferEx) WriteInt16LittleEndian(num uint16) int {
return 2
}
+func (buf *bufferEx) WriteInt32LittleEndian(num uint32) int {
+ binary.LittleEndian.PutUint32(buf.dataBuffer[buf.dataOffset:buf.dataOffset+4], num)
+ buf.dataOffset += 4
+ return 4
+}
+
func (buf *bufferEx) WriteInt64LittleEndian(num uint64) int {
binary.LittleEndian.PutUint64(buf.dataBuffer[buf.dataOffset:buf.dataOffset+8], num)
buf.dataOffset += 8
diff --git a/cdt_bitwise_test.go b/cdt_bitwise_test.go
index 9c4ac6c0..5a5547fb 100644
--- a/cdt_bitwise_test.go
+++ b/cdt_bitwise_test.go
@@ -20,8 +20,8 @@ import (
gg "github.com/onsi/ginkgo/v2"
gm "github.com/onsi/gomega"
- as "github.com/aerospike/aerospike-client-go/v7"
- ast "github.com/aerospike/aerospike-client-go/v7/types"
+ as "github.com/aerospike/aerospike-client-go/v8"
+ ast "github.com/aerospike/aerospike-client-go/v8/types"
)
var _ = gg.Describe("CDT Bitwise Test", func() {
@@ -74,7 +74,7 @@ var _ = gg.Describe("CDT Bitwise Test", func() {
record, err := client.Operate(nil, key, full_ops...)
gm.Expect(err).ToNot(gm.HaveOccurred())
- result_list := record.Bins[cdtBinName].([]interface{})
+ result_list := record.Bins[cdtBinName].(as.OpResults)
lscan1_result := result_list[len(result_list)-7].(int)
rscan1_result := result_list[len(result_list)-6].(int)
getint_result := result_list[len(result_list)-5].(int)
@@ -109,8 +109,8 @@ var _ = gg.Describe("CDT Bitwise Test", func() {
rec, err := client.Operate(wpolicy, key, ops...)
gm.Expect(err).ToNot(gm.HaveOccurred())
- gm.Expect(rec.Bins[cdtBinName]).To(gm.BeAssignableToTypeOf([]interface{}{}))
- binResults := rec.Bins[cdtBinName].([]interface{})
+ gm.Expect(rec.Bins[cdtBinName]).To(gm.BeAssignableToTypeOf(as.OpResults{}))
+ binResults := rec.Bins[cdtBinName].(as.OpResults)
results := make([]int64, len(binResults))
for i := range binResults {
results[i] = int64(binResults[i].(int))
@@ -161,7 +161,7 @@ var _ = gg.Describe("CDT Bitwise Test", func() {
// // make a fresh list before each operation
// gg.BeforeEach(func() {
- // list = []interface{}{}
+ // list = as.OpResults{}
// for i := 1; i <= listSize; i++ {
// list = append(list, i)
@@ -573,7 +573,7 @@ var _ = gg.Describe("CDT Bitwise Test", func() {
// assertRecordFound(key, record)
- result_list := record.Bins[cdtBinName].([]interface{})
+ result_list := record.Bins[cdtBinName].(as.OpResults)
results := make([][]byte, len(expected))
for i := 0; i < len(expected); i++ {
@@ -948,7 +948,7 @@ var _ = gg.Describe("CDT Bitwise Test", func() {
as.BitResizeOp(policy, cdtBinName, 0, as.BitResizeFlagsShrinkOnly),
)
gm.Expect(err).ToNot(gm.HaveOccurred())
- result_list := record.Bins[cdtBinName].([]interface{})
+ result_list := record.Bins[cdtBinName].(as.OpResults)
get0 := result_list[1].([]byte)
get1 := result_list[3].([]byte)
get2 := result_list[5].([]byte)
diff --git a/cdt_context.go b/cdt_context.go
index 5f5e1265..9f3c14b6 100644
--- a/cdt_context.go
+++ b/cdt_context.go
@@ -19,7 +19,7 @@ import (
"encoding/base64"
"fmt"
- "github.com/aerospike/aerospike-client-go/v7/types"
+ "github.com/aerospike/aerospike-client-go/v8/types"
)
const (
diff --git a/cdt_context_test.go b/cdt_context_test.go
index f3f70092..96095153 100644
--- a/cdt_context_test.go
+++ b/cdt_context_test.go
@@ -18,7 +18,7 @@ import (
gg "github.com/onsi/ginkgo/v2"
gm "github.com/onsi/gomega"
- as "github.com/aerospike/aerospike-client-go/v7"
+ as "github.com/aerospike/aerospike-client-go/v8"
)
var _ = gg.Describe("CDTContext Test", func() {
diff --git a/cdt_list.go b/cdt_list.go
index e41e2fb4..4cacef39 100644
--- a/cdt_list.go
+++ b/cdt_list.go
@@ -310,18 +310,15 @@ func packCDTIfcVarParamsAsArray(packer BufferEx, opType int16, ctx []*CDTContext
}
size += n
} else {
- n, err = packShortRaw(packer, opType)
- if err != nil {
- return n, err
+ if n, err = packArrayBegin(packer, len(params)+1); err != nil {
+ return size + n, err
}
size += n
- if len(params) > 0 {
- if n, err = packArrayBegin(packer, len(params)); err != nil {
- return size + n, err
- }
- size += n
+ if n, err = packObject(packer, opType, false); err != nil {
+ return size + n, err
}
+ size += n
}
if len(params) > 0 {
@@ -587,7 +584,6 @@ func ListRemoveByValueListOp(binName string, values []interface{}, returnType Li
// If valueEnd is nil, the range is greater than equal to valueBegin.
// Server returns removed data specified by returnType
func ListRemoveByValueRangeOp(binName string, returnType ListReturnType, valueBegin, valueEnd interface{}, ctx ...*CDTContext) *Operation {
- // TODO: Inconsistent parameter order
if valueEnd == nil {
return &Operation{opType: _CDT_MODIFY, ctx: ctx, binName: binName, binValue: ListValue{_CDT_LIST_REMOVE_BY_VALUE_INTERVAL, IntegerValue(returnType), NewValue(valueBegin)}, encoder: listGenericOpEncoder}
}
diff --git a/cdt_list_test.go b/cdt_list_test.go
index 8a9a1611..e41cc526 100644
--- a/cdt_list_test.go
+++ b/cdt_list_test.go
@@ -21,7 +21,7 @@ import (
gg "github.com/onsi/ginkgo/v2"
gm "github.com/onsi/gomega"
- as "github.com/aerospike/aerospike-client-go/v7"
+ as "github.com/aerospike/aerospike-client-go/v8"
)
var _ = gg.Describe("CDT List Test", func() {
@@ -32,7 +32,7 @@ var _ = gg.Describe("CDT List Test", func() {
var key *as.Key
var wpolicy = as.NewWritePolicy(0, 0)
var cdtBinName string
- var list []interface{}
+ var list []any
gg.BeforeEach(func() {
@@ -52,7 +52,7 @@ var _ = gg.Describe("CDT List Test", func() {
gm.Expect(errors.Is(err, as.ErrKeyNotFound)).To(gm.BeTrue())
gm.Expect(cdtList).To(gm.BeNil())
- list := []interface{}{}
+ list := []any{}
for i := 1; i <= 100; i++ {
list = append(list, i)
@@ -84,7 +84,7 @@ var _ = gg.Describe("CDT List Test", func() {
gm.Expect(err).ToNot(gm.HaveOccurred())
gm.Expect(cdtList).ToNot(gm.BeNil())
gm.Expect(cdtList.Bins).ToNot(gm.BeNil())
- gm.Expect(cdtList.Bins[cdtBinName]).To(gm.Equal([]interface{}{4, []interface{}{1, 2, 3, 4}}))
+ gm.Expect(cdtList.Bins[cdtBinName]).To(gm.Equal(as.OpResults{4, []any{1, 2, 3, 4}}))
})
gg.Describe("CDT List Operations", func() {
@@ -96,7 +96,7 @@ var _ = gg.Describe("CDT List Test", func() {
_, err = client.Delete(nil, key)
gm.Expect(err).ToNot(gm.HaveOccurred())
- list = []interface{}{}
+ list = []any{}
for i := 1; i <= listSize; i++ {
list = append(list, i)
@@ -127,31 +127,31 @@ var _ = gg.Describe("CDT List Test", func() {
gg.It("should Get the last 3 element", func() {
cdtListRes, err := client.Operate(wpolicy, key, as.ListGetRangeOp(cdtBinName, -3, 3))
gm.Expect(err).ToNot(gm.HaveOccurred())
- gm.Expect(cdtListRes.Bins[cdtBinName]).To(gm.Equal([]interface{}{listSize - 2, listSize - 1, listSize - 0}))
+ gm.Expect(cdtListRes.Bins[cdtBinName]).To(gm.Equal([]any{listSize - 2, listSize - 1, listSize - 0}))
})
gg.It("should Get the from element #7 till the end of list", func() {
cdtListRes, err := client.Operate(wpolicy, key, as.ListGetRangeFromOp(cdtBinName, 7))
gm.Expect(err).ToNot(gm.HaveOccurred())
- gm.Expect(cdtListRes.Bins[cdtBinName]).To(gm.Equal([]interface{}{listSize - 2, listSize - 1, listSize - 0}))
+ gm.Expect(cdtListRes.Bins[cdtBinName]).To(gm.Equal([]any{listSize - 2, listSize - 1, listSize - 0}))
})
gg.It("should Get by value", func() {
cdtListRes, err := client.Operate(wpolicy, key, as.ListGetByValueOp(cdtBinName, 7, as.ListReturnTypeValue))
gm.Expect(err).ToNot(gm.HaveOccurred())
- gm.Expect(cdtListRes.Bins[cdtBinName]).To(gm.Equal([]interface{}{7}))
+ gm.Expect(cdtListRes.Bins[cdtBinName]).To(gm.Equal([]any{7}))
- cdtListRes, err = client.Operate(wpolicy, key, as.ListGetByValueListOp(cdtBinName, []interface{}{7, 9}, as.ListReturnTypeIndex))
+ cdtListRes, err = client.Operate(wpolicy, key, as.ListGetByValueListOp(cdtBinName, []any{7, 9}, as.ListReturnTypeIndex))
gm.Expect(err).ToNot(gm.HaveOccurred())
- gm.Expect(cdtListRes.Bins[cdtBinName]).To(gm.Equal([]interface{}{6, 8}))
+ gm.Expect(cdtListRes.Bins[cdtBinName]).To(gm.Equal([]any{6, 8}))
cdtListRes, err = client.Operate(wpolicy, key, as.ListGetByValueRangeOp(cdtBinName, 5, 9, as.ListReturnTypeValue))
gm.Expect(err).ToNot(gm.HaveOccurred())
- gm.Expect(cdtListRes.Bins[cdtBinName]).To(gm.Equal([]interface{}{5, 6, 7, 8}))
+ gm.Expect(cdtListRes.Bins[cdtBinName]).To(gm.Equal([]any{5, 6, 7, 8}))
cdtListRes, err = client.Operate(wpolicy, key, as.ListGetByValueRangeOp(cdtBinName, 5, 9, as.ListReturnTypeIndex))
gm.Expect(err).ToNot(gm.HaveOccurred())
- gm.Expect(cdtListRes.Bins[cdtBinName]).To(gm.Equal([]interface{}{4, 5, 6, 7}))
+ gm.Expect(cdtListRes.Bins[cdtBinName]).To(gm.Equal([]any{4, 5, 6, 7}))
cdtListRes, err = client.Operate(wpolicy, key, as.ListGetByValueRangeOp(cdtBinName, 5, 9, as.ListReturnTypeExists))
gm.Expect(err).ToNot(gm.HaveOccurred())
@@ -165,19 +165,19 @@ var _ = gg.Describe("CDT List Test", func() {
cdtListRes, err = client.Operate(wpolicy, key, as.ListGetByIndexRangeOp(cdtBinName, 7, as.ListReturnTypeIndex))
gm.Expect(err).ToNot(gm.HaveOccurred())
- gm.Expect(cdtListRes.Bins[cdtBinName]).To(gm.Equal([]interface{}{7, 8, 9}))
+ gm.Expect(cdtListRes.Bins[cdtBinName]).To(gm.Equal([]any{7, 8, 9}))
cdtListRes, err = client.Operate(wpolicy, key, as.ListGetByIndexRangeOp(cdtBinName, 7, as.ListReturnTypeValue))
gm.Expect(err).ToNot(gm.HaveOccurred())
- gm.Expect(cdtListRes.Bins[cdtBinName]).To(gm.Equal([]interface{}{8, 9, 10}))
+ gm.Expect(cdtListRes.Bins[cdtBinName]).To(gm.Equal([]any{8, 9, 10}))
cdtListRes, err = client.Operate(wpolicy, key, as.ListGetByIndexRangeOp(cdtBinName, 8, as.ListReturnTypeValue))
gm.Expect(err).ToNot(gm.HaveOccurred())
- gm.Expect(cdtListRes.Bins[cdtBinName]).To(gm.Equal([]interface{}{9, 10}))
+ gm.Expect(cdtListRes.Bins[cdtBinName]).To(gm.Equal([]any{9, 10}))
cdtListRes, err = client.Operate(wpolicy, key, as.ListGetByIndexRangeCountOp(cdtBinName, 5, 2, as.ListReturnTypeValue))
gm.Expect(err).ToNot(gm.HaveOccurred())
- gm.Expect(cdtListRes.Bins[cdtBinName]).To(gm.Equal([]interface{}{6, 7}))
+ gm.Expect(cdtListRes.Bins[cdtBinName]).To(gm.Equal([]any{6, 7}))
})
gg.It("should Get by rank", func() {
@@ -187,19 +187,19 @@ var _ = gg.Describe("CDT List Test", func() {
cdtListRes, err = client.Operate(wpolicy, key, as.ListGetByRankRangeOp(cdtBinName, 7, as.ListReturnTypeIndex))
gm.Expect(err).ToNot(gm.HaveOccurred())
- gm.Expect(cdtListRes.Bins[cdtBinName]).To(gm.Equal([]interface{}{7, 8, 9}))
+ gm.Expect(cdtListRes.Bins[cdtBinName]).To(gm.Equal([]any{7, 8, 9}))
cdtListRes, err = client.Operate(wpolicy, key, as.ListGetByRankRangeOp(cdtBinName, 7, as.ListReturnTypeValue))
gm.Expect(err).ToNot(gm.HaveOccurred())
- gm.Expect(cdtListRes.Bins[cdtBinName]).To(gm.Equal([]interface{}{8, 9, 10}))
+ gm.Expect(cdtListRes.Bins[cdtBinName]).To(gm.Equal([]any{8, 9, 10}))
cdtListRes, err = client.Operate(wpolicy, key, as.ListGetByRankRangeOp(cdtBinName, 8, as.ListReturnTypeValue))
gm.Expect(err).ToNot(gm.HaveOccurred())
- gm.Expect(cdtListRes.Bins[cdtBinName]).To(gm.Equal([]interface{}{9, 10}))
+ gm.Expect(cdtListRes.Bins[cdtBinName]).To(gm.Equal([]any{9, 10}))
cdtListRes, err = client.Operate(wpolicy, key, as.ListGetByRankRangeCountOp(cdtBinName, 5, 2, as.ListReturnTypeValue))
gm.Expect(err).ToNot(gm.HaveOccurred())
- gm.Expect(cdtListRes.Bins[cdtBinName]).To(gm.Equal([]interface{}{6, 7}))
+ gm.Expect(cdtListRes.Bins[cdtBinName]).To(gm.Equal([]any{6, 7}))
})
gg.It("should append an element to the tail", func() {
@@ -232,7 +232,7 @@ var _ = gg.Describe("CDT List Test", func() {
})
gg.It("should append a few elements to the tail", func() {
- elems := []interface{}{math.MaxInt64, math.MaxInt64 - 1, math.MaxInt64 - 2}
+ elems := []any{math.MaxInt64, math.MaxInt64 - 1, math.MaxInt64 - 2}
cdtListRes, err := client.Operate(wpolicy, key, as.ListAppendOp(cdtBinName, elems...))
gm.Expect(err).ToNot(gm.HaveOccurred())
gm.Expect(cdtListRes.Bins[cdtBinName]).To(gm.Equal(listSize + 3))
@@ -243,7 +243,7 @@ var _ = gg.Describe("CDT List Test", func() {
})
gg.It("should append a few elements to the tail with policy", func() {
- elems := []interface{}{math.MaxInt64, math.MaxInt64 - 1, math.MaxInt64 - 2}
+ elems := []any{math.MaxInt64, math.MaxInt64 - 1, math.MaxInt64 - 2}
cdtListRes, err := client.Operate(wpolicy, key, as.ListAppendWithPolicyOp(as.DefaultListPolicy(), cdtBinName, elems...))
gm.Expect(err).ToNot(gm.HaveOccurred())
gm.Expect(cdtListRes.Bins[cdtBinName]).To(gm.Equal(listSize + 3))
@@ -279,11 +279,11 @@ var _ = gg.Describe("CDT List Test", func() {
cdtListRes, err = client.Operate(wpolicy, key, as.ListGetRangeOp(cdtBinName, 0, -1))
gm.Expect(err).ToNot(gm.HaveOccurred())
- gm.Expect(cdtListRes.Bins[cdtBinName]).To(gm.Equal([]interface{}{math.MaxInt64 - 1, math.MaxInt64 - 2, math.MaxInt64, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10}))
+ gm.Expect(cdtListRes.Bins[cdtBinName]).To(gm.Equal([]any{math.MaxInt64 - 1, math.MaxInt64 - 2, math.MaxInt64, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10}))
})
gg.It("should prepend a few elements to the tail via ListInsertOp", func() {
- elems := []interface{}{math.MaxInt64, math.MaxInt64 - 1, math.MaxInt64 - 2}
+ elems := []any{math.MaxInt64, math.MaxInt64 - 1, math.MaxInt64 - 2}
cdtListRes, err := client.Operate(wpolicy, key, as.ListInsertOp(cdtBinName, 0, elems...))
gm.Expect(err).ToNot(gm.HaveOccurred())
gm.Expect(cdtListRes.Bins[cdtBinName]).To(gm.Equal(listSize + 3))
@@ -387,13 +387,13 @@ var _ = gg.Describe("CDT List Test", func() {
})
gg.It("should remove elements by value", func() {
- cdtListRes, err := client.Operate(wpolicy, key, as.ListRemoveByValueListOp(cdtBinName, []interface{}{1, 2, 3, 4, 5, 6, 7}, as.ListReturnTypeValue))
+ cdtListRes, err := client.Operate(wpolicy, key, as.ListRemoveByValueListOp(cdtBinName, []any{1, 2, 3, 4, 5, 6, 7}, as.ListReturnTypeValue))
gm.Expect(err).ToNot(gm.HaveOccurred())
- gm.Expect(cdtListRes.Bins[cdtBinName]).To(gm.Equal([]interface{}{1, 2, 3, 4, 5, 6, 7}))
+ gm.Expect(cdtListRes.Bins[cdtBinName]).To(gm.Equal([]any{1, 2, 3, 4, 5, 6, 7}))
cdtListRes, err = client.Operate(wpolicy, key, as.ListGetRangeOp(cdtBinName, 0, -1))
gm.Expect(err).ToNot(gm.HaveOccurred())
- gm.Expect(cdtListRes.Bins[cdtBinName]).To(gm.Equal([]interface{}{8, 9, 10}))
+ gm.Expect(cdtListRes.Bins[cdtBinName]).To(gm.Equal([]any{8, 9, 10}))
cdtListRes, err = client.Operate(wpolicy, key, as.ListRemoveByValueOp(cdtBinName, 9, as.ListReturnTypeCount))
gm.Expect(err).ToNot(gm.HaveOccurred())
@@ -401,17 +401,17 @@ var _ = gg.Describe("CDT List Test", func() {
cdtListRes, err = client.Operate(wpolicy, key, as.ListGetRangeOp(cdtBinName, 0, -1))
gm.Expect(err).ToNot(gm.HaveOccurred())
- gm.Expect(cdtListRes.Bins[cdtBinName]).To(gm.Equal([]interface{}{8, 10}))
+ gm.Expect(cdtListRes.Bins[cdtBinName]).To(gm.Equal([]any{8, 10}))
})
gg.It("should remove elements by value range", func() {
cdtListRes, err := client.Operate(wpolicy, key, as.ListRemoveByValueRangeOp(cdtBinName, as.ListReturnTypeValue, 1, 5))
gm.Expect(err).ToNot(gm.HaveOccurred())
- gm.Expect(cdtListRes.Bins[cdtBinName]).To(gm.Equal([]interface{}{1, 2, 3, 4}))
+ gm.Expect(cdtListRes.Bins[cdtBinName]).To(gm.Equal([]any{1, 2, 3, 4}))
cdtListRes, err = client.Operate(wpolicy, key, as.ListGetRangeOp(cdtBinName, 0, -1))
gm.Expect(err).ToNot(gm.HaveOccurred())
- gm.Expect(cdtListRes.Bins[cdtBinName]).To(gm.Equal([]interface{}{5, 6, 7, 8, 9, 10}))
+ gm.Expect(cdtListRes.Bins[cdtBinName]).To(gm.Equal([]any{5, 6, 7, 8, 9, 10}))
cdtListRes, err = client.Operate(wpolicy, key, as.ListRemoveByValueRangeOp(cdtBinName, as.ListReturnTypeCount, 6, 9))
gm.Expect(err).ToNot(gm.HaveOccurred())
@@ -419,7 +419,7 @@ var _ = gg.Describe("CDT List Test", func() {
cdtListRes, err = client.Operate(wpolicy, key, as.ListGetRangeOp(cdtBinName, 0, -1))
gm.Expect(err).ToNot(gm.HaveOccurred())
- gm.Expect(cdtListRes.Bins[cdtBinName]).To(gm.Equal([]interface{}{5, 9, 10}))
+ gm.Expect(cdtListRes.Bins[cdtBinName]).To(gm.Equal([]any{5, 9, 10}))
})
gg.It("should remove elements by index", func() {
@@ -429,7 +429,7 @@ var _ = gg.Describe("CDT List Test", func() {
cdtListRes, err = client.Operate(wpolicy, key, as.ListGetRangeOp(cdtBinName, 0, -1))
gm.Expect(err).ToNot(gm.HaveOccurred())
- gm.Expect(cdtListRes.Bins[cdtBinName]).To(gm.Equal([]interface{}{2, 3, 4, 5, 6, 7, 8, 9, 10}))
+ gm.Expect(cdtListRes.Bins[cdtBinName]).To(gm.Equal([]any{2, 3, 4, 5, 6, 7, 8, 9, 10}))
cdtListRes, err = client.Operate(wpolicy, key, as.ListRemoveByIndexRangeOp(cdtBinName, 5, as.ListReturnTypeCount))
gm.Expect(err).ToNot(gm.HaveOccurred())
@@ -437,7 +437,7 @@ var _ = gg.Describe("CDT List Test", func() {
cdtListRes, err = client.Operate(wpolicy, key, as.ListGetRangeOp(cdtBinName, 0, -1))
gm.Expect(err).ToNot(gm.HaveOccurred())
- gm.Expect(cdtListRes.Bins[cdtBinName]).To(gm.Equal([]interface{}{2, 3, 4, 5, 6}))
+ gm.Expect(cdtListRes.Bins[cdtBinName]).To(gm.Equal([]any{2, 3, 4, 5, 6}))
cdtListRes, err = client.Operate(wpolicy, key, as.ListRemoveByIndexRangeCountOp(cdtBinName, 2, 3, as.ListReturnTypeCount))
gm.Expect(err).ToNot(gm.HaveOccurred())
@@ -445,7 +445,7 @@ var _ = gg.Describe("CDT List Test", func() {
cdtListRes, err = client.Operate(wpolicy, key, as.ListGetRangeOp(cdtBinName, 0, -1))
gm.Expect(err).ToNot(gm.HaveOccurred())
- gm.Expect(cdtListRes.Bins[cdtBinName]).To(gm.Equal([]interface{}{2, 3}))
+ gm.Expect(cdtListRes.Bins[cdtBinName]).To(gm.Equal([]any{2, 3}))
})
gg.It("should remove elements by rank", func() {
@@ -458,7 +458,7 @@ var _ = gg.Describe("CDT List Test", func() {
cdtListRes, err = client.Operate(wpolicy, key, as.ListGetRangeOp(cdtBinName, 0, -1))
gm.Expect(err).ToNot(gm.HaveOccurred())
- gm.Expect(cdtListRes.Bins[cdtBinName]).To(gm.Equal([]interface{}{2, 3, 4, 5, 6, 7, 8, 9, 10}))
+ gm.Expect(cdtListRes.Bins[cdtBinName]).To(gm.Equal([]any{2, 3, 4, 5, 6, 7, 8, 9, 10}))
cdtListRes, err = client.Operate(wpolicy, key, as.ListRemoveByRankRangeOp(cdtBinName, 5, as.ListReturnTypeCount))
gm.Expect(err).ToNot(gm.HaveOccurred())
@@ -466,7 +466,7 @@ var _ = gg.Describe("CDT List Test", func() {
cdtListRes, err = client.Operate(wpolicy, key, as.ListGetRangeOp(cdtBinName, 0, -1))
gm.Expect(err).ToNot(gm.HaveOccurred())
- gm.Expect(cdtListRes.Bins[cdtBinName]).To(gm.Equal([]interface{}{2, 3, 4, 5, 6}))
+ gm.Expect(cdtListRes.Bins[cdtBinName]).To(gm.Equal([]any{2, 3, 4, 5, 6}))
cdtListRes, err = client.Operate(wpolicy, key, as.ListRemoveByRankRangeCountOp(cdtBinName, 2, 3, as.ListReturnTypeCount))
gm.Expect(err).ToNot(gm.HaveOccurred())
@@ -474,7 +474,7 @@ var _ = gg.Describe("CDT List Test", func() {
cdtListRes, err = client.Operate(wpolicy, key, as.ListGetRangeOp(cdtBinName, 0, -1))
gm.Expect(err).ToNot(gm.HaveOccurred())
- gm.Expect(cdtListRes.Bins[cdtBinName]).To(gm.Equal([]interface{}{2, 3}))
+ gm.Expect(cdtListRes.Bins[cdtBinName]).To(gm.Equal([]any{2, 3}))
})
gg.It("should increment elements", func() {
@@ -528,18 +528,18 @@ var _ = gg.Describe("CDT List Test", func() {
cdtListRes, err := client.Operate(wpolicy, key, as.ListGetRangeFromOp(cdtBinName, 0))
gm.Expect(err).ToNot(gm.HaveOccurred())
- gm.Expect(cdtListRes.Bins[cdtBinName]).To(gm.Equal([]interface{}{101, 2, 3, 4, 5, 6, 7, 8, 9, 10}))
+ gm.Expect(cdtListRes.Bins[cdtBinName]).To(gm.Equal([]any{101, 2, 3, 4, 5, 6, 7, 8, 9, 10}))
cdtListRes, err = client.Operate(wpolicy, key, as.ListSortOp(cdtBinName, as.ListSortFlagsDefault))
gm.Expect(err).ToNot(gm.HaveOccurred())
cdtListRes, err = client.Operate(wpolicy, key, as.ListGetRangeFromOp(cdtBinName, 0))
gm.Expect(err).ToNot(gm.HaveOccurred())
- gm.Expect(cdtListRes.Bins[cdtBinName]).To(gm.Equal([]interface{}{2, 3, 4, 5, 6, 7, 8, 9, 10, 101}))
+ gm.Expect(cdtListRes.Bins[cdtBinName]).To(gm.Equal([]any{2, 3, 4, 5, 6, 7, 8, 9, 10, 101}))
})
gg.It("should set elements", func() {
- elems := []interface{}{}
+ elems := []any{}
for i := 0; i < listSize; i++ {
cdtListRes, err := client.Operate(wpolicy, key, as.ListSetOp(cdtBinName, i, math.MaxInt64))
gm.Expect(err).ToNot(gm.HaveOccurred())
@@ -564,7 +564,7 @@ var _ = gg.Describe("CDT List Test", func() {
})
gg.It("should trim list elements", func() {
- elems := []interface{}{3, 4, 5}
+ elems := []any{3, 4, 5}
cdtListRes, err := client.Operate(wpolicy, key, as.ListTrimOp(cdtBinName, 2, 3))
gm.Expect(err).ToNot(gm.HaveOccurred())
gm.Expect(cdtListRes.Bins[cdtBinName]).To(gm.Equal(7))
@@ -601,7 +601,7 @@ var _ = gg.Describe("CDT List Test", func() {
cdtBinName2 := cdtBinName + "2"
- list := []interface{}{0, 4, 5, 9, 9, 11, 15, 0}
+ list := []any{0, 4, 5, 9, 9, 11, 15, 0}
cdtListPolicy1 := as.NewListPolicy(as.ListOrderOrdered, as.ListWriteFlagsAddUnique|as.ListWriteFlagsPartial|as.ListWriteFlagsNoFail)
cdtListPolicy2 := as.NewListPolicy(as.ListOrderOrdered, as.ListWriteFlagsAddUnique|as.ListWriteFlagsNoFail)
@@ -614,7 +614,7 @@ var _ = gg.Describe("CDT List Test", func() {
gm.Expect(record.Bins[cdtBinName]).To(gm.Equal(6))
gm.Expect(record.Bins[cdtBinName2]).To(gm.Equal(0))
- list = []interface{}{11, 3}
+ list = []any{11, 3}
record, err = client.Operate(wpolicy, key,
as.ListAppendWithPolicyOp(cdtListPolicy1, cdtBinName, list...),
@@ -630,7 +630,7 @@ var _ = gg.Describe("CDT List Test", func() {
gg.It("should support Relative GetList Ops", func() {
client.Delete(nil, key)
- list := []interface{}{0, 4, 5, 9, 11, 15}
+ list := []any{0, 4, 5, 9, 11, 15}
cdtListPolicy := as.NewListPolicy(as.ListOrderOrdered, as.ListWriteFlagsDefault)
record, err := client.Operate(wpolicy, key,
@@ -650,13 +650,13 @@ var _ = gg.Describe("CDT List Test", func() {
)
gm.Expect(err).ToNot(gm.HaveOccurred())
- gm.Expect(record.Bins[cdtBinName]).To(gm.Equal([]interface{}{6, []interface{}{5, 9, 11, 15}, []interface{}{9, 11, 15}, []interface{}{4, 5, 9, 11, 15}, []interface{}{4, 5, 9, 11, 15}, []interface{}{11, 15}, []interface{}{0, 4, 5, 9, 11, 15}, []interface{}{5, 9}, []interface{}{9}, []interface{}{4, 5}, []interface{}{4}, []interface{}{11, 15}, []interface{}{}}))
+ gm.Expect(record.Bins[cdtBinName]).To(gm.Equal(as.OpResults{6, []any{5, 9, 11, 15}, []any{9, 11, 15}, []any{4, 5, 9, 11, 15}, []any{4, 5, 9, 11, 15}, []any{11, 15}, []any{0, 4, 5, 9, 11, 15}, []any{5, 9}, []any{9}, []any{4, 5}, []any{4}, []any{11, 15}, []any{}}))
})
gg.It("should support Relative RemoveList Ops", func() {
client.Delete(nil, key)
- list := []interface{}{0, 4, 5, 9, 11, 15}
+ list := []any{0, 4, 5, 9, 11, 15}
cdtListPolicy := as.NewListPolicy(as.ListOrderOrdered, as.ListWriteFlagsDefault)
record, err := client.Operate(wpolicy, key,
@@ -670,13 +670,13 @@ var _ = gg.Describe("CDT List Test", func() {
)
gm.Expect(err).ToNot(gm.HaveOccurred())
- gm.Expect(record.Bins[cdtBinName]).To(gm.Equal([]interface{}{6, []interface{}{5, 9, 11, 15}, []interface{}{}, []interface{}{4}, []interface{}{}, []interface{}{}, []interface{}{0}}))
+ gm.Expect(record.Bins[cdtBinName]).To(gm.Equal(as.OpResults{6, []any{5, 9, 11, 15}, []any{}, []any{4}, []any{}, []any{}, []any{0}}))
})
gg.It("should support List Infinity Ops", func() {
client.Delete(nil, key)
- list := []interface{}{0, 4, 5, 9, 11, 15}
+ list := []any{0, 4, 5, 9, 11, 15}
cdtListPolicy := as.NewListPolicy(as.ListOrderOrdered, as.ListWriteFlagsDefault)
record, err := client.Operate(wpolicy, key,
@@ -685,34 +685,34 @@ var _ = gg.Describe("CDT List Test", func() {
)
gm.Expect(err).ToNot(gm.HaveOccurred())
- gm.Expect(record.Bins[cdtBinName]).To(gm.Equal([]interface{}{6, []interface{}{11, 15}}))
+ gm.Expect(record.Bins[cdtBinName]).To(gm.Equal(as.OpResults{6, []any{11, 15}}))
})
gg.It("should support List WildCard Ops", func() {
client.Delete(nil, key)
- list := []interface{}{
- []interface{}{"John", 55},
- []interface{}{"Jim", 95},
- []interface{}{"Joe", 80},
+ list := []any{
+ []any{"John", 55},
+ []any{"Jim", 95},
+ []any{"Joe", 80},
}
cdtListPolicy := as.NewListPolicy(as.ListOrderOrdered, as.ListWriteFlagsDefault)
record, err := client.Operate(wpolicy, key,
as.ListAppendWithPolicyOp(cdtListPolicy, cdtBinName, list...),
- as.ListGetByValueOp(cdtBinName, []interface{}{"Jim", as.NewWildCardValue()}, as.ListReturnTypeValue),
+ as.ListGetByValueOp(cdtBinName, []any{"Jim", as.NewWildCardValue()}, as.ListReturnTypeValue),
)
gm.Expect(err).ToNot(gm.HaveOccurred())
- gm.Expect(record.Bins[cdtBinName]).To(gm.Equal([]interface{}{3, []interface{}{[]interface{}{"Jim", 95}}}))
+ gm.Expect(record.Bins[cdtBinName]).To(gm.Equal(as.OpResults{3, []any{[]any{"Jim", 95}}}))
})
gg.It("should support Nested List Ops", func() {
client.Delete(nil, key)
- list := []interface{}{
- []interface{}{7, 9, 5},
- []interface{}{1, 2, 3},
- []interface{}{6, 5, 4, 1},
+ list := []any{
+ []any{7, 9, 5},
+ []any{1, 2, 3},
+ []any{6, 5, 4, 1},
}
err := client.Put(wpolicy, key, as.BinMap{cdtBinName: list})
@@ -725,12 +725,12 @@ var _ = gg.Describe("CDT List Test", func() {
record, err = client.Operate(wpolicy, key, as.ListAppendWithPolicyContextOp(as.DefaultListPolicy(), cdtBinName, []*as.CDTContext{as.CtxListIndex(-1)}, 11), as.GetBinOp(cdtBinName))
gm.Expect(err).ToNot(gm.HaveOccurred())
- gm.Expect(record.Bins[cdtBinName]).To(gm.Equal([]interface{}{
+ gm.Expect(record.Bins[cdtBinName]).To(gm.Equal(as.OpResults{
5,
- []interface{}{
- []interface{}{7, 9, 5},
- []interface{}{1, 2, 3},
- []interface{}{6, 5, 4, 1, 11},
+ []any{
+ []any{7, 9, 5},
+ []any{1, 2, 3},
+ []any{6, 5, 4, 1, 11},
},
}))
})
@@ -738,15 +738,15 @@ var _ = gg.Describe("CDT List Test", func() {
gg.It("should support Nested List Map Ops", func() {
client.Delete(nil, key)
- m := map[interface{}]interface{}{
- "key1": []interface{}{
- []interface{}{7, 9, 5},
- []interface{}{13},
+ m := map[any]any{
+ "key1": []any{
+ []any{7, 9, 5},
+ []any{13},
},
- "key2": []interface{}{
- []interface{}{9},
- []interface{}{2, 4},
- []interface{}{6, 1, 9},
+ "key2": []any{
+ []any{9},
+ []any{2, 4},
+ []any{6, 1, 9},
},
}
@@ -760,17 +760,17 @@ var _ = gg.Describe("CDT List Test", func() {
record, err = client.Operate(wpolicy, key, as.ListAppendWithPolicyContextOp(as.DefaultListPolicy(), cdtBinName, []*as.CDTContext{as.CtxMapKey(as.StringValue("key2")), as.CtxListRank(0)}, 11), as.GetBinOp(cdtBinName))
gm.Expect(err).ToNot(gm.HaveOccurred())
- gm.Expect(record.Bins[cdtBinName]).To(gm.Equal([]interface{}{
+ gm.Expect(record.Bins[cdtBinName]).To(gm.Equal(as.OpResults{
3,
- map[interface{}]interface{}{
- "key1": []interface{}{
- []interface{}{7, 9, 5},
- []interface{}{13},
+ map[any]any{
+ "key1": []any{
+ []any{7, 9, 5},
+ []any{13},
},
- "key2": []interface{}{
- []interface{}{9},
- []interface{}{2, 4, 11},
- []interface{}{6, 1, 9},
+ "key2": []any{
+ []any{9},
+ []any{2, 4, 11},
+ []any{6, 1, 9},
},
}}))
})
@@ -781,7 +781,7 @@ var _ = gg.Describe("CDT List Test", func() {
l1 := []as.Value{as.IntegerValue(7), as.IntegerValue(9), as.IntegerValue(5)}
l2 := []as.Value{as.IntegerValue(1), as.IntegerValue(2), as.IntegerValue(3)}
l3 := []as.Value{as.IntegerValue(6), as.IntegerValue(5), as.IntegerValue(4), as.IntegerValue(1)}
- inputList := []interface{}{as.ValueArray(l1), as.ValueArray(l2), as.ValueArray(l3)}
+ inputList := []any{as.ValueArray(l1), as.ValueArray(l2), as.ValueArray(l3)}
// Create list.
record, err := client.Operate(nil, key,
@@ -797,16 +797,16 @@ var _ = gg.Describe("CDT List Test", func() {
)
gm.Expect(err).ToNot(gm.HaveOccurred())
- results := record.Bins[cdtBinName].([]interface{})
+ results := record.Bins[cdtBinName].(as.OpResults)
count := results[0]
gm.Expect(count).To(gm.Equal(1))
- list := results[1].([]interface{})
+ list := results[1].([]any)
gm.Expect(len(list)).To(gm.Equal(4))
// Test last nested list.
- list = list[1].([]interface{})
+ list = list[1].([]any)
gm.Expect(len(list)).To(gm.Equal(1))
gm.Expect(list[0]).To(gm.Equal(2))
})
diff --git a/cdt_map_test.go b/cdt_map_test.go
index 5dd76084..7765ca73 100644
--- a/cdt_map_test.go
+++ b/cdt_map_test.go
@@ -20,7 +20,7 @@ import (
gg "github.com/onsi/ginkgo/v2"
gm "github.com/onsi/gomega"
- as "github.com/aerospike/aerospike-client-go/v7"
+ as "github.com/aerospike/aerospike-client-go/v8"
)
const udfCDTTests = `
@@ -176,7 +176,7 @@ var _ = gg.Describe("CDT Map Test", func() {
)
gm.Expect(err).ToNot(gm.HaveOccurred())
- gm.Expect(rec.Bins).To(gm.Equal(as.BinMap{"bin": []interface{}{[]interface{}{"v1.0", "v1.1"}, []interface{}{"v2.0", "v2.1"}}}))
+ gm.Expect(rec.Bins).To(gm.Equal(as.BinMap{"bin": as.OpResults{[]interface{}{"v1.0", "v1.1"}, []interface{}{"v2.0", "v2.1"}}}))
rec, err = client.Get(nil, key)
gm.Expect(err).ToNot(gm.HaveOccurred())
@@ -193,7 +193,7 @@ var _ = gg.Describe("CDT Map Test", func() {
as.GetBinOp(cdtBinName),
)
// gm.Expect(err).ToNot(gm.HaveOccurred())
- gm.Expect(cdtMap).NotTo(gm.Equal([]interface{}{1, 2, 3, 4, 4, 4, map[interface{}]interface{}{1: 1, 2: 2, 3: 3, 4: 4}}))
+ gm.Expect(cdtMap).NotTo(gm.Equal(as.OpResults{1, 2, 3, 4, 4, 4, map[interface{}]interface{}{1: 1, 2: 2, 3: 3, 4: 4}}))
cdtMap, err = client.Get(nil, key, cdtBinName)
gm.Expect(err).ToNot(gm.HaveOccurred())
@@ -237,7 +237,7 @@ var _ = gg.Describe("CDT Map Test", func() {
)
gm.Expect(err).ToNot(gm.HaveOccurred())
- gm.Expect(cdtMap.Bins[cdtBinName]).To(gm.Equal([]interface{}{3, 4, 4, 4, "my default", "changed", []as.MapPair{{Key: 12, Value: 23}, {Key: 13, Value: "myval2"}}}))
+ gm.Expect(cdtMap.Bins[cdtBinName]).To(gm.Equal(as.OpResults{3, 4, 4, 4, "my default", "changed", []as.MapPair{{Key: 12, Value: 23}, {Key: 13, Value: "myval2"}}}))
cdtMap, err = client.Get(nil, key, cdtBinName)
gm.Expect(err).ToNot(gm.HaveOccurred())
@@ -271,7 +271,7 @@ var _ = gg.Describe("CDT Map Test", func() {
)
gm.Expect(err).ToNot(gm.HaveOccurred())
- gm.Expect(cdtMap.Bins).To(gm.Equal(as.BinMap{cdtBinName: 3, "other_bin": []interface{}{nil, "head...tail"}})) // there were two operations for bin `other_bin`, so the results come back in an array
+ gm.Expect(cdtMap.Bins).To(gm.Equal(as.BinMap{cdtBinName: 3, "other_bin": as.OpResults{nil, "head...tail"}})) // there were two operations for bin `other_bin`, so the results come back in an array
// Should set SendKey == true for a solely read operation without getting PARAMETER_ERROR from the server
wpolicy2 := *wpolicy
@@ -297,12 +297,12 @@ var _ = gg.Describe("CDT Map Test", func() {
)
gm.Expect(err).ToNot(gm.HaveOccurred())
- gm.Expect(cdtMap.Bins[cdtBinName].([]interface{})[0]).To(gm.Equal(1))
- gm.Expect(cdtMap.Bins[cdtBinName].([]interface{})[1]).To(gm.Equal(2))
- gm.Expect(cdtMap.Bins[cdtBinName].([]interface{})[2]).To(gm.Equal(3))
- gm.Expect(cdtMap.Bins[cdtBinName].([]interface{})[3]).To(gm.Equal(4))
- gm.Expect(cdtMap.Bins[cdtBinName].([]interface{})[4]).To(gm.Equal([]as.MapPair{{Key: 3, Value: 2}}))
- gm.Expect(cdtMap.Bins[cdtBinName].([]interface{})[5]).To(gm.ConsistOf([]as.MapPair{{Key: 1, Value: 4}, {Key: 2, Value: 3}, {Key: 3, Value: 2}, {Key: 4, Value: 1}}))
+ gm.Expect(cdtMap.Bins[cdtBinName].(as.OpResults)[0]).To(gm.Equal(1))
+ gm.Expect(cdtMap.Bins[cdtBinName].(as.OpResults)[1]).To(gm.Equal(2))
+ gm.Expect(cdtMap.Bins[cdtBinName].(as.OpResults)[2]).To(gm.Equal(3))
+ gm.Expect(cdtMap.Bins[cdtBinName].(as.OpResults)[3]).To(gm.Equal(4))
+ gm.Expect(cdtMap.Bins[cdtBinName].(as.OpResults)[4]).To(gm.Equal([]as.MapPair{{Key: 3, Value: 2}}))
+ gm.Expect(cdtMap.Bins[cdtBinName].(as.OpResults)[5]).To(gm.ConsistOf([]as.MapPair{{Key: 1, Value: 4}, {Key: 2, Value: 3}, {Key: 3, Value: 2}, {Key: 4, Value: 1}}))
cdtMap, err = client.Operate(wpolicy, key,
as.MapSetPolicyOp(as.NewMapPolicy(as.MapOrder.KEY_ORDERED, as.MapWriteMode.UPDATE), cdtBinName),
@@ -313,7 +313,7 @@ var _ = gg.Describe("CDT Map Test", func() {
)
gm.Expect(err).ToNot(gm.HaveOccurred())
- gm.Expect(cdtMap.Bins).To(gm.Equal(as.BinMap{cdtBinName: []interface{}{interface{}(nil), 2, []as.MapPair{{Key: 1, Value: 4}}, []as.MapPair{{Key: 1, Value: 4}, {Key: 2, Value: 3}, {Key: 3, Value: 2}, {Key: 4, Value: 1}}}}))
+ gm.Expect(cdtMap.Bins).To(gm.Equal(as.BinMap{cdtBinName: as.OpResults{interface{}(nil), 2, []as.MapPair{{Key: 1, Value: 4}}, []as.MapPair{{Key: 1, Value: 4}, {Key: 2, Value: 3}, {Key: 3, Value: 2}, {Key: 4, Value: 1}}}}))
})
@@ -330,12 +330,12 @@ var _ = gg.Describe("CDT Map Test", func() {
)
gm.Expect(err).ToNot(gm.HaveOccurred())
- gm.Expect(cdtMap.Bins[cdtBinName].([]interface{})[0]).To(gm.Equal(1))
- gm.Expect(cdtMap.Bins[cdtBinName].([]interface{})[1]).To(gm.Equal(2))
- gm.Expect(cdtMap.Bins[cdtBinName].([]interface{})[2]).To(gm.Equal(3))
- gm.Expect(cdtMap.Bins[cdtBinName].([]interface{})[3]).To(gm.Equal(4))
- gm.Expect(cdtMap.Bins[cdtBinName].([]interface{})[4]).To(gm.Equal([]as.MapPair{{Key: 3, Value: 2}}))
- gm.Expect(cdtMap.Bins[cdtBinName].([]interface{})[5]).To(gm.ConsistOf([]as.MapPair{{Key: 1, Value: 4}, {Key: 2, Value: 3}, {Key: 3, Value: 2}, {Key: 4, Value: 1}}))
+ gm.Expect(cdtMap.Bins[cdtBinName].(as.OpResults)[0]).To(gm.Equal(1))
+ gm.Expect(cdtMap.Bins[cdtBinName].(as.OpResults)[1]).To(gm.Equal(2))
+ gm.Expect(cdtMap.Bins[cdtBinName].(as.OpResults)[2]).To(gm.Equal(3))
+ gm.Expect(cdtMap.Bins[cdtBinName].(as.OpResults)[3]).To(gm.Equal(4))
+ gm.Expect(cdtMap.Bins[cdtBinName].(as.OpResults)[4]).To(gm.Equal([]as.MapPair{{Key: 3, Value: 2}}))
+ gm.Expect(cdtMap.Bins[cdtBinName].(as.OpResults)[5]).To(gm.ConsistOf([]as.MapPair{{Key: 1, Value: 4}, {Key: 2, Value: 3}, {Key: 3, Value: 2}, {Key: 4, Value: 1}}))
cdtMap, err = client.Operate(wpolicy, key,
as.MapSetPolicyOp(as.NewMapPolicyWithFlagsAndPersistedIndex(as.MapOrder.KEY_ORDERED, as.MapWriteFlagsDefault), cdtBinName),
@@ -346,7 +346,7 @@ var _ = gg.Describe("CDT Map Test", func() {
)
gm.Expect(err).ToNot(gm.HaveOccurred())
- gm.Expect(cdtMap.Bins).To(gm.Equal(as.BinMap{cdtBinName: []interface{}{interface{}(nil), 2, []as.MapPair{{Key: 1, Value: 4}}, []as.MapPair{{Key: 1, Value: 4}, {Key: 2, Value: 3}, {Key: 3, Value: 2}, {Key: 4, Value: 1}}}}))
+ gm.Expect(cdtMap.Bins).To(gm.Equal(as.BinMap{cdtBinName: as.OpResults{interface{}(nil), 2, []as.MapPair{{Key: 1, Value: 4}}, []as.MapPair{{Key: 1, Value: 4}, {Key: 2, Value: 3}, {Key: 3, Value: 2}, {Key: 4, Value: 1}}}}))
})
@@ -399,7 +399,7 @@ var _ = gg.Describe("CDT Map Test", func() {
gm.Expect(err).ToNot(gm.HaveOccurred())
- gm.Expect(cdtMap.Bins).To(gm.Equal(as.BinMap{cdtBinName: []interface{}{
+ gm.Expect(cdtMap.Bins).To(gm.Equal(as.BinMap{cdtBinName: as.OpResults{
[]interface{}{"Harry", "Jim"},
[]as.MapPair{{Key: "Charlie", Value: 55}, {Key: "John", Value: 81}},
55,
@@ -448,7 +448,7 @@ var _ = gg.Describe("CDT Map Test", func() {
gm.Expect(err).ToNot(gm.HaveOccurred())
- gm.Expect(cdtMap.Bins).To(gm.Equal(as.BinMap{cdtBinName: []interface{}{
+ gm.Expect(cdtMap.Bins).To(gm.Equal(as.BinMap{cdtBinName: as.OpResults{
[]interface{}{"Charlie", "John"},
[]as.MapPair{{Key: "Harry", Value: 82}, {Key: "Jim", Value: 98}},
[]interface{}{0, 1, 2, 3},
@@ -490,11 +490,11 @@ var _ = gg.Describe("CDT Map Test", func() {
gm.Expect(err).ToNot(gm.HaveOccurred())
// gm.Expect(cdtMap.Bins).To(gm.Equal(as.BinMap{cdtBinName: []interface{}{7, nil, 98, []interface{}{79, 84}, []interface{}{"Charlie"}}}))
- gm.Expect(cdtMap.Bins[cdtBinName].([]interface{})[0]).To(gm.Equal(7))
- gm.Expect(cdtMap.Bins[cdtBinName].([]interface{})[1]).To(gm.BeNil())
- gm.Expect(cdtMap.Bins[cdtBinName].([]interface{})[2]).To(gm.Equal(98))
- gm.Expect(cdtMap.Bins[cdtBinName].([]interface{})[3]).To(gm.ConsistOf([]interface{}{79, 84}))
- gm.Expect(cdtMap.Bins[cdtBinName].([]interface{})[4]).To(gm.Equal([]interface{}{"Charlie"}))
+ gm.Expect(cdtMap.Bins[cdtBinName].(as.OpResults)[0]).To(gm.Equal(7))
+ gm.Expect(cdtMap.Bins[cdtBinName].(as.OpResults)[1]).To(gm.BeNil())
+ gm.Expect(cdtMap.Bins[cdtBinName].(as.OpResults)[2]).To(gm.Equal(98))
+ gm.Expect(cdtMap.Bins[cdtBinName].(as.OpResults)[3]).To(gm.ConsistOf(as.OpResults{79, 84}))
+ gm.Expect(cdtMap.Bins[cdtBinName].(as.OpResults)[4]).To(gm.Equal([]interface{}{"Charlie"}))
})
})
@@ -522,7 +522,7 @@ var _ = gg.Describe("CDT Map Test", func() {
gm.Expect(err).ToNot(gm.HaveOccurred())
- gm.Expect(cdtMap.Bins).To(gm.Equal(as.BinMap{cdtBinName: []interface{}{7, 2, 2, 2, 1}}))
+ gm.Expect(cdtMap.Bins).To(gm.Equal(as.BinMap{cdtBinName: as.OpResults{7, 2, 2, 2, 1}}))
})
gg.It("should create a valid CDT Map and then execute Clear operations", func() {
@@ -554,7 +554,7 @@ var _ = gg.Describe("CDT Map Test", func() {
)
gm.Expect(err).ToNot(gm.HaveOccurred())
- gm.Expect(cdtMap.Bins).To(gm.Equal(as.BinMap{cdtBinName: []interface{}{nil, 0}}))
+ gm.Expect(cdtMap.Bins).To(gm.Equal(as.BinMap{cdtBinName: as.OpResults{nil, 0}}))
})
gg.It("should create a valid CDT Map and then execute RANK operations", func() {
@@ -604,7 +604,7 @@ var _ = gg.Describe("CDT Map Test", func() {
)
gm.Expect(err).ToNot(gm.HaveOccurred())
- gm.Expect(cdtMap.Bins).To(gm.Equal(as.BinMap{cdtBinName: []interface{}{[]interface{}{"p1"}, []interface{}{"p3", "p2", "p4"}}}))
+ gm.Expect(cdtMap.Bins).To(gm.Equal(as.BinMap{cdtBinName: as.OpResults{[]interface{}{"p1"}, []interface{}{"p3", "p2", "p4"}}}))
})
gg.It("should support MapWriteFlagsPartial & MapWriteFlagsNoFail", func() {
@@ -754,7 +754,7 @@ var _ = gg.Describe("CDT Map Test", func() {
)
gm.Expect(err).ToNot(gm.HaveOccurred())
- gm.Expect(cdtMap.Bins[cdtBinName]).To(gm.Equal([]interface{}{[]interface{}{5, 9}, []interface{}{9}, []interface{}{4, 5, 9}, []interface{}{9}, []interface{}{0, 4, 5, 9}, []interface{}{5}, []interface{}{9}, []interface{}{4}, []interface{}{9}, []interface{}{0}, []interface{}{17}, []interface{}{10, 15, 17}, []interface{}{17}, []interface{}{10}}))
+ gm.Expect(cdtMap.Bins[cdtBinName]).To(gm.Equal(as.OpResults{[]interface{}{5, 9}, []interface{}{9}, []interface{}{4, 5, 9}, []interface{}{9}, []interface{}{0, 4, 5, 9}, []interface{}{5}, []interface{}{9}, []interface{}{4}, []interface{}{9}, []interface{}{0}, []interface{}{17}, []interface{}{10, 15, 17}, []interface{}{17}, []interface{}{10}}))
})
gg.It("should support Relative MapRemove ops", func() {
@@ -787,7 +787,7 @@ var _ = gg.Describe("CDT Map Test", func() {
)
gm.Expect(err).ToNot(gm.HaveOccurred())
- gm.Expect(cdtMap.Bins[cdtBinName]).To(gm.Equal([]interface{}{[]interface{}{15, 10}, []interface{}{}, []interface{}{2}}))
+ gm.Expect(cdtMap.Bins[cdtBinName]).To(gm.Equal(as.OpResults{[]interface{}{15, 10}, []interface{}{}, []interface{}{2}}))
client.Delete(nil, key)
cdtMap, err = client.Operate(wpolicy, key,
@@ -801,7 +801,7 @@ var _ = gg.Describe("CDT Map Test", func() {
as.MapRemoveByValueRelativeRankRangeCountOp(cdtBinName, 11, -1, 1, as.MapReturnType.VALUE),
)
gm.Expect(err).ToNot(gm.HaveOccurred())
- gm.Expect(cdtMap.Bins[cdtBinName]).To(gm.Equal([]interface{}{[]interface{}{17}, []interface{}{10}}))
+ gm.Expect(cdtMap.Bins[cdtBinName]).To(gm.Equal(as.OpResults{[]interface{}{17}, []interface{}{10}}))
})
gg.It("should support Nested Map ops", func() {
@@ -826,7 +826,7 @@ var _ = gg.Describe("CDT Map Test", func() {
record, err = client.Operate(wpolicy, key, as.MapPutOp(as.DefaultMapPolicy(), cdtBinName, as.StringValue("key21"), as.IntegerValue(11), as.CtxMapKey(as.StringValue("key2"))), as.GetBinOp(cdtBinName))
gm.Expect(err).ToNot(gm.HaveOccurred())
- gm.Expect(record.Bins[cdtBinName]).To(gm.Equal([]interface{}{
+ gm.Expect(record.Bins[cdtBinName]).To(gm.Equal(as.OpResults{
2,
map[interface{}]interface{}{
"key1": map[interface{}]interface{}{
@@ -861,7 +861,7 @@ var _ = gg.Describe("CDT Map Test", func() {
record, err = client.Operate(wpolicy, key, as.MapPutOp(as.DefaultMapPolicy(), cdtBinName, as.StringValue("key121"), as.IntegerValue(11), as.CtxMapKey(as.StringValue("key1")), as.CtxMapRank(-1)), as.GetBinOp(cdtBinName))
gm.Expect(err).ToNot(gm.HaveOccurred())
- gm.Expect(record.Bins[cdtBinName]).To(gm.Equal([]interface{}{
+ gm.Expect(record.Bins[cdtBinName]).To(gm.Equal(as.OpResults{
1,
map[interface{}]interface{}{
"key1": map[interface{}]interface{}{
@@ -935,7 +935,7 @@ var _ = gg.Describe("CDT Map Test", func() {
gm.Expect(err).ToNot(gm.HaveOccurred())
- results := record.Bins[cdtBinName].([]interface{})
+ results := record.Bins[cdtBinName].(as.OpResults)
count := results[1]
gm.Expect(count).To(gm.Equal(1))
diff --git a/client.go b/client.go
index 60062cbb..ee94be51 100644
--- a/client.go
+++ b/client.go
@@ -26,8 +26,8 @@ import (
"strings"
"time"
- "github.com/aerospike/aerospike-client-go/v7/logger"
- "github.com/aerospike/aerospike-client-go/v7/types"
+ "github.com/aerospike/aerospike-client-go/v8/logger"
+ "github.com/aerospike/aerospike-client-go/v8/types"
)
const unreachable = "UNREACHABLE"
@@ -61,6 +61,11 @@ type Client struct {
DefaultAdminPolicy *AdminPolicy
// DefaultInfoPolicy is used for all info commands without a specific policy.
DefaultInfoPolicy *InfoPolicy
+ // Default multi-record transaction (MRT) policy when verifying record versions in a batch on a commit.
+ DefaultTxnVerifyPolicy *TxnVerifyPolicy
+ // Default multi-record transaction (MRT) policy when rolling the transaction records forward (commit)
+ // or back (abort) in a batch.
+ DefaultTxnRollPolicy *TxnRollPolicy
}
func clientFinalizer(f *Client) {
@@ -124,6 +129,8 @@ func NewClientWithPolicyAndHost(policy *ClientPolicy, hosts ...*Host) (*Client,
DefaultQueryPolicy: NewQueryPolicy(),
DefaultAdminPolicy: NewAdminPolicy(),
DefaultInfoPolicy: NewInfoPolicy(),
+ DefaultTxnVerifyPolicy: NewTxnVerifyPolicy(),
+ DefaultTxnRollPolicy: NewTxnRollPolicy(),
}
runtime.SetFinalizer(client, clientFinalizer)
@@ -134,116 +141,136 @@ func NewClientWithPolicyAndHost(policy *ClientPolicy, hosts ...*Host) (*Client,
// Policy methods
//-------------------------------------------------------
-// DefaultPolicy returns corresponding default policy from the client
+// GetDefaultPolicy returns corresponding default policy from the client
func (clnt *Client) GetDefaultPolicy() *BasePolicy {
return clnt.DefaultPolicy
}
-// DefaultBatchPolicy returns corresponding default policy from the client
+// GetDefaultBatchPolicy returns corresponding default policy from the client
func (clnt *Client) GetDefaultBatchPolicy() *BatchPolicy {
return clnt.DefaultBatchPolicy
}
-// DefaultBatchWritePolicy returns corresponding default policy from the client
+// GetDefaultBatchWritePolicy returns corresponding default policy from the client
func (clnt *Client) GetDefaultBatchWritePolicy() *BatchWritePolicy {
return clnt.DefaultBatchWritePolicy
}
-// DefaultBatchReadPolicy returns corresponding default policy from the client
+// GetDefaultBatchReadPolicy returns corresponding default policy from the client
func (clnt *Client) GetDefaultBatchReadPolicy() *BatchReadPolicy {
return clnt.DefaultBatchReadPolicy
}
-// DefaultBatchDeletePolicy returns corresponding default policy from the client
+// GetDefaultBatchDeletePolicy returns corresponding default policy from the client
func (clnt *Client) GetDefaultBatchDeletePolicy() *BatchDeletePolicy {
return clnt.DefaultBatchDeletePolicy
}
-// DefaultBatchUDFPolicy returns corresponding default policy from the client
+// GetDefaultBatchUDFPolicy returns corresponding default policy from the client
func (clnt *Client) GetDefaultBatchUDFPolicy() *BatchUDFPolicy {
return clnt.DefaultBatchUDFPolicy
}
-// DefaultWritePolicy returns corresponding default policy from the client
+// GetDefaultWritePolicy returns corresponding default policy from the client
func (clnt *Client) GetDefaultWritePolicy() *WritePolicy {
return clnt.DefaultWritePolicy
}
-// DefaultScanPolicy returns corresponding default policy from the client
+// GetDefaultScanPolicy returns corresponding default policy from the client
func (clnt *Client) GetDefaultScanPolicy() *ScanPolicy {
return clnt.DefaultScanPolicy
}
-// DefaultQueryPolicy returns corresponding default policy from the client
+// GetDefaultQueryPolicy returns corresponding default policy from the client
func (clnt *Client) GetDefaultQueryPolicy() *QueryPolicy {
return clnt.DefaultQueryPolicy
}
-// DefaultAdminPolicy returns corresponding default policy from the client
+// GetDefaultAdminPolicy returns corresponding default policy from the client
func (clnt *Client) GetDefaultAdminPolicy() *AdminPolicy {
return clnt.DefaultAdminPolicy
}
-// DefaultInfoPolicy returns corresponding default policy from the client
+// GetDefaultInfoPolicy returns corresponding default policy from the client
func (clnt *Client) GetDefaultInfoPolicy() *InfoPolicy {
return clnt.DefaultInfoPolicy
}
-// DefaultPolicy returns corresponding default policy from the client
+// GetDefaultTxnVerifyPolicy returns corresponding default policy from the client
+func (clnt *Client) GetDefaultTxnVerifyPolicy() *TxnVerifyPolicy {
+ return clnt.DefaultTxnVerifyPolicy
+}
+
+// GetDefaultTxnRollPolicy returns corresponding default policy from the client
+func (clnt *Client) GetDefaultTxnRollPolicy() *TxnRollPolicy {
+ return clnt.DefaultTxnRollPolicy
+}
+
+// SetDefaultPolicy sets corresponding default policy on the client
func (clnt *Client) SetDefaultPolicy(policy *BasePolicy) {
clnt.DefaultPolicy = policy
}
-// DefaultBatchPolicy returns corresponding default policy from the client
+// SetDefaultBatchPolicy sets corresponding default policy on the client
func (clnt *Client) SetDefaultBatchPolicy(policy *BatchPolicy) {
clnt.DefaultBatchPolicy = policy
}
-// DefaultBatchWritePolicy returns corresponding default policy from the client
+// SetDefaultBatchWritePolicy sets corresponding default policy on the client
func (clnt *Client) SetDefaultBatchWritePolicy(policy *BatchWritePolicy) {
clnt.DefaultBatchWritePolicy = policy
}
-// DefaultBatchReadPolicy returns corresponding default policy from the client
+// SetDefaultBatchReadPolicy sets corresponding default policy on the client
func (clnt *Client) SetDefaultBatchReadPolicy(policy *BatchReadPolicy) {
clnt.DefaultBatchReadPolicy = policy
}
-// DefaultBatchDeletePolicy returns corresponding default policy from the client
+// SetDefaultBatchDeletePolicy sets corresponding default policy on the client
func (clnt *Client) SetDefaultBatchDeletePolicy(policy *BatchDeletePolicy) {
clnt.DefaultBatchDeletePolicy = policy
}
-// DefaultBatchUDFPolicy returns corresponding default policy from the client
+// SetDefaultBatchUDFPolicy sets corresponding default policy on the client
func (clnt *Client) SetDefaultBatchUDFPolicy(policy *BatchUDFPolicy) {
clnt.DefaultBatchUDFPolicy = policy
}
-// DefaultWritePolicy returns corresponding default policy from the client
+// SetDefaultWritePolicy sets corresponding default policy on the client
func (clnt *Client) SetDefaultWritePolicy(policy *WritePolicy) {
clnt.DefaultWritePolicy = policy
}
-// DefaultScanPolicy returns corresponding default policy from the client
+// SetDefaultScanPolicy sets corresponding default policy on the client
func (clnt *Client) SetDefaultScanPolicy(policy *ScanPolicy) {
clnt.DefaultScanPolicy = policy
}
-// DefaultQueryPolicy returns corresponding default policy from the client
+// SetDefaultQueryPolicy sets corresponding default policy on the client
func (clnt *Client) SetDefaultQueryPolicy(policy *QueryPolicy) {
clnt.DefaultQueryPolicy = policy
}
-// DefaultAdminPolicy returns corresponding default policy from the client
+// SetDefaultAdminPolicy sets corresponding default policy on the client
func (clnt *Client) SetDefaultAdminPolicy(policy *AdminPolicy) {
clnt.DefaultAdminPolicy = policy
}
-// DefaultInfoPolicy returns corresponding default policy from the client
+// SetDefaultInfoPolicy sets corresponding default policy on the client
func (clnt *Client) SetDefaultInfoPolicy(policy *InfoPolicy) {
clnt.DefaultInfoPolicy = policy
}
+// SetDefaultTxnVerifyPolicy sets corresponding default policy on the client
+func (clnt *Client) SetDefaultTxnVerifyPolicy(policy *TxnVerifyPolicy) {
+ clnt.DefaultTxnVerifyPolicy = policy
+}
+
+// SetDefaultTxnRollPolicy sets corresponding default policy on the client
+func (clnt *Client) SetDefaultTxnRollPolicy(policy *TxnRollPolicy) {
+ clnt.DefaultTxnRollPolicy = policy
+}
+
//-------------------------------------------------------
// Cluster Connection Management
//-------------------------------------------------------
@@ -278,12 +305,32 @@ func (clnt *Client) GetNodeNames() []string {
// Write Record Operations
//-------------------------------------------------------
+// PutPayload writes the raw write/delete payload to the server.
+// The policy specifies the transaction timeout.
+// If the policy is nil, the default relevant policy will be used.
+func (clnt *Client) PutPayload(policy *WritePolicy, key *Key, payload []byte) Error {
+ policy = clnt.getUsableWritePolicy(policy)
+ command, err := newWritePayloadCommand(clnt.cluster, policy, key, payload)
+ if err != nil {
+ return err
+ }
+
+ return command.Execute()
+}
+
// Put writes record bin(s) to the server.
-// The policy specifies the transaction timeout, record expiration and how the transaction is
+// The policy specifies the command timeout, record expiration and how the command is
// handled when the record already exists.
// If the policy is nil, the default relevant policy will be used.
func (clnt *Client) Put(policy *WritePolicy, key *Key, binMap BinMap) Error {
policy = clnt.getUsableWritePolicy(policy)
+
+ if policy.Txn != nil {
+ if err := txnMonitor.addKey(clnt.cluster, policy, key); err != nil {
+ return err
+ }
+ }
+
command, err := newWriteCommand(clnt.cluster, policy, key, nil, binMap, _WRITE)
if err != nil {
return err
@@ -293,12 +340,19 @@ func (clnt *Client) Put(policy *WritePolicy, key *Key, binMap BinMap) Error {
}
// PutBins writes record bin(s) to the server.
-// The policy specifies the transaction timeout, record expiration and how the transaction is
+// The policy specifies the command timeout, record expiration and how the command is
// handled when the record already exists.
// This method avoids using the BinMap allocation and iteration and is lighter on GC.
// If the policy is nil, the default relevant policy will be used.
func (clnt *Client) PutBins(policy *WritePolicy, key *Key, bins ...*Bin) Error {
policy = clnt.getUsableWritePolicy(policy)
+
+ if policy.Txn != nil {
+ if err := txnMonitor.addKey(clnt.cluster, policy, key); err != nil {
+ return err
+ }
+ }
+
command, err := newWriteCommand(clnt.cluster, policy, key, bins, nil, _WRITE)
if err != nil {
return err
@@ -312,12 +366,19 @@ func (clnt *Client) PutBins(policy *WritePolicy, key *Key, bins ...*Bin) Error {
//-------------------------------------------------------
// Append appends bin value's string to existing record bin values.
-// The policy specifies the transaction timeout, record expiration and how the transaction is
+// The policy specifies the command timeout, record expiration and how the command is
// handled when the record already exists.
// This call only works for string and []byte values.
// If the policy is nil, the default relevant policy will be used.
func (clnt *Client) Append(policy *WritePolicy, key *Key, binMap BinMap) Error {
policy = clnt.getUsableWritePolicy(policy)
+
+ if policy.Txn != nil {
+ if err := txnMonitor.addKey(clnt.cluster, policy, key); err != nil {
+ return err
+ }
+ }
+
command, err := newWriteCommand(clnt.cluster, policy, key, nil, binMap, _APPEND)
if err != nil {
return err
@@ -329,6 +390,13 @@ func (clnt *Client) Append(policy *WritePolicy, key *Key, binMap BinMap) Error {
// AppendBins works the same as Append, but avoids BinMap allocation and iteration.
func (clnt *Client) AppendBins(policy *WritePolicy, key *Key, bins ...*Bin) Error {
policy = clnt.getUsableWritePolicy(policy)
+
+ if policy.Txn != nil {
+ if err := txnMonitor.addKey(clnt.cluster, policy, key); err != nil {
+ return err
+ }
+ }
+
command, err := newWriteCommand(clnt.cluster, policy, key, bins, nil, _APPEND)
if err != nil {
return err
@@ -338,12 +406,19 @@ func (clnt *Client) AppendBins(policy *WritePolicy, key *Key, bins ...*Bin) Erro
}
// Prepend prepends bin value's string to existing record bin values.
-// The policy specifies the transaction timeout, record expiration and how the transaction is
+// The policy specifies the command timeout, record expiration and how the command is
// handled when the record already exists.
// This call works only for string and []byte values.
// If the policy is nil, the default relevant policy will be used.
func (clnt *Client) Prepend(policy *WritePolicy, key *Key, binMap BinMap) Error {
policy = clnt.getUsableWritePolicy(policy)
+
+ if policy.Txn != nil {
+ if err := txnMonitor.addKey(clnt.cluster, policy, key); err != nil {
+ return err
+ }
+ }
+
command, err := newWriteCommand(clnt.cluster, policy, key, nil, binMap, _PREPEND)
if err != nil {
return err
@@ -355,6 +430,13 @@ func (clnt *Client) Prepend(policy *WritePolicy, key *Key, binMap BinMap) Error
// PrependBins works the same as Prepend, but avoids BinMap allocation and iteration.
func (clnt *Client) PrependBins(policy *WritePolicy, key *Key, bins ...*Bin) Error {
policy = clnt.getUsableWritePolicy(policy)
+
+ if policy.Txn != nil {
+ if err := txnMonitor.addKey(clnt.cluster, policy, key); err != nil {
+ return err
+ }
+ }
+
command, err := newWriteCommand(clnt.cluster, policy, key, bins, nil, _PREPEND)
if err != nil {
return err
@@ -368,12 +450,19 @@ func (clnt *Client) PrependBins(policy *WritePolicy, key *Key, bins ...*Bin) Err
//-------------------------------------------------------
// Add adds integer bin values to existing record bin values.
-// The policy specifies the transaction timeout, record expiration and how the transaction is
+// The policy specifies the command timeout, record expiration and how the command is
// handled when the record already exists.
// This call only works for integer values.
// If the policy is nil, the default relevant policy will be used.
func (clnt *Client) Add(policy *WritePolicy, key *Key, binMap BinMap) Error {
policy = clnt.getUsableWritePolicy(policy)
+
+ if policy.Txn != nil {
+ if err := txnMonitor.addKey(clnt.cluster, policy, key); err != nil {
+ return err
+ }
+ }
+
command, err := newWriteCommand(clnt.cluster, policy, key, nil, binMap, _ADD)
if err != nil {
return err
@@ -385,6 +474,13 @@ func (clnt *Client) Add(policy *WritePolicy, key *Key, binMap BinMap) Error {
// AddBins works the same as Add, but avoids BinMap allocation and iteration.
func (clnt *Client) AddBins(policy *WritePolicy, key *Key, bins ...*Bin) Error {
policy = clnt.getUsableWritePolicy(policy)
+
+ if policy.Txn != nil {
+ if err := txnMonitor.addKey(clnt.cluster, policy, key); err != nil {
+ return err
+ }
+ }
+
command, err := newWriteCommand(clnt.cluster, policy, key, bins, nil, _ADD)
if err != nil {
return err
@@ -398,10 +494,17 @@ func (clnt *Client) AddBins(policy *WritePolicy, key *Key, bins ...*Bin) Error {
//-------------------------------------------------------
// Delete deletes a record for specified key.
-// The policy specifies the transaction timeout.
+// The policy specifies the command timeout.
// If the policy is nil, the default relevant policy will be used.
func (clnt *Client) Delete(policy *WritePolicy, key *Key) (bool, Error) {
policy = clnt.getUsableWritePolicy(policy)
+
+ if policy.Txn != nil {
+ if err := txnMonitor.addKey(clnt.cluster, policy, key); err != nil {
+ return false, err
+ }
+ }
+
command, err := newDeleteCommand(clnt.cluster, policy, key)
if err != nil {
return false, err
@@ -418,9 +521,17 @@ func (clnt *Client) Delete(policy *WritePolicy, key *Key) (bool, Error) {
// Touch updates a record's metadata.
// If the record exists, the record's TTL will be reset to the
// policy's expiration.
+// If the record does not exist, it can't be created because the server deletes empty records.
// If the record doesn't exist, it will return an error.
func (clnt *Client) Touch(policy *WritePolicy, key *Key) Error {
policy = clnt.getUsableWritePolicy(policy)
+
+ if policy.Txn != nil {
+ if err := txnMonitor.addKey(clnt.cluster, policy, key); err != nil {
+ return err
+ }
+ }
+
command, err := newTouchCommand(clnt.cluster, policy, key)
if err != nil {
return err
@@ -438,6 +549,13 @@ func (clnt *Client) Touch(policy *WritePolicy, key *Key) Error {
// If the policy is nil, the default relevant policy will be used.
func (clnt *Client) Exists(policy *BasePolicy, key *Key) (bool, Error) {
policy = clnt.getUsablePolicy(policy)
+
+ if policy.Txn != nil {
+ if err := policy.Txn.prepareRead(key.namespace); err != nil {
+ return false, err
+ }
+ }
+
command, err := newExistsCommand(clnt.cluster, policy, key)
if err != nil {
return false, err
@@ -454,6 +572,12 @@ func (clnt *Client) Exists(policy *BasePolicy, key *Key) (bool, Error) {
func (clnt *Client) BatchExists(policy *BatchPolicy, keys []*Key) ([]bool, Error) {
policy = clnt.getUsableBatchPolicy(policy)
+ if policy.Txn != nil {
+ if err := policy.Txn.prepareReadForKeys(keys); err != nil {
+ return nil, err
+ }
+ }
+
// same array can be used without synchronization;
// when a key exists, the corresponding index will be marked true
existsArray := make([]bool, len(keys))
@@ -487,7 +611,13 @@ func (clnt *Client) BatchExists(policy *BatchPolicy, keys []*Key) ([]bool, Error
func (clnt *Client) Get(policy *BasePolicy, key *Key, binNames ...string) (*Record, Error) {
policy = clnt.getUsablePolicy(policy)
- command, err := newReadCommand(clnt.cluster, policy, key, binNames, nil)
+ if policy.Txn != nil {
+ if err := policy.Txn.prepareRead(key.namespace); err != nil {
+ return nil, err
+ }
+ }
+
+ command, err := newReadCommand(clnt.cluster, policy, key, binNames)
if err != nil {
return nil, err
}
@@ -505,6 +635,12 @@ func (clnt *Client) Get(policy *BasePolicy, key *Key, binNames ...string) (*Reco
func (clnt *Client) GetHeader(policy *BasePolicy, key *Key) (*Record, Error) {
policy = clnt.getUsablePolicy(policy)
+ if policy.Txn != nil {
+ if err := policy.Txn.prepareRead(key.namespace); err != nil {
+ return nil, err
+ }
+ }
+
command, err := newReadHeaderCommand(clnt.cluster, policy, key)
if err != nil {
return nil, err
@@ -528,6 +664,12 @@ func (clnt *Client) GetHeader(policy *BasePolicy, key *Key) (*Record, Error) {
func (clnt *Client) BatchGet(policy *BatchPolicy, keys []*Key, binNames ...string) ([]*Record, Error) {
policy = clnt.getUsableBatchPolicy(policy)
+ if policy.Txn != nil {
+ if err := policy.Txn.prepareReadForKeys(keys); err != nil {
+ return nil, err
+ }
+ }
+
// same array can be used without synchronization;
// when a key exists, the corresponding index will be set to record
records := make([]*Record, len(keys))
@@ -537,7 +679,12 @@ func (clnt *Client) BatchGet(policy *BatchPolicy, keys []*Key, binNames ...strin
return nil, err
}
- cmd := newBatchCommandGet(clnt, nil, policy, keys, binNames, nil, records, _INFO1_READ, false)
+ rattr := _INFO1_READ
+ if len(binNames) == 0 {
+ rattr = rattr | _INFO1_GET_ALL
+ }
+
+ cmd := newBatchCommandGet(clnt, nil, policy, keys, binNames, nil, records, rattr, false)
filteredOut, err := clnt.batchExecute(policy, batchNodes, cmd)
if err != nil && !policy.AllowPartialResults {
return nil, err
@@ -558,6 +705,12 @@ func (clnt *Client) BatchGet(policy *BatchPolicy, keys []*Key, binNames ...strin
func (clnt *Client) BatchGetOperate(policy *BatchPolicy, keys []*Key, ops ...*Operation) ([]*Record, Error) {
policy = clnt.getUsableBatchPolicy(policy)
+ if policy.Txn != nil {
+ if err := policy.Txn.prepareReadForKeys(keys); err != nil {
+ return nil, err
+ }
+ }
+
// same array can be used without synchronization;
// when a key exists, the corresponding index will be set to record
records := make([]*Record, len(keys))
@@ -589,6 +742,12 @@ func (clnt *Client) BatchGetOperate(policy *BatchPolicy, keys []*Key, ops ...*Op
func (clnt *Client) BatchGetComplex(policy *BatchPolicy, records []*BatchRead) Error {
policy = clnt.getUsableBatchPolicy(policy)
+ if policy.Txn != nil {
+ if err := policy.Txn.prepareBatchReads(records); err != nil {
+ return err
+ }
+ }
+
cmd := newBatchIndexCommandGet(clnt, nil, policy, records, true)
batchNodes, err := newBatchIndexNodeList(clnt.cluster, policy, records)
@@ -596,7 +755,7 @@ func (clnt *Client) BatchGetComplex(policy *BatchPolicy, records []*BatchRead) E
return err
}
- filteredOut, err := clnt.batchExecute(policy, batchNodes, cmd)
+ filteredOut, err := clnt.batchExecute(policy, batchNodes, &cmd)
if err != nil && !policy.AllowPartialResults {
return err
}
@@ -616,6 +775,12 @@ func (clnt *Client) BatchGetComplex(policy *BatchPolicy, records []*BatchRead) E
func (clnt *Client) BatchGetHeader(policy *BatchPolicy, keys []*Key) ([]*Record, Error) {
policy = clnt.getUsableBatchPolicy(policy)
+ if policy.Txn != nil {
+ if err := policy.Txn.prepareReadForKeys(keys); err != nil {
+ return nil, err
+ }
+ }
+
// same array can be used without synchronization;
// when a key exists, the corresponding index will be set to record
records := make([]*Record, len(keys))
@@ -646,6 +811,12 @@ func (clnt *Client) BatchDelete(policy *BatchPolicy, deletePolicy *BatchDeletePo
policy = clnt.getUsableBatchPolicy(policy)
deletePolicy = clnt.getUsableBatchDeletePolicy(deletePolicy)
+ if policy.Txn != nil {
+ if err := txnMonitor.addKeys(clnt.cluster, policy, keys); err != nil {
+ return nil, err
+ }
+ }
+
attr := &batchAttr{}
attr.setBatchDelete(deletePolicy)
@@ -676,13 +847,19 @@ func (clnt *Client) BatchDelete(policy *BatchPolicy, deletePolicy *BatchDeletePo
func (clnt *Client) BatchOperate(policy *BatchPolicy, records []BatchRecordIfc) Error {
policy = clnt.getUsableBatchPolicy(policy)
+ if policy.Txn != nil {
+ if err := txnMonitor.addKeysFromRecords(clnt.cluster, policy, records); err != nil {
+ return err
+ }
+ }
+
batchNodes, err := newBatchOperateNodeListIfc(clnt.cluster, policy, records)
if err != nil && policy.RespondAllKeys {
return err
}
cmd := newBatchCommandOperate(clnt, nil, policy, records)
- _, err = clnt.batchExecute(policy, batchNodes, cmd)
+ _, err = clnt.batchExecute(policy, batchNodes, &cmd)
return err
}
@@ -697,6 +874,12 @@ func (clnt *Client) BatchExecute(policy *BatchPolicy, udfPolicy *BatchUDFPolicy,
policy = clnt.getUsableBatchPolicy(policy)
udfPolicy = clnt.getUsableBatchUDFPolicy(udfPolicy)
+ if policy.Txn != nil {
+ if err := txnMonitor.addKeys(clnt.cluster, policy, keys); err != nil {
+ return nil, err
+ }
+ }
+
attr := &batchAttr{}
attr.setBatchUDF(udfPolicy)
@@ -727,26 +910,48 @@ func (clnt *Client) BatchExecute(policy *BatchPolicy, udfPolicy *BatchUDFPolicy,
//
// If the policy is nil, the default relevant policy will be used.
func (clnt *Client) Operate(policy *WritePolicy, key *Key, operations ...*Operation) (*Record, Error) {
- return clnt.operate(policy, key, false, operations...)
-}
-
-// useOpResults is used in batch single nodes commands and should be true to return the right type for BatchOperate results
-func (clnt *Client) operate(policy *WritePolicy, key *Key, useOpResults bool, operations ...*Operation) (*Record, Error) {
// TODO: Remove this method in the next major release.
policy = clnt.getUsableWritePolicy(policy)
args, err := newOperateArgs(clnt.cluster, policy, key, operations)
if err != nil {
return nil, err
}
- command, err := newOperateCommand(clnt.cluster, policy, key, args, useOpResults)
- if err != nil {
- return nil, err
- }
- if err := command.Execute(); err != nil {
- return nil, err
+ policy = args.writePolicy
+
+ if args.hasWrite {
+ if policy.Txn != nil {
+ if err := txnMonitor.addKey(clnt.cluster, policy, key); err != nil {
+ return nil, err
+ }
+ }
+
+ command, err := newOperateCommandWrite(clnt.cluster, key, args)
+ if err != nil {
+ return nil, err
+ }
+
+ if err := command.Execute(); err != nil {
+ return nil, err
+ }
+ return command.GetRecord(), nil
+ } else {
+ if policy.Txn != nil {
+ if err := policy.Txn.prepareRead(key.namespace); err != nil {
+ return nil, err
+ }
+ }
+
+ command, err := newOperateCommandRead(clnt.cluster, key, args)
+ if err != nil {
+ return nil, err
+ }
+
+ if err := command.Execute(); err != nil {
+ return nil, err
+ }
+ return command.GetRecord(), nil
}
- return command.GetRecord(), nil
}
//-------------------------------------------------------
@@ -972,7 +1177,7 @@ func (clnt *Client) ListUDF(policy *BasePolicy) ([]*UDF, Error) {
//
// This method is only supported by Aerospike 3+ servers.
// If the policy is nil, the default relevant policy will be used.
-func (clnt *Client) Execute(policy *WritePolicy, key *Key, packageName string, functionName string, args ...Value) (interface{}, Error) {
+func (clnt *Client) Execute(policy *WritePolicy, key *Key, packageName string, functionName string, args ...Value) (any, Error) {
record, err := clnt.execute(policy, key, packageName, functionName, args...)
if err != nil {
return nil, err
@@ -995,6 +1200,13 @@ func (clnt *Client) Execute(policy *WritePolicy, key *Key, packageName string, f
func (clnt *Client) execute(policy *WritePolicy, key *Key, packageName string, functionName string, args ...Value) (*Record, Error) {
policy = clnt.getUsableWritePolicy(policy)
+
+ if policy.Txn != nil {
+ if err := txnMonitor.addKey(clnt.cluster, policy, key); err != nil {
+ return nil, err
+ }
+ }
+
command, err := newExecuteCommand(clnt.cluster, policy, key, packageName, functionName, NewValueArray(args))
if err != nil {
return nil, err
@@ -1237,6 +1449,54 @@ func (clnt *Client) queryNodePartitions(policy *QueryPolicy, node *Node, stateme
return res, nil
}
+//-------------------------------------------------------
+// Multi-Record Transactions
+//-------------------------------------------------------
+
+// Commit attempts to commit the given multi-record transaction. First, the expected record
+// versions are sent to the server nodes for verification. If all nodes return success, the
+// transaction is committed. Otherwise, the transaction is aborted.
+//
+// Requires server version 8.0+
+func (clnt *Client) Commit(txn *Txn) (CommitStatus, Error) {
+ tr := NewTxnRoll(clnt, txn)
+
+ switch txn.State() {
+ default:
+ fallthrough
+ case TxnStateOpen:
+ if err := tr.Verify(&clnt.GetDefaultTxnVerifyPolicy().BatchPolicy, &clnt.GetDefaultTxnRollPolicy().BatchPolicy); err != nil {
+ return CommitStatusUnverified, err
+ }
+ return tr.Commit(&clnt.GetDefaultTxnRollPolicy().BatchPolicy)
+ case TxnStateVerified:
+ return tr.Commit(&clnt.GetDefaultTxnRollPolicy().BatchPolicy)
+ case TxnStateCommitted:
+ return CommitStatusAlreadyCommitted, nil
+ case TxnStateAborted:
+ return CommitStatusAlreadyAborted, nil
+ }
+}
+
+// Abort aborts and rolls back the given multi-record transaction.
+//
+// Requires server version 8.0+
+func (clnt *Client) Abort(txn *Txn) (AbortStatus, Error) {
+ tr := NewTxnRoll(clnt, txn)
+ switch txn.State() {
+ default:
+ fallthrough
+ case TxnStateOpen:
+ fallthrough
+ case TxnStateVerified:
+ return tr.Abort(&clnt.GetDefaultTxnRollPolicy().BatchPolicy)
+ case TxnStateCommitted:
+ return AbortStatusAlreadyCommitted, nil
+ case TxnStateAborted:
+ return AbortStatusAlreadyAborted, nil
+ }
+}
+
//--------------------------------------------------------
// Index functions (Supported by Aerospike 3+ servers only)
//--------------------------------------------------------
@@ -1729,20 +1989,20 @@ func (clnt *Client) MetricsEnabled() bool {
return clnt.cluster.MetricsEnabled()
}
-// EnableMetrics enables the cluster transaction metrics gathering.
+// EnableMetrics enables the cluster command metrics gathering.
// If the parameters for the histogram in the policy are the different from the one already
// on the cluster, the metrics will be reset.
func (clnt *Client) EnableMetrics(policy *MetricsPolicy) {
clnt.cluster.EnableMetrics(policy)
}
-// DisableMetrics disables the cluster transaction metrics gathering.
+// DisableMetrics disables the cluster command metrics gathering.
func (clnt *Client) DisableMetrics() {
clnt.cluster.DisableMetrics()
}
// Stats returns internal statistics regarding the inner state of the client and the cluster.
-func (clnt *Client) Stats() (map[string]interface{}, Error) {
+func (clnt *Client) Stats() (map[string]any, Error) {
resStats := clnt.cluster.statsCopy()
clusterStats := *newNodeStats(clnt.cluster.MetricsPolicy())
@@ -1757,7 +2017,7 @@ func (clnt *Client) Stats() (map[string]interface{}, Error) {
return nil, newCommonError(err)
}
- res := map[string]interface{}{}
+ res := map[string]any{}
err = json.Unmarshal(b, &res)
if err != nil {
return nil, newCommonError(err)
@@ -1766,7 +2026,7 @@ func (clnt *Client) Stats() (map[string]interface{}, Error) {
res["open-connections"] = clusterStats.ConnectionsOpen.Get()
res["total-nodes"] = len(clnt.cluster.GetNodes())
- aggstats := res["cluster-aggregated-stats"].(map[string]interface{})
+ aggstats := res["cluster-aggregated-stats"].(map[string]any)
aggstats["exceeded-max-retries"] = clnt.cluster.maxRetriesExceededCount.Get()
aggstats["exceeded-total-timeout"] = clnt.cluster.totalTimeoutExceededCount.Get()
diff --git a/client_appengine_exclusions.go b/client_appengine_exclusions.go
index 180445cf..93ba5e14 100644
--- a/client_appengine_exclusions.go
+++ b/client_appengine_exclusions.go
@@ -22,8 +22,8 @@ import (
"golang.org/x/sync/semaphore"
- lualib "github.com/aerospike/aerospike-client-go/v7/internal/lua"
- "github.com/aerospike/aerospike-client-go/v7/logger"
+ lualib "github.com/aerospike/aerospike-client-go/v8/internal/lua"
+ "github.com/aerospike/aerospike-client-go/v8/logger"
lua "github.com/yuin/gopher-lua"
)
diff --git a/client_builder_native.go b/client_builder_native.go
deleted file mode 100644
index e3f54d14..00000000
--- a/client_builder_native.go
+++ /dev/null
@@ -1,36 +0,0 @@
-//go:build !as_proxy
-
-// Copyright 2014-2022 Aerospike, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package aerospike
-
-import "github.com/aerospike/aerospike-client-go/v7/types"
-
-// CreateClientWithPolicyAndHost generates a new Client of the specified type
-// with the specified ClientPolicy and sets up the cluster using the provided hosts.
-// If the policy is nil, the default relevant policy will be used.
-func CreateClientWithPolicyAndHost(typ ClientType, policy *ClientPolicy, hosts ...*Host) (ClientIfc, Error) {
- if len(hosts) == 0 {
- return nil, newError(types.SERVER_NOT_AVAILABLE, "No hosts were provided")
- }
-
- switch typ {
- case CTNative:
- return NewClientWithPolicyAndHost(policy, hosts...)
- case CTProxy:
- return nil, newError(types.GRPC_ERROR, "Proxy client mode not enabled. Pass -tags as_proxy during build")
- }
- return nil, newError(types.SERVER_NOT_AVAILABLE, "Invalid client type")
-}
diff --git a/client_builder_proxy.go b/client_builder_proxy.go
deleted file mode 100644
index de7f58bd..00000000
--- a/client_builder_proxy.go
+++ /dev/null
@@ -1,39 +0,0 @@
-//go:build as_proxy
-
-// Copyright 2014-2022 Aerospike, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package aerospike
-
-import "github.com/aerospike/aerospike-client-go/v7/types"
-
-// CreateClientWithPolicyAndHost generates a new Client of the specified type
-// with the specified ClientPolicy and sets up the cluster using the provided hosts.
-// If the policy is nil, the default relevant policy will be used.
-func CreateClientWithPolicyAndHost(typ ClientType, policy *ClientPolicy, hosts ...*Host) (ClientIfc, Error) {
- if len(hosts) == 0 {
- return nil, newError(types.SERVER_NOT_AVAILABLE, "No hosts were provided")
- }
-
- switch typ {
- case CTNative:
- return NewClientWithPolicyAndHost(policy, hosts...)
- case CTProxy:
- if len(hosts) > 1 {
- return nil, newError(types.GRPC_ERROR, "Only one proxy host is acceptable")
- }
- return NewProxyClientWithPolicyAndHost(policy, hosts[0])
- }
- return nil, newError(types.SERVER_NOT_AVAILABLE, "Invalid client type")
-}
diff --git a/client_ifc.go b/client_ifc.go
deleted file mode 100644
index 7cfe2a1a..00000000
--- a/client_ifc.go
+++ /dev/null
@@ -1,138 +0,0 @@
-//go:build !as_performance && !app_engine
-
-// Copyright 2014-2022 Aerospike, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package aerospike
-
-import (
- "time"
-)
-
-// ClientIfc abstracts an Aerospike cluster.
-type ClientIfc interface {
- Add(policy *WritePolicy, key *Key, binMap BinMap) Error
- AddBins(policy *WritePolicy, key *Key, bins ...*Bin) Error
- Append(policy *WritePolicy, key *Key, binMap BinMap) Error
- AppendBins(policy *WritePolicy, key *Key, bins ...*Bin) Error
- BatchDelete(policy *BatchPolicy, deletePolicy *BatchDeletePolicy, keys []*Key) ([]*BatchRecord, Error)
- BatchExecute(policy *BatchPolicy, udfPolicy *BatchUDFPolicy, keys []*Key, packageName string, functionName string, args ...Value) ([]*BatchRecord, Error)
- BatchExists(policy *BatchPolicy, keys []*Key) ([]bool, Error)
- BatchGet(policy *BatchPolicy, keys []*Key, binNames ...string) ([]*Record, Error)
- BatchGetComplex(policy *BatchPolicy, records []*BatchRead) Error
- BatchGetHeader(policy *BatchPolicy, keys []*Key) ([]*Record, Error)
- BatchGetOperate(policy *BatchPolicy, keys []*Key, ops ...*Operation) ([]*Record, Error)
- BatchOperate(policy *BatchPolicy, records []BatchRecordIfc) Error
- ChangePassword(policy *AdminPolicy, user string, password string) Error
- Close()
- Cluster() *Cluster
- CreateComplexIndex(policy *WritePolicy, namespace string, setName string, indexName string, binName string, indexType IndexType, indexCollectionType IndexCollectionType, ctx ...*CDTContext) (*IndexTask, Error)
- CreateIndex(policy *WritePolicy, namespace string, setName string, indexName string, binName string, indexType IndexType) (*IndexTask, Error)
- CreateRole(policy *AdminPolicy, roleName string, privileges []Privilege, whitelist []string, readQuota, writeQuota uint32) Error
- CreateUser(policy *AdminPolicy, user string, password string, roles []string) Error
- Delete(policy *WritePolicy, key *Key) (bool, Error)
- DropIndex(policy *WritePolicy, namespace string, setName string, indexName string) Error
- DropRole(policy *AdminPolicy, roleName string) Error
- DropUser(policy *AdminPolicy, user string) Error
- Execute(policy *WritePolicy, key *Key, packageName string, functionName string, args ...Value) (interface{}, Error)
- ExecuteUDF(policy *QueryPolicy, statement *Statement, packageName string, functionName string, functionArgs ...Value) (*ExecuteTask, Error)
- ExecuteUDFNode(policy *QueryPolicy, node *Node, statement *Statement, packageName string, functionName string, functionArgs ...Value) (*ExecuteTask, Error)
- Exists(policy *BasePolicy, key *Key) (bool, Error)
- Get(policy *BasePolicy, key *Key, binNames ...string) (*Record, Error)
- GetHeader(policy *BasePolicy, key *Key) (*Record, Error)
- GetNodeNames() []string
- GetNodes() []*Node
- GrantPrivileges(policy *AdminPolicy, roleName string, privileges []Privilege) Error
- GrantRoles(policy *AdminPolicy, user string, roles []string) Error
- IsConnected() bool
- ListUDF(policy *BasePolicy) ([]*UDF, Error)
- Operate(policy *WritePolicy, key *Key, operations ...*Operation) (*Record, Error)
- Prepend(policy *WritePolicy, key *Key, binMap BinMap) Error
- PrependBins(policy *WritePolicy, key *Key, bins ...*Bin) Error
- Put(policy *WritePolicy, key *Key, binMap BinMap) Error
- PutBins(policy *WritePolicy, key *Key, bins ...*Bin) Error
- Query(policy *QueryPolicy, statement *Statement) (*Recordset, Error)
- QueryExecute(policy *QueryPolicy, writePolicy *WritePolicy, statement *Statement, ops ...*Operation) (*ExecuteTask, Error)
- QueryNode(policy *QueryPolicy, node *Node, statement *Statement) (*Recordset, Error)
- queryNodePartitions(policy *QueryPolicy, node *Node, statement *Statement) (*Recordset, Error)
- QueryPartitions(policy *QueryPolicy, statement *Statement, partitionFilter *PartitionFilter) (*Recordset, Error)
- QueryRole(policy *AdminPolicy, role string) (*Role, Error)
- QueryRoles(policy *AdminPolicy) ([]*Role, Error)
- QueryUser(policy *AdminPolicy, user string) (*UserRoles, Error)
- QueryUsers(policy *AdminPolicy) ([]*UserRoles, Error)
- RegisterUDF(policy *WritePolicy, udfBody []byte, serverPath string, language Language) (*RegisterTask, Error)
- RegisterUDFFromFile(policy *WritePolicy, clientPath string, serverPath string, language Language) (*RegisterTask, Error)
- RemoveUDF(policy *WritePolicy, udfName string) (*RemoveTask, Error)
- RevokePrivileges(policy *AdminPolicy, roleName string, privileges []Privilege) Error
- RevokeRoles(policy *AdminPolicy, user string, roles []string) Error
- ScanAll(apolicy *ScanPolicy, namespace string, setName string, binNames ...string) (*Recordset, Error)
- ScanNode(apolicy *ScanPolicy, node *Node, namespace string, setName string, binNames ...string) (*Recordset, Error)
- ScanPartitions(apolicy *ScanPolicy, partitionFilter *PartitionFilter, namespace string, setName string, binNames ...string) (*Recordset, Error)
- SetQuotas(policy *AdminPolicy, roleName string, readQuota, writeQuota uint32) Error
- SetWhitelist(policy *AdminPolicy, roleName string, whitelist []string) Error
- SetXDRFilter(policy *InfoPolicy, datacenter string, namespace string, filter *Expression) Error
- Stats() (map[string]interface{}, Error)
- String() string
- Touch(policy *WritePolicy, key *Key) Error
- Truncate(policy *InfoPolicy, namespace, set string, beforeLastUpdate *time.Time) Error
- WarmUp(count int) (int, Error)
-
- BatchGetObjects(policy *BatchPolicy, keys []*Key, objects []interface{}) (found []bool, err Error)
- GetObject(policy *BasePolicy, key *Key, obj interface{}) Error
- PutObject(policy *WritePolicy, key *Key, obj interface{}) (err Error)
- QueryAggregate(policy *QueryPolicy, statement *Statement, packageName, functionName string, functionArgs ...Value) (*Recordset, Error)
- QueryNodeObjects(policy *QueryPolicy, node *Node, statement *Statement, objChan interface{}) (*Recordset, Error)
- QueryObjects(policy *QueryPolicy, statement *Statement, objChan interface{}) (*Recordset, Error)
- QueryPartitionObjects(policy *QueryPolicy, statement *Statement, objChan interface{}, partitionFilter *PartitionFilter) (*Recordset, Error)
- ScanAllObjects(apolicy *ScanPolicy, objChan interface{}, namespace string, setName string, binNames ...string) (*Recordset, Error)
- ScanNodeObjects(apolicy *ScanPolicy, node *Node, objChan interface{}, namespace string, setName string, binNames ...string) (*Recordset, Error)
- ScanPartitionObjects(apolicy *ScanPolicy, objChan interface{}, partitionFilter *PartitionFilter, namespace string, setName string, binNames ...string) (*Recordset, Error)
-
- // TODO: Synchronization here for the sake of dynamic config in the future
-
- getUsablePolicy(*BasePolicy) *BasePolicy
- getUsableWritePolicy(*WritePolicy) *WritePolicy
- getUsableScanPolicy(*ScanPolicy) *ScanPolicy
- getUsableQueryPolicy(*QueryPolicy) *QueryPolicy
- getUsableAdminPolicy(*AdminPolicy) *AdminPolicy
- getUsableInfoPolicy(*InfoPolicy) *InfoPolicy
-
- getUsableBatchPolicy(*BatchPolicy) *BatchPolicy
- getUsableBatchReadPolicy(*BatchReadPolicy) *BatchReadPolicy
- getUsableBatchWritePolicy(*BatchWritePolicy) *BatchWritePolicy
- getUsableBatchDeletePolicy(*BatchDeletePolicy) *BatchDeletePolicy
- getUsableBatchUDFPolicy(*BatchUDFPolicy) *BatchUDFPolicy
-
- GetDefaultPolicy() *BasePolicy
- GetDefaultBatchPolicy() *BatchPolicy
- GetDefaultBatchWritePolicy() *BatchWritePolicy
- GetDefaultBatchDeletePolicy() *BatchDeletePolicy
- GetDefaultBatchUDFPolicy() *BatchUDFPolicy
- GetDefaultWritePolicy() *WritePolicy
- GetDefaultScanPolicy() *ScanPolicy
- GetDefaultQueryPolicy() *QueryPolicy
- GetDefaultAdminPolicy() *AdminPolicy
- GetDefaultInfoPolicy() *InfoPolicy
-
- SetDefaultPolicy(*BasePolicy)
- SetDefaultBatchPolicy(*BatchPolicy)
- SetDefaultBatchWritePolicy(*BatchWritePolicy)
- SetDefaultBatchDeletePolicy(*BatchDeletePolicy)
- SetDefaultBatchUDFPolicy(*BatchUDFPolicy)
- SetDefaultWritePolicy(*WritePolicy)
- SetDefaultScanPolicy(*ScanPolicy)
- SetDefaultQueryPolicy(*QueryPolicy)
- SetDefaultAdminPolicy(*AdminPolicy)
- SetDefaultInfoPolicy(*InfoPolicy)
-}
diff --git a/client_ifc_app_engine.go b/client_ifc_app_engine.go
deleted file mode 100644
index 310bb050..00000000
--- a/client_ifc_app_engine.go
+++ /dev/null
@@ -1,141 +0,0 @@
-//go:build !as_performance && app_engine
-
-// Copyright 2014-2022 Aerospike, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package aerospike
-
-import (
- "time"
-)
-
-// ClientIfc abstracts an Aerospike cluster.
-type ClientIfc interface {
- Add(policy *WritePolicy, key *Key, binMap BinMap) Error
- AddBins(policy *WritePolicy, key *Key, bins ...*Bin) Error
- Append(policy *WritePolicy, key *Key, binMap BinMap) Error
- AppendBins(policy *WritePolicy, key *Key, bins ...*Bin) Error
- BatchDelete(policy *BatchPolicy, deletePolicy *BatchDeletePolicy, keys []*Key) ([]*BatchRecord, Error)
- BatchExecute(policy *BatchPolicy, udfPolicy *BatchUDFPolicy, keys []*Key, packageName string, functionName string, args ...Value) ([]*BatchRecord, Error)
- BatchExists(policy *BatchPolicy, keys []*Key) ([]bool, Error)
- BatchGet(policy *BatchPolicy, keys []*Key, binNames ...string) ([]*Record, Error)
- BatchGetComplex(policy *BatchPolicy, records []*BatchRead) Error
- BatchGetHeader(policy *BatchPolicy, keys []*Key) ([]*Record, Error)
- BatchGetOperate(policy *BatchPolicy, keys []*Key, ops ...*Operation) ([]*Record, Error)
- BatchOperate(policy *BatchPolicy, records []BatchRecordIfc) Error
- ChangePassword(policy *AdminPolicy, user string, password string) Error
- Close()
- Cluster() *Cluster
- CreateComplexIndex(policy *WritePolicy, namespace string, setName string, indexName string, binName string, indexType IndexType, indexCollectionType IndexCollectionType, ctx ...*CDTContext) (*IndexTask, Error)
- CreateIndex(policy *WritePolicy, namespace string, setName string, indexName string, binName string, indexType IndexType) (*IndexTask, Error)
- CreateRole(policy *AdminPolicy, roleName string, privileges []Privilege, whitelist []string, readQuota, writeQuota uint32) Error
- CreateUser(policy *AdminPolicy, user string, password string, roles []string) Error
- Delete(policy *WritePolicy, key *Key) (bool, Error)
- DropIndex(policy *WritePolicy, namespace string, setName string, indexName string) Error
- DropRole(policy *AdminPolicy, roleName string) Error
- DropUser(policy *AdminPolicy, user string) Error
- Execute(policy *WritePolicy, key *Key, packageName string, functionName string, args ...Value) (interface{}, Error)
- ExecuteUDF(policy *QueryPolicy, statement *Statement, packageName string, functionName string, functionArgs ...Value) (*ExecuteTask, Error)
- ExecuteUDFNode(policy *QueryPolicy, node *Node, statement *Statement, packageName string, functionName string, functionArgs ...Value) (*ExecuteTask, Error)
- Exists(policy *BasePolicy, key *Key) (bool, Error)
- Get(policy *BasePolicy, key *Key, binNames ...string) (*Record, Error)
- GetHeader(policy *BasePolicy, key *Key) (*Record, Error)
- GetNodeNames() []string
- GetNodes() []*Node
- GrantPrivileges(policy *AdminPolicy, roleName string, privileges []Privilege) Error
- GrantRoles(policy *AdminPolicy, user string, roles []string) Error
- IsConnected() bool
- ListUDF(policy *BasePolicy) ([]*UDF, Error)
- Operate(policy *WritePolicy, key *Key, operations ...*Operation) (*Record, Error)
- Prepend(policy *WritePolicy, key *Key, binMap BinMap) Error
- PrependBins(policy *WritePolicy, key *Key, bins ...*Bin) Error
- Put(policy *WritePolicy, key *Key, binMap BinMap) Error
- PutBins(policy *WritePolicy, key *Key, bins ...*Bin) Error
- Query(policy *QueryPolicy, statement *Statement) (*Recordset, Error)
- QueryExecute(policy *QueryPolicy, writePolicy *WritePolicy, statement *Statement, ops ...*Operation) (*ExecuteTask, Error)
- QueryNode(policy *QueryPolicy, node *Node, statement *Statement) (*Recordset, Error)
- queryNodePartitions(policy *QueryPolicy, node *Node, statement *Statement) (*Recordset, Error)
- QueryPartitions(policy *QueryPolicy, statement *Statement, partitionFilter *PartitionFilter) (*Recordset, Error)
- QueryRole(policy *AdminPolicy, role string) (*Role, Error)
- QueryRoles(policy *AdminPolicy) ([]*Role, Error)
- QueryUser(policy *AdminPolicy, user string) (*UserRoles, Error)
- QueryUsers(policy *AdminPolicy) ([]*UserRoles, Error)
- RegisterUDF(policy *WritePolicy, udfBody []byte, serverPath string, language Language) (*RegisterTask, Error)
- RegisterUDFFromFile(policy *WritePolicy, clientPath string, serverPath string, language Language) (*RegisterTask, Error)
- RemoveUDF(policy *WritePolicy, udfName string) (*RemoveTask, Error)
- RevokePrivileges(policy *AdminPolicy, roleName string, privileges []Privilege) Error
- RevokeRoles(policy *AdminPolicy, user string, roles []string) Error
- ScanAll(apolicy *ScanPolicy, namespace string, setName string, binNames ...string) (*Recordset, Error)
- ScanNode(apolicy *ScanPolicy, node *Node, namespace string, setName string, binNames ...string) (*Recordset, Error)
- ScanPartitions(apolicy *ScanPolicy, partitionFilter *PartitionFilter, namespace string, setName string, binNames ...string) (*Recordset, Error)
- SetQuotas(policy *AdminPolicy, roleName string, readQuota, writeQuota uint32) Error
- SetWhitelist(policy *AdminPolicy, roleName string, whitelist []string) Error
- SetXDRFilter(policy *InfoPolicy, datacenter string, namespace string, filter *Expression) Error
- Stats() (map[string]interface{}, Error)
- String() string
- Touch(policy *WritePolicy, key *Key) Error
- Truncate(policy *InfoPolicy, namespace, set string, beforeLastUpdate *time.Time) Error
- WarmUp(count int) (int, Error)
-
- // QueryAggregate(policy *QueryPolicy, statement *Statement, packageName, functionName string, functionArgs ...Value) (*Recordset, Error)
-
- BatchGetObjects(policy *BatchPolicy, keys []*Key, objects []interface{}) (found []bool, err Error)
- GetObject(policy *BasePolicy, key *Key, obj interface{}) Error
- PutObject(policy *WritePolicy, key *Key, obj interface{}) (err Error)
- QueryNodeObjects(policy *QueryPolicy, node *Node, statement *Statement, objChan interface{}) (*Recordset, Error)
- QueryObjects(policy *QueryPolicy, statement *Statement, objChan interface{}) (*Recordset, Error)
- QueryPartitionObjects(policy *QueryPolicy, statement *Statement, objChan interface{}, partitionFilter *PartitionFilter) (*Recordset, Error)
- ScanAllObjects(apolicy *ScanPolicy, objChan interface{}, namespace string, setName string, binNames ...string) (*Recordset, Error)
- ScanNodeObjects(apolicy *ScanPolicy, node *Node, objChan interface{}, namespace string, setName string, binNames ...string) (*Recordset, Error)
- ScanPartitionObjects(apolicy *ScanPolicy, objChan interface{}, partitionFilter *PartitionFilter, namespace string, setName string, binNames ...string) (*Recordset, Error)
-
- // TODO: Synchronization here for the sake of dynamic config in the future
-
- getUsablePolicy(*BasePolicy) *BasePolicy
- getUsableWritePolicy(*WritePolicy) *WritePolicy
- getUsableScanPolicy(*ScanPolicy) *ScanPolicy
- getUsableQueryPolicy(*QueryPolicy) *QueryPolicy
- getUsableAdminPolicy(*AdminPolicy) *AdminPolicy
- getUsableInfoPolicy(*InfoPolicy) *InfoPolicy
-
- getUsableBatchPolicy(*BatchPolicy) *BatchPolicy
- getUsableBatchReadPolicy(*BatchReadPolicy) *BatchReadPolicy
- getUsableBatchWritePolicy(*BatchWritePolicy) *BatchWritePolicy
- getUsableBatchDeletePolicy(*BatchDeletePolicy) *BatchDeletePolicy
- getUsableBatchUDFPolicy(*BatchUDFPolicy) *BatchUDFPolicy
-
- GetDefaultPolicy() *BasePolicy
- GetDefaultBatchPolicy() *BatchPolicy
- GetDefaultBatchReadPolicy() *BatchReadPolicy
- GetDefaultBatchWritePolicy() *BatchWritePolicy
- GetDefaultBatchDeletePolicy() *BatchDeletePolicy
- GetDefaultBatchUDFPolicy() *BatchUDFPolicy
- GetDefaultWritePolicy() *WritePolicy
- GetDefaultScanPolicy() *ScanPolicy
- GetDefaultQueryPolicy() *QueryPolicy
- GetDefaultAdminPolicy() *AdminPolicy
- GetDefaultInfoPolicy() *InfoPolicy
-
- SetDefaultPolicy(*BasePolicy)
- SetDefaultBatchPolicy(*BatchPolicy)
- SetDefaultBatchReadPolicy(*BatchReadPolicy)
- SetDefaultBatchWritePolicy(*BatchWritePolicy)
- SetDefaultBatchDeletePolicy(*BatchDeletePolicy)
- SetDefaultBatchUDFPolicy(*BatchUDFPolicy)
- SetDefaultWritePolicy(*WritePolicy)
- SetDefaultScanPolicy(*ScanPolicy)
- SetDefaultQueryPolicy(*QueryPolicy)
- SetDefaultAdminPolicy(*AdminPolicy)
- SetDefaultInfoPolicy(*InfoPolicy)
-}
diff --git a/client_ifc_app_engine_perf.go b/client_ifc_app_engine_perf.go
deleted file mode 100644
index e2ccaa8c..00000000
--- a/client_ifc_app_engine_perf.go
+++ /dev/null
@@ -1,139 +0,0 @@
-//go:build as_performance && app_engine
-
-// Copyright 2014-2022 Aerospike, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package aerospike
-
-import (
- "time"
-)
-
-// ClientIfc abstracts an Aerospike cluster.
-type ClientIfc interface {
- Add(policy *WritePolicy, key *Key, binMap BinMap) Error
- AddBins(policy *WritePolicy, key *Key, bins ...*Bin) Error
- Append(policy *WritePolicy, key *Key, binMap BinMap) Error
- AppendBins(policy *WritePolicy, key *Key, bins ...*Bin) Error
- BatchDelete(policy *BatchPolicy, deletePolicy *BatchDeletePolicy, keys []*Key) ([]*BatchRecord, Error)
- BatchExecute(policy *BatchPolicy, udfPolicy *BatchUDFPolicy, keys []*Key, packageName string, functionName string, args ...Value) ([]*BatchRecord, Error)
- BatchExists(policy *BatchPolicy, keys []*Key) ([]bool, Error)
- BatchGet(policy *BatchPolicy, keys []*Key, binNames ...string) ([]*Record, Error)
- BatchGetComplex(policy *BatchPolicy, records []*BatchRead) Error
- BatchGetHeader(policy *BatchPolicy, keys []*Key) ([]*Record, Error)
- BatchGetOperate(policy *BatchPolicy, keys []*Key, ops ...*Operation) ([]*Record, Error)
- BatchOperate(policy *BatchPolicy, records []BatchRecordIfc) Error
- ChangePassword(policy *AdminPolicy, user string, password string) Error
- Close()
- Cluster() *Cluster
- CreateComplexIndex(policy *WritePolicy, namespace string, setName string, indexName string, binName string, indexType IndexType, indexCollectionType IndexCollectionType, ctx ...*CDTContext) (*IndexTask, Error)
- CreateIndex(policy *WritePolicy, namespace string, setName string, indexName string, binName string, indexType IndexType) (*IndexTask, Error)
- CreateRole(policy *AdminPolicy, roleName string, privileges []Privilege, whitelist []string, readQuota, writeQuota uint32) Error
- CreateUser(policy *AdminPolicy, user string, password string, roles []string) Error
- Delete(policy *WritePolicy, key *Key) (bool, Error)
- DropIndex(policy *WritePolicy, namespace string, setName string, indexName string) Error
- DropRole(policy *AdminPolicy, roleName string) Error
- DropUser(policy *AdminPolicy, user string) Error
- Execute(policy *WritePolicy, key *Key, packageName string, functionName string, args ...Value) (interface{}, Error)
- ExecuteUDF(policy *QueryPolicy, statement *Statement, packageName string, functionName string, functionArgs ...Value) (*ExecuteTask, Error)
- ExecuteUDFNode(policy *QueryPolicy, node *Node, statement *Statement, packageName string, functionName string, functionArgs ...Value) (*ExecuteTask, Error)
- Exists(policy *BasePolicy, key *Key) (bool, Error)
- Get(policy *BasePolicy, key *Key, binNames ...string) (*Record, Error)
- GetHeader(policy *BasePolicy, key *Key) (*Record, Error)
- GetNodeNames() []string
- GetNodes() []*Node
- GrantPrivileges(policy *AdminPolicy, roleName string, privileges []Privilege) Error
- GrantRoles(policy *AdminPolicy, user string, roles []string) Error
- IsConnected() bool
- ListUDF(policy *BasePolicy) ([]*UDF, Error)
- Operate(policy *WritePolicy, key *Key, operations ...*Operation) (*Record, Error)
- Prepend(policy *WritePolicy, key *Key, binMap BinMap) Error
- PrependBins(policy *WritePolicy, key *Key, bins ...*Bin) Error
- Put(policy *WritePolicy, key *Key, binMap BinMap) Error
- PutBins(policy *WritePolicy, key *Key, bins ...*Bin) Error
- Query(policy *QueryPolicy, statement *Statement) (*Recordset, Error)
- QueryExecute(policy *QueryPolicy, writePolicy *WritePolicy, statement *Statement, ops ...*Operation) (*ExecuteTask, Error)
- QueryNode(policy *QueryPolicy, node *Node, statement *Statement) (*Recordset, Error)
- queryNodePartitions(policy *QueryPolicy, node *Node, statement *Statement) (*Recordset, Error)
- QueryPartitions(policy *QueryPolicy, statement *Statement, partitionFilter *PartitionFilter) (*Recordset, Error)
- QueryRole(policy *AdminPolicy, role string) (*Role, Error)
- QueryRoles(policy *AdminPolicy) ([]*Role, Error)
- QueryUser(policy *AdminPolicy, user string) (*UserRoles, Error)
- QueryUsers(policy *AdminPolicy) ([]*UserRoles, Error)
- RegisterUDF(policy *WritePolicy, udfBody []byte, serverPath string, language Language) (*RegisterTask, Error)
- RegisterUDFFromFile(policy *WritePolicy, clientPath string, serverPath string, language Language) (*RegisterTask, Error)
- RemoveUDF(policy *WritePolicy, udfName string) (*RemoveTask, Error)
- RevokePrivileges(policy *AdminPolicy, roleName string, privileges []Privilege) Error
- RevokeRoles(policy *AdminPolicy, user string, roles []string) Error
- ScanAll(apolicy *ScanPolicy, namespace string, setName string, binNames ...string) (*Recordset, Error)
- ScanNode(apolicy *ScanPolicy, node *Node, namespace string, setName string, binNames ...string) (*Recordset, Error)
- ScanPartitions(apolicy *ScanPolicy, partitionFilter *PartitionFilter, namespace string, setName string, binNames ...string) (*Recordset, Error)
- SetQuotas(policy *AdminPolicy, roleName string, readQuota, writeQuota uint32) Error
- SetWhitelist(policy *AdminPolicy, roleName string, whitelist []string) Error
- SetXDRFilter(policy *InfoPolicy, datacenter string, namespace string, filter *Expression) Error
- Stats() (map[string]interface{}, Error)
- String() string
- Touch(policy *WritePolicy, key *Key) Error
- Truncate(policy *InfoPolicy, namespace, set string, beforeLastUpdate *time.Time) Error
- WarmUp(count int) (int, Error)
-
- // QueryAggregate(policy *QueryPolicy, statement *Statement, packageName, functionName string, functionArgs ...Value) (*Recordset, Error)
-
- // BatchGetObjects(policy *BatchPolicy, keys []*Key, objects []interface{}) (found []bool, err Error)
- // GetObject(policy *BasePolicy, key *Key, obj interface{}) Error
- // PutObject(policy *WritePolicy, key *Key, obj interface{}) (err Error)
- // QueryNodeObjects(policy *QueryPolicy, node *Node, statement *Statement, objChan interface{}) (*Recordset, Error)
- // QueryObjects(policy *QueryPolicy, statement *Statement, objChan interface{}) (*Recordset, Error)
- // QueryPartitionObjects(policy *QueryPolicy, statement *Statement, objChan interface{}, partitionFilter *PartitionFilter) (*Recordset, Error)
- // ScanAllObjects(apolicy *ScanPolicy, objChan interface{}, namespace string, setName string, binNames ...string) (*Recordset, Error)
- // ScanNodeObjects(apolicy *ScanPolicy, node *Node, objChan interface{}, namespace string, setName string, binNames ...string) (*Recordset, Error)
- // ScanPartitionObjects(apolicy *ScanPolicy, objChan interface{}, partitionFilter *PartitionFilter, namespace string, setName string, binNames ...string) (*Recordset, Error)
-
- // TODO: Synchronization here for the sake of dynamic config in the future
-
- getUsablePolicy(*BasePolicy) *BasePolicy
- getUsableWritePolicy(*WritePolicy) *WritePolicy
- getUsableScanPolicy(*ScanPolicy) *ScanPolicy
- getUsableQueryPolicy(*QueryPolicy) *QueryPolicy
- getUsableAdminPolicy(*AdminPolicy) *AdminPolicy
- getUsableInfoPolicy(*InfoPolicy) *InfoPolicy
-
- getUsableBatchPolicy(*BatchPolicy) *BatchPolicy
- getUsableBatchReadPolicy(*BatchReadPolicy) *BatchReadPolicy
- getUsableBatchWritePolicy(*BatchWritePolicy) *BatchWritePolicy
- getUsableBatchDeletePolicy(*BatchDeletePolicy) *BatchDeletePolicy
- getUsableBatchUDFPolicy(*BatchUDFPolicy) *BatchUDFPolicy
-
- GetDefaultPolicy() *BasePolicy
- GetDefaultBatchPolicy() *BatchPolicy
- GetDefaultBatchWritePolicy() *BatchWritePolicy
- GetDefaultBatchDeletePolicy() *BatchDeletePolicy
- GetDefaultBatchUDFPolicy() *BatchUDFPolicy
- GetDefaultWritePolicy() *WritePolicy
- GetDefaultScanPolicy() *ScanPolicy
- GetDefaultQueryPolicy() *QueryPolicy
- GetDefaultAdminPolicy() *AdminPolicy
- GetDefaultInfoPolicy() *InfoPolicy
-
- SetDefaultPolicy(*BasePolicy)
- SetDefaultBatchPolicy(*BatchPolicy)
- SetDefaultBatchWritePolicy(*BatchWritePolicy)
- SetDefaultBatchDeletePolicy(*BatchDeletePolicy)
- SetDefaultBatchUDFPolicy(*BatchUDFPolicy)
- SetDefaultWritePolicy(*WritePolicy)
- SetDefaultScanPolicy(*ScanPolicy)
- SetDefaultQueryPolicy(*QueryPolicy)
- SetDefaultAdminPolicy(*AdminPolicy)
- SetDefaultInfoPolicy(*InfoPolicy)
-}
diff --git a/client_ifc_as_performance.go b/client_ifc_as_performance.go
deleted file mode 100644
index 3e3376b6..00000000
--- a/client_ifc_as_performance.go
+++ /dev/null
@@ -1,141 +0,0 @@
-//go:build as_performance && !app_engine
-
-// Copyright 2014-2022 Aerospike, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package aerospike
-
-import (
- "time"
-)
-
-// ClientIfc abstracts an Aerospike cluster.
-type ClientIfc interface {
- Add(policy *WritePolicy, key *Key, binMap BinMap) Error
- AddBins(policy *WritePolicy, key *Key, bins ...*Bin) Error
- Append(policy *WritePolicy, key *Key, binMap BinMap) Error
- AppendBins(policy *WritePolicy, key *Key, bins ...*Bin) Error
- BatchDelete(policy *BatchPolicy, deletePolicy *BatchDeletePolicy, keys []*Key) ([]*BatchRecord, Error)
- BatchExecute(policy *BatchPolicy, udfPolicy *BatchUDFPolicy, keys []*Key, packageName string, functionName string, args ...Value) ([]*BatchRecord, Error)
- BatchExists(policy *BatchPolicy, keys []*Key) ([]bool, Error)
- BatchGet(policy *BatchPolicy, keys []*Key, binNames ...string) ([]*Record, Error)
- BatchGetComplex(policy *BatchPolicy, records []*BatchRead) Error
- BatchGetHeader(policy *BatchPolicy, keys []*Key) ([]*Record, Error)
- BatchGetOperate(policy *BatchPolicy, keys []*Key, ops ...*Operation) ([]*Record, Error)
- BatchOperate(policy *BatchPolicy, records []BatchRecordIfc) Error
- ChangePassword(policy *AdminPolicy, user string, password string) Error
- Close()
- Cluster() *Cluster
- CreateComplexIndex(policy *WritePolicy, namespace string, setName string, indexName string, binName string, indexType IndexType, indexCollectionType IndexCollectionType, ctx ...*CDTContext) (*IndexTask, Error)
- CreateIndex(policy *WritePolicy, namespace string, setName string, indexName string, binName string, indexType IndexType) (*IndexTask, Error)
- CreateRole(policy *AdminPolicy, roleName string, privileges []Privilege, whitelist []string, readQuota, writeQuota uint32) Error
- CreateUser(policy *AdminPolicy, user string, password string, roles []string) Error
- Delete(policy *WritePolicy, key *Key) (bool, Error)
- DropIndex(policy *WritePolicy, namespace string, setName string, indexName string) Error
- DropRole(policy *AdminPolicy, roleName string) Error
- DropUser(policy *AdminPolicy, user string) Error
- Execute(policy *WritePolicy, key *Key, packageName string, functionName string, args ...Value) (interface{}, Error)
- ExecuteUDF(policy *QueryPolicy, statement *Statement, packageName string, functionName string, functionArgs ...Value) (*ExecuteTask, Error)
- ExecuteUDFNode(policy *QueryPolicy, node *Node, statement *Statement, packageName string, functionName string, functionArgs ...Value) (*ExecuteTask, Error)
- Exists(policy *BasePolicy, key *Key) (bool, Error)
- Get(policy *BasePolicy, key *Key, binNames ...string) (*Record, Error)
- GetHeader(policy *BasePolicy, key *Key) (*Record, Error)
- GetNodeNames() []string
- GetNodes() []*Node
- GrantPrivileges(policy *AdminPolicy, roleName string, privileges []Privilege) Error
- GrantRoles(policy *AdminPolicy, user string, roles []string) Error
- IsConnected() bool
- ListUDF(policy *BasePolicy) ([]*UDF, Error)
- Operate(policy *WritePolicy, key *Key, operations ...*Operation) (*Record, Error)
- Prepend(policy *WritePolicy, key *Key, binMap BinMap) Error
- PrependBins(policy *WritePolicy, key *Key, bins ...*Bin) Error
- Put(policy *WritePolicy, key *Key, binMap BinMap) Error
- PutBins(policy *WritePolicy, key *Key, bins ...*Bin) Error
- Query(policy *QueryPolicy, statement *Statement) (*Recordset, Error)
- QueryExecute(policy *QueryPolicy, writePolicy *WritePolicy, statement *Statement, ops ...*Operation) (*ExecuteTask, Error)
- QueryNode(policy *QueryPolicy, node *Node, statement *Statement) (*Recordset, Error)
- queryNodePartitions(policy *QueryPolicy, node *Node, statement *Statement) (*Recordset, Error)
- QueryPartitions(policy *QueryPolicy, statement *Statement, partitionFilter *PartitionFilter) (*Recordset, Error)
- QueryRole(policy *AdminPolicy, role string) (*Role, Error)
- QueryRoles(policy *AdminPolicy) ([]*Role, Error)
- QueryUser(policy *AdminPolicy, user string) (*UserRoles, Error)
- QueryUsers(policy *AdminPolicy) ([]*UserRoles, Error)
- RegisterUDF(policy *WritePolicy, udfBody []byte, serverPath string, language Language) (*RegisterTask, Error)
- RegisterUDFFromFile(policy *WritePolicy, clientPath string, serverPath string, language Language) (*RegisterTask, Error)
- RemoveUDF(policy *WritePolicy, udfName string) (*RemoveTask, Error)
- RevokePrivileges(policy *AdminPolicy, roleName string, privileges []Privilege) Error
- RevokeRoles(policy *AdminPolicy, user string, roles []string) Error
- ScanAll(apolicy *ScanPolicy, namespace string, setName string, binNames ...string) (*Recordset, Error)
- ScanNode(apolicy *ScanPolicy, node *Node, namespace string, setName string, binNames ...string) (*Recordset, Error)
- ScanPartitions(apolicy *ScanPolicy, partitionFilter *PartitionFilter, namespace string, setName string, binNames ...string) (*Recordset, Error)
- SetQuotas(policy *AdminPolicy, roleName string, readQuota, writeQuota uint32) Error
- SetWhitelist(policy *AdminPolicy, roleName string, whitelist []string) Error
- SetXDRFilter(policy *InfoPolicy, datacenter string, namespace string, filter *Expression) Error
- Stats() (map[string]interface{}, Error)
- String() string
- Touch(policy *WritePolicy, key *Key) Error
- Truncate(policy *InfoPolicy, namespace, set string, beforeLastUpdate *time.Time) Error
- WarmUp(count int) (int, Error)
-
- QueryAggregate(policy *QueryPolicy, statement *Statement, packageName, functionName string, functionArgs ...Value) (*Recordset, Error)
-
- // BatchGetObjects(policy *BatchPolicy, keys []*Key, objects []interface{}) (found []bool, err Error)
- // GetObject(policy *BasePolicy, key *Key, obj interface{}) Error
- // PutObject(policy *WritePolicy, key *Key, obj interface{}) (err Error)
- // QueryNodeObjects(policy *QueryPolicy, node *Node, statement *Statement, objChan interface{}) (*Recordset, Error)
- // QueryObjects(policy *QueryPolicy, statement *Statement, objChan interface{}) (*Recordset, Error)
- // QueryPartitionObjects(policy *QueryPolicy, statement *Statement, objChan interface{}, partitionFilter *PartitionFilter) (*Recordset, Error)
- // ScanAllObjects(apolicy *ScanPolicy, objChan interface{}, namespace string, setName string, binNames ...string) (*Recordset, Error)
- // ScanNodeObjects(apolicy *ScanPolicy, node *Node, objChan interface{}, namespace string, setName string, binNames ...string) (*Recordset, Error)
- // ScanPartitionObjects(apolicy *ScanPolicy, objChan interface{}, partitionFilter *PartitionFilter, namespace string, setName string, binNames ...string) (*Recordset, Error)
-
- // TODO: Synchronization here for the sake of dynamic config in the future
-
- getUsablePolicy(*BasePolicy) *BasePolicy
- getUsableWritePolicy(*WritePolicy) *WritePolicy
- getUsableScanPolicy(*ScanPolicy) *ScanPolicy
- getUsableQueryPolicy(*QueryPolicy) *QueryPolicy
- getUsableAdminPolicy(*AdminPolicy) *AdminPolicy
- getUsableInfoPolicy(*InfoPolicy) *InfoPolicy
-
- getUsableBatchPolicy(*BatchPolicy) *BatchPolicy
- getUsableBatchReadPolicy(*BatchReadPolicy) *BatchReadPolicy
- getUsableBatchWritePolicy(*BatchWritePolicy) *BatchWritePolicy
- getUsableBatchDeletePolicy(*BatchDeletePolicy) *BatchDeletePolicy
- getUsableBatchUDFPolicy(*BatchUDFPolicy) *BatchUDFPolicy
-
- GetDefaultPolicy() *BasePolicy
- GetDefaultBatchPolicy() *BatchPolicy
- GetDefaultBatchReadPolicy() *BatchReadPolicy
- GetDefaultBatchWritePolicy() *BatchWritePolicy
- GetDefaultBatchDeletePolicy() *BatchDeletePolicy
- GetDefaultBatchUDFPolicy() *BatchUDFPolicy
- GetDefaultWritePolicy() *WritePolicy
- GetDefaultScanPolicy() *ScanPolicy
- GetDefaultQueryPolicy() *QueryPolicy
- GetDefaultAdminPolicy() *AdminPolicy
- GetDefaultInfoPolicy() *InfoPolicy
-
- SetDefaultPolicy(*BasePolicy)
- SetDefaultBatchPolicy(*BatchPolicy)
- SetDefaultBatchReadPolicy(*BatchReadPolicy)
- SetDefaultBatchWritePolicy(*BatchWritePolicy)
- SetDefaultBatchDeletePolicy(*BatchDeletePolicy)
- SetDefaultBatchUDFPolicy(*BatchUDFPolicy)
- SetDefaultWritePolicy(*WritePolicy)
- SetDefaultScanPolicy(*ScanPolicy)
- SetDefaultQueryPolicy(*QueryPolicy)
- SetDefaultAdminPolicy(*AdminPolicy)
- SetDefaultInfoPolicy(*InfoPolicy)
-}
diff --git a/client_object_test.go b/client_object_test.go
index 3e71cef0..88a220e5 100644
--- a/client_object_test.go
+++ b/client_object_test.go
@@ -21,7 +21,7 @@ import (
"strconv"
"time"
- as "github.com/aerospike/aerospike-client-go/v7"
+ as "github.com/aerospike/aerospike-client-go/v8"
gg "github.com/onsi/ginkgo/v2"
gm "github.com/onsi/gomega"
@@ -31,12 +31,6 @@ import (
var _ = gg.Describe("Aerospike", func() {
var nsup int
- gg.BeforeEach(func() {
- if *dbaas {
- gg.Skip("Not supported in DBAAS environment")
- }
- })
-
for _, useBoolType := range []bool{false, true} {
gg.BeforeEach(func() {
@@ -945,10 +939,6 @@ var _ = gg.Describe("Aerospike", func() {
gm.Expect(resObj.TTL1).NotTo(gm.Equal(uint32(0)))
gm.Expect(resObj.TTL1).To(gm.Equal(resObj.TTL2))
- if *dbaas {
- gg.Skip("Not supported in DBAAS environment")
- }
-
defaultTTL, nerr := strconv.Atoi(nsInfo(ns, "default-ttl"))
gm.Expect(nerr).ToNot(gm.HaveOccurred())
@@ -1100,12 +1090,6 @@ var _ = gg.Describe("Aerospike", func() {
}) // ScanObjects context
gg.Context("UDF Objects operations", func() {
- gg.BeforeEach(func() {
- if *dbaas {
- gg.Skip("Not supported in DBAAS environment")
- }
- })
-
gg.It("must store and get values of types which implement Value interface using udf", func() {
udfFunc := `function setValue(rec, val)
rec['value'] = val
@@ -1339,10 +1323,6 @@ var _ = gg.Describe("Aerospike", func() {
})
gg.It("must query only relevant objects with the most complex structure possible", func() {
- if *dbaas {
- gg.Skip("Not supported in DBAAS environment")
- }
-
// first create an index
createIndex(nil, ns, set, set+"inner1", "inner1", as.NUMERIC)
defer dropIndex(nil, ns, set, set+"inner1")
@@ -1381,10 +1361,6 @@ var _ = gg.Describe("Aerospike", func() {
})
gg.It("must query only relevant objects, and close and return", func() {
- if *dbaas {
- gg.Skip("Not supported in DBAAS environment")
- }
-
// first create an index
createIndex(nil, ns, set, set+"inner1", "inner1", as.NUMERIC)
defer dropIndex(nil, ns, set, set+"inner1")
diff --git a/client_reflect.go b/client_reflect.go
index 995d27c3..aaac279d 100644
--- a/client_reflect.go
+++ b/client_reflect.go
@@ -19,11 +19,11 @@ package aerospike
import (
"reflect"
- "github.com/aerospike/aerospike-client-go/v7/types"
+ "github.com/aerospike/aerospike-client-go/v8/types"
)
// PutObject writes record bin(s) to the server.
-// The policy specifies the transaction timeout, record expiration and how the transaction is
+// The policy specifies the command timeout, record expiration and how the command is
// handled when the record already exists.
// If the policy is nil, the default relevant policy will be used.
// A struct can be tagged to influence the way the object is put in the database:
@@ -46,6 +46,12 @@ import (
func (clnt *Client) PutObject(policy *WritePolicy, key *Key, obj interface{}) (err Error) {
policy = clnt.getUsableWritePolicy(policy)
+ if policy.Txn != nil {
+ if err := txnMonitor.addKey(clnt.cluster, policy, key); err != nil {
+ return err
+ }
+ }
+
binMap := marshal(obj)
command, err := newWriteCommand(clnt.cluster, policy, key, nil, binMap, _WRITE)
if err != nil {
@@ -62,15 +68,16 @@ func (clnt *Client) PutObject(policy *WritePolicy, key *Key, obj interface{}) (e
func (clnt *Client) GetObject(policy *BasePolicy, key *Key, obj interface{}) Error {
policy = clnt.getUsablePolicy(policy)
+ if policy.Txn != nil {
+ if err := policy.Txn.prepareRead(key.namespace); err != nil {
+ return err
+ }
+ }
+
rval := reflect.ValueOf(obj)
binNames := objectMappings.getFields(rval.Type())
- partition, err := PartitionForRead(clnt.cluster, policy, key)
- if err != nil {
- return err
- }
-
- command, err := newReadCommand(clnt.cluster, policy, key, binNames, partition)
+ command, err := newReadCommand(clnt.cluster, policy, key, binNames)
if err != nil {
return err
}
@@ -86,7 +93,7 @@ func (clnt *Client) getObjectDirect(policy *BasePolicy, key *Key, rval *reflect.
policy = clnt.getUsablePolicy(policy)
binNames := objectMappings.getFields(rval.Type())
- command, err := newReadCommand(clnt.cluster, policy, key, binNames, nil)
+ command, err := newReadCommand(clnt.cluster, policy, key, binNames)
if err != nil {
return err
}
diff --git a/client_reflect_test.go b/client_reflect_test.go
index 6fac8087..4e83fdeb 100644
--- a/client_reflect_test.go
+++ b/client_reflect_test.go
@@ -21,7 +21,7 @@ import (
"math"
"strings"
- as "github.com/aerospike/aerospike-client-go/v7"
+ as "github.com/aerospike/aerospike-client-go/v8"
gg "github.com/onsi/ginkgo/v2"
gm "github.com/onsi/gomega"
diff --git a/client_test.go b/client_test.go
index 3b04ba79..1d27be91 100644
--- a/client_test.go
+++ b/client_test.go
@@ -24,10 +24,10 @@ import (
"strings"
"time"
- as "github.com/aerospike/aerospike-client-go/v7"
- "github.com/aerospike/aerospike-client-go/v7/types"
- ast "github.com/aerospike/aerospike-client-go/v7/types"
- asub "github.com/aerospike/aerospike-client-go/v7/utils/buffer"
+ as "github.com/aerospike/aerospike-client-go/v8"
+ "github.com/aerospike/aerospike-client-go/v8/types"
+ ast "github.com/aerospike/aerospike-client-go/v8/types"
+ asub "github.com/aerospike/aerospike-client-go/v8/utils/buffer"
gg "github.com/onsi/ginkgo/v2"
gm "github.com/onsi/gomega"
@@ -100,12 +100,6 @@ var _ = gg.Describe("Aerospike", func() {
gg.Describe("Client Management", func() {
- gg.BeforeEach(func() {
- if *proxy {
- gg.Skip("Not supported in Proxy Client")
- }
- })
-
dbHost := as.NewHost(*host, *port)
dbHost.TLSName = *nodeTLSName
@@ -285,16 +279,72 @@ var _ = gg.Describe("Aerospike", func() {
gm.Expect(err).ToNot(gm.HaveOccurred())
})
+ gg.Context("PutPayload operations", func() {
+
+ gg.It("must put a record", func() {
+ key, err = as.NewKey(ns, set, 0)
+ gm.Expect(err).ToNot(gm.HaveOccurred())
+
+ binMap := as.BinMap{
+ "Aerospike": "value",
+ "Aerospike1": "value2",
+ }
+
+ wcmd, err := as.NewWriteCommand(nil, wpolicy, key, nil, binMap)
+ gm.Expect(err).ToNot(gm.HaveOccurred())
+
+ err = wcmd.WriteBuffer(&wcmd)
+ gm.Expect(err).ToNot(gm.HaveOccurred())
+ payload := wcmd.Buffer()
+
+ client.Delete(nil, key)
+ gm.Expect(err).ToNot(gm.HaveOccurred())
+
+ err = client.PutPayload(nil, key, payload)
+ gm.Expect(err).ToNot(gm.HaveOccurred())
+
+ rec, err := client.Get(nil, key)
+ gm.Expect(err).ToNot(gm.HaveOccurred())
+ gm.Expect(rec.Bins).To(gm.Equal(binMap))
+ })
+
+ gg.It("must delete a record", func() {
+ key, err = as.NewKey(ns, set, 0)
+ gm.Expect(err).ToNot(gm.HaveOccurred())
+
+ binMap := as.BinMap{
+ "Aerospike": "value",
+ "Aerospike1": "value2",
+ }
+
+ err := client.Put(nil, key, binMap)
+ gm.Expect(err).ToNot(gm.HaveOccurred())
+
+ exists, err := client.Exists(nil, key)
+ gm.Expect(err).ToNot(gm.HaveOccurred())
+ gm.Expect(exists).To(gm.BeTrue())
+
+ dcmd, err := as.NewDeleteCommand(nil, wpolicy, key)
+ gm.Expect(err).ToNot(gm.HaveOccurred())
+
+ err = dcmd.WriteBuffer(dcmd)
+ gm.Expect(err).ToNot(gm.HaveOccurred())
+ payload := dcmd.Buffer()
+
+ err = client.PutPayload(nil, key, payload)
+ gm.Expect(err).ToNot(gm.HaveOccurred())
+
+ exists, err = client.Exists(nil, key)
+ gm.Expect(err).ToNot(gm.HaveOccurred())
+ gm.Expect(exists).To(gm.BeFalse())
+ })
+
+ })
+
gg.Context("Put operations", func() {
gg.Context("Expiration values", func() {
- gg.BeforeEach(func() {
- if *dbaas {
- gg.Skip("Not supported in DBAAS environment")
- }
- })
-
gg.It("must return 30d if set to TTLServerDefault", func() {
wpolicy := as.NewWritePolicy(0, as.TTLServerDefault)
bin := as.NewBin("Aerospike", "value")
@@ -1726,10 +1776,6 @@ var _ = gg.Describe("Aerospike", func() {
gg.Context("XDR Filter", func() {
gg.BeforeEach(func() {
- if *proxy {
- gg.Skip("Not supported in Proxy Client")
- }
-
if !xdrEnabled() {
gg.Skip("XDR Filter Tests are not supported in the Community Edition, or when the server is not configured for XDR")
return
diff --git a/cluster.go b/cluster.go
index c98b8a98..4c70a950 100644
--- a/cluster.go
+++ b/cluster.go
@@ -24,11 +24,11 @@ import (
"golang.org/x/sync/errgroup"
- iatomic "github.com/aerospike/aerospike-client-go/v7/internal/atomic"
- sm "github.com/aerospike/aerospike-client-go/v7/internal/atomic/map"
- "github.com/aerospike/aerospike-client-go/v7/internal/seq"
- "github.com/aerospike/aerospike-client-go/v7/logger"
- "github.com/aerospike/aerospike-client-go/v7/types"
+ iatomic "github.com/aerospike/aerospike-client-go/v8/internal/atomic"
+ sm "github.com/aerospike/aerospike-client-go/v8/internal/atomic/map"
+ "github.com/aerospike/aerospike-client-go/v8/internal/seq"
+ "github.com/aerospike/aerospike-client-go/v8/logger"
+ "github.com/aerospike/aerospike-client-go/v8/types"
)
// Cluster encapsulates the aerospike cluster nodes and manages
@@ -980,7 +980,7 @@ func (clstr *Cluster) MetricsEnabled() bool {
return clstr.metricsEnabled.Load()
}
-// EnableMetrics enables the cluster transaction metrics gathering.
+// EnableMetrics enables the cluster command metrics gathering.
 // If the parameters for the histogram in the policy are different from the ones already
// on the cluster, the metrics will be reset.
func (clstr *Cluster) EnableMetrics(policy *MetricsPolicy) {
@@ -1004,7 +1004,7 @@ func (clstr *Cluster) EnableMetrics(policy *MetricsPolicy) {
}
}
-// DisableMetrics disables the cluster transaction metrics gathering.
+// DisableMetrics disables the cluster command metrics gathering.
func (clstr *Cluster) DisableMetrics() {
clstr.metricsEnabled.Store(false)
}
diff --git a/command.go b/command.go
index 3fa0e7c8..18a51773 100644
--- a/command.go
+++ b/command.go
@@ -22,12 +22,12 @@ import (
"fmt"
"time"
- "github.com/aerospike/aerospike-client-go/v7/logger"
- "github.com/aerospike/aerospike-client-go/v7/types"
- "github.com/aerospike/aerospike-client-go/v7/types/pool"
+ "github.com/aerospike/aerospike-client-go/v8/logger"
+ "github.com/aerospike/aerospike-client-go/v8/types"
+ "github.com/aerospike/aerospike-client-go/v8/types/pool"
- ParticleType "github.com/aerospike/aerospike-client-go/v7/types/particle_type"
- Buffer "github.com/aerospike/aerospike-client-go/v7/utils/buffer"
+ ParticleType "github.com/aerospike/aerospike-client-go/v8/types/particle_type"
+ Buffer "github.com/aerospike/aerospike-client-go/v8/utils/buffer"
)
const (
@@ -58,7 +58,7 @@ const (
_INFO2_GENERATION int = (1 << 2)
// Update if new generation >= old, good for restore.
_INFO2_GENERATION_GT int = (1 << 3)
- // Transaction resulting in record deletion leaves tombstone (Enterprise only).
+	// Command resulting in record deletion leaves tombstone (Enterprise only).
_INFO2_DURABLE_DELETE int = (1 << 4)
// Create only. Fail if record already exists.
_INFO2_CREATE_ONLY int = (1 << 5)
@@ -86,6 +86,13 @@ const (
// See Below
_INFO3_SC_READ_RELAX int = (1 << 7)
+ // Send MRT version to the server to be verified.
+ _INFO4_MRT_VERIFY_READ = (1 << 0)
+ // Roll forward MRT.
+ _INFO4_MRT_ROLL_FORWARD = (1 << 1)
+ // Roll back MRT.
+ _INFO4_MRT_ROLL_BACK = (1 << 2)
+
// Interpret SC_READ bits in info3.
//
// RELAX TYPE
@@ -109,22 +116,22 @@ const (
_BATCH_MSG_INFO uint8 = 0x2
_BATCH_MSG_GEN uint8 = 0x4
_BATCH_MSG_TTL uint8 = 0x8
+ _BATCH_MSG_INFO4 uint8 = 0x10
_MSG_TOTAL_HEADER_SIZE uint8 = 30
_FIELD_HEADER_SIZE uint8 = 5
_OPERATION_HEADER_SIZE uint8 = 8
_MSG_REMAINING_HEADER_SIZE uint8 = 22
- _DIGEST_SIZE uint8 = 20
_COMPRESS_THRESHOLD int = 128
_CL_MSG_VERSION int64 = 2
_AS_MSG_TYPE int64 = 3
_AS_MSG_TYPE_COMPRESSED int64 = 4
)
-type transactionType int
+type commandType int
const (
- ttNone transactionType = iota
+ ttNone commandType = iota
ttGet
ttGetHeader
ttExists
@@ -154,11 +161,13 @@ type command interface {
parseRecordResults(ifc command, receiveSize int) (bool, Error)
prepareRetry(ifc command, isTimeout bool) bool
- transactionType() transactionType
+ commandType() commandType
isRead() bool
+ onInDoubt()
execute(ifc command) Error
+ executeIter(ifc command, iter int) Error
executeAt(ifc command, policy *BasePolicy, deadline time.Time, iterations int) Error
canPutConnBack() bool
@@ -171,6 +180,9 @@ type command interface {
type baseCommand struct {
bufferEx
+ txn *Txn
+ version *uint64
+
node *Node
conn *Connection
@@ -193,10 +205,462 @@ type baseCommand struct {
commandWasSent bool
}
+//--------------------------------------------------
+// Multi-record Transactions
+//--------------------------------------------------
+
+// canRepeat reports whether this batch record can be encoded as a repeat of
+// the previous entry to save wire space. Version pointers and records are
+// compared by reference on purpose: a false negative only costs bytes on the
+// wire, never correctness, and it avoids expensive deep-equality checks.
+func canRepeat(policy *BatchPolicy, key *Key, record, prev BatchRecordIfc, ver, verPrev *uint64) bool {
+	if policy.SendKey || prev == nil || record != prev || verPrev != ver {
+		return false
+	}
+	pk := prev.key()
+	return pk.namespace == key.namespace && pk.setName == key.setName
+}
+
+// canRepeatAttr reports whether a batch entry built from attr/key may be sent
+// as a repeat of the previous entry. Version pointers are compared by
+// reference deliberately (see canRepeat).
+func canRepeatAttr(attr *batchAttr, key, keyPrev *Key, ver, verPrev *uint64) bool {
+	if attr.sendKey || keyPrev == nil || verPrev != ver {
+		return false
+	}
+	return keyPrev.namespace == key.namespace && keyPrev.setName == key.setName
+}
+
+// canRepeatKeys reports whether two keys with known read versions can share a
+// repeated batch entry. Unlike canRepeat, versions are compared by value, so
+// both must be non-nil.
+func canRepeatKeys(key *Key, keyPrev *Key, ver, verPrev *uint64) bool {
+	switch {
+	case ver == nil || verPrev == nil:
+		return false
+	case keyPrev == nil || *verPrev != *ver:
+		return false
+	default:
+		return keyPrev.namespace == key.namespace && keyPrev.setName == key.setName
+	}
+}
+
+// setTxnAddKeys writes a record command carrying the operations prepared by
+// the caller in args; per its name it is used to add the keys touched by a
+// multi-record transaction to the MRT monitor record (callers not visible
+// here — confirm).
+func (cmd *baseCommand) setTxnAddKeys(policy *WritePolicy, key *Key, args operateArgs) Error {
+	cmd.begin()
+	fieldCount := cmd.estimateRawKeySize(key)
+
+	// Account for the operations payload in the size estimate.
+	if size, err := args.size(); err != nil {
+		return err
+	} else {
+		cmd.dataOffset += size
+	}
+
+	if err := cmd.sizeBuffer(policy.compress()); err != nil {
+		return err
+	}
+
+	// Header starts after the 8-byte protocol preamble
+	// (_MSG_TOTAL_HEADER_SIZE = 30 = 8 + _MSG_REMAINING_HEADER_SIZE).
+	cmd.dataOffset = 8
+	cmd.WriteByte(_MSG_REMAINING_HEADER_SIZE)
+	cmd.WriteByte(byte(args.readAttr))
+	cmd.WriteByte(byte(args.writeAttr))
+	cmd.WriteByte(0)
+	cmd.WriteByte(0)
+	cmd.WriteByte(0)
+	cmd.WriteInt32(0)
+	cmd.WriteUint32(policy.Expiration)
+	cmd.WriteInt32(0)
+	cmd.WriteInt16(int16(fieldCount))
+	cmd.WriteInt16(int16(len(args.operations)))
+	cmd.dataOffset = int(_MSG_TOTAL_HEADER_SIZE)
+
+	if err := cmd.writeKey(key); err != nil {
+		return err
+	}
+
+	for _, operation := range args.operations {
+		if err := cmd.writeOperationForOperation(operation); err != nil {
+			return err
+		}
+	}
+	cmd.end()
+	cmd.markCompressed(policy)
+	return nil
+}
+
+// setTxnVerify writes a single-record MRT version-verify command: a
+// no-bin-data read flagged with _INFO4_MRT_VERIFY_READ that carries the
+// expected record version, letting the server check the record is unchanged
+// since this transaction read it.
+func (cmd *baseCommand) setTxnVerify(policy *BasePolicy, key *Key, ver uint64) Error {
+	cmd.begin()
+	fieldCount := cmd.estimateRawKeySize(key)
+
+	// Version field: 7 payload bytes plus the field header.
+	cmd.dataOffset += int(7 + _FIELD_HEADER_SIZE)
+	fieldCount++
+
+	if err := cmd.sizeBuffer(policy.compress()); err != nil {
+		return err
+	}
+
+	// Header starts after the 8-byte protocol preamble
+	// (_MSG_TOTAL_HEADER_SIZE = 30 = 8 + _MSG_REMAINING_HEADER_SIZE).
+	cmd.dataOffset = 8
+	cmd.WriteByte(_MSG_REMAINING_HEADER_SIZE)
+	cmd.WriteByte(byte((_INFO1_READ | _INFO1_NOBINDATA)))
+	cmd.WriteByte(byte(0))
+	cmd.WriteByte(byte(_INFO3_SC_READ_TYPE))
+	cmd.WriteByte(byte(_INFO4_MRT_VERIFY_READ))
+	cmd.WriteByte(0)
+	cmd.WriteInt32(0)
+	cmd.WriteInt32(0)
+	cmd.WriteInt32(0)
+	cmd.WriteInt16(int16(fieldCount))
+	cmd.WriteInt16(0) // no operations
+	cmd.dataOffset = int(_MSG_TOTAL_HEADER_SIZE)
+
+	if err := cmd.writeKey(key); err != nil {
+		return err
+	}
+
+	if err := cmd.writeFieldVersion(ver); err != nil {
+		return err
+	}
+	cmd.end()
+	return nil
+}
+
+// setBatchTxnVerifyForBatchNode builds a batch MRT version-verify command for
+// the keys belonging to a single batch node.
+func (cmd *baseCommand) setBatchTxnVerifyForBatchNode(
+	policy *BatchPolicy,
+	keys []*Key,
+	versions []*uint64,
+	batch *batchNode,
+) Error {
+	return cmd.setBatchTxnVerifyForOffsets(policy, keys, versions, newBatchOffsetsNative(batch))
+}
+
+// setBatchTxnVerifyForOffsets builds a batch MRT version-verify message for
+// the selected offsets. It makes two passes over the keys: one to estimate
+// the buffer size (honoring the repeat-entry compression where consecutive
+// keys share namespace/set/version), then one to write the actual entries.
+func (cmd *baseCommand) setBatchTxnVerifyForOffsets(
+	policy *BatchPolicy,
+	keys []*Key,
+	versions []*uint64,
+	offsets BatchOffsets,
+) Error {
+	// Estimate buffer size.
+	cmd.begin()
+
+	// Batch field
+	cmd.dataOffset += int(_FIELD_HEADER_SIZE + 5)
+
+	var keyPrev *Key
+	var verPrev *uint64
+
+	max := offsets.size()
+	for i := 0; i < max; i++ {
+		offset := offsets.get(i)
+		key := keys[offset]
+		ver := versions[offset]
+
+		cmd.dataOffset += len(key.digest) + 4
+
+		if canRepeatKeys(key, keyPrev, ver, verPrev) {
+			// Can set repeat previous namespace/bin names to save space.
+			cmd.dataOffset++
+		} else {
+			// Write full header and namespace/set/bin names.
+			cmd.dataOffset += 9 // header(4) + info4(1) + fieldCount(2) + opCount(2) = 9
+			cmd.dataOffset += len(key.namespace) + int(_FIELD_HEADER_SIZE)
+			cmd.dataOffset += len(key.setName) + int(_FIELD_HEADER_SIZE)
+
+			if ver != nil {
+				// Version field: 7 payload bytes plus the field header.
+				cmd.dataOffset += 7 + int(_FIELD_HEADER_SIZE)
+			}
+			keyPrev = key
+			verPrev = ver
+		}
+	}
+
+	if err := cmd.sizeBuffer(policy.compress()); err != nil {
+		return err
+	}
+
+	cmd.writeBatchHeader(policy, 1)
+
+	fieldSizeOffset := cmd.dataOffset
+	// Need to update size at end
+	cmd.writeFieldHeader(0, BATCH_INDEX)
+
+	cmd.WriteInt32(int32(max))
+	cmd.WriteByte(cmd.getBatchFlags(policy))
+	// Second pass: write the entries for real.
+	keyPrev = nil
+	verPrev = nil
+
+	for i := 0; i < max; i++ {
+		offset := offsets.get(i)
+		key := keys[offset]
+		ver := versions[offset]
+
+		cmd.WriteInt32(int32(offset))
+
+		digest := key.digest
+		copy(cmd.dataBuffer[cmd.dataOffset:], digest[:])
+		cmd.dataOffset += len(digest)
+
+		if canRepeatKeys(key, keyPrev, ver, verPrev) {
+			// Can set repeat previous namespace/bin names to save space.
+			cmd.WriteByte(_BATCH_MSG_REPEAT)
+		} else {
+			// Write full message.
+			cmd.WriteByte(byte(_BATCH_MSG_INFO | _BATCH_MSG_INFO4))
+			cmd.WriteByte(byte(_INFO1_READ | _INFO1_NOBINDATA))
+			cmd.WriteByte(byte(0))
+			cmd.WriteByte(byte(_INFO3_SC_READ_TYPE))
+			cmd.WriteByte(byte(_INFO4_MRT_VERIFY_READ))
+
+			fieldCount := 0
+
+			if ver != nil {
+				fieldCount++
+			}
+
+			if err := cmd.writeBatchFields(key, fieldCount, 0); err != nil {
+				return err
+			}
+
+			if ver != nil {
+				if err := cmd.writeFieldVersion(*ver); err != nil {
+					return err
+				}
+			}
+
+			keyPrev = key
+			verPrev = ver
+		}
+	}
+
+	// Write real field size.
+	cmd.WriteUint32At(uint32(cmd.dataOffset-int(_MSG_TOTAL_HEADER_SIZE)-4), fieldSizeOffset)
+	cmd.end()
+	cmd.markCompressed(policy)
+	return nil
+}
+
+// setTxnMarkRollForward writes the command that marks an MRT as rolled
+// forward by writing a marker bin ("fwd" = true) to the given monitor key.
+func (cmd *baseCommand) setTxnMarkRollForward(key *Key) Error {
+	bin := NewBin("fwd", true)
+
+	cmd.begin()
+	fieldCount := cmd.estimateRawKeySize(key)
+	cmd.estimateOperationSizeForBin(bin)
+	// Fix: propagate writeTxnMonitor's Error instead of silently dropping it
+	// (setTxnClose checks the same call).
+	if err := cmd.writeTxnMonitor(key, 0, _INFO2_WRITE, fieldCount, 1); err != nil {
+		return err
+	}
+	cmd.writeOperationForBin(bin, _WRITE)
+	cmd.end()
+	return nil
+}
+
+// setTxnRoll writes a single-record MRT roll command for the given key.
+// txnAttr is written into the info4 header byte and is expected to be one of
+// the _INFO4_MRT_ROLL_* flags — confirm at call sites (not visible here).
+func (cmd *baseCommand) setTxnRoll(key *Key, txn *Txn, txnAttr int) Error {
+	cmd.begin()
+	fieldCount := cmd.estimateRawKeySize(key)
+
+	// Reserve space for the MRT fields (txn id / version); hasWrite=false, so
+	// no deadline field — matches the writeTxn(txn, false) call below.
+	fieldCount += cmd.sizeTxn(key, txn, false)
+
+	if err := cmd.sizeBuffer(false); err != nil {
+		return err
+	}
+
+	// Header starts after the 8-byte protocol preamble
+	// (_MSG_TOTAL_HEADER_SIZE = 30 = 8 + _MSG_REMAINING_HEADER_SIZE).
+	cmd.dataOffset = 8
+	cmd.WriteByte(_MSG_REMAINING_HEADER_SIZE)
+	cmd.WriteByte(byte(0))
+	cmd.WriteByte(byte(_INFO2_WRITE | _INFO2_DURABLE_DELETE))
+	cmd.WriteByte(byte(0))
+	cmd.WriteByte(byte(txnAttr))
+	cmd.WriteByte(0)
+	cmd.WriteInt32(0)
+	cmd.WriteInt32(0)
+	cmd.WriteInt32(0)
+	cmd.WriteInt16(int16(fieldCount))
+	cmd.WriteInt16(0) // no operations
+	cmd.dataOffset = int(_MSG_TOTAL_HEADER_SIZE)
+
+	if err := cmd.writeKey(key); err != nil {
+		return err
+	}
+
+	cmd.writeTxn(txn, false)
+	cmd.end()
+	return nil
+}
+
+// setBatchTxnRoll builds a batch MRT roll (forward/back) command for the keys
+// belonging to a single batch node.
+func (cmd *baseCommand) setBatchTxnRoll(
+	policy *BatchPolicy,
+	txn *Txn,
+	keys []*Key,
+	batch *batchNode,
+	attr *batchAttr,
+) Error {
+	return cmd.setBatchTxnRollForOffsets(policy, txn, keys, attr, newBatchOffsetsNative(batch))
+}
+
+// setBatchTxnRollForOffsets builds a batch MRT roll message for the selected
+// offsets. Like the verify variant it runs two passes: one to estimate the
+// buffer size (honoring the repeat-entry compression for consecutive keys
+// with matching namespace/set/version) and one to write the entries.
+func (cmd *baseCommand) setBatchTxnRollForOffsets(
+	policy *BatchPolicy,
+	txn *Txn,
+	keys []*Key,
+	attr *batchAttr,
+	offsets BatchOffsets,
+) Error {
+	// Estimate buffer size.
+	cmd.begin()
+	fieldCount := 1
+	max := offsets.size()
+	versions := make([]*uint64, max)
+
+	// Look up each key's read version once up front. NOTE: both passes index
+	// versions by i (position within offsets), not by offset.
+	for i := 0; i < max; i++ {
+		offset := offsets.get(i)
+		versions[i] = txn.GetReadVersion(keys[offset])
+	}
+
+	// Batch field
+	cmd.dataOffset += int(_FIELD_HEADER_SIZE + 5)
+
+	var keyPrev *Key
+	var verPrev *uint64
+
+	for i := 0; i < max; i++ {
+		offset := offsets.get(i)
+		key := keys[offset]
+		ver := versions[i]
+
+		cmd.dataOffset += len(key.digest) + 4
+
+		if canRepeatKeys(key, keyPrev, ver, verPrev) {
+			// Can set repeat previous namespace/bin names to save space.
+			cmd.dataOffset++
+		} else {
+			// Write full header and namespace/set/bin names.
+			cmd.dataOffset += 12 // header(4) + ttl(4) + fieldCount(2) + opCount(2) = 12
+			cmd.dataOffset += len(key.namespace) + int(_FIELD_HEADER_SIZE)
+			cmd.dataOffset += len(key.setName) + int(_FIELD_HEADER_SIZE)
+			cmd.sizeTxnBatch(txn, ver, attr.hasWrite)
+			cmd.dataOffset += 2 // gen(2) = 2
+			keyPrev = key
+			verPrev = ver
+		}
+	}
+
+	if err := cmd.sizeBuffer(policy.compress()); err != nil {
+		return err
+	}
+
+	cmd.writeBatchHeader(policy, fieldCount)
+
+	fieldSizeOffset := cmd.dataOffset
+	cmd.writeFieldHeader(0, BATCH_INDEX) // Need to update size at end
+
+	cmd.WriteInt32(int32(max))
+	cmd.WriteByte(cmd.getBatchFlags(policy))
+	// Second pass: write the entries for real.
+	keyPrev = nil
+	verPrev = nil
+
+	for i := 0; i < max; i++ {
+		offset := offsets.get(i)
+		key := keys[offset]
+		ver := versions[i]
+
+		cmd.WriteInt32(int32(offset))
+
+		digest := key.digest
+		copy(cmd.dataBuffer[cmd.dataOffset:], digest[:])
+		cmd.dataOffset += len(digest)
+
+		if canRepeatKeys(key, keyPrev, ver, verPrev) {
+			// Can set repeat previous namespace/bin names to save space.
+			cmd.WriteByte(_BATCH_MSG_REPEAT)
+		} else {
+			// Write full message.
+			cmd.writeBatchWrite(key, txn, ver, attr, nil, 0, 0)
+			keyPrev = key
+			verPrev = ver
+		}
+	}
+
+	// Write real field size.
+	cmd.WriteUint32At(uint32(cmd.dataOffset-int(_MSG_TOTAL_HEADER_SIZE)-4), fieldSizeOffset)
+	cmd.end()
+	cmd.markCompressed(policy)
+	return nil
+}
+
+// setTxnClose writes the command that durably deletes the MRT transaction
+// monitor record once the transaction has completed.
+// NOTE(review): the txn parameter is unused in this body — confirm it is kept
+// only for call-site symmetry.
+func (cmd *baseCommand) setTxnClose(txn *Txn, key *Key) Error {
+	cmd.begin()
+	fieldCount := cmd.estimateRawKeySize(key)
+	if err := cmd.writeTxnMonitor(key, 0, _INFO2_WRITE|_INFO2_DELETE|_INFO2_DURABLE_DELETE, fieldCount, 0); err != nil {
+		return err
+	}
+	cmd.end()
+	return nil
+}
+
+// writeTxnMonitor sizes the data buffer and writes the message header plus the
+// key for commands addressed to the MRT transaction monitor record. The caller
+// must already have called cmd.begin() and accounted for field/operation sizes.
+func (cmd *baseCommand) writeTxnMonitor(key *Key, readAttr, writeAttr, fieldCount, opCount int) Error {
+	if err := cmd.sizeBuffer(false); err != nil {
+		return err
+	}
+	// Header starts after the 8-byte protocol preamble
+	// (_MSG_TOTAL_HEADER_SIZE = 30 = 8 + _MSG_REMAINING_HEADER_SIZE).
+	cmd.dataOffset = 8
+	cmd.WriteByte(_MSG_REMAINING_HEADER_SIZE)
+	cmd.WriteByte(byte(readAttr))
+	cmd.WriteByte(byte(writeAttr))
+	cmd.WriteByte(0)
+	cmd.WriteByte(0)
+	cmd.WriteByte(0)
+	cmd.WriteInt32(0)
+	cmd.WriteInt32(0)
+	cmd.WriteInt32(0)
+	cmd.WriteInt16(int16(fieldCount))
+	cmd.WriteInt16(int16(opCount))
+	cmd.dataOffset = int(_MSG_TOTAL_HEADER_SIZE)
+
+	return cmd.writeKey(key)
+}
+
+// sizeTxn adds the estimated sizes of the MRT fields (txn id, previously-read
+// record version, and — for writes with a deadline — the txn deadline) to
+// cmd.dataOffset and returns the number of fields added. As a side effect it
+// caches the key's read version on cmd.version for writeTxn to emit later.
+func (cmd *baseCommand) sizeTxn(key *Key, txn *Txn, hasWrite bool) int {
+	fieldCount := 0
+
+	if txn != nil {
+		// Txn id field: 8 payload bytes plus the field header.
+		cmd.dataOffset += 8 + int(_FIELD_HEADER_SIZE)
+		fieldCount++
+
+		cmd.version = txn.GetReadVersion(key)
+
+		if cmd.version != nil {
+			// Version field: 7 payload bytes plus the field header.
+			cmd.dataOffset += 7 + int(_FIELD_HEADER_SIZE)
+			fieldCount++
+		}
+
+		if hasWrite && txn.deadline != 0 {
+			// Deadline field: 4 payload bytes plus the field header.
+			cmd.dataOffset += 4 + int(_FIELD_HEADER_SIZE)
+			fieldCount++
+		}
+	}
+	return fieldCount
+}
+
+// sizeTxnBatch mirrors sizeTxn for one record inside a batch message: it
+// reserves space for the extra info4 byte plus the txn id field, the optional
+// version field when known, and the deadline field for writes. It returns
+// nothing — field counting for batch entries is done by the callers.
+func (cmd *baseCommand) sizeTxnBatch(txn *Txn, ver *uint64, hasWrite bool) {
+	if txn != nil {
+		cmd.dataOffset++ // Add info4 byte for MRT.
+		cmd.dataOffset += int(8 + _FIELD_HEADER_SIZE)
+
+		if ver != nil {
+			cmd.dataOffset += int(7 + _FIELD_HEADER_SIZE)
+		}
+
+		if hasWrite && txn.deadline != 0 {
+			cmd.dataOffset += int(4 + _FIELD_HEADER_SIZE)
+		}
+	}
+}
+
+// writeTxn emits the MRT fields reserved by sizeTxn: the transaction id, the
+// cached read version (cmd.version, set by sizeTxn), and — when sendDeadline
+// is true and a deadline is set — the txn deadline. Must be kept in sync with
+// sizeTxn's estimates.
+// NOTE(review): writeFieldVersion returns an Error elsewhere (see
+// setTxnVerify) but its result is dropped here — confirm this is intentional.
+func (cmd *baseCommand) writeTxn(txn *Txn, sendDeadline bool) {
+	if txn != nil {
+		cmd.writeFieldLE64(txn.Id(), MRT_ID)
+
+		if cmd.version != nil {
+			cmd.writeFieldVersion(*cmd.version)
+		}
+
+		if sendDeadline && txn.deadline != 0 {
+			cmd.writeFieldLE32(txn.deadline, MRT_DEADLINE)
+		}
+	}
+}
+
+//-------------------------------------------
+// Normal commands
+//-------------------------------------------
+
// Writes the command for write operations
func (cmd *baseCommand) setWrite(policy *WritePolicy, operation OperationType, key *Key, bins []*Bin, binMap BinMap) Error {
cmd.begin()
- fieldCount, err := cmd.estimateKeySize(key, policy.SendKey)
+ fieldCount, err := cmd.estimateKeySize(policy.GetBasePolicy(), key, true)
if err != nil {
return err
}
@@ -236,7 +700,7 @@ func (cmd *baseCommand) setWrite(policy *WritePolicy, operation OperationType, k
cmd.writeHeaderWrite(policy, _INFO2_WRITE, fieldCount, len(binMap))
}
- if err := cmd.writeKey(key, policy.SendKey); err != nil {
+ if err := cmd.writeKeyWithPolicy(&policy.BasePolicy, key, true); err != nil {
return err
}
@@ -267,9 +731,9 @@ func (cmd *baseCommand) setWrite(policy *WritePolicy, operation OperationType, k
}
// Writes the command for delete operations
-func (cmd *baseCommand) setDelete(policy *WritePolicy, key *Key) Error {
+func (cmd *baseCommand) setDelete(policy *WritePolicy, key *Key) (err Error) {
cmd.begin()
- fieldCount, err := cmd.estimateKeySize(key, false)
+ fieldCount, err := cmd.estimateKeySize(&policy.BasePolicy, key, true)
if err != nil {
return err
}
@@ -289,9 +753,10 @@ func (cmd *baseCommand) setDelete(policy *WritePolicy, key *Key) Error {
return err
}
cmd.writeHeaderWrite(policy, _INFO2_WRITE|_INFO2_DELETE, fieldCount, 0)
- if err := cmd.writeKey(key, false); err != nil {
+ if err := cmd.writeKeyWithPolicy(&policy.BasePolicy, key, true); err != nil {
return err
}
+
if policy.FilterExpression != nil {
if err := cmd.writeFilterExpression(policy.FilterExpression, predSize); err != nil {
return err
@@ -307,7 +772,7 @@ func (cmd *baseCommand) setDelete(policy *WritePolicy, key *Key) Error {
// Writes the command for touch operations
func (cmd *baseCommand) setTouch(policy *WritePolicy, key *Key) Error {
cmd.begin()
- fieldCount, err := cmd.estimateKeySize(key, policy.SendKey)
+ fieldCount, err := cmd.estimateKeySize(&policy.BasePolicy, key, true)
if err != nil {
return err
}
@@ -328,7 +793,7 @@ func (cmd *baseCommand) setTouch(policy *WritePolicy, key *Key) Error {
return err
}
cmd.writeHeaderWrite(policy, _INFO2_WRITE, fieldCount, 1)
- if err := cmd.writeKey(key, policy.SendKey); err != nil {
+ if err := cmd.writeKeyWithPolicy(&policy.BasePolicy, key, true); err != nil {
return err
}
if policy.FilterExpression != nil {
@@ -343,9 +808,9 @@ func (cmd *baseCommand) setTouch(policy *WritePolicy, key *Key) Error {
}
// Writes the command for exist operations
-func (cmd *baseCommand) setExists(policy *BasePolicy, key *Key) Error {
+func (cmd *baseCommand) setExists(policy *BasePolicy, key *Key) (err Error) {
cmd.begin()
- fieldCount, err := cmd.estimateKeySize(key, false)
+ fieldCount, err := cmd.estimateKeySize(policy, key, false)
if err != nil {
return err
}
@@ -365,7 +830,7 @@ func (cmd *baseCommand) setExists(policy *BasePolicy, key *Key) Error {
return err
}
cmd.writeHeaderReadHeader(policy, _INFO1_READ|_INFO1_NOBINDATA, fieldCount, 0)
- if err := cmd.writeKey(key, false); err != nil {
+ if err := cmd.writeKeyWithPolicy(policy, key, false); err != nil {
return err
}
if policy.FilterExpression != nil {
@@ -379,9 +844,9 @@ func (cmd *baseCommand) setExists(policy *BasePolicy, key *Key) Error {
}
// Writes the command for get operations (all bins)
-func (cmd *baseCommand) setReadForKeyOnly(policy *BasePolicy, key *Key) Error {
+func (cmd *baseCommand) setReadForKeyOnly(policy *BasePolicy, key *Key) (err Error) {
cmd.begin()
- fieldCount, err := cmd.estimateKeySize(key, false)
+ fieldCount, err := cmd.estimateKeySize(policy, key, false)
if err != nil {
return err
}
@@ -400,7 +865,7 @@ func (cmd *baseCommand) setReadForKeyOnly(policy *BasePolicy, key *Key) Error {
}
cmd.writeHeaderRead(policy, _INFO1_READ|_INFO1_GET_ALL, 0, 0, fieldCount, 0)
- if err := cmd.writeKey(key, false); err != nil {
+ if err := cmd.writeKeyWithPolicy(policy, key, false); err != nil {
return err
}
if policy.FilterExpression != nil {
@@ -416,10 +881,10 @@ func (cmd *baseCommand) setReadForKeyOnly(policy *BasePolicy, key *Key) Error {
}
// Writes the command for get operations (specified bins)
-func (cmd *baseCommand) setRead(policy *BasePolicy, key *Key, binNames []string) Error {
+func (cmd *baseCommand) setRead(policy *BasePolicy, key *Key, binNames []string) (err Error) {
if len(binNames) > 0 {
cmd.begin()
- fieldCount, err := cmd.estimateKeySize(key, false)
+ fieldCount, err := cmd.estimateKeySize(policy, key, false)
if err != nil {
return err
}
@@ -448,7 +913,7 @@ func (cmd *baseCommand) setRead(policy *BasePolicy, key *Key, binNames []string)
}
cmd.writeHeaderRead(policy, attr, 0, 0, fieldCount, len(binNames))
- if err := cmd.writeKey(key, false); err != nil {
+ if err := cmd.writeKeyWithPolicy(policy, key, false); err != nil {
return err
}
@@ -469,12 +934,9 @@ func (cmd *baseCommand) setRead(policy *BasePolicy, key *Key, binNames []string)
}
// Writes the command for getting metadata operations
-func (cmd *baseCommand) setReadHeader(policy *BasePolicy, key *Key) Error {
+func (cmd *baseCommand) setReadHeader(policy *BasePolicy, key *Key) (err Error) {
cmd.begin()
- fieldCount, err := cmd.estimateKeySize(key, false)
- if err != nil {
- return err
- }
+ fieldCount := cmd.estimateRawKeySize(key)
predSize := 0
if policy.FilterExpression != nil {
@@ -492,7 +954,7 @@ func (cmd *baseCommand) setReadHeader(policy *BasePolicy, key *Key) Error {
}
cmd.writeHeaderReadHeader(policy, _INFO1_READ|_INFO1_NOBINDATA, fieldCount, 0)
- if err := cmd.writeKey(key, false); err != nil {
+ if err := cmd.writeKeyWithPolicy(policy, key, false); err != nil {
return err
}
if policy.FilterExpression != nil {
@@ -522,7 +984,7 @@ func (cmd *baseCommand) setOperate(policy *WritePolicy, key *Key, args *operateA
}
}
- ksz, err := cmd.estimateKeySize(key, policy.SendKey && args.hasWrite)
+ ksz, err := cmd.estimateKeySize(&policy.BasePolicy, key, args.hasWrite)
if err != nil {
return err
}
@@ -545,7 +1007,7 @@ func (cmd *baseCommand) setOperate(policy *WritePolicy, key *Key, args *operateA
cmd.writeHeaderReadWrite(policy, args, fieldCount)
- if err := cmd.writeKey(key, policy.SendKey && args.hasWrite); err != nil {
+ if err := cmd.writeKeyWithPolicy(&policy.BasePolicy, key, args.hasWrite); err != nil {
return err
}
@@ -569,7 +1031,7 @@ func (cmd *baseCommand) setOperate(policy *WritePolicy, key *Key, args *operateA
func (cmd *baseCommand) setUdf(policy *WritePolicy, key *Key, packageName string, functionName string, args *ValueArray) Error {
cmd.begin()
- fieldCount, err := cmd.estimateKeySize(key, policy.SendKey)
+ fieldCount, err := cmd.estimateKeySize(&policy.BasePolicy, key, true)
if err != nil {
return err
}
@@ -596,7 +1058,7 @@ func (cmd *baseCommand) setUdf(policy *WritePolicy, key *Key, packageName string
}
cmd.writeHeaderWrite(policy, _INFO2_WRITE, fieldCount, 0)
- if err := cmd.writeKey(key, policy.SendKey); err != nil {
+ if err := cmd.writeKeyWithPolicy(&policy.BasePolicy, key, true); err != nil {
return err
}
if policy.FilterExpression != nil {
@@ -615,12 +1077,39 @@ func (cmd *baseCommand) setUdf(policy *WritePolicy, key *Key, packageName string
return nil
}
-func (cmd *baseCommand) setBatchOperateIfc(client ClientIfc, policy *BatchPolicy, records []BatchRecordIfc, batch *batchNode) (*batchAttr, Error) {
- offsets := batch.offsets
- max := len(batch.offsets)
+func (cmd *baseCommand) setBatchOperateIfc(
+ client *Client,
+ policy *BatchPolicy,
+ records []BatchRecordIfc,
+ batch *batchNode,
+) (*batchAttr, Error) {
+ offsets := newBatchOffsetsNative(batch)
+ return cmd.setBatchOperateIfcOffsets(client, policy, records, offsets)
+}
+
+func (cmd *baseCommand) setBatchOperateIfcOffsets(
+ client *Client,
+ policy *BatchPolicy,
+ records []BatchRecordIfc,
+ offsets BatchOffsets,
+) (*batchAttr, Error) {
+ max := offsets.size()
+ txn := policy.Txn
+ var versions []*uint64
// Estimate buffer size
cmd.begin()
+
+ if txn != nil {
+ versions = make([]*uint64, max)
+
+ for i := 0; i < max; i++ {
+ offset := offsets.get(i)
+ record := records[offset]
+ versions[i] = txn.GetReadVersion(record.key())
+ }
+ }
+
fieldCount := 1
predSize := 0
if policy.FilterExpression != nil {
@@ -638,13 +1127,21 @@ func (cmd *baseCommand) setBatchOperateIfc(client ClientIfc, policy *BatchPolicy
cmd.dataOffset += int(_FIELD_HEADER_SIZE) + 5
var prev BatchRecordIfc
+ var verPrev *uint64
for i := 0; i < max; i++ {
- record := records[offsets[i]]
+ record := records[offsets.get(i)]
key := record.key()
+
+ var ver *uint64
+ if len(versions) > 0 {
+ ver = versions[i]
+ }
+
cmd.dataOffset += len(key.digest) + 4
// Try reference equality in hope that namespace/set for all keys is set from fixed variables.
- if !policy.SendKey && prev != nil && prev.key().namespace == key.namespace && (prev.key().setName == key.setName) && record.equals(prev) {
+ // if !policy.SendKey && prev != nil && prev.key().namespace == key.namespace && (prev.key().setName == key.setName) && record.equals(prev) {
+ if canRepeat(policy, key, record, prev, ver, verPrev) {
// Can set repeat previous namespace/bin names to save space.
cmd.dataOffset++
} else {
@@ -652,7 +1149,7 @@ func (cmd *baseCommand) setBatchOperateIfc(client ClientIfc, policy *BatchPolicy
cmd.dataOffset += 12 // header(4) + ttl(4) + fielCount(2) + opCount(2) = 12
cmd.dataOffset += len(key.namespace) + int(_FIELD_HEADER_SIZE)
cmd.dataOffset += len(key.setName) + int(_FIELD_HEADER_SIZE)
-
+ cmd.sizeTxnBatch(txn, ver, record.BatchRec().hasWrite)
if sz, err := record.size(&policy.BasePolicy); err != nil {
return nil, err
} else {
@@ -660,6 +1157,7 @@ func (cmd *baseCommand) setBatchOperateIfc(client ClientIfc, policy *BatchPolicy
}
prev = record
+ verPrev = ver
}
}
@@ -686,18 +1184,26 @@ func (cmd *baseCommand) setBatchOperateIfc(client ClientIfc, policy *BatchPolicy
attr := &batchAttr{}
prev = nil
+ verPrev = nil
for i := 0; i < max; i++ {
- index := offsets[i]
+ index := offsets.get(i)
cmd.WriteUint32(uint32(index))
record := records[index]
+
+ var ver *uint64
+ if len(versions) > 0 {
+ ver = versions[i]
+ }
+
key := record.key()
if _, err := cmd.Write(key.digest[:]); err != nil {
return nil, newCommonError(err)
}
// Try reference equality in hope that namespace/set for all keys is set from fixed variables.
- if !policy.SendKey && prev != nil && prev.key().namespace == key.namespace && prev.key().setName == key.setName && record.equals(prev) {
+ // if !policy.SendKey && prev != nil && prev.key().namespace == key.namespace && prev.key().setName == key.setName && record.equals(prev) {
+ if canRepeat(policy, key, record, prev, ver, verPrev) {
// Can set repeat previous namespace/bin names to save space.
cmd.WriteByte(_BATCH_MSG_REPEAT) // repeat
} else {
@@ -708,13 +1214,13 @@ func (cmd *baseCommand) setBatchOperateIfc(client ClientIfc, policy *BatchPolicy
attr.setBatchRead(client.getUsableBatchReadPolicy(br.Policy))
if len(br.BinNames) > 0 {
- cmd.writeBatchBinNames(key, br.BinNames, attr, attr.filterExp)
+ cmd.writeBatchBinNames(key, txn, ver, br.BinNames, attr, attr.filterExp)
} else if br.Ops != nil {
attr.adjustRead(br.Ops)
- cmd.writeBatchOperations(key, br.Ops, attr, attr.filterExp)
+ cmd.writeBatchOperations(key, txn, ver, br.Ops, attr, attr.filterExp)
} else {
attr.adjustReadForAllBins(br.ReadAllBins)
- cmd.writeBatchRead(key, attr, attr.filterExp, 0)
+ cmd.writeBatchRead(key, txn, ver, attr, attr.filterExp, 0)
}
case _BRT_BATCH_WRITE:
@@ -722,13 +1228,13 @@ func (cmd *baseCommand) setBatchOperateIfc(client ClientIfc, policy *BatchPolicy
attr.setBatchWrite(client.getUsableBatchWritePolicy(bw.Policy))
attr.adjustWrite(bw.Ops)
- cmd.writeBatchOperations(key, bw.Ops, attr, attr.filterExp)
+ cmd.writeBatchOperations(key, txn, ver, bw.Ops, attr, attr.filterExp)
case _BRT_BATCH_UDF:
bu := record.(*BatchUDF)
attr.setBatchUDF(client.getUsableBatchUDFPolicy(bu.Policy))
- cmd.writeBatchWrite(key, attr, attr.filterExp, 3, 0)
+ cmd.writeBatchWrite(key, txn, ver, attr, attr.filterExp, 3, 0)
cmd.writeFieldString(bu.PackageName, UDF_PACKAGE_NAME)
cmd.writeFieldString(bu.FunctionName, UDF_FUNCTION)
cmd.writeFieldBytes(bu.argBytes, UDF_ARGLIST)
@@ -737,9 +1243,10 @@ func (cmd *baseCommand) setBatchOperateIfc(client ClientIfc, policy *BatchPolicy
bd := record.(*BatchDelete)
attr.setBatchDelete(client.getUsableBatchDeletePolicy(bd.Policy))
- cmd.writeBatchWrite(key, attr, attr.filterExp, 0, 0)
+ cmd.writeBatchWrite(key, txn, ver, attr, attr.filterExp, 0, 0)
}
prev = record
+ verPrev = ver
}
}
@@ -751,19 +1258,50 @@ func (cmd *baseCommand) setBatchOperateIfc(client ClientIfc, policy *BatchPolicy
}
-func (cmd *baseCommand) setBatchOperate(policy *BatchPolicy, keys []*Key, batch *batchNode, binNames []string, ops []*Operation, attr *batchAttr) Error {
- offsets := batch.offsets
- max := len(batch.offsets)
+func (cmd *baseCommand) setBatchOperate(
+ policy *BatchPolicy,
+ keys []*Key,
+ batch *batchNode,
+ binNames []string,
+ ops []*Operation,
+ attr *batchAttr,
+) Error {
+ offsets := newBatchOffsetsNative(batch)
+ return cmd.setBatchOperateOffsets(policy, keys, binNames, ops, attr, offsets)
+}
+
+func (cmd *baseCommand) setBatchOperateOffsets(
+ policy *BatchPolicy,
+ keys []*Key,
+ binNames []string,
+ ops []*Operation,
+ attr *batchAttr,
+ offsets BatchOffsets,
+) Error {
+ max := offsets.size()
+ txn := policy.Txn
+ var versions []*uint64
+
// Estimate buffer size
cmd.begin()
- fieldCount := 1
- predSize := 0
+
+ if txn != nil {
+ versions = make([]*uint64, max)
+
+ for i := 0; i < max; i++ {
+ offset := offsets.get(i)
+ key := keys[offset]
+ versions[i] = txn.GetReadVersion(key)
+ }
+ }
exp := policy.FilterExpression
if attr.filterExp != nil {
exp = attr.filterExp
}
+ fieldCount := 1
+ predSize := 0
if exp != nil {
var err Error
predSize, err = cmd.estimateExpressionSize(exp)
@@ -778,13 +1316,19 @@ func (cmd *baseCommand) setBatchOperate(policy *BatchPolicy, keys []*Key, batch
cmd.dataOffset += int(_FIELD_HEADER_SIZE) + 5
- var prev *Key
+ var keyPrev *Key
+ var verPrev *uint64
for i := 0; i < max; i++ {
- key := keys[offsets[i]]
+ key := keys[offsets.get(i)]
+ var ver *uint64
+ if len(versions) > 0 {
+ ver = versions[i]
+ }
cmd.dataOffset += len(key.digest) + 4
// Try reference equality in hope that namespace/set for all keys is set from fixed variables.
- if !attr.sendKey && prev != nil && prev.namespace == key.namespace && (prev.setName == key.setName) {
+ // if !attr.sendKey && keyPrev != nil && keyPrev.namespace == key.namespace && (keyPrev.setName == key.setName) {
+ if canRepeatAttr(attr, key, keyPrev, ver, verPrev) {
// Can set repeat previous namespace/bin names to save space.
cmd.dataOffset++
} else {
@@ -792,8 +1336,9 @@ func (cmd *baseCommand) setBatchOperate(policy *BatchPolicy, keys []*Key, batch
cmd.dataOffset += 12 // header(4) + ttl(4) + fielCount(2) + opCount(2) = 12
cmd.dataOffset += len(key.namespace) + int(_FIELD_HEADER_SIZE)
cmd.dataOffset += len(key.setName) + int(_FIELD_HEADER_SIZE)
+ cmd.sizeTxnBatch(txn, ver, attr.hasWrite)
- if attr.sendKey {
+ if attr.sendKey && key.hasValueToSend() {
if sz, err := key.userKey.EstimateSize(); err != nil {
return err
} else {
@@ -822,7 +1367,8 @@ func (cmd *baseCommand) setBatchOperate(policy *BatchPolicy, keys []*Key, batch
cmd.dataOffset += 2 // Extra write specific fields.
}
- prev = key
+ keyPrev = key
+ verPrev = ver
}
}
@@ -846,9 +1392,15 @@ func (cmd *baseCommand) setBatchOperate(policy *BatchPolicy, keys []*Key, batch
cmd.WriteByte(cmd.getBatchFlags(policy))
- prev = nil
+ keyPrev = nil
+ verPrev = nil
for i := 0; i < max; i++ {
- index := offsets[i]
+ index := offsets.get(i)
+ var ver *uint64
+ if len(versions) > 0 {
+ ver = versions[i]
+ }
+
cmd.WriteUint32(uint32(index))
key := keys[index]
@@ -857,22 +1409,23 @@ func (cmd *baseCommand) setBatchOperate(policy *BatchPolicy, keys []*Key, batch
}
// Try reference equality in hope that namespace/set for all keys is set from fixed variables.
- if !attr.sendKey && prev != nil && prev.namespace == key.namespace && prev.setName == key.setName {
+ if canRepeatAttr(attr, key, keyPrev, ver, verPrev) {
// Can set repeat previous namespace/bin names to save space.
cmd.WriteByte(_BATCH_MSG_REPEAT) // repeat
} else {
// Write full header, namespace and bin names.
if len(binNames) > 0 {
- cmd.writeBatchBinNames(key, binNames, attr, nil)
+ cmd.writeBatchBinNames(key, txn, ver, binNames, attr, nil)
} else if len(ops) > 0 {
- cmd.writeBatchOperations(key, ops, attr, nil)
+ cmd.writeBatchOperations(key, txn, ver, ops, attr, nil)
} else if (attr.writeAttr & _INFO2_DELETE) != 0 {
- cmd.writeBatchWrite(key, attr, nil, 0, 0)
+ cmd.writeBatchWrite(key, txn, ver, attr, nil, 0, 0)
} else {
- cmd.writeBatchRead(key, attr, nil, 0)
+ cmd.writeBatchRead(key, txn, ver, attr, nil, 0)
}
- prev = key
+ keyPrev = key
+ verPrev = ver
}
}
@@ -883,12 +1436,51 @@ func (cmd *baseCommand) setBatchOperate(policy *BatchPolicy, keys []*Key, batch
return nil
}
-func (cmd *baseCommand) setBatchUDF(policy *BatchPolicy, keys []*Key, batch *batchNode, packageName, functionName string, args ValueArray, attr *batchAttr) Error {
- offsets := batch.offsets
- max := len(batch.offsets)
+func (cmd *baseCommand) setBatchUDF(
+ policy *BatchPolicy,
+ keys []*Key,
+ batch *batchNode,
+ packageName, functionName string,
+ args ValueArray,
+ attr *batchAttr,
+) Error {
+ offsets := newBatchOffsetsNative(batch)
+ return cmd.setBatchUDFOffsets(
+ policy,
+ keys,
+ packageName,
+ functionName,
+ args,
+ attr,
+ offsets,
+ )
+}
+
+func (cmd *baseCommand) setBatchUDFOffsets(
+ policy *BatchPolicy,
+ keys []*Key,
+ packageName, functionName string,
+ args ValueArray,
+ attr *batchAttr,
+ offsets BatchOffsets,
+) Error {
+ max := offsets.size()
+ txn := policy.Txn
+ var versions []*uint64
// Estimate buffer size
cmd.begin()
+
+ if txn != nil {
+ versions = make([]*uint64, max)
+
+ for i := 0; i < max; i++ {
+ offset := offsets.get(i)
+ key := keys[offset]
+ versions[i] = txn.GetReadVersion(key)
+ }
+ }
+
fieldCount := 1
predSize := 0
if policy.FilterExpression != nil {
@@ -905,13 +1497,20 @@ func (cmd *baseCommand) setBatchUDF(policy *BatchPolicy, keys []*Key, batch *bat
cmd.dataOffset += int(_FIELD_HEADER_SIZE) + 5
- var prev *Key
+ var keyPrev *Key
+ var verPrev *uint64
for i := 0; i < max; i++ {
- key := keys[offsets[i]]
+ index := offsets.get(i)
+ key := keys[index]
cmd.dataOffset += len(key.digest) + 4
+ var ver *uint64
+ if len(versions) > 0 {
+ ver = versions[i]
+ }
+
// Try reference equality in hope that namespace/set for all keys is set from fixed variables.
- if !attr.sendKey && prev != nil && prev.namespace == key.namespace && (prev.setName == key.setName) {
+ if canRepeatAttr(attr, key, keyPrev, ver, verPrev) {
// Can set repeat previous namespace/bin names to save space.
cmd.dataOffset++
} else {
@@ -919,8 +1518,9 @@ func (cmd *baseCommand) setBatchUDF(policy *BatchPolicy, keys []*Key, batch *bat
cmd.dataOffset += 12 // header(4) + ttl(4) + fieldCount(2) + opCount(2) = 12
cmd.dataOffset += len(key.namespace) + int(_FIELD_HEADER_SIZE)
cmd.dataOffset += len(key.setName) + int(_FIELD_HEADER_SIZE)
+ cmd.sizeTxnBatch(txn, ver, attr.hasWrite)
- if attr.sendKey {
+ if attr.sendKey && key.hasValueToSend() {
if sz, err := key.userKey.EstimateSize(); err != nil {
return err
} else {
@@ -935,7 +1535,8 @@ func (cmd *baseCommand) setBatchUDF(policy *BatchPolicy, keys []*Key, batch *bat
cmd.dataOffset += sz
}
- prev = key
+ keyPrev = key
+ verPrev = ver
}
}
@@ -959,28 +1560,36 @@ func (cmd *baseCommand) setBatchUDF(policy *BatchPolicy, keys []*Key, batch *bat
cmd.WriteByte(cmd.getBatchFlags(policy))
- prev = nil
+ keyPrev = nil
+ verPrev = nil
for i := 0; i < max; i++ {
- index := offsets[i]
+ index := offsets.get(i)
cmd.WriteUint32(uint32(index))
+ var ver *uint64
+ if len(versions) > 0 {
+ ver = versions[i]
+ }
+
key := keys[index]
if _, err := cmd.Write(key.digest[:]); err != nil {
return newCommonError(err)
}
// Try reference equality in hope that namespace/set for all keys is set from fixed variables.
- if !attr.sendKey && prev != nil && prev.namespace == key.namespace && prev.setName == key.setName {
+ // if !attr.sendKey && keyPrev != nil && keyPrev.namespace == key.namespace && keyPrev.setName == key.setName {
+ if canRepeatAttr(attr, key, keyPrev, ver, verPrev) {
// Can set repeat previous namespace/bin names to save space.
cmd.WriteByte(_BATCH_MSG_REPEAT) // repeat
} else {
- cmd.writeBatchWrite(key, attr, nil, 3, 0)
+ cmd.writeBatchWrite(key, txn, ver, attr, nil, 3, 0)
cmd.writeFieldString(packageName, UDF_PACKAGE_NAME)
cmd.writeFieldString(functionName, UDF_FUNCTION)
if err := cmd.writeUdfArgs(&args); err != nil {
return err
}
- prev = key
+ keyPrev = key
+ verPrev = ver
}
}
@@ -1017,19 +1626,33 @@ func (cmd *baseCommand) writeBatchHeader(policy *BatchPolicy, fieldCount int) {
// cmd.dataOffset = int(_MSG_TOTAL_HEADER_SIZE)
}
-func (cmd *baseCommand) writeBatchBinNames(key *Key, binNames []string, attr *batchAttr, filter *Expression) {
- cmd.writeBatchRead(key, attr, filter, len(binNames))
+func (cmd *baseCommand) writeBatchBinNames(
+ key *Key,
+ txn *Txn,
+ ver *uint64,
+ binNames []string,
+ attr *batchAttr,
+ filter *Expression,
+) {
+ cmd.writeBatchRead(key, txn, ver, attr, filter, len(binNames))
for i := range binNames {
cmd.writeOperationForBinName(binNames[i], _READ)
}
}
-func (cmd *baseCommand) writeBatchOperations(key *Key, ops []*Operation, attr *batchAttr, filter *Expression) {
+func (cmd *baseCommand) writeBatchOperations(
+ key *Key,
+ txn *Txn,
+ ver *uint64,
+ ops []*Operation,
+ attr *batchAttr,
+ filter *Expression,
+) {
if attr.hasWrite {
- cmd.writeBatchWrite(key, attr, filter, 0, len(ops))
+ cmd.writeBatchWrite(key, txn, ver, attr, filter, 0, len(ops))
} else {
- cmd.writeBatchRead(key, attr, filter, len(ops))
+ cmd.writeBatchRead(key, txn, ver, attr, filter, len(ops))
}
for i := range ops {
@@ -1037,29 +1660,58 @@ func (cmd *baseCommand) writeBatchOperations(key *Key, ops []*Operation, attr *b
}
}
-func (cmd *baseCommand) writeBatchRead(key *Key, attr *batchAttr, filter *Expression, opCount int) {
- cmd.WriteByte(_BATCH_MSG_INFO | _BATCH_MSG_TTL)
- cmd.WriteByte(byte(attr.readAttr))
- cmd.WriteByte(byte(attr.writeAttr))
- cmd.WriteByte(byte(attr.infoAttr))
- cmd.WriteUint32(attr.expiration)
- cmd.writeBatchFieldsWithFilter(key, filter, 0, opCount)
-}
-
-func (cmd *baseCommand) writeBatchWrite(key *Key, attr *batchAttr, filter *Expression, fieldCount, opCount int) {
- cmd.WriteByte(_BATCH_MSG_INFO | _BATCH_MSG_GEN | _BATCH_MSG_TTL)
- cmd.WriteByte(byte(attr.readAttr))
- cmd.WriteByte(byte(attr.writeAttr))
- cmd.WriteByte(byte(attr.infoAttr))
- cmd.WriteUint16(uint16(attr.generation))
- cmd.WriteUint32(attr.expiration)
-
- if attr.sendKey {
- fieldCount++
- cmd.writeBatchFieldsWithFilter(key, filter, fieldCount, opCount)
- cmd.writeFieldValue(key.userKey, KEY)
+func (cmd *baseCommand) writeBatchRead(
+ key *Key,
+ txn *Txn,
+ ver *uint64,
+ attr *batchAttr,
+ filter *Expression,
+ opCount int,
+) {
+ if txn != nil {
+ cmd.WriteByte(_BATCH_MSG_INFO | _BATCH_MSG_INFO4 | _BATCH_MSG_TTL)
+ cmd.WriteByte(byte(attr.readAttr))
+ cmd.WriteByte(byte(attr.writeAttr))
+ cmd.WriteByte(byte(attr.infoAttr))
+ cmd.WriteByte(byte(attr.txnAttr))
+ cmd.WriteUint32(attr.expiration)
+ cmd.writeBatchFieldsTxn(key, txn, ver, attr, filter, 0, opCount)
+ } else {
+ cmd.WriteByte(_BATCH_MSG_INFO | _BATCH_MSG_TTL)
+ cmd.WriteByte(byte(attr.readAttr))
+ cmd.WriteByte(byte(attr.writeAttr))
+ cmd.WriteByte(byte(attr.infoAttr))
+ cmd.WriteUint32(attr.expiration)
+ cmd.writeBatchFieldsWithFilter(key, filter, 0, opCount)
+ }
+}
+
+func (cmd *baseCommand) writeBatchWrite(
+ key *Key,
+ txn *Txn,
+ ver *uint64,
+ attr *batchAttr,
+ filter *Expression,
+ fieldCount,
+ opCount int,
+) {
+ if txn != nil {
+ cmd.WriteByte(_BATCH_MSG_INFO | _BATCH_MSG_INFO4 | _BATCH_MSG_GEN | _BATCH_MSG_TTL)
+ cmd.WriteByte(byte(attr.readAttr))
+ cmd.WriteByte(byte(attr.writeAttr))
+ cmd.WriteByte(byte(attr.infoAttr))
+ cmd.WriteByte(byte(attr.txnAttr))
+ cmd.WriteUint16(uint16(attr.generation)) // Note the reduced size of the gen field
+ cmd.WriteUint32(attr.expiration)
+ cmd.writeBatchFieldsTxn(key, txn, ver, attr, filter, fieldCount, opCount)
} else {
- cmd.writeBatchFieldsWithFilter(key, filter, fieldCount, opCount)
+ cmd.WriteByte(_BATCH_MSG_INFO | _BATCH_MSG_GEN | _BATCH_MSG_TTL)
+ cmd.WriteByte(byte(attr.readAttr))
+ cmd.WriteByte(byte(attr.writeAttr))
+ cmd.WriteByte(byte(attr.infoAttr))
+ cmd.WriteUint16(uint16(attr.generation))
+ cmd.WriteUint32(attr.expiration)
+ cmd.writeBatchFieldsReg(key, attr, filter, fieldCount, opCount)
}
}
@@ -1351,6 +2003,56 @@ func (cmd *baseCommand) setBatchIndexRead(policy *BatchPolicy, records []*BatchR
return nil
}
+func (cmd *baseCommand) writeBatchFieldsTxn(
+ key *Key,
+ txn *Txn,
+ ver *uint64,
+ attr *batchAttr,
+ filter *Expression,
+ fieldCount, opCount int,
+) Error {
+ fieldCount++
+ if ver != nil {
+ fieldCount++
+ }
+
+ if attr.hasWrite && txn.deadline != 0 {
+ fieldCount++
+ }
+
+ if filter != nil {
+ fieldCount++
+ }
+
+ if attr.sendKey && key.hasValueToSend() {
+ fieldCount++
+ }
+
+ if err := cmd.writeBatchFields(key, fieldCount, opCount); err != nil {
+ return err
+ }
+
+ cmd.writeFieldLE64(txn.Id(), MRT_ID)
+
+ if ver != nil {
+ cmd.writeFieldVersion(*ver)
+ }
+
+ if attr.hasWrite && txn.deadline != 0 {
+ cmd.writeFieldLE32(txn.deadline, MRT_DEADLINE)
+ }
+
+ if filter != nil {
+ filter.pack(cmd)
+ }
+
+ if attr.sendKey && key.hasValueToSend() {
+ cmd.writeFieldValue(key.userKey, KEY)
+ }
+
+ return nil
+}
+
func (cmd *baseCommand) writeBatchFieldsWithFilter(key *Key, filter *Expression, fieldCount, opCount int) Error {
if filter != nil {
fieldCount++
@@ -1368,6 +2070,33 @@ func (cmd *baseCommand) writeBatchFieldsWithFilter(key *Key, filter *Expression,
return nil
}
+func (cmd *baseCommand) writeBatchFieldsReg(
+ key *Key,
+ attr *batchAttr,
+ filter *Expression,
+ fieldCount,
+ opCount int,
+) Error {
+ if filter != nil {
+ fieldCount++
+ }
+
+ if attr.sendKey && key.hasValueToSend() {
+ fieldCount++
+ }
+
+ cmd.writeBatchFields(key, fieldCount, opCount)
+
+ if filter != nil {
+ filter.pack(cmd)
+ }
+
+ if attr.sendKey && key.hasValueToSend() {
+ cmd.writeFieldValue(key.userKey, KEY)
+ }
+ return nil
+}
+
func (cmd *baseCommand) writeBatchFields(key *Key, fieldCount, opCount int) Error {
fieldCount += 2
cmd.WriteUint16(uint16(fieldCount))
@@ -1382,7 +2111,6 @@ func (cmd *baseCommand) setScan(policy *ScanPolicy, namespace *string, setName *
cmd.begin()
fieldCount := 0
- // for grpc
partsFullSize := 0
partsPartialSize := 0
maxRecords := int64(0)
@@ -1505,7 +2233,7 @@ func (cmd *baseCommand) setScan(policy *ScanPolicy, namespace *string, setName *
cmd.writeFieldHeader(4, SOCKET_TIMEOUT)
cmd.WriteInt32(int32(policy.SocketTimeout / time.Millisecond)) // in milliseconds
- cmd.writeFieldHeader(8, TRAN_ID)
+ cmd.writeFieldHeader(8, QUERY_ID)
cmd.WriteUint64(taskID)
for i := range binNames {
@@ -1748,7 +2476,7 @@ func (cmd *baseCommand) setQuery(policy *QueryPolicy, wpolicy *WritePolicy, stat
cmd.writeFieldString(statement.SetName, TABLE)
}
- cmd.writeFieldHeader(8, TRAN_ID)
+ cmd.writeFieldHeader(8, QUERY_ID)
cmd.WriteUint64(taskID)
if statement.Filter != nil {
@@ -1870,23 +2598,29 @@ func (cmd *baseCommand) setQuery(policy *QueryPolicy, wpolicy *WritePolicy, stat
return nil
}
-func (cmd *baseCommand) estimateKeySize(key *Key, sendKey bool) (int, Error) {
- fieldCount := 0
-
- if key.namespace != "" {
- cmd.dataOffset += len(key.namespace) + int(_FIELD_HEADER_SIZE)
- fieldCount++
+func (cmd *baseCommand) estimateKeyAttrSize(policy Policy, key *Key, attr *batchAttr, filterExp *Expression) (int, Error) {
+ fieldCount, err := cmd.estimateKeySize(policy.GetBasePolicy(), key, attr.hasWrite)
+ if err != nil {
+ return -1, err
}
- if key.setName != "" {
- cmd.dataOffset += len(key.setName) + int(_FIELD_HEADER_SIZE)
+ if filterExp != nil {
+ predSize, err := cmd.estimateExpressionSize(filterExp)
+ if err != nil {
+ return -1, err
+ }
+ cmd.dataOffset += predSize
fieldCount++
}
+ return fieldCount, nil
+}
- cmd.dataOffset += int(_DIGEST_SIZE + _FIELD_HEADER_SIZE)
- fieldCount++
+func (cmd *baseCommand) estimateKeySize(policy *BasePolicy, key *Key, hasWrite bool) (int, Error) {
+ fieldCount := cmd.estimateRawKeySize(key)
- if sendKey {
+ fieldCount += cmd.sizeTxn(key, policy.Txn, hasWrite)
+
+ if policy.SendKey && key.hasValueToSend() {
// field header size + key size
sz, err := key.userKey.EstimateSize()
if err != nil {
@@ -1899,6 +2633,25 @@ func (cmd *baseCommand) estimateKeySize(key *Key, sendKey bool) (int, Error) {
return fieldCount, nil
}
+func (cmd *baseCommand) estimateRawKeySize(key *Key) int {
+ fieldCount := 0
+
+ if key.namespace != "" {
+ cmd.dataOffset += len(key.namespace) + int(_FIELD_HEADER_SIZE)
+ fieldCount++
+ }
+
+ if key.setName != "" {
+ cmd.dataOffset += len(key.setName) + int(_FIELD_HEADER_SIZE)
+ fieldCount++
+ }
+
+ cmd.dataOffset += int(len(key.digest) + int(_FIELD_HEADER_SIZE))
+ fieldCount++
+
+ return fieldCount
+}
+
func (cmd *baseCommand) estimateUdfSize(packageName string, functionName string, args *ValueArray) (int, Error) {
cmd.dataOffset += len(packageName) + int(_FIELD_HEADER_SIZE)
cmd.dataOffset += len(functionName) + int(_FIELD_HEADER_SIZE)
@@ -2178,19 +2931,46 @@ func (cmd *baseCommand) writeHeaderReadHeader(policy *BasePolicy, readAttr, fiel
cmd.dataOffset = int(_MSG_TOTAL_HEADER_SIZE)
}
-func (cmd *baseCommand) writeKey(key *Key, sendKey bool) Error {
- // Write key into buffer.
- if key.namespace != "" {
- cmd.writeFieldString(key.namespace, NAMESPACE)
+// Header write for batch single commands.
+func (cmd *baseCommand) writeKeyAttr(
+ policy Policy,
+ key *Key,
+ attr *batchAttr,
+ filterExp *Expression,
+ fieldCount int,
+ operationCount int,
+) Error {
+ cmd.dataOffset = 8
+ // Write all header data except total size which must be written last.
+ cmd.WriteByte(_MSG_REMAINING_HEADER_SIZE) // Message header length.
+ cmd.WriteByte(byte(attr.readAttr))
+ cmd.WriteByte(byte(attr.writeAttr))
+ cmd.WriteByte(byte(attr.infoAttr))
+ cmd.WriteByte(0) // unused
+ cmd.WriteByte(0) // clear the result code
+ cmd.WriteUint32(attr.generation)
+ cmd.WriteUint32(attr.expiration)
+ cmd.WriteInt32(0)
+ cmd.WriteInt16(int16(fieldCount))
+ cmd.WriteInt16(int16(operationCount))
+ cmd.dataOffset = int(_MSG_TOTAL_HEADER_SIZE)
+
+ cmd.writeKeyWithPolicy(policy.GetBasePolicy(), key, attr.hasWrite)
+
+ if filterExp != nil {
+ filterExp.pack(cmd)
}
+ return nil
+}
- if key.setName != "" {
- cmd.writeFieldString(key.setName, TABLE)
+func (cmd *baseCommand) writeKeyWithPolicy(policy *BasePolicy, key *Key, sendDeadline bool) Error {
+ if err := cmd.writeKey(key); err != nil {
+ return err
}
- cmd.writeFieldBytes(key.digest[:], DIGEST_RIPE)
+ cmd.writeTxn(policy.Txn, sendDeadline)
- if sendKey {
+ if policy.SendKey && key.hasValueToSend() {
if err := cmd.writeFieldValue(key.userKey, KEY); err != nil {
return err
}
@@ -2199,6 +2979,20 @@ func (cmd *baseCommand) writeKey(key *Key, sendKey bool) Error {
return nil
}
+func (cmd *baseCommand) writeKey(key *Key) Error {
+ // Write key into buffer.
+ if key.namespace != "" {
+ cmd.writeFieldString(key.namespace, NAMESPACE)
+ }
+
+ if key.setName != "" {
+ cmd.writeFieldString(key.setName, TABLE)
+ }
+
+ cmd.writeFieldBytes(key.digest[:], DIGEST_RIPE)
+ return nil
+}
+
func (cmd *baseCommand) writeOperationForBin(bin *Bin, operation OperationType) Error {
nameLength := copy(cmd.dataBuffer[(cmd.dataOffset+int(_OPERATION_HEADER_SIZE)):], bin.Name)
@@ -2329,6 +3123,23 @@ func (cmd *baseCommand) writeFieldValue(value Value, ftype FieldType) Error {
return err
}
+func (cmd *baseCommand) writeFieldVersion(ver uint64) Error {
+ cmd.writeFieldHeader(7, RECORD_VERSION)
+ Buffer.Uint64ToVersionBytes(ver, cmd.dataBuffer, cmd.dataOffset)
+ cmd.dataOffset += 7
+ return nil
+}
+
+func (cmd *baseCommand) writeFieldLE32(val int, typ FieldType) {
+ cmd.writeFieldHeader(4, typ)
+ cmd.WriteInt32LittleEndian(uint32(val))
+}
+
+func (cmd *baseCommand) writeFieldLE64(val int64, typ FieldType) {
+ cmd.writeFieldHeader(8, typ)
+ cmd.WriteInt64LittleEndian(uint64(val))
+}
+
func (cmd *baseCommand) writeUdfArgs(value *ValueArray) Error {
if value != nil {
vlen, err := value.EstimateSize()
@@ -2531,16 +3342,12 @@ func (cmd *baseCommand) batchInDoubt(isWrite bool, commandSentCounter int) bool
return isWrite && commandSentCounter > 1
}
-func (cmd *baseCommand) isRead() bool {
- return true
+func (cmd *baseCommand) onInDoubt() {
+ // called in write commands if the command execution on server was inDoubt
}
-// grpcPutBufferBack puts the assigned buffer back in the pool.
-// This function should only be called from grpc commands.
-func (cmd *baseCommand) grpcPutBufferBack() {
- // put the data buffer back in the pool in case it gets used again
- buffPool.Put(cmd.dataBuffer)
- cmd.dataBuffer = nil
+func (cmd *baseCommand) isRead() bool {
+ return true
}
///////////////////////////////////////////////////////////////////////////////
@@ -2556,6 +3363,17 @@ func (cmd *baseCommand) execute(ifc command) Error {
return cmd.executeAt(ifc, policy, deadline, -1)
}
+func (cmd *baseCommand) executeIter(ifc command, iter int) Error {
+ policy := ifc.getPolicy(ifc).GetBasePolicy()
+ deadline := policy.deadline()
+
+ err := cmd.executeAt(ifc, policy, deadline, iter)
+ if err.IsInDoubt() {
+ cmd.onInDoubt()
+ }
+ return err
+}
+
func (cmd *baseCommand) executeAt(ifc command, policy *BasePolicy, deadline time.Time, iterations int) (errChain Error) {
// for exponential backoff
interval := policy.SleepBetweenRetries
@@ -2578,7 +3396,7 @@ func (cmd *baseCommand) executeAt(ifc command, policy *BasePolicy, deadline time
if cmd.node != nil && cmd.node.cluster != nil {
cmd.node.cluster.maxRetriesExceededCount.GetAndIncrement()
}
- applyTransactionMetrics(cmd.node, ifc.transactionType(), transStart)
+ applyTransactionMetrics(cmd.node, ifc.commandType(), transStart)
return chainErrors(ErrMaxRetriesExceeded.err(), errChain).iter(cmd.commandSentCounter).setInDoubt(ifc.isRead(), cmd.commandSentCounter).setNode(cmd.node)
}
@@ -2604,7 +3422,7 @@ func (cmd *baseCommand) executeAt(ifc command, policy *BasePolicy, deadline time
alreadyRetried, err := bc.retryBatch(bc, cmd.node.cluster, deadline, cmd.commandSentCounter)
if alreadyRetried {
// Batch was retried in separate subcommands. Complete this command.
- applyTransactionMetrics(cmd.node, ifc.transactionType(), transStart)
+ applyTransactionMetrics(cmd.node, ifc.commandType(), transStart)
if err != nil {
return chainErrors(err, errChain).iter(cmd.commandSentCounter).setNode(cmd.node).setInDoubt(ifc.isRead(), cmd.commandSentCounter)
}
@@ -2676,7 +3494,7 @@ func (cmd *baseCommand) executeAt(ifc command, policy *BasePolicy, deadline time
isClientTimeout = true
}
// if the connection pool is empty, we still haven't tried
- // the transaction to increase the iteration count.
+ // the command to increase the iteration count.
cmd.commandSentCounter--
}
logger.Logger.Debug("Node " + cmd.node.String() + ": " + err.Error())
@@ -2698,31 +3516,26 @@ func (cmd *baseCommand) executeAt(ifc command, policy *BasePolicy, deadline time
// Close socket to flush out possible garbage. Do not put back in pool.
cmd.conn.Close()
cmd.conn = nil
- applyTransactionMetrics(cmd.node, ifc.transactionType(), transStart)
+ applyTransactionMetrics(cmd.node, ifc.commandType(), transStart)
return err
}
- // Reset timeout in send buffer (destined for server) and socket.
- binary.BigEndian.PutUint32(cmd.dataBuffer[22:], 0)
- if !deadline.IsZero() {
- serverTimeout := time.Until(deadline)
- if serverTimeout < time.Millisecond {
- serverTimeout = time.Millisecond
+ if _, rawPayload := ifc.(*writePayloadCommand); !rawPayload {
+ // Reset timeout in send buffer (destined for server) and socket.
+ binary.BigEndian.PutUint32(cmd.dataBuffer[22:], 0)
+ if !deadline.IsZero() {
+ serverTimeout := time.Until(deadline)
+ if serverTimeout < time.Millisecond {
+ serverTimeout = time.Millisecond
+ }
+ binary.BigEndian.PutUint32(cmd.dataBuffer[22:], uint32(serverTimeout/time.Millisecond))
}
- binary.BigEndian.PutUint32(cmd.dataBuffer[22:], uint32(serverTimeout/time.Millisecond))
- }
-
- // now that the deadline has been set in the buffer, compress the contents
- if err = cmd.compress(); err != nil {
- applyTransactionErrorMetrics(cmd.node)
- return chainErrors(err, errChain).iter(cmd.commandSentCounter).setNode(cmd.node).setInDoubt(ifc.isRead(), cmd.commandSentCounter)
- }
- // now that the deadline has been set in the buffer, compress the contents
- if err = cmd.prepareBuffer(ifc, deadline); err != nil {
- applyTransactionErrorMetrics(cmd.node)
- applyTransactionMetrics(cmd.node, ifc.transactionType(), transStart)
- return chainErrors(err, errChain).iter(cmd.commandSentCounter).setNode(cmd.node)
+ // now that the deadline has been set in the buffer, compress the contents
+ if err = cmd.compress(); err != nil {
+ applyTransactionErrorMetrics(cmd.node)
+ return chainErrors(err, errChain).iter(cmd.commandSentCounter).setNode(cmd.node).setInDoubt(ifc.isRead(), cmd.commandSentCounter)
+ }
}
// Send command.
@@ -2790,11 +3603,11 @@ func (cmd *baseCommand) executeAt(ifc command, policy *BasePolicy, deadline time
cmd.conn = nil
}
- applyTransactionMetrics(cmd.node, ifc.transactionType(), transStart)
+ applyTransactionMetrics(cmd.node, ifc.commandType(), transStart)
return errChain.setInDoubt(ifc.isRead(), cmd.commandSentCounter)
}
- applyTransactionMetrics(cmd.node, ifc.transactionType(), transStart)
+ applyTransactionMetrics(cmd.node, ifc.commandType(), transStart)
// in case it has grown and re-allocated, it means
// it was borrowed from the pool, sp put it back.
@@ -2860,7 +3673,7 @@ func deviceOverloadError(err Error) bool {
return err.Matches(types.DEVICE_OVERLOAD)
}
-func applyTransactionMetrics(node *Node, tt transactionType, tb time.Time) {
+func applyTransactionMetrics(node *Node, tt commandType, tb time.Time) {
if node != nil && node.cluster.MetricsEnabled() {
applyMetrics(tt, &node.stats, tb)
}
@@ -2878,7 +3691,7 @@ func applyTransactionRetryMetrics(node *Node) {
}
}
-func applyMetrics(tt transactionType, metrics *nodeStats, s time.Time) {
+func applyMetrics(tt commandType, metrics *nodeStats, s time.Time) {
d := uint64(time.Since(s).Microseconds())
switch tt {
case ttGet:
@@ -2905,3 +3718,22 @@ func applyMetrics(tt transactionType, metrics *nodeStats, s time.Time) {
metrics.BatchWriteMetrics.Add(d)
}
}
+
+func (cmd *baseCommand) parseVersion(fieldCount int) *uint64 {
+ var version *uint64
+
+ for i := 0; i < fieldCount; i++ {
+ length := Buffer.BytesToInt32(cmd.dataBuffer, cmd.dataOffset)
+ cmd.dataOffset += 4
+
+ typ := cmd.dataBuffer[cmd.dataOffset]
+ cmd.dataOffset++
+ size := length - 1
+
+ if FieldType(typ) == RECORD_VERSION && size == 7 {
+ version = Buffer.VersionBytesToUint64(cmd.dataBuffer, cmd.dataOffset)
+ }
+ cmd.dataOffset += int(size)
+ }
+ return version
+}
diff --git a/commit_error.go b/commit_error.go
new file mode 100644
index 00000000..a3f8dadb
--- /dev/null
+++ b/commit_error.go
@@ -0,0 +1,25 @@
+// Copyright 2014-2024 Aerospike, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package aerospike
+
+// Multi-record transaction (MRT) error status.
+type CommitError string
+
+const (
+ CommitErrorVerifyFail CommitError = "MRT verify failed. MRT aborted."
+ CommitErrorVerifyFailCloseAbandoned CommitError = "MRT verify failed. MRT aborted. MRT client close abandoned. Server will eventually close the MRT."
+ CommitErrorVerifyFailAbortAbandoned CommitError = "MRT verify failed. MRT client abort abandoned. Server will eventually abort the MRT."
+ CommitErrorMarkRollForwardAbandoned CommitError = "MRT client mark roll forward abandoned. Server will eventually abort the MRT."
+)
diff --git a/commit_policy.go b/commit_policy.go
index cf34300a..c9215f07 100644
--- a/commit_policy.go
+++ b/commit_policy.go
@@ -17,7 +17,7 @@
package aerospike
-// CommitLevel indicates the desired consistency guarantee when committing a transaction on the server.
+// CommitLevel indicates the desired consistency guarantee when committing a command on the server.
type CommitLevel int
const (
diff --git a/commit_status.go b/commit_status.go
new file mode 100644
index 00000000..13040386
--- /dev/null
+++ b/commit_status.go
@@ -0,0 +1,27 @@
+// Copyright 2014-2024 Aerospike, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package aerospike
+
+// CommitStatus is a multi-record transaction (MRT) commit status code.
+type CommitStatus string
+
+const (
+ CommitStatusOK CommitStatus = "Commit succeeded"
+ CommitStatusUnverified CommitStatus = "Commit process was disrupted on client side and unverified"
+ CommitStatusAlreadyCommitted CommitStatus = "Already committed"
+ CommitStatusAlreadyAborted CommitStatus = "Already aborted"
+ CommitStatusRollForwardAbandoned CommitStatus = "MRT client roll forward abandoned. Server will eventually commit the MRT."
+ CommitStatusCloseAbandoned CommitStatus = "MRT has been rolled forward, but MRT client close was abandoned. Server will eventually close the MRT."
+)
diff --git a/complex_index_test.go b/complex_index_test.go
index 144b59b0..9f159861 100644
--- a/complex_index_test.go
+++ b/complex_index_test.go
@@ -15,7 +15,7 @@
package aerospike_test
import (
- as "github.com/aerospike/aerospike-client-go/v7"
+ as "github.com/aerospike/aerospike-client-go/v8"
gg "github.com/onsi/ginkgo/v2"
gm "github.com/onsi/gomega"
@@ -24,12 +24,6 @@ import (
// ALL tests are isolated by SetName and Key, which are 50 random characters
var _ = gg.Describe("Complex Index operations test", func() {
- gg.BeforeEach(func() {
- if *proxy {
- gg.Skip("Not supported in Proxy Client")
- }
- })
-
gg.Describe("Complex Index Creation", func() {
// connection data
var err error
diff --git a/complex_query_test.go b/complex_query_test.go
index 2adff807..da9bf0a1 100644
--- a/complex_query_test.go
+++ b/complex_query_test.go
@@ -15,7 +15,7 @@
package aerospike_test
import (
- as "github.com/aerospike/aerospike-client-go/v7"
+ as "github.com/aerospike/aerospike-client-go/v8"
gg "github.com/onsi/ginkgo/v2"
gm "github.com/onsi/gomega"
@@ -40,10 +40,6 @@ var _ = gg.Describe("Query operations on complex types", gg.Ordered, func() {
var keys map[string]*as.Key
gg.BeforeAll(func() {
- if *dbaas {
- gg.Skip("Not supported in DBAAS environment")
- }
-
keys = make(map[string]*as.Key, keyCount)
set = randString(50)
for i := 0; i < keyCount; i++ {
@@ -80,10 +76,6 @@ var _ = gg.Describe("Query operations on complex types", gg.Ordered, func() {
})
gg.AfterAll(func() {
- if *dbaas {
- gg.Skip("Not supported in DBAAS environment")
- }
-
dropIndex(nil, ns, set, set+bin1.Name+"N")
dropIndex(nil, ns, set, set+bin2.Name+"N"+"keys")
dropIndex(nil, ns, set, set+bin2.Name+"N"+"values")
diff --git a/connection.go b/connection.go
index 3b977a1d..255f11f9 100644
--- a/connection.go
+++ b/connection.go
@@ -15,7 +15,6 @@
package aerospike
import (
- "bytes"
"compress/zlib"
"crypto/tls"
"io"
@@ -25,9 +24,9 @@ import (
"sync"
"time"
- "github.com/aerospike/aerospike-client-go/v7/logger"
- "github.com/aerospike/aerospike-client-go/v7/types"
- "github.com/aerospike/aerospike-client-go/v7/types/histogram"
+ "github.com/aerospike/aerospike-client-go/v8/logger"
+ "github.com/aerospike/aerospike-client-go/v8/types"
+ "github.com/aerospike/aerospike-client-go/v8/types/histogram"
)
const _BUFF_ADJUST_INTERVAL = 5 * time.Second
@@ -94,10 +93,6 @@ type Connection struct {
limitReader *io.LimitedReader
closer sync.Once
-
- grpcConn bool
- grpcReadCallback func() ([]byte, Error)
- grpcReader io.ReadWriter
}
// makes sure that the connection is closed eventually, even if it is not consumed
@@ -134,18 +129,6 @@ func errToAerospikeErr(conn *Connection, err error) (aerr Error) {
return aerr
}
-// newGrpcFakeConnection creates a connection that fakes a real connection for when grpc connections are required.
-// These connections only support reading to allow parsing of the returned payload.
-func newGrpcFakeConnection(payload []byte, callback func() ([]byte, Error)) *Connection {
- buf := bytes.NewBuffer(payload)
- return &Connection{
- grpcConn: true,
- grpcReader: buf,
- grpcReadCallback: callback,
- limitReader: &io.LimitedReader{R: buf, N: 0},
- }
-}
-
// newConnection creates a connection on the network and returns the pointer
// A minimum timeout of 2 seconds will always be applied.
// If the connection is not established in the specified timeout,
@@ -258,11 +241,6 @@ func (ctn *Connection) Write(buf []byte) (total int, aerr Error) {
// Read reads from connection buffer to the provided slice.
func (ctn *Connection) Read(buf []byte, length int) (total int, aerr Error) {
- if ctn.grpcConn {
- // grpc fake conn
- return ctn.grpcRead(buf, length)
- }
-
var err error
// if all bytes are not read, retry until successful
@@ -307,68 +285,6 @@ func (ctn *Connection) Read(buf []byte, length int) (total int, aerr Error) {
return total, aerr
}
-// Reads the grpc payload
-func (ctn *Connection) grpcReadNext() (aerr Error) {
- // if there is no payload set, ask for the next chunk
- if ctn.grpcReadCallback != nil {
- grpcPayload, err := ctn.grpcReadCallback()
- if err != nil {
- return err
- }
-
- if _, err := ctn.grpcReader.Write(grpcPayload); err != nil {
- errToAerospikeErr(ctn, io.EOF)
- }
-
- if ctn.compressed {
- ctn.limitReader.R = ctn.grpcReader
- }
-
- return nil
- }
- return errToAerospikeErr(ctn, io.EOF)
-}
-
-// Reads the grpc payload
-func (ctn *Connection) grpcRead(buf []byte, length int) (total int, aerr Error) {
- var err error
-
- // if all bytes are not read, retry until successful
- // Don't worry about the loop; we've already set the timeout elsewhere
- for total < length {
- var r int
- if !ctn.compressed {
- r, err = ctn.grpcReader.Read(buf[total:length])
- } else {
- r, err = ctn.inflater.Read(buf[total:length])
- if err == io.EOF && total+r == length {
- ctn.compressed = false
- err = ctn.inflater.Close()
- }
- }
- total += r
- if err != nil {
- if err == io.EOF {
- if err := ctn.grpcReadNext(); err != nil {
- return total, err
- }
- continue
- }
- break
- }
- }
-
- if total == length {
- // If all required bytes are read, ignore any potential error.
- // The error will bubble up on the next network io if it matters.
- return total, nil
- }
-
- aerr = chainErrors(errToAerospikeErr(ctn, err), aerr)
-
- return total, aerr
-}
-
// IsConnected returns true if the connection is not closed yet.
func (ctn *Connection) IsConnected() bool {
return ctn.conn != nil
diff --git a/delete_command.go b/delete_command.go
index 4f059d7c..9a80425f 100644
--- a/delete_command.go
+++ b/delete_command.go
@@ -15,101 +15,68 @@
package aerospike
import (
- "github.com/aerospike/aerospike-client-go/v7/types"
-
- Buffer "github.com/aerospike/aerospike-client-go/v7/utils/buffer"
+ "github.com/aerospike/aerospike-client-go/v8/types"
)
// guarantee deleteCommand implements command interface
var _ command = &deleteCommand{}
type deleteCommand struct {
- singleCommand
+ baseWriteCommand
- policy *WritePolicy
existed bool
}
-func newDeleteCommand(cluster *Cluster, policy *WritePolicy, key *Key) (*deleteCommand, Error) {
- var err Error
- var partition *Partition
- if cluster != nil {
- partition, err = PartitionForWrite(cluster, &policy.BasePolicy, key)
- if err != nil {
- return nil, err
- }
+func newDeleteCommand(
+ cluster *Cluster,
+ policy *WritePolicy,
+ key *Key,
+) (*deleteCommand, Error) {
+ bwc, err := newBaseWriteCommand(cluster, policy, key)
+ if err != nil {
+ return nil, err
}
newDeleteCmd := &deleteCommand{
- singleCommand: newSingleCommand(cluster, key, partition),
- policy: policy,
+ baseWriteCommand: bwc,
}
return newDeleteCmd, nil
}
-func (cmd *deleteCommand) getPolicy(ifc command) Policy {
- return cmd.policy
-}
-
func (cmd *deleteCommand) writeBuffer(ifc command) Error {
return cmd.setDelete(cmd.policy, cmd.key)
}
-func (cmd *deleteCommand) getNode(ifc command) (*Node, Error) {
- return cmd.partition.GetNodeWrite(cmd.cluster)
-}
-
-func (cmd *deleteCommand) prepareRetry(ifc command, isTimeout bool) bool {
- cmd.partition.PrepareRetryWrite(isTimeout)
- return true
-}
-
func (cmd *deleteCommand) parseResult(ifc command, conn *Connection) Error {
- // Read header.
- if _, err := conn.Read(cmd.dataBuffer, int(_MSG_TOTAL_HEADER_SIZE)); err != nil {
- return err
+ resultCode, err := cmd.parseHeader()
+ if err != nil {
+ return newCustomNodeError(cmd.node, err.resultCode())
}
- header := Buffer.BytesToInt64(cmd.dataBuffer, 0)
-
- // Validate header to make sure we are at the beginning of a message
- if err := cmd.validateHeader(header); err != nil {
- return err
- }
-
- resultCode := cmd.dataBuffer[13] & 0xFF
-
switch types.ResultCode(resultCode) {
case 0:
cmd.existed = true
case types.KEY_NOT_FOUND_ERROR:
cmd.existed = false
case types.FILTERED_OUT:
- if err := cmd.emptySocket(conn); err != nil {
- return err
- }
cmd.existed = true
return ErrFilteredOut.err()
default:
return newError(types.ResultCode(resultCode))
}
- return cmd.emptySocket(conn)
+ return nil
}
func (cmd *deleteCommand) Existed() bool {
return cmd.existed
}
-func (cmd *deleteCommand) isRead() bool {
- return false
-}
-
func (cmd *deleteCommand) Execute() Error {
return cmd.execute(cmd)
}
-func (cmd *deleteCommand) transactionType() transactionType {
+func (cmd *deleteCommand) commandType() commandType {
return ttDelete
}
diff --git a/docs/README.md b/docs/README.md
index 10a5f950..e7e59f75 100644
--- a/docs/README.md
+++ b/docs/README.md
@@ -8,7 +8,7 @@ This package describes the Aerospike Go Client API in detail.
The aerospike Go client package is the main entry point to the client API.
```go
- import as "github.com/aerospike/aerospike-client-go/v7"
+ import as "github.com/aerospike/aerospike-client-go/v8"
```
Before connecting to a cluster, you must import the package.
diff --git a/docs/aerospike.md b/docs/aerospike.md
index 00448d7a..47047ccc 100644
--- a/docs/aerospike.md
+++ b/docs/aerospike.md
@@ -15,7 +15,7 @@
The aerospike package can be imported into your project via:
```go
- import as "github.com/aerospike/aerospike-client-go/v7"
+ import as "github.com/aerospike/aerospike-client-go/v8"
```
@@ -81,7 +81,7 @@ key
-->
-### NewKey(ns, set string, key interface{}): *
+### NewKey(ns, set string, key any): *
Creates a new [key object](datamodel.md#key) with the provided arguments.
diff --git a/docs/client.md b/docs/client.md
index de9a17fa..035b5fb0 100644
--- a/docs/client.md
+++ b/docs/client.md
@@ -392,7 +392,7 @@ Example:
"a": "Lack of skill dictates economy of style.",
"b": 123,
"c": []int{1, 2, 3},
- "d": map[string]interface{}{"a": 42, "b": "An elephant is mouse with an operating system."},
+ "d": map[string]any{"a": 42, "b": "An elephant is mouse with an operating system."},
}
err := client.Put(nil, key, bins)
@@ -424,7 +424,7 @@ Example:
bin1 := NewBin("a", "Lack of skill dictates economy of style.")
bin2 := NewBin("b", 123)
bin3 := NewBin("c", []int{1, 2, 3})
- bin4 := NewBin("d", map[string]interface{}{"a": 42, "b": "An elephant is mouse with an operating system."})
+ bin4 := NewBin("d", map[string]any{"a": 42, "b": "An elephant is mouse with an operating system."})
err := client.PutBins(nil, key, bin1, bin2, bin3, bin4)
```
@@ -647,7 +647,7 @@ execute()
-->
-### Execute(policy *WritePolicy, key *Key, packageName string, functionName string, args ...Value) (interface{}, error)
+### Execute(policy *WritePolicy, key *Key, packageName string, functionName string, args ...Value) (any, error)
Executes a UDF on a record with the given key, and returns the results.
@@ -666,7 +666,7 @@ Considering the UDF registered in RegisterUDF example above:
```go
res, err := client.Execute(nil, key, "udf1", "testFunc1")
- // res will be a: map[interface{}]interface{}{"status": "OK"}
+ // res will be a: map[any]any{"status": "OK"}
```
-## NewKey(ns, set string, key interface{})
+## NewKey(ns, set string, key any)
A record is addressable via its key. A key is a struct containing:
@@ -144,7 +144,7 @@ bin
-->
-## NewBin(name string, value interface{}) Value
+## NewBin(name string, value any) Value
Bins are analogous to fields in relational databases.
@@ -157,7 +157,7 @@ Example:
bin1 := NewBin("name", "Aerospike") // string value
bin2 := NewBin("maxTPS", 1000000) // number value
bin3 := NewBin("notes",
- map[interface{}]interface{}{
+ map[any]any{
"age": 5,
666: "not allowed in",
"clients": []string{"go", "c", "java", "python", "node", "erlang"},
@@ -219,7 +219,7 @@ filter
-->
-## NewEqualFilter(binName string, value interface{}) *Filter
+## NewEqualFilter(binName string, value any) *Filter
Create equality filter for query.
diff --git a/docs/log.md b/docs/log.md
index 2830a31f..e94443fb 100644
--- a/docs/log.md
+++ b/docs/log.md
@@ -4,7 +4,7 @@ Various log levels available to log from the Aerospike API.
Default is set to OFF.
```go
- import asl "github.com/aerospike/aerospike-client-go/v7/logger"
+ import asl "github.com/aerospike/aerospike-client-go/v8/logger"
asl.Logger.SetLevel(asl.OFF)
```
diff --git a/error.go b/error.go
index c1722a2f..db265a0c 100644
--- a/error.go
+++ b/error.go
@@ -15,19 +15,18 @@
package aerospike
import (
- "context"
"errors"
"fmt"
"runtime"
"strconv"
"strings"
- "github.com/aerospike/aerospike-client-go/v7/types"
- grpc "google.golang.org/grpc"
- "google.golang.org/grpc/codes"
- "google.golang.org/grpc/status"
+ "github.com/aerospike/aerospike-client-go/v8/types"
)
+// StackTracesEnabled specifies whether stack traces should be included in the error.
+var StackTracesEnabled = false
+
// Error is the internal error interface for the Aerospike client's errors.
// All the public API return this error type. This interface is compatible
// with error interface, including errors.Is and errors.As.
@@ -145,70 +144,7 @@ func newCommonError(e error, messages ...string) Error {
return ne
}
-func newGrpcError(isWrite bool, e error, messages ...string) Error {
- if ae, ok := e.(Error); ok && ae.resultCode() == types.GRPC_ERROR {
- return ae
- }
-
- // convert error to Aerospike errors
- if e == context.DeadlineExceeded {
- return ErrNetTimeout.err().markInDoubt(isWrite)
- } else if e == grpc.ErrClientConnTimeout {
- return ErrNetTimeout.err().markInDoubt(isWrite)
- } else if e == grpc.ErrServerStopped {
- return ErrServerNotAvailable.err().markInDoubt(isWrite)
- }
-
- // try to convert the error
- code := status.Code(e)
- if code == codes.Unknown {
- if s := status.Convert(e); s != nil {
- code = s.Code()
- }
- }
-
- switch code {
- case codes.OK:
- return nil
- case codes.Canceled:
- return ErrNetTimeout.err().markInDoubt(isWrite)
- case codes.InvalidArgument:
- return newError(types.PARAMETER_ERROR, messages...)
- case codes.DeadlineExceeded:
- return ErrNetTimeout.err().markInDoubt(isWrite)
- case codes.NotFound:
- return newError(types.SERVER_NOT_AVAILABLE, messages...).markInDoubt(isWrite)
- case codes.PermissionDenied:
- return newError(types.FAIL_FORBIDDEN, messages...)
- case codes.ResourceExhausted:
- return newError(types.QUOTA_EXCEEDED, messages...)
- case codes.FailedPrecondition:
- return newError(types.PARAMETER_ERROR, messages...)
- case codes.Aborted:
- return newError(types.SERVER_ERROR).markInDoubt(isWrite)
- case codes.OutOfRange:
- return newError(types.PARAMETER_ERROR, messages...)
- case codes.Unimplemented:
- return newError(types.SERVER_NOT_AVAILABLE, messages...)
- case codes.Internal:
- return newError(types.SERVER_ERROR, messages...).markInDoubt(isWrite)
- case codes.Unavailable:
- return newError(types.SERVER_NOT_AVAILABLE, messages...).markInDoubt(isWrite)
- case codes.DataLoss:
- return ErrNetwork.err().markInDoubt(isWrite)
- case codes.Unauthenticated:
- return newError(types.NOT_AUTHENTICATED, messages...)
-
- case codes.AlreadyExists:
- case codes.Unknown:
- }
-
- ne := newError(types.GRPC_ERROR, messages...).markInDoubt(isWrite)
- ne.wrap(e)
- return ne
-}
-
-// SetInDoubt sets whether it is possible that the write transaction may have completed
+// SetInDoubt sets whether it is possible that the write command may have completed
// even though this error was generated. This may be the case when a
// client error occurs (like timeout) after the command was sent to the server.
func (ase *AerospikeError) setInDoubt(isRead bool, commandSentCounter int) Error {
@@ -455,8 +391,6 @@ var (
ErrMaxRetriesExceeded = newConstError(types.MAX_RETRIES_EXCEEDED, "command execution timed out on client: Exceeded number of retries. See `Policy.MaxRetries`.")
ErrInvalidParam = newConstError(types.PARAMETER_ERROR)
ErrLuaPoolEmpty = newConstError(types.COMMON_ERROR, "Error fetching a lua instance from pool")
-
- errGRPCStreamEnd = newError(types.OK, "GRPC Steam was ended successfully")
)
//revive:enable
@@ -505,24 +439,26 @@ func (st *stackFrame) String() string {
}
func stackTrace(err Error) []stackFrame {
- const maxDepth = 10
- sFrames := make([]stackFrame, 0, maxDepth)
- for i := 3; i <= maxDepth+3; i++ {
- pc, fl, ln, ok := runtime.Caller(i)
- if !ok {
- break
- }
- fn := runtime.FuncForPC(pc)
- sFrame := stackFrame{
- fl: fl,
- fn: fn.Name(),
- ln: ln,
+ if StackTracesEnabled {
+ const maxDepth = 10
+ sFrames := make([]stackFrame, 0, maxDepth)
+ for i := 3; i <= maxDepth+3; i++ {
+ pc, fl, ln, ok := runtime.Caller(i)
+ if !ok {
+ break
+ }
+ fn := runtime.FuncForPC(pc)
+ sFrame := stackFrame{
+ fl: fl,
+ fn: fn.Name(),
+ ln: ln,
+ }
+ sFrames = append(sFrames, sFrame)
}
- sFrames = append(sFrames, sFrame)
- }
- if len(sFrames) > 0 {
- return sFrames
+ if len(sFrames) > 0 {
+ return sFrames
+ }
}
return nil
}
diff --git a/error_test.go b/error_test.go
index e541c726..7e4d807d 100644
--- a/error_test.go
+++ b/error_test.go
@@ -17,7 +17,7 @@ package aerospike
import (
"errors"
- ast "github.com/aerospike/aerospike-client-go/v7/types"
+ ast "github.com/aerospike/aerospike-client-go/v8/types"
gg "github.com/onsi/ginkgo/v2"
gm "github.com/onsi/gomega"
diff --git a/example_client_test.go b/example_client_test.go
index df884991..bdae322f 100644
--- a/example_client_test.go
+++ b/example_client_test.go
@@ -21,7 +21,7 @@ import (
"fmt"
"log"
- as "github.com/aerospike/aerospike-client-go/v7"
+ as "github.com/aerospike/aerospike-client-go/v8"
)
func ExampleClient_Add() {
diff --git a/example_listiter_int_test.go b/example_listiter_int_test.go
index 13e44515..8363a853 100644
--- a/example_listiter_int_test.go
+++ b/example_listiter_int_test.go
@@ -18,7 +18,7 @@ import (
"fmt"
"log"
- as "github.com/aerospike/aerospike-client-go/v7"
+ as "github.com/aerospike/aerospike-client-go/v8"
)
/*
diff --git a/example_listiter_string_test.go b/example_listiter_string_test.go
index 8d7a4232..a4b21f37 100644
--- a/example_listiter_string_test.go
+++ b/example_listiter_string_test.go
@@ -18,7 +18,7 @@ import (
"fmt"
"log"
- as "github.com/aerospike/aerospike-client-go/v7"
+ as "github.com/aerospike/aerospike-client-go/v8"
)
/*
diff --git a/example_listiter_time_test.go b/example_listiter_time_test.go
index 9fb41538..8d21432c 100644
--- a/example_listiter_time_test.go
+++ b/example_listiter_time_test.go
@@ -19,7 +19,7 @@ import (
"log"
"time"
- as "github.com/aerospike/aerospike-client-go/v7"
+ as "github.com/aerospike/aerospike-client-go/v8"
)
/*
diff --git a/example_mapiter_test.go b/example_mapiter_test.go
index 8da567a5..8bdeeda2 100644
--- a/example_mapiter_test.go
+++ b/example_mapiter_test.go
@@ -19,7 +19,7 @@ import (
"log"
"time"
- as "github.com/aerospike/aerospike-client-go/v7"
+ as "github.com/aerospike/aerospike-client-go/v8"
)
/*
diff --git a/example_pagination_cursor_test.go b/example_pagination_cursor_test.go
index 6da9dac7..88b004f6 100644
--- a/example_pagination_cursor_test.go
+++ b/example_pagination_cursor_test.go
@@ -18,7 +18,7 @@ import (
"fmt"
"log"
- as "github.com/aerospike/aerospike-client-go/v7"
+ as "github.com/aerospike/aerospike-client-go/v8"
)
func ExamplePartitionFilter_EncodeCursor() {
diff --git a/examples/add/add.go b/examples/add/add.go
index a3f561f4..6868d932 100644
--- a/examples/add/add.go
+++ b/examples/add/add.go
@@ -20,8 +20,8 @@ package main
import (
"log"
- as "github.com/aerospike/aerospike-client-go/v7"
- shared "github.com/aerospike/aerospike-client-go/v7/examples/shared"
+ as "github.com/aerospike/aerospike-client-go/v8"
+ shared "github.com/aerospike/aerospike-client-go/v8/examples/shared"
)
func main() {
diff --git a/examples/append/append.go b/examples/append/append.go
index e106bc33..b50bc571 100644
--- a/examples/append/append.go
+++ b/examples/append/append.go
@@ -20,8 +20,8 @@ package main
import (
"log"
- as "github.com/aerospike/aerospike-client-go/v7"
- shared "github.com/aerospike/aerospike-client-go/v7/examples/shared"
+ as "github.com/aerospike/aerospike-client-go/v8"
+ shared "github.com/aerospike/aerospike-client-go/v8/examples/shared"
)
func main() {
diff --git a/examples/batch/batch.go b/examples/batch/batch.go
index 7d875d3e..9ccbf142 100644
--- a/examples/batch/batch.go
+++ b/examples/batch/batch.go
@@ -21,9 +21,9 @@ import (
"log"
"strconv"
- as "github.com/aerospike/aerospike-client-go/v7"
- shared "github.com/aerospike/aerospike-client-go/v7/examples/shared"
- asl "github.com/aerospike/aerospike-client-go/v7/logger"
+ as "github.com/aerospike/aerospike-client-go/v8"
+ shared "github.com/aerospike/aerospike-client-go/v8/examples/shared"
+ asl "github.com/aerospike/aerospike-client-go/v8/logger"
)
func main() {
diff --git a/examples/blob/blob.go b/examples/blob/blob.go
index 6f9de262..cf8af577 100644
--- a/examples/blob/blob.go
+++ b/examples/blob/blob.go
@@ -15,7 +15,7 @@
package main
import (
- as "github.com/aerospike/aerospike-client-go/v7"
+ as "github.com/aerospike/aerospike-client-go/v8"
)
// Person is a custom data type to be converted to a blob
diff --git a/examples/count_set_objects/count_set_objects_using_request_info.go b/examples/count_set_objects/count_set_objects_using_request_info.go
index f5cb04a8..bc4f019a 100644
--- a/examples/count_set_objects/count_set_objects_using_request_info.go
+++ b/examples/count_set_objects/count_set_objects_using_request_info.go
@@ -6,8 +6,8 @@ import (
"strconv"
"strings"
- as "github.com/aerospike/aerospike-client-go/v7"
- shared "github.com/aerospike/aerospike-client-go/v7/examples/shared"
+ as "github.com/aerospike/aerospike-client-go/v8"
+ shared "github.com/aerospike/aerospike-client-go/v8/examples/shared"
)
func main() {
diff --git a/examples/custom_list_iter/custom_list_iter.go b/examples/custom_list_iter/custom_list_iter.go
index 041d7050..92a95d18 100644
--- a/examples/custom_list_iter/custom_list_iter.go
+++ b/examples/custom_list_iter/custom_list_iter.go
@@ -22,8 +22,8 @@ import (
"reflect"
"time"
- as "github.com/aerospike/aerospike-client-go/v7"
- shared "github.com/aerospike/aerospike-client-go/v7/examples/shared"
+ as "github.com/aerospike/aerospike-client-go/v8"
+ shared "github.com/aerospike/aerospike-client-go/v8/examples/shared"
)
var (
diff --git a/examples/expire/expire.go b/examples/expire/expire.go
index 59e70437..4d7137df 100644
--- a/examples/expire/expire.go
+++ b/examples/expire/expire.go
@@ -22,9 +22,9 @@ import (
"math"
"time"
- as "github.com/aerospike/aerospike-client-go/v7"
- shared "github.com/aerospike/aerospike-client-go/v7/examples/shared"
- ast "github.com/aerospike/aerospike-client-go/v7/types"
+ as "github.com/aerospike/aerospike-client-go/v8"
+ shared "github.com/aerospike/aerospike-client-go/v8/examples/shared"
+ ast "github.com/aerospike/aerospike-client-go/v8/types"
)
func main() {
diff --git a/examples/expressions/expressions.go b/examples/expressions/expressions.go
index 4b8a8dee..d9b089c2 100644
--- a/examples/expressions/expressions.go
+++ b/examples/expressions/expressions.go
@@ -21,8 +21,8 @@ import (
"log"
"strings"
- as "github.com/aerospike/aerospike-client-go/v7"
- shared "github.com/aerospike/aerospike-client-go/v7/examples/shared"
+ as "github.com/aerospike/aerospike-client-go/v8"
+ shared "github.com/aerospike/aerospike-client-go/v8/examples/shared"
)
func main() {
diff --git a/examples/generation/generation.go b/examples/generation/generation.go
index fa4f9a14..7b2fa52e 100644
--- a/examples/generation/generation.go
+++ b/examples/generation/generation.go
@@ -21,9 +21,9 @@ import (
"errors"
"log"
- as "github.com/aerospike/aerospike-client-go/v7"
- shared "github.com/aerospike/aerospike-client-go/v7/examples/shared"
- ast "github.com/aerospike/aerospike-client-go/v7/types"
+ as "github.com/aerospike/aerospike-client-go/v8"
+ shared "github.com/aerospike/aerospike-client-go/v8/examples/shared"
+ ast "github.com/aerospike/aerospike-client-go/v8/types"
)
func main() {
diff --git a/examples/geojson_query/geojson_query.go b/examples/geojson_query/geojson_query.go
index 1afece68..4277dfeb 100644
--- a/examples/geojson_query/geojson_query.go
+++ b/examples/geojson_query/geojson_query.go
@@ -18,8 +18,8 @@ package main
import (
"log"
- as "github.com/aerospike/aerospike-client-go/v7"
- "github.com/aerospike/aerospike-client-go/v7/examples/shared"
+ as "github.com/aerospike/aerospike-client-go/v8"
+ "github.com/aerospike/aerospike-client-go/v8/examples/shared"
)
func main() {
diff --git a/examples/get/get.go b/examples/get/get.go
index 4c6847b3..acadc89e 100644
--- a/examples/get/get.go
+++ b/examples/get/get.go
@@ -20,7 +20,7 @@ import (
"os"
"strconv"
- as "github.com/aerospike/aerospike-client-go/v7"
+ as "github.com/aerospike/aerospike-client-go/v8"
)
var (
diff --git a/examples/info/info.go b/examples/info/info.go
index 8a24b309..55aaf6b7 100644
--- a/examples/info/info.go
+++ b/examples/info/info.go
@@ -18,7 +18,7 @@ import (
"log"
"time"
- as "github.com/aerospike/aerospike-client-go/v7"
+ as "github.com/aerospike/aerospike-client-go/v8"
)
func main() {
diff --git a/examples/list_map/list_map.go b/examples/list_map/list_map.go
index 9ad559ab..fc947aa5 100644
--- a/examples/list_map/list_map.go
+++ b/examples/list_map/list_map.go
@@ -21,8 +21,8 @@ import (
"bytes"
"log"
- as "github.com/aerospike/aerospike-client-go/v7"
- shared "github.com/aerospike/aerospike-client-go/v7/examples/shared"
+ as "github.com/aerospike/aerospike-client-go/v8"
+ shared "github.com/aerospike/aerospike-client-go/v8/examples/shared"
)
func main() {
diff --git a/examples/operate/operate.go b/examples/operate/operate.go
index c180a277..f893d964 100644
--- a/examples/operate/operate.go
+++ b/examples/operate/operate.go
@@ -20,8 +20,8 @@ package main
import (
"log"
- as "github.com/aerospike/aerospike-client-go/v7"
- shared "github.com/aerospike/aerospike-client-go/v7/examples/shared"
+ as "github.com/aerospike/aerospike-client-go/v8"
+ shared "github.com/aerospike/aerospike-client-go/v8/examples/shared"
)
func main() {
diff --git a/examples/prepend/prepend.go b/examples/prepend/prepend.go
index 430d3ce3..1357efb5 100644
--- a/examples/prepend/prepend.go
+++ b/examples/prepend/prepend.go
@@ -20,8 +20,8 @@ package main
import (
"log"
- as "github.com/aerospike/aerospike-client-go/v7"
- shared "github.com/aerospike/aerospike-client-go/v7/examples/shared"
+ as "github.com/aerospike/aerospike-client-go/v8"
+ shared "github.com/aerospike/aerospike-client-go/v8/examples/shared"
)
func main() {
diff --git a/examples/put/put.go b/examples/put/put.go
index ea1a42a1..c1e619e9 100644
--- a/examples/put/put.go
+++ b/examples/put/put.go
@@ -21,7 +21,7 @@ import (
"os"
"strconv"
- as "github.com/aerospike/aerospike-client-go/v7"
+ as "github.com/aerospike/aerospike-client-go/v8"
)
var (
diff --git a/examples/putget/putget.go b/examples/putget/putget.go
index ede57ee9..fb20144d 100644
--- a/examples/putget/putget.go
+++ b/examples/putget/putget.go
@@ -22,8 +22,8 @@ import (
"log"
"time"
- as "github.com/aerospike/aerospike-client-go/v7"
- shared "github.com/aerospike/aerospike-client-go/v7/examples/shared"
+ as "github.com/aerospike/aerospike-client-go/v8"
+ shared "github.com/aerospike/aerospike-client-go/v8/examples/shared"
)
func main() {
diff --git a/examples/query-aggregate/average/average.go b/examples/query-aggregate/average/average.go
index e5b39060..a190f9d6 100644
--- a/examples/query-aggregate/average/average.go
+++ b/examples/query-aggregate/average/average.go
@@ -22,8 +22,8 @@ import (
"os"
"time"
- as "github.com/aerospike/aerospike-client-go/v7"
- shared "github.com/aerospike/aerospike-client-go/v7/examples/shared"
+ as "github.com/aerospike/aerospike-client-go/v8"
+ shared "github.com/aerospike/aerospike-client-go/v8/examples/shared"
)
const keyCount = 1000
diff --git a/examples/query-aggregate/single_bin_sum/single_bin_sum.go b/examples/query-aggregate/single_bin_sum/single_bin_sum.go
index 9099aeb8..dd6a7718 100644
--- a/examples/query-aggregate/single_bin_sum/single_bin_sum.go
+++ b/examples/query-aggregate/single_bin_sum/single_bin_sum.go
@@ -22,8 +22,8 @@ import (
"os"
"time"
- as "github.com/aerospike/aerospike-client-go/v7"
- shared "github.com/aerospike/aerospike-client-go/v7/examples/shared"
+ as "github.com/aerospike/aerospike-client-go/v8"
+ shared "github.com/aerospike/aerospike-client-go/v8/examples/shared"
)
const keyCount = 1000
diff --git a/examples/replace/replace.go b/examples/replace/replace.go
index 8966a3ae..a0033e02 100644
--- a/examples/replace/replace.go
+++ b/examples/replace/replace.go
@@ -21,9 +21,9 @@ import (
"errors"
"log"
- as "github.com/aerospike/aerospike-client-go/v7"
- shared "github.com/aerospike/aerospike-client-go/v7/examples/shared"
- ast "github.com/aerospike/aerospike-client-go/v7/types"
+ as "github.com/aerospike/aerospike-client-go/v8"
+ shared "github.com/aerospike/aerospike-client-go/v8/examples/shared"
+ ast "github.com/aerospike/aerospike-client-go/v8/types"
)
func main() {
diff --git a/examples/scan_paginate/scan_paginate.go b/examples/scan_paginate/scan_paginate.go
index d95adeb3..9cf76dba 100644
--- a/examples/scan_paginate/scan_paginate.go
+++ b/examples/scan_paginate/scan_paginate.go
@@ -21,8 +21,8 @@ import (
"log"
"time"
- as "github.com/aerospike/aerospike-client-go/v7"
- shared "github.com/aerospike/aerospike-client-go/v7/examples/shared"
+ as "github.com/aerospike/aerospike-client-go/v8"
+ shared "github.com/aerospike/aerospike-client-go/v8/examples/shared"
)
func main() {
diff --git a/examples/scan_parallel/scan_parallel.go b/examples/scan_parallel/scan_parallel.go
index 3dd36128..843deb60 100644
--- a/examples/scan_parallel/scan_parallel.go
+++ b/examples/scan_parallel/scan_parallel.go
@@ -21,8 +21,8 @@ import (
"log"
"time"
- as "github.com/aerospike/aerospike-client-go/v7"
- shared "github.com/aerospike/aerospike-client-go/v7/examples/shared"
+ as "github.com/aerospike/aerospike-client-go/v8"
+ shared "github.com/aerospike/aerospike-client-go/v8/examples/shared"
)
func main() {
diff --git a/examples/scan_serial/scan_serial.go b/examples/scan_serial/scan_serial.go
index 3f2d0092..e1a1cedb 100644
--- a/examples/scan_serial/scan_serial.go
+++ b/examples/scan_serial/scan_serial.go
@@ -21,8 +21,8 @@ import (
"log"
"time"
- as "github.com/aerospike/aerospike-client-go/v7"
- shared "github.com/aerospike/aerospike-client-go/v7/examples/shared"
+ as "github.com/aerospike/aerospike-client-go/v8"
+ shared "github.com/aerospike/aerospike-client-go/v8/examples/shared"
)
func main() {
diff --git a/examples/shared/shared.go b/examples/shared/shared.go
index 47daa04d..ae7404be 100644
--- a/examples/shared/shared.go
+++ b/examples/shared/shared.go
@@ -27,7 +27,7 @@ import (
"runtime"
"time"
- as "github.com/aerospike/aerospike-client-go/v7"
+ as "github.com/aerospike/aerospike-client-go/v8"
)
// WritePolicy is shared for all examples
diff --git a/examples/simple/simple.go b/examples/simple/simple.go
index 2107840f..e83ec69f 100644
--- a/examples/simple/simple.go
+++ b/examples/simple/simple.go
@@ -17,7 +17,7 @@ package main
import (
"fmt"
- as "github.com/aerospike/aerospike-client-go/v7"
+ as "github.com/aerospike/aerospike-client-go/v8"
)
func main() {
diff --git a/examples/tls_secure_connection/tls_secure_connection.go b/examples/tls_secure_connection/tls_secure_connection.go
index 4cdbb4c6..b65fe166 100644
--- a/examples/tls_secure_connection/tls_secure_connection.go
+++ b/examples/tls_secure_connection/tls_secure_connection.go
@@ -25,7 +25,7 @@ import (
"os"
"path/filepath"
- as "github.com/aerospike/aerospike-client-go/v7"
+ as "github.com/aerospike/aerospike-client-go/v8"
)
var host = flag.String("h", "127.0.0.1", "Aerospike server seed hostnames or IP addresses")
diff --git a/examples/touch/touch.go b/examples/touch/touch.go
index 9b9ab62c..906d1c1c 100644
--- a/examples/touch/touch.go
+++ b/examples/touch/touch.go
@@ -21,8 +21,8 @@ import (
"log"
"time"
- as "github.com/aerospike/aerospike-client-go/v7"
- shared "github.com/aerospike/aerospike-client-go/v7/examples/shared"
+ as "github.com/aerospike/aerospike-client-go/v8"
+ shared "github.com/aerospike/aerospike-client-go/v8/examples/shared"
)
func main() {
diff --git a/examples/udf/udf.go b/examples/udf/udf.go
index dc0a5115..04df0b6c 100644
--- a/examples/udf/udf.go
+++ b/examples/udf/udf.go
@@ -21,8 +21,8 @@ import (
"bytes"
"log"
- as "github.com/aerospike/aerospike-client-go/v7"
- shared "github.com/aerospike/aerospike-client-go/v7/examples/shared"
+ as "github.com/aerospike/aerospike-client-go/v8"
+ shared "github.com/aerospike/aerospike-client-go/v8/examples/shared"
)
const udf = `
diff --git a/execute_command.go b/execute_command.go
index 9877a70c..f270c4b8 100644
--- a/execute_command.go
+++ b/execute_command.go
@@ -14,11 +14,16 @@
package aerospike
+import (
+ "github.com/aerospike/aerospike-client-go/v8/logger"
+ "github.com/aerospike/aerospike-client-go/v8/types"
+)
+
type executeCommand struct {
- readCommand
+ baseWriteCommand
+
+ record *Record
- // overwrite
- policy *WritePolicy
packageName string
functionName string
args *ValueArray
@@ -32,26 +37,16 @@ func newExecuteCommand(
functionName string,
args *ValueArray,
) (executeCommand, Error) {
- var err Error
- var partition *Partition
- if cluster != nil {
- partition, err = PartitionForWrite(cluster, &policy.BasePolicy, key)
- if err != nil {
- return executeCommand{}, err
- }
- }
-
- readCommand, err := newReadCommand(cluster, &policy.BasePolicy, key, nil, partition)
+ bwc, err := newBaseWriteCommand(cluster, policy, key)
if err != nil {
return executeCommand{}, err
}
return executeCommand{
- readCommand: readCommand,
- policy: policy,
- packageName: packageName,
- functionName: functionName,
- args: args,
+ baseWriteCommand: bwc,
+ packageName: packageName,
+ functionName: functionName,
+ args: args,
}, nil
}
@@ -59,23 +54,60 @@ func (cmd *executeCommand) writeBuffer(ifc command) Error {
return cmd.setUdf(cmd.policy, cmd.key, cmd.packageName, cmd.functionName, cmd.args)
}
-func (cmd *executeCommand) getNode(ifc command) (*Node, Error) {
- return cmd.partition.GetNodeWrite(cmd.cluster)
-}
+func (cmd *executeCommand) parseResult(ifc command, conn *Connection) Error {
+ rp, err := newRecordParser(&cmd.baseCommand)
+ if err != nil {
+ return err
+ }
-func (cmd *executeCommand) prepareRetry(ifc command, isTimeout bool) bool {
- cmd.partition.PrepareRetryWrite(isTimeout)
- return true
-}
+ if err := rp.parseFields(cmd.policy.Txn, cmd.key, true); err != nil {
+ return err
+ }
+
+ if rp.resultCode != 0 {
+ if rp.resultCode == types.KEY_NOT_FOUND_ERROR {
+ return ErrKeyNotFound.err()
+ } else if rp.resultCode == types.FILTERED_OUT {
+ return ErrFilteredOut.err()
+ } else if rp.resultCode == types.UDF_BAD_RESPONSE {
+ cmd.record, _ = rp.parseRecord(cmd.key, false)
+ err := cmd.handleUdfError(rp.resultCode)
+ logger.Logger.Debug("UDF execution error: " + err.Error())
+ return err
+ }
-func (cmd *executeCommand) isRead() bool {
- return false
+ return newError(rp.resultCode)
+ }
+
+ if rp.opCount == 0 {
+ // data Bin was not returned
+ cmd.record = newRecord(cmd.node, cmd.key, nil, rp.generation, rp.expiration)
+ return nil
+ }
+
+ cmd.record, err = rp.parseRecord(cmd.key, false)
+ if err != nil {
+ return err
+ }
+
+ return nil
}
func (cmd *executeCommand) Execute() Error {
return cmd.execute(cmd)
}
-func (cmd *executeCommand) transactionType() transactionType {
+func (cmd *executeCommand) commandType() commandType {
return ttUDF
}
+
+func (cmd *executeCommand) handleUdfError(resultCode types.ResultCode) Error {
+ if ret, exists := cmd.record.Bins["FAILURE"]; exists {
+ return newError(resultCode, ret.(string))
+ }
+ return newError(resultCode)
+}
+
+func (cmd *executeCommand) GetRecord() *Record {
+ return cmd.record
+}
diff --git a/execute_task.go b/execute_task.go
index 9d441f24..459d66dc 100644
--- a/execute_task.go
+++ b/execute_task.go
@@ -18,7 +18,7 @@ import (
"strconv"
"strings"
- "github.com/aerospike/aerospike-client-go/v7/types"
+ "github.com/aerospike/aerospike-client-go/v8/types"
)
// ExecuteTask is used to poll for long running server execute job completion.
@@ -28,8 +28,6 @@ type ExecuteTask struct {
taskID uint64
scan bool
- clnt ClientIfc
-
// The following map keeps an account of what nodes were ever observed with the job registered on them.
// If the job was ever observed, the task will return true for it is not found anymore (purged from task queue after completion)
observed map[string]struct{}
@@ -52,10 +50,6 @@ func (etsk *ExecuteTask) TaskId() uint64 {
// IsDone queries all nodes for task completion status.
func (etsk *ExecuteTask) IsDone() (bool, Error) {
- if etsk.clnt != nil {
- return etsk.grpcIsDone()
- }
-
var module string
if etsk.scan {
module = "scan"
diff --git a/exists_command.go b/exists_command.go
index 6eebbf9d..f050daf2 100644
--- a/exists_command.go
+++ b/exists_command.go
@@ -15,85 +15,56 @@
package aerospike
import (
- "github.com/aerospike/aerospike-client-go/v7/types"
-
- Buffer "github.com/aerospike/aerospike-client-go/v7/utils/buffer"
+ "github.com/aerospike/aerospike-client-go/v8/types"
)
// guarantee existsCommand implements command interface
var _ command = &existsCommand{}
type existsCommand struct {
- singleCommand
+ baseReadCommand
- policy *BasePolicy
exists bool
}
-func newExistsCommand(cluster *Cluster, policy *BasePolicy, key *Key) (*existsCommand, Error) {
- var err Error
- var partition *Partition
- if cluster != nil {
- partition, err = PartitionForRead(cluster, policy, key)
- if err != nil {
- return nil, err
- }
+func newExistsCommand(cluster *Cluster, policy *BasePolicy, key *Key) (existsCommand, Error) {
+ brc, err := newBaseReadCommand(cluster, policy, key)
+ if err != nil {
+ return existsCommand{}, err
}
- return &existsCommand{
- singleCommand: newSingleCommand(cluster, key, partition),
- policy: policy,
+ return existsCommand{
+ baseReadCommand: brc,
}, nil
}
-func (cmd *existsCommand) getPolicy(ifc command) Policy {
- return cmd.policy
-}
-
func (cmd *existsCommand) writeBuffer(ifc command) Error {
return cmd.setExists(cmd.policy, cmd.key)
}
-func (cmd *existsCommand) getNode(ifc command) (*Node, Error) {
- return cmd.partition.GetNodeRead(cmd.cluster)
-}
-
-func (cmd *existsCommand) prepareRetry(ifc command, isTimeout bool) bool {
- cmd.partition.PrepareRetryRead(isTimeout)
- return true
-}
-
func (cmd *existsCommand) parseResult(ifc command, conn *Connection) Error {
- // Read header.
- if _, err := conn.Read(cmd.dataBuffer, int(_MSG_TOTAL_HEADER_SIZE)); err != nil {
+ rp, err := newRecordParser(&cmd.baseCommand)
+ if err != nil {
return err
}
- header := Buffer.BytesToInt64(cmd.dataBuffer, 0)
-
- // Validate header to make sure we are at the beginning of a message
- if err := cmd.validateHeader(header); err != nil {
+ if err := rp.parseFields(cmd.policy.Txn, cmd.key, false); err != nil {
return err
}
- resultCode := cmd.dataBuffer[13] & 0xFF
-
- switch types.ResultCode(resultCode) {
- case 0:
+ switch rp.resultCode {
+ case types.OK:
cmd.exists = true
case types.KEY_NOT_FOUND_ERROR:
cmd.exists = false
case types.FILTERED_OUT:
- if err := cmd.emptySocket(conn); err != nil {
- return err
- }
cmd.exists = true
return ErrFilteredOut.err()
default:
- return newError(types.ResultCode(resultCode))
+ return newError(rp.resultCode)
}
- return cmd.emptySocket(conn)
+ return nil
}
func (cmd *existsCommand) Exists() bool {
@@ -104,6 +75,6 @@ func (cmd *existsCommand) Execute() Error {
return cmd.execute(cmd)
}
-func (cmd *existsCommand) transactionType() transactionType {
+func (cmd *existsCommand) commandType() commandType {
return ttExists
}
diff --git a/exp_bit_test.go b/exp_bit_test.go
index c0cef8f7..3f5053ab 100644
--- a/exp_bit_test.go
+++ b/exp_bit_test.go
@@ -15,7 +15,7 @@
package aerospike_test
import (
- as "github.com/aerospike/aerospike-client-go/v7"
+ as "github.com/aerospike/aerospike-client-go/v8"
gg "github.com/onsi/ginkgo/v2"
gm "github.com/onsi/gomega"
diff --git a/exp_hll_test.go b/exp_hll_test.go
index 9b8410c5..2c7de539 100644
--- a/exp_hll_test.go
+++ b/exp_hll_test.go
@@ -15,7 +15,7 @@
package aerospike_test
import (
- as "github.com/aerospike/aerospike-client-go/v7"
+ as "github.com/aerospike/aerospike-client-go/v8"
gg "github.com/onsi/ginkgo/v2"
gm "github.com/onsi/gomega"
diff --git a/exp_list.go b/exp_list.go
index a48b0747..610e00b2 100644
--- a/exp_list.go
+++ b/exp_list.go
@@ -17,7 +17,7 @@ package aerospike
import (
"fmt"
- "github.com/aerospike/aerospike-client-go/v7/types"
+ "github.com/aerospike/aerospike-client-go/v8/types"
)
const expListMODULE int64 = 0
diff --git a/exp_list_test.go b/exp_list_test.go
index dd78e7fd..64aea581 100644
--- a/exp_list_test.go
+++ b/exp_list_test.go
@@ -15,7 +15,7 @@
package aerospike_test
import (
- as "github.com/aerospike/aerospike-client-go/v7"
+ as "github.com/aerospike/aerospike-client-go/v8"
gg "github.com/onsi/ginkgo/v2"
gm "github.com/onsi/gomega"
diff --git a/exp_map.go b/exp_map.go
index 7eb27219..b3789d28 100644
--- a/exp_map.go
+++ b/exp_map.go
@@ -17,7 +17,7 @@ package aerospike
import (
"fmt"
- "github.com/aerospike/aerospike-client-go/v7/types"
+ "github.com/aerospike/aerospike-client-go/v8/types"
)
const expMapMODULE int64 = 0
diff --git a/exp_map_test.go b/exp_map_test.go
index fa7f7633..54d872df 100644
--- a/exp_map_test.go
+++ b/exp_map_test.go
@@ -15,7 +15,7 @@
package aerospike_test
import (
- as "github.com/aerospike/aerospike-client-go/v7"
+ as "github.com/aerospike/aerospike-client-go/v8"
gg "github.com/onsi/ginkgo/v2"
gm "github.com/onsi/gomega"
diff --git a/exp_ops_test.go b/exp_ops_test.go
index 2b62ba64..70c765b4 100644
--- a/exp_ops_test.go
+++ b/exp_ops_test.go
@@ -21,8 +21,8 @@ import (
gg "github.com/onsi/ginkgo/v2"
gm "github.com/onsi/gomega"
- as "github.com/aerospike/aerospike-client-go/v7"
- ast "github.com/aerospike/aerospike-client-go/v7/types"
+ as "github.com/aerospike/aerospike-client-go/v8"
+ ast "github.com/aerospike/aerospike-client-go/v8/types"
)
const udfPredexpBody = `local function putBin(r,name,value)
diff --git a/expression.go b/expression.go
index 89a2bfd7..cb363840 100644
--- a/expression.go
+++ b/expression.go
@@ -17,8 +17,8 @@ package aerospike
import (
"encoding/base64"
- "github.com/aerospike/aerospike-client-go/v7/types"
- ParticleType "github.com/aerospike/aerospike-client-go/v7/types/particle_type"
+ "github.com/aerospike/aerospike-client-go/v8/types"
+ ParticleType "github.com/aerospike/aerospike-client-go/v8/types/particle_type"
)
// ExpressionArgument is used for passing arguments to filter expressions.
@@ -478,13 +478,8 @@ func (fe *Expression) pack(buf BufferEx) (int, Error) {
}
func (fe *Expression) Base64() (string, Error) {
- buf := fe.grpc()
- return base64.StdEncoding.EncodeToString(buf), nil
-}
-
-func (fe *Expression) grpc() []byte {
if fe == nil {
- return nil
+ return "", nil
}
sz, err := fe.size()
@@ -498,7 +493,7 @@ func (fe *Expression) grpc() []byte {
panic(err)
}
- return buf.Bytes()
+ return base64.StdEncoding.EncodeToString(buf.Bytes()), nil
}
// ExpFromBase64 creates an expression from an encoded base64 expression.
@@ -658,7 +653,6 @@ func ExpBinExists(name string) *Expression {
// LDT = 21
// GEOJSON = 23
func ExpBinType(name string) *Expression {
- // TODO: Improve documentation and provide examples.
return newFilterExpression(
&expOpBIN_TYPE,
StringValue(name),
diff --git a/expression_ops_test.go b/expression_ops_test.go
index d4e39811..4642a843 100644
--- a/expression_ops_test.go
+++ b/expression_ops_test.go
@@ -15,8 +15,8 @@
package aerospike_test
import (
- as "github.com/aerospike/aerospike-client-go/v7"
- ast "github.com/aerospike/aerospike-client-go/v7/types"
+ as "github.com/aerospike/aerospike-client-go/v8"
+ ast "github.com/aerospike/aerospike-client-go/v8/types"
gg "github.com/onsi/ginkgo/v2"
gm "github.com/onsi/gomega"
@@ -60,7 +60,7 @@ var _ = gg.Describe("Expression Operations", func() {
)
gm.Expect(err).ToNot(gm.HaveOccurred())
- gm.Expect(rec.Bins).To(gm.Equal(as.BinMap{"C": []interface{}{nil, []interface{}{"a", "b", "c", "d"}}, "var": []interface{}{"a", "b", "c", "d"}}))
+ gm.Expect(rec.Bins).To(gm.Equal(as.BinMap{"C": as.OpResults{nil, []any{"a", "b", "c", "d"}}, "var": []any{"a", "b", "c", "d"}}))
})
gg.It("Read Eval error should work", func() {
@@ -212,7 +212,7 @@ var _ = gg.Describe("Expression Operations", func() {
gm.Expect(err).ToNot(gm.HaveOccurred())
gm.Expect(len(r.Bins)).To(gm.BeNumerically(">", 0))
- gm.Expect(r.Bins).To(gm.Equal(as.BinMap{binC: []interface{}{nil, nil}}))
+ gm.Expect(r.Bins).To(gm.Equal(as.BinMap{binC: as.OpResults{nil, nil}}))
})
gg.It("Return Nil should work", func() {
@@ -235,7 +235,7 @@ var _ = gg.Describe("Expression Operations", func() {
as.ExpReadOp(expVar, exp, as.ExpReadFlagDefault),
)
gm.Expect(err).ToNot(gm.HaveOccurred())
- gm.Expect(r.Bins).To(gm.Equal(as.BinMap{expVar: 5, binC: []interface{}{nil, 5}}))
+ gm.Expect(r.Bins).To(gm.Equal(as.BinMap{expVar: 5, binC: as.OpResults{nil, 5}}))
r, err = client.Operate(nil, keyA,
as.ExpReadOp(expVar, exp, as.ExpReadFlagDefault),
@@ -253,7 +253,7 @@ var _ = gg.Describe("Expression Operations", func() {
as.ExpReadOp(expVar, exp, as.ExpReadFlagDefault),
)
gm.Expect(err).ToNot(gm.HaveOccurred())
- gm.Expect(r.Bins).To(gm.Equal(as.BinMap{expVar: 5.1, binC: []interface{}{nil, 5.1}}))
+ gm.Expect(r.Bins).To(gm.Equal(as.BinMap{expVar: 5.1, binC: as.OpResults{nil, 5.1}}))
r, err = client.Operate(nil, keyA,
as.ExpReadOp(expVar, exp, as.ExpReadFlagDefault),
@@ -272,7 +272,7 @@ var _ = gg.Describe("Expression Operations", func() {
as.ExpReadOp(expVar, exp, as.ExpReadFlagDefault),
)
gm.Expect(err).ToNot(gm.HaveOccurred())
- gm.Expect(r.Bins).To(gm.Equal(as.BinMap{expVar: str, binC: []interface{}{nil, str}}))
+ gm.Expect(r.Bins).To(gm.Equal(as.BinMap{expVar: str, binC: as.OpResults{nil, str}}))
r, err = client.Operate(nil, keyA,
as.ExpReadOp(expVar, exp, as.ExpReadFlagDefault),
@@ -291,7 +291,7 @@ var _ = gg.Describe("Expression Operations", func() {
as.ExpReadOp(expVar, exp, as.ExpReadFlagDefault),
)
gm.Expect(err).ToNot(gm.HaveOccurred())
- gm.Expect(r.Bins).To(gm.Equal(as.BinMap{expVar: blob, binC: []interface{}{nil, blob}}))
+ gm.Expect(r.Bins).To(gm.Equal(as.BinMap{expVar: blob, binC: as.OpResults{nil, blob}}))
r, err = client.Operate(nil, keyA,
as.ExpReadOp(expVar, exp, as.ExpReadFlagDefault),
@@ -309,7 +309,7 @@ var _ = gg.Describe("Expression Operations", func() {
as.ExpReadOp(expVar, exp, as.ExpReadFlagDefault),
)
gm.Expect(err).ToNot(gm.HaveOccurred())
- gm.Expect(r.Bins).To(gm.Equal(as.BinMap{expVar: true, binC: []interface{}{nil, true}}))
+ gm.Expect(r.Bins).To(gm.Equal(as.BinMap{expVar: true, binC: as.OpResults{nil, true}}))
})
gg.It("Return HLL should work", func() {
@@ -324,11 +324,11 @@ var _ = gg.Describe("Expression Operations", func() {
)
gm.Expect(err).ToNot(gm.HaveOccurred())
gm.Expect(r.Bins).To(gm.Equal(as.BinMap{
- binH: []interface{}{
+ binH: as.OpResults{
nil,
as.HLLValue([]uint8{0, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}),
},
- binC: []interface{}{
+ binC: as.OpResults{
nil,
as.HLLValue([]uint8{0, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}),
},
diff --git a/expression_test.go b/expression_test.go
index 6af030c0..04926a95 100644
--- a/expression_test.go
+++ b/expression_test.go
@@ -18,8 +18,8 @@ import (
"fmt"
"time"
- as "github.com/aerospike/aerospike-client-go/v7"
- ast "github.com/aerospike/aerospike-client-go/v7/types"
+ as "github.com/aerospike/aerospike-client-go/v8"
+ ast "github.com/aerospike/aerospike-client-go/v8/types"
gg "github.com/onsi/ginkgo/v2"
gm "github.com/onsi/gomega"
@@ -41,19 +41,11 @@ var _ = gg.Describe("Expression Filters", func() {
const keyCount = 1000
gg.AfterAll(func() {
- if *dbaas {
- gg.Skip("Not supported in DBAAS environment")
- }
-
dropIndex(nil, ns, set, "intval")
dropIndex(nil, ns, set, "strval")
})
gg.BeforeAll(func() {
- if *dbaas {
- gg.Skip("Not supported in DBAAS environment")
- }
-
dropIndex(nil, ns, set, "intval")
dropIndex(nil, ns, set, "strval")
@@ -508,10 +500,6 @@ var _ = gg.Describe("Expression Filters", func() {
var _ = gg.Context("Record Ops", func() {
gg.It("ExpRecordSize must work", func() {
- if *dbaas {
- gg.Skip("Not supported in DBAAS environment")
- }
-
if serverIsOlderThan("7") {
gg.Skip("Not supported servers before v7")
}
@@ -540,10 +528,6 @@ var _ = gg.Describe("Expression Filters", func() {
})
gg.It("ExpMemorySize must work", func() {
- if *dbaas {
- gg.Skip("Not supported in DBAAS environment")
- }
-
if len(nsInfo(ns, "device_total_bytes")) > 0 {
gg.Skip("Skipping ExpDeviceSize test since the namespace is persisted and the test works only for Memory-Only namespaces.")
}
diff --git a/field_type.go b/field_type.go
index 84ff3382..1227a80a 100644
--- a/field_type.go
+++ b/field_type.go
@@ -19,19 +19,14 @@ type FieldType int
// FieldType constants used in the Aerospike Wire Protocol.
const (
- NAMESPACE FieldType = 0
- TABLE FieldType = 1
- KEY FieldType = 2
-
- //BIN FieldType = 3;
-
- DIGEST_RIPE FieldType = 4
-
- //GU_TID FieldType = 5;
-
- DIGEST_RIPE_ARRAY FieldType = 6
- TRAN_ID FieldType = 7 // user supplied transaction id, which is simply passed back
- SCAN_OPTIONS FieldType = 8
+ NAMESPACE FieldType = 0
+ TABLE FieldType = 1
+ KEY FieldType = 2
+ RECORD_VERSION FieldType = 3
+ DIGEST_RIPE FieldType = 4
+ MRT_ID FieldType = 5
+ MRT_DEADLINE FieldType = 6
+ QUERY_ID FieldType = 7
SOCKET_TIMEOUT FieldType = 9
RECORDS_PER_SECOND FieldType = 10
PID_ARRAY FieldType = 11
diff --git a/filter.go b/filter.go
index a07c0634..5e4d95c7 100644
--- a/filter.go
+++ b/filter.go
@@ -17,7 +17,7 @@ package aerospike
import (
"fmt"
- ParticleType "github.com/aerospike/aerospike-client-go/v7/types/particle_type"
+ ParticleType "github.com/aerospike/aerospike-client-go/v8/types/particle_type"
)
// Filter specifies a query filter definition.
@@ -146,18 +146,6 @@ func (fltr *Filter) EstimateSize() (int, Error) {
return len(fltr.name) + szBegin + szEnd + 10, nil
}
-func (fltr *Filter) grpcPackCtxPayload() []byte {
- sz, err := fltr.estimatePackedCtxSize()
- if err != nil {
- panic(err)
- }
- buf := newBuffer(sz)
- if _, err := fltr.packCtx(buf); err != nil {
- panic(err)
- }
- return buf.Bytes()
-}
-
// Retrieve packed Context.
// For internal use only.
func (fltr *Filter) packCtx(cmd BufferEx) (sz int, err Error) {
diff --git a/geo_test.go b/geo_test.go
index 1e5793ec..8ee2a629 100644
--- a/geo_test.go
+++ b/geo_test.go
@@ -17,7 +17,7 @@ package aerospike_test
import (
"fmt"
- as "github.com/aerospike/aerospike-client-go/v7"
+ as "github.com/aerospike/aerospike-client-go/v8"
gg "github.com/onsi/ginkgo/v2"
gm "github.com/onsi/gomega"
@@ -44,19 +44,12 @@ var _ = gg.Describe("Geo Spacial Tests", gg.Ordered, func() {
var binName = "GeoBin"
gg.BeforeAll(func() {
- if *dbaas {
- gg.Skip("Not supported in DBAAS environment")
- }
// queries only work on indices
dropIndex(wpolicy, ns, set, set+binName)
createIndex(wpolicy, ns, set, set+binName, binName, as.GEO2DSPHERE)
})
gg.AfterAll(func() {
- if *dbaas {
- gg.Skip("Not supported in DBAAS environment")
- }
-
dropIndex(wpolicy, ns, set, set+binName)
})
diff --git a/go.mod b/go.mod
index 6f2b6d8e..4c9efd62 100644
--- a/go.mod
+++ b/go.mod
@@ -1,35 +1,27 @@
-module github.com/aerospike/aerospike-client-go/v7
+module github.com/aerospike/aerospike-client-go/v8
-go 1.20
+go 1.23
require (
- github.com/onsi/ginkgo/v2 v2.16.0
- github.com/onsi/gomega v1.32.0
+ github.com/onsi/ginkgo/v2 v2.22.0
+ github.com/onsi/gomega v1.36.1
github.com/yuin/gopher-lua v1.1.1
- golang.org/x/sync v0.7.0
- google.golang.org/grpc v1.63.3
- google.golang.org/protobuf v1.34.2
+ golang.org/x/sync v0.10.0
)
require (
github.com/go-logr/logr v1.4.2 // indirect
- github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect
- github.com/golang/protobuf v1.5.4 // indirect
+ github.com/go-task/slim-sprig/v3 v3.0.0 // indirect
github.com/google/go-cmp v0.6.0 // indirect
- github.com/google/pprof v0.0.0-20240711041743-f6c9dda6c6da // indirect
- github.com/kr/pretty v0.1.0 // indirect
- github.com/stretchr/testify v1.8.4 // indirect
+ github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad // indirect
+ github.com/kr/pretty v0.3.1 // indirect
+ github.com/stretchr/testify v1.10.0 // indirect
github.com/wadey/gocovmerge v0.0.0-20160331181800-b5bfa59ec0ad // indirect
- golang.org/x/net v0.26.0 // indirect
- golang.org/x/sys v0.21.0 // indirect
- golang.org/x/text v0.16.0 // indirect
- golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d // indirect
- google.golang.org/genproto/googleapis/rpc v0.0.0-20240711142825-46eb208f015d // indirect
- gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 // indirect
+ golang.org/x/net v0.32.0 // indirect
+ golang.org/x/sys v0.28.0 // indirect
+ golang.org/x/text v0.21.0 // indirect
+ golang.org/x/tools v0.28.0 // indirect
+ google.golang.org/protobuf v1.35.2 // indirect
+ gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)
-
-retract (
- v7.3.0 // `Client.BatchGetOperate` issue
- v7.7.0 // nil deref in tend logic
-)
diff --git a/go.sum b/go.sum
index b2dbf46a..882d2ea1 100644
--- a/go.sum
+++ b/go.sum
@@ -1,82 +1,48 @@
-github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ=
-github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
-github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
-github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls=
-github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
-github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
+github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
+github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
-github.com/google/pprof v0.0.0-20240227163752-401108e1b7e7 h1:y3N7Bm7Y9/CtpiVkw/ZWj6lSlDF3F74SfKwfTCer72Q=
-github.com/google/pprof v0.0.0-20240227163752-401108e1b7e7/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik=
-github.com/google/pprof v0.0.0-20240711041743-f6c9dda6c6da h1:xRmpO92tb8y+Z85iUOMOicpCfaYcv7o3Cg3wKrIpg8g=
-github.com/google/pprof v0.0.0-20240711041743-f6c9dda6c6da/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo=
-github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
-github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad h1:a6HEuzUHeKH6hwfN/ZoQgRgVIWFJljSWa/zetS2WTvg=
+github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144=
+github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
+github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
+github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
-github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
-github.com/onsi/ginkgo/v2 v2.16.0 h1:7q1w9frJDzninhXxjZd+Y/x54XNjG/UlRLIYPZafsPM=
-github.com/onsi/ginkgo/v2 v2.16.0/go.mod h1:llBI3WDLL9Z6taip6f33H76YcWtJv+7R3HigUjbIBOs=
-github.com/onsi/gomega v1.32.0 h1:JRYU78fJ1LPxlckP6Txi/EYqJvjtMrDC04/MM5XRHPk=
-github.com/onsi/gomega v1.32.0/go.mod h1:a4x4gW6Pz2yK1MAmvluYme5lvYTn61afQ2ETw/8n4Lg=
+github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
+github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
+github.com/onsi/ginkgo/v2 v2.22.0 h1:Yed107/8DjTr0lKCNt7Dn8yQ6ybuDRQoMGrNFKzMfHg=
+github.com/onsi/ginkgo/v2 v2.22.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo=
+github.com/onsi/gomega v1.36.1 h1:bJDPBO7ibjxcbHMgSCoo4Yj18UWbKDlLwX1x9sybDcw=
+github.com/onsi/gomega v1.36.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog=
+github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
-github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
-github.com/wadey/gocovmerge v0.0.0-20160331181800-b5bfa59ec0ad h1:W0LEBv82YCGEtcmPA3uNZBI33/qF//HAAs3MawDjRa0=
-github.com/wadey/gocovmerge v0.0.0-20160331181800-b5bfa59ec0ad/go.mod h1:Hy8o65+MXnS6EwGElrSRjUzQDLXreJlzYLlWiHtt8hM=
+github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8=
+github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
+github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
+github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/yuin/gopher-lua v1.1.1 h1:kYKnWBjvbNP4XLT3+bPEwAXJx262OhaHDWDVOPjL46M=
github.com/yuin/gopher-lua v1.1.1/go.mod h1:GBR0iDaNXjAgGg9zfCvksxSRnQx76gclCIb7kdAd1Pw=
-golang.org/x/net v0.22.0 h1:9sGLhx7iRIHEiX0oAJ3MRZMUCElJgy7Br1nO+AMN3Tc=
-golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
-golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w=
-golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8=
-golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ=
-golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE=
-golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ=
-golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
-golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=
-golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
-golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4=
-golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
-golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o=
-golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
-golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws=
-golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
-golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
-golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
-golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4=
-golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI=
-golang.org/x/tools v0.19.0 h1:tfGCXNR1OsFG+sVdLAitlpjAvD/I6dHDKnYrpEZUHkw=
-golang.org/x/tools v0.19.0/go.mod h1:qoJWxmGSIBmAeriMx19ogtrEPrGtDbPK634QFIcLAhc=
-golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg=
-golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237 h1:NnYq6UN9ReLM9/Y01KWNOWyI5xQ9kbIms5GGJVwS/Yc=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda h1:LI5DOvAxUPMv/50agcLLoo+AdWc1irS9Rzz4vPuD1V4=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240711142825-46eb208f015d h1:JU0iKnSg02Gmb5ZdV8nYsKEKsP6o/FGVWTrw4i1DA9A=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240711142825-46eb208f015d/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY=
-google.golang.org/grpc v1.62.1 h1:B4n+nfKzOICUXMgyrNd19h/I9oH0L1pizfk1d4zSgTk=
-google.golang.org/grpc v1.62.1/go.mod h1:IWTG0VlJLCh1SkC58F7np9ka9mx/WNkjl4PGJaiq+QE=
-google.golang.org/grpc v1.63.2 h1:MUeiw1B2maTVZthpU5xvASfTh3LDbxHd6IJ6QQVU+xM=
-google.golang.org/grpc v1.63.2/go.mod h1:WAX/8DgncnokcFUldAxq7GeB5DXHDbMF+lLvDomNkRA=
-google.golang.org/grpc v1.63.3 h1:FGVegD7MHo/zhaGduk/R85WvSFJ+si70UQIJ0fg+BiU=
-google.golang.org/grpc v1.63.3/go.mod h1:5FFeE/YiGPD2flWFCrCx8K3Ay7hALATnKiI8U3avIuw=
-google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
-google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
-google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=
-google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=
+golang.org/x/net v0.32.0 h1:ZqPmj8Kzc+Y6e0+skZsuACbx+wzMgo5MQsJh9Qd6aYI=
+golang.org/x/net v0.32.0/go.mod h1:CwU0IoeOlnQQWJ6ioyFrfRuomB8GKF6KbYXZVyeXNfs=
+golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ=
+golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA=
+golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
+golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
+golang.org/x/tools v0.28.0 h1:WuB6qZ4RPCQo5aP3WdKZS7i595EdWqWR8vqJTlwTVK8=
+golang.org/x/tools v0.28.0/go.mod h1:dcIOrVd3mfQKTgrDVQHqCPMWy6lnhfhtX3hLXYVLfRw=
+google.golang.org/protobuf v1.35.2 h1:8Ar7bF+apOIoThw1EdZl0p1oWvMqTHmpA2fRTyZO8io=
+google.golang.org/protobuf v1.35.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
-gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
diff --git a/helper_test.go b/helper_test.go
index 30af0ba1..663d05d2 100644
--- a/helper_test.go
+++ b/helper_test.go
@@ -50,3 +50,50 @@ func (nd *Node) ConnsCount() int {
func (nd *Node) CloseConnections() {
nd.closeConnections()
}
+
+// ConfiguredAsStrongConsistency reports whether the namespace is configured in strong-consistency (SC) mode.
+func ConfiguredAsStrongConsistency(client *Client, namespace string) bool {
+ // Must copy hashmap reference for copy on write semantics to work.
+ pmap := client.cluster.getPartitions()
+ p := pmap[namespace]
+ if p == nil {
+ return false
+ }
+
+ return p.SCMode
+}
+
+func NewWriteCommand(
+ cluster *Cluster,
+ policy *WritePolicy,
+ key *Key,
+ bins []*Bin,
+ binMap BinMap) (writeCommand, Error) {
+ return newWriteCommand(
+ cluster,
+ policy,
+ key,
+ bins,
+ binMap,
+ _WRITE)
+}
+
+func (cmd *writeCommand) WriteBuffer(ifc command) Error {
+ return cmd.writeBuffer(ifc)
+}
+
+func (cmd *writeCommand) Buffer() []byte {
+ return cmd.dataBuffer[:cmd.dataOffset]
+}
+
+func NewDeleteCommand(cluster *Cluster, policy *WritePolicy, key *Key) (*deleteCommand, Error) {
+ return newDeleteCommand(cluster, policy, key)
+}
+
+func (cmd *deleteCommand) WriteBuffer(ifc command) Error {
+ return cmd.writeBuffer(ifc)
+}
+
+func (cmd *deleteCommand) Buffer() []byte {
+ return cmd.dataBuffer[:cmd.dataOffset]
+}
diff --git a/hll_operation.go b/hll_operation.go
index c32e079c..cc1f5d38 100644
--- a/hll_operation.go
+++ b/hll_operation.go
@@ -18,7 +18,7 @@ package aerospike
import (
"fmt"
- ParticleType "github.com/aerospike/aerospike-client-go/v7/types/particle_type"
+ ParticleType "github.com/aerospike/aerospike-client-go/v8/types/particle_type"
)
// HyperLogLog (HLL) operations.
diff --git a/hll_operation_test.go b/hll_operation_test.go
index 8bbd63f4..01ebab43 100644
--- a/hll_operation_test.go
+++ b/hll_operation_test.go
@@ -21,8 +21,8 @@ import (
gg "github.com/onsi/ginkgo/v2"
gm "github.com/onsi/gomega"
- as "github.com/aerospike/aerospike-client-go/v7"
- ast "github.com/aerospike/aerospike-client-go/v7/types"
+ as "github.com/aerospike/aerospike-client-go/v8"
+ ast "github.com/aerospike/aerospike-client-go/v8/types"
)
var _ = gg.Describe("HyperLogLog Test", func() {
@@ -50,10 +50,6 @@ var _ = gg.Describe("HyperLogLog Test", func() {
var illegalDescriptions [][]int
gg.BeforeEach(func() {
- if *dbaas {
- gg.Skip("Not supported in DBAAS environment")
- }
-
for i := 0; i < nEntries; i++ {
entries = append(entries, as.StringValue("key "+strconv.Itoa(i)))
}
@@ -173,7 +169,7 @@ var _ = gg.Describe("HyperLogLog Test", func() {
}
record := expectSuccess(key, ops...)
- result_list := record.Bins[binName].([]interface{})
+ result_list := record.Bins[binName].(as.OpResults)
count := result_list[1]
count1 := result_list[2]
description := result_list[3].([]interface{})
@@ -277,7 +273,7 @@ var _ = gg.Describe("HyperLogLog Test", func() {
}
record := expectSuccess(key, ops...)
- result_list := record.Bins[binName].([]interface{})
+ result_list := record.Bins[binName].(as.OpResults)
count := result_list[1].(int)
count1 := result_list[2].(int)
description := result_list[3].([]interface{})
@@ -347,7 +343,7 @@ var _ = gg.Describe("HyperLogLog Test", func() {
as.HLLRefreshCountOp(binName),
as.HLLDescribeOp(binName))
- resulta_list := recorda.Bins[binName].([]interface{})
+ resulta_list := recorda.Bins[binName].(as.OpResults)
counta := resulta_list[1].(int)
counta1 := resulta_list[2].(int)
descriptiona := resulta_list[3].([]interface{})
@@ -364,7 +360,7 @@ var _ = gg.Describe("HyperLogLog Test", func() {
as.HLLGetCountOp(binName),
as.HLLDescribeOp(binName))
- resultb_list := recordb.Bins[binName].([]interface{})
+ resultb_list := recordb.Bins[binName].(as.OpResults)
countb := resultb_list[1].(int)
n_added0 := resultb_list[2].(int)
countb1 := resultb_list[4].(int)
@@ -451,7 +447,7 @@ var _ = gg.Describe("HyperLogLog Test", func() {
as.DeleteOp(),
as.HLLAddOp(p, binName, sub_vals, ix, -1),
as.HLLGetCountOp(binName))
- result_list := record.Bins[binName].([]interface{})
+ result_list := record.Bins[binName].(as.OpResults)
count := result_list[1].(int)
expectHLLCount(ix, count, len(sub_vals))
@@ -461,7 +457,7 @@ var _ = gg.Describe("HyperLogLog Test", func() {
for i := 0; i < len(keys); i++ {
record := expectSuccess(keys[i], as.GetBinOp(binName), as.HLLGetCountOp(binName))
- result_list := record.Bins[binName].([]interface{})
+ result_list := record.Bins[binName].(as.OpResults)
hll := result_list[0].(as.HLLValue)
gm.Expect(hll).NotTo(gm.BeNil())
@@ -484,7 +480,7 @@ var _ = gg.Describe("HyperLogLog Test", func() {
}
record_union := expectSuccess(key, ops...)
- union_result_list := record_union.Bins[binName].([]interface{})
+ union_result_list := record_union.Bins[binName].(as.OpResults)
union_count := union_result_list[2].(int)
union_count2 := union_result_list[4].(int)
@@ -496,7 +492,7 @@ var _ = gg.Describe("HyperLogLog Test", func() {
record := expectSuccess(key,
as.HLLAddOp(p, binName, sub_vals, index_bits, -1),
as.HLLGetCountOp(binName))
- result_list := record.Bins[binName].([]interface{})
+ result_list := record.Bins[binName].(as.OpResults)
n_added := result_list[0].(int)
count := result_list[1].(int)
@@ -539,7 +535,7 @@ var _ = gg.Describe("HyperLogLog Test", func() {
as.DeleteOp(),
as.HLLAddOp(as.DefaultHLLPolicy(), otherName, entries, index_bits, -1),
as.GetBinOp(otherName))
- result_list := record.Bins[otherName].([]interface{})
+ result_list := record.Bins[otherName].(as.OpResults)
hll := result_list[1].(as.HLLValue)
hlls = append(hlls, hll)
@@ -644,7 +640,7 @@ var _ = gg.Describe("HyperLogLog Test", func() {
as.HLLAddOp(as.DefaultHLLPolicy(), binName, sub_vals, index_bits, -1),
as.GetBinOp(binName))
- result_list := record.Bins[binName].([]interface{})
+ result_list := record.Bins[binName].(as.OpResults)
hlls = append(hlls, result_list[1].(as.HLLValue))
expected_union_count += len(sub_vals)
vals = append(vals, sub_vals)
@@ -659,7 +655,7 @@ var _ = gg.Describe("HyperLogLog Test", func() {
record := expectSuccess(key,
as.HLLGetUnionOp(binName, hlls),
as.HLLGetUnionCountOp(binName, hlls))
- result_list := record.Bins[binName].([]interface{})
+ result_list := record.Bins[binName].(as.OpResults)
union_count := result_list[1].(int)
expectHLLCount(index_bits, union_count, expected_union_count)
@@ -669,7 +665,7 @@ var _ = gg.Describe("HyperLogLog Test", func() {
record = expectSuccess(key,
as.PutOp(as.NewBin(binName, union_hll)),
as.HLLGetCountOp(binName))
- result_list = record.Bins[binName].([]interface{})
+ result_list = record.Bins[binName].(as.OpResults)
union_count_2 := result_list[1].(int)
gm.Expect(union_count).To(gm.Equal(union_count_2))
@@ -694,7 +690,7 @@ var _ = gg.Describe("HyperLogLog Test", func() {
as.HLLGetCountOp(binName),
as.HLLDescribeOp(binName))
- result_list := record.Bins[binName].([]interface{})
+ result_list := record.Bins[binName].(as.OpResults)
count := result_list[0].(int)
description := result_list[1].([]interface{})
@@ -737,7 +733,7 @@ var _ = gg.Describe("HyperLogLog Test", func() {
as.HLLAddOp(as.DefaultHLLPolicy(), binName, common, index_bits, minhash_bits),
as.GetBinOp(binName))
- result_list := record.Bins[binName].([]interface{})
+ result_list := record.Bins[binName].(as.OpResults)
hlls = append(hlls, result_list[2].(as.HLLValue))
}
@@ -747,7 +743,7 @@ var _ = gg.Describe("HyperLogLog Test", func() {
as.HLLInitOp(as.DefaultHLLPolicy(), binName+"other", index_bits, minhash_bits),
as.HLLSetUnionOp(as.DefaultHLLPolicy(), binName, hlls),
as.HLLDescribeOp(binName))
- result_list := record.Bins[binName].([]interface{})
+ result_list := record.Bins[binName].(as.OpResults)
description := result_list[1].([]interface{})
expectDescription(description, index_bits, minhash_bits)
@@ -755,7 +751,7 @@ var _ = gg.Describe("HyperLogLog Test", func() {
record = expectSuccess(key,
as.HLLGetSimilarityOp(binName, hlls),
as.HLLGetIntersectCountOp(binName, hlls))
- result_list = record.Bins[binName].([]interface{})
+ result_list = record.Bins[binName].(as.OpResults)
sim := result_list[0].(float64)
intersect_count := result_list[1].(int)
expected_similarity := overlap
@@ -766,10 +762,6 @@ var _ = gg.Describe("HyperLogLog Test", func() {
}
gg.It("Similarity should work", func() {
- if *proxy {
- gg.Skip("Too long for the Proxy Client")
- }
-
overlaps := []float64{0.0001, 0.001, 0.01, 0.1, 0.5}
nEntries := 1 << 18
@@ -817,7 +809,7 @@ var _ = gg.Describe("HyperLogLog Test", func() {
as.HLLInitOp(as.DefaultHLLPolicy(), binName, nIndexBits, nMinhashBits),
as.GetBinOp(binName))
- resultList := record.Bins[binName].([]interface{})
+ resultList := record.Bins[binName].(as.OpResults)
var hlls []as.HLLValue
hlls = append(hlls, resultList[1].(as.HLLValue))
@@ -826,7 +818,7 @@ var _ = gg.Describe("HyperLogLog Test", func() {
as.HLLGetSimilarityOp(binName, hlls),
as.HLLGetIntersectCountOp(binName, hlls))
- resultList = record.Bins[binName].([]interface{})
+ resultList = record.Bins[binName].(as.OpResults)
sim := resultList[0].(float64)
intersectCount := resultList[1].(int)
@@ -856,19 +848,19 @@ var _ = gg.Describe("HyperLogLog Test", func() {
var hlls []as.HLLValue
var hmhs []as.HLLValue
- resultList := record.Bins[binName].([]interface{})
+ resultList := record.Bins[binName].(as.OpResults)
hlls = append(hlls, resultList[1].(as.HLLValue))
hlls = append(hlls, hlls[0])
- resultList = record.Bins[otherBinName].([]interface{})
+ resultList = record.Bins[otherBinName].(as.OpResults)
hmhs = append(hmhs, resultList[1].(as.HLLValue))
hmhs = append(hmhs, hmhs[0])
record = expectSuccess(key,
as.HLLGetIntersectCountOp(binName, hlls),
as.HLLGetSimilarityOp(binName, hlls))
- resultList = record.Bins[binName].([]interface{})
+ resultList = record.Bins[binName].(as.OpResults)
intersectCount := resultList[0].(int)
@@ -884,7 +876,7 @@ var _ = gg.Describe("HyperLogLog Test", func() {
record = expectSuccess(key,
as.HLLGetIntersectCountOp(binName, hmhs),
as.HLLGetSimilarityOp(binName, hmhs))
- resultList = record.Bins[binName].([]interface{})
+ resultList = record.Bins[binName].(as.OpResults)
intersectCount = resultList[0].(int)
gm.Expect(float64(intersectCount) < 1.8*float64(len(entries))).To(gm.BeTrue())
diff --git a/host.go b/host.go
index 0e3e969e..dd95ade4 100644
--- a/host.go
+++ b/host.go
@@ -19,7 +19,7 @@ import (
"net"
"strconv"
- "github.com/aerospike/aerospike-client-go/v7/types"
+ "github.com/aerospike/aerospike-client-go/v8/types"
)
// Host name/port of database server.
diff --git a/host_test.go b/host_test.go
index 95c45f6b..50721da8 100644
--- a/host_test.go
+++ b/host_test.go
@@ -15,7 +15,7 @@
package aerospike_test
import (
- as "github.com/aerospike/aerospike-client-go/v7"
+ as "github.com/aerospike/aerospike-client-go/v8"
gg "github.com/onsi/ginkgo/v2"
gm "github.com/onsi/gomega"
diff --git a/index_test.go b/index_test.go
index 299f2688..dd130b0e 100644
--- a/index_test.go
+++ b/index_test.go
@@ -18,7 +18,7 @@ import (
"math"
"math/rand"
- as "github.com/aerospike/aerospike-client-go/v7"
+ as "github.com/aerospike/aerospike-client-go/v8"
gg "github.com/onsi/ginkgo/v2"
gm "github.com/onsi/gomega"
@@ -27,12 +27,6 @@ import (
// ALL tests are isolated by SetName and Key, which are 50 random characters
var _ = gg.Describe("Index operations test", func() {
- gg.BeforeEach(func() {
- if *proxy {
- gg.Skip("Not supported in Proxy Client")
- }
- })
-
gg.Describe("Index creation", func() {
var err error
diff --git a/info.go b/info.go
index 79bc4697..d3e5b76d 100644
--- a/info.go
+++ b/info.go
@@ -20,8 +20,8 @@ import (
"strings"
"time"
- "github.com/aerospike/aerospike-client-go/v7/logger"
- "github.com/aerospike/aerospike-client-go/v7/types"
+ "github.com/aerospike/aerospike-client-go/v8/logger"
+ "github.com/aerospike/aerospike-client-go/v8/types"
)
const (
diff --git a/internal/atomic/bool_test.go b/internal/atomic/bool_test.go
index f78496e0..03ab8b8c 100644
--- a/internal/atomic/bool_test.go
+++ b/internal/atomic/bool_test.go
@@ -18,7 +18,7 @@ import (
"runtime"
"sync"
- "github.com/aerospike/aerospike-client-go/v7/internal/atomic"
+ "github.com/aerospike/aerospike-client-go/v8/internal/atomic"
gg "github.com/onsi/ginkgo/v2"
gm "github.com/onsi/gomega"
diff --git a/internal/atomic/guard_test.go b/internal/atomic/guard_test.go
index dc868d79..17765fdb 100644
--- a/internal/atomic/guard_test.go
+++ b/internal/atomic/guard_test.go
@@ -17,7 +17,7 @@ package atomic_test
import (
"runtime"
- "github.com/aerospike/aerospike-client-go/v7/internal/atomic"
+ "github.com/aerospike/aerospike-client-go/v8/internal/atomic"
gg "github.com/onsi/ginkgo/v2"
gm "github.com/onsi/gomega"
diff --git a/internal/atomic/int_test.go b/internal/atomic/int_test.go
index 6e3b2ef5..790fbf32 100644
--- a/internal/atomic/int_test.go
+++ b/internal/atomic/int_test.go
@@ -18,7 +18,7 @@ import (
"runtime"
"sync"
- "github.com/aerospike/aerospike-client-go/v7/internal/atomic"
+ "github.com/aerospike/aerospike-client-go/v8/internal/atomic"
gg "github.com/onsi/ginkgo/v2"
gm "github.com/onsi/gomega"
diff --git a/internal/atomic/map/map.go b/internal/atomic/map/map.go
index 200c2ef3..5a75adb8 100644
--- a/internal/atomic/map/map.go
+++ b/internal/atomic/map/map.go
@@ -31,6 +31,14 @@ func New[K comparable, V any](length int) *Map[K, V] {
}
}
+// Exists atomically checks if a key exists in the map
+func (m *Map[K, V]) Exists(k K) bool {
+ m.mutex.RLock()
+ _, ok := m.m[k]
+ m.mutex.RUnlock()
+ return ok
+}
+
// Get atomically retrieves an element from the Map.
func (m *Map[K, V]) Get(k K) V {
m.mutex.RLock()
@@ -63,7 +71,7 @@ func (m *Map[K, V]) Length() int {
return res
}
-// Length returns the Map size.
+// Clone copies the map and returns the copy.
func (m *Map[K, V]) Clone() map[K]V {
m.mutex.RLock()
res := make(map[K]V, len(m.m))
@@ -75,6 +83,25 @@ func (m *Map[K, V]) Clone() map[K]V {
return res
}
+// Keys returns the keys from the map.
+func (m *Map[K, V]) Keys() []K {
+ m.mutex.RLock()
+ res := make([]K, 0, len(m.m))
+ for k := range m.m {
+ res = append(res, k)
+ }
+ m.mutex.RUnlock()
+
+ return res
+}
+
+// Clear will remove all entries.
+func (m *Map[K, V]) Clear() {
+ m.mutex.Lock()
+ m.m = make(map[K]V, len(m.m))
+ m.mutex.Unlock()
+}
+
// Delete will remove the key and return its value.
func (m *Map[K, V]) Delete(k K) V {
m.mutex.Lock()
diff --git a/internal/atomic/queue_test.go b/internal/atomic/queue_test.go
index 3299489f..f86935f8 100644
--- a/internal/atomic/queue_test.go
+++ b/internal/atomic/queue_test.go
@@ -17,7 +17,7 @@ package atomic_test
import (
"runtime"
- "github.com/aerospike/aerospike-client-go/v7/internal/atomic"
+ "github.com/aerospike/aerospike-client-go/v8/internal/atomic"
gg "github.com/onsi/ginkgo/v2"
gm "github.com/onsi/gomega"
diff --git a/internal/atomic/typed_val_test.go b/internal/atomic/typed_val_test.go
index e45e14a2..6fbf0f91 100644
--- a/internal/atomic/typed_val_test.go
+++ b/internal/atomic/typed_val_test.go
@@ -15,7 +15,7 @@
package atomic_test
import (
- "github.com/aerospike/aerospike-client-go/v7/internal/atomic"
+ "github.com/aerospike/aerospike-client-go/v8/internal/atomic"
gg "github.com/onsi/ginkgo/v2"
gm "github.com/onsi/gomega"
diff --git a/internal/lua/instance.go b/internal/lua/instance.go
index 7dba847b..a2d5d9ba 100644
--- a/internal/lua/instance.go
+++ b/internal/lua/instance.go
@@ -18,9 +18,9 @@
package lua
import (
- luaLib "github.com/aerospike/aerospike-client-go/v7/internal/lua/resources"
- "github.com/aerospike/aerospike-client-go/v7/logger"
- "github.com/aerospike/aerospike-client-go/v7/types"
+ luaLib "github.com/aerospike/aerospike-client-go/v8/internal/lua/resources"
+ "github.com/aerospike/aerospike-client-go/v8/logger"
+ "github.com/aerospike/aerospike-client-go/v8/types"
lua "github.com/yuin/gopher-lua"
)
diff --git a/internal/lua/lua_aerospike.go b/internal/lua/lua_aerospike.go
index 8ccb6973..49faced3 100644
--- a/internal/lua/lua_aerospike.go
+++ b/internal/lua/lua_aerospike.go
@@ -18,7 +18,7 @@
package lua
import (
- "github.com/aerospike/aerospike-client-go/v7/logger"
+ "github.com/aerospike/aerospike-client-go/v8/logger"
lua "github.com/yuin/gopher-lua"
)
diff --git a/internal/lua/lua_aerospike_test.go b/internal/lua/lua_aerospike_test.go
index db052b9e..26d51b74 100644
--- a/internal/lua/lua_aerospike_test.go
+++ b/internal/lua/lua_aerospike_test.go
@@ -23,7 +23,7 @@ import (
gg "github.com/onsi/ginkgo/v2"
gm "github.com/onsi/gomega"
- ilua "github.com/aerospike/aerospike-client-go/v7/internal/lua"
+ ilua "github.com/aerospike/aerospike-client-go/v8/internal/lua"
)
var _ = gg.Describe("Lua Aerospike API Test", func() {
diff --git a/internal/lua/lua_list_test.go b/internal/lua/lua_list_test.go
index da097b83..c6017e5f 100644
--- a/internal/lua/lua_list_test.go
+++ b/internal/lua/lua_list_test.go
@@ -23,7 +23,7 @@ import (
gg "github.com/onsi/ginkgo/v2"
gm "github.com/onsi/gomega"
- ilua "github.com/aerospike/aerospike-client-go/v7/internal/lua"
+ ilua "github.com/aerospike/aerospike-client-go/v8/internal/lua"
)
var _ = gg.Describe("Lua List API Test", func() {
diff --git a/internal/lua/lua_map_test.go b/internal/lua/lua_map_test.go
index d73529e4..22d8d8fd 100644
--- a/internal/lua/lua_map_test.go
+++ b/internal/lua/lua_map_test.go
@@ -23,7 +23,7 @@ import (
gg "github.com/onsi/ginkgo/v2"
gm "github.com/onsi/gomega"
- ilua "github.com/aerospike/aerospike-client-go/v7/internal/lua"
+ ilua "github.com/aerospike/aerospike-client-go/v8/internal/lua"
)
var _ = gg.Describe("Lua Map API Test", func() {
diff --git a/key.go b/key.go
index 3b62bf0d..dddb6fe8 100644
--- a/key.go
+++ b/key.go
@@ -18,8 +18,8 @@ import (
"bytes"
"fmt"
- "github.com/aerospike/aerospike-client-go/v7/types"
- Buffer "github.com/aerospike/aerospike-client-go/v7/utils/buffer"
+ "github.com/aerospike/aerospike-client-go/v8/types"
+ Buffer "github.com/aerospike/aerospike-client-go/v8/utils/buffer"
)
// Key is the unique record identifier. Records can be identified using a specified namespace,
@@ -164,3 +164,8 @@ func (ky *Key) PartitionId() int {
// First AND makes positive and negative correctly, then mod.
return int(Buffer.LittleBytesToInt32(ky.digest[:], 0)&0xFFFF) & (_PARTITIONS - 1)
}
+
+// hasValueToSend reports whether the key has an associated user value that can be sent to the server.
+func (ky *Key) hasValueToSend() bool {
+ return ky.userKey != nil && ky.userKey != nullValue
+}
diff --git a/key_bench_test.go b/key_bench_test.go
index 5c9b183b..487829d2 100644
--- a/key_bench_test.go
+++ b/key_bench_test.go
@@ -19,7 +19,7 @@ import (
"strings"
"testing"
- "github.com/aerospike/aerospike-client-go/v7/pkg/ripemd160"
+ "github.com/aerospike/aerospike-client-go/v8/pkg/ripemd160"
)
var res = make([]byte, 20)
diff --git a/key_helper.go b/key_helper.go
index f237fc89..7ed1af02 100644
--- a/key_helper.go
+++ b/key_helper.go
@@ -16,10 +16,11 @@ package aerospike
import (
"encoding/binary"
+ "fmt"
"math"
- "github.com/aerospike/aerospike-client-go/v7/pkg/ripemd160"
- "github.com/aerospike/aerospike-client-go/v7/types"
+ "github.com/aerospike/aerospike-client-go/v8/pkg/ripemd160"
+ "github.com/aerospike/aerospike-client-go/v8/types"
)
type keyWriter struct {
@@ -150,6 +151,5 @@ func (vb *keyWriter) writeKey(val Value) Error {
return nil
}
- // TODO: Replace the error message with fmt.Sprintf("Key Generation Error. Value type not supported: %T", val)
- return newError(types.PARAMETER_ERROR, "Key Generation Error. Value not supported: "+val.String())
+ return newError(types.PARAMETER_ERROR, fmt.Sprintf("Key Generation Error. Value type not supported: %T", val))
}
diff --git a/key_test.go b/key_test.go
index 541e80e5..3cb777ed 100644
--- a/key_test.go
+++ b/key_test.go
@@ -19,7 +19,7 @@ import (
"math"
"strings"
- as "github.com/aerospike/aerospike-client-go/v7"
+ as "github.com/aerospike/aerospike-client-go/v8"
gg "github.com/onsi/ginkgo/v2"
gm "github.com/onsi/gomega"
diff --git a/load_test.go b/load_test.go
index a79b126e..c9d2897b 100644
--- a/load_test.go
+++ b/load_test.go
@@ -20,7 +20,7 @@ import (
"sync"
"time"
- as "github.com/aerospike/aerospike-client-go/v7"
+ as "github.com/aerospike/aerospike-client-go/v8"
gg "github.com/onsi/ginkgo/v2"
gm "github.com/onsi/gomega"
@@ -33,11 +33,6 @@ func init() {
// ALL tests are isolated by SetName and Key, which are 50 random characters
var _ = gg.Describe("Aerospike load tests", func() {
- gg.BeforeEach(func() {
- if *dbaas {
- gg.Skip("Not supported in DBAAS environment")
- }
- })
gg.Describe("Single long random string test", func() {
var ns = *namespace
diff --git a/login_command.go b/login_command.go
index 159cb9d2..473d5520 100644
--- a/login_command.go
+++ b/login_command.go
@@ -17,10 +17,10 @@ package aerospike
import (
"time"
- "github.com/aerospike/aerospike-client-go/v7/logger"
- "github.com/aerospike/aerospike-client-go/v7/types"
+ "github.com/aerospike/aerospike-client-go/v8/logger"
+ "github.com/aerospike/aerospike-client-go/v8/types"
- Buffer "github.com/aerospike/aerospike-client-go/v7/utils/buffer"
+ Buffer "github.com/aerospike/aerospike-client-go/v8/utils/buffer"
)
type sessionInfo struct {
diff --git a/metrics_policy.go b/metrics_policy.go
index f53165f2..a55f20cf 100644
--- a/metrics_policy.go
+++ b/metrics_policy.go
@@ -15,7 +15,7 @@
package aerospike
import (
- "github.com/aerospike/aerospike-client-go/v7/types/histogram"
+ "github.com/aerospike/aerospike-client-go/v8/types/histogram"
)
// MetricsPolicy specifies client periodic metrics configuration.
diff --git a/multi_command.go b/multi_command.go
index ae23b137..87ef9ce1 100644
--- a/multi_command.go
+++ b/multi_command.go
@@ -19,8 +19,8 @@ import (
"math/rand"
"reflect"
- "github.com/aerospike/aerospike-client-go/v7/types"
- Buffer "github.com/aerospike/aerospike-client-go/v7/utils/buffer"
+ "github.com/aerospike/aerospike-client-go/v8/types"
+ Buffer "github.com/aerospike/aerospike-client-go/v8/utils/buffer"
)
type baseMultiCommand struct {
@@ -43,8 +43,7 @@ type baseMultiCommand struct {
resObjMappings map[string][]int
selectCases []reflect.SelectCase
- bc bufferedConn
- grpcEOS bool
+ bc bufferedConn
}
var multiObjectParser func(
@@ -231,6 +230,73 @@ func (cmd *baseMultiCommand) parseKey(fieldCount int, bval *int64) (*Key, Error)
return &Key{namespace: namespace, setName: setName, digest: digest, userKey: userKey}, nil
}
+func (cmd *baseMultiCommand) parseVersion(fieldCount int) (*uint64, Error) {
+ var version *uint64
+
+ for i := 0; i < fieldCount; i++ {
+ if err := cmd.readBytes(4); err != nil {
+ return nil, err
+ }
+
+ fieldlen := int(Buffer.BytesToUint32(cmd.dataBuffer, 0))
+ if err := cmd.readBytes(fieldlen); err != nil {
+ return nil, err
+ }
+
+ fieldType := FieldType(cmd.dataBuffer[0])
+ size := fieldlen - 1
+
+ if fieldType == RECORD_VERSION && size == 7 {
+ version = Buffer.VersionBytesToUint64(cmd.dataBuffer, cmd.dataOffset)
+ }
+ }
+ return version, nil
+}
+
+func (cmd *baseMultiCommand) parseFieldsRead(fieldCount int, key *Key) (err Error) {
+ if cmd.txn != nil {
+ version, err := cmd.parseVersion(fieldCount)
+ if err != nil {
+ return err
+ }
+ cmd.txn.OnRead(key, version)
+ return nil
+ } else {
+ return cmd.skipKey(fieldCount)
+ }
+}
+
+func (cmd *baseMultiCommand) parseFieldsBatch(resultCode types.ResultCode, fieldCount int, br BatchRecordIfc) (err Error) {
+ if cmd.txn != nil {
+ version, err := cmd.parseVersion(fieldCount)
+ if err != nil {
+ return err
+ }
+
+ if br.BatchRec().hasWrite {
+ cmd.txn.OnWrite(br.BatchRec().Key, version, resultCode)
+ } else {
+ cmd.txn.OnRead(br.BatchRec().Key, version)
+ }
+ return nil
+ } else {
+ return cmd.skipKey(fieldCount)
+ }
+}
+
+func (cmd *baseMultiCommand) parseFieldsWrite(resultCode types.ResultCode, fieldCount int, key *Key) (err Error) {
+ if cmd.txn != nil {
+ version, err := cmd.parseVersion(fieldCount)
+ if err != nil {
+ return err
+ }
+
+ cmd.txn.OnWrite(key, version, resultCode)
+ return nil
+ }
+ return cmd.skipKey(fieldCount)
+}
+
func (cmd *baseMultiCommand) skipKey(fieldCount int) (err Error) {
for i := 0; i < fieldCount; i++ {
if err = cmd.readBytes(4); err != nil {
@@ -360,7 +426,7 @@ func (cmd *baseMultiCommand) parseRecordResults(ifc command, receiveSize int) (b
}
}
- if cmd.grpcEOS || !cmd.tracker.allowRecord(cmd.nodePartitions) {
+ if !cmd.tracker.allowRecord(cmd.nodePartitions) {
continue
}
@@ -386,7 +452,7 @@ func (cmd *baseMultiCommand) parseRecordResults(ifc command, receiveSize int) (b
return false, err
}
- if cmd.grpcEOS || !cmd.tracker.allowRecord(cmd.nodePartitions) {
+ if !cmd.tracker.allowRecord(cmd.nodePartitions) {
continue
}
diff --git a/node.go b/node.go
index 6dc85ddc..02389d95 100644
--- a/node.go
+++ b/node.go
@@ -24,9 +24,9 @@ import (
"golang.org/x/sync/errgroup"
- iatomic "github.com/aerospike/aerospike-client-go/v7/internal/atomic"
- "github.com/aerospike/aerospike-client-go/v7/logger"
- "github.com/aerospike/aerospike-client-go/v7/types"
+ iatomic "github.com/aerospike/aerospike-client-go/v8/internal/atomic"
+ "github.com/aerospike/aerospike-client-go/v8/logger"
+ "github.com/aerospike/aerospike-client-go/v8/types"
)
const (
diff --git a/node_stats.go b/node_stats.go
index a4218c93..5b974815 100644
--- a/node_stats.go
+++ b/node_stats.go
@@ -18,14 +18,15 @@ import (
"encoding/json"
"sync"
- iatomic "github.com/aerospike/aerospike-client-go/v7/internal/atomic"
- hist "github.com/aerospike/aerospike-client-go/v7/types/histogram"
+ iatomic "github.com/aerospike/aerospike-client-go/v8/internal/atomic"
+ hist "github.com/aerospike/aerospike-client-go/v8/types/histogram"
)
// nodeStats keeps track of client's internal node statistics
// These statistics are aggregated once per tend in the cluster object
// and then are served to the end-user.
type nodeStats struct {
+ // TODO: Remove this lock and abstract it out using Generics
m sync.Mutex
// Attempts to open a connection (failed + successful)
ConnectionsAttempts iatomic.Int `json:"connections-attempts"`
@@ -62,9 +63,9 @@ type nodeStats struct {
// Total number of times nodes were removed from the client (not the same as actual nodes removed. Network disruptions between client and server may cause a node being dropped client-side)
NodeRemoved iatomic.Int `json:"node-removed-count"`
- // Total number of transaction retries
+ // Total number of command retries
TransactionRetryCount iatomic.Int `json:"transaction-retry-count"`
- // Total number of transaction errors
+ // Total number of command errors
TransactionErrorCount iatomic.Int `json:"transaction-error-count"`
// Metrics for Get commands
diff --git a/node_test.go b/node_test.go
index 1b2067dc..28484f2b 100644
--- a/node_test.go
+++ b/node_test.go
@@ -18,7 +18,7 @@ import (
"errors"
"time"
- as "github.com/aerospike/aerospike-client-go/v7"
+ as "github.com/aerospike/aerospike-client-go/v8"
gg "github.com/onsi/ginkgo/v2"
gm "github.com/onsi/gomega"
@@ -27,12 +27,6 @@ import (
// ALL tests are isolated by SetName and Key, which are 50 random characters
var _ = gg.Describe("Aerospike Node Tests", func() {
- gg.BeforeEach(func() {
- if *proxy {
- gg.Skip("Not supported in Proxy Client")
- }
- })
-
gg.Describe("Node Connection Pool", func() {
// connection data
var err error
diff --git a/node_validator.go b/node_validator.go
index 03874211..0bf27f6f 100644
--- a/node_validator.go
+++ b/node_validator.go
@@ -21,8 +21,8 @@ import (
"strconv"
"strings"
- "github.com/aerospike/aerospike-client-go/v7/logger"
- "github.com/aerospike/aerospike-client-go/v7/types"
+ "github.com/aerospike/aerospike-client-go/v8/logger"
+ "github.com/aerospike/aerospike-client-go/v8/types"
)
type nodesToAddT map[string]*Node
diff --git a/operate_args.go b/operate_args.go
index 2afe5365..61f32ed5 100644
--- a/operate_args.go
+++ b/operate_args.go
@@ -98,3 +98,15 @@ func newOperateArgs(
}
return res, nil
}
+
+func (oa *operateArgs) size() (int, Error) {
+ res := 0
+ for i := range oa.operations {
+ size, err := oa.operations[i].size()
+ if err != nil {
+ return -1, err
+ }
+ res += size
+ }
+ return res, nil
+}
diff --git a/operate_command.go b/operate_command.go
deleted file mode 100644
index d29f9a3b..00000000
--- a/operate_command.go
+++ /dev/null
@@ -1,71 +0,0 @@
-// Copyright 2014-2022 Aerospike, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package aerospike
-
-type operateCommand struct {
- readCommand
-
- policy *WritePolicy
- args operateArgs
- useOpResults bool
-}
-
-func newOperateCommand(cluster *Cluster, policy *WritePolicy, key *Key, args operateArgs, useOpResults bool) (operateCommand, Error) {
- rdCommand, err := newReadCommand(cluster, &policy.BasePolicy, key, nil, args.partition)
- if err != nil {
- return operateCommand{}, err
- }
-
- return operateCommand{
- readCommand: rdCommand,
- policy: policy,
- args: args,
- useOpResults: useOpResults,
- }, nil
-}
-
-func (cmd *operateCommand) writeBuffer(ifc command) (err Error) {
- return cmd.setOperate(cmd.policy, cmd.key, &cmd.args)
-}
-
-func (cmd *operateCommand) getNode(ifc command) (*Node, Error) {
- if cmd.args.hasWrite {
- return cmd.partition.GetNodeWrite(cmd.cluster)
- }
-
- // this may be affected by Rackaware
- return cmd.partition.GetNodeRead(cmd.cluster)
-}
-
-func (cmd *operateCommand) prepareRetry(ifc command, isTimeout bool) bool {
- if cmd.args.hasWrite {
- cmd.partition.PrepareRetryWrite(isTimeout)
- } else {
- cmd.partition.PrepareRetryRead(isTimeout)
- }
- return true
-}
-
-func (cmd *operateCommand) isRead() bool {
- return !cmd.args.hasWrite
-}
-
-func (cmd *operateCommand) Execute() Error {
- return cmd.execute(cmd)
-}
-
-func (cmd *operateCommand) transactionType() transactionType {
- return ttOperate
-}
diff --git a/operate_command_read.go b/operate_command_read.go
new file mode 100644
index 00000000..193e457e
--- /dev/null
+++ b/operate_command_read.go
@@ -0,0 +1,49 @@
+// Copyright 2014-2022 Aerospike, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package aerospike
+
+type operateCommandRead struct {
+ readCommand
+
+ args operateArgs
+}
+
+func newOperateCommandRead(cluster *Cluster, key *Key, args operateArgs) (operateCommandRead, Error) {
+ rdCommand, err := newReadCommand(cluster, &args.writePolicy.BasePolicy, key, nil)
+ if err != nil {
+ return operateCommandRead{}, err
+ }
+
+ res := operateCommandRead{
+ readCommand: rdCommand,
+ args: args,
+ }
+
+ res.isOperation = true
+
+ return res, nil
+}
+
+func (cmd *operateCommandRead) writeBuffer(ifc command) (err Error) {
+ return cmd.setOperate(cmd.args.writePolicy, cmd.key, &cmd.args)
+}
+
+func (cmd *operateCommandRead) Execute() Error {
+ return cmd.execute(cmd)
+}
+
+func (cmd *operateCommandRead) commandType() commandType {
+ return ttOperate
+}
diff --git a/operate_command_write.go b/operate_command_write.go
new file mode 100644
index 00000000..ed62241c
--- /dev/null
+++ b/operate_command_write.go
@@ -0,0 +1,79 @@
+// Copyright 2014-2022 Aerospike, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package aerospike
+
+import "github.com/aerospike/aerospike-client-go/v8/types"
+
+type operateCommandWrite struct {
+ baseWriteCommand
+
+ record *Record
+ args operateArgs
+}
+
+func newOperateCommandWrite(cluster *Cluster, key *Key, args operateArgs) (operateCommandWrite, Error) {
+ bwc, err := newBaseWriteCommand(cluster, args.writePolicy, key)
+ if err != nil {
+ return operateCommandWrite{}, err
+ }
+
+ return operateCommandWrite{
+ baseWriteCommand: bwc,
+ args: args,
+ }, nil
+}
+
+func (cmd *operateCommandWrite) writeBuffer(ifc command) (err Error) {
+ return cmd.setOperate(cmd.policy, cmd.key, &cmd.args)
+}
+
+func (cmd *operateCommandWrite) parseResult(ifc command, conn *Connection) Error {
+ rp, err := newRecordParser(&cmd.baseCommand)
+ if err != nil {
+ return err
+ }
+
+ if err := rp.parseFields(cmd.policy.Txn, cmd.key, true); err != nil {
+ return err
+ }
+
+ switch rp.resultCode {
+ case types.OK:
+ var err Error
+ cmd.record, err = rp.parseRecord(cmd.key, true)
+ if err != nil {
+ return err
+ }
+ return nil
+ case types.KEY_NOT_FOUND_ERROR:
+ return ErrKeyNotFound.err()
+ case types.FILTERED_OUT:
+ return ErrFilteredOut.err()
+ default:
+ return newError(rp.resultCode)
+ }
+}
+
+func (cmd *operateCommandWrite) Execute() Error {
+ return cmd.execute(cmd)
+}
+
+func (cmd *operateCommandWrite) commandType() commandType {
+ return ttOperate
+}
+
+func (cmd *operateCommandWrite) GetRecord() *Record {
+ return cmd.record
+}
diff --git a/packer.go b/packer.go
index 4f588c01..b3e92017 100644
--- a/packer.go
+++ b/packer.go
@@ -22,10 +22,10 @@ import (
"reflect"
"time"
- "github.com/aerospike/aerospike-client-go/v7/types"
- ParticleType "github.com/aerospike/aerospike-client-go/v7/types/particle_type"
+ "github.com/aerospike/aerospike-client-go/v8/types"
+ ParticleType "github.com/aerospike/aerospike-client-go/v8/types/particle_type"
- Buffer "github.com/aerospike/aerospike-client-go/v7/utils/buffer"
+ Buffer "github.com/aerospike/aerospike-client-go/v8/utils/buffer"
)
var packObjectReflect func(BufferEx, interface{}, bool) (int, Error)
diff --git a/packer_reflect.go b/packer_reflect.go
index 5898cad4..5c0f178b 100644
--- a/packer_reflect.go
+++ b/packer_reflect.go
@@ -20,7 +20,7 @@ import (
"fmt"
"reflect"
- "github.com/aerospike/aerospike-client-go/v7/types"
+ "github.com/aerospike/aerospike-client-go/v8/types"
)
func init() {
diff --git a/partition.go b/partition.go
index 38b63633..1121dc7c 100644
--- a/partition.go
+++ b/partition.go
@@ -17,7 +17,7 @@ package aerospike
import (
"fmt"
- "github.com/aerospike/aerospike-client-go/v7/types"
+ "github.com/aerospike/aerospike-client-go/v8/types"
)
// Partition encapsulates partition information.
diff --git a/partition_filter.go b/partition_filter.go
index 5dd37755..84c3db55 100644
--- a/partition_filter.go
+++ b/partition_filter.go
@@ -19,7 +19,7 @@ import (
"bytes"
"encoding/gob"
- "github.com/aerospike/aerospike-client-go/v7/types"
+ "github.com/aerospike/aerospike-client-go/v8/types"
)
// PartitionFilter is used in scan/queries. This filter is also used as a cursor.
diff --git a/partition_parser.go b/partition_parser.go
index 7d769ffc..b5eed623 100644
--- a/partition_parser.go
+++ b/partition_parser.go
@@ -23,8 +23,8 @@ import (
"strconv"
"sync"
- "github.com/aerospike/aerospike-client-go/v7/logger"
- "github.com/aerospike/aerospike-client-go/v7/types"
+ "github.com/aerospike/aerospike-client-go/v8/logger"
+ "github.com/aerospike/aerospike-client-go/v8/types"
)
const (
diff --git a/partition_tracker.go b/partition_tracker.go
index 8e6f80c4..bd45c91c 100644
--- a/partition_tracker.go
+++ b/partition_tracker.go
@@ -20,8 +20,8 @@ import (
"strings"
"time"
- atmc "github.com/aerospike/aerospike-client-go/v7/internal/atomic"
- "github.com/aerospike/aerospike-client-go/v7/types"
+ atmc "github.com/aerospike/aerospike-client-go/v8/internal/atomic"
+ "github.com/aerospike/aerospike-client-go/v8/types"
)
type partitionTracker struct {
@@ -273,10 +273,7 @@ func (pt *partitionTracker) setDigest(nodePartitions *nodePartitions, key *Key)
partitionId := key.PartitionId()
pt.partitions[partitionId-pt.partitionBegin].Digest = key.Digest()
- // nodePartitions is nil in Proxy client
- if nodePartitions != nil {
- nodePartitions.recordCount++
- }
+ nodePartitions.recordCount++
}
func (pt *partitionTracker) setLast(nodePartitions *nodePartitions, key *Key, bval *int64) {
@@ -290,10 +287,7 @@ func (pt *partitionTracker) setLast(nodePartitions *nodePartitions, key *Key, bv
ps.BVal = *bval
}
- // nodePartitions is nil in Proxy client
- if nodePartitions != nil {
- nodePartitions.recordCount++
- }
+ nodePartitions.recordCount++
}
func (pt *partitionTracker) allowRecord(np *nodePartitions) bool {
diff --git a/partitions.go b/partitions.go
index 18d8b3f5..16d55046 100644
--- a/partitions.go
+++ b/partitions.go
@@ -19,7 +19,7 @@ import (
"fmt"
"strconv"
- "github.com/aerospike/aerospike-client-go/v7/types"
+ "github.com/aerospike/aerospike-client-go/v8/types"
)
// Partitions represents a list of partitions
diff --git a/peers.go b/peers.go
index 1dfae9bd..9eeb6c6f 100644
--- a/peers.go
+++ b/peers.go
@@ -17,7 +17,7 @@ package aerospike
import (
"sync"
- "github.com/aerospike/aerospike-client-go/v7/internal/atomic"
+ "github.com/aerospike/aerospike-client-go/v8/internal/atomic"
)
type peers struct {
diff --git a/peers_parser.go b/peers_parser.go
index 2ef89c5d..3a644dc7 100644
--- a/peers_parser.go
+++ b/peers_parser.go
@@ -15,12 +15,12 @@
package aerospike
import (
- // "github.com/aerospike/aerospike-client-go/v7/logger"
+ // "github.com/aerospike/aerospike-client-go/v8/logger"
"io"
"strconv"
- "github.com/aerospike/aerospike-client-go/v7/types"
+ "github.com/aerospike/aerospike-client-go/v8/types"
)
var aeroerr = newError(types.PARSE_ERROR, "Error parsing peers list.")
diff --git a/policy.go b/policy.go
index cbfd4ff4..b19833d1 100644
--- a/policy.go
+++ b/policy.go
@@ -30,9 +30,13 @@ type Policy interface {
// enforce the interface
var _ Policy = &BasePolicy{}
-// BasePolicy encapsulates parameters for transaction policy attributes
+// BasePolicy encapsulates parameters for command policy attributes
// used in all database operation calls.
type BasePolicy struct {
+ // Multi-record transaction identifier (MRT). If this field is populated, the corresponding
+ // command will be included in the MRT. This field is ignored for scan/query.
+ Txn *Txn
+
// FilterExpression is the optional Filter Expression. Supported on Server v5.2+
FilterExpression *Expression
@@ -42,18 +46,18 @@ type BasePolicy struct {
// ReadModeSC indicates read policy for SC (strong consistency) namespaces.
ReadModeSC ReadModeSC //= SESSION;
- // TotalTimeout specifies total transaction timeout.
+ // TotalTimeout specifies total command timeout.
//
// The TotalTimeout is tracked on the client and also sent to the server along
- // with the transaction in the wire protocol. The client will most likely
- // timeout first, but the server has the capability to Timeout the transaction.
+ // with the command in the wire protocol. The client will most likely
+ // timeout first, but the server has the capability to Timeout the command.
//
- // If TotalTimeout is not zero and TotalTimeout is reached before the transaction
- // completes, the transaction will abort with TotalTimeout error.
+ // If TotalTimeout is not zero and TotalTimeout is reached before the command
+ // completes, the command will abort with TotalTimeout error.
//
- // If TotalTimeout is zero, there will be no time limit and the transaction will retry
+ // If TotalTimeout is zero, there will be no time limit and the command will retry
// on network timeouts/errors until MaxRetries is exceeded. If MaxRetries is exceeded, the
- // transaction also aborts with Timeout error.
+ // command also aborts with Timeout error.
//
// Default for scan/query: 0 (no time limit and rely on MaxRetries)
//
@@ -63,21 +67,21 @@ type BasePolicy struct {
// SocketTimeout determines network timeout for each attempt.
//
// If SocketTimeout is not zero and SocketTimeout is reached before an attempt completes,
- // the Timeout above is checked. If Timeout is not exceeded, the transaction
+ // the Timeout above is checked. If Timeout is not exceeded, the command
// is retried. If both SocketTimeout and Timeout are non-zero, SocketTimeout must be less
// than or equal to Timeout, otherwise Timeout will also be used for SocketTimeout.
//
// Default: 30s
SocketTimeout time.Duration
- // MaxRetries determines the maximum number of retries before aborting the current transaction.
+ // MaxRetries determines the maximum number of retries before aborting the current command.
// The initial attempt is not counted as a retry.
//
- // If MaxRetries is exceeded, the transaction will abort with an error.
+ // If MaxRetries is exceeded, the command will abort with an error.
//
// WARNING: Database writes that are not idempotent (such as AddOp)
// should not be retried because the write operation may be performed
- // multiple times if the client timed out previous transaction attempts.
+ // multiple times if the client timed out previous command attempts.
// It's important to use a distinct WritePolicy for non-idempotent
// writes which sets maxRetries = 0;
//
diff --git a/proto/auth/aerospike_proxy_auth.pb.go b/proto/auth/aerospike_proxy_auth.pb.go
deleted file mode 100644
index c472fc1c..00000000
--- a/proto/auth/aerospike_proxy_auth.pb.go
+++ /dev/null
@@ -1,230 +0,0 @@
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// versions:
-// protoc-gen-go v1.32.0
-// protoc v5.27.1
-// source: aerospike_proxy_auth.proto
-
-package auth
-
-import (
- protoreflect "google.golang.org/protobuf/reflect/protoreflect"
- protoimpl "google.golang.org/protobuf/runtime/protoimpl"
- reflect "reflect"
- sync "sync"
-)
-
-const (
- // Verify that this generated code is sufficiently up-to-date.
- _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
- // Verify that runtime/protoimpl is sufficiently up-to-date.
- _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
-)
-
-// An auth request to get an access token to perform operations on Aerospike
-// database.
-type AerospikeAuthRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Username string `protobuf:"bytes,1,opt,name=username,proto3" json:"username,omitempty"`
- Password string `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"`
-}
-
-func (x *AerospikeAuthRequest) Reset() {
- *x = AerospikeAuthRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_aerospike_proxy_auth_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *AerospikeAuthRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*AerospikeAuthRequest) ProtoMessage() {}
-
-func (x *AerospikeAuthRequest) ProtoReflect() protoreflect.Message {
- mi := &file_aerospike_proxy_auth_proto_msgTypes[0]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use AerospikeAuthRequest.ProtoReflect.Descriptor instead.
-func (*AerospikeAuthRequest) Descriptor() ([]byte, []int) {
- return file_aerospike_proxy_auth_proto_rawDescGZIP(), []int{0}
-}
-
-func (x *AerospikeAuthRequest) GetUsername() string {
- if x != nil {
- return x.Username
- }
- return ""
-}
-
-func (x *AerospikeAuthRequest) GetPassword() string {
- if x != nil {
- return x.Password
- }
- return ""
-}
-
-// An auth token to perform operations on Aerospike database.
-type AerospikeAuthResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- Token string `protobuf:"bytes,1,opt,name=token,proto3" json:"token,omitempty"`
-}
-
-func (x *AerospikeAuthResponse) Reset() {
- *x = AerospikeAuthResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_aerospike_proxy_auth_proto_msgTypes[1]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *AerospikeAuthResponse) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*AerospikeAuthResponse) ProtoMessage() {}
-
-func (x *AerospikeAuthResponse) ProtoReflect() protoreflect.Message {
- mi := &file_aerospike_proxy_auth_proto_msgTypes[1]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use AerospikeAuthResponse.ProtoReflect.Descriptor instead.
-func (*AerospikeAuthResponse) Descriptor() ([]byte, []int) {
- return file_aerospike_proxy_auth_proto_rawDescGZIP(), []int{1}
-}
-
-func (x *AerospikeAuthResponse) GetToken() string {
- if x != nil {
- return x.Token
- }
- return ""
-}
-
-var File_aerospike_proxy_auth_proto protoreflect.FileDescriptor
-
-var file_aerospike_proxy_auth_proto_rawDesc = []byte{
- 0x0a, 0x1a, 0x61, 0x65, 0x72, 0x6f, 0x73, 0x70, 0x69, 0x6b, 0x65, 0x5f, 0x70, 0x72, 0x6f, 0x78,
- 0x79, 0x5f, 0x61, 0x75, 0x74, 0x68, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x4e, 0x0a, 0x14,
- 0x41, 0x65, 0x72, 0x6f, 0x73, 0x70, 0x69, 0x6b, 0x65, 0x41, 0x75, 0x74, 0x68, 0x52, 0x65, 0x71,
- 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65,
- 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65,
- 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x18, 0x02, 0x20, 0x01,
- 0x28, 0x09, 0x52, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x22, 0x2d, 0x0a, 0x15,
- 0x41, 0x65, 0x72, 0x6f, 0x73, 0x70, 0x69, 0x6b, 0x65, 0x41, 0x75, 0x74, 0x68, 0x52, 0x65, 0x73,
- 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x01,
- 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x32, 0x45, 0x0a, 0x0b, 0x41,
- 0x75, 0x74, 0x68, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x36, 0x0a, 0x03, 0x47, 0x65,
- 0x74, 0x12, 0x15, 0x2e, 0x41, 0x65, 0x72, 0x6f, 0x73, 0x70, 0x69, 0x6b, 0x65, 0x41, 0x75, 0x74,
- 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x41, 0x65, 0x72, 0x6f, 0x73,
- 0x70, 0x69, 0x6b, 0x65, 0x41, 0x75, 0x74, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
- 0x22, 0x00, 0x42, 0x59, 0x0a, 0x1a, 0x63, 0x6f, 0x6d, 0x2e, 0x61, 0x65, 0x72, 0x6f, 0x73, 0x70,
- 0x69, 0x6b, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74,
- 0x5a, 0x3b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x65, 0x72,
- 0x6f, 0x73, 0x70, 0x69, 0x6b, 0x65, 0x2f, 0x61, 0x65, 0x72, 0x6f, 0x73, 0x70, 0x69, 0x6b, 0x65,
- 0x2d, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2d, 0x67, 0x6f, 0x2f, 0x76, 0x37, 0x2f, 0x70, 0x72,
- 0x6f, 0x74, 0x6f, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x3b, 0x61, 0x75, 0x74, 0x68, 0x62, 0x06, 0x70,
- 0x72, 0x6f, 0x74, 0x6f, 0x33,
-}
-
-var (
- file_aerospike_proxy_auth_proto_rawDescOnce sync.Once
- file_aerospike_proxy_auth_proto_rawDescData = file_aerospike_proxy_auth_proto_rawDesc
-)
-
-func file_aerospike_proxy_auth_proto_rawDescGZIP() []byte {
- file_aerospike_proxy_auth_proto_rawDescOnce.Do(func() {
- file_aerospike_proxy_auth_proto_rawDescData = protoimpl.X.CompressGZIP(file_aerospike_proxy_auth_proto_rawDescData)
- })
- return file_aerospike_proxy_auth_proto_rawDescData
-}
-
-var file_aerospike_proxy_auth_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
-var file_aerospike_proxy_auth_proto_goTypes = []interface{}{
- (*AerospikeAuthRequest)(nil), // 0: AerospikeAuthRequest
- (*AerospikeAuthResponse)(nil), // 1: AerospikeAuthResponse
-}
-var file_aerospike_proxy_auth_proto_depIdxs = []int32{
- 0, // 0: AuthService.Get:input_type -> AerospikeAuthRequest
- 1, // 1: AuthService.Get:output_type -> AerospikeAuthResponse
- 1, // [1:2] is the sub-list for method output_type
- 0, // [0:1] is the sub-list for method input_type
- 0, // [0:0] is the sub-list for extension type_name
- 0, // [0:0] is the sub-list for extension extendee
- 0, // [0:0] is the sub-list for field type_name
-}
-
-func init() { file_aerospike_proxy_auth_proto_init() }
-func file_aerospike_proxy_auth_proto_init() {
- if File_aerospike_proxy_auth_proto != nil {
- return
- }
- if !protoimpl.UnsafeEnabled {
- file_aerospike_proxy_auth_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*AerospikeAuthRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_aerospike_proxy_auth_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*AerospikeAuthResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- }
- type x struct{}
- out := protoimpl.TypeBuilder{
- File: protoimpl.DescBuilder{
- GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_aerospike_proxy_auth_proto_rawDesc,
- NumEnums: 0,
- NumMessages: 2,
- NumExtensions: 0,
- NumServices: 1,
- },
- GoTypes: file_aerospike_proxy_auth_proto_goTypes,
- DependencyIndexes: file_aerospike_proxy_auth_proto_depIdxs,
- MessageInfos: file_aerospike_proxy_auth_proto_msgTypes,
- }.Build()
- File_aerospike_proxy_auth_proto = out.File
- file_aerospike_proxy_auth_proto_rawDesc = nil
- file_aerospike_proxy_auth_proto_goTypes = nil
- file_aerospike_proxy_auth_proto_depIdxs = nil
-}
diff --git a/proto/auth/aerospike_proxy_auth.proto b/proto/auth/aerospike_proxy_auth.proto
deleted file mode 100644
index 4a56bf80..00000000
--- a/proto/auth/aerospike_proxy_auth.proto
+++ /dev/null
@@ -1,21 +0,0 @@
-syntax = "proto3";
-
-option go_package = "github.com/aerospike/aerospike-client-go/v7/proto/auth;auth";
-option java_package = "com.aerospike.proxy.client";
-
-// Proxy auth service
-service AuthService {
- rpc Get(AerospikeAuthRequest) returns (AerospikeAuthResponse) {}
-}
-
-// An auth request to get an access token to perform operations on Aerospike
-// database.
-message AerospikeAuthRequest {
- string username = 1;
- string password = 2;
-}
-
-// An auth token to perform operations on Aerospike database.
-message AerospikeAuthResponse {
- string token = 1;
-}
diff --git a/proto/auth/aerospike_proxy_auth_grpc.pb.go b/proto/auth/aerospike_proxy_auth_grpc.pb.go
deleted file mode 100644
index 766bb0ba..00000000
--- a/proto/auth/aerospike_proxy_auth_grpc.pb.go
+++ /dev/null
@@ -1,109 +0,0 @@
-// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
-// versions:
-// - protoc-gen-go-grpc v1.3.0
-// - protoc v5.27.1
-// source: proto/auth/aerospike_proxy_auth.proto
-
-package auth
-
-import (
- context "context"
- grpc "google.golang.org/grpc"
- codes "google.golang.org/grpc/codes"
- status "google.golang.org/grpc/status"
-)
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the grpc package it is being compiled against.
-// Requires gRPC-Go v1.32.0 or later.
-const _ = grpc.SupportPackageIsVersion7
-
-const (
- AuthService_Get_FullMethodName = "/AuthService/Get"
-)
-
-// AuthServiceClient is the client API for AuthService service.
-//
-// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
-type AuthServiceClient interface {
- Get(ctx context.Context, in *AerospikeAuthRequest, opts ...grpc.CallOption) (*AerospikeAuthResponse, error)
-}
-
-type authServiceClient struct {
- cc grpc.ClientConnInterface
-}
-
-func NewAuthServiceClient(cc grpc.ClientConnInterface) AuthServiceClient {
- return &authServiceClient{cc}
-}
-
-func (c *authServiceClient) Get(ctx context.Context, in *AerospikeAuthRequest, opts ...grpc.CallOption) (*AerospikeAuthResponse, error) {
- out := new(AerospikeAuthResponse)
- err := c.cc.Invoke(ctx, AuthService_Get_FullMethodName, in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-// AuthServiceServer is the server API for AuthService service.
-// All implementations must embed UnimplementedAuthServiceServer
-// for forward compatibility
-type AuthServiceServer interface {
- Get(context.Context, *AerospikeAuthRequest) (*AerospikeAuthResponse, error)
- mustEmbedUnimplementedAuthServiceServer()
-}
-
-// UnimplementedAuthServiceServer must be embedded to have forward compatible implementations.
-type UnimplementedAuthServiceServer struct {
-}
-
-func (UnimplementedAuthServiceServer) Get(context.Context, *AerospikeAuthRequest) (*AerospikeAuthResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method Get not implemented")
-}
-func (UnimplementedAuthServiceServer) mustEmbedUnimplementedAuthServiceServer() {}
-
-// UnsafeAuthServiceServer may be embedded to opt out of forward compatibility for this service.
-// Use of this interface is not recommended, as added methods to AuthServiceServer will
-// result in compilation errors.
-type UnsafeAuthServiceServer interface {
- mustEmbedUnimplementedAuthServiceServer()
-}
-
-func RegisterAuthServiceServer(s grpc.ServiceRegistrar, srv AuthServiceServer) {
- s.RegisterService(&AuthService_ServiceDesc, srv)
-}
-
-func _AuthService_Get_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(AerospikeAuthRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(AuthServiceServer).Get(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: AuthService_Get_FullMethodName,
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(AuthServiceServer).Get(ctx, req.(*AerospikeAuthRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-// AuthService_ServiceDesc is the grpc.ServiceDesc for AuthService service.
-// It's only intended for direct use with grpc.RegisterService,
-// and not to be introspected or modified (even as a copy)
-var AuthService_ServiceDesc = grpc.ServiceDesc{
- ServiceName: "AuthService",
- HandlerType: (*AuthServiceServer)(nil),
- Methods: []grpc.MethodDesc{
- {
- MethodName: "Get",
- Handler: _AuthService_Get_Handler,
- },
- },
- Streams: []grpc.StreamDesc{},
- Metadata: "proto/auth/aerospike_proxy_auth.proto",
-}
diff --git a/proto/kvs/aerospike_proxy_kv.pb.go b/proto/kvs/aerospike_proxy_kv.pb.go
deleted file mode 100644
index 97b82117..00000000
--- a/proto/kvs/aerospike_proxy_kv.pb.go
+++ /dev/null
@@ -1,3616 +0,0 @@
-// Code generated by protoc-gen-go. DO NOT EDIT.
-// versions:
-// protoc-gen-go v1.32.0
-// protoc v5.27.1
-// source: aerospike_proxy_kv.proto
-
-package kvs
-
-import (
- protoreflect "google.golang.org/protobuf/reflect/protoreflect"
- protoimpl "google.golang.org/protobuf/runtime/protoimpl"
- reflect "reflect"
- sync "sync"
-)
-
-const (
- // Verify that this generated code is sufficiently up-to-date.
- _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
- // Verify that runtime/protoimpl is sufficiently up-to-date.
- _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
-)
-
-// Read policy for AP (availability) namespaces.
-// How duplicates should be consulted in a read operation.
-// Only makes a difference during migrations and only applicable in AP mode.
-type ReadModeAP int32
-
-const (
- // Involve single node in the read operation.
- ReadModeAP_ONE ReadModeAP = 0
- // Involve all duplicates in the read operation.
- ReadModeAP_ALL ReadModeAP = 1
-)
-
-// Enum value maps for ReadModeAP.
-var (
- ReadModeAP_name = map[int32]string{
- 0: "ONE",
- 1: "ALL",
- }
- ReadModeAP_value = map[string]int32{
- "ONE": 0,
- "ALL": 1,
- }
-)
-
-func (x ReadModeAP) Enum() *ReadModeAP {
- p := new(ReadModeAP)
- *p = x
- return p
-}
-
-func (x ReadModeAP) String() string {
- return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
-}
-
-func (ReadModeAP) Descriptor() protoreflect.EnumDescriptor {
- return file_aerospike_proxy_kv_proto_enumTypes[0].Descriptor()
-}
-
-func (ReadModeAP) Type() protoreflect.EnumType {
- return &file_aerospike_proxy_kv_proto_enumTypes[0]
-}
-
-func (x ReadModeAP) Number() protoreflect.EnumNumber {
- return protoreflect.EnumNumber(x)
-}
-
-// Deprecated: Use ReadModeAP.Descriptor instead.
-func (ReadModeAP) EnumDescriptor() ([]byte, []int) {
- return file_aerospike_proxy_kv_proto_rawDescGZIP(), []int{0}
-}
-
-// Read policy for SC (strong consistency) namespaces.
-// Determines SC read consistency options.
-type ReadModeSC int32
-
-const (
- // Ensures this client will only see an increasing sequence of record versions.
- // Server only reads from master. This is the default.
- ReadModeSC_SESSION ReadModeSC = 0
- // Ensures ALL clients will only see an increasing sequence of record versions.
- // Server only reads from master.
- ReadModeSC_LINEARIZE ReadModeSC = 1
- // Server may read from master or any full (non-migrating) replica.
- // Increasing sequence of record versions is not guaranteed.
- ReadModeSC_ALLOW_REPLICA ReadModeSC = 2
- // Server may read from master or any full (non-migrating) replica or from unavailable
- // partitions. Increasing sequence of record versions is not guaranteed.
- ReadModeSC_ALLOW_UNAVAILABLE ReadModeSC = 3
-)
-
-// Enum value maps for ReadModeSC.
-var (
- ReadModeSC_name = map[int32]string{
- 0: "SESSION",
- 1: "LINEARIZE",
- 2: "ALLOW_REPLICA",
- 3: "ALLOW_UNAVAILABLE",
- }
- ReadModeSC_value = map[string]int32{
- "SESSION": 0,
- "LINEARIZE": 1,
- "ALLOW_REPLICA": 2,
- "ALLOW_UNAVAILABLE": 3,
- }
-)
-
-func (x ReadModeSC) Enum() *ReadModeSC {
- p := new(ReadModeSC)
- *p = x
- return p
-}
-
-func (x ReadModeSC) String() string {
- return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
-}
-
-func (ReadModeSC) Descriptor() protoreflect.EnumDescriptor {
- return file_aerospike_proxy_kv_proto_enumTypes[1].Descriptor()
-}
-
-func (ReadModeSC) Type() protoreflect.EnumType {
- return &file_aerospike_proxy_kv_proto_enumTypes[1]
-}
-
-func (x ReadModeSC) Number() protoreflect.EnumNumber {
- return protoreflect.EnumNumber(x)
-}
-
-// Deprecated: Use ReadModeSC.Descriptor instead.
-func (ReadModeSC) EnumDescriptor() ([]byte, []int) {
- return file_aerospike_proxy_kv_proto_rawDescGZIP(), []int{1}
-}
-
-// Defines algorithm used to determine the target node for a command.
-// Scan and query are not affected by replica algorithm.
-//
-// Note: The enum ordinals do not match the Aerospike Client ordinals because
-// the default has to be ordinal zero in protobuf.
-type Replica int32
-
-const (
- // Try node containing master partition first.
- // If connection fails, all commands try nodes containing replicated partitions.
- // If socketTimeout is reached, reads also try nodes containing replicated partitions,
- // but writes remain on master node.
- Replica_SEQUENCE Replica = 0
- // Use node containing key's master partition.
- Replica_MASTER Replica = 1
- // Distribute reads across nodes containing key's master and replicated partitions
- // in round-robin fashion. Writes always use node containing key's master partition.
- Replica_MASTER_PROLES Replica = 2
- // Try node on the same rack as the client first. If timeout or there are no nodes on the
- // same rack, use SEQUENCE instead.
- Replica_PREFER_RACK Replica = 3
- // Distribute reads across all nodes in cluster in round-robin fashion.
- // Writes always use node containing key's master partition.
- // This option is useful when the replication factor equals the number
- // of nodes in the cluster and the overhead of requesting proles is not desired.
- Replica_RANDOM Replica = 4
-)
-
-// Enum value maps for Replica.
-var (
- Replica_name = map[int32]string{
- 0: "SEQUENCE",
- 1: "MASTER",
- 2: "MASTER_PROLES",
- 3: "PREFER_RACK",
- 4: "RANDOM",
- }
- Replica_value = map[string]int32{
- "SEQUENCE": 0,
- "MASTER": 1,
- "MASTER_PROLES": 2,
- "PREFER_RACK": 3,
- "RANDOM": 4,
- }
-)
-
-func (x Replica) Enum() *Replica {
- p := new(Replica)
- *p = x
- return p
-}
-
-func (x Replica) String() string {
- return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
-}
-
-func (Replica) Descriptor() protoreflect.EnumDescriptor {
- return file_aerospike_proxy_kv_proto_enumTypes[2].Descriptor()
-}
-
-func (Replica) Type() protoreflect.EnumType {
- return &file_aerospike_proxy_kv_proto_enumTypes[2]
-}
-
-func (x Replica) Number() protoreflect.EnumNumber {
- return protoreflect.EnumNumber(x)
-}
-
-// Deprecated: Use Replica.Descriptor instead.
-func (Replica) EnumDescriptor() ([]byte, []int) {
- return file_aerospike_proxy_kv_proto_rawDescGZIP(), []int{2}
-}
-
-type QueryDuration int32
-
-const (
- // The query is expected to return more than 100 records per node. The server optimizes for a large record set.
- QueryDuration_LONG QueryDuration = 0
- // The query is expected to return less than 100 records per node. The server optimizes for a small record set.
- QueryDuration_SHORT QueryDuration = 1
- // Treat query as a LONG query, but relax read consistency for AP namespaces.
- // This value is treated exactly like LONG for server versions < 7.1.
- QueryDuration_LONG_RELAX_AP QueryDuration = 2
-)
-
-// Enum value maps for QueryDuration.
-var (
- QueryDuration_name = map[int32]string{
- 0: "LONG",
- 1: "SHORT",
- 2: "LONG_RELAX_AP",
- }
- QueryDuration_value = map[string]int32{
- "LONG": 0,
- "SHORT": 1,
- "LONG_RELAX_AP": 2,
- }
-)
-
-func (x QueryDuration) Enum() *QueryDuration {
- p := new(QueryDuration)
- *p = x
- return p
-}
-
-func (x QueryDuration) String() string {
- return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
-}
-
-func (QueryDuration) Descriptor() protoreflect.EnumDescriptor {
- return file_aerospike_proxy_kv_proto_enumTypes[3].Descriptor()
-}
-
-func (QueryDuration) Type() protoreflect.EnumType {
- return &file_aerospike_proxy_kv_proto_enumTypes[3]
-}
-
-func (x QueryDuration) Number() protoreflect.EnumNumber {
- return protoreflect.EnumNumber(x)
-}
-
-// Deprecated: Use QueryDuration.Descriptor instead.
-func (QueryDuration) EnumDescriptor() ([]byte, []int) {
- return file_aerospike_proxy_kv_proto_rawDescGZIP(), []int{3}
-}
-
-// Secondary index collection type.
-type IndexCollectionType int32
-
-const (
- // Normal scalar index.
- IndexCollectionType_DEFAULT IndexCollectionType = 0
- // Index list elements.
- IndexCollectionType_LIST IndexCollectionType = 1
- // Index map keys.
- IndexCollectionType_MAPKEYS IndexCollectionType = 2
- // Index map values.
- IndexCollectionType_MAPVALUES IndexCollectionType = 3
-)
-
-// Enum value maps for IndexCollectionType.
-var (
- IndexCollectionType_name = map[int32]string{
- 0: "DEFAULT",
- 1: "LIST",
- 2: "MAPKEYS",
- 3: "MAPVALUES",
- }
- IndexCollectionType_value = map[string]int32{
- "DEFAULT": 0,
- "LIST": 1,
- "MAPKEYS": 2,
- "MAPVALUES": 3,
- }
-)
-
-func (x IndexCollectionType) Enum() *IndexCollectionType {
- p := new(IndexCollectionType)
- *p = x
- return p
-}
-
-func (x IndexCollectionType) String() string {
- return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
-}
-
-func (IndexCollectionType) Descriptor() protoreflect.EnumDescriptor {
- return file_aerospike_proxy_kv_proto_enumTypes[4].Descriptor()
-}
-
-func (IndexCollectionType) Type() protoreflect.EnumType {
- return &file_aerospike_proxy_kv_proto_enumTypes[4]
-}
-
-func (x IndexCollectionType) Number() protoreflect.EnumNumber {
- return protoreflect.EnumNumber(x)
-}
-
-// Deprecated: Use IndexCollectionType.Descriptor instead.
-func (IndexCollectionType) EnumDescriptor() ([]byte, []int) {
- return file_aerospike_proxy_kv_proto_rawDescGZIP(), []int{4}
-}
-
-type OperationType int32
-
-const (
- OperationType_READ OperationType = 0
- OperationType_READ_HEADER OperationType = 1
- OperationType_WRITE OperationType = 2
- OperationType_CDT_READ OperationType = 3
- OperationType_CDT_MODIFY OperationType = 4
- OperationType_MAP_READ OperationType = 5
- OperationType_MAP_MODIFY OperationType = 6
- OperationType_ADD OperationType = 7
- OperationType_EXP_READ OperationType = 8
- OperationType_EXP_MODIFY OperationType = 9
- OperationType_APPEND OperationType = 10
- OperationType_PREPEND OperationType = 11
- OperationType_TOUCH OperationType = 12
- OperationType_BIT_READ OperationType = 13
- OperationType_BIT_MODIFY OperationType = 14
- OperationType_DELETE OperationType = 15
- OperationType_HLL_READ OperationType = 16
- OperationType_HLL_MODIFY OperationType = 17
-)
-
-// Enum value maps for OperationType.
-var (
- OperationType_name = map[int32]string{
- 0: "READ",
- 1: "READ_HEADER",
- 2: "WRITE",
- 3: "CDT_READ",
- 4: "CDT_MODIFY",
- 5: "MAP_READ",
- 6: "MAP_MODIFY",
- 7: "ADD",
- 8: "EXP_READ",
- 9: "EXP_MODIFY",
- 10: "APPEND",
- 11: "PREPEND",
- 12: "TOUCH",
- 13: "BIT_READ",
- 14: "BIT_MODIFY",
- 15: "DELETE",
- 16: "HLL_READ",
- 17: "HLL_MODIFY",
- }
- OperationType_value = map[string]int32{
- "READ": 0,
- "READ_HEADER": 1,
- "WRITE": 2,
- "CDT_READ": 3,
- "CDT_MODIFY": 4,
- "MAP_READ": 5,
- "MAP_MODIFY": 6,
- "ADD": 7,
- "EXP_READ": 8,
- "EXP_MODIFY": 9,
- "APPEND": 10,
- "PREPEND": 11,
- "TOUCH": 12,
- "BIT_READ": 13,
- "BIT_MODIFY": 14,
- "DELETE": 15,
- "HLL_READ": 16,
- "HLL_MODIFY": 17,
- }
-)
-
-func (x OperationType) Enum() *OperationType {
- p := new(OperationType)
- *p = x
- return p
-}
-
-func (x OperationType) String() string {
- return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
-}
-
-func (OperationType) Descriptor() protoreflect.EnumDescriptor {
- return file_aerospike_proxy_kv_proto_enumTypes[5].Descriptor()
-}
-
-func (OperationType) Type() protoreflect.EnumType {
- return &file_aerospike_proxy_kv_proto_enumTypes[5]
-}
-
-func (x OperationType) Number() protoreflect.EnumNumber {
- return protoreflect.EnumNumber(x)
-}
-
-// Deprecated: Use OperationType.Descriptor instead.
-func (OperationType) EnumDescriptor() ([]byte, []int) {
- return file_aerospike_proxy_kv_proto_rawDescGZIP(), []int{5}
-}
-
-type RecordExistsAction int32
-
-const (
- // Create or update record.
- // Merge write command bins with existing bins.
- RecordExistsAction_UPDATE RecordExistsAction = 0
- // Update record only. Fail if record does not exist.
- // Merge write command bins with existing bins.
- RecordExistsAction_UPDATE_ONLY RecordExistsAction = 1
- // Create or replace record.
- // Delete existing bins not referenced by write command bins.
- // Supported by Aerospike server versions >= 3.1.6.
- RecordExistsAction_REPLACE RecordExistsAction = 2
- // Replace record only. Fail if record does not exist.
- // Delete existing bins not referenced by write command bins.
- // Supported by Aerospike server versions >= 3.1.6.
- RecordExistsAction_REPLACE_ONLY RecordExistsAction = 3
- // Create only. Fail if record exists.
- RecordExistsAction_CREATE_ONLY RecordExistsAction = 4
-)
-
-// Enum value maps for RecordExistsAction.
-var (
- RecordExistsAction_name = map[int32]string{
- 0: "UPDATE",
- 1: "UPDATE_ONLY",
- 2: "REPLACE",
- 3: "REPLACE_ONLY",
- 4: "CREATE_ONLY",
- }
- RecordExistsAction_value = map[string]int32{
- "UPDATE": 0,
- "UPDATE_ONLY": 1,
- "REPLACE": 2,
- "REPLACE_ONLY": 3,
- "CREATE_ONLY": 4,
- }
-)
-
-func (x RecordExistsAction) Enum() *RecordExistsAction {
- p := new(RecordExistsAction)
- *p = x
- return p
-}
-
-func (x RecordExistsAction) String() string {
- return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
-}
-
-func (RecordExistsAction) Descriptor() protoreflect.EnumDescriptor {
- return file_aerospike_proxy_kv_proto_enumTypes[6].Descriptor()
-}
-
-func (RecordExistsAction) Type() protoreflect.EnumType {
- return &file_aerospike_proxy_kv_proto_enumTypes[6]
-}
-
-func (x RecordExistsAction) Number() protoreflect.EnumNumber {
- return protoreflect.EnumNumber(x)
-}
-
-// Deprecated: Use RecordExistsAction.Descriptor instead.
-func (RecordExistsAction) EnumDescriptor() ([]byte, []int) {
- return file_aerospike_proxy_kv_proto_rawDescGZIP(), []int{6}
-}
-
-type GenerationPolicy int32
-
-const (
- // Do not use record generation to restrict writes.
- GenerationPolicy_NONE GenerationPolicy = 0
- // Update/delete record if expected generation is equal to server generation. Otherwise, fail.
- GenerationPolicy_EXPECT_GEN_EQUAL GenerationPolicy = 1
- // Update/delete record if expected generation greater than the server generation. Otherwise, fail.
- // This is useful for restore after backup.
- GenerationPolicy_EXPECT_GEN_GT GenerationPolicy = 2
-)
-
-// Enum value maps for GenerationPolicy.
-var (
- GenerationPolicy_name = map[int32]string{
- 0: "NONE",
- 1: "EXPECT_GEN_EQUAL",
- 2: "EXPECT_GEN_GT",
- }
- GenerationPolicy_value = map[string]int32{
- "NONE": 0,
- "EXPECT_GEN_EQUAL": 1,
- "EXPECT_GEN_GT": 2,
- }
-)
-
-func (x GenerationPolicy) Enum() *GenerationPolicy {
- p := new(GenerationPolicy)
- *p = x
- return p
-}
-
-func (x GenerationPolicy) String() string {
- return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
-}
-
-func (GenerationPolicy) Descriptor() protoreflect.EnumDescriptor {
- return file_aerospike_proxy_kv_proto_enumTypes[7].Descriptor()
-}
-
-func (GenerationPolicy) Type() protoreflect.EnumType {
- return &file_aerospike_proxy_kv_proto_enumTypes[7]
-}
-
-func (x GenerationPolicy) Number() protoreflect.EnumNumber {
- return protoreflect.EnumNumber(x)
-}
-
-// Deprecated: Use GenerationPolicy.Descriptor instead.
-func (GenerationPolicy) EnumDescriptor() ([]byte, []int) {
- return file_aerospike_proxy_kv_proto_rawDescGZIP(), []int{7}
-}
-
-type CommitLevel int32
-
-const (
- // Server should wait until successfully committing master and all replicas.
- CommitLevel_COMMIT_ALL CommitLevel = 0
- // Server should wait until successfully committing master only.
- CommitLevel_COMMIT_MASTER CommitLevel = 1
-)
-
-// Enum value maps for CommitLevel.
-var (
- CommitLevel_name = map[int32]string{
- 0: "COMMIT_ALL",
- 1: "COMMIT_MASTER",
- }
- CommitLevel_value = map[string]int32{
- "COMMIT_ALL": 0,
- "COMMIT_MASTER": 1,
- }
-)
-
-func (x CommitLevel) Enum() *CommitLevel {
- p := new(CommitLevel)
- *p = x
- return p
-}
-
-func (x CommitLevel) String() string {
- return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
-}
-
-func (CommitLevel) Descriptor() protoreflect.EnumDescriptor {
- return file_aerospike_proxy_kv_proto_enumTypes[8].Descriptor()
-}
-
-func (CommitLevel) Type() protoreflect.EnumType {
- return &file_aerospike_proxy_kv_proto_enumTypes[8]
-}
-
-func (x CommitLevel) Number() protoreflect.EnumNumber {
- return protoreflect.EnumNumber(x)
-}
-
-// Deprecated: Use CommitLevel.Descriptor instead.
-func (CommitLevel) EnumDescriptor() ([]byte, []int) {
- return file_aerospike_proxy_kv_proto_rawDescGZIP(), []int{8}
-}
-
-type BackgroundTaskStatus int32
-
-const (
- // Task not found.
- BackgroundTaskStatus_NOT_FOUND BackgroundTaskStatus = 0
- // Task in progress.
- BackgroundTaskStatus_IN_PROGRESS BackgroundTaskStatus = 1
- // Task completed.
- BackgroundTaskStatus_COMPLETE BackgroundTaskStatus = 2
-)
-
-// Enum value maps for BackgroundTaskStatus.
-var (
- BackgroundTaskStatus_name = map[int32]string{
- 0: "NOT_FOUND",
- 1: "IN_PROGRESS",
- 2: "COMPLETE",
- }
- BackgroundTaskStatus_value = map[string]int32{
- "NOT_FOUND": 0,
- "IN_PROGRESS": 1,
- "COMPLETE": 2,
- }
-)
-
-func (x BackgroundTaskStatus) Enum() *BackgroundTaskStatus {
- p := new(BackgroundTaskStatus)
- *p = x
- return p
-}
-
-func (x BackgroundTaskStatus) String() string {
- return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
-}
-
-func (BackgroundTaskStatus) Descriptor() protoreflect.EnumDescriptor {
- return file_aerospike_proxy_kv_proto_enumTypes[9].Descriptor()
-}
-
-func (BackgroundTaskStatus) Type() protoreflect.EnumType {
- return &file_aerospike_proxy_kv_proto_enumTypes[9]
-}
-
-func (x BackgroundTaskStatus) Number() protoreflect.EnumNumber {
- return protoreflect.EnumNumber(x)
-}
-
-// Deprecated: Use BackgroundTaskStatus.Descriptor instead.
-func (BackgroundTaskStatus) EnumDescriptor() ([]byte, []int) {
- return file_aerospike_proxy_kv_proto_rawDescGZIP(), []int{9}
-}
-
-// The about request message.
-type AboutRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-}
-
-func (x *AboutRequest) Reset() {
- *x = AboutRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_aerospike_proxy_kv_proto_msgTypes[0]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *AboutRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*AboutRequest) ProtoMessage() {}
-
-func (x *AboutRequest) ProtoReflect() protoreflect.Message {
- mi := &file_aerospike_proxy_kv_proto_msgTypes[0]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use AboutRequest.ProtoReflect.Descriptor instead.
-func (*AboutRequest) Descriptor() ([]byte, []int) {
- return file_aerospike_proxy_kv_proto_rawDescGZIP(), []int{0}
-}
-
-// The about response message.
-type AboutResponse struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Proxy server version.
- Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"`
-}
-
-func (x *AboutResponse) Reset() {
- *x = AboutResponse{}
- if protoimpl.UnsafeEnabled {
- mi := &file_aerospike_proxy_kv_proto_msgTypes[1]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *AboutResponse) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*AboutResponse) ProtoMessage() {}
-
-func (x *AboutResponse) ProtoReflect() protoreflect.Message {
- mi := &file_aerospike_proxy_kv_proto_msgTypes[1]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use AboutResponse.ProtoReflect.Descriptor instead.
-func (*AboutResponse) Descriptor() ([]byte, []int) {
- return file_aerospike_proxy_kv_proto_rawDescGZIP(), []int{1}
-}
-
-func (x *AboutResponse) GetVersion() string {
- if x != nil {
- return x.Version
- }
- return ""
-}
-
-// Read policy attributes used in read database commands that are not part of
-// the wire protocol.
-type ReadPolicy struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Read policy for AP (availability) namespaces.
- Replica Replica `protobuf:"varint,1,opt,name=replica,proto3,enum=Replica" json:"replica,omitempty"`
- // Read policy for SC (strong consistency) namespaces.
- ReadModeAP ReadModeAP `protobuf:"varint,2,opt,name=readModeAP,proto3,enum=ReadModeAP" json:"readModeAP,omitempty"`
- // Replica algorithm used to determine the target node
- // for a single record command.
- // Scan and query are not affected by replica algorithms.
- ReadModeSC ReadModeSC `protobuf:"varint,3,opt,name=readModeSC,proto3,enum=ReadModeSC" json:"readModeSC,omitempty"`
-}
-
-func (x *ReadPolicy) Reset() {
- *x = ReadPolicy{}
- if protoimpl.UnsafeEnabled {
- mi := &file_aerospike_proxy_kv_proto_msgTypes[2]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *ReadPolicy) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ReadPolicy) ProtoMessage() {}
-
-func (x *ReadPolicy) ProtoReflect() protoreflect.Message {
- mi := &file_aerospike_proxy_kv_proto_msgTypes[2]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use ReadPolicy.ProtoReflect.Descriptor instead.
-func (*ReadPolicy) Descriptor() ([]byte, []int) {
- return file_aerospike_proxy_kv_proto_rawDescGZIP(), []int{2}
-}
-
-func (x *ReadPolicy) GetReplica() Replica {
- if x != nil {
- return x.Replica
- }
- return Replica_SEQUENCE
-}
-
-func (x *ReadPolicy) GetReadModeAP() ReadModeAP {
- if x != nil {
- return x.ReadModeAP
- }
- return ReadModeAP_ONE
-}
-
-func (x *ReadPolicy) GetReadModeSC() ReadModeSC {
- if x != nil {
- return x.ReadModeSC
- }
- return ReadModeSC_SESSION
-}
-
-// Write policy attributes used in write database commands that are not part of
-// the wire protocol.
-type WritePolicy struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Read policy for AP (availability) namespaces.
- Replica Replica `protobuf:"varint,1,opt,name=replica,proto3,enum=Replica" json:"replica,omitempty"`
- // Read policy for SC (strong consistency) namespaces.
- ReadModeAP ReadModeAP `protobuf:"varint,2,opt,name=readModeAP,proto3,enum=ReadModeAP" json:"readModeAP,omitempty"`
- // Replica algorithm used to determine the target node
- // for a single record command.
- // Scan and query are not affected by replica algorithms.
- ReadModeSC ReadModeSC `protobuf:"varint,3,opt,name=readModeSC,proto3,enum=ReadModeSC" json:"readModeSC,omitempty"`
-}
-
-func (x *WritePolicy) Reset() {
- *x = WritePolicy{}
- if protoimpl.UnsafeEnabled {
- mi := &file_aerospike_proxy_kv_proto_msgTypes[3]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *WritePolicy) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*WritePolicy) ProtoMessage() {}
-
-func (x *WritePolicy) ProtoReflect() protoreflect.Message {
- mi := &file_aerospike_proxy_kv_proto_msgTypes[3]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use WritePolicy.ProtoReflect.Descriptor instead.
-func (*WritePolicy) Descriptor() ([]byte, []int) {
- return file_aerospike_proxy_kv_proto_rawDescGZIP(), []int{3}
-}
-
-func (x *WritePolicy) GetReplica() Replica {
- if x != nil {
- return x.Replica
- }
- return Replica_SEQUENCE
-}
-
-func (x *WritePolicy) GetReadModeAP() ReadModeAP {
- if x != nil {
- return x.ReadModeAP
- }
- return ReadModeAP_ONE
-}
-
-func (x *WritePolicy) GetReadModeSC() ReadModeSC {
- if x != nil {
- return x.ReadModeSC
- }
- return ReadModeSC_SESSION
-}
-
-// The request message containing the user's name.
-type AerospikeRequestPayload struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Unique identifier of the request in the stream.
- Id uint32 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
- // Client iteration number starting at 1. On first attempt iteration should
- // be 1. On first retry iteration should be 2, on second retry iteration
- // should be 3, and so on.
- Iteration uint32 `protobuf:"varint,2,opt,name=iteration,proto3" json:"iteration,omitempty"`
- // Aerospike wire format request payload.
- Payload []byte `protobuf:"bytes,3,opt,name=payload,proto3" json:"payload,omitempty"`
- // Read policy for read requests.
- ReadPolicy *ReadPolicy `protobuf:"bytes,4,opt,name=readPolicy,proto3,oneof" json:"readPolicy,omitempty"`
- // Write policy for write requests.
- WritePolicy *WritePolicy `protobuf:"bytes,5,opt,name=writePolicy,proto3,oneof" json:"writePolicy,omitempty"`
- // Scan request for scan.
- ScanRequest *ScanRequest `protobuf:"bytes,6,opt,name=scanRequest,proto3,oneof" json:"scanRequest,omitempty"`
- // Request for running a query.
- QueryRequest *QueryRequest `protobuf:"bytes,7,opt,name=queryRequest,proto3,oneof" json:"queryRequest,omitempty"`
- // Abort a scan/query on application error.
- AbortRequest *AbortRequest `protobuf:"bytes,8,opt,name=abortRequest,proto3,oneof" json:"abortRequest,omitempty"`
- // Request for executing operations background on matching records.
- BackgroundExecuteRequest *BackgroundExecuteRequest `protobuf:"bytes,9,opt,name=backgroundExecuteRequest,proto3,oneof" json:"backgroundExecuteRequest,omitempty"`
- // Request for getting background task status.
- BackgroundTaskStatusRequest *BackgroundTaskStatusRequest `protobuf:"bytes,10,opt,name=backgroundTaskStatusRequest,proto3,oneof" json:"backgroundTaskStatusRequest,omitempty"`
- // Info request
- InfoRequest *InfoRequest `protobuf:"bytes,11,opt,name=infoRequest,proto3,oneof" json:"infoRequest,omitempty"`
-}
-
-func (x *AerospikeRequestPayload) Reset() {
- *x = AerospikeRequestPayload{}
- if protoimpl.UnsafeEnabled {
- mi := &file_aerospike_proxy_kv_proto_msgTypes[4]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *AerospikeRequestPayload) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*AerospikeRequestPayload) ProtoMessage() {}
-
-func (x *AerospikeRequestPayload) ProtoReflect() protoreflect.Message {
- mi := &file_aerospike_proxy_kv_proto_msgTypes[4]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use AerospikeRequestPayload.ProtoReflect.Descriptor instead.
-func (*AerospikeRequestPayload) Descriptor() ([]byte, []int) {
- return file_aerospike_proxy_kv_proto_rawDescGZIP(), []int{4}
-}
-
-func (x *AerospikeRequestPayload) GetId() uint32 {
- if x != nil {
- return x.Id
- }
- return 0
-}
-
-func (x *AerospikeRequestPayload) GetIteration() uint32 {
- if x != nil {
- return x.Iteration
- }
- return 0
-}
-
-func (x *AerospikeRequestPayload) GetPayload() []byte {
- if x != nil {
- return x.Payload
- }
- return nil
-}
-
-func (x *AerospikeRequestPayload) GetReadPolicy() *ReadPolicy {
- if x != nil {
- return x.ReadPolicy
- }
- return nil
-}
-
-func (x *AerospikeRequestPayload) GetWritePolicy() *WritePolicy {
- if x != nil {
- return x.WritePolicy
- }
- return nil
-}
-
-func (x *AerospikeRequestPayload) GetScanRequest() *ScanRequest {
- if x != nil {
- return x.ScanRequest
- }
- return nil
-}
-
-func (x *AerospikeRequestPayload) GetQueryRequest() *QueryRequest {
- if x != nil {
- return x.QueryRequest
- }
- return nil
-}
-
-func (x *AerospikeRequestPayload) GetAbortRequest() *AbortRequest {
- if x != nil {
- return x.AbortRequest
- }
- return nil
-}
-
-func (x *AerospikeRequestPayload) GetBackgroundExecuteRequest() *BackgroundExecuteRequest {
- if x != nil {
- return x.BackgroundExecuteRequest
- }
- return nil
-}
-
-func (x *AerospikeRequestPayload) GetBackgroundTaskStatusRequest() *BackgroundTaskStatusRequest {
- if x != nil {
- return x.BackgroundTaskStatusRequest
- }
- return nil
-}
-
-func (x *AerospikeRequestPayload) GetInfoRequest() *InfoRequest {
- if x != nil {
- return x.InfoRequest
- }
- return nil
-}
-
-// The request message containing the user's name.
-type AerospikeResponsePayload struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Unique identifier of the corresponding request in the stream.
- Id uint32 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
- // Status of the corresponding request.
- //
- // if status equals 0
- // The proxy received a valid response from Aerospike. The payload's
- // result code should be used as the client result code.
- // else
- // The request failed at the proxy. This status should be used
- // as the client result code.
- Status int32 `protobuf:"varint,2,opt,name=status,proto3" json:"status,omitempty"`
- // This flag indicates that the write transaction may have completed,
- // even though the client sees an error.
- InDoubt bool `protobuf:"varint,3,opt,name=inDoubt,proto3" json:"inDoubt,omitempty"`
- // Aerospike wire format request payload.
- Payload []byte `protobuf:"bytes,4,opt,name=payload,proto3" json:"payload,omitempty"`
- // For requests with multiple responses like batch and queries,
- // hasNext flag indicates if there are more responses to follow this
- // response or if this is the last response for this request.
- HasNext bool `protobuf:"varint,5,opt,name=hasNext,proto3" json:"hasNext,omitempty"`
- // Background task status, populated for background task request.
- BackgroundTaskStatus *BackgroundTaskStatus `protobuf:"varint,6,opt,name=backgroundTaskStatus,proto3,enum=BackgroundTaskStatus,oneof" json:"backgroundTaskStatus,omitempty"`
-}
-
-func (x *AerospikeResponsePayload) Reset() {
- *x = AerospikeResponsePayload{}
- if protoimpl.UnsafeEnabled {
- mi := &file_aerospike_proxy_kv_proto_msgTypes[5]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *AerospikeResponsePayload) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*AerospikeResponsePayload) ProtoMessage() {}
-
-func (x *AerospikeResponsePayload) ProtoReflect() protoreflect.Message {
- mi := &file_aerospike_proxy_kv_proto_msgTypes[5]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use AerospikeResponsePayload.ProtoReflect.Descriptor instead.
-func (*AerospikeResponsePayload) Descriptor() ([]byte, []int) {
- return file_aerospike_proxy_kv_proto_rawDescGZIP(), []int{5}
-}
-
-func (x *AerospikeResponsePayload) GetId() uint32 {
- if x != nil {
- return x.Id
- }
- return 0
-}
-
-func (x *AerospikeResponsePayload) GetStatus() int32 {
- if x != nil {
- return x.Status
- }
- return 0
-}
-
-func (x *AerospikeResponsePayload) GetInDoubt() bool {
- if x != nil {
- return x.InDoubt
- }
- return false
-}
-
-func (x *AerospikeResponsePayload) GetPayload() []byte {
- if x != nil {
- return x.Payload
- }
- return nil
-}
-
-func (x *AerospikeResponsePayload) GetHasNext() bool {
- if x != nil {
- return x.HasNext
- }
- return false
-}
-
-func (x *AerospikeResponsePayload) GetBackgroundTaskStatus() BackgroundTaskStatus {
- if x != nil && x.BackgroundTaskStatus != nil {
- return *x.BackgroundTaskStatus
- }
- return BackgroundTaskStatus_NOT_FOUND
-}
-
-// Scan policy attributes used by queries.
-// Scan requests are send completely using proto buffers and hence include all policy attributes.
-type ScanPolicy struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Read policy for AP (availability) namespaces.
- Replica Replica `protobuf:"varint,1,opt,name=replica,proto3,enum=Replica" json:"replica,omitempty"`
- // Read policy for SC (strong consistency) namespaces.
- ReadModeAP ReadModeAP `protobuf:"varint,2,opt,name=readModeAP,proto3,enum=ReadModeAP" json:"readModeAP,omitempty"`
- // Replica algorithm used to determine the target node
- // for a single record command.
- // Scan and scan are not affected by replica algorithms.
- ReadModeSC ReadModeSC `protobuf:"varint,3,opt,name=readModeSC,proto3,enum=ReadModeSC" json:"readModeSC,omitempty"`
- // Use zlib compression on command buffers sent to the server and responses received
- // from the server when the buffer size is greater than 128 bytes.
- // This option will increase cpu and memory usage (for extra compressed buffers),but
- // decrease the size of data sent over the network.
- Compress bool `protobuf:"varint,4,opt,name=compress,proto3" json:"compress,omitempty"`
- // Optional expression filter. If filterExp exists and evaluates to false, the
- // transaction is ignored.
- Expression []byte `protobuf:"bytes,5,opt,name=expression,proto3,oneof" json:"expression,omitempty"`
- // Total transaction timeout in milliseconds.
- // Default for all other commands: 1000ms
- TotalTimeout *uint32 `protobuf:"varint,6,opt,name=totalTimeout,proto3,oneof" json:"totalTimeout,omitempty"`
- // Approximate number of records to return to client. This number is divided by the
- // number of nodes involved in the scan. The actual number of records returned
- // may be less than maxRecords if node record counts are small and unbalanced across
- // nodes.
- // Default: 0 (do not limit record count)
- MaxRecords *uint64 `protobuf:"varint,7,opt,name=maxRecords,proto3,oneof" json:"maxRecords,omitempty"`
- // Limit returned records per second (rps) rate for each server.
- // Do not apply rps limit if recordsPerSecond is zero.
- // Default: 0
- RecordsPerSecond *uint32 `protobuf:"varint,8,opt,name=recordsPerSecond,proto3,oneof" json:"recordsPerSecond,omitempty"`
- // Should scan requests be issued in parallel.
- // Default: true
- ConcurrentNodes *bool `protobuf:"varint,9,opt,name=concurrentNodes,proto3,oneof" json:"concurrentNodes,omitempty"`
- // Maximum number of concurrent requests to server nodes at any point in time.
- // If there are 16 nodes in the cluster and maxConcurrentNodes is 8, then queries
- // will be made to 8 nodes in parallel. When a scan completes, a new scan will
- // be issued until all 16 nodes have been queried.
- // Default: 0 (issue requests to all server nodes in parallel)
- MaxConcurrentNodes *uint32 `protobuf:"varint,10,opt,name=maxConcurrentNodes,proto3,oneof" json:"maxConcurrentNodes,omitempty"`
- // Should bin data be retrieved. If false, only record digests (and user keys
- // if stored on the server) are retrieved.
- // Default: true
- IncludeBinData *bool `protobuf:"varint,11,opt,name=includeBinData,proto3,oneof" json:"includeBinData,omitempty"`
-}
-
-func (x *ScanPolicy) Reset() {
- *x = ScanPolicy{}
- if protoimpl.UnsafeEnabled {
- mi := &file_aerospike_proxy_kv_proto_msgTypes[6]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *ScanPolicy) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ScanPolicy) ProtoMessage() {}
-
-func (x *ScanPolicy) ProtoReflect() protoreflect.Message {
- mi := &file_aerospike_proxy_kv_proto_msgTypes[6]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use ScanPolicy.ProtoReflect.Descriptor instead.
-func (*ScanPolicy) Descriptor() ([]byte, []int) {
- return file_aerospike_proxy_kv_proto_rawDescGZIP(), []int{6}
-}
-
-func (x *ScanPolicy) GetReplica() Replica {
- if x != nil {
- return x.Replica
- }
- return Replica_SEQUENCE
-}
-
-func (x *ScanPolicy) GetReadModeAP() ReadModeAP {
- if x != nil {
- return x.ReadModeAP
- }
- return ReadModeAP_ONE
-}
-
-func (x *ScanPolicy) GetReadModeSC() ReadModeSC {
- if x != nil {
- return x.ReadModeSC
- }
- return ReadModeSC_SESSION
-}
-
-func (x *ScanPolicy) GetCompress() bool {
- if x != nil {
- return x.Compress
- }
- return false
-}
-
-func (x *ScanPolicy) GetExpression() []byte {
- if x != nil {
- return x.Expression
- }
- return nil
-}
-
-func (x *ScanPolicy) GetTotalTimeout() uint32 {
- if x != nil && x.TotalTimeout != nil {
- return *x.TotalTimeout
- }
- return 0
-}
-
-func (x *ScanPolicy) GetMaxRecords() uint64 {
- if x != nil && x.MaxRecords != nil {
- return *x.MaxRecords
- }
- return 0
-}
-
-func (x *ScanPolicy) GetRecordsPerSecond() uint32 {
- if x != nil && x.RecordsPerSecond != nil {
- return *x.RecordsPerSecond
- }
- return 0
-}
-
-func (x *ScanPolicy) GetConcurrentNodes() bool {
- if x != nil && x.ConcurrentNodes != nil {
- return *x.ConcurrentNodes
- }
- return false
-}
-
-func (x *ScanPolicy) GetMaxConcurrentNodes() uint32 {
- if x != nil && x.MaxConcurrentNodes != nil {
- return *x.MaxConcurrentNodes
- }
- return 0
-}
-
-func (x *ScanPolicy) GetIncludeBinData() bool {
- if x != nil && x.IncludeBinData != nil {
- return *x.IncludeBinData
- }
- return false
-}
-
-// Partition status used to perform partial scans on client side retries.
-type PartitionStatus struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // The partition status.
- Id *uint32 `protobuf:"varint,1,opt,name=id,proto3,oneof" json:"id,omitempty"`
- // Begin value to start scanning / querying after.
- BVal *int64 `protobuf:"varint,2,opt,name=bVal,proto3,oneof" json:"bVal,omitempty"`
- // Digest to start scanning / querying after.
- Digest []byte `protobuf:"bytes,3,opt,name=digest,proto3,oneof" json:"digest,omitempty"`
- // Indicates this partition should be tried.
- // Should be set to true for the first attempt as well.
- Retry bool `protobuf:"varint,5,opt,name=retry,proto3" json:"retry,omitempty"`
-}
-
-func (x *PartitionStatus) Reset() {
- *x = PartitionStatus{}
- if protoimpl.UnsafeEnabled {
- mi := &file_aerospike_proxy_kv_proto_msgTypes[7]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *PartitionStatus) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*PartitionStatus) ProtoMessage() {}
-
-func (x *PartitionStatus) ProtoReflect() protoreflect.Message {
- mi := &file_aerospike_proxy_kv_proto_msgTypes[7]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use PartitionStatus.ProtoReflect.Descriptor instead.
-func (*PartitionStatus) Descriptor() ([]byte, []int) {
- return file_aerospike_proxy_kv_proto_rawDescGZIP(), []int{7}
-}
-
-func (x *PartitionStatus) GetId() uint32 {
- if x != nil && x.Id != nil {
- return *x.Id
- }
- return 0
-}
-
-func (x *PartitionStatus) GetBVal() int64 {
- if x != nil && x.BVal != nil {
- return *x.BVal
- }
- return 0
-}
-
-func (x *PartitionStatus) GetDigest() []byte {
- if x != nil {
- return x.Digest
- }
- return nil
-}
-
-func (x *PartitionStatus) GetRetry() bool {
- if x != nil {
- return x.Retry
- }
- return false
-}
-
-// A partition filter for scans and queries.
-type PartitionFilter struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Start partition id.
- // Not required if the digest to start scanning from is specified.
- Begin *uint32 `protobuf:"varint,1,opt,name=begin,proto3,oneof" json:"begin,omitempty"`
- // The number of records to scan.
- Count uint32 `protobuf:"varint,2,opt,name=count,proto3" json:"count,omitempty"`
- // Optional digest to start scanning from.
- Digest []byte `protobuf:"bytes,3,opt,name=digest,proto3,oneof" json:"digest,omitempty"`
- // Optional partition statuses used on retries to restart
- // from last known record for the partition.
- PartitionStatuses []*PartitionStatus `protobuf:"bytes,4,rep,name=partitionStatuses,proto3" json:"partitionStatuses,omitempty"`
- // Indicates if all partitions in this filter should
- // be retried ignoring the partition status
- Retry bool `protobuf:"varint,5,opt,name=retry,proto3" json:"retry,omitempty"`
-}
-
-func (x *PartitionFilter) Reset() {
- *x = PartitionFilter{}
- if protoimpl.UnsafeEnabled {
- mi := &file_aerospike_proxy_kv_proto_msgTypes[8]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *PartitionFilter) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*PartitionFilter) ProtoMessage() {}
-
-func (x *PartitionFilter) ProtoReflect() protoreflect.Message {
- mi := &file_aerospike_proxy_kv_proto_msgTypes[8]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use PartitionFilter.ProtoReflect.Descriptor instead.
-func (*PartitionFilter) Descriptor() ([]byte, []int) {
- return file_aerospike_proxy_kv_proto_rawDescGZIP(), []int{8}
-}
-
-func (x *PartitionFilter) GetBegin() uint32 {
- if x != nil && x.Begin != nil {
- return *x.Begin
- }
- return 0
-}
-
-func (x *PartitionFilter) GetCount() uint32 {
- if x != nil {
- return x.Count
- }
- return 0
-}
-
-func (x *PartitionFilter) GetDigest() []byte {
- if x != nil {
- return x.Digest
- }
- return nil
-}
-
-func (x *PartitionFilter) GetPartitionStatuses() []*PartitionStatus {
- if x != nil {
- return x.PartitionStatuses
- }
- return nil
-}
-
-func (x *PartitionFilter) GetRetry() bool {
- if x != nil {
- return x.Retry
- }
- return false
-}
-
-// A scan request.
-type ScanRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Optional scan policy.
- ScanPolicy *ScanPolicy `protobuf:"bytes,1,opt,name=scanPolicy,proto3,oneof" json:"scanPolicy,omitempty"`
- // The namespace to scan.
- Namespace string `protobuf:"bytes,2,opt,name=namespace,proto3" json:"namespace,omitempty"`
- // Optional set name.
- SetName *string `protobuf:"bytes,3,opt,name=setName,proto3,oneof" json:"setName,omitempty"`
- // Optional bin to retrieve. All bins will be returned
- // if not specified.
- BinNames []string `protobuf:"bytes,4,rep,name=binNames,proto3" json:"binNames,omitempty"`
- // Optional partition filter to selectively scan partitions.
- PartitionFilter *PartitionFilter `protobuf:"bytes,5,opt,name=partitionFilter,proto3,oneof" json:"partitionFilter,omitempty"`
-}
-
-func (x *ScanRequest) Reset() {
- *x = ScanRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_aerospike_proxy_kv_proto_msgTypes[9]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *ScanRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*ScanRequest) ProtoMessage() {}
-
-func (x *ScanRequest) ProtoReflect() protoreflect.Message {
- mi := &file_aerospike_proxy_kv_proto_msgTypes[9]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use ScanRequest.ProtoReflect.Descriptor instead.
-func (*ScanRequest) Descriptor() ([]byte, []int) {
- return file_aerospike_proxy_kv_proto_rawDescGZIP(), []int{9}
-}
-
-func (x *ScanRequest) GetScanPolicy() *ScanPolicy {
- if x != nil {
- return x.ScanPolicy
- }
- return nil
-}
-
-func (x *ScanRequest) GetNamespace() string {
- if x != nil {
- return x.Namespace
- }
- return ""
-}
-
-func (x *ScanRequest) GetSetName() string {
- if x != nil && x.SetName != nil {
- return *x.SetName
- }
- return ""
-}
-
-func (x *ScanRequest) GetBinNames() []string {
- if x != nil {
- return x.BinNames
- }
- return nil
-}
-
-func (x *ScanRequest) GetPartitionFilter() *PartitionFilter {
- if x != nil {
- return x.PartitionFilter
- }
- return nil
-}
-
-// Query policy attributes used by queries.
-// Query requests are send completely using proto buffers and hence include all policy attributes.
-type QueryPolicy struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Read policy for AP (availability) namespaces.
- Replica Replica `protobuf:"varint,1,opt,name=replica,proto3,enum=Replica" json:"replica,omitempty"`
- // Read policy for SC (strong consistency) namespaces.
- ReadModeAP ReadModeAP `protobuf:"varint,2,opt,name=readModeAP,proto3,enum=ReadModeAP" json:"readModeAP,omitempty"`
- // Replica algorithm used to determine the target node
- // for a single record command.
- // Scan and query are not affected by replica algorithms.
- ReadModeSC ReadModeSC `protobuf:"varint,3,opt,name=readModeSC,proto3,enum=ReadModeSC" json:"readModeSC,omitempty"`
- // Send user defined key in addition to hash digest on both reads and writes.
- // If the key is sent on a write, the key will be stored with the record on
- // the server.
- // Default: false (do not send the user defined key)
- SendKey *bool `protobuf:"varint,4,opt,name=sendKey,proto3,oneof" json:"sendKey,omitempty"`
- // Use zlib compression on command buffers sent to the server and responses received
- // from the server when the buffer size is greater than 128 bytes.
- // This option will increase cpu and memory usage (for extra compressed buffers),but
- // decrease the size of data sent over the network.
- Compress bool `protobuf:"varint,5,opt,name=compress,proto3" json:"compress,omitempty"`
- // Optional expression filter. If filterExp exists and evaluates to false, the
- // transaction is ignored.
- Expression []byte `protobuf:"bytes,6,opt,name=expression,proto3,oneof" json:"expression,omitempty"`
- // Total transaction timeout in milliseconds.
- // Default for all other commands: 1000ms
- TotalTimeout *uint32 `protobuf:"varint,7,opt,name=totalTimeout,proto3,oneof" json:"totalTimeout,omitempty"`
- // Maximum number of concurrent requests to server nodes at any point in time.
- // If there are 16 nodes in the cluster and maxConcurrentNodes is 8, then queries
- // will be made to 8 nodes in parallel. When a query completes, a new query will
- // be issued until all 16 nodes have been queried.
- // Default: 0 (issue requests to all server nodes in parallel)
- MaxConcurrentNodes *uint32 `protobuf:"varint,8,opt,name=maxConcurrentNodes,proto3,oneof" json:"maxConcurrentNodes,omitempty"`
- // Number of records to place in queue before blocking.
- // Records received from multiple server nodes will be placed in a queue.
- // A separate thread consumes these records in parallel.
- // If the queue is full, the producer threads will block until records are consumed.
- // Default: 5000
- RecordQueueSize *uint32 `protobuf:"varint,9,opt,name=recordQueueSize,proto3,oneof" json:"recordQueueSize,omitempty"`
- // Should bin data be retrieved. If false, only record digests (and user keys
- // if stored on the server) are retrieved.
- // Default: true
- IncludeBinData *bool `protobuf:"varint,10,opt,name=includeBinData,proto3,oneof" json:"includeBinData,omitempty"`
- // Terminate query if cluster is in migration state. If the server supports partition
- // queries or the query filter is null (scan), this field is ignored.
- // Default: false
- FailOnClusterChange *bool `protobuf:"varint,11,opt,name=failOnClusterChange,proto3,oneof" json:"failOnClusterChange,omitempty"`
- // Deprecated, use expectedDuration instead.
- // Is query expected to return less than 100 records per node.
- // If true, the server will optimize the query for a small record set.
- // This field is ignored for aggregation queries, background queries
- // and server versions < 6.0.
- // Default: false
- ShortQuery *bool `protobuf:"varint,12,opt,name=shortQuery,proto3,oneof" json:"shortQuery,omitempty"`
- // Timeout in milliseconds for "cluster-stable" info command that is run when
- // failOnClusterChange is true and server version is less than 6.0.
- //
- // Default: 1000
- InfoTimeout *uint32 `protobuf:"varint,13,opt,name=infoTimeout,proto3,oneof" json:"infoTimeout,omitempty"`
- // Expected query duration. The server treats the query in different ways depending on the expected duration.
- // This field is ignored for aggregation queries, background queries and server versions less than 6.0.
- // Default: QueryDuration.LONG
- ExpectedDuration *QueryDuration `protobuf:"varint,14,opt,name=expectedDuration,proto3,enum=QueryDuration,oneof" json:"expectedDuration,omitempty"`
-}
-
-func (x *QueryPolicy) Reset() {
- *x = QueryPolicy{}
- if protoimpl.UnsafeEnabled {
- mi := &file_aerospike_proxy_kv_proto_msgTypes[10]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *QueryPolicy) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*QueryPolicy) ProtoMessage() {}
-
-func (x *QueryPolicy) ProtoReflect() protoreflect.Message {
- mi := &file_aerospike_proxy_kv_proto_msgTypes[10]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use QueryPolicy.ProtoReflect.Descriptor instead.
-func (*QueryPolicy) Descriptor() ([]byte, []int) {
- return file_aerospike_proxy_kv_proto_rawDescGZIP(), []int{10}
-}
-
-func (x *QueryPolicy) GetReplica() Replica {
- if x != nil {
- return x.Replica
- }
- return Replica_SEQUENCE
-}
-
-func (x *QueryPolicy) GetReadModeAP() ReadModeAP {
- if x != nil {
- return x.ReadModeAP
- }
- return ReadModeAP_ONE
-}
-
-func (x *QueryPolicy) GetReadModeSC() ReadModeSC {
- if x != nil {
- return x.ReadModeSC
- }
- return ReadModeSC_SESSION
-}
-
-func (x *QueryPolicy) GetSendKey() bool {
- if x != nil && x.SendKey != nil {
- return *x.SendKey
- }
- return false
-}
-
-func (x *QueryPolicy) GetCompress() bool {
- if x != nil {
- return x.Compress
- }
- return false
-}
-
-func (x *QueryPolicy) GetExpression() []byte {
- if x != nil {
- return x.Expression
- }
- return nil
-}
-
-func (x *QueryPolicy) GetTotalTimeout() uint32 {
- if x != nil && x.TotalTimeout != nil {
- return *x.TotalTimeout
- }
- return 0
-}
-
-func (x *QueryPolicy) GetMaxConcurrentNodes() uint32 {
- if x != nil && x.MaxConcurrentNodes != nil {
- return *x.MaxConcurrentNodes
- }
- return 0
-}
-
-func (x *QueryPolicy) GetRecordQueueSize() uint32 {
- if x != nil && x.RecordQueueSize != nil {
- return *x.RecordQueueSize
- }
- return 0
-}
-
-func (x *QueryPolicy) GetIncludeBinData() bool {
- if x != nil && x.IncludeBinData != nil {
- return *x.IncludeBinData
- }
- return false
-}
-
-func (x *QueryPolicy) GetFailOnClusterChange() bool {
- if x != nil && x.FailOnClusterChange != nil {
- return *x.FailOnClusterChange
- }
- return false
-}
-
-func (x *QueryPolicy) GetShortQuery() bool {
- if x != nil && x.ShortQuery != nil {
- return *x.ShortQuery
- }
- return false
-}
-
-func (x *QueryPolicy) GetInfoTimeout() uint32 {
- if x != nil && x.InfoTimeout != nil {
- return *x.InfoTimeout
- }
- return 0
-}
-
-func (x *QueryPolicy) GetExpectedDuration() QueryDuration {
- if x != nil && x.ExpectedDuration != nil {
- return *x.ExpectedDuration
- }
- return QueryDuration_LONG
-}
-
-// Query statement filter
-type Filter struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Name of the filter.
- Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
- // Secondary index collection type.
- ColType IndexCollectionType `protobuf:"varint,2,opt,name=colType,proto3,enum=IndexCollectionType" json:"colType,omitempty"`
- // Optional filter context packed in Aerospike format.
- PackedCtx []byte `protobuf:"bytes,3,opt,name=packedCtx,proto3,oneof" json:"packedCtx,omitempty"`
- // The queried column particle type.
- ValType int32 `protobuf:"varint,4,opt,name=valType,proto3" json:"valType,omitempty"`
- // The Aerospike encoded query start "Value"
- Begin []byte `protobuf:"bytes,5,opt,name=begin,proto3,oneof" json:"begin,omitempty"`
- // The Aerospike encoded query end "Value"
- End []byte `protobuf:"bytes,6,opt,name=end,proto3,oneof" json:"end,omitempty"`
-}
-
-func (x *Filter) Reset() {
- *x = Filter{}
- if protoimpl.UnsafeEnabled {
- mi := &file_aerospike_proxy_kv_proto_msgTypes[11]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *Filter) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*Filter) ProtoMessage() {}
-
-func (x *Filter) ProtoReflect() protoreflect.Message {
- mi := &file_aerospike_proxy_kv_proto_msgTypes[11]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use Filter.ProtoReflect.Descriptor instead.
-func (*Filter) Descriptor() ([]byte, []int) {
- return file_aerospike_proxy_kv_proto_rawDescGZIP(), []int{11}
-}
-
-func (x *Filter) GetName() string {
- if x != nil {
- return x.Name
- }
- return ""
-}
-
-func (x *Filter) GetColType() IndexCollectionType {
- if x != nil {
- return x.ColType
- }
- return IndexCollectionType_DEFAULT
-}
-
-func (x *Filter) GetPackedCtx() []byte {
- if x != nil {
- return x.PackedCtx
- }
- return nil
-}
-
-func (x *Filter) GetValType() int32 {
- if x != nil {
- return x.ValType
- }
- return 0
-}
-
-func (x *Filter) GetBegin() []byte {
- if x != nil {
- return x.Begin
- }
- return nil
-}
-
-func (x *Filter) GetEnd() []byte {
- if x != nil {
- return x.End
- }
- return nil
-}
-
-// Single record operation.
-type Operation struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // The operation type.
- Type OperationType `protobuf:"varint,1,opt,name=type,proto3,enum=OperationType" json:"type,omitempty"`
- // Optional bin name.
- BinName *string `protobuf:"bytes,2,opt,name=binName,proto3,oneof" json:"binName,omitempty"`
- // Optional bin value.
- Value []byte `protobuf:"bytes,3,opt,name=value,proto3,oneof" json:"value,omitempty"`
-}
-
-func (x *Operation) Reset() {
- *x = Operation{}
- if protoimpl.UnsafeEnabled {
- mi := &file_aerospike_proxy_kv_proto_msgTypes[12]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *Operation) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*Operation) ProtoMessage() {}
-
-func (x *Operation) ProtoReflect() protoreflect.Message {
- mi := &file_aerospike_proxy_kv_proto_msgTypes[12]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use Operation.ProtoReflect.Descriptor instead.
-func (*Operation) Descriptor() ([]byte, []int) {
- return file_aerospike_proxy_kv_proto_rawDescGZIP(), []int{12}
-}
-
-func (x *Operation) GetType() OperationType {
- if x != nil {
- return x.Type
- }
- return OperationType_READ
-}
-
-func (x *Operation) GetBinName() string {
- if x != nil && x.BinName != nil {
- return *x.BinName
- }
- return ""
-}
-
-func (x *Operation) GetValue() []byte {
- if x != nil {
- return x.Value
- }
- return nil
-}
-
-// Query statement.
-type Statement struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // The namespace to query.
- Namespace string `protobuf:"bytes,1,opt,name=namespace,proto3" json:"namespace,omitempty"`
- // Optional set name.
- SetName *string `protobuf:"bytes,2,opt,name=setName,proto3,oneof" json:"setName,omitempty"`
- // Optional index name.
- IndexName *string `protobuf:"bytes,3,opt,name=indexName,proto3,oneof" json:"indexName,omitempty"`
- // Optional bins names to return for each result record.
- // If not specified all bins are returned.
- BinNames []string `protobuf:"bytes,4,rep,name=binNames,proto3" json:"binNames,omitempty"`
- // Optional Filter encoded in Aerospike wire format.
- Filter *Filter `protobuf:"bytes,5,opt,name=filter,proto3,oneof" json:"filter,omitempty"`
- // Aggregation file name.
- PackageName string `protobuf:"bytes,6,opt,name=packageName,proto3" json:"packageName,omitempty"`
- // Aggregation function name.
- FunctionName string `protobuf:"bytes,7,opt,name=functionName,proto3" json:"functionName,omitempty"`
- // Aggregation function arguments encoded as bytes using Aerospike wire format.
- FunctionArgs [][]byte `protobuf:"bytes,8,rep,name=functionArgs,proto3" json:"functionArgs,omitempty"`
- // Operations to be performed on query encoded as bytes using Aerospike wire format.
- Operations []*Operation `protobuf:"bytes,9,rep,name=operations,proto3" json:"operations,omitempty"`
- // Optional taskId.
- TaskId *int64 `protobuf:"varint,10,opt,name=taskId,proto3,oneof" json:"taskId,omitempty"`
- // Approximate number of records to return to client. This number is divided by the
- // number of nodes involved in the scan. The actual number of records returned
- // may be less than maxRecords if node record counts are small and unbalanced across
- // nodes.
- // Default: 0 (do not limit record count)
- MaxRecords *uint64 `protobuf:"varint,11,opt,name=maxRecords,proto3,oneof" json:"maxRecords,omitempty"`
- // Limit returned records per second (rps) rate for each server.
- // Do not apply rps limit if recordsPerSecond is zero.
- // Default: 0
- RecordsPerSecond *uint32 `protobuf:"varint,12,opt,name=recordsPerSecond,proto3,oneof" json:"recordsPerSecond,omitempty"`
-}
-
-func (x *Statement) Reset() {
- *x = Statement{}
- if protoimpl.UnsafeEnabled {
- mi := &file_aerospike_proxy_kv_proto_msgTypes[13]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *Statement) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*Statement) ProtoMessage() {}
-
-func (x *Statement) ProtoReflect() protoreflect.Message {
- mi := &file_aerospike_proxy_kv_proto_msgTypes[13]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use Statement.ProtoReflect.Descriptor instead.
-func (*Statement) Descriptor() ([]byte, []int) {
- return file_aerospike_proxy_kv_proto_rawDescGZIP(), []int{13}
-}
-
-func (x *Statement) GetNamespace() string {
- if x != nil {
- return x.Namespace
- }
- return ""
-}
-
-func (x *Statement) GetSetName() string {
- if x != nil && x.SetName != nil {
- return *x.SetName
- }
- return ""
-}
-
-func (x *Statement) GetIndexName() string {
- if x != nil && x.IndexName != nil {
- return *x.IndexName
- }
- return ""
-}
-
-func (x *Statement) GetBinNames() []string {
- if x != nil {
- return x.BinNames
- }
- return nil
-}
-
-func (x *Statement) GetFilter() *Filter {
- if x != nil {
- return x.Filter
- }
- return nil
-}
-
-func (x *Statement) GetPackageName() string {
- if x != nil {
- return x.PackageName
- }
- return ""
-}
-
-func (x *Statement) GetFunctionName() string {
- if x != nil {
- return x.FunctionName
- }
- return ""
-}
-
-func (x *Statement) GetFunctionArgs() [][]byte {
- if x != nil {
- return x.FunctionArgs
- }
- return nil
-}
-
-func (x *Statement) GetOperations() []*Operation {
- if x != nil {
- return x.Operations
- }
- return nil
-}
-
-func (x *Statement) GetTaskId() int64 {
- if x != nil && x.TaskId != nil {
- return *x.TaskId
- }
- return 0
-}
-
-func (x *Statement) GetMaxRecords() uint64 {
- if x != nil && x.MaxRecords != nil {
- return *x.MaxRecords
- }
- return 0
-}
-
-func (x *Statement) GetRecordsPerSecond() uint32 {
- if x != nil && x.RecordsPerSecond != nil {
- return *x.RecordsPerSecond
- }
- return 0
-}
-
-// A query request.
-type QueryRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Optional query policy.
- QueryPolicy *QueryPolicy `protobuf:"bytes,1,opt,name=queryPolicy,proto3,oneof" json:"queryPolicy,omitempty"`
- // The query statement.
- Statement *Statement `protobuf:"bytes,2,opt,name=statement,proto3" json:"statement,omitempty"`
- // Set to true for background queries.
- Background bool `protobuf:"varint,3,opt,name=background,proto3" json:"background,omitempty"`
- // Optional partition filter to selectively query partitions.
- PartitionFilter *PartitionFilter `protobuf:"bytes,4,opt,name=partitionFilter,proto3,oneof" json:"partitionFilter,omitempty"`
-}
-
-func (x *QueryRequest) Reset() {
- *x = QueryRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_aerospike_proxy_kv_proto_msgTypes[14]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *QueryRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*QueryRequest) ProtoMessage() {}
-
-func (x *QueryRequest) ProtoReflect() protoreflect.Message {
- mi := &file_aerospike_proxy_kv_proto_msgTypes[14]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use QueryRequest.ProtoReflect.Descriptor instead.
-func (*QueryRequest) Descriptor() ([]byte, []int) {
- return file_aerospike_proxy_kv_proto_rawDescGZIP(), []int{14}
-}
-
-func (x *QueryRequest) GetQueryPolicy() *QueryPolicy {
- if x != nil {
- return x.QueryPolicy
- }
- return nil
-}
-
-func (x *QueryRequest) GetStatement() *Statement {
- if x != nil {
- return x.Statement
- }
- return nil
-}
-
-func (x *QueryRequest) GetBackground() bool {
- if x != nil {
- return x.Background
- }
- return false
-}
-
-func (x *QueryRequest) GetPartitionFilter() *PartitionFilter {
- if x != nil {
- return x.PartitionFilter
- }
- return nil
-}
-
-type BackgroundExecutePolicy struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Read policy for AP (availability) namespaces.
- Replica Replica `protobuf:"varint,1,opt,name=replica,proto3,enum=Replica" json:"replica,omitempty"`
- // Read policy for SC (strong consistency) namespaces.
- ReadModeAP ReadModeAP `protobuf:"varint,2,opt,name=readModeAP,proto3,enum=ReadModeAP" json:"readModeAP,omitempty"`
- // Replica algorithm used to determine the target node
- // for a single record command.
- // Scan and scan are not affected by replica algorithms.
- ReadModeSC ReadModeSC `protobuf:"varint,3,opt,name=readModeSC,proto3,enum=ReadModeSC" json:"readModeSC,omitempty"`
- // Use zlib compression on command buffers sent to the server and responses received
- // from the server when the buffer size is greater than 128 bytes.
- // This option will increase cpu and memory usage (for extra compressed buffers),but
- // decrease the size of data sent over the network.
- Compress bool `protobuf:"varint,4,opt,name=compress,proto3" json:"compress,omitempty"`
- // Optional expression filter. If filterExp exists and evaluates to false, the
- // transaction is ignored.
- Expression []byte `protobuf:"bytes,5,opt,name=expression,proto3,oneof" json:"expression,omitempty"`
- // Total transaction timeout in milliseconds.
- // Default for all other commands: 1000ms
- TotalTimeout *uint32 `protobuf:"varint,6,opt,name=totalTimeout,proto3,oneof" json:"totalTimeout,omitempty"`
- // Send user defined key in addition to hash digest on both reads and writes.
- // If the key is sent on a write, the key will be stored with the record on
- // the server.
- //
- // Default: false (do not send the user defined key)
- SendKey *bool `protobuf:"varint,7,opt,name=sendKey,proto3,oneof" json:"sendKey,omitempty"`
- // Qualify how to handle writes where the record already exists.
- //
- // Default: RecordExistsAction.UPDATE
- RecordExistsAction *RecordExistsAction `protobuf:"varint,8,opt,name=recordExistsAction,proto3,enum=RecordExistsAction,oneof" json:"recordExistsAction,omitempty"`
- // Qualify how to handle record writes based on record generation. The default (NONE)
- // indicates that the generation is not used to restrict writes.
- //
- // The server does not support this field for UDF execute() calls. The read-modify-write
- // usage model can still be enforced inside the UDF code itself.
- //
- // Default: GenerationPolicy.NONE
- GenerationPolicy *GenerationPolicy `protobuf:"varint,9,opt,name=generationPolicy,proto3,enum=GenerationPolicy,oneof" json:"generationPolicy,omitempty"`
- // Desired consistency guarantee when committing a transaction on the server. The default
- // (COMMIT_ALL) indicates that the server should wait for master and all replica commits to
- // be successful before returning success to the client.
- //
- // Default: CommitLevel.COMMIT_ALL
- CommitLevel *CommitLevel `protobuf:"varint,10,opt,name=commitLevel,proto3,enum=CommitLevel,oneof" json:"commitLevel,omitempty"`
- // Expected generation. Generation is the number of times a record has been modified
- // (including creation) on the server. If a write operation is creating a record,
- // the expected generation would be 0
. This field is only relevant when
- // generationPolicy is not NONE.
- //
- // The server does not support this field for UDF execute() calls. The read-modify-write
- // usage model can still be enforced inside the UDF code itself.
- //
- // Default: 0
- Generation *uint32 `protobuf:"varint,11,opt,name=generation,proto3,oneof" json:"generation,omitempty"`
- // Record expiration. Also known as ttl (time to live).
- // Seconds record will live before being removed by the server.
- //
- // Expiration values:
- //
- // - -2: Do not change ttl when record is updated.
- // - -1: Never expire.
- // - 0: Default to namespace configuration variable "default-ttl" on the server.
- // - > 0: Actual ttl in seconds.
- //
- // Default: 0
- Expiration *uint32 `protobuf:"varint,12,opt,name=expiration,proto3,oneof" json:"expiration,omitempty"`
- // For client operate(), return a result for every operation.
- //
- // Some operations do not return results by default (ListOperation.clear() for example).
- // This can make it difficult to determine the desired result offset in the returned
- // bin's result list.
- //
- // Setting respondAllOps to true makes it easier to identify the desired result offset
- // (result offset equals bin's operate sequence). If there is a map operation in operate(),
- // respondAllOps will be forced to true for that operate() call.
- //
- // Default: false
- RespondAllOps *bool `protobuf:"varint,13,opt,name=respondAllOps,proto3,oneof" json:"respondAllOps,omitempty"`
- // If the transaction results in a record deletion, leave a tombstone for the record.
- // This prevents deleted records from reappearing after node failures.
- // Valid for Aerospike Server Enterprise Edition 3.10+ only.
- //
- // Default: false (do not tombstone deleted records).
- DurableDelete *bool `protobuf:"varint,14,opt,name=durableDelete,proto3,oneof" json:"durableDelete,omitempty"`
- // Operate in XDR mode. Some external connectors may need to emulate an XDR client.
- // If enabled, an XDR bit is set for writes in the wire protocol.
- //
- // Default: false.
- Xdr *bool `protobuf:"varint,15,opt,name=xdr,proto3,oneof" json:"xdr,omitempty"`
-}
-
-func (x *BackgroundExecutePolicy) Reset() {
- *x = BackgroundExecutePolicy{}
- if protoimpl.UnsafeEnabled {
- mi := &file_aerospike_proxy_kv_proto_msgTypes[15]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *BackgroundExecutePolicy) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*BackgroundExecutePolicy) ProtoMessage() {}
-
-func (x *BackgroundExecutePolicy) ProtoReflect() protoreflect.Message {
- mi := &file_aerospike_proxy_kv_proto_msgTypes[15]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use BackgroundExecutePolicy.ProtoReflect.Descriptor instead.
-func (*BackgroundExecutePolicy) Descriptor() ([]byte, []int) {
- return file_aerospike_proxy_kv_proto_rawDescGZIP(), []int{15}
-}
-
-func (x *BackgroundExecutePolicy) GetReplica() Replica {
- if x != nil {
- return x.Replica
- }
- return Replica_SEQUENCE
-}
-
-func (x *BackgroundExecutePolicy) GetReadModeAP() ReadModeAP {
- if x != nil {
- return x.ReadModeAP
- }
- return ReadModeAP_ONE
-}
-
-func (x *BackgroundExecutePolicy) GetReadModeSC() ReadModeSC {
- if x != nil {
- return x.ReadModeSC
- }
- return ReadModeSC_SESSION
-}
-
-func (x *BackgroundExecutePolicy) GetCompress() bool {
- if x != nil {
- return x.Compress
- }
- return false
-}
-
-func (x *BackgroundExecutePolicy) GetExpression() []byte {
- if x != nil {
- return x.Expression
- }
- return nil
-}
-
-func (x *BackgroundExecutePolicy) GetTotalTimeout() uint32 {
- if x != nil && x.TotalTimeout != nil {
- return *x.TotalTimeout
- }
- return 0
-}
-
-func (x *BackgroundExecutePolicy) GetSendKey() bool {
- if x != nil && x.SendKey != nil {
- return *x.SendKey
- }
- return false
-}
-
-func (x *BackgroundExecutePolicy) GetRecordExistsAction() RecordExistsAction {
- if x != nil && x.RecordExistsAction != nil {
- return *x.RecordExistsAction
- }
- return RecordExistsAction_UPDATE
-}
-
-func (x *BackgroundExecutePolicy) GetGenerationPolicy() GenerationPolicy {
- if x != nil && x.GenerationPolicy != nil {
- return *x.GenerationPolicy
- }
- return GenerationPolicy_NONE
-}
-
-func (x *BackgroundExecutePolicy) GetCommitLevel() CommitLevel {
- if x != nil && x.CommitLevel != nil {
- return *x.CommitLevel
- }
- return CommitLevel_COMMIT_ALL
-}
-
-func (x *BackgroundExecutePolicy) GetGeneration() uint32 {
- if x != nil && x.Generation != nil {
- return *x.Generation
- }
- return 0
-}
-
-func (x *BackgroundExecutePolicy) GetExpiration() uint32 {
- if x != nil && x.Expiration != nil {
- return *x.Expiration
- }
- return 0
-}
-
-func (x *BackgroundExecutePolicy) GetRespondAllOps() bool {
- if x != nil && x.RespondAllOps != nil {
- return *x.RespondAllOps
- }
- return false
-}
-
-func (x *BackgroundExecutePolicy) GetDurableDelete() bool {
- if x != nil && x.DurableDelete != nil {
- return *x.DurableDelete
- }
- return false
-}
-
-func (x *BackgroundExecutePolicy) GetXdr() bool {
- if x != nil && x.Xdr != nil {
- return *x.Xdr
- }
- return false
-}
-
-type BackgroundExecuteRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Background write policy
- WritePolicy *BackgroundExecutePolicy `protobuf:"bytes,1,opt,name=writePolicy,proto3,oneof" json:"writePolicy,omitempty"`
- // The statement containing the UDF function reference
- // or the operations to be performed on matching record
- Statement *Statement `protobuf:"bytes,2,opt,name=statement,proto3" json:"statement,omitempty"`
-}
-
-func (x *BackgroundExecuteRequest) Reset() {
- *x = BackgroundExecuteRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_aerospike_proxy_kv_proto_msgTypes[16]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *BackgroundExecuteRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*BackgroundExecuteRequest) ProtoMessage() {}
-
-func (x *BackgroundExecuteRequest) ProtoReflect() protoreflect.Message {
- mi := &file_aerospike_proxy_kv_proto_msgTypes[16]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use BackgroundExecuteRequest.ProtoReflect.Descriptor instead.
-func (*BackgroundExecuteRequest) Descriptor() ([]byte, []int) {
- return file_aerospike_proxy_kv_proto_rawDescGZIP(), []int{16}
-}
-
-func (x *BackgroundExecuteRequest) GetWritePolicy() *BackgroundExecutePolicy {
- if x != nil {
- return x.WritePolicy
- }
- return nil
-}
-
-func (x *BackgroundExecuteRequest) GetStatement() *Statement {
- if x != nil {
- return x.Statement
- }
- return nil
-}
-
-type BackgroundTaskStatusRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // The id of the task.
- TaskId int64 `protobuf:"varint,1,opt,name=taskId,proto3" json:"taskId,omitempty"`
- // If true indicates the task is a scan task else task is a query
- IsScan bool `protobuf:"varint,2,opt,name=isScan,proto3" json:"isScan,omitempty"`
-}
-
-func (x *BackgroundTaskStatusRequest) Reset() {
- *x = BackgroundTaskStatusRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_aerospike_proxy_kv_proto_msgTypes[17]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *BackgroundTaskStatusRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*BackgroundTaskStatusRequest) ProtoMessage() {}
-
-func (x *BackgroundTaskStatusRequest) ProtoReflect() protoreflect.Message {
- mi := &file_aerospike_proxy_kv_proto_msgTypes[17]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use BackgroundTaskStatusRequest.ProtoReflect.Descriptor instead.
-func (*BackgroundTaskStatusRequest) Descriptor() ([]byte, []int) {
- return file_aerospike_proxy_kv_proto_rawDescGZIP(), []int{17}
-}
-
-func (x *BackgroundTaskStatusRequest) GetTaskId() int64 {
- if x != nil {
- return x.TaskId
- }
- return 0
-}
-
-func (x *BackgroundTaskStatusRequest) GetIsScan() bool {
- if x != nil {
- return x.IsScan
- }
- return false
-}
-
-// Abort a request identified by id in the stream.
-type AbortRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Unique identifier of the corresponding request in the stream to abort.
- // Not to be confused with the AbortRequest's id in the stream.
- AbortId uint32 `protobuf:"varint,1,opt,name=abortId,proto3" json:"abortId,omitempty"`
-}
-
-func (x *AbortRequest) Reset() {
- *x = AbortRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_aerospike_proxy_kv_proto_msgTypes[18]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *AbortRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*AbortRequest) ProtoMessage() {}
-
-func (x *AbortRequest) ProtoReflect() protoreflect.Message {
- mi := &file_aerospike_proxy_kv_proto_msgTypes[18]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use AbortRequest.ProtoReflect.Descriptor instead.
-func (*AbortRequest) Descriptor() ([]byte, []int) {
- return file_aerospike_proxy_kv_proto_rawDescGZIP(), []int{18}
-}
-
-func (x *AbortRequest) GetAbortId() uint32 {
- if x != nil {
- return x.AbortId
- }
- return 0
-}
-
-// Info policy for info request
-type InfoPolicy struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- // Info command socket timeout in milliseconds.
- //
- // Default: 1000
- Timeout *uint32 `protobuf:"varint,1,opt,name=timeout,proto3,oneof" json:"timeout,omitempty"`
-}
-
-func (x *InfoPolicy) Reset() {
- *x = InfoPolicy{}
- if protoimpl.UnsafeEnabled {
- mi := &file_aerospike_proxy_kv_proto_msgTypes[19]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *InfoPolicy) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*InfoPolicy) ProtoMessage() {}
-
-func (x *InfoPolicy) ProtoReflect() protoreflect.Message {
- mi := &file_aerospike_proxy_kv_proto_msgTypes[19]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use InfoPolicy.ProtoReflect.Descriptor instead.
-func (*InfoPolicy) Descriptor() ([]byte, []int) {
- return file_aerospike_proxy_kv_proto_rawDescGZIP(), []int{19}
-}
-
-func (x *InfoPolicy) GetTimeout() uint32 {
- if x != nil && x.Timeout != nil {
- return *x.Timeout
- }
- return 0
-}
-
-// Info request
-type InfoRequest struct {
- state protoimpl.MessageState
- sizeCache protoimpl.SizeCache
- unknownFields protoimpl.UnknownFields
-
- InfoPolicy *InfoPolicy `protobuf:"bytes,1,opt,name=infoPolicy,proto3,oneof" json:"infoPolicy,omitempty"`
- Commands []string `protobuf:"bytes,2,rep,name=commands,proto3" json:"commands,omitempty"`
-}
-
-func (x *InfoRequest) Reset() {
- *x = InfoRequest{}
- if protoimpl.UnsafeEnabled {
- mi := &file_aerospike_proxy_kv_proto_msgTypes[20]
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- ms.StoreMessageInfo(mi)
- }
-}
-
-func (x *InfoRequest) String() string {
- return protoimpl.X.MessageStringOf(x)
-}
-
-func (*InfoRequest) ProtoMessage() {}
-
-func (x *InfoRequest) ProtoReflect() protoreflect.Message {
- mi := &file_aerospike_proxy_kv_proto_msgTypes[20]
- if protoimpl.UnsafeEnabled && x != nil {
- ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
- if ms.LoadMessageInfo() == nil {
- ms.StoreMessageInfo(mi)
- }
- return ms
- }
- return mi.MessageOf(x)
-}
-
-// Deprecated: Use InfoRequest.ProtoReflect.Descriptor instead.
-func (*InfoRequest) Descriptor() ([]byte, []int) {
- return file_aerospike_proxy_kv_proto_rawDescGZIP(), []int{20}
-}
-
-func (x *InfoRequest) GetInfoPolicy() *InfoPolicy {
- if x != nil {
- return x.InfoPolicy
- }
- return nil
-}
-
-func (x *InfoRequest) GetCommands() []string {
- if x != nil {
- return x.Commands
- }
- return nil
-}
-
-var File_aerospike_proxy_kv_proto protoreflect.FileDescriptor
-
-var file_aerospike_proxy_kv_proto_rawDesc = []byte{
- 0x0a, 0x18, 0x61, 0x65, 0x72, 0x6f, 0x73, 0x70, 0x69, 0x6b, 0x65, 0x5f, 0x70, 0x72, 0x6f, 0x78,
- 0x79, 0x5f, 0x6b, 0x76, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x0e, 0x0a, 0x0c, 0x41, 0x62,
- 0x6f, 0x75, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x29, 0x0a, 0x0d, 0x41, 0x62,
- 0x6f, 0x75, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76,
- 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65,
- 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x8a, 0x01, 0x0a, 0x0a, 0x52, 0x65, 0x61, 0x64, 0x50, 0x6f,
- 0x6c, 0x69, 0x63, 0x79, 0x12, 0x22, 0x0a, 0x07, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x18,
- 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x08, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x52,
- 0x07, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x12, 0x2b, 0x0a, 0x0a, 0x72, 0x65, 0x61, 0x64,
- 0x4d, 0x6f, 0x64, 0x65, 0x41, 0x50, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0b, 0x2e, 0x52,
- 0x65, 0x61, 0x64, 0x4d, 0x6f, 0x64, 0x65, 0x41, 0x50, 0x52, 0x0a, 0x72, 0x65, 0x61, 0x64, 0x4d,
- 0x6f, 0x64, 0x65, 0x41, 0x50, 0x12, 0x2b, 0x0a, 0x0a, 0x72, 0x65, 0x61, 0x64, 0x4d, 0x6f, 0x64,
- 0x65, 0x53, 0x43, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0b, 0x2e, 0x52, 0x65, 0x61, 0x64,
- 0x4d, 0x6f, 0x64, 0x65, 0x53, 0x43, 0x52, 0x0a, 0x72, 0x65, 0x61, 0x64, 0x4d, 0x6f, 0x64, 0x65,
- 0x53, 0x43, 0x22, 0x8b, 0x01, 0x0a, 0x0b, 0x57, 0x72, 0x69, 0x74, 0x65, 0x50, 0x6f, 0x6c, 0x69,
- 0x63, 0x79, 0x12, 0x22, 0x0a, 0x07, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x18, 0x01, 0x20,
- 0x01, 0x28, 0x0e, 0x32, 0x08, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x52, 0x07, 0x72,
- 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x12, 0x2b, 0x0a, 0x0a, 0x72, 0x65, 0x61, 0x64, 0x4d, 0x6f,
- 0x64, 0x65, 0x41, 0x50, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0b, 0x2e, 0x52, 0x65, 0x61,
- 0x64, 0x4d, 0x6f, 0x64, 0x65, 0x41, 0x50, 0x52, 0x0a, 0x72, 0x65, 0x61, 0x64, 0x4d, 0x6f, 0x64,
- 0x65, 0x41, 0x50, 0x12, 0x2b, 0x0a, 0x0a, 0x72, 0x65, 0x61, 0x64, 0x4d, 0x6f, 0x64, 0x65, 0x53,
- 0x43, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0b, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x4d, 0x6f,
- 0x64, 0x65, 0x53, 0x43, 0x52, 0x0a, 0x72, 0x65, 0x61, 0x64, 0x4d, 0x6f, 0x64, 0x65, 0x53, 0x43,
- 0x22, 0x81, 0x06, 0x0a, 0x17, 0x41, 0x65, 0x72, 0x6f, 0x73, 0x70, 0x69, 0x6b, 0x65, 0x52, 0x65,
- 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x0e, 0x0a, 0x02,
- 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x02, 0x69, 0x64, 0x12, 0x1c, 0x0a, 0x09,
- 0x69, 0x74, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52,
- 0x09, 0x69, 0x74, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61,
- 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x70, 0x61, 0x79,
- 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x30, 0x0a, 0x0a, 0x72, 0x65, 0x61, 0x64, 0x50, 0x6f, 0x6c, 0x69,
- 0x63, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x50,
- 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x48, 0x00, 0x52, 0x0a, 0x72, 0x65, 0x61, 0x64, 0x50, 0x6f, 0x6c,
- 0x69, 0x63, 0x79, 0x88, 0x01, 0x01, 0x12, 0x33, 0x0a, 0x0b, 0x77, 0x72, 0x69, 0x74, 0x65, 0x50,
- 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x57, 0x72,
- 0x69, 0x74, 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x48, 0x01, 0x52, 0x0b, 0x77, 0x72, 0x69,
- 0x74, 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x88, 0x01, 0x01, 0x12, 0x33, 0x0a, 0x0b, 0x73,
- 0x63, 0x61, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b,
- 0x32, 0x0c, 0x2e, 0x53, 0x63, 0x61, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x02,
- 0x52, 0x0b, 0x73, 0x63, 0x61, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x88, 0x01, 0x01,
- 0x12, 0x36, 0x0a, 0x0c, 0x71, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
- 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65,
- 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x03, 0x52, 0x0c, 0x71, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65,
- 0x71, 0x75, 0x65, 0x73, 0x74, 0x88, 0x01, 0x01, 0x12, 0x36, 0x0a, 0x0c, 0x61, 0x62, 0x6f, 0x72,
- 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d,
- 0x2e, 0x41, 0x62, 0x6f, 0x72, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x04, 0x52,
- 0x0c, 0x61, 0x62, 0x6f, 0x72, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x88, 0x01, 0x01,
- 0x12, 0x5a, 0x0a, 0x18, 0x62, 0x61, 0x63, 0x6b, 0x67, 0x72, 0x6f, 0x75, 0x6e, 0x64, 0x45, 0x78,
- 0x65, 0x63, 0x75, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x09, 0x20, 0x01,
- 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x67, 0x72, 0x6f, 0x75, 0x6e, 0x64, 0x45,
- 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x05, 0x52,
- 0x18, 0x62, 0x61, 0x63, 0x6b, 0x67, 0x72, 0x6f, 0x75, 0x6e, 0x64, 0x45, 0x78, 0x65, 0x63, 0x75,
- 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x88, 0x01, 0x01, 0x12, 0x63, 0x0a, 0x1b,
- 0x62, 0x61, 0x63, 0x6b, 0x67, 0x72, 0x6f, 0x75, 0x6e, 0x64, 0x54, 0x61, 0x73, 0x6b, 0x53, 0x74,
- 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x0a, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x1c, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x67, 0x72, 0x6f, 0x75, 0x6e, 0x64, 0x54, 0x61,
- 0x73, 0x6b, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48,
- 0x06, 0x52, 0x1b, 0x62, 0x61, 0x63, 0x6b, 0x67, 0x72, 0x6f, 0x75, 0x6e, 0x64, 0x54, 0x61, 0x73,
- 0x6b, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x88, 0x01,
- 0x01, 0x12, 0x33, 0x0a, 0x0b, 0x69, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
- 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71,
- 0x75, 0x65, 0x73, 0x74, 0x48, 0x07, 0x52, 0x0b, 0x69, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75,
- 0x65, 0x73, 0x74, 0x88, 0x01, 0x01, 0x42, 0x0d, 0x0a, 0x0b, 0x5f, 0x72, 0x65, 0x61, 0x64, 0x50,
- 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x42, 0x0e, 0x0a, 0x0c, 0x5f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x50,
- 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x42, 0x0e, 0x0a, 0x0c, 0x5f, 0x73, 0x63, 0x61, 0x6e, 0x52, 0x65,
- 0x71, 0x75, 0x65, 0x73, 0x74, 0x42, 0x0f, 0x0a, 0x0d, 0x5f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x52,
- 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x42, 0x0f, 0x0a, 0x0d, 0x5f, 0x61, 0x62, 0x6f, 0x72, 0x74,
- 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x42, 0x1b, 0x0a, 0x19, 0x5f, 0x62, 0x61, 0x63, 0x6b,
- 0x67, 0x72, 0x6f, 0x75, 0x6e, 0x64, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x52, 0x65, 0x71,
- 0x75, 0x65, 0x73, 0x74, 0x42, 0x1e, 0x0a, 0x1c, 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x67, 0x72, 0x6f,
- 0x75, 0x6e, 0x64, 0x54, 0x61, 0x73, 0x6b, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71,
- 0x75, 0x65, 0x73, 0x74, 0x42, 0x0e, 0x0a, 0x0c, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71,
- 0x75, 0x65, 0x73, 0x74, 0x22, 0xf9, 0x01, 0x0a, 0x18, 0x41, 0x65, 0x72, 0x6f, 0x73, 0x70, 0x69,
- 0x6b, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61,
- 0x64, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x02, 0x69,
- 0x64, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28,
- 0x05, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x69, 0x6e, 0x44,
- 0x6f, 0x75, 0x62, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x69, 0x6e, 0x44, 0x6f,
- 0x75, 0x62, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x04,
- 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x18, 0x0a,
- 0x07, 0x68, 0x61, 0x73, 0x4e, 0x65, 0x78, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07,
- 0x68, 0x61, 0x73, 0x4e, 0x65, 0x78, 0x74, 0x12, 0x4e, 0x0a, 0x14, 0x62, 0x61, 0x63, 0x6b, 0x67,
- 0x72, 0x6f, 0x75, 0x6e, 0x64, 0x54, 0x61, 0x73, 0x6b, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18,
- 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x15, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x67, 0x72, 0x6f, 0x75,
- 0x6e, 0x64, 0x54, 0x61, 0x73, 0x6b, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x48, 0x00, 0x52, 0x14,
- 0x62, 0x61, 0x63, 0x6b, 0x67, 0x72, 0x6f, 0x75, 0x6e, 0x64, 0x54, 0x61, 0x73, 0x6b, 0x53, 0x74,
- 0x61, 0x74, 0x75, 0x73, 0x88, 0x01, 0x01, 0x42, 0x17, 0x0a, 0x15, 0x5f, 0x62, 0x61, 0x63, 0x6b,
- 0x67, 0x72, 0x6f, 0x75, 0x6e, 0x64, 0x54, 0x61, 0x73, 0x6b, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73,
- 0x22, 0xdd, 0x04, 0x0a, 0x0a, 0x53, 0x63, 0x61, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12,
- 0x22, 0x0a, 0x07, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e,
- 0x32, 0x08, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x52, 0x07, 0x72, 0x65, 0x70, 0x6c,
- 0x69, 0x63, 0x61, 0x12, 0x2b, 0x0a, 0x0a, 0x72, 0x65, 0x61, 0x64, 0x4d, 0x6f, 0x64, 0x65, 0x41,
- 0x50, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0b, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x4d, 0x6f,
- 0x64, 0x65, 0x41, 0x50, 0x52, 0x0a, 0x72, 0x65, 0x61, 0x64, 0x4d, 0x6f, 0x64, 0x65, 0x41, 0x50,
- 0x12, 0x2b, 0x0a, 0x0a, 0x72, 0x65, 0x61, 0x64, 0x4d, 0x6f, 0x64, 0x65, 0x53, 0x43, 0x18, 0x03,
- 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0b, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x4d, 0x6f, 0x64, 0x65, 0x53,
- 0x43, 0x52, 0x0a, 0x72, 0x65, 0x61, 0x64, 0x4d, 0x6f, 0x64, 0x65, 0x53, 0x43, 0x12, 0x1a, 0x0a,
- 0x08, 0x63, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52,
- 0x08, 0x63, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x73, 0x73, 0x12, 0x23, 0x0a, 0x0a, 0x65, 0x78, 0x70,
- 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52,
- 0x0a, 0x65, 0x78, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x88, 0x01, 0x01, 0x12, 0x27,
- 0x0a, 0x0c, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x06,
- 0x20, 0x01, 0x28, 0x0d, 0x48, 0x01, 0x52, 0x0c, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x54, 0x69, 0x6d,
- 0x65, 0x6f, 0x75, 0x74, 0x88, 0x01, 0x01, 0x12, 0x23, 0x0a, 0x0a, 0x6d, 0x61, 0x78, 0x52, 0x65,
- 0x63, 0x6f, 0x72, 0x64, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x04, 0x48, 0x02, 0x52, 0x0a, 0x6d,
- 0x61, 0x78, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x88, 0x01, 0x01, 0x12, 0x2f, 0x0a, 0x10,
- 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x50, 0x65, 0x72, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64,
- 0x18, 0x08, 0x20, 0x01, 0x28, 0x0d, 0x48, 0x03, 0x52, 0x10, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64,
- 0x73, 0x50, 0x65, 0x72, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x88, 0x01, 0x01, 0x12, 0x2d, 0x0a,
- 0x0f, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x73,
- 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, 0x48, 0x04, 0x52, 0x0f, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72,
- 0x72, 0x65, 0x6e, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x88, 0x01, 0x01, 0x12, 0x33, 0x0a, 0x12,
- 0x6d, 0x61, 0x78, 0x43, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x4e, 0x6f, 0x64,
- 0x65, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0d, 0x48, 0x05, 0x52, 0x12, 0x6d, 0x61, 0x78, 0x43,
- 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x88, 0x01,
- 0x01, 0x12, 0x2b, 0x0a, 0x0e, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x42, 0x69, 0x6e, 0x44,
- 0x61, 0x74, 0x61, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, 0x48, 0x06, 0x52, 0x0e, 0x69, 0x6e, 0x63,
- 0x6c, 0x75, 0x64, 0x65, 0x42, 0x69, 0x6e, 0x44, 0x61, 0x74, 0x61, 0x88, 0x01, 0x01, 0x42, 0x0d,
- 0x0a, 0x0b, 0x5f, 0x65, 0x78, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x0f, 0x0a,
- 0x0d, 0x5f, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x42, 0x0d,
- 0x0a, 0x0b, 0x5f, 0x6d, 0x61, 0x78, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x42, 0x13, 0x0a,
- 0x11, 0x5f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x50, 0x65, 0x72, 0x53, 0x65, 0x63, 0x6f,
- 0x6e, 0x64, 0x42, 0x12, 0x0a, 0x10, 0x5f, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e,
- 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x42, 0x15, 0x0a, 0x13, 0x5f, 0x6d, 0x61, 0x78, 0x43, 0x6f,
- 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x42, 0x11, 0x0a,
- 0x0f, 0x5f, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x42, 0x69, 0x6e, 0x44, 0x61, 0x74, 0x61,
- 0x22, 0x8d, 0x01, 0x0a, 0x0f, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74,
- 0x61, 0x74, 0x75, 0x73, 0x12, 0x13, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d,
- 0x48, 0x00, 0x52, 0x02, 0x69, 0x64, 0x88, 0x01, 0x01, 0x12, 0x17, 0x0a, 0x04, 0x62, 0x56, 0x61,
- 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x48, 0x01, 0x52, 0x04, 0x62, 0x56, 0x61, 0x6c, 0x88,
- 0x01, 0x01, 0x12, 0x1b, 0x0a, 0x06, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18, 0x03, 0x20, 0x01,
- 0x28, 0x0c, 0x48, 0x02, 0x52, 0x06, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x88, 0x01, 0x01, 0x12,
- 0x14, 0x0a, 0x05, 0x72, 0x65, 0x74, 0x72, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05,
- 0x72, 0x65, 0x74, 0x72, 0x79, 0x42, 0x05, 0x0a, 0x03, 0x5f, 0x69, 0x64, 0x42, 0x07, 0x0a, 0x05,
- 0x5f, 0x62, 0x56, 0x61, 0x6c, 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74,
- 0x22, 0xca, 0x01, 0x0a, 0x0f, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x69,
- 0x6c, 0x74, 0x65, 0x72, 0x12, 0x19, 0x0a, 0x05, 0x62, 0x65, 0x67, 0x69, 0x6e, 0x18, 0x01, 0x20,
- 0x01, 0x28, 0x0d, 0x48, 0x00, 0x52, 0x05, 0x62, 0x65, 0x67, 0x69, 0x6e, 0x88, 0x01, 0x01, 0x12,
- 0x14, 0x0a, 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05,
- 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x06, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x18,
- 0x03, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x01, 0x52, 0x06, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x88,
- 0x01, 0x01, 0x12, 0x3e, 0x0a, 0x11, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x53,
- 0x74, 0x61, 0x74, 0x75, 0x73, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x10, 0x2e,
- 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52,
- 0x11, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73,
- 0x65, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x72, 0x65, 0x74, 0x72, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28,
- 0x08, 0x52, 0x05, 0x72, 0x65, 0x74, 0x72, 0x79, 0x42, 0x08, 0x0a, 0x06, 0x5f, 0x62, 0x65, 0x67,
- 0x69, 0x6e, 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x64, 0x69, 0x67, 0x65, 0x73, 0x74, 0x22, 0x88, 0x02,
- 0x0a, 0x0b, 0x53, 0x63, 0x61, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x30, 0x0a,
- 0x0a, 0x73, 0x63, 0x61, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x0b, 0x2e, 0x53, 0x63, 0x61, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x48, 0x00,
- 0x52, 0x0a, 0x73, 0x63, 0x61, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x88, 0x01, 0x01, 0x12,
- 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01,
- 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1d, 0x0a,
- 0x07, 0x73, 0x65, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x01,
- 0x52, 0x07, 0x73, 0x65, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x88, 0x01, 0x01, 0x12, 0x1a, 0x0a, 0x08,
- 0x62, 0x69, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08,
- 0x62, 0x69, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x12, 0x3f, 0x0a, 0x0f, 0x70, 0x61, 0x72, 0x74,
- 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x10, 0x2e, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x69, 0x6c,
- 0x74, 0x65, 0x72, 0x48, 0x02, 0x52, 0x0f, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e,
- 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x88, 0x01, 0x01, 0x42, 0x0d, 0x0a, 0x0b, 0x5f, 0x73, 0x63,
- 0x61, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x42, 0x0a, 0x0a, 0x08, 0x5f, 0x73, 0x65, 0x74,
- 0x4e, 0x61, 0x6d, 0x65, 0x42, 0x12, 0x0a, 0x10, 0x5f, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69,
- 0x6f, 0x6e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x22, 0x9f, 0x06, 0x0a, 0x0b, 0x51, 0x75, 0x65,
- 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x22, 0x0a, 0x07, 0x72, 0x65, 0x70, 0x6c,
- 0x69, 0x63, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x08, 0x2e, 0x52, 0x65, 0x70, 0x6c,
- 0x69, 0x63, 0x61, 0x52, 0x07, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x12, 0x2b, 0x0a, 0x0a,
- 0x72, 0x65, 0x61, 0x64, 0x4d, 0x6f, 0x64, 0x65, 0x41, 0x50, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e,
- 0x32, 0x0b, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x4d, 0x6f, 0x64, 0x65, 0x41, 0x50, 0x52, 0x0a, 0x72,
- 0x65, 0x61, 0x64, 0x4d, 0x6f, 0x64, 0x65, 0x41, 0x50, 0x12, 0x2b, 0x0a, 0x0a, 0x72, 0x65, 0x61,
- 0x64, 0x4d, 0x6f, 0x64, 0x65, 0x53, 0x43, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0b, 0x2e,
- 0x52, 0x65, 0x61, 0x64, 0x4d, 0x6f, 0x64, 0x65, 0x53, 0x43, 0x52, 0x0a, 0x72, 0x65, 0x61, 0x64,
- 0x4d, 0x6f, 0x64, 0x65, 0x53, 0x43, 0x12, 0x1d, 0x0a, 0x07, 0x73, 0x65, 0x6e, 0x64, 0x4b, 0x65,
- 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x07, 0x73, 0x65, 0x6e, 0x64, 0x4b,
- 0x65, 0x79, 0x88, 0x01, 0x01, 0x12, 0x1a, 0x0a, 0x08, 0x63, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x73,
- 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x63, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x73,
- 0x73, 0x12, 0x23, 0x0a, 0x0a, 0x65, 0x78, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x18,
- 0x06, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x01, 0x52, 0x0a, 0x65, 0x78, 0x70, 0x72, 0x65, 0x73, 0x73,
- 0x69, 0x6f, 0x6e, 0x88, 0x01, 0x01, 0x12, 0x27, 0x0a, 0x0c, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x54,
- 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0d, 0x48, 0x02, 0x52, 0x0c,
- 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x88, 0x01, 0x01, 0x12,
- 0x33, 0x0a, 0x12, 0x6d, 0x61, 0x78, 0x43, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74,
- 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0d, 0x48, 0x03, 0x52, 0x12, 0x6d,
- 0x61, 0x78, 0x43, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x4e, 0x6f, 0x64, 0x65,
- 0x73, 0x88, 0x01, 0x01, 0x12, 0x2d, 0x0a, 0x0f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x51, 0x75,
- 0x65, 0x75, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0d, 0x48, 0x04, 0x52,
- 0x0f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x51, 0x75, 0x65, 0x75, 0x65, 0x53, 0x69, 0x7a, 0x65,
- 0x88, 0x01, 0x01, 0x12, 0x2b, 0x0a, 0x0e, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x42, 0x69,
- 0x6e, 0x44, 0x61, 0x74, 0x61, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x48, 0x05, 0x52, 0x0e, 0x69,
- 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x42, 0x69, 0x6e, 0x44, 0x61, 0x74, 0x61, 0x88, 0x01, 0x01,
- 0x12, 0x35, 0x0a, 0x13, 0x66, 0x61, 0x69, 0x6c, 0x4f, 0x6e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65,
- 0x72, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, 0x48, 0x06, 0x52,
- 0x13, 0x66, 0x61, 0x69, 0x6c, 0x4f, 0x6e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x43, 0x68,
- 0x61, 0x6e, 0x67, 0x65, 0x88, 0x01, 0x01, 0x12, 0x23, 0x0a, 0x0a, 0x73, 0x68, 0x6f, 0x72, 0x74,
- 0x51, 0x75, 0x65, 0x72, 0x79, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x08, 0x48, 0x07, 0x52, 0x0a, 0x73,
- 0x68, 0x6f, 0x72, 0x74, 0x51, 0x75, 0x65, 0x72, 0x79, 0x88, 0x01, 0x01, 0x12, 0x25, 0x0a, 0x0b,
- 0x69, 0x6e, 0x66, 0x6f, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x0d, 0x20, 0x01, 0x28,
- 0x0d, 0x48, 0x08, 0x52, 0x0b, 0x69, 0x6e, 0x66, 0x6f, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74,
- 0x88, 0x01, 0x01, 0x12, 0x3f, 0x0a, 0x10, 0x65, 0x78, 0x70, 0x65, 0x63, 0x74, 0x65, 0x64, 0x44,
- 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0e, 0x2e,
- 0x51, 0x75, 0x65, 0x72, 0x79, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x09, 0x52,
- 0x10, 0x65, 0x78, 0x70, 0x65, 0x63, 0x74, 0x65, 0x64, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f,
- 0x6e, 0x88, 0x01, 0x01, 0x42, 0x0a, 0x0a, 0x08, 0x5f, 0x73, 0x65, 0x6e, 0x64, 0x4b, 0x65, 0x79,
- 0x42, 0x0d, 0x0a, 0x0b, 0x5f, 0x65, 0x78, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x42,
- 0x0f, 0x0a, 0x0d, 0x5f, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74,
- 0x42, 0x15, 0x0a, 0x13, 0x5f, 0x6d, 0x61, 0x78, 0x43, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65,
- 0x6e, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x42, 0x12, 0x0a, 0x10, 0x5f, 0x72, 0x65, 0x63, 0x6f,
- 0x72, 0x64, 0x51, 0x75, 0x65, 0x75, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x42, 0x11, 0x0a, 0x0f, 0x5f,
- 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x42, 0x69, 0x6e, 0x44, 0x61, 0x74, 0x61, 0x42, 0x16,
- 0x0a, 0x14, 0x5f, 0x66, 0x61, 0x69, 0x6c, 0x4f, 0x6e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72,
- 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x42, 0x0d, 0x0a, 0x0b, 0x5f, 0x73, 0x68, 0x6f, 0x72, 0x74,
- 0x51, 0x75, 0x65, 0x72, 0x79, 0x42, 0x0e, 0x0a, 0x0c, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x54, 0x69,
- 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x42, 0x13, 0x0a, 0x11, 0x5f, 0x65, 0x78, 0x70, 0x65, 0x63, 0x74,
- 0x65, 0x64, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xdb, 0x01, 0x0a, 0x06, 0x46,
- 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20,
- 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2e, 0x0a, 0x07, 0x63, 0x6f, 0x6c,
- 0x54, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x49, 0x6e, 0x64,
- 0x65, 0x78, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65,
- 0x52, 0x07, 0x63, 0x6f, 0x6c, 0x54, 0x79, 0x70, 0x65, 0x12, 0x21, 0x0a, 0x09, 0x70, 0x61, 0x63,
- 0x6b, 0x65, 0x64, 0x43, 0x74, 0x78, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x09,
- 0x70, 0x61, 0x63, 0x6b, 0x65, 0x64, 0x43, 0x74, 0x78, 0x88, 0x01, 0x01, 0x12, 0x18, 0x0a, 0x07,
- 0x76, 0x61, 0x6c, 0x54, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x07, 0x76,
- 0x61, 0x6c, 0x54, 0x79, 0x70, 0x65, 0x12, 0x19, 0x0a, 0x05, 0x62, 0x65, 0x67, 0x69, 0x6e, 0x18,
- 0x05, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x01, 0x52, 0x05, 0x62, 0x65, 0x67, 0x69, 0x6e, 0x88, 0x01,
- 0x01, 0x12, 0x15, 0x0a, 0x03, 0x65, 0x6e, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x02,
- 0x52, 0x03, 0x65, 0x6e, 0x64, 0x88, 0x01, 0x01, 0x42, 0x0c, 0x0a, 0x0a, 0x5f, 0x70, 0x61, 0x63,
- 0x6b, 0x65, 0x64, 0x43, 0x74, 0x78, 0x42, 0x08, 0x0a, 0x06, 0x5f, 0x62, 0x65, 0x67, 0x69, 0x6e,
- 0x42, 0x06, 0x0a, 0x04, 0x5f, 0x65, 0x6e, 0x64, 0x22, 0x7f, 0x0a, 0x09, 0x4f, 0x70, 0x65, 0x72,
- 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x22, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20,
- 0x01, 0x28, 0x0e, 0x32, 0x0e, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54,
- 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1d, 0x0a, 0x07, 0x62, 0x69, 0x6e,
- 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x07, 0x62, 0x69,
- 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x88, 0x01, 0x01, 0x12, 0x19, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75,
- 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
- 0x88, 0x01, 0x01, 0x42, 0x0a, 0x0a, 0x08, 0x5f, 0x62, 0x69, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x42,
- 0x08, 0x0a, 0x06, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x8a, 0x04, 0x0a, 0x09, 0x53, 0x74,
- 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73,
- 0x70, 0x61, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65,
- 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1d, 0x0a, 0x07, 0x73, 0x65, 0x74, 0x4e, 0x61, 0x6d, 0x65,
- 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x07, 0x73, 0x65, 0x74, 0x4e, 0x61, 0x6d,
- 0x65, 0x88, 0x01, 0x01, 0x12, 0x21, 0x0a, 0x09, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x4e, 0x61, 0x6d,
- 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x01, 0x52, 0x09, 0x69, 0x6e, 0x64, 0x65, 0x78,
- 0x4e, 0x61, 0x6d, 0x65, 0x88, 0x01, 0x01, 0x12, 0x1a, 0x0a, 0x08, 0x62, 0x69, 0x6e, 0x4e, 0x61,
- 0x6d, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x62, 0x69, 0x6e, 0x4e, 0x61,
- 0x6d, 0x65, 0x73, 0x12, 0x24, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x05, 0x20,
- 0x01, 0x28, 0x0b, 0x32, 0x07, 0x2e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x48, 0x02, 0x52, 0x06,
- 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x88, 0x01, 0x01, 0x12, 0x20, 0x0a, 0x0b, 0x70, 0x61, 0x63,
- 0x6b, 0x61, 0x67, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b,
- 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x22, 0x0a, 0x0c, 0x66,
- 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28,
- 0x09, 0x52, 0x0c, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x12,
- 0x22, 0x0a, 0x0c, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x41, 0x72, 0x67, 0x73, 0x18,
- 0x08, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x0c, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x41,
- 0x72, 0x67, 0x73, 0x12, 0x2a, 0x0a, 0x0a, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
- 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74,
- 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12,
- 0x1b, 0x0a, 0x06, 0x74, 0x61, 0x73, 0x6b, 0x49, 0x64, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x03, 0x48,
- 0x03, 0x52, 0x06, 0x74, 0x61, 0x73, 0x6b, 0x49, 0x64, 0x88, 0x01, 0x01, 0x12, 0x23, 0x0a, 0x0a,
- 0x6d, 0x61, 0x78, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x04,
- 0x48, 0x04, 0x52, 0x0a, 0x6d, 0x61, 0x78, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x88, 0x01,
- 0x01, 0x12, 0x2f, 0x0a, 0x10, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x50, 0x65, 0x72, 0x53,
- 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0d, 0x48, 0x05, 0x52, 0x10, 0x72,
- 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x50, 0x65, 0x72, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x88,
- 0x01, 0x01, 0x42, 0x0a, 0x0a, 0x08, 0x5f, 0x73, 0x65, 0x74, 0x4e, 0x61, 0x6d, 0x65, 0x42, 0x0c,
- 0x0a, 0x0a, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x4e, 0x61, 0x6d, 0x65, 0x42, 0x09, 0x0a, 0x07,
- 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x42, 0x09, 0x0a, 0x07, 0x5f, 0x74, 0x61, 0x73, 0x6b,
- 0x49, 0x64, 0x42, 0x0d, 0x0a, 0x0b, 0x5f, 0x6d, 0x61, 0x78, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64,
- 0x73, 0x42, 0x13, 0x0a, 0x11, 0x5f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x73, 0x50, 0x65, 0x72,
- 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x22, 0xf2, 0x01, 0x0a, 0x0c, 0x51, 0x75, 0x65, 0x72, 0x79,
- 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x33, 0x0a, 0x0b, 0x71, 0x75, 0x65, 0x72, 0x79,
- 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x51,
- 0x75, 0x65, 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x48, 0x00, 0x52, 0x0b, 0x71, 0x75,
- 0x65, 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x88, 0x01, 0x01, 0x12, 0x28, 0x0a, 0x09,
- 0x73, 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x0a, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x09, 0x73, 0x74, 0x61,
- 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x1e, 0x0a, 0x0a, 0x62, 0x61, 0x63, 0x6b, 0x67, 0x72,
- 0x6f, 0x75, 0x6e, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x62, 0x61, 0x63, 0x6b,
- 0x67, 0x72, 0x6f, 0x75, 0x6e, 0x64, 0x12, 0x3f, 0x0a, 0x0f, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74,
- 0x69, 0x6f, 0x6e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x10, 0x2e, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x69, 0x6c, 0x74, 0x65,
- 0x72, 0x48, 0x01, 0x52, 0x0f, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x69,
- 0x6c, 0x74, 0x65, 0x72, 0x88, 0x01, 0x01, 0x42, 0x0e, 0x0a, 0x0c, 0x5f, 0x71, 0x75, 0x65, 0x72,
- 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x42, 0x12, 0x0a, 0x10, 0x5f, 0x70, 0x61, 0x72, 0x74,
- 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x22, 0xcc, 0x06, 0x0a, 0x17,
- 0x42, 0x61, 0x63, 0x6b, 0x67, 0x72, 0x6f, 0x75, 0x6e, 0x64, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74,
- 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x22, 0x0a, 0x07, 0x72, 0x65, 0x70, 0x6c, 0x69,
- 0x63, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x08, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69,
- 0x63, 0x61, 0x52, 0x07, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x12, 0x2b, 0x0a, 0x0a, 0x72,
- 0x65, 0x61, 0x64, 0x4d, 0x6f, 0x64, 0x65, 0x41, 0x50, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32,
- 0x0b, 0x2e, 0x52, 0x65, 0x61, 0x64, 0x4d, 0x6f, 0x64, 0x65, 0x41, 0x50, 0x52, 0x0a, 0x72, 0x65,
- 0x61, 0x64, 0x4d, 0x6f, 0x64, 0x65, 0x41, 0x50, 0x12, 0x2b, 0x0a, 0x0a, 0x72, 0x65, 0x61, 0x64,
- 0x4d, 0x6f, 0x64, 0x65, 0x53, 0x43, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0b, 0x2e, 0x52,
- 0x65, 0x61, 0x64, 0x4d, 0x6f, 0x64, 0x65, 0x53, 0x43, 0x52, 0x0a, 0x72, 0x65, 0x61, 0x64, 0x4d,
- 0x6f, 0x64, 0x65, 0x53, 0x43, 0x12, 0x1a, 0x0a, 0x08, 0x63, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x73,
- 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x63, 0x6f, 0x6d, 0x70, 0x72, 0x65, 0x73,
- 0x73, 0x12, 0x23, 0x0a, 0x0a, 0x65, 0x78, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x18,
- 0x05, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0a, 0x65, 0x78, 0x70, 0x72, 0x65, 0x73, 0x73,
- 0x69, 0x6f, 0x6e, 0x88, 0x01, 0x01, 0x12, 0x27, 0x0a, 0x0c, 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x54,
- 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0d, 0x48, 0x01, 0x52, 0x0c,
- 0x74, 0x6f, 0x74, 0x61, 0x6c, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x88, 0x01, 0x01, 0x12,
- 0x1d, 0x0a, 0x07, 0x73, 0x65, 0x6e, 0x64, 0x4b, 0x65, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08,
- 0x48, 0x02, 0x52, 0x07, 0x73, 0x65, 0x6e, 0x64, 0x4b, 0x65, 0x79, 0x88, 0x01, 0x01, 0x12, 0x48,
- 0x0a, 0x12, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x45, 0x78, 0x69, 0x73, 0x74, 0x73, 0x41, 0x63,
- 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x13, 0x2e, 0x52, 0x65, 0x63,
- 0x6f, 0x72, 0x64, 0x45, 0x78, 0x69, 0x73, 0x74, 0x73, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x48,
- 0x03, 0x52, 0x12, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x45, 0x78, 0x69, 0x73, 0x74, 0x73, 0x41,
- 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x88, 0x01, 0x01, 0x12, 0x42, 0x0a, 0x10, 0x67, 0x65, 0x6e, 0x65,
- 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x09, 0x20, 0x01,
- 0x28, 0x0e, 0x32, 0x11, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50,
- 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x48, 0x04, 0x52, 0x10, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74,
- 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x88, 0x01, 0x01, 0x12, 0x33, 0x0a, 0x0b,
- 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x18, 0x0a, 0x20, 0x01, 0x28,
- 0x0e, 0x32, 0x0c, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x48,
- 0x05, 0x52, 0x0b, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x88, 0x01,
- 0x01, 0x12, 0x23, 0x0a, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18,
- 0x0b, 0x20, 0x01, 0x28, 0x0d, 0x48, 0x06, 0x52, 0x0a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74,
- 0x69, 0x6f, 0x6e, 0x88, 0x01, 0x01, 0x12, 0x23, 0x0a, 0x0a, 0x65, 0x78, 0x70, 0x69, 0x72, 0x61,
- 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0d, 0x48, 0x07, 0x52, 0x0a, 0x65, 0x78,
- 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x88, 0x01, 0x01, 0x12, 0x29, 0x0a, 0x0d, 0x72,
- 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x64, 0x41, 0x6c, 0x6c, 0x4f, 0x70, 0x73, 0x18, 0x0d, 0x20, 0x01,
- 0x28, 0x08, 0x48, 0x08, 0x52, 0x0d, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x64, 0x41, 0x6c, 0x6c,
- 0x4f, 0x70, 0x73, 0x88, 0x01, 0x01, 0x12, 0x29, 0x0a, 0x0d, 0x64, 0x75, 0x72, 0x61, 0x62, 0x6c,
- 0x65, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x08, 0x48, 0x09, 0x52,
- 0x0d, 0x64, 0x75, 0x72, 0x61, 0x62, 0x6c, 0x65, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x88, 0x01,
- 0x01, 0x12, 0x15, 0x0a, 0x03, 0x78, 0x64, 0x72, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x08, 0x48, 0x0a,
- 0x52, 0x03, 0x78, 0x64, 0x72, 0x88, 0x01, 0x01, 0x42, 0x0d, 0x0a, 0x0b, 0x5f, 0x65, 0x78, 0x70,
- 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x0f, 0x0a, 0x0d, 0x5f, 0x74, 0x6f, 0x74, 0x61,
- 0x6c, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x42, 0x0a, 0x0a, 0x08, 0x5f, 0x73, 0x65, 0x6e,
- 0x64, 0x4b, 0x65, 0x79, 0x42, 0x15, 0x0a, 0x13, 0x5f, 0x72, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x45,
- 0x78, 0x69, 0x73, 0x74, 0x73, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x13, 0x0a, 0x11, 0x5f,
- 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79,
- 0x42, 0x0e, 0x0a, 0x0c, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x4c, 0x65, 0x76, 0x65, 0x6c,
- 0x42, 0x0d, 0x0a, 0x0b, 0x5f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42,
- 0x0d, 0x0a, 0x0b, 0x5f, 0x65, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x10,
- 0x0a, 0x0e, 0x5f, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x64, 0x41, 0x6c, 0x6c, 0x4f, 0x70, 0x73,
- 0x42, 0x10, 0x0a, 0x0e, 0x5f, 0x64, 0x75, 0x72, 0x61, 0x62, 0x6c, 0x65, 0x44, 0x65, 0x6c, 0x65,
- 0x74, 0x65, 0x42, 0x06, 0x0a, 0x04, 0x5f, 0x78, 0x64, 0x72, 0x22, 0x95, 0x01, 0x0a, 0x18, 0x42,
- 0x61, 0x63, 0x6b, 0x67, 0x72, 0x6f, 0x75, 0x6e, 0x64, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65,
- 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3f, 0x0a, 0x0b, 0x77, 0x72, 0x69, 0x74, 0x65,
- 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x42,
- 0x61, 0x63, 0x6b, 0x67, 0x72, 0x6f, 0x75, 0x6e, 0x64, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65,
- 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x48, 0x00, 0x52, 0x0b, 0x77, 0x72, 0x69, 0x74, 0x65, 0x50,
- 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x88, 0x01, 0x01, 0x12, 0x28, 0x0a, 0x09, 0x73, 0x74, 0x61, 0x74,
- 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x53, 0x74,
- 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x52, 0x09, 0x73, 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65,
- 0x6e, 0x74, 0x42, 0x0e, 0x0a, 0x0c, 0x5f, 0x77, 0x72, 0x69, 0x74, 0x65, 0x50, 0x6f, 0x6c, 0x69,
- 0x63, 0x79, 0x22, 0x4d, 0x0a, 0x1b, 0x42, 0x61, 0x63, 0x6b, 0x67, 0x72, 0x6f, 0x75, 0x6e, 0x64,
- 0x54, 0x61, 0x73, 0x6b, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
- 0x74, 0x12, 0x16, 0x0a, 0x06, 0x74, 0x61, 0x73, 0x6b, 0x49, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28,
- 0x03, 0x52, 0x06, 0x74, 0x61, 0x73, 0x6b, 0x49, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x69, 0x73, 0x53,
- 0x63, 0x61, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x06, 0x69, 0x73, 0x53, 0x63, 0x61,
- 0x6e, 0x22, 0x28, 0x0a, 0x0c, 0x41, 0x62, 0x6f, 0x72, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
- 0x74, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x62, 0x6f, 0x72, 0x74, 0x49, 0x64, 0x18, 0x01, 0x20, 0x01,
- 0x28, 0x0d, 0x52, 0x07, 0x61, 0x62, 0x6f, 0x72, 0x74, 0x49, 0x64, 0x22, 0x37, 0x0a, 0x0a, 0x49,
- 0x6e, 0x66, 0x6f, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x1d, 0x0a, 0x07, 0x74, 0x69, 0x6d,
- 0x65, 0x6f, 0x75, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x48, 0x00, 0x52, 0x07, 0x74, 0x69,
- 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x88, 0x01, 0x01, 0x42, 0x0a, 0x0a, 0x08, 0x5f, 0x74, 0x69, 0x6d,
- 0x65, 0x6f, 0x75, 0x74, 0x22, 0x6a, 0x0a, 0x0b, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75,
- 0x65, 0x73, 0x74, 0x12, 0x30, 0x0a, 0x0a, 0x69, 0x6e, 0x66, 0x6f, 0x50, 0x6f, 0x6c, 0x69, 0x63,
- 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0b, 0x2e, 0x49, 0x6e, 0x66, 0x6f, 0x50, 0x6f,
- 0x6c, 0x69, 0x63, 0x79, 0x48, 0x00, 0x52, 0x0a, 0x69, 0x6e, 0x66, 0x6f, 0x50, 0x6f, 0x6c, 0x69,
- 0x63, 0x79, 0x88, 0x01, 0x01, 0x12, 0x1a, 0x0a, 0x08, 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64,
- 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64,
- 0x73, 0x42, 0x0d, 0x0a, 0x0b, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79,
- 0x2a, 0x1e, 0x0a, 0x0a, 0x52, 0x65, 0x61, 0x64, 0x4d, 0x6f, 0x64, 0x65, 0x41, 0x50, 0x12, 0x07,
- 0x0a, 0x03, 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, 0x07, 0x0a, 0x03, 0x41, 0x4c, 0x4c, 0x10, 0x01,
- 0x2a, 0x52, 0x0a, 0x0a, 0x52, 0x65, 0x61, 0x64, 0x4d, 0x6f, 0x64, 0x65, 0x53, 0x43, 0x12, 0x0b,
- 0x0a, 0x07, 0x53, 0x45, 0x53, 0x53, 0x49, 0x4f, 0x4e, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x4c,
- 0x49, 0x4e, 0x45, 0x41, 0x52, 0x49, 0x5a, 0x45, 0x10, 0x01, 0x12, 0x11, 0x0a, 0x0d, 0x41, 0x4c,
- 0x4c, 0x4f, 0x57, 0x5f, 0x52, 0x45, 0x50, 0x4c, 0x49, 0x43, 0x41, 0x10, 0x02, 0x12, 0x15, 0x0a,
- 0x11, 0x41, 0x4c, 0x4c, 0x4f, 0x57, 0x5f, 0x55, 0x4e, 0x41, 0x56, 0x41, 0x49, 0x4c, 0x41, 0x42,
- 0x4c, 0x45, 0x10, 0x03, 0x2a, 0x53, 0x0a, 0x07, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x12,
- 0x0c, 0x0a, 0x08, 0x53, 0x45, 0x51, 0x55, 0x45, 0x4e, 0x43, 0x45, 0x10, 0x00, 0x12, 0x0a, 0x0a,
- 0x06, 0x4d, 0x41, 0x53, 0x54, 0x45, 0x52, 0x10, 0x01, 0x12, 0x11, 0x0a, 0x0d, 0x4d, 0x41, 0x53,
- 0x54, 0x45, 0x52, 0x5f, 0x50, 0x52, 0x4f, 0x4c, 0x45, 0x53, 0x10, 0x02, 0x12, 0x0f, 0x0a, 0x0b,
- 0x50, 0x52, 0x45, 0x46, 0x45, 0x52, 0x5f, 0x52, 0x41, 0x43, 0x4b, 0x10, 0x03, 0x12, 0x0a, 0x0a,
- 0x06, 0x52, 0x41, 0x4e, 0x44, 0x4f, 0x4d, 0x10, 0x04, 0x2a, 0x37, 0x0a, 0x0d, 0x51, 0x75, 0x65,
- 0x72, 0x79, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x08, 0x0a, 0x04, 0x4c, 0x4f,
- 0x4e, 0x47, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x53, 0x48, 0x4f, 0x52, 0x54, 0x10, 0x01, 0x12,
- 0x11, 0x0a, 0x0d, 0x4c, 0x4f, 0x4e, 0x47, 0x5f, 0x52, 0x45, 0x4c, 0x41, 0x58, 0x5f, 0x41, 0x50,
- 0x10, 0x02, 0x2a, 0x48, 0x0a, 0x13, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x43, 0x6f, 0x6c, 0x6c, 0x65,
- 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x44, 0x45, 0x46,
- 0x41, 0x55, 0x4c, 0x54, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x4c, 0x49, 0x53, 0x54, 0x10, 0x01,
- 0x12, 0x0b, 0x0a, 0x07, 0x4d, 0x41, 0x50, 0x4b, 0x45, 0x59, 0x53, 0x10, 0x02, 0x12, 0x0d, 0x0a,
- 0x09, 0x4d, 0x41, 0x50, 0x56, 0x41, 0x4c, 0x55, 0x45, 0x53, 0x10, 0x03, 0x2a, 0x84, 0x02, 0x0a,
- 0x0d, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x08,
- 0x0a, 0x04, 0x52, 0x45, 0x41, 0x44, 0x10, 0x00, 0x12, 0x0f, 0x0a, 0x0b, 0x52, 0x45, 0x41, 0x44,
- 0x5f, 0x48, 0x45, 0x41, 0x44, 0x45, 0x52, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x57, 0x52, 0x49,
- 0x54, 0x45, 0x10, 0x02, 0x12, 0x0c, 0x0a, 0x08, 0x43, 0x44, 0x54, 0x5f, 0x52, 0x45, 0x41, 0x44,
- 0x10, 0x03, 0x12, 0x0e, 0x0a, 0x0a, 0x43, 0x44, 0x54, 0x5f, 0x4d, 0x4f, 0x44, 0x49, 0x46, 0x59,
- 0x10, 0x04, 0x12, 0x0c, 0x0a, 0x08, 0x4d, 0x41, 0x50, 0x5f, 0x52, 0x45, 0x41, 0x44, 0x10, 0x05,
- 0x12, 0x0e, 0x0a, 0x0a, 0x4d, 0x41, 0x50, 0x5f, 0x4d, 0x4f, 0x44, 0x49, 0x46, 0x59, 0x10, 0x06,
- 0x12, 0x07, 0x0a, 0x03, 0x41, 0x44, 0x44, 0x10, 0x07, 0x12, 0x0c, 0x0a, 0x08, 0x45, 0x58, 0x50,
- 0x5f, 0x52, 0x45, 0x41, 0x44, 0x10, 0x08, 0x12, 0x0e, 0x0a, 0x0a, 0x45, 0x58, 0x50, 0x5f, 0x4d,
- 0x4f, 0x44, 0x49, 0x46, 0x59, 0x10, 0x09, 0x12, 0x0a, 0x0a, 0x06, 0x41, 0x50, 0x50, 0x45, 0x4e,
- 0x44, 0x10, 0x0a, 0x12, 0x0b, 0x0a, 0x07, 0x50, 0x52, 0x45, 0x50, 0x45, 0x4e, 0x44, 0x10, 0x0b,
- 0x12, 0x09, 0x0a, 0x05, 0x54, 0x4f, 0x55, 0x43, 0x48, 0x10, 0x0c, 0x12, 0x0c, 0x0a, 0x08, 0x42,
- 0x49, 0x54, 0x5f, 0x52, 0x45, 0x41, 0x44, 0x10, 0x0d, 0x12, 0x0e, 0x0a, 0x0a, 0x42, 0x49, 0x54,
- 0x5f, 0x4d, 0x4f, 0x44, 0x49, 0x46, 0x59, 0x10, 0x0e, 0x12, 0x0a, 0x0a, 0x06, 0x44, 0x45, 0x4c,
- 0x45, 0x54, 0x45, 0x10, 0x0f, 0x12, 0x0c, 0x0a, 0x08, 0x48, 0x4c, 0x4c, 0x5f, 0x52, 0x45, 0x41,
- 0x44, 0x10, 0x10, 0x12, 0x0e, 0x0a, 0x0a, 0x48, 0x4c, 0x4c, 0x5f, 0x4d, 0x4f, 0x44, 0x49, 0x46,
- 0x59, 0x10, 0x11, 0x2a, 0x61, 0x0a, 0x12, 0x52, 0x65, 0x63, 0x6f, 0x72, 0x64, 0x45, 0x78, 0x69,
- 0x73, 0x74, 0x73, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0a, 0x0a, 0x06, 0x55, 0x50, 0x44,
- 0x41, 0x54, 0x45, 0x10, 0x00, 0x12, 0x0f, 0x0a, 0x0b, 0x55, 0x50, 0x44, 0x41, 0x54, 0x45, 0x5f,
- 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x52, 0x45, 0x50, 0x4c, 0x41, 0x43,
- 0x45, 0x10, 0x02, 0x12, 0x10, 0x0a, 0x0c, 0x52, 0x45, 0x50, 0x4c, 0x41, 0x43, 0x45, 0x5f, 0x4f,
- 0x4e, 0x4c, 0x59, 0x10, 0x03, 0x12, 0x0f, 0x0a, 0x0b, 0x43, 0x52, 0x45, 0x41, 0x54, 0x45, 0x5f,
- 0x4f, 0x4e, 0x4c, 0x59, 0x10, 0x04, 0x2a, 0x45, 0x0a, 0x10, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61,
- 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x4f,
- 0x4e, 0x45, 0x10, 0x00, 0x12, 0x14, 0x0a, 0x10, 0x45, 0x58, 0x50, 0x45, 0x43, 0x54, 0x5f, 0x47,
- 0x45, 0x4e, 0x5f, 0x45, 0x51, 0x55, 0x41, 0x4c, 0x10, 0x01, 0x12, 0x11, 0x0a, 0x0d, 0x45, 0x58,
- 0x50, 0x45, 0x43, 0x54, 0x5f, 0x47, 0x45, 0x4e, 0x5f, 0x47, 0x54, 0x10, 0x02, 0x2a, 0x30, 0x0a,
- 0x0b, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x4c, 0x65, 0x76, 0x65, 0x6c, 0x12, 0x0e, 0x0a, 0x0a,
- 0x43, 0x4f, 0x4d, 0x4d, 0x49, 0x54, 0x5f, 0x41, 0x4c, 0x4c, 0x10, 0x00, 0x12, 0x11, 0x0a, 0x0d,
- 0x43, 0x4f, 0x4d, 0x4d, 0x49, 0x54, 0x5f, 0x4d, 0x41, 0x53, 0x54, 0x45, 0x52, 0x10, 0x01, 0x2a,
- 0x44, 0x0a, 0x14, 0x42, 0x61, 0x63, 0x6b, 0x67, 0x72, 0x6f, 0x75, 0x6e, 0x64, 0x54, 0x61, 0x73,
- 0x6b, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x0d, 0x0a, 0x09, 0x4e, 0x4f, 0x54, 0x5f, 0x46,
- 0x4f, 0x55, 0x4e, 0x44, 0x10, 0x00, 0x12, 0x0f, 0x0a, 0x0b, 0x49, 0x4e, 0x5f, 0x50, 0x52, 0x4f,
- 0x47, 0x52, 0x45, 0x53, 0x53, 0x10, 0x01, 0x12, 0x0c, 0x0a, 0x08, 0x43, 0x4f, 0x4d, 0x50, 0x4c,
- 0x45, 0x54, 0x45, 0x10, 0x02, 0x32, 0x2f, 0x0a, 0x05, 0x41, 0x62, 0x6f, 0x75, 0x74, 0x12, 0x26,
- 0x0a, 0x03, 0x47, 0x65, 0x74, 0x12, 0x0d, 0x2e, 0x41, 0x62, 0x6f, 0x75, 0x74, 0x52, 0x65, 0x71,
- 0x75, 0x65, 0x73, 0x74, 0x1a, 0x0e, 0x2e, 0x41, 0x62, 0x6f, 0x75, 0x74, 0x52, 0x65, 0x73, 0x70,
- 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x32, 0x9c, 0x0a, 0x0a, 0x03, 0x4b, 0x56, 0x53, 0x12, 0x3d,
- 0x0a, 0x04, 0x52, 0x65, 0x61, 0x64, 0x12, 0x18, 0x2e, 0x41, 0x65, 0x72, 0x6f, 0x73, 0x70, 0x69,
- 0x6b, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64,
- 0x1a, 0x19, 0x2e, 0x41, 0x65, 0x72, 0x6f, 0x73, 0x70, 0x69, 0x6b, 0x65, 0x52, 0x65, 0x73, 0x70,
- 0x6f, 0x6e, 0x73, 0x65, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x22, 0x00, 0x12, 0x4a, 0x0a,
- 0x0d, 0x52, 0x65, 0x61, 0x64, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x12, 0x18,
- 0x2e, 0x41, 0x65, 0x72, 0x6f, 0x73, 0x70, 0x69, 0x6b, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
- 0x74, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x1a, 0x19, 0x2e, 0x41, 0x65, 0x72, 0x6f, 0x73,
- 0x70, 0x69, 0x6b, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x50, 0x61, 0x79, 0x6c,
- 0x6f, 0x61, 0x64, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x12, 0x42, 0x0a, 0x09, 0x47, 0x65, 0x74,
- 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x18, 0x2e, 0x41, 0x65, 0x72, 0x6f, 0x73, 0x70, 0x69,
- 0x6b, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64,
- 0x1a, 0x19, 0x2e, 0x41, 0x65, 0x72, 0x6f, 0x73, 0x70, 0x69, 0x6b, 0x65, 0x52, 0x65, 0x73, 0x70,
- 0x6f, 0x6e, 0x73, 0x65, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x22, 0x00, 0x12, 0x4f, 0x0a,
- 0x12, 0x47, 0x65, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d,
- 0x69, 0x6e, 0x67, 0x12, 0x18, 0x2e, 0x41, 0x65, 0x72, 0x6f, 0x73, 0x70, 0x69, 0x6b, 0x65, 0x52,
- 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x1a, 0x19, 0x2e,
- 0x41, 0x65, 0x72, 0x6f, 0x73, 0x70, 0x69, 0x6b, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
- 0x65, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x12, 0x3f,
- 0x0a, 0x06, 0x45, 0x78, 0x69, 0x73, 0x74, 0x73, 0x12, 0x18, 0x2e, 0x41, 0x65, 0x72, 0x6f, 0x73,
- 0x70, 0x69, 0x6b, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x79, 0x6c, 0x6f,
- 0x61, 0x64, 0x1a, 0x19, 0x2e, 0x41, 0x65, 0x72, 0x6f, 0x73, 0x70, 0x69, 0x6b, 0x65, 0x52, 0x65,
- 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x22, 0x00, 0x12,
- 0x4c, 0x0a, 0x0f, 0x45, 0x78, 0x69, 0x73, 0x74, 0x73, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69,
- 0x6e, 0x67, 0x12, 0x18, 0x2e, 0x41, 0x65, 0x72, 0x6f, 0x73, 0x70, 0x69, 0x6b, 0x65, 0x52, 0x65,
- 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x1a, 0x19, 0x2e, 0x41,
- 0x65, 0x72, 0x6f, 0x73, 0x70, 0x69, 0x6b, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
- 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x12, 0x3e, 0x0a,
- 0x05, 0x57, 0x72, 0x69, 0x74, 0x65, 0x12, 0x18, 0x2e, 0x41, 0x65, 0x72, 0x6f, 0x73, 0x70, 0x69,
- 0x6b, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64,
- 0x1a, 0x19, 0x2e, 0x41, 0x65, 0x72, 0x6f, 0x73, 0x70, 0x69, 0x6b, 0x65, 0x52, 0x65, 0x73, 0x70,
- 0x6f, 0x6e, 0x73, 0x65, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x22, 0x00, 0x12, 0x4b, 0x0a,
- 0x0e, 0x57, 0x72, 0x69, 0x74, 0x65, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x12,
- 0x18, 0x2e, 0x41, 0x65, 0x72, 0x6f, 0x73, 0x70, 0x69, 0x6b, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65,
- 0x73, 0x74, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x1a, 0x19, 0x2e, 0x41, 0x65, 0x72, 0x6f,
- 0x73, 0x70, 0x69, 0x6b, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x50, 0x61, 0x79,
- 0x6c, 0x6f, 0x61, 0x64, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x12, 0x3f, 0x0a, 0x06, 0x44, 0x65,
- 0x6c, 0x65, 0x74, 0x65, 0x12, 0x18, 0x2e, 0x41, 0x65, 0x72, 0x6f, 0x73, 0x70, 0x69, 0x6b, 0x65,
- 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x1a, 0x19,
- 0x2e, 0x41, 0x65, 0x72, 0x6f, 0x73, 0x70, 0x69, 0x6b, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
- 0x73, 0x65, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x22, 0x00, 0x12, 0x4c, 0x0a, 0x0f, 0x44,
- 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x12, 0x18,
- 0x2e, 0x41, 0x65, 0x72, 0x6f, 0x73, 0x70, 0x69, 0x6b, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
- 0x74, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x1a, 0x19, 0x2e, 0x41, 0x65, 0x72, 0x6f, 0x73,
- 0x70, 0x69, 0x6b, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x50, 0x61, 0x79, 0x6c,
- 0x6f, 0x61, 0x64, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x12, 0x3e, 0x0a, 0x05, 0x54, 0x6f, 0x75,
- 0x63, 0x68, 0x12, 0x18, 0x2e, 0x41, 0x65, 0x72, 0x6f, 0x73, 0x70, 0x69, 0x6b, 0x65, 0x52, 0x65,
- 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x1a, 0x19, 0x2e, 0x41,
- 0x65, 0x72, 0x6f, 0x73, 0x70, 0x69, 0x6b, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
- 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x22, 0x00, 0x12, 0x4b, 0x0a, 0x0e, 0x54, 0x6f, 0x75,
- 0x63, 0x68, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x12, 0x18, 0x2e, 0x41, 0x65,
- 0x72, 0x6f, 0x73, 0x70, 0x69, 0x6b, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61,
- 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x1a, 0x19, 0x2e, 0x41, 0x65, 0x72, 0x6f, 0x73, 0x70, 0x69, 0x6b,
- 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64,
- 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x12, 0x40, 0x0a, 0x07, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74,
- 0x65, 0x12, 0x18, 0x2e, 0x41, 0x65, 0x72, 0x6f, 0x73, 0x70, 0x69, 0x6b, 0x65, 0x52, 0x65, 0x71,
- 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x1a, 0x19, 0x2e, 0x41, 0x65,
- 0x72, 0x6f, 0x73, 0x70, 0x69, 0x6b, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x50,
- 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x22, 0x00, 0x12, 0x4d, 0x0a, 0x10, 0x4f, 0x70, 0x65, 0x72,
- 0x61, 0x74, 0x65, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x12, 0x18, 0x2e, 0x41,
- 0x65, 0x72, 0x6f, 0x73, 0x70, 0x69, 0x6b, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50,
- 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x1a, 0x19, 0x2e, 0x41, 0x65, 0x72, 0x6f, 0x73, 0x70, 0x69,
- 0x6b, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61,
- 0x64, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x12, 0x40, 0x0a, 0x07, 0x45, 0x78, 0x65, 0x63, 0x75,
- 0x74, 0x65, 0x12, 0x18, 0x2e, 0x41, 0x65, 0x72, 0x6f, 0x73, 0x70, 0x69, 0x6b, 0x65, 0x52, 0x65,
- 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x1a, 0x19, 0x2e, 0x41,
- 0x65, 0x72, 0x6f, 0x73, 0x70, 0x69, 0x6b, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
- 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x22, 0x00, 0x12, 0x4d, 0x0a, 0x10, 0x45, 0x78, 0x65,
- 0x63, 0x75, 0x74, 0x65, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x12, 0x18, 0x2e,
- 0x41, 0x65, 0x72, 0x6f, 0x73, 0x70, 0x69, 0x6b, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
- 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x1a, 0x19, 0x2e, 0x41, 0x65, 0x72, 0x6f, 0x73, 0x70,
- 0x69, 0x6b, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x50, 0x61, 0x79, 0x6c, 0x6f,
- 0x61, 0x64, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x12, 0x47, 0x0a, 0x0c, 0x42, 0x61, 0x74, 0x63,
- 0x68, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x65, 0x12, 0x18, 0x2e, 0x41, 0x65, 0x72, 0x6f, 0x73,
- 0x70, 0x69, 0x6b, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x79, 0x6c, 0x6f,
- 0x61, 0x64, 0x1a, 0x19, 0x2e, 0x41, 0x65, 0x72, 0x6f, 0x73, 0x70, 0x69, 0x6b, 0x65, 0x52, 0x65,
- 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x22, 0x00, 0x30,
- 0x01, 0x12, 0x52, 0x0a, 0x15, 0x42, 0x61, 0x74, 0x63, 0x68, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74,
- 0x65, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x12, 0x18, 0x2e, 0x41, 0x65, 0x72,
- 0x6f, 0x73, 0x70, 0x69, 0x6b, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x79,
- 0x6c, 0x6f, 0x61, 0x64, 0x1a, 0x19, 0x2e, 0x41, 0x65, 0x72, 0x6f, 0x73, 0x70, 0x69, 0x6b, 0x65,
- 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x22,
- 0x00, 0x28, 0x01, 0x30, 0x01, 0x32, 0x93, 0x01, 0x0a, 0x04, 0x53, 0x63, 0x61, 0x6e, 0x12, 0x3f,
- 0x0a, 0x04, 0x53, 0x63, 0x61, 0x6e, 0x12, 0x18, 0x2e, 0x41, 0x65, 0x72, 0x6f, 0x73, 0x70, 0x69,
- 0x6b, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64,
- 0x1a, 0x19, 0x2e, 0x41, 0x65, 0x72, 0x6f, 0x73, 0x70, 0x69, 0x6b, 0x65, 0x52, 0x65, 0x73, 0x70,
- 0x6f, 0x6e, 0x73, 0x65, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x22, 0x00, 0x30, 0x01, 0x12,
- 0x4a, 0x0a, 0x0d, 0x53, 0x63, 0x61, 0x6e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67,
- 0x12, 0x18, 0x2e, 0x41, 0x65, 0x72, 0x6f, 0x73, 0x70, 0x69, 0x6b, 0x65, 0x52, 0x65, 0x71, 0x75,
- 0x65, 0x73, 0x74, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x1a, 0x19, 0x2e, 0x41, 0x65, 0x72,
- 0x6f, 0x73, 0x70, 0x69, 0x6b, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x50, 0x61,
- 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x32, 0xea, 0x03, 0x0a, 0x05,
- 0x51, 0x75, 0x65, 0x72, 0x79, 0x12, 0x40, 0x0a, 0x05, 0x51, 0x75, 0x65, 0x72, 0x79, 0x12, 0x18,
- 0x2e, 0x41, 0x65, 0x72, 0x6f, 0x73, 0x70, 0x69, 0x6b, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
- 0x74, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x1a, 0x19, 0x2e, 0x41, 0x65, 0x72, 0x6f, 0x73,
- 0x70, 0x69, 0x6b, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x50, 0x61, 0x79, 0x6c,
- 0x6f, 0x61, 0x64, 0x22, 0x00, 0x30, 0x01, 0x12, 0x4b, 0x0a, 0x0e, 0x51, 0x75, 0x65, 0x72, 0x79,
- 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x12, 0x18, 0x2e, 0x41, 0x65, 0x72, 0x6f,
- 0x73, 0x70, 0x69, 0x6b, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x79, 0x6c,
- 0x6f, 0x61, 0x64, 0x1a, 0x19, 0x2e, 0x41, 0x65, 0x72, 0x6f, 0x73, 0x70, 0x69, 0x6b, 0x65, 0x52,
- 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x22, 0x00,
- 0x28, 0x01, 0x30, 0x01, 0x12, 0x4c, 0x0a, 0x11, 0x42, 0x61, 0x63, 0x6b, 0x67, 0x72, 0x6f, 0x75,
- 0x6e, 0x64, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x12, 0x18, 0x2e, 0x41, 0x65, 0x72, 0x6f,
- 0x73, 0x70, 0x69, 0x6b, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x79, 0x6c,
- 0x6f, 0x61, 0x64, 0x1a, 0x19, 0x2e, 0x41, 0x65, 0x72, 0x6f, 0x73, 0x70, 0x69, 0x6b, 0x65, 0x52,
- 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x22, 0x00,
- 0x30, 0x01, 0x12, 0x57, 0x0a, 0x1a, 0x42, 0x61, 0x63, 0x6b, 0x67, 0x72, 0x6f, 0x75, 0x6e, 0x64,
- 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67,
- 0x12, 0x18, 0x2e, 0x41, 0x65, 0x72, 0x6f, 0x73, 0x70, 0x69, 0x6b, 0x65, 0x52, 0x65, 0x71, 0x75,
- 0x65, 0x73, 0x74, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x1a, 0x19, 0x2e, 0x41, 0x65, 0x72,
- 0x6f, 0x73, 0x70, 0x69, 0x6b, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x50, 0x61,
- 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x12, 0x4f, 0x0a, 0x14, 0x42,
- 0x61, 0x63, 0x6b, 0x67, 0x72, 0x6f, 0x75, 0x6e, 0x64, 0x54, 0x61, 0x73, 0x6b, 0x53, 0x74, 0x61,
- 0x74, 0x75, 0x73, 0x12, 0x18, 0x2e, 0x41, 0x65, 0x72, 0x6f, 0x73, 0x70, 0x69, 0x6b, 0x65, 0x52,
- 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x1a, 0x19, 0x2e,
- 0x41, 0x65, 0x72, 0x6f, 0x73, 0x70, 0x69, 0x6b, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
- 0x65, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x22, 0x00, 0x30, 0x01, 0x12, 0x5a, 0x0a, 0x1d,
- 0x42, 0x61, 0x63, 0x6b, 0x67, 0x72, 0x6f, 0x75, 0x6e, 0x64, 0x54, 0x61, 0x73, 0x6b, 0x53, 0x74,
- 0x61, 0x74, 0x75, 0x73, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x12, 0x18, 0x2e,
- 0x41, 0x65, 0x72, 0x6f, 0x73, 0x70, 0x69, 0x6b, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
- 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x1a, 0x19, 0x2e, 0x41, 0x65, 0x72, 0x6f, 0x73, 0x70,
- 0x69, 0x6b, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x50, 0x61, 0x79, 0x6c, 0x6f,
- 0x61, 0x64, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x32, 0x45, 0x0a, 0x04, 0x49, 0x6e, 0x66, 0x6f,
- 0x12, 0x3d, 0x0a, 0x04, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x18, 0x2e, 0x41, 0x65, 0x72, 0x6f, 0x73,
- 0x70, 0x69, 0x6b, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x50, 0x61, 0x79, 0x6c, 0x6f,
- 0x61, 0x64, 0x1a, 0x19, 0x2e, 0x41, 0x65, 0x72, 0x6f, 0x73, 0x70, 0x69, 0x6b, 0x65, 0x52, 0x65,
- 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x22, 0x00, 0x42,
- 0x57, 0x0a, 0x1a, 0x63, 0x6f, 0x6d, 0x2e, 0x61, 0x65, 0x72, 0x6f, 0x73, 0x70, 0x69, 0x6b, 0x65,
- 0x2e, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5a, 0x39, 0x67,
- 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x65, 0x72, 0x6f, 0x73, 0x70,
- 0x69, 0x6b, 0x65, 0x2f, 0x61, 0x65, 0x72, 0x6f, 0x73, 0x70, 0x69, 0x6b, 0x65, 0x2d, 0x63, 0x6c,
- 0x69, 0x65, 0x6e, 0x74, 0x2d, 0x67, 0x6f, 0x2f, 0x76, 0x37, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f,
- 0x2f, 0x6b, 0x76, 0x73, 0x3b, 0x6b, 0x76, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
-}
-
-var (
- file_aerospike_proxy_kv_proto_rawDescOnce sync.Once
- file_aerospike_proxy_kv_proto_rawDescData = file_aerospike_proxy_kv_proto_rawDesc
-)
-
-func file_aerospike_proxy_kv_proto_rawDescGZIP() []byte {
- file_aerospike_proxy_kv_proto_rawDescOnce.Do(func() {
- file_aerospike_proxy_kv_proto_rawDescData = protoimpl.X.CompressGZIP(file_aerospike_proxy_kv_proto_rawDescData)
- })
- return file_aerospike_proxy_kv_proto_rawDescData
-}
-
-var file_aerospike_proxy_kv_proto_enumTypes = make([]protoimpl.EnumInfo, 10)
-var file_aerospike_proxy_kv_proto_msgTypes = make([]protoimpl.MessageInfo, 21)
-var file_aerospike_proxy_kv_proto_goTypes = []interface{}{
- (ReadModeAP)(0), // 0: ReadModeAP
- (ReadModeSC)(0), // 1: ReadModeSC
- (Replica)(0), // 2: Replica
- (QueryDuration)(0), // 3: QueryDuration
- (IndexCollectionType)(0), // 4: IndexCollectionType
- (OperationType)(0), // 5: OperationType
- (RecordExistsAction)(0), // 6: RecordExistsAction
- (GenerationPolicy)(0), // 7: GenerationPolicy
- (CommitLevel)(0), // 8: CommitLevel
- (BackgroundTaskStatus)(0), // 9: BackgroundTaskStatus
- (*AboutRequest)(nil), // 10: AboutRequest
- (*AboutResponse)(nil), // 11: AboutResponse
- (*ReadPolicy)(nil), // 12: ReadPolicy
- (*WritePolicy)(nil), // 13: WritePolicy
- (*AerospikeRequestPayload)(nil), // 14: AerospikeRequestPayload
- (*AerospikeResponsePayload)(nil), // 15: AerospikeResponsePayload
- (*ScanPolicy)(nil), // 16: ScanPolicy
- (*PartitionStatus)(nil), // 17: PartitionStatus
- (*PartitionFilter)(nil), // 18: PartitionFilter
- (*ScanRequest)(nil), // 19: ScanRequest
- (*QueryPolicy)(nil), // 20: QueryPolicy
- (*Filter)(nil), // 21: Filter
- (*Operation)(nil), // 22: Operation
- (*Statement)(nil), // 23: Statement
- (*QueryRequest)(nil), // 24: QueryRequest
- (*BackgroundExecutePolicy)(nil), // 25: BackgroundExecutePolicy
- (*BackgroundExecuteRequest)(nil), // 26: BackgroundExecuteRequest
- (*BackgroundTaskStatusRequest)(nil), // 27: BackgroundTaskStatusRequest
- (*AbortRequest)(nil), // 28: AbortRequest
- (*InfoPolicy)(nil), // 29: InfoPolicy
- (*InfoRequest)(nil), // 30: InfoRequest
-}
-var file_aerospike_proxy_kv_proto_depIdxs = []int32{
- 2, // 0: ReadPolicy.replica:type_name -> Replica
- 0, // 1: ReadPolicy.readModeAP:type_name -> ReadModeAP
- 1, // 2: ReadPolicy.readModeSC:type_name -> ReadModeSC
- 2, // 3: WritePolicy.replica:type_name -> Replica
- 0, // 4: WritePolicy.readModeAP:type_name -> ReadModeAP
- 1, // 5: WritePolicy.readModeSC:type_name -> ReadModeSC
- 12, // 6: AerospikeRequestPayload.readPolicy:type_name -> ReadPolicy
- 13, // 7: AerospikeRequestPayload.writePolicy:type_name -> WritePolicy
- 19, // 8: AerospikeRequestPayload.scanRequest:type_name -> ScanRequest
- 24, // 9: AerospikeRequestPayload.queryRequest:type_name -> QueryRequest
- 28, // 10: AerospikeRequestPayload.abortRequest:type_name -> AbortRequest
- 26, // 11: AerospikeRequestPayload.backgroundExecuteRequest:type_name -> BackgroundExecuteRequest
- 27, // 12: AerospikeRequestPayload.backgroundTaskStatusRequest:type_name -> BackgroundTaskStatusRequest
- 30, // 13: AerospikeRequestPayload.infoRequest:type_name -> InfoRequest
- 9, // 14: AerospikeResponsePayload.backgroundTaskStatus:type_name -> BackgroundTaskStatus
- 2, // 15: ScanPolicy.replica:type_name -> Replica
- 0, // 16: ScanPolicy.readModeAP:type_name -> ReadModeAP
- 1, // 17: ScanPolicy.readModeSC:type_name -> ReadModeSC
- 17, // 18: PartitionFilter.partitionStatuses:type_name -> PartitionStatus
- 16, // 19: ScanRequest.scanPolicy:type_name -> ScanPolicy
- 18, // 20: ScanRequest.partitionFilter:type_name -> PartitionFilter
- 2, // 21: QueryPolicy.replica:type_name -> Replica
- 0, // 22: QueryPolicy.readModeAP:type_name -> ReadModeAP
- 1, // 23: QueryPolicy.readModeSC:type_name -> ReadModeSC
- 3, // 24: QueryPolicy.expectedDuration:type_name -> QueryDuration
- 4, // 25: Filter.colType:type_name -> IndexCollectionType
- 5, // 26: Operation.type:type_name -> OperationType
- 21, // 27: Statement.filter:type_name -> Filter
- 22, // 28: Statement.operations:type_name -> Operation
- 20, // 29: QueryRequest.queryPolicy:type_name -> QueryPolicy
- 23, // 30: QueryRequest.statement:type_name -> Statement
- 18, // 31: QueryRequest.partitionFilter:type_name -> PartitionFilter
- 2, // 32: BackgroundExecutePolicy.replica:type_name -> Replica
- 0, // 33: BackgroundExecutePolicy.readModeAP:type_name -> ReadModeAP
- 1, // 34: BackgroundExecutePolicy.readModeSC:type_name -> ReadModeSC
- 6, // 35: BackgroundExecutePolicy.recordExistsAction:type_name -> RecordExistsAction
- 7, // 36: BackgroundExecutePolicy.generationPolicy:type_name -> GenerationPolicy
- 8, // 37: BackgroundExecutePolicy.commitLevel:type_name -> CommitLevel
- 25, // 38: BackgroundExecuteRequest.writePolicy:type_name -> BackgroundExecutePolicy
- 23, // 39: BackgroundExecuteRequest.statement:type_name -> Statement
- 29, // 40: InfoRequest.infoPolicy:type_name -> InfoPolicy
- 10, // 41: About.Get:input_type -> AboutRequest
- 14, // 42: KVS.Read:input_type -> AerospikeRequestPayload
- 14, // 43: KVS.ReadStreaming:input_type -> AerospikeRequestPayload
- 14, // 44: KVS.GetHeader:input_type -> AerospikeRequestPayload
- 14, // 45: KVS.GetHeaderStreaming:input_type -> AerospikeRequestPayload
- 14, // 46: KVS.Exists:input_type -> AerospikeRequestPayload
- 14, // 47: KVS.ExistsStreaming:input_type -> AerospikeRequestPayload
- 14, // 48: KVS.Write:input_type -> AerospikeRequestPayload
- 14, // 49: KVS.WriteStreaming:input_type -> AerospikeRequestPayload
- 14, // 50: KVS.Delete:input_type -> AerospikeRequestPayload
- 14, // 51: KVS.DeleteStreaming:input_type -> AerospikeRequestPayload
- 14, // 52: KVS.Touch:input_type -> AerospikeRequestPayload
- 14, // 53: KVS.TouchStreaming:input_type -> AerospikeRequestPayload
- 14, // 54: KVS.Operate:input_type -> AerospikeRequestPayload
- 14, // 55: KVS.OperateStreaming:input_type -> AerospikeRequestPayload
- 14, // 56: KVS.Execute:input_type -> AerospikeRequestPayload
- 14, // 57: KVS.ExecuteStreaming:input_type -> AerospikeRequestPayload
- 14, // 58: KVS.BatchOperate:input_type -> AerospikeRequestPayload
- 14, // 59: KVS.BatchOperateStreaming:input_type -> AerospikeRequestPayload
- 14, // 60: Scan.Scan:input_type -> AerospikeRequestPayload
- 14, // 61: Scan.ScanStreaming:input_type -> AerospikeRequestPayload
- 14, // 62: Query.Query:input_type -> AerospikeRequestPayload
- 14, // 63: Query.QueryStreaming:input_type -> AerospikeRequestPayload
- 14, // 64: Query.BackgroundExecute:input_type -> AerospikeRequestPayload
- 14, // 65: Query.BackgroundExecuteStreaming:input_type -> AerospikeRequestPayload
- 14, // 66: Query.BackgroundTaskStatus:input_type -> AerospikeRequestPayload
- 14, // 67: Query.BackgroundTaskStatusStreaming:input_type -> AerospikeRequestPayload
- 14, // 68: Info.Info:input_type -> AerospikeRequestPayload
- 11, // 69: About.Get:output_type -> AboutResponse
- 15, // 70: KVS.Read:output_type -> AerospikeResponsePayload
- 15, // 71: KVS.ReadStreaming:output_type -> AerospikeResponsePayload
- 15, // 72: KVS.GetHeader:output_type -> AerospikeResponsePayload
- 15, // 73: KVS.GetHeaderStreaming:output_type -> AerospikeResponsePayload
- 15, // 74: KVS.Exists:output_type -> AerospikeResponsePayload
- 15, // 75: KVS.ExistsStreaming:output_type -> AerospikeResponsePayload
- 15, // 76: KVS.Write:output_type -> AerospikeResponsePayload
- 15, // 77: KVS.WriteStreaming:output_type -> AerospikeResponsePayload
- 15, // 78: KVS.Delete:output_type -> AerospikeResponsePayload
- 15, // 79: KVS.DeleteStreaming:output_type -> AerospikeResponsePayload
- 15, // 80: KVS.Touch:output_type -> AerospikeResponsePayload
- 15, // 81: KVS.TouchStreaming:output_type -> AerospikeResponsePayload
- 15, // 82: KVS.Operate:output_type -> AerospikeResponsePayload
- 15, // 83: KVS.OperateStreaming:output_type -> AerospikeResponsePayload
- 15, // 84: KVS.Execute:output_type -> AerospikeResponsePayload
- 15, // 85: KVS.ExecuteStreaming:output_type -> AerospikeResponsePayload
- 15, // 86: KVS.BatchOperate:output_type -> AerospikeResponsePayload
- 15, // 87: KVS.BatchOperateStreaming:output_type -> AerospikeResponsePayload
- 15, // 88: Scan.Scan:output_type -> AerospikeResponsePayload
- 15, // 89: Scan.ScanStreaming:output_type -> AerospikeResponsePayload
- 15, // 90: Query.Query:output_type -> AerospikeResponsePayload
- 15, // 91: Query.QueryStreaming:output_type -> AerospikeResponsePayload
- 15, // 92: Query.BackgroundExecute:output_type -> AerospikeResponsePayload
- 15, // 93: Query.BackgroundExecuteStreaming:output_type -> AerospikeResponsePayload
- 15, // 94: Query.BackgroundTaskStatus:output_type -> AerospikeResponsePayload
- 15, // 95: Query.BackgroundTaskStatusStreaming:output_type -> AerospikeResponsePayload
- 15, // 96: Info.Info:output_type -> AerospikeResponsePayload
- 69, // [69:97] is the sub-list for method output_type
- 41, // [41:69] is the sub-list for method input_type
- 41, // [41:41] is the sub-list for extension type_name
- 41, // [41:41] is the sub-list for extension extendee
- 0, // [0:41] is the sub-list for field type_name
-}
-
-func init() { file_aerospike_proxy_kv_proto_init() }
-func file_aerospike_proxy_kv_proto_init() {
- if File_aerospike_proxy_kv_proto != nil {
- return
- }
- if !protoimpl.UnsafeEnabled {
- file_aerospike_proxy_kv_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*AboutRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_aerospike_proxy_kv_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*AboutResponse); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_aerospike_proxy_kv_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ReadPolicy); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_aerospike_proxy_kv_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*WritePolicy); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_aerospike_proxy_kv_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*AerospikeRequestPayload); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_aerospike_proxy_kv_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*AerospikeResponsePayload); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_aerospike_proxy_kv_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ScanPolicy); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_aerospike_proxy_kv_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*PartitionStatus); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_aerospike_proxy_kv_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*PartitionFilter); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_aerospike_proxy_kv_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*ScanRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_aerospike_proxy_kv_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*QueryPolicy); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_aerospike_proxy_kv_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Filter); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_aerospike_proxy_kv_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Operation); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_aerospike_proxy_kv_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*Statement); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_aerospike_proxy_kv_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*QueryRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_aerospike_proxy_kv_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*BackgroundExecutePolicy); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_aerospike_proxy_kv_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*BackgroundExecuteRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_aerospike_proxy_kv_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*BackgroundTaskStatusRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_aerospike_proxy_kv_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*AbortRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_aerospike_proxy_kv_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*InfoPolicy); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- file_aerospike_proxy_kv_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} {
- switch v := v.(*InfoRequest); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
- }
- file_aerospike_proxy_kv_proto_msgTypes[4].OneofWrappers = []interface{}{}
- file_aerospike_proxy_kv_proto_msgTypes[5].OneofWrappers = []interface{}{}
- file_aerospike_proxy_kv_proto_msgTypes[6].OneofWrappers = []interface{}{}
- file_aerospike_proxy_kv_proto_msgTypes[7].OneofWrappers = []interface{}{}
- file_aerospike_proxy_kv_proto_msgTypes[8].OneofWrappers = []interface{}{}
- file_aerospike_proxy_kv_proto_msgTypes[9].OneofWrappers = []interface{}{}
- file_aerospike_proxy_kv_proto_msgTypes[10].OneofWrappers = []interface{}{}
- file_aerospike_proxy_kv_proto_msgTypes[11].OneofWrappers = []interface{}{}
- file_aerospike_proxy_kv_proto_msgTypes[12].OneofWrappers = []interface{}{}
- file_aerospike_proxy_kv_proto_msgTypes[13].OneofWrappers = []interface{}{}
- file_aerospike_proxy_kv_proto_msgTypes[14].OneofWrappers = []interface{}{}
- file_aerospike_proxy_kv_proto_msgTypes[15].OneofWrappers = []interface{}{}
- file_aerospike_proxy_kv_proto_msgTypes[16].OneofWrappers = []interface{}{}
- file_aerospike_proxy_kv_proto_msgTypes[19].OneofWrappers = []interface{}{}
- file_aerospike_proxy_kv_proto_msgTypes[20].OneofWrappers = []interface{}{}
- type x struct{}
- out := protoimpl.TypeBuilder{
- File: protoimpl.DescBuilder{
- GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_aerospike_proxy_kv_proto_rawDesc,
- NumEnums: 10,
- NumMessages: 21,
- NumExtensions: 0,
- NumServices: 5,
- },
- GoTypes: file_aerospike_proxy_kv_proto_goTypes,
- DependencyIndexes: file_aerospike_proxy_kv_proto_depIdxs,
- EnumInfos: file_aerospike_proxy_kv_proto_enumTypes,
- MessageInfos: file_aerospike_proxy_kv_proto_msgTypes,
- }.Build()
- File_aerospike_proxy_kv_proto = out.File
- file_aerospike_proxy_kv_proto_rawDesc = nil
- file_aerospike_proxy_kv_proto_goTypes = nil
- file_aerospike_proxy_kv_proto_depIdxs = nil
-}
diff --git a/proto/kvs/aerospike_proxy_kv.proto b/proto/kvs/aerospike_proxy_kv.proto
deleted file mode 100644
index be32978c..00000000
--- a/proto/kvs/aerospike_proxy_kv.proto
+++ /dev/null
@@ -1,831 +0,0 @@
-syntax = "proto3";
-
-option go_package = "github.com/aerospike/aerospike-client-go/v7/proto/kvs;kvs";
-option java_package = "com.aerospike.proxy.client";
-
-
-// The about request message.
-message AboutRequest {
- // Empty for now.
-}
-
-// The about response message.
-message AboutResponse {
- // Proxy server version.
- string version = 1;
-}
-
-// Read policy for AP (availability) namespaces.
-// How duplicates should be consulted in a read operation.
-// Only makes a difference during migrations and only applicable in AP mode.
-enum ReadModeAP {
- // Involve single node in the read operation.
- ONE = 0;
-
- // Involve all duplicates in the read operation.
- ALL = 1;
-}
-
-// Read policy for SC (strong consistency) namespaces.
-// Determines SC read consistency options.
-enum ReadModeSC {
- // Ensures this client will only see an increasing sequence of record versions.
- // Server only reads from master. This is the default.
- SESSION = 0;
-
- // Ensures ALL clients will only see an increasing sequence of record versions.
- // Server only reads from master.
- LINEARIZE = 1;
-
- // Server may read from master or any full (non-migrating) replica.
- // Increasing sequence of record versions is not guaranteed.
- ALLOW_REPLICA = 2;
-
- // Server may read from master or any full (non-migrating) replica or from unavailable
- // partitions. Increasing sequence of record versions is not guaranteed.
- ALLOW_UNAVAILABLE = 3;
-}
-
-// Defines algorithm used to determine the target node for a command.
-// Scan and query are not affected by replica algorithm.
-//
-// Note: The enum ordinals do not match the Aerospike Client ordinals because
-// the default has to be ordinal zero in protobuf.
-enum Replica {
- // Try node containing master partition first.
- // If connection fails, all commands try nodes containing replicated partitions.
- // If socketTimeout is reached, reads also try nodes containing replicated partitions,
- // but writes remain on master node.
- SEQUENCE = 0;
-
- // Use node containing key's master partition.
- MASTER = 1;
-
- // Distribute reads across nodes containing key's master and replicated partitions
- // in round-robin fashion. Writes always use node containing key's master partition.
- MASTER_PROLES = 2;
-
- // Try node on the same rack as the client first. If timeout or there are no nodes on the
- // same rack, use SEQUENCE instead.
- PREFER_RACK = 3;
-
- // Distribute reads across all nodes in cluster in round-robin fashion.
- // Writes always use node containing key's master partition.
- // This option is useful when the replication factor equals the number
- // of nodes in the cluster and the overhead of requesting proles is not desired.
- RANDOM = 4;
-}
-
-enum QueryDuration {
- // The query is expected to return more than 100 records per node. The server optimizes for a large record set.
- LONG = 0;
-
- // The query is expected to return less than 100 records per node. The server optimizes for a small record set.
- SHORT = 1;
-
- // Treat query as a LONG query, but relax read consistency for AP namespaces.
- // This value is treated exactly like LONG for server versions < 7.1.
- LONG_RELAX_AP = 2;
-}
-
-// Read policy attributes used in read database commands that are not part of
-// the wire protocol.
-message ReadPolicy {
- // Read policy for AP (availability) namespaces.
- Replica replica = 1;
-
- // Read policy for SC (strong consistency) namespaces.
- ReadModeAP readModeAP = 2;
-
- // Replica algorithm used to determine the target node
- // for a single record command.
- // Scan and query are not affected by replica algorithms.
- ReadModeSC readModeSC = 3;
-}
-
-// Write policy attributes used in write database commands that are not part of
-// the wire protocol.
-message WritePolicy {
- // Read policy for AP (availability) namespaces.
- Replica replica = 1;
-
- // Read policy for SC (strong consistency) namespaces.
- ReadModeAP readModeAP = 2;
-
- // Replica algorithm used to determine the target node
- // for a single record command.
- // Scan and query are not affected by replica algorithms.
- ReadModeSC readModeSC = 3;
-}
-
-
-// The request message containing the user's name.
-message AerospikeRequestPayload {
- // Unique identifier of the request in the stream.
- uint32 id = 1;
-
- // Client iteration number starting at 1. On first attempt iteration should
- // be 1. On first retry iteration should be 2, on second retry iteration
- // should be 3, and so on.
- uint32 iteration = 2;
-
- // Aerospike wire format request payload.
- bytes payload = 3;
-
- // Read policy for read requests.
- optional ReadPolicy readPolicy = 4;
-
- // Write policy for write requests.
- optional WritePolicy writePolicy = 5;
-
- // Scan request for scan.
- optional ScanRequest scanRequest = 6;
-
- // Request for running a query.
- optional QueryRequest queryRequest = 7;
-
- // Abort a scan/query on application error.
- optional AbortRequest abortRequest = 8;
-
- // Request for executing operations background on matching records.
- optional BackgroundExecuteRequest backgroundExecuteRequest = 9;
-
- // Request for getting background task status.
- optional BackgroundTaskStatusRequest backgroundTaskStatusRequest = 10;
-
- // Info request
- optional InfoRequest infoRequest = 11;
-}
-
-// The request message containing the user's name.
-message AerospikeResponsePayload {
- // Unique identifier of the corresponding request in the stream.
- uint32 id = 1;
-
- // Status of the corresponding request.
- // if status equals 0
- // The proxy received a valid response from Aerospike. The payload's
- // result code should be used as the client result code.
- // else
- // The request failed at the proxy. This status should be used
- // as the client result code.
- int32 status = 2;
-
- // This flag indicates that the write transaction may have completed,
- // even though the client sees an error.
- bool inDoubt = 3;
-
- // Aerospike wire format request payload.
- bytes payload = 4;
-
- // For requests with multiple responses like batch and queries,
- // hasNext flag indicates if there are more responses to follow this
- // response or if this is the last response for this request.
- bool hasNext = 5;
-
- // Background task status, populated for background task request.
- optional BackgroundTaskStatus backgroundTaskStatus = 6;
-}
-
-// Information about the service.
-service About {
- rpc Get (AboutRequest) returns (AboutResponse) {}
-}
-
-// Aerospike KVS operations service
-service KVS {
- // Read a single record
- rpc Read (AerospikeRequestPayload) returns
- (AerospikeResponsePayload) {}
-
- // Process stream of single record read requests.
- rpc ReadStreaming (stream AerospikeRequestPayload) returns
- (stream AerospikeResponsePayload) {}
-
- // Get a single record header containing metadata like generation, expiration
- rpc GetHeader (AerospikeRequestPayload) returns
- (AerospikeResponsePayload) {}
-
- // Process stream of single record get header requests.
- rpc GetHeaderStreaming (stream AerospikeRequestPayload) returns
- (stream AerospikeResponsePayload) {}
-
- // Check if a record exists.
- rpc Exists (AerospikeRequestPayload) returns
- (AerospikeResponsePayload) {}
-
- // Process stream of single record exist requests.
- rpc ExistsStreaming (stream AerospikeRequestPayload) returns
- (stream AerospikeResponsePayload) {}
-
- // Write a single record
- rpc Write (AerospikeRequestPayload) returns
- (AerospikeResponsePayload) {}
-
- // Process a stream of single record write requests.
- rpc WriteStreaming (stream AerospikeRequestPayload) returns
- (stream AerospikeResponsePayload) {}
-
- // Delete a single record.
- rpc Delete (AerospikeRequestPayload) returns
- (AerospikeResponsePayload) {}
-
- // Process a stream of single record delete requests.
- rpc DeleteStreaming (stream AerospikeRequestPayload) returns
- (stream AerospikeResponsePayload) {}
-
- // Reset single record's time to expiration using the write policy's expiration.
- rpc Touch (AerospikeRequestPayload) returns
- (AerospikeResponsePayload) {}
-
- // Process a stream of single record touch requests.
- rpc TouchStreaming (stream AerospikeRequestPayload) returns
- (stream AerospikeResponsePayload) {}
-
- // Perform multiple read/write operations on a single key in one batch call.
- rpc Operate (AerospikeRequestPayload) returns
- (AerospikeResponsePayload) {}
-
- // Perform a stream of operate requests.
- rpc OperateStreaming (stream AerospikeRequestPayload) returns
- (stream AerospikeResponsePayload) {}
-
- // Execute single key user defined function on server and return results.
- rpc Execute (AerospikeRequestPayload) returns
- (AerospikeResponsePayload) {}
-
- // Process a stream of single record execute requests.
- rpc ExecuteStreaming (stream AerospikeRequestPayload) returns
- (stream AerospikeResponsePayload) {}
-
- // Process batch requests.
- rpc BatchOperate (AerospikeRequestPayload) returns
- (stream AerospikeResponsePayload) {}
-
- // Process a stream of batch requests.
- rpc BatchOperateStreaming (stream AerospikeRequestPayload) returns
- (stream AerospikeResponsePayload) {}
-}
-
-// Scan policy attributes used by queries.
-// Scan requests are send completely using proto buffers and hence include all policy attributes.
-message ScanPolicy {
- // Read policy for AP (availability) namespaces.
- Replica replica = 1;
-
- // Read policy for SC (strong consistency) namespaces.
- ReadModeAP readModeAP = 2;
-
- // Replica algorithm used to determine the target node
- // for a single record command.
- // Scan and scan are not affected by replica algorithms.
- ReadModeSC readModeSC = 3;
-
- // Use zlib compression on command buffers sent to the server and responses received
- // from the server when the buffer size is greater than 128 bytes.
- // This option will increase cpu and memory usage (for extra compressed buffers),but
- // decrease the size of data sent over the network.
- bool compress = 4;
-
- // Optional expression filter. If filterExp exists and evaluates to false, the
- // transaction is ignored.
- optional bytes expression = 5;
-
- // Total transaction timeout in milliseconds.
- // Default for all other commands: 1000ms
- optional uint32 totalTimeout = 6;
-
- // Approximate number of records to return to client. This number is divided by the
- // number of nodes involved in the scan. The actual number of records returned
- // may be less than maxRecords if node record counts are small and unbalanced across
- // nodes.
- // Default: 0 (do not limit record count)
- optional uint64 maxRecords = 7;
-
- // Limit returned records per second (rps) rate for each server.
- // Do not apply rps limit if recordsPerSecond is zero.
- // Default: 0
- optional uint32 recordsPerSecond = 8;
-
- // Should scan requests be issued in parallel.
- // Default: true
- optional bool concurrentNodes = 9;
-
- // Maximum number of concurrent requests to server nodes at any point in time.
- // If there are 16 nodes in the cluster and maxConcurrentNodes is 8, then queries
- // will be made to 8 nodes in parallel. When a scan completes, a new scan will
- // be issued until all 16 nodes have been queried.
- // Default: 0 (issue requests to all server nodes in parallel)
- optional uint32 maxConcurrentNodes = 10;
-
- // Should bin data be retrieved. If false, only record digests (and user keys
- // if stored on the server) are retrieved.
- // Default: true
- optional bool includeBinData = 11;
-}
-
-// Partition status used to perform partial scans on client side retries.
-message PartitionStatus {
- // The partition status.
- optional uint32 id = 1;
-
- // Begin value to start scanning / querying after.
- optional int64 bVal = 2;
-
- // Digest to start scanning / querying after.
- optional bytes digest = 3;
-
- // Indicates this partition should be tried.
- // Should be set to true for the first attempt as well.
- bool retry = 5;
-}
-
-// A partition filter for scans and queries.
-message PartitionFilter {
- // Start partition id.
- // Not required if the digest to start scanning from is specified.
- optional uint32 begin = 1;
-
- // The number of records to scan.
- uint32 count = 2;
-
- // Optional digest to start scanning from.
- optional bytes digest = 3;
-
- // Optional partition statuses used on retries to restart
- // from last known record for the partition.
- repeated PartitionStatus partitionStatuses = 4;
-
- // Indicates if all partitions in this filter should
- // be retried ignoring the partition status
- bool retry = 5;
-}
-
-// A scan request.
-message ScanRequest {
- // Optional scan policy.
- optional ScanPolicy scanPolicy = 1;
-
- // The namespace to scan.
- string namespace = 2;
-
- // Optional set name.
- optional string setName = 3;
-
- // Optional bin to retrieve. All bins will be returned
- // if not specified.
- repeated string binNames = 4;
-
- // Optional partition filter to selectively scan partitions.
- optional PartitionFilter partitionFilter = 5;
-}
-
-// Aerospike scan
-service Scan {
- // Scan Aerospike
- rpc Scan (AerospikeRequestPayload) returns
- (stream AerospikeResponsePayload) {}
-
- // Process a stream of scan requests
- rpc ScanStreaming (stream AerospikeRequestPayload) returns
- (stream AerospikeResponsePayload) {}
-}
-
-// Query policy attributes used by queries.
-// Query requests are send completely using proto buffers and hence include all policy attributes.
-message QueryPolicy {
- // Read policy for AP (availability) namespaces.
- Replica replica = 1;
-
- // Read policy for SC (strong consistency) namespaces.
- ReadModeAP readModeAP = 2;
-
- // Replica algorithm used to determine the target node
- // for a single record command.
- // Scan and query are not affected by replica algorithms.
- ReadModeSC readModeSC = 3;
-
- // Send user defined key in addition to hash digest on both reads and writes.
- // If the key is sent on a write, the key will be stored with the record on
- // the server.
- // Default: false (do not send the user defined key)
- optional bool sendKey = 4;
-
- // Use zlib compression on command buffers sent to the server and responses received
- // from the server when the buffer size is greater than 128 bytes.
- // This option will increase cpu and memory usage (for extra compressed buffers),but
- // decrease the size of data sent over the network.
- bool compress = 5;
-
- // Optional expression filter. If filterExp exists and evaluates to false, the
- // transaction is ignored.
- optional bytes expression = 6;
-
- // Total transaction timeout in milliseconds.
- // Default for all other commands: 1000ms
- optional uint32 totalTimeout = 7;
-
- // Maximum number of concurrent requests to server nodes at any point in time.
- // If there are 16 nodes in the cluster and maxConcurrentNodes is 8, then queries
- // will be made to 8 nodes in parallel. When a query completes, a new query will
- // be issued until all 16 nodes have been queried.
- // Default: 0 (issue requests to all server nodes in parallel)
- optional uint32 maxConcurrentNodes = 8;
-
- // Number of records to place in queue before blocking.
- // Records received from multiple server nodes will be placed in a queue.
- // A separate thread consumes these records in parallel.
- // If the queue is full, the producer threads will block until records are consumed.
- // Default: 5000
- optional uint32 recordQueueSize = 9;
-
- // Should bin data be retrieved. If false, only record digests (and user keys
- // if stored on the server) are retrieved.
- // Default: true
- optional bool includeBinData = 10;
-
- // Terminate query if cluster is in migration state. If the server supports partition
- // queries or the query filter is null (scan), this field is ignored.
- // Default: false
- optional bool failOnClusterChange = 11;
-
- // Deprecated, use expectedDuration instead.
- // Is query expected to return less than 100 records per node.
- // If true, the server will optimize the query for a small record set.
- // This field is ignored for aggregation queries, background queries
- // and server versions < 6.0.
- // Default: false
- optional bool shortQuery = 12;
-
- // Timeout in milliseconds for "cluster-stable" info command that is run when
- // failOnClusterChange is true and server version is less than 6.0.
- // Default: 1000
- optional uint32 infoTimeout = 13;
-
- // Expected query duration. The server treats the query in different ways depending on the expected duration.
- // This field is ignored for aggregation queries, background queries and server versions less than 6.0.
- // Default: QueryDuration.LONG
- optional QueryDuration expectedDuration = 14;
-}
-
-
-// Secondary index collection type.
-enum IndexCollectionType {
- // Normal scalar index.
- DEFAULT = 0;
-
- // Index list elements.
- LIST = 1;
-
- // Index map keys.
- MAPKEYS = 2;
-
- // Index map values.
- MAPVALUES = 3;
-}
-
-// Query statement filter
-message Filter {
- // Name of the filter.
- string name = 1;
-
- // Secondary index collection type.
- IndexCollectionType colType = 2;
-
- // Optional filter context packed in Aerospike format.
- optional bytes packedCtx = 3;
-
- // The queried column particle type.
- int32 valType = 4;
-
- // The Aerospike encoded query start "Value"
- optional bytes begin = 5 ;
-
- // The Aerospike encoded query end "Value"
- optional bytes end = 6;
-}
-
-enum OperationType {
- READ = 0;
- READ_HEADER = 1;
- WRITE = 2;
- CDT_READ = 3;
- CDT_MODIFY = 4;
- MAP_READ = 5;
- MAP_MODIFY = 6;
- ADD = 7;
- EXP_READ = 8;
- EXP_MODIFY = 9;
- APPEND = 10;
- PREPEND = 11;
- TOUCH = 12;
- BIT_READ = 13;
- BIT_MODIFY = 14;
- DELETE = 15;
- HLL_READ = 16;
- HLL_MODIFY = 17;
-}
-
-// Single record operation.
-message Operation {
- // The operation type.
- OperationType type = 1;
-
- // Optional bin name.
- optional string binName = 2;
-
- // Optional bin value.
- optional bytes value = 3;
-}
-
-// Query statement.
-message Statement {
- // The namespace to query.
- string namespace = 1;
-
- // Optional set name.
- optional string setName = 2;
-
- // Optional index name.
- optional string indexName = 3;
-
- // Optional bins names to return for each result record.
- // If not specified all bins are returned.
- repeated string binNames = 4;
-
- // Optional Filter encoded in Aerospike wire format.
- optional Filter filter = 5;
-
- // Aggregation file name.
- string packageName = 6;
-
- // Aggregation function name.
- string functionName = 7;
-
- // Aggregation function arguments encoded as bytes using Aerospike wire format.
- repeated bytes functionArgs = 8;
-
- // Operations to be performed on query encoded as bytes using Aerospike wire format.
- repeated Operation operations = 9;
-
- // Optional taskId.
- optional int64 taskId = 10;
-
- // Approximate number of records to return to client. This number is divided by the
- // number of nodes involved in the scan. The actual number of records returned
- // may be less than maxRecords if node record counts are small and unbalanced across
- // nodes.
- // Default: 0 (do not limit record count)
- optional uint64 maxRecords = 11;
-
- // Limit returned records per second (rps) rate for each server.
- // Do not apply rps limit if recordsPerSecond is zero.
- // Default: 0
- optional uint32 recordsPerSecond = 12;
-}
-
-// A query request.
-message QueryRequest {
- // Optional query policy.
- optional QueryPolicy queryPolicy = 1;
-
- // The query statement.
- Statement statement = 2;
-
- // Set to true for background queries.
- bool background = 3;
-
- // Optional partition filter to selectively query partitions.
- optional PartitionFilter partitionFilter = 4;
-}
-
-enum RecordExistsAction {
- // Create or update record.
- // Merge write command bins with existing bins.
- UPDATE = 0;
-
- // Update record only. Fail if record does not exist.
- // Merge write command bins with existing bins.
- UPDATE_ONLY = 1;
-
- // Create or replace record.
- // Delete existing bins not referenced by write command bins.
- // Supported by Aerospike server versions >= 3.1.6.
- REPLACE = 2;
-
- // Replace record only. Fail if record does not exist.
- // Delete existing bins not referenced by write command bins.
- // Supported by Aerospike server versions >= 3.1.6.
- REPLACE_ONLY = 3;
-
- // Create only. Fail if record exists.
- CREATE_ONLY = 4;
-}
-
-enum GenerationPolicy {
- // Do not use record generation to restrict writes.
- NONE = 0;
-
- // Update/delete record if expected generation is equal to server generation. Otherwise, fail.
- EXPECT_GEN_EQUAL = 1;
-
- // Update/delete record if expected generation greater than the server generation. Otherwise, fail.
- // This is useful for restore after backup.
- EXPECT_GEN_GT = 2;
-}
-
-enum CommitLevel {
- // Server should wait until successfully committing master and all replicas.
- COMMIT_ALL = 0;
-
- // Server should wait until successfully committing master only.
- COMMIT_MASTER = 1;
-}
-
-message BackgroundExecutePolicy {
- // Read policy for AP (availability) namespaces.
- Replica replica = 1;
-
- // Read policy for SC (strong consistency) namespaces.
- ReadModeAP readModeAP = 2;
-
- // Replica algorithm used to determine the target node
- // for a single record command.
- // Scan and scan are not affected by replica algorithms.
- ReadModeSC readModeSC = 3;
-
- // Use zlib compression on command buffers sent to the server and responses received
- // from the server when the buffer size is greater than 128 bytes.
- // This option will increase cpu and memory usage (for extra compressed buffers),but
- // decrease the size of data sent over the network.
- bool compress = 4;
-
- // Optional expression filter. If filterExp exists and evaluates to false, the
- // transaction is ignored.
- optional bytes expression = 5;
-
- // Total transaction timeout in milliseconds.
- // Default for all other commands: 1000ms
- optional uint32 totalTimeout = 6;
-
- // Send user defined key in addition to hash digest on both reads and writes.
- // If the key is sent on a write, the key will be stored with the record on
- // the server.
- //
- // Default: false (do not send the user defined key)
- optional bool sendKey = 7;
-
- // Qualify how to handle writes where the record already exists.
- //
- // Default: RecordExistsAction.UPDATE
- optional RecordExistsAction recordExistsAction = 8;
-
- // Qualify how to handle record writes based on record generation. The default (NONE)
- // indicates that the generation is not used to restrict writes.
- //
- // The server does not support this field for UDF execute() calls. The read-modify-write
- // usage model can still be enforced inside the UDF code itself.
- //
- // Default: GenerationPolicy.NONE
- optional GenerationPolicy generationPolicy = 9;
-
- // Desired consistency guarantee when committing a transaction on the server. The default
- // (COMMIT_ALL) indicates that the server should wait for master and all replica commits to
- // be successful before returning success to the client.
- //
- // Default: CommitLevel.COMMIT_ALL
- optional CommitLevel commitLevel = 10;
-
- // Expected generation. Generation is the number of times a record has been modified
- // (including creation) on the server. If a write operation is creating a record,
- // the expected generation would be 0. This field is only relevant when
- // generationPolicy is not NONE.
- //
- // The server does not support this field for UDF execute() calls. The read-modify-write
- // usage model can still be enforced inside the UDF code itself.
- //
- // Default: 0
- optional uint32 generation = 11;
-
- // Record expiration. Also known as ttl (time to live).
- // Seconds record will live before being removed by the server.
- //
- // Expiration values:
- //
- // - -2: Do not change ttl when record is updated.
- // - -1: Never expire.
- // - 0: Default to namespace configuration variable "default-ttl" on the server.
- // - > 0: Actual ttl in seconds.
- //
- // Default: 0
- optional uint32 expiration = 12;
-
- // For client operate(), return a result for every operation.
- //
- // Some operations do not return results by default (ListOperation.clear() for example).
- // This can make it difficult to determine the desired result offset in the returned
- // bin's result list.
- //
- // Setting respondAllOps to true makes it easier to identify the desired result offset
- // (result offset equals bin's operate sequence). If there is a map operation in operate(),
- // respondAllOps will be forced to true for that operate() call.
- //
- // Default: false
- optional bool respondAllOps = 13;
-
- // If the transaction results in a record deletion, leave a tombstone for the record.
- // This prevents deleted records from reappearing after node failures.
- // Valid for Aerospike Server Enterprise Edition 3.10+ only.
- //
- // Default: false (do not tombstone deleted records).
- optional bool durableDelete = 14;
-
- // Operate in XDR mode. Some external connectors may need to emulate an XDR client.
- // If enabled, an XDR bit is set for writes in the wire protocol.
- //
- // Default: false.
- optional bool xdr = 15;
-}
-
-message BackgroundExecuteRequest {
- // Background write policy
- optional BackgroundExecutePolicy writePolicy = 1;
-
- // The statement containing the UDF function reference
- // or the operations to be performed on matching record
- Statement statement = 2;
-}
-
-enum BackgroundTaskStatus {
- // Task not found.
- NOT_FOUND = 0;
-
- // Task in progress.
- IN_PROGRESS = 1;
-
- // Task completed.
- COMPLETE = 2;
-}
-
-message BackgroundTaskStatusRequest {
- // The id of the task.
- int64 taskId = 1;
-
- // If true indicates the task is a scan task else task is a query
- bool isScan = 2;
-}
-
-// Abort a request identified by id in the stream.
-message AbortRequest {
- // Unique identifier of the corresponding request in the stream to abort.
- // Not to be confused with the AbortRequest's id in the stream.
- uint32 abortId = 1;
-}
-
-// Aerospike queries
-service Query {
- // Query Aerospike
- rpc Query (AerospikeRequestPayload) returns
- (stream AerospikeResponsePayload) {}
-
- // Process a stream of query requests
- rpc QueryStreaming (stream AerospikeRequestPayload) returns
- (stream AerospikeResponsePayload) {}
-
- // Execute background write on selected records.
- rpc BackgroundExecute(AerospikeRequestPayload) returns
- (stream AerospikeResponsePayload) {}
-
- // Execute a stream of background write requests.
- rpc BackgroundExecuteStreaming(stream AerospikeRequestPayload) returns
- (stream AerospikeResponsePayload) {}
-
- // Get status of a background task.
- rpc BackgroundTaskStatus(AerospikeRequestPayload) returns
- (stream AerospikeResponsePayload) {}
-
- // Get status of a stream of background tasks.
- rpc BackgroundTaskStatusStreaming(stream AerospikeRequestPayload) returns
- (stream AerospikeResponsePayload) {}
-}
-
-// Info policy for info request
-message InfoPolicy {
- // Info command socket timeout in milliseconds.
- //
- // Default: 1000
- optional uint32 timeout = 1;
-}
-
-// Info request
-message InfoRequest {
- optional InfoPolicy infoPolicy = 1;
- repeated string commands = 2;
-}
-
-// Aerospike info requests
-service Info {
- // Send an info request
- rpc Info (AerospikeRequestPayload) returns
- (AerospikeResponsePayload) {}
-}
\ No newline at end of file
diff --git a/proto/kvs/aerospike_proxy_kv_grpc.pb.go b/proto/kvs/aerospike_proxy_kv_grpc.pb.go
deleted file mode 100644
index 7ee945e9..00000000
--- a/proto/kvs/aerospike_proxy_kv_grpc.pb.go
+++ /dev/null
@@ -1,1926 +0,0 @@
-// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
-// versions:
-// - protoc-gen-go-grpc v1.3.0
-// - protoc v5.27.1
-// source: proto/kvs/aerospike_proxy_kv.proto
-
-package kvs
-
-import (
- context "context"
- grpc "google.golang.org/grpc"
- codes "google.golang.org/grpc/codes"
- status "google.golang.org/grpc/status"
-)
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the grpc package it is being compiled against.
-// Requires gRPC-Go v1.32.0 or later.
-const _ = grpc.SupportPackageIsVersion7
-
-const (
- About_Get_FullMethodName = "/About/Get"
-)
-
-// AboutClient is the client API for About service.
-//
-// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
-type AboutClient interface {
- Get(ctx context.Context, in *AboutRequest, opts ...grpc.CallOption) (*AboutResponse, error)
-}
-
-type aboutClient struct {
- cc grpc.ClientConnInterface
-}
-
-func NewAboutClient(cc grpc.ClientConnInterface) AboutClient {
- return &aboutClient{cc}
-}
-
-func (c *aboutClient) Get(ctx context.Context, in *AboutRequest, opts ...grpc.CallOption) (*AboutResponse, error) {
- out := new(AboutResponse)
- err := c.cc.Invoke(ctx, About_Get_FullMethodName, in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-// AboutServer is the server API for About service.
-// All implementations must embed UnimplementedAboutServer
-// for forward compatibility
-type AboutServer interface {
- Get(context.Context, *AboutRequest) (*AboutResponse, error)
- mustEmbedUnimplementedAboutServer()
-}
-
-// UnimplementedAboutServer must be embedded to have forward compatible implementations.
-type UnimplementedAboutServer struct {
-}
-
-func (UnimplementedAboutServer) Get(context.Context, *AboutRequest) (*AboutResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method Get not implemented")
-}
-func (UnimplementedAboutServer) mustEmbedUnimplementedAboutServer() {}
-
-// UnsafeAboutServer may be embedded to opt out of forward compatibility for this service.
-// Use of this interface is not recommended, as added methods to AboutServer will
-// result in compilation errors.
-type UnsafeAboutServer interface {
- mustEmbedUnimplementedAboutServer()
-}
-
-func RegisterAboutServer(s grpc.ServiceRegistrar, srv AboutServer) {
- s.RegisterService(&About_ServiceDesc, srv)
-}
-
-func _About_Get_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(AboutRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(AboutServer).Get(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: About_Get_FullMethodName,
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(AboutServer).Get(ctx, req.(*AboutRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-// About_ServiceDesc is the grpc.ServiceDesc for About service.
-// It's only intended for direct use with grpc.RegisterService,
-// and not to be introspected or modified (even as a copy)
-var About_ServiceDesc = grpc.ServiceDesc{
- ServiceName: "About",
- HandlerType: (*AboutServer)(nil),
- Methods: []grpc.MethodDesc{
- {
- MethodName: "Get",
- Handler: _About_Get_Handler,
- },
- },
- Streams: []grpc.StreamDesc{},
- Metadata: "proto/kvs/aerospike_proxy_kv.proto",
-}
-
-const (
- KVS_Read_FullMethodName = "/KVS/Read"
- KVS_ReadStreaming_FullMethodName = "/KVS/ReadStreaming"
- KVS_GetHeader_FullMethodName = "/KVS/GetHeader"
- KVS_GetHeaderStreaming_FullMethodName = "/KVS/GetHeaderStreaming"
- KVS_Exists_FullMethodName = "/KVS/Exists"
- KVS_ExistsStreaming_FullMethodName = "/KVS/ExistsStreaming"
- KVS_Write_FullMethodName = "/KVS/Write"
- KVS_WriteStreaming_FullMethodName = "/KVS/WriteStreaming"
- KVS_Delete_FullMethodName = "/KVS/Delete"
- KVS_DeleteStreaming_FullMethodName = "/KVS/DeleteStreaming"
- KVS_Touch_FullMethodName = "/KVS/Touch"
- KVS_TouchStreaming_FullMethodName = "/KVS/TouchStreaming"
- KVS_Operate_FullMethodName = "/KVS/Operate"
- KVS_OperateStreaming_FullMethodName = "/KVS/OperateStreaming"
- KVS_Execute_FullMethodName = "/KVS/Execute"
- KVS_ExecuteStreaming_FullMethodName = "/KVS/ExecuteStreaming"
- KVS_BatchOperate_FullMethodName = "/KVS/BatchOperate"
- KVS_BatchOperateStreaming_FullMethodName = "/KVS/BatchOperateStreaming"
-)
-
-// KVSClient is the client API for KVS service.
-//
-// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
-type KVSClient interface {
- // Read a single record
- Read(ctx context.Context, in *AerospikeRequestPayload, opts ...grpc.CallOption) (*AerospikeResponsePayload, error)
- // Process stream of single record read requests.
- ReadStreaming(ctx context.Context, opts ...grpc.CallOption) (KVS_ReadStreamingClient, error)
- // Get a single record header containing metadata like generation, expiration
- GetHeader(ctx context.Context, in *AerospikeRequestPayload, opts ...grpc.CallOption) (*AerospikeResponsePayload, error)
- // Process stream of single record get header requests.
- GetHeaderStreaming(ctx context.Context, opts ...grpc.CallOption) (KVS_GetHeaderStreamingClient, error)
- // Check if a record exists.
- Exists(ctx context.Context, in *AerospikeRequestPayload, opts ...grpc.CallOption) (*AerospikeResponsePayload, error)
- // Process stream of single record exist requests.
- ExistsStreaming(ctx context.Context, opts ...grpc.CallOption) (KVS_ExistsStreamingClient, error)
- // Write a single record
- Write(ctx context.Context, in *AerospikeRequestPayload, opts ...grpc.CallOption) (*AerospikeResponsePayload, error)
- // Process a stream of single record write requests.
- WriteStreaming(ctx context.Context, opts ...grpc.CallOption) (KVS_WriteStreamingClient, error)
- // Delete a single record.
- Delete(ctx context.Context, in *AerospikeRequestPayload, opts ...grpc.CallOption) (*AerospikeResponsePayload, error)
- // Process a stream of single record delete requests.
- DeleteStreaming(ctx context.Context, opts ...grpc.CallOption) (KVS_DeleteStreamingClient, error)
- // Reset single record's time to expiration using the write policy's expiration.
- Touch(ctx context.Context, in *AerospikeRequestPayload, opts ...grpc.CallOption) (*AerospikeResponsePayload, error)
- // Process a stream of single record touch requests.
- TouchStreaming(ctx context.Context, opts ...grpc.CallOption) (KVS_TouchStreamingClient, error)
- // Perform multiple read/write operations on a single key in one batch call.
- Operate(ctx context.Context, in *AerospikeRequestPayload, opts ...grpc.CallOption) (*AerospikeResponsePayload, error)
- // Perform a stream of operate requests.
- OperateStreaming(ctx context.Context, opts ...grpc.CallOption) (KVS_OperateStreamingClient, error)
- // Execute single key user defined function on server and return results.
- Execute(ctx context.Context, in *AerospikeRequestPayload, opts ...grpc.CallOption) (*AerospikeResponsePayload, error)
- // Process a stream of single record execute requests.
- ExecuteStreaming(ctx context.Context, opts ...grpc.CallOption) (KVS_ExecuteStreamingClient, error)
- // Process batch requests.
- BatchOperate(ctx context.Context, in *AerospikeRequestPayload, opts ...grpc.CallOption) (KVS_BatchOperateClient, error)
- // Process a stream of batch requests.
- BatchOperateStreaming(ctx context.Context, opts ...grpc.CallOption) (KVS_BatchOperateStreamingClient, error)
-}
-
-type kVSClient struct {
- cc grpc.ClientConnInterface
-}
-
-func NewKVSClient(cc grpc.ClientConnInterface) KVSClient {
- return &kVSClient{cc}
-}
-
-func (c *kVSClient) Read(ctx context.Context, in *AerospikeRequestPayload, opts ...grpc.CallOption) (*AerospikeResponsePayload, error) {
- out := new(AerospikeResponsePayload)
- err := c.cc.Invoke(ctx, KVS_Read_FullMethodName, in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *kVSClient) ReadStreaming(ctx context.Context, opts ...grpc.CallOption) (KVS_ReadStreamingClient, error) {
- stream, err := c.cc.NewStream(ctx, &KVS_ServiceDesc.Streams[0], KVS_ReadStreaming_FullMethodName, opts...)
- if err != nil {
- return nil, err
- }
- x := &kVSReadStreamingClient{stream}
- return x, nil
-}
-
-type KVS_ReadStreamingClient interface {
- Send(*AerospikeRequestPayload) error
- Recv() (*AerospikeResponsePayload, error)
- grpc.ClientStream
-}
-
-type kVSReadStreamingClient struct {
- grpc.ClientStream
-}
-
-func (x *kVSReadStreamingClient) Send(m *AerospikeRequestPayload) error {
- return x.ClientStream.SendMsg(m)
-}
-
-func (x *kVSReadStreamingClient) Recv() (*AerospikeResponsePayload, error) {
- m := new(AerospikeResponsePayload)
- if err := x.ClientStream.RecvMsg(m); err != nil {
- return nil, err
- }
- return m, nil
-}
-
-func (c *kVSClient) GetHeader(ctx context.Context, in *AerospikeRequestPayload, opts ...grpc.CallOption) (*AerospikeResponsePayload, error) {
- out := new(AerospikeResponsePayload)
- err := c.cc.Invoke(ctx, KVS_GetHeader_FullMethodName, in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *kVSClient) GetHeaderStreaming(ctx context.Context, opts ...grpc.CallOption) (KVS_GetHeaderStreamingClient, error) {
- stream, err := c.cc.NewStream(ctx, &KVS_ServiceDesc.Streams[1], KVS_GetHeaderStreaming_FullMethodName, opts...)
- if err != nil {
- return nil, err
- }
- x := &kVSGetHeaderStreamingClient{stream}
- return x, nil
-}
-
-type KVS_GetHeaderStreamingClient interface {
- Send(*AerospikeRequestPayload) error
- Recv() (*AerospikeResponsePayload, error)
- grpc.ClientStream
-}
-
-type kVSGetHeaderStreamingClient struct {
- grpc.ClientStream
-}
-
-func (x *kVSGetHeaderStreamingClient) Send(m *AerospikeRequestPayload) error {
- return x.ClientStream.SendMsg(m)
-}
-
-func (x *kVSGetHeaderStreamingClient) Recv() (*AerospikeResponsePayload, error) {
- m := new(AerospikeResponsePayload)
- if err := x.ClientStream.RecvMsg(m); err != nil {
- return nil, err
- }
- return m, nil
-}
-
-func (c *kVSClient) Exists(ctx context.Context, in *AerospikeRequestPayload, opts ...grpc.CallOption) (*AerospikeResponsePayload, error) {
- out := new(AerospikeResponsePayload)
- err := c.cc.Invoke(ctx, KVS_Exists_FullMethodName, in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *kVSClient) ExistsStreaming(ctx context.Context, opts ...grpc.CallOption) (KVS_ExistsStreamingClient, error) {
- stream, err := c.cc.NewStream(ctx, &KVS_ServiceDesc.Streams[2], KVS_ExistsStreaming_FullMethodName, opts...)
- if err != nil {
- return nil, err
- }
- x := &kVSExistsStreamingClient{stream}
- return x, nil
-}
-
-type KVS_ExistsStreamingClient interface {
- Send(*AerospikeRequestPayload) error
- Recv() (*AerospikeResponsePayload, error)
- grpc.ClientStream
-}
-
-type kVSExistsStreamingClient struct {
- grpc.ClientStream
-}
-
-func (x *kVSExistsStreamingClient) Send(m *AerospikeRequestPayload) error {
- return x.ClientStream.SendMsg(m)
-}
-
-func (x *kVSExistsStreamingClient) Recv() (*AerospikeResponsePayload, error) {
- m := new(AerospikeResponsePayload)
- if err := x.ClientStream.RecvMsg(m); err != nil {
- return nil, err
- }
- return m, nil
-}
-
-func (c *kVSClient) Write(ctx context.Context, in *AerospikeRequestPayload, opts ...grpc.CallOption) (*AerospikeResponsePayload, error) {
- out := new(AerospikeResponsePayload)
- err := c.cc.Invoke(ctx, KVS_Write_FullMethodName, in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *kVSClient) WriteStreaming(ctx context.Context, opts ...grpc.CallOption) (KVS_WriteStreamingClient, error) {
- stream, err := c.cc.NewStream(ctx, &KVS_ServiceDesc.Streams[3], KVS_WriteStreaming_FullMethodName, opts...)
- if err != nil {
- return nil, err
- }
- x := &kVSWriteStreamingClient{stream}
- return x, nil
-}
-
-type KVS_WriteStreamingClient interface {
- Send(*AerospikeRequestPayload) error
- Recv() (*AerospikeResponsePayload, error)
- grpc.ClientStream
-}
-
-type kVSWriteStreamingClient struct {
- grpc.ClientStream
-}
-
-func (x *kVSWriteStreamingClient) Send(m *AerospikeRequestPayload) error {
- return x.ClientStream.SendMsg(m)
-}
-
-func (x *kVSWriteStreamingClient) Recv() (*AerospikeResponsePayload, error) {
- m := new(AerospikeResponsePayload)
- if err := x.ClientStream.RecvMsg(m); err != nil {
- return nil, err
- }
- return m, nil
-}
-
-func (c *kVSClient) Delete(ctx context.Context, in *AerospikeRequestPayload, opts ...grpc.CallOption) (*AerospikeResponsePayload, error) {
- out := new(AerospikeResponsePayload)
- err := c.cc.Invoke(ctx, KVS_Delete_FullMethodName, in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *kVSClient) DeleteStreaming(ctx context.Context, opts ...grpc.CallOption) (KVS_DeleteStreamingClient, error) {
- stream, err := c.cc.NewStream(ctx, &KVS_ServiceDesc.Streams[4], KVS_DeleteStreaming_FullMethodName, opts...)
- if err != nil {
- return nil, err
- }
- x := &kVSDeleteStreamingClient{stream}
- return x, nil
-}
-
-type KVS_DeleteStreamingClient interface {
- Send(*AerospikeRequestPayload) error
- Recv() (*AerospikeResponsePayload, error)
- grpc.ClientStream
-}
-
-type kVSDeleteStreamingClient struct {
- grpc.ClientStream
-}
-
-func (x *kVSDeleteStreamingClient) Send(m *AerospikeRequestPayload) error {
- return x.ClientStream.SendMsg(m)
-}
-
-func (x *kVSDeleteStreamingClient) Recv() (*AerospikeResponsePayload, error) {
- m := new(AerospikeResponsePayload)
- if err := x.ClientStream.RecvMsg(m); err != nil {
- return nil, err
- }
- return m, nil
-}
-
-func (c *kVSClient) Touch(ctx context.Context, in *AerospikeRequestPayload, opts ...grpc.CallOption) (*AerospikeResponsePayload, error) {
- out := new(AerospikeResponsePayload)
- err := c.cc.Invoke(ctx, KVS_Touch_FullMethodName, in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *kVSClient) TouchStreaming(ctx context.Context, opts ...grpc.CallOption) (KVS_TouchStreamingClient, error) {
- stream, err := c.cc.NewStream(ctx, &KVS_ServiceDesc.Streams[5], KVS_TouchStreaming_FullMethodName, opts...)
- if err != nil {
- return nil, err
- }
- x := &kVSTouchStreamingClient{stream}
- return x, nil
-}
-
-type KVS_TouchStreamingClient interface {
- Send(*AerospikeRequestPayload) error
- Recv() (*AerospikeResponsePayload, error)
- grpc.ClientStream
-}
-
-type kVSTouchStreamingClient struct {
- grpc.ClientStream
-}
-
-func (x *kVSTouchStreamingClient) Send(m *AerospikeRequestPayload) error {
- return x.ClientStream.SendMsg(m)
-}
-
-func (x *kVSTouchStreamingClient) Recv() (*AerospikeResponsePayload, error) {
- m := new(AerospikeResponsePayload)
- if err := x.ClientStream.RecvMsg(m); err != nil {
- return nil, err
- }
- return m, nil
-}
-
-func (c *kVSClient) Operate(ctx context.Context, in *AerospikeRequestPayload, opts ...grpc.CallOption) (*AerospikeResponsePayload, error) {
- out := new(AerospikeResponsePayload)
- err := c.cc.Invoke(ctx, KVS_Operate_FullMethodName, in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *kVSClient) OperateStreaming(ctx context.Context, opts ...grpc.CallOption) (KVS_OperateStreamingClient, error) {
- stream, err := c.cc.NewStream(ctx, &KVS_ServiceDesc.Streams[6], KVS_OperateStreaming_FullMethodName, opts...)
- if err != nil {
- return nil, err
- }
- x := &kVSOperateStreamingClient{stream}
- return x, nil
-}
-
-type KVS_OperateStreamingClient interface {
- Send(*AerospikeRequestPayload) error
- Recv() (*AerospikeResponsePayload, error)
- grpc.ClientStream
-}
-
-type kVSOperateStreamingClient struct {
- grpc.ClientStream
-}
-
-func (x *kVSOperateStreamingClient) Send(m *AerospikeRequestPayload) error {
- return x.ClientStream.SendMsg(m)
-}
-
-func (x *kVSOperateStreamingClient) Recv() (*AerospikeResponsePayload, error) {
- m := new(AerospikeResponsePayload)
- if err := x.ClientStream.RecvMsg(m); err != nil {
- return nil, err
- }
- return m, nil
-}
-
-func (c *kVSClient) Execute(ctx context.Context, in *AerospikeRequestPayload, opts ...grpc.CallOption) (*AerospikeResponsePayload, error) {
- out := new(AerospikeResponsePayload)
- err := c.cc.Invoke(ctx, KVS_Execute_FullMethodName, in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-func (c *kVSClient) ExecuteStreaming(ctx context.Context, opts ...grpc.CallOption) (KVS_ExecuteStreamingClient, error) {
- stream, err := c.cc.NewStream(ctx, &KVS_ServiceDesc.Streams[7], KVS_ExecuteStreaming_FullMethodName, opts...)
- if err != nil {
- return nil, err
- }
- x := &kVSExecuteStreamingClient{stream}
- return x, nil
-}
-
-type KVS_ExecuteStreamingClient interface {
- Send(*AerospikeRequestPayload) error
- Recv() (*AerospikeResponsePayload, error)
- grpc.ClientStream
-}
-
-type kVSExecuteStreamingClient struct {
- grpc.ClientStream
-}
-
-func (x *kVSExecuteStreamingClient) Send(m *AerospikeRequestPayload) error {
- return x.ClientStream.SendMsg(m)
-}
-
-func (x *kVSExecuteStreamingClient) Recv() (*AerospikeResponsePayload, error) {
- m := new(AerospikeResponsePayload)
- if err := x.ClientStream.RecvMsg(m); err != nil {
- return nil, err
- }
- return m, nil
-}
-
-func (c *kVSClient) BatchOperate(ctx context.Context, in *AerospikeRequestPayload, opts ...grpc.CallOption) (KVS_BatchOperateClient, error) {
- stream, err := c.cc.NewStream(ctx, &KVS_ServiceDesc.Streams[8], KVS_BatchOperate_FullMethodName, opts...)
- if err != nil {
- return nil, err
- }
- x := &kVSBatchOperateClient{stream}
- if err := x.ClientStream.SendMsg(in); err != nil {
- return nil, err
- }
- if err := x.ClientStream.CloseSend(); err != nil {
- return nil, err
- }
- return x, nil
-}
-
-type KVS_BatchOperateClient interface {
- Recv() (*AerospikeResponsePayload, error)
- grpc.ClientStream
-}
-
-type kVSBatchOperateClient struct {
- grpc.ClientStream
-}
-
-func (x *kVSBatchOperateClient) Recv() (*AerospikeResponsePayload, error) {
- m := new(AerospikeResponsePayload)
- if err := x.ClientStream.RecvMsg(m); err != nil {
- return nil, err
- }
- return m, nil
-}
-
-func (c *kVSClient) BatchOperateStreaming(ctx context.Context, opts ...grpc.CallOption) (KVS_BatchOperateStreamingClient, error) {
- stream, err := c.cc.NewStream(ctx, &KVS_ServiceDesc.Streams[9], KVS_BatchOperateStreaming_FullMethodName, opts...)
- if err != nil {
- return nil, err
- }
- x := &kVSBatchOperateStreamingClient{stream}
- return x, nil
-}
-
-type KVS_BatchOperateStreamingClient interface {
- Send(*AerospikeRequestPayload) error
- Recv() (*AerospikeResponsePayload, error)
- grpc.ClientStream
-}
-
-type kVSBatchOperateStreamingClient struct {
- grpc.ClientStream
-}
-
-func (x *kVSBatchOperateStreamingClient) Send(m *AerospikeRequestPayload) error {
- return x.ClientStream.SendMsg(m)
-}
-
-func (x *kVSBatchOperateStreamingClient) Recv() (*AerospikeResponsePayload, error) {
- m := new(AerospikeResponsePayload)
- if err := x.ClientStream.RecvMsg(m); err != nil {
- return nil, err
- }
- return m, nil
-}
-
-// KVSServer is the server API for KVS service.
-// All implementations must embed UnimplementedKVSServer
-// for forward compatibility
-type KVSServer interface {
- // Read a single record
- Read(context.Context, *AerospikeRequestPayload) (*AerospikeResponsePayload, error)
- // Process stream of single record read requests.
- ReadStreaming(KVS_ReadStreamingServer) error
- // Get a single record header containing metadata like generation, expiration
- GetHeader(context.Context, *AerospikeRequestPayload) (*AerospikeResponsePayload, error)
- // Process stream of single record get header requests.
- GetHeaderStreaming(KVS_GetHeaderStreamingServer) error
- // Check if a record exists.
- Exists(context.Context, *AerospikeRequestPayload) (*AerospikeResponsePayload, error)
- // Process stream of single record exist requests.
- ExistsStreaming(KVS_ExistsStreamingServer) error
- // Write a single record
- Write(context.Context, *AerospikeRequestPayload) (*AerospikeResponsePayload, error)
- // Process a stream of single record write requests.
- WriteStreaming(KVS_WriteStreamingServer) error
- // Delete a single record.
- Delete(context.Context, *AerospikeRequestPayload) (*AerospikeResponsePayload, error)
- // Process a stream of single record delete requests.
- DeleteStreaming(KVS_DeleteStreamingServer) error
- // Reset single record's time to expiration using the write policy's expiration.
- Touch(context.Context, *AerospikeRequestPayload) (*AerospikeResponsePayload, error)
- // Process a stream of single record touch requests.
- TouchStreaming(KVS_TouchStreamingServer) error
- // Perform multiple read/write operations on a single key in one batch call.
- Operate(context.Context, *AerospikeRequestPayload) (*AerospikeResponsePayload, error)
- // Perform a stream of operate requests.
- OperateStreaming(KVS_OperateStreamingServer) error
- // Execute single key user defined function on server and return results.
- Execute(context.Context, *AerospikeRequestPayload) (*AerospikeResponsePayload, error)
- // Process a stream of single record execute requests.
- ExecuteStreaming(KVS_ExecuteStreamingServer) error
- // Process batch requests.
- BatchOperate(*AerospikeRequestPayload, KVS_BatchOperateServer) error
- // Process a stream of batch requests.
- BatchOperateStreaming(KVS_BatchOperateStreamingServer) error
- mustEmbedUnimplementedKVSServer()
-}
-
-// UnimplementedKVSServer must be embedded to have forward compatible implementations.
-type UnimplementedKVSServer struct {
-}
-
-func (UnimplementedKVSServer) Read(context.Context, *AerospikeRequestPayload) (*AerospikeResponsePayload, error) {
- return nil, status.Errorf(codes.Unimplemented, "method Read not implemented")
-}
-func (UnimplementedKVSServer) ReadStreaming(KVS_ReadStreamingServer) error {
- return status.Errorf(codes.Unimplemented, "method ReadStreaming not implemented")
-}
-func (UnimplementedKVSServer) GetHeader(context.Context, *AerospikeRequestPayload) (*AerospikeResponsePayload, error) {
- return nil, status.Errorf(codes.Unimplemented, "method GetHeader not implemented")
-}
-func (UnimplementedKVSServer) GetHeaderStreaming(KVS_GetHeaderStreamingServer) error {
- return status.Errorf(codes.Unimplemented, "method GetHeaderStreaming not implemented")
-}
-func (UnimplementedKVSServer) Exists(context.Context, *AerospikeRequestPayload) (*AerospikeResponsePayload, error) {
- return nil, status.Errorf(codes.Unimplemented, "method Exists not implemented")
-}
-func (UnimplementedKVSServer) ExistsStreaming(KVS_ExistsStreamingServer) error {
- return status.Errorf(codes.Unimplemented, "method ExistsStreaming not implemented")
-}
-func (UnimplementedKVSServer) Write(context.Context, *AerospikeRequestPayload) (*AerospikeResponsePayload, error) {
- return nil, status.Errorf(codes.Unimplemented, "method Write not implemented")
-}
-func (UnimplementedKVSServer) WriteStreaming(KVS_WriteStreamingServer) error {
- return status.Errorf(codes.Unimplemented, "method WriteStreaming not implemented")
-}
-func (UnimplementedKVSServer) Delete(context.Context, *AerospikeRequestPayload) (*AerospikeResponsePayload, error) {
- return nil, status.Errorf(codes.Unimplemented, "method Delete not implemented")
-}
-func (UnimplementedKVSServer) DeleteStreaming(KVS_DeleteStreamingServer) error {
- return status.Errorf(codes.Unimplemented, "method DeleteStreaming not implemented")
-}
-func (UnimplementedKVSServer) Touch(context.Context, *AerospikeRequestPayload) (*AerospikeResponsePayload, error) {
- return nil, status.Errorf(codes.Unimplemented, "method Touch not implemented")
-}
-func (UnimplementedKVSServer) TouchStreaming(KVS_TouchStreamingServer) error {
- return status.Errorf(codes.Unimplemented, "method TouchStreaming not implemented")
-}
-func (UnimplementedKVSServer) Operate(context.Context, *AerospikeRequestPayload) (*AerospikeResponsePayload, error) {
- return nil, status.Errorf(codes.Unimplemented, "method Operate not implemented")
-}
-func (UnimplementedKVSServer) OperateStreaming(KVS_OperateStreamingServer) error {
- return status.Errorf(codes.Unimplemented, "method OperateStreaming not implemented")
-}
-func (UnimplementedKVSServer) Execute(context.Context, *AerospikeRequestPayload) (*AerospikeResponsePayload, error) {
- return nil, status.Errorf(codes.Unimplemented, "method Execute not implemented")
-}
-func (UnimplementedKVSServer) ExecuteStreaming(KVS_ExecuteStreamingServer) error {
- return status.Errorf(codes.Unimplemented, "method ExecuteStreaming not implemented")
-}
-func (UnimplementedKVSServer) BatchOperate(*AerospikeRequestPayload, KVS_BatchOperateServer) error {
- return status.Errorf(codes.Unimplemented, "method BatchOperate not implemented")
-}
-func (UnimplementedKVSServer) BatchOperateStreaming(KVS_BatchOperateStreamingServer) error {
- return status.Errorf(codes.Unimplemented, "method BatchOperateStreaming not implemented")
-}
-func (UnimplementedKVSServer) mustEmbedUnimplementedKVSServer() {}
-
-// UnsafeKVSServer may be embedded to opt out of forward compatibility for this service.
-// Use of this interface is not recommended, as added methods to KVSServer will
-// result in compilation errors.
-type UnsafeKVSServer interface {
- mustEmbedUnimplementedKVSServer()
-}
-
-func RegisterKVSServer(s grpc.ServiceRegistrar, srv KVSServer) {
- s.RegisterService(&KVS_ServiceDesc, srv)
-}
-
-func _KVS_Read_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(AerospikeRequestPayload)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(KVSServer).Read(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: KVS_Read_FullMethodName,
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(KVSServer).Read(ctx, req.(*AerospikeRequestPayload))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _KVS_ReadStreaming_Handler(srv interface{}, stream grpc.ServerStream) error {
- return srv.(KVSServer).ReadStreaming(&kVSReadStreamingServer{stream})
-}
-
-type KVS_ReadStreamingServer interface {
- Send(*AerospikeResponsePayload) error
- Recv() (*AerospikeRequestPayload, error)
- grpc.ServerStream
-}
-
-type kVSReadStreamingServer struct {
- grpc.ServerStream
-}
-
-func (x *kVSReadStreamingServer) Send(m *AerospikeResponsePayload) error {
- return x.ServerStream.SendMsg(m)
-}
-
-func (x *kVSReadStreamingServer) Recv() (*AerospikeRequestPayload, error) {
- m := new(AerospikeRequestPayload)
- if err := x.ServerStream.RecvMsg(m); err != nil {
- return nil, err
- }
- return m, nil
-}
-
-func _KVS_GetHeader_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(AerospikeRequestPayload)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(KVSServer).GetHeader(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: KVS_GetHeader_FullMethodName,
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(KVSServer).GetHeader(ctx, req.(*AerospikeRequestPayload))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _KVS_GetHeaderStreaming_Handler(srv interface{}, stream grpc.ServerStream) error {
- return srv.(KVSServer).GetHeaderStreaming(&kVSGetHeaderStreamingServer{stream})
-}
-
-type KVS_GetHeaderStreamingServer interface {
- Send(*AerospikeResponsePayload) error
- Recv() (*AerospikeRequestPayload, error)
- grpc.ServerStream
-}
-
-type kVSGetHeaderStreamingServer struct {
- grpc.ServerStream
-}
-
-func (x *kVSGetHeaderStreamingServer) Send(m *AerospikeResponsePayload) error {
- return x.ServerStream.SendMsg(m)
-}
-
-func (x *kVSGetHeaderStreamingServer) Recv() (*AerospikeRequestPayload, error) {
- m := new(AerospikeRequestPayload)
- if err := x.ServerStream.RecvMsg(m); err != nil {
- return nil, err
- }
- return m, nil
-}
-
-func _KVS_Exists_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(AerospikeRequestPayload)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(KVSServer).Exists(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: KVS_Exists_FullMethodName,
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(KVSServer).Exists(ctx, req.(*AerospikeRequestPayload))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _KVS_ExistsStreaming_Handler(srv interface{}, stream grpc.ServerStream) error {
- return srv.(KVSServer).ExistsStreaming(&kVSExistsStreamingServer{stream})
-}
-
-type KVS_ExistsStreamingServer interface {
- Send(*AerospikeResponsePayload) error
- Recv() (*AerospikeRequestPayload, error)
- grpc.ServerStream
-}
-
-type kVSExistsStreamingServer struct {
- grpc.ServerStream
-}
-
-func (x *kVSExistsStreamingServer) Send(m *AerospikeResponsePayload) error {
- return x.ServerStream.SendMsg(m)
-}
-
-func (x *kVSExistsStreamingServer) Recv() (*AerospikeRequestPayload, error) {
- m := new(AerospikeRequestPayload)
- if err := x.ServerStream.RecvMsg(m); err != nil {
- return nil, err
- }
- return m, nil
-}
-
-func _KVS_Write_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(AerospikeRequestPayload)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(KVSServer).Write(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: KVS_Write_FullMethodName,
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(KVSServer).Write(ctx, req.(*AerospikeRequestPayload))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _KVS_WriteStreaming_Handler(srv interface{}, stream grpc.ServerStream) error {
- return srv.(KVSServer).WriteStreaming(&kVSWriteStreamingServer{stream})
-}
-
-type KVS_WriteStreamingServer interface {
- Send(*AerospikeResponsePayload) error
- Recv() (*AerospikeRequestPayload, error)
- grpc.ServerStream
-}
-
-type kVSWriteStreamingServer struct {
- grpc.ServerStream
-}
-
-func (x *kVSWriteStreamingServer) Send(m *AerospikeResponsePayload) error {
- return x.ServerStream.SendMsg(m)
-}
-
-func (x *kVSWriteStreamingServer) Recv() (*AerospikeRequestPayload, error) {
- m := new(AerospikeRequestPayload)
- if err := x.ServerStream.RecvMsg(m); err != nil {
- return nil, err
- }
- return m, nil
-}
-
-func _KVS_Delete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(AerospikeRequestPayload)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(KVSServer).Delete(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: KVS_Delete_FullMethodName,
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(KVSServer).Delete(ctx, req.(*AerospikeRequestPayload))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _KVS_DeleteStreaming_Handler(srv interface{}, stream grpc.ServerStream) error {
- return srv.(KVSServer).DeleteStreaming(&kVSDeleteStreamingServer{stream})
-}
-
-type KVS_DeleteStreamingServer interface {
- Send(*AerospikeResponsePayload) error
- Recv() (*AerospikeRequestPayload, error)
- grpc.ServerStream
-}
-
-type kVSDeleteStreamingServer struct {
- grpc.ServerStream
-}
-
-func (x *kVSDeleteStreamingServer) Send(m *AerospikeResponsePayload) error {
- return x.ServerStream.SendMsg(m)
-}
-
-func (x *kVSDeleteStreamingServer) Recv() (*AerospikeRequestPayload, error) {
- m := new(AerospikeRequestPayload)
- if err := x.ServerStream.RecvMsg(m); err != nil {
- return nil, err
- }
- return m, nil
-}
-
-func _KVS_Touch_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(AerospikeRequestPayload)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(KVSServer).Touch(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: KVS_Touch_FullMethodName,
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(KVSServer).Touch(ctx, req.(*AerospikeRequestPayload))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _KVS_TouchStreaming_Handler(srv interface{}, stream grpc.ServerStream) error {
- return srv.(KVSServer).TouchStreaming(&kVSTouchStreamingServer{stream})
-}
-
-type KVS_TouchStreamingServer interface {
- Send(*AerospikeResponsePayload) error
- Recv() (*AerospikeRequestPayload, error)
- grpc.ServerStream
-}
-
-type kVSTouchStreamingServer struct {
- grpc.ServerStream
-}
-
-func (x *kVSTouchStreamingServer) Send(m *AerospikeResponsePayload) error {
- return x.ServerStream.SendMsg(m)
-}
-
-func (x *kVSTouchStreamingServer) Recv() (*AerospikeRequestPayload, error) {
- m := new(AerospikeRequestPayload)
- if err := x.ServerStream.RecvMsg(m); err != nil {
- return nil, err
- }
- return m, nil
-}
-
-func _KVS_Operate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(AerospikeRequestPayload)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(KVSServer).Operate(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: KVS_Operate_FullMethodName,
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(KVSServer).Operate(ctx, req.(*AerospikeRequestPayload))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _KVS_OperateStreaming_Handler(srv interface{}, stream grpc.ServerStream) error {
- return srv.(KVSServer).OperateStreaming(&kVSOperateStreamingServer{stream})
-}
-
-type KVS_OperateStreamingServer interface {
- Send(*AerospikeResponsePayload) error
- Recv() (*AerospikeRequestPayload, error)
- grpc.ServerStream
-}
-
-type kVSOperateStreamingServer struct {
- grpc.ServerStream
-}
-
-func (x *kVSOperateStreamingServer) Send(m *AerospikeResponsePayload) error {
- return x.ServerStream.SendMsg(m)
-}
-
-func (x *kVSOperateStreamingServer) Recv() (*AerospikeRequestPayload, error) {
- m := new(AerospikeRequestPayload)
- if err := x.ServerStream.RecvMsg(m); err != nil {
- return nil, err
- }
- return m, nil
-}
-
-func _KVS_Execute_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(AerospikeRequestPayload)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(KVSServer).Execute(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: KVS_Execute_FullMethodName,
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(KVSServer).Execute(ctx, req.(*AerospikeRequestPayload))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _KVS_ExecuteStreaming_Handler(srv interface{}, stream grpc.ServerStream) error {
- return srv.(KVSServer).ExecuteStreaming(&kVSExecuteStreamingServer{stream})
-}
-
-type KVS_ExecuteStreamingServer interface {
- Send(*AerospikeResponsePayload) error
- Recv() (*AerospikeRequestPayload, error)
- grpc.ServerStream
-}
-
-type kVSExecuteStreamingServer struct {
- grpc.ServerStream
-}
-
-func (x *kVSExecuteStreamingServer) Send(m *AerospikeResponsePayload) error {
- return x.ServerStream.SendMsg(m)
-}
-
-func (x *kVSExecuteStreamingServer) Recv() (*AerospikeRequestPayload, error) {
- m := new(AerospikeRequestPayload)
- if err := x.ServerStream.RecvMsg(m); err != nil {
- return nil, err
- }
- return m, nil
-}
-
-func _KVS_BatchOperate_Handler(srv interface{}, stream grpc.ServerStream) error {
- m := new(AerospikeRequestPayload)
- if err := stream.RecvMsg(m); err != nil {
- return err
- }
- return srv.(KVSServer).BatchOperate(m, &kVSBatchOperateServer{stream})
-}
-
-type KVS_BatchOperateServer interface {
- Send(*AerospikeResponsePayload) error
- grpc.ServerStream
-}
-
-type kVSBatchOperateServer struct {
- grpc.ServerStream
-}
-
-func (x *kVSBatchOperateServer) Send(m *AerospikeResponsePayload) error {
- return x.ServerStream.SendMsg(m)
-}
-
-func _KVS_BatchOperateStreaming_Handler(srv interface{}, stream grpc.ServerStream) error {
- return srv.(KVSServer).BatchOperateStreaming(&kVSBatchOperateStreamingServer{stream})
-}
-
-type KVS_BatchOperateStreamingServer interface {
- Send(*AerospikeResponsePayload) error
- Recv() (*AerospikeRequestPayload, error)
- grpc.ServerStream
-}
-
-type kVSBatchOperateStreamingServer struct {
- grpc.ServerStream
-}
-
-func (x *kVSBatchOperateStreamingServer) Send(m *AerospikeResponsePayload) error {
- return x.ServerStream.SendMsg(m)
-}
-
-func (x *kVSBatchOperateStreamingServer) Recv() (*AerospikeRequestPayload, error) {
- m := new(AerospikeRequestPayload)
- if err := x.ServerStream.RecvMsg(m); err != nil {
- return nil, err
- }
- return m, nil
-}
-
-// KVS_ServiceDesc is the grpc.ServiceDesc for KVS service.
-// It's only intended for direct use with grpc.RegisterService,
-// and not to be introspected or modified (even as a copy)
-var KVS_ServiceDesc = grpc.ServiceDesc{
- ServiceName: "KVS",
- HandlerType: (*KVSServer)(nil),
- Methods: []grpc.MethodDesc{
- {
- MethodName: "Read",
- Handler: _KVS_Read_Handler,
- },
- {
- MethodName: "GetHeader",
- Handler: _KVS_GetHeader_Handler,
- },
- {
- MethodName: "Exists",
- Handler: _KVS_Exists_Handler,
- },
- {
- MethodName: "Write",
- Handler: _KVS_Write_Handler,
- },
- {
- MethodName: "Delete",
- Handler: _KVS_Delete_Handler,
- },
- {
- MethodName: "Touch",
- Handler: _KVS_Touch_Handler,
- },
- {
- MethodName: "Operate",
- Handler: _KVS_Operate_Handler,
- },
- {
- MethodName: "Execute",
- Handler: _KVS_Execute_Handler,
- },
- },
- Streams: []grpc.StreamDesc{
- {
- StreamName: "ReadStreaming",
- Handler: _KVS_ReadStreaming_Handler,
- ServerStreams: true,
- ClientStreams: true,
- },
- {
- StreamName: "GetHeaderStreaming",
- Handler: _KVS_GetHeaderStreaming_Handler,
- ServerStreams: true,
- ClientStreams: true,
- },
- {
- StreamName: "ExistsStreaming",
- Handler: _KVS_ExistsStreaming_Handler,
- ServerStreams: true,
- ClientStreams: true,
- },
- {
- StreamName: "WriteStreaming",
- Handler: _KVS_WriteStreaming_Handler,
- ServerStreams: true,
- ClientStreams: true,
- },
- {
- StreamName: "DeleteStreaming",
- Handler: _KVS_DeleteStreaming_Handler,
- ServerStreams: true,
- ClientStreams: true,
- },
- {
- StreamName: "TouchStreaming",
- Handler: _KVS_TouchStreaming_Handler,
- ServerStreams: true,
- ClientStreams: true,
- },
- {
- StreamName: "OperateStreaming",
- Handler: _KVS_OperateStreaming_Handler,
- ServerStreams: true,
- ClientStreams: true,
- },
- {
- StreamName: "ExecuteStreaming",
- Handler: _KVS_ExecuteStreaming_Handler,
- ServerStreams: true,
- ClientStreams: true,
- },
- {
- StreamName: "BatchOperate",
- Handler: _KVS_BatchOperate_Handler,
- ServerStreams: true,
- },
- {
- StreamName: "BatchOperateStreaming",
- Handler: _KVS_BatchOperateStreaming_Handler,
- ServerStreams: true,
- ClientStreams: true,
- },
- },
- Metadata: "proto/kvs/aerospike_proxy_kv.proto",
-}
-
-const (
- Scan_Scan_FullMethodName = "/Scan/Scan"
- Scan_ScanStreaming_FullMethodName = "/Scan/ScanStreaming"
-)
-
-// ScanClient is the client API for Scan service.
-//
-// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
-type ScanClient interface {
- // Scan Aerospike
- Scan(ctx context.Context, in *AerospikeRequestPayload, opts ...grpc.CallOption) (Scan_ScanClient, error)
- // Process a stream of scan requests
- ScanStreaming(ctx context.Context, opts ...grpc.CallOption) (Scan_ScanStreamingClient, error)
-}
-
-type scanClient struct {
- cc grpc.ClientConnInterface
-}
-
-func NewScanClient(cc grpc.ClientConnInterface) ScanClient {
- return &scanClient{cc}
-}
-
-func (c *scanClient) Scan(ctx context.Context, in *AerospikeRequestPayload, opts ...grpc.CallOption) (Scan_ScanClient, error) {
- stream, err := c.cc.NewStream(ctx, &Scan_ServiceDesc.Streams[0], Scan_Scan_FullMethodName, opts...)
- if err != nil {
- return nil, err
- }
- x := &scanScanClient{stream}
- if err := x.ClientStream.SendMsg(in); err != nil {
- return nil, err
- }
- if err := x.ClientStream.CloseSend(); err != nil {
- return nil, err
- }
- return x, nil
-}
-
-type Scan_ScanClient interface {
- Recv() (*AerospikeResponsePayload, error)
- grpc.ClientStream
-}
-
-type scanScanClient struct {
- grpc.ClientStream
-}
-
-func (x *scanScanClient) Recv() (*AerospikeResponsePayload, error) {
- m := new(AerospikeResponsePayload)
- if err := x.ClientStream.RecvMsg(m); err != nil {
- return nil, err
- }
- return m, nil
-}
-
-func (c *scanClient) ScanStreaming(ctx context.Context, opts ...grpc.CallOption) (Scan_ScanStreamingClient, error) {
- stream, err := c.cc.NewStream(ctx, &Scan_ServiceDesc.Streams[1], Scan_ScanStreaming_FullMethodName, opts...)
- if err != nil {
- return nil, err
- }
- x := &scanScanStreamingClient{stream}
- return x, nil
-}
-
-type Scan_ScanStreamingClient interface {
- Send(*AerospikeRequestPayload) error
- Recv() (*AerospikeResponsePayload, error)
- grpc.ClientStream
-}
-
-type scanScanStreamingClient struct {
- grpc.ClientStream
-}
-
-func (x *scanScanStreamingClient) Send(m *AerospikeRequestPayload) error {
- return x.ClientStream.SendMsg(m)
-}
-
-func (x *scanScanStreamingClient) Recv() (*AerospikeResponsePayload, error) {
- m := new(AerospikeResponsePayload)
- if err := x.ClientStream.RecvMsg(m); err != nil {
- return nil, err
- }
- return m, nil
-}
-
-// ScanServer is the server API for Scan service.
-// All implementations must embed UnimplementedScanServer
-// for forward compatibility
-type ScanServer interface {
- // Scan Aerospike
- Scan(*AerospikeRequestPayload, Scan_ScanServer) error
- // Process a stream of scan requests
- ScanStreaming(Scan_ScanStreamingServer) error
- mustEmbedUnimplementedScanServer()
-}
-
-// UnimplementedScanServer must be embedded to have forward compatible implementations.
-type UnimplementedScanServer struct {
-}
-
-func (UnimplementedScanServer) Scan(*AerospikeRequestPayload, Scan_ScanServer) error {
- return status.Errorf(codes.Unimplemented, "method Scan not implemented")
-}
-func (UnimplementedScanServer) ScanStreaming(Scan_ScanStreamingServer) error {
- return status.Errorf(codes.Unimplemented, "method ScanStreaming not implemented")
-}
-func (UnimplementedScanServer) mustEmbedUnimplementedScanServer() {}
-
-// UnsafeScanServer may be embedded to opt out of forward compatibility for this service.
-// Use of this interface is not recommended, as added methods to ScanServer will
-// result in compilation errors.
-type UnsafeScanServer interface {
- mustEmbedUnimplementedScanServer()
-}
-
-func RegisterScanServer(s grpc.ServiceRegistrar, srv ScanServer) {
- s.RegisterService(&Scan_ServiceDesc, srv)
-}
-
-func _Scan_Scan_Handler(srv interface{}, stream grpc.ServerStream) error {
- m := new(AerospikeRequestPayload)
- if err := stream.RecvMsg(m); err != nil {
- return err
- }
- return srv.(ScanServer).Scan(m, &scanScanServer{stream})
-}
-
-type Scan_ScanServer interface {
- Send(*AerospikeResponsePayload) error
- grpc.ServerStream
-}
-
-type scanScanServer struct {
- grpc.ServerStream
-}
-
-func (x *scanScanServer) Send(m *AerospikeResponsePayload) error {
- return x.ServerStream.SendMsg(m)
-}
-
-func _Scan_ScanStreaming_Handler(srv interface{}, stream grpc.ServerStream) error {
- return srv.(ScanServer).ScanStreaming(&scanScanStreamingServer{stream})
-}
-
-type Scan_ScanStreamingServer interface {
- Send(*AerospikeResponsePayload) error
- Recv() (*AerospikeRequestPayload, error)
- grpc.ServerStream
-}
-
-type scanScanStreamingServer struct {
- grpc.ServerStream
-}
-
-func (x *scanScanStreamingServer) Send(m *AerospikeResponsePayload) error {
- return x.ServerStream.SendMsg(m)
-}
-
-func (x *scanScanStreamingServer) Recv() (*AerospikeRequestPayload, error) {
- m := new(AerospikeRequestPayload)
- if err := x.ServerStream.RecvMsg(m); err != nil {
- return nil, err
- }
- return m, nil
-}
-
-// Scan_ServiceDesc is the grpc.ServiceDesc for Scan service.
-// It's only intended for direct use with grpc.RegisterService,
-// and not to be introspected or modified (even as a copy)
-var Scan_ServiceDesc = grpc.ServiceDesc{
- ServiceName: "Scan",
- HandlerType: (*ScanServer)(nil),
- Methods: []grpc.MethodDesc{},
- Streams: []grpc.StreamDesc{
- {
- StreamName: "Scan",
- Handler: _Scan_Scan_Handler,
- ServerStreams: true,
- },
- {
- StreamName: "ScanStreaming",
- Handler: _Scan_ScanStreaming_Handler,
- ServerStreams: true,
- ClientStreams: true,
- },
- },
- Metadata: "proto/kvs/aerospike_proxy_kv.proto",
-}
-
-const (
- Query_Query_FullMethodName = "/Query/Query"
- Query_QueryStreaming_FullMethodName = "/Query/QueryStreaming"
- Query_BackgroundExecute_FullMethodName = "/Query/BackgroundExecute"
- Query_BackgroundExecuteStreaming_FullMethodName = "/Query/BackgroundExecuteStreaming"
- Query_BackgroundTaskStatus_FullMethodName = "/Query/BackgroundTaskStatus"
- Query_BackgroundTaskStatusStreaming_FullMethodName = "/Query/BackgroundTaskStatusStreaming"
-)
-
-// QueryClient is the client API for Query service.
-//
-// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
-type QueryClient interface {
- // Query Aerospike
- Query(ctx context.Context, in *AerospikeRequestPayload, opts ...grpc.CallOption) (Query_QueryClient, error)
- // Process a stream of query requests
- QueryStreaming(ctx context.Context, opts ...grpc.CallOption) (Query_QueryStreamingClient, error)
- // Execute background write on selected records.
- BackgroundExecute(ctx context.Context, in *AerospikeRequestPayload, opts ...grpc.CallOption) (Query_BackgroundExecuteClient, error)
- // Execute a stream of background write requests.
- BackgroundExecuteStreaming(ctx context.Context, opts ...grpc.CallOption) (Query_BackgroundExecuteStreamingClient, error)
- // Get status of a background task.
- BackgroundTaskStatus(ctx context.Context, in *AerospikeRequestPayload, opts ...grpc.CallOption) (Query_BackgroundTaskStatusClient, error)
- // Get status of a stream of background tasks.
- BackgroundTaskStatusStreaming(ctx context.Context, opts ...grpc.CallOption) (Query_BackgroundTaskStatusStreamingClient, error)
-}
-
-type queryClient struct {
- cc grpc.ClientConnInterface
-}
-
-func NewQueryClient(cc grpc.ClientConnInterface) QueryClient {
- return &queryClient{cc}
-}
-
-func (c *queryClient) Query(ctx context.Context, in *AerospikeRequestPayload, opts ...grpc.CallOption) (Query_QueryClient, error) {
- stream, err := c.cc.NewStream(ctx, &Query_ServiceDesc.Streams[0], Query_Query_FullMethodName, opts...)
- if err != nil {
- return nil, err
- }
- x := &queryQueryClient{stream}
- if err := x.ClientStream.SendMsg(in); err != nil {
- return nil, err
- }
- if err := x.ClientStream.CloseSend(); err != nil {
- return nil, err
- }
- return x, nil
-}
-
-type Query_QueryClient interface {
- Recv() (*AerospikeResponsePayload, error)
- grpc.ClientStream
-}
-
-type queryQueryClient struct {
- grpc.ClientStream
-}
-
-func (x *queryQueryClient) Recv() (*AerospikeResponsePayload, error) {
- m := new(AerospikeResponsePayload)
- if err := x.ClientStream.RecvMsg(m); err != nil {
- return nil, err
- }
- return m, nil
-}
-
-func (c *queryClient) QueryStreaming(ctx context.Context, opts ...grpc.CallOption) (Query_QueryStreamingClient, error) {
- stream, err := c.cc.NewStream(ctx, &Query_ServiceDesc.Streams[1], Query_QueryStreaming_FullMethodName, opts...)
- if err != nil {
- return nil, err
- }
- x := &queryQueryStreamingClient{stream}
- return x, nil
-}
-
-type Query_QueryStreamingClient interface {
- Send(*AerospikeRequestPayload) error
- Recv() (*AerospikeResponsePayload, error)
- grpc.ClientStream
-}
-
-type queryQueryStreamingClient struct {
- grpc.ClientStream
-}
-
-func (x *queryQueryStreamingClient) Send(m *AerospikeRequestPayload) error {
- return x.ClientStream.SendMsg(m)
-}
-
-func (x *queryQueryStreamingClient) Recv() (*AerospikeResponsePayload, error) {
- m := new(AerospikeResponsePayload)
- if err := x.ClientStream.RecvMsg(m); err != nil {
- return nil, err
- }
- return m, nil
-}
-
-func (c *queryClient) BackgroundExecute(ctx context.Context, in *AerospikeRequestPayload, opts ...grpc.CallOption) (Query_BackgroundExecuteClient, error) {
- stream, err := c.cc.NewStream(ctx, &Query_ServiceDesc.Streams[2], Query_BackgroundExecute_FullMethodName, opts...)
- if err != nil {
- return nil, err
- }
- x := &queryBackgroundExecuteClient{stream}
- if err := x.ClientStream.SendMsg(in); err != nil {
- return nil, err
- }
- if err := x.ClientStream.CloseSend(); err != nil {
- return nil, err
- }
- return x, nil
-}
-
-type Query_BackgroundExecuteClient interface {
- Recv() (*AerospikeResponsePayload, error)
- grpc.ClientStream
-}
-
-type queryBackgroundExecuteClient struct {
- grpc.ClientStream
-}
-
-func (x *queryBackgroundExecuteClient) Recv() (*AerospikeResponsePayload, error) {
- m := new(AerospikeResponsePayload)
- if err := x.ClientStream.RecvMsg(m); err != nil {
- return nil, err
- }
- return m, nil
-}
-
-func (c *queryClient) BackgroundExecuteStreaming(ctx context.Context, opts ...grpc.CallOption) (Query_BackgroundExecuteStreamingClient, error) {
- stream, err := c.cc.NewStream(ctx, &Query_ServiceDesc.Streams[3], Query_BackgroundExecuteStreaming_FullMethodName, opts...)
- if err != nil {
- return nil, err
- }
- x := &queryBackgroundExecuteStreamingClient{stream}
- return x, nil
-}
-
-type Query_BackgroundExecuteStreamingClient interface {
- Send(*AerospikeRequestPayload) error
- Recv() (*AerospikeResponsePayload, error)
- grpc.ClientStream
-}
-
-type queryBackgroundExecuteStreamingClient struct {
- grpc.ClientStream
-}
-
-func (x *queryBackgroundExecuteStreamingClient) Send(m *AerospikeRequestPayload) error {
- return x.ClientStream.SendMsg(m)
-}
-
-func (x *queryBackgroundExecuteStreamingClient) Recv() (*AerospikeResponsePayload, error) {
- m := new(AerospikeResponsePayload)
- if err := x.ClientStream.RecvMsg(m); err != nil {
- return nil, err
- }
- return m, nil
-}
-
-func (c *queryClient) BackgroundTaskStatus(ctx context.Context, in *AerospikeRequestPayload, opts ...grpc.CallOption) (Query_BackgroundTaskStatusClient, error) {
- stream, err := c.cc.NewStream(ctx, &Query_ServiceDesc.Streams[4], Query_BackgroundTaskStatus_FullMethodName, opts...)
- if err != nil {
- return nil, err
- }
- x := &queryBackgroundTaskStatusClient{stream}
- if err := x.ClientStream.SendMsg(in); err != nil {
- return nil, err
- }
- if err := x.ClientStream.CloseSend(); err != nil {
- return nil, err
- }
- return x, nil
-}
-
-type Query_BackgroundTaskStatusClient interface {
- Recv() (*AerospikeResponsePayload, error)
- grpc.ClientStream
-}
-
-type queryBackgroundTaskStatusClient struct {
- grpc.ClientStream
-}
-
-func (x *queryBackgroundTaskStatusClient) Recv() (*AerospikeResponsePayload, error) {
- m := new(AerospikeResponsePayload)
- if err := x.ClientStream.RecvMsg(m); err != nil {
- return nil, err
- }
- return m, nil
-}
-
-func (c *queryClient) BackgroundTaskStatusStreaming(ctx context.Context, opts ...grpc.CallOption) (Query_BackgroundTaskStatusStreamingClient, error) {
- stream, err := c.cc.NewStream(ctx, &Query_ServiceDesc.Streams[5], Query_BackgroundTaskStatusStreaming_FullMethodName, opts...)
- if err != nil {
- return nil, err
- }
- x := &queryBackgroundTaskStatusStreamingClient{stream}
- return x, nil
-}
-
-type Query_BackgroundTaskStatusStreamingClient interface {
- Send(*AerospikeRequestPayload) error
- Recv() (*AerospikeResponsePayload, error)
- grpc.ClientStream
-}
-
-type queryBackgroundTaskStatusStreamingClient struct {
- grpc.ClientStream
-}
-
-func (x *queryBackgroundTaskStatusStreamingClient) Send(m *AerospikeRequestPayload) error {
- return x.ClientStream.SendMsg(m)
-}
-
-func (x *queryBackgroundTaskStatusStreamingClient) Recv() (*AerospikeResponsePayload, error) {
- m := new(AerospikeResponsePayload)
- if err := x.ClientStream.RecvMsg(m); err != nil {
- return nil, err
- }
- return m, nil
-}
-
-// QueryServer is the server API for Query service.
-// All implementations must embed UnimplementedQueryServer
-// for forward compatibility
-type QueryServer interface {
- // Query Aerospike
- Query(*AerospikeRequestPayload, Query_QueryServer) error
- // Process a stream of query requests
- QueryStreaming(Query_QueryStreamingServer) error
- // Execute background write on selected records.
- BackgroundExecute(*AerospikeRequestPayload, Query_BackgroundExecuteServer) error
- // Execute a stream of background write requests.
- BackgroundExecuteStreaming(Query_BackgroundExecuteStreamingServer) error
- // Get status of a background task.
- BackgroundTaskStatus(*AerospikeRequestPayload, Query_BackgroundTaskStatusServer) error
- // Get status of a stream of background tasks.
- BackgroundTaskStatusStreaming(Query_BackgroundTaskStatusStreamingServer) error
- mustEmbedUnimplementedQueryServer()
-}
-
-// UnimplementedQueryServer must be embedded to have forward compatible implementations.
-type UnimplementedQueryServer struct {
-}
-
-func (UnimplementedQueryServer) Query(*AerospikeRequestPayload, Query_QueryServer) error {
- return status.Errorf(codes.Unimplemented, "method Query not implemented")
-}
-func (UnimplementedQueryServer) QueryStreaming(Query_QueryStreamingServer) error {
- return status.Errorf(codes.Unimplemented, "method QueryStreaming not implemented")
-}
-func (UnimplementedQueryServer) BackgroundExecute(*AerospikeRequestPayload, Query_BackgroundExecuteServer) error {
- return status.Errorf(codes.Unimplemented, "method BackgroundExecute not implemented")
-}
-func (UnimplementedQueryServer) BackgroundExecuteStreaming(Query_BackgroundExecuteStreamingServer) error {
- return status.Errorf(codes.Unimplemented, "method BackgroundExecuteStreaming not implemented")
-}
-func (UnimplementedQueryServer) BackgroundTaskStatus(*AerospikeRequestPayload, Query_BackgroundTaskStatusServer) error {
- return status.Errorf(codes.Unimplemented, "method BackgroundTaskStatus not implemented")
-}
-func (UnimplementedQueryServer) BackgroundTaskStatusStreaming(Query_BackgroundTaskStatusStreamingServer) error {
- return status.Errorf(codes.Unimplemented, "method BackgroundTaskStatusStreaming not implemented")
-}
-func (UnimplementedQueryServer) mustEmbedUnimplementedQueryServer() {}
-
-// UnsafeQueryServer may be embedded to opt out of forward compatibility for this service.
-// Use of this interface is not recommended, as added methods to QueryServer will
-// result in compilation errors.
-type UnsafeQueryServer interface {
- mustEmbedUnimplementedQueryServer()
-}
-
-func RegisterQueryServer(s grpc.ServiceRegistrar, srv QueryServer) {
- s.RegisterService(&Query_ServiceDesc, srv)
-}
-
-func _Query_Query_Handler(srv interface{}, stream grpc.ServerStream) error {
- m := new(AerospikeRequestPayload)
- if err := stream.RecvMsg(m); err != nil {
- return err
- }
- return srv.(QueryServer).Query(m, &queryQueryServer{stream})
-}
-
-type Query_QueryServer interface {
- Send(*AerospikeResponsePayload) error
- grpc.ServerStream
-}
-
-type queryQueryServer struct {
- grpc.ServerStream
-}
-
-func (x *queryQueryServer) Send(m *AerospikeResponsePayload) error {
- return x.ServerStream.SendMsg(m)
-}
-
-func _Query_QueryStreaming_Handler(srv interface{}, stream grpc.ServerStream) error {
- return srv.(QueryServer).QueryStreaming(&queryQueryStreamingServer{stream})
-}
-
-type Query_QueryStreamingServer interface {
- Send(*AerospikeResponsePayload) error
- Recv() (*AerospikeRequestPayload, error)
- grpc.ServerStream
-}
-
-type queryQueryStreamingServer struct {
- grpc.ServerStream
-}
-
-func (x *queryQueryStreamingServer) Send(m *AerospikeResponsePayload) error {
- return x.ServerStream.SendMsg(m)
-}
-
-func (x *queryQueryStreamingServer) Recv() (*AerospikeRequestPayload, error) {
- m := new(AerospikeRequestPayload)
- if err := x.ServerStream.RecvMsg(m); err != nil {
- return nil, err
- }
- return m, nil
-}
-
-func _Query_BackgroundExecute_Handler(srv interface{}, stream grpc.ServerStream) error {
- m := new(AerospikeRequestPayload)
- if err := stream.RecvMsg(m); err != nil {
- return err
- }
- return srv.(QueryServer).BackgroundExecute(m, &queryBackgroundExecuteServer{stream})
-}
-
-type Query_BackgroundExecuteServer interface {
- Send(*AerospikeResponsePayload) error
- grpc.ServerStream
-}
-
-type queryBackgroundExecuteServer struct {
- grpc.ServerStream
-}
-
-func (x *queryBackgroundExecuteServer) Send(m *AerospikeResponsePayload) error {
- return x.ServerStream.SendMsg(m)
-}
-
-func _Query_BackgroundExecuteStreaming_Handler(srv interface{}, stream grpc.ServerStream) error {
- return srv.(QueryServer).BackgroundExecuteStreaming(&queryBackgroundExecuteStreamingServer{stream})
-}
-
-type Query_BackgroundExecuteStreamingServer interface {
- Send(*AerospikeResponsePayload) error
- Recv() (*AerospikeRequestPayload, error)
- grpc.ServerStream
-}
-
-type queryBackgroundExecuteStreamingServer struct {
- grpc.ServerStream
-}
-
-func (x *queryBackgroundExecuteStreamingServer) Send(m *AerospikeResponsePayload) error {
- return x.ServerStream.SendMsg(m)
-}
-
-func (x *queryBackgroundExecuteStreamingServer) Recv() (*AerospikeRequestPayload, error) {
- m := new(AerospikeRequestPayload)
- if err := x.ServerStream.RecvMsg(m); err != nil {
- return nil, err
- }
- return m, nil
-}
-
-func _Query_BackgroundTaskStatus_Handler(srv interface{}, stream grpc.ServerStream) error {
- m := new(AerospikeRequestPayload)
- if err := stream.RecvMsg(m); err != nil {
- return err
- }
- return srv.(QueryServer).BackgroundTaskStatus(m, &queryBackgroundTaskStatusServer{stream})
-}
-
-type Query_BackgroundTaskStatusServer interface {
- Send(*AerospikeResponsePayload) error
- grpc.ServerStream
-}
-
-type queryBackgroundTaskStatusServer struct {
- grpc.ServerStream
-}
-
-func (x *queryBackgroundTaskStatusServer) Send(m *AerospikeResponsePayload) error {
- return x.ServerStream.SendMsg(m)
-}
-
-func _Query_BackgroundTaskStatusStreaming_Handler(srv interface{}, stream grpc.ServerStream) error {
- return srv.(QueryServer).BackgroundTaskStatusStreaming(&queryBackgroundTaskStatusStreamingServer{stream})
-}
-
-type Query_BackgroundTaskStatusStreamingServer interface {
- Send(*AerospikeResponsePayload) error
- Recv() (*AerospikeRequestPayload, error)
- grpc.ServerStream
-}
-
-type queryBackgroundTaskStatusStreamingServer struct {
- grpc.ServerStream
-}
-
-func (x *queryBackgroundTaskStatusStreamingServer) Send(m *AerospikeResponsePayload) error {
- return x.ServerStream.SendMsg(m)
-}
-
-func (x *queryBackgroundTaskStatusStreamingServer) Recv() (*AerospikeRequestPayload, error) {
- m := new(AerospikeRequestPayload)
- if err := x.ServerStream.RecvMsg(m); err != nil {
- return nil, err
- }
- return m, nil
-}
-
-// Query_ServiceDesc is the grpc.ServiceDesc for Query service.
-// It's only intended for direct use with grpc.RegisterService,
-// and not to be introspected or modified (even as a copy)
-var Query_ServiceDesc = grpc.ServiceDesc{
- ServiceName: "Query",
- HandlerType: (*QueryServer)(nil),
- Methods: []grpc.MethodDesc{},
- Streams: []grpc.StreamDesc{
- {
- StreamName: "Query",
- Handler: _Query_Query_Handler,
- ServerStreams: true,
- },
- {
- StreamName: "QueryStreaming",
- Handler: _Query_QueryStreaming_Handler,
- ServerStreams: true,
- ClientStreams: true,
- },
- {
- StreamName: "BackgroundExecute",
- Handler: _Query_BackgroundExecute_Handler,
- ServerStreams: true,
- },
- {
- StreamName: "BackgroundExecuteStreaming",
- Handler: _Query_BackgroundExecuteStreaming_Handler,
- ServerStreams: true,
- ClientStreams: true,
- },
- {
- StreamName: "BackgroundTaskStatus",
- Handler: _Query_BackgroundTaskStatus_Handler,
- ServerStreams: true,
- },
- {
- StreamName: "BackgroundTaskStatusStreaming",
- Handler: _Query_BackgroundTaskStatusStreaming_Handler,
- ServerStreams: true,
- ClientStreams: true,
- },
- },
- Metadata: "proto/kvs/aerospike_proxy_kv.proto",
-}
-
-const (
- Info_Info_FullMethodName = "/Info/Info"
-)
-
-// InfoClient is the client API for Info service.
-//
-// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
-type InfoClient interface {
- // Send an info request
- Info(ctx context.Context, in *AerospikeRequestPayload, opts ...grpc.CallOption) (*AerospikeResponsePayload, error)
-}
-
-type infoClient struct {
- cc grpc.ClientConnInterface
-}
-
-func NewInfoClient(cc grpc.ClientConnInterface) InfoClient {
- return &infoClient{cc}
-}
-
-func (c *infoClient) Info(ctx context.Context, in *AerospikeRequestPayload, opts ...grpc.CallOption) (*AerospikeResponsePayload, error) {
- out := new(AerospikeResponsePayload)
- err := c.cc.Invoke(ctx, Info_Info_FullMethodName, in, out, opts...)
- if err != nil {
- return nil, err
- }
- return out, nil
-}
-
-// InfoServer is the server API for Info service.
-// All implementations must embed UnimplementedInfoServer
-// for forward compatibility
-type InfoServer interface {
- // Send an info request
- Info(context.Context, *AerospikeRequestPayload) (*AerospikeResponsePayload, error)
- mustEmbedUnimplementedInfoServer()
-}
-
-// UnimplementedInfoServer must be embedded to have forward compatible implementations.
-type UnimplementedInfoServer struct {
-}
-
-func (UnimplementedInfoServer) Info(context.Context, *AerospikeRequestPayload) (*AerospikeResponsePayload, error) {
- return nil, status.Errorf(codes.Unimplemented, "method Info not implemented")
-}
-func (UnimplementedInfoServer) mustEmbedUnimplementedInfoServer() {}
-
-// UnsafeInfoServer may be embedded to opt out of forward compatibility for this service.
-// Use of this interface is not recommended, as added methods to InfoServer will
-// result in compilation errors.
-type UnsafeInfoServer interface {
- mustEmbedUnimplementedInfoServer()
-}
-
-func RegisterInfoServer(s grpc.ServiceRegistrar, srv InfoServer) {
- s.RegisterService(&Info_ServiceDesc, srv)
-}
-
-func _Info_Info_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(AerospikeRequestPayload)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(InfoServer).Info(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: Info_Info_FullMethodName,
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(InfoServer).Info(ctx, req.(*AerospikeRequestPayload))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-// Info_ServiceDesc is the grpc.ServiceDesc for Info service.
-// It's only intended for direct use with grpc.RegisterService,
-// and not to be introspected or modified (even as a copy)
-var Info_ServiceDesc = grpc.ServiceDesc{
- ServiceName: "Info",
- HandlerType: (*InfoServer)(nil),
- Methods: []grpc.MethodDesc{
- {
- MethodName: "Info",
- Handler: _Info_Info_Handler,
- },
- },
- Streams: []grpc.StreamDesc{},
- Metadata: "proto/kvs/aerospike_proxy_kv.proto",
-}
diff --git a/proxy_auth_interceptor.go b/proxy_auth_interceptor.go
deleted file mode 100644
index adbe4ee6..00000000
--- a/proxy_auth_interceptor.go
+++ /dev/null
@@ -1,218 +0,0 @@
-//go:build as_proxy
-
-// Copyright 2014-2022 Aerospike, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package aerospike
-
-import (
- "context"
- "encoding/base64"
- "encoding/json"
- "runtime/debug"
- "strings"
- "time"
-
- grpc "google.golang.org/grpc"
- "google.golang.org/grpc/metadata"
-
- "github.com/aerospike/aerospike-client-go/v7/logger"
- auth "github.com/aerospike/aerospike-client-go/v7/proto/auth"
- "github.com/aerospike/aerospike-client-go/v7/types"
-)
-
-type authInterceptor struct {
- clnt *ProxyClient
- closer chan struct{}
-
- expiry time.Time
- fullToken string // "Bearer "
-}
-
-func newAuthInterceptor(clnt *ProxyClient) (*authInterceptor, Error) {
- interceptor := &authInterceptor{
- clnt: clnt,
- closer: make(chan struct{}),
- }
-
- err := interceptor.scheduleRefreshToken()
- if err != nil {
- return nil, err
- }
-
- return interceptor, nil
-}
-
-func (interceptor *authInterceptor) close() {
- if interceptor.active() {
- close(interceptor.closer)
- }
-}
-
-func (interceptor *authInterceptor) active() bool {
- active := true
- select {
- case _, active = <-interceptor.closer:
- default:
- }
- return active
-}
-
-func (interceptor *authInterceptor) scheduleRefreshToken() Error {
- err := interceptor.refreshToken()
- if err != nil {
- return err
- }
-
- // launch the refresher go routine
- go interceptor.tokenRefresher()
-
- return nil
-}
-
-func (interceptor *authInterceptor) tokenRefresher() {
- // make sure the goroutine is restarted if something panics downstream
- defer func() {
- if r := recover(); r != nil {
- logger.Logger.Error("Interceptor refresh goroutine crashed: %s", debug.Stack())
- go interceptor.tokenRefresher()
- }
- }()
-
- // provide 5 secs of buffer before expiry due to network latency
- wait := interceptor.expiry.Sub(time.Now()) - 5*time.Second
- ticker := time.NewTicker(wait)
- defer ticker.Stop()
-
- for {
- ticker.Reset(wait)
- select {
- case <-ticker.C:
- err := interceptor.refreshToken()
- if err != nil {
- wait = time.Second
- } else {
- wait = interceptor.expiry.Sub(time.Now()) - 5*time.Second
- }
-
- case <-interceptor.closer:
- // channel closed; return from the goroutine
- return
- }
- }
-}
-
-func (interceptor *authInterceptor) refreshToken() Error {
- err := interceptor.login()
- if err != nil {
- return err
- }
-
- interceptor.clnt.setAuthToken(interceptor.fullToken)
-
- return nil
-}
-
-func (interceptor *authInterceptor) RequireTransportSecurity() bool {
- return true
-}
-
-func (interceptor *authInterceptor) Unary() grpc.UnaryClientInterceptor {
- return func(
- ctx context.Context,
- method string,
- req, reply interface{},
- cc *grpc.ClientConn,
- invoker grpc.UnaryInvoker,
- opts ...grpc.CallOption,
- ) error {
- return invoker(interceptor.attachToken(ctx), method, req, reply, cc, opts...)
- }
-}
-
-func (interceptor *authInterceptor) Stream() grpc.StreamClientInterceptor {
- return func(
- ctx context.Context,
- desc *grpc.StreamDesc,
- cc *grpc.ClientConn,
- method string,
- streamer grpc.Streamer,
- opts ...grpc.CallOption,
- ) (grpc.ClientStream, error) {
- return streamer(interceptor.attachToken(ctx), desc, cc, method, opts...)
- }
-}
-
-func (interceptor *authInterceptor) attachToken(ctx context.Context) context.Context {
- token := interceptor.clnt.token()
- return metadata.AppendToOutgoingContext(ctx, "Authorization", token)
-}
-
-func (interceptor *authInterceptor) login() Error {
- conn, err := interceptor.clnt.createGrpcConn(true)
- if err != nil {
- return err
- }
- defer conn.Close()
-
- req := auth.AerospikeAuthRequest{
- Username: interceptor.clnt.clientPolicy.User,
- Password: interceptor.clnt.clientPolicy.Password,
- }
-
- client := auth.NewAuthServiceClient(conn)
-
- ctx, cancel := context.WithTimeout(context.Background(), interceptor.clnt.clientPolicy.Timeout)
- defer cancel()
-
- res, gerr := client.Get(ctx, &req)
- if gerr != nil {
- return newGrpcError(false, gerr, gerr.Error())
- }
-
- claims := strings.Split(res.GetToken(), ".")
- decClaims, gerr := base64.RawURLEncoding.DecodeString(claims[1])
- if gerr != nil {
- return newGrpcError(false, gerr, "Invalid token encoding. Expected base64.")
- }
-
- tokenMap := make(map[string]interface{}, 8)
- gerr = json.Unmarshal(decClaims, &tokenMap)
- if gerr != nil {
- return newError(types.PARSE_ERROR, "Invalid token encoding. Expected json.")
- }
-
- expiryToken, ok := tokenMap["exp"].(float64)
- if !ok {
- return newError(types.PARSE_ERROR, "Invalid expiry value. Expected float64.")
- }
-
- iat, ok := tokenMap["iat"].(float64)
- if !ok {
- return newError(types.PARSE_ERROR, "Invalid iat value. Expected float64.")
-
- }
-
- ttl := time.Duration(expiryToken-iat) * time.Second
- if ttl <= 0 {
- return newError(types.PARSE_ERROR, "Invalid token values. token 'iat' > 'exp'")
- }
-
- // Set expiry based on local clock.
- expiry := time.Now().Add(ttl)
- interceptor.fullToken = "Bearer " + res.GetToken()
- interceptor.expiry = expiry
-
- return nil
-}
diff --git a/proxy_client.go b/proxy_client.go
deleted file mode 100644
index 15257ae5..00000000
--- a/proxy_client.go
+++ /dev/null
@@ -1,1531 +0,0 @@
-//go:build as_proxy
-
-// Copyright 2014-2022 Aerospike, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package aerospike
-
-import (
- "context"
- "math/rand"
- "runtime"
- "sync"
- "time"
-
- "google.golang.org/grpc"
- "google.golang.org/grpc/credentials"
- "google.golang.org/grpc/credentials/insecure"
-
- iatomic "github.com/aerospike/aerospike-client-go/v7/internal/atomic"
- kvs "github.com/aerospike/aerospike-client-go/v7/proto/kvs"
- "github.com/aerospike/aerospike-client-go/v7/types"
-)
-
-const notSupportedInProxyClient = "NOT SUPPORTED IN THE PROXY CLIENT"
-
-// ProxyClient encapsulates an Aerospike cluster.
-// All database operations are available against this object.
-type ProxyClient struct {
- // only for GRPC
- clientPolicy ClientPolicy
- grpcConnPool *grpcConnectionHeap
- grpcHost *Host
- dialOptions []grpc.DialOption
-
- authToken iatomic.TypedVal[string]
- authInterceptor *authInterceptor
-
- active iatomic.Bool
-
- // DefaultPolicy is used for all read commands without a specific policy.
- DefaultPolicy *BasePolicy
- // DefaultBatchPolicy is the default parent policy used in batch read commands. Base policy fields
- // include socketTimeout, totalTimeout, maxRetries, etc...
- DefaultBatchPolicy *BatchPolicy
- // DefaultBatchReadPolicy is the default read policy used in batch operate commands.
- DefaultBatchReadPolicy *BatchReadPolicy
- // DefaultBatchWritePolicy is the default write policy used in batch operate commands.
- // Write policy fields include generation, expiration, durableDelete, etc...
- DefaultBatchWritePolicy *BatchWritePolicy
- // DefaultBatchDeletePolicy is the default delete policy used in batch delete commands.
- DefaultBatchDeletePolicy *BatchDeletePolicy
- // DefaultBatchUDFPolicy is the default user defined function policy used in batch UDF execute commands.
- DefaultBatchUDFPolicy *BatchUDFPolicy
- // DefaultWritePolicy is used for all write commands without a specific policy.
- DefaultWritePolicy *WritePolicy
- // DefaultScanPolicy is used for all scan commands without a specific policy.
- DefaultScanPolicy *ScanPolicy
- // DefaultQueryPolicy is used for all query commands without a specific policy.
- DefaultQueryPolicy *QueryPolicy
- // DefaultAdminPolicy is used for all security commands without a specific policy.
- DefaultAdminPolicy *AdminPolicy
- // DefaultInfoPolicy is used for all info commands without a specific policy.
- DefaultInfoPolicy *InfoPolicy
-}
-
-func grpcClientFinalizer(f *ProxyClient) {
- f.Close()
-}
-
-//-------------------------------------------------------
-// Constructors
-//-------------------------------------------------------
-
-// NewProxyClientWithPolicyAndHost generates a new ProxyClient with the specified ClientPolicy and
-// sets up the cluster using the provided hosts.
-// You must pass the tag 'as_proxy' to the compiler during build.
-// If the policy is nil, the default relevant policy will be used.
-// Pass "dns:///:" (note the 3 slashes) for dns load balancing,
-// automatically supported internally by grpc-go.
-// The connection pool after connecting to the database is initially empty,
-// and connections are established on a per need basis, which can be slow and
-// time out some initial commands.
-// It is recommended to call the client.WarmUp() method right after connecting to the database
-// to fill up the connection pool to the required service level.
-func NewProxyClientWithPolicyAndHost(policy *ClientPolicy, host *Host, dialOptions ...grpc.DialOption) (*ProxyClient, Error) {
- if policy == nil {
- policy = NewClientPolicy()
- }
-
- grpcClient := &ProxyClient{
- clientPolicy: *policy,
- grpcConnPool: newGrpcConnectionHeap(policy.ConnectionQueueSize),
- grpcHost: host,
- dialOptions: dialOptions,
-
- active: *iatomic.NewBool(true),
-
- DefaultPolicy: NewPolicy(),
- DefaultBatchPolicy: NewBatchPolicy(),
- DefaultBatchReadPolicy: NewBatchReadPolicy(),
- DefaultBatchWritePolicy: NewBatchWritePolicy(),
- DefaultBatchDeletePolicy: NewBatchDeletePolicy(),
- DefaultBatchUDFPolicy: NewBatchUDFPolicy(),
- DefaultWritePolicy: NewWritePolicy(0, 0),
- DefaultScanPolicy: NewScanPolicy(),
- DefaultQueryPolicy: NewQueryPolicy(),
- DefaultAdminPolicy: NewAdminPolicy(),
- DefaultInfoPolicy: NewInfoPolicy(),
- }
-
- if policy.RequiresAuthentication() {
- authInterceptor, err := newAuthInterceptor(grpcClient)
- if err != nil {
- return nil, err
- }
-
- grpcClient.authInterceptor = authInterceptor
- }
-
- // check the version to make sure we are connected to the server
- infoPolicy := NewInfoPolicy()
- infoPolicy.Timeout = policy.Timeout
- _, err := grpcClient.ServerVersion(infoPolicy)
- if err != nil {
- return nil, err
- }
-
- runtime.SetFinalizer(grpcClient, grpcClientFinalizer)
- return grpcClient, nil
-}
-
-//-------------------------------------------------------
-// Policy methods
-//-------------------------------------------------------
-
-// DefaultPolicy returns corresponding default policy from the client
-func (clnt *ProxyClient) GetDefaultPolicy() *BasePolicy {
- return clnt.DefaultPolicy
-}
-
-// DefaultBatchPolicy returns corresponding default policy from the client
-func (clnt *ProxyClient) GetDefaultBatchPolicy() *BatchPolicy {
- return clnt.DefaultBatchPolicy
-}
-
-// DefaultBatchWritePolicy returns corresponding default policy from the client
-func (clnt *ProxyClient) GetDefaultBatchWritePolicy() *BatchWritePolicy {
- return clnt.DefaultBatchWritePolicy
-}
-
-// DefaultBatchReadPolicy returns corresponding default policy from the client
-func (clnt *ProxyClient) GetDefaultBatchReadPolicy() *BatchReadPolicy {
- return clnt.DefaultBatchReadPolicy
-}
-
-// DefaultBatchDeletePolicy returns corresponding default policy from the client
-func (clnt *ProxyClient) GetDefaultBatchDeletePolicy() *BatchDeletePolicy {
- return clnt.DefaultBatchDeletePolicy
-}
-
-// DefaultBatchUDFPolicy returns corresponding default policy from the client
-func (clnt *ProxyClient) GetDefaultBatchUDFPolicy() *BatchUDFPolicy {
- return clnt.DefaultBatchUDFPolicy
-}
-
-// DefaultWritePolicy returns corresponding default policy from the client
-func (clnt *ProxyClient) GetDefaultWritePolicy() *WritePolicy {
- return clnt.DefaultWritePolicy
-}
-
-// DefaultScanPolicy returns corresponding default policy from the client
-func (clnt *ProxyClient) GetDefaultScanPolicy() *ScanPolicy {
- return clnt.DefaultScanPolicy
-}
-
-// DefaultQueryPolicy returns corresponding default policy from the client
-func (clnt *ProxyClient) GetDefaultQueryPolicy() *QueryPolicy {
- return clnt.DefaultQueryPolicy
-}
-
-// DefaultAdminPolicy returns corresponding default policy from the client
-func (clnt *ProxyClient) GetDefaultAdminPolicy() *AdminPolicy {
- return clnt.DefaultAdminPolicy
-}
-
-// DefaultInfoPolicy returns corresponding default policy from the client
-func (clnt *ProxyClient) GetDefaultInfoPolicy() *InfoPolicy {
- return clnt.DefaultInfoPolicy
-}
-
-// DefaultPolicy returns corresponding default policy from the client
-func (clnt *ProxyClient) SetDefaultPolicy(policy *BasePolicy) {
- clnt.DefaultPolicy = policy
-}
-
-// DefaultBatchPolicy returns corresponding default policy from the client
-func (clnt *ProxyClient) SetDefaultBatchPolicy(policy *BatchPolicy) {
- clnt.DefaultBatchPolicy = policy
-}
-
-// DefaultBatchReadPolicy returns corresponding default policy from the client
-func (clnt *ProxyClient) SetDefaultBatchReadPolicy(policy *BatchReadPolicy) {
- clnt.DefaultBatchReadPolicy = policy
-}
-
-// DefaultBatchWritePolicy returns corresponding default policy from the client
-func (clnt *ProxyClient) SetDefaultBatchWritePolicy(policy *BatchWritePolicy) {
- clnt.DefaultBatchWritePolicy = policy
-}
-
-// DefaultBatchDeletePolicy returns corresponding default policy from the client
-func (clnt *ProxyClient) SetDefaultBatchDeletePolicy(policy *BatchDeletePolicy) {
- clnt.DefaultBatchDeletePolicy = policy
-}
-
-// DefaultBatchUDFPolicy returns corresponding default policy from the client
-func (clnt *ProxyClient) SetDefaultBatchUDFPolicy(policy *BatchUDFPolicy) {
- clnt.DefaultBatchUDFPolicy = policy
-}
-
-// DefaultWritePolicy returns corresponding default policy from the client
-func (clnt *ProxyClient) SetDefaultWritePolicy(policy *WritePolicy) {
- clnt.DefaultWritePolicy = policy
-}
-
-// DefaultScanPolicy returns corresponding default policy from the client
-func (clnt *ProxyClient) SetDefaultScanPolicy(policy *ScanPolicy) {
- clnt.DefaultScanPolicy = policy
-}
-
-// DefaultQueryPolicy returns corresponding default policy from the client
-func (clnt *ProxyClient) SetDefaultQueryPolicy(policy *QueryPolicy) {
- clnt.DefaultQueryPolicy = policy
-}
-
-// DefaultAdminPolicy returns corresponding default policy from the client
-func (clnt *ProxyClient) SetDefaultAdminPolicy(policy *AdminPolicy) {
- clnt.DefaultAdminPolicy = policy
-}
-
-// DefaultInfoPolicy returns corresponding default policy from the client
-func (clnt *ProxyClient) SetDefaultInfoPolicy(policy *InfoPolicy) {
- clnt.DefaultInfoPolicy = policy
-}
-
-//-------------------------------------------------------
-// Cluster Connection Management
-//-------------------------------------------------------
-
-func (clnt *ProxyClient) token() string {
- return clnt.authToken.Get()
-}
-
-func (clnt *ProxyClient) setAuthToken(token string) {
- clnt.authToken.Set(token)
-}
-
-func (clnt *ProxyClient) grpcConn() (*grpc.ClientConn, Error) {
- pconn := clnt.grpcConnPool.Get()
- if pconn != nil {
- return pconn, nil
- }
-
- return clnt.createGrpcConn(!clnt.clientPolicy.RequiresAuthentication())
-}
-
-func (clnt *ProxyClient) returnGrpcConnToPool(conn *grpc.ClientConn) {
- if conn != nil {
- clnt.grpcConnPool.Put(conn)
- }
-}
-
-func (clnt *ProxyClient) createGrpcConn(noInterceptor bool) (*grpc.ClientConn, Error) {
- // make a new connection
- // Implement TLS and auth
- dialOptions := []grpc.DialOption{grpc.WithDefaultCallOptions(grpc.MaxCallSendMsgSize(MaxBufferSize)), grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(MaxBufferSize))}
- if clnt.clientPolicy.TlsConfig != nil {
- dialOptions = append(dialOptions, grpc.WithTransportCredentials(credentials.NewTLS(clnt.clientPolicy.TlsConfig)))
- } else {
- dialOptions = append(dialOptions, grpc.WithTransportCredentials(insecure.NewCredentials()))
- }
-
- ctx, cancel := context.WithTimeout(context.Background(), clnt.clientPolicy.Timeout)
- defer cancel()
-
- allOptions := append(dialOptions, clnt.dialOptions...)
- if !noInterceptor {
- allOptions = append(dialOptions,
- grpc.WithUnaryInterceptor(clnt.authInterceptor.Unary()),
- grpc.WithStreamInterceptor(clnt.authInterceptor.Stream()),
- )
- }
-
- conn, err := grpc.DialContext(ctx, clnt.grpcHost.String(), allOptions...)
- if err != nil {
- return nil, newError(types.NO_AVAILABLE_CONNECTIONS_TO_NODE, err.Error())
- }
-
- return conn, nil
-}
-
-// Close closes all Grpcclient connections to database server nodes.
-func (clnt *ProxyClient) Close() {
- clnt.active.Set(false)
- clnt.grpcConnPool.cleanup()
- if clnt.authInterceptor != nil {
- clnt.authInterceptor.close()
- }
-}
-
-// IsConnected determines if the Grpcclient is ready to talk to the database server cluster.
-func (clnt *ProxyClient) IsConnected() bool {
- return clnt.active.Get()
-}
-
-// GetNodes returns an array of active server nodes in the cluster.
-func (clnt *ProxyClient) GetNodes() []*Node {
- panic(notSupportedInProxyClient)
-}
-
-// GetNodeNames returns a list of active server node names in the cluster.
-func (clnt *ProxyClient) GetNodeNames() []string {
- panic(notSupportedInProxyClient)
-}
-
-// ServerVersion will return the version of the proxy server.
-func (clnt *ProxyClient) ServerVersion(policy *InfoPolicy) (string, Error) {
- policy = clnt.getUsableInfoPolicy(policy)
-
- req := kvs.AboutRequest{}
-
- conn, err := clnt.grpcConn()
- if err != nil {
- return "", err
- }
-
- client := kvs.NewAboutClient(conn)
-
- ctx, cancel := policy.grpcDeadlineContext()
- defer cancel()
-
- res, gerr := client.Get(ctx, &req)
- if gerr != nil {
- return "", newGrpcError(false, gerr, gerr.Error())
- }
-
- clnt.returnGrpcConnToPool(conn)
-
- return res.GetVersion(), nil
-}
-
-//-------------------------------------------------------
-// Write Record Operations
-//-------------------------------------------------------
-
-// Put writes record bin(s) to the server.
-// The policy specifies the transaction timeout, record expiration and how the transaction is
-// handled when the record already exists.
-// If the policy is nil, the default relevant policy will be used.
-func (clnt *ProxyClient) Put(policy *WritePolicy, key *Key, binMap BinMap) Error {
- policy = clnt.getUsableWritePolicy(policy)
- command, err := newWriteCommand(nil, policy, key, nil, binMap, _WRITE)
- if err != nil {
- return err
- }
-
- return command.ExecuteGRPC(clnt)
-}
-
-// PutBins writes record bin(s) to the server.
-// The policy specifies the transaction timeout, record expiration and how the transaction is
-// handled when the record already exists.
-// This method avoids using the BinMap allocation and iteration and is lighter on GC.
-// If the policy is nil, the default relevant policy will be used.
-func (clnt *ProxyClient) PutBins(policy *WritePolicy, key *Key, bins ...*Bin) Error {
- policy = clnt.getUsableWritePolicy(policy)
- command, err := newWriteCommand(nil, policy, key, bins, nil, _WRITE)
- if err != nil {
- return err
- }
-
- return command.ExecuteGRPC(clnt)
-}
-
-//-------------------------------------------------------
-// Operations string
-//-------------------------------------------------------
-
-// Append appends bin value's string to existing record bin values.
-// The policy specifies the transaction timeout, record expiration and how the transaction is
-// handled when the record already exists.
-// This call only works for string and []byte values.
-// If the policy is nil, the default relevant policy will be used.
-func (clnt *ProxyClient) Append(policy *WritePolicy, key *Key, binMap BinMap) Error {
- ops := make([]*Operation, 0, len(binMap))
- for k, v := range binMap {
- ops = append(ops, AppendOp(NewBin(k, v)))
- }
-
- _, err := clnt.Operate(policy, key, ops...)
- return err
-}
-
-// AppendBins works the same as Append, but avoids BinMap allocation and iteration.
-func (clnt *ProxyClient) AppendBins(policy *WritePolicy, key *Key, bins ...*Bin) Error {
- ops := make([]*Operation, 0, len(bins))
- for _, bin := range bins {
- ops = append(ops, AppendOp(bin))
- }
-
- _, err := clnt.Operate(policy, key, ops...)
- return err
-}
-
-// Prepend prepends bin value's string to existing record bin values.
-// The policy specifies the transaction timeout, record expiration and how the transaction is
-// handled when the record already exists.
-// This call works only for string and []byte values.
-// If the policy is nil, the default relevant policy will be used.
-func (clnt *ProxyClient) Prepend(policy *WritePolicy, key *Key, binMap BinMap) Error {
- ops := make([]*Operation, 0, len(binMap))
- for k, v := range binMap {
- ops = append(ops, PrependOp(NewBin(k, v)))
- }
-
- _, err := clnt.Operate(policy, key, ops...)
- return err
-}
-
-// PrependBins works the same as Prepend, but avoids BinMap allocation and iteration.
-func (clnt *ProxyClient) PrependBins(policy *WritePolicy, key *Key, bins ...*Bin) Error {
- ops := make([]*Operation, 0, len(bins))
- for _, bin := range bins {
- ops = append(ops, PrependOp(bin))
- }
-
- _, err := clnt.Operate(policy, key, ops...)
- return err
-}
-
-//-------------------------------------------------------
-// Arithmetic Operations
-//-------------------------------------------------------
-
-// Add adds integer bin values to existing record bin values.
-// The policy specifies the transaction timeout, record expiration and how the transaction is
-// handled when the record already exists.
-// This call only works for integer values.
-// If the policy is nil, the default relevant policy will be used.
-func (clnt *ProxyClient) Add(policy *WritePolicy, key *Key, binMap BinMap) Error {
- ops := make([]*Operation, 0, len(binMap))
- for k, v := range binMap {
- ops = append(ops, AddOp(NewBin(k, v)))
- }
-
- _, err := clnt.Operate(policy, key, ops...)
- return err
-}
-
-// AddBins works the same as Add, but avoids BinMap allocation and iteration.
-func (clnt *ProxyClient) AddBins(policy *WritePolicy, key *Key, bins ...*Bin) Error {
- ops := make([]*Operation, 0, len(bins))
- for _, bin := range bins {
- ops = append(ops, AddOp(bin))
- }
-
- _, err := clnt.Operate(policy, key, ops...)
- return err
-}
-
-//-------------------------------------------------------
-// Delete Operations
-//-------------------------------------------------------
-
-// Delete deletes a record for specified key.
-// The policy specifies the transaction timeout.
-// If the policy is nil, the default relevant policy will be used.
-func (clnt *ProxyClient) Delete(policy *WritePolicy, key *Key) (bool, Error) {
- policy = clnt.getUsableWritePolicy(policy)
- command, err := newDeleteCommand(nil, policy, key)
- if err != nil {
- return false, err
- }
-
- err = command.ExecuteGRPC(clnt)
- return command.Existed(), err
-}
-
-//-------------------------------------------------------
-// Touch Operations
-//-------------------------------------------------------
-
-// Touch updates a record's metadata.
-// If the record exists, the record's TTL will be reset to the
-// policy's expiration.
-// If the record doesn't exist, it will return an error.
-func (clnt *ProxyClient) Touch(policy *WritePolicy, key *Key) Error {
- policy = clnt.getUsableWritePolicy(policy)
- command, err := newTouchCommand(nil, policy, key)
- if err != nil {
- return err
- }
-
- return command.ExecuteGRPC(clnt)
-}
-
-//-------------------------------------------------------
-// Existence-Check Operations
-//-------------------------------------------------------
-
-// Exists determine if a record key exists.
-// The policy can be used to specify timeouts.
-// If the policy is nil, the default relevant policy will be used.
-func (clnt *ProxyClient) Exists(policy *BasePolicy, key *Key) (bool, Error) {
- policy = clnt.getUsablePolicy(policy)
- command, err := newExistsCommand(nil, policy, key)
- if err != nil {
- return false, err
- }
-
- err = command.ExecuteGRPC(clnt)
- return command.Exists(), err
-}
-
-// BatchExists determines if multiple record keys exist in one batch request.
-// The returned boolean array is in positional order with the original key array order.
-// The policy can be used to specify timeouts.
-// If the policy is nil, the default relevant policy will be used.
-func (clnt *ProxyClient) BatchExists(policy *BatchPolicy, keys []*Key) ([]bool, Error) {
- batchRecordsIfc := make([]BatchRecordIfc, 0, len(keys))
- for _, key := range keys {
- batchRecordsIfc = append(batchRecordsIfc, NewBatchReadHeader(nil, key))
- }
-
- err := clnt.BatchOperate(policy, batchRecordsIfc)
- records := make([]bool, 0, len(keys))
- for i := range batchRecordsIfc {
- records = append(records, batchRecordsIfc[i].BatchRec().Record != nil)
- // if nerr := batchRecordsIfc[i].BatchRec().Err; nerr != nil {
- // err = chainErrors(err, nerr)
- // }
- }
-
- return records, err
-}
-
-//-------------------------------------------------------
-// Read Record Operations
-//-------------------------------------------------------
-
-// Get reads a record header and bins for specified key.
-// The policy can be used to specify timeouts.
-// If the policy is nil, the default relevant policy will be used.
-func (clnt *ProxyClient) Get(policy *BasePolicy, key *Key, binNames ...string) (*Record, Error) {
- policy = clnt.getUsablePolicy(policy)
-
- command, err := newReadCommand(nil, policy, key, binNames, nil)
- if err != nil {
- return nil, err
- }
-
- if err := command.ExecuteGRPC(clnt); err != nil {
- return nil, err
- }
- return command.GetRecord(), nil
-}
-
-// GetHeader reads a record generation and expiration only for specified key.
-// Bins are not read.
-// The policy can be used to specify timeouts.
-// If the policy is nil, the default relevant policy will be used.
-func (clnt *ProxyClient) GetHeader(policy *BasePolicy, key *Key) (*Record, Error) {
- policy = clnt.getUsablePolicy(policy)
-
- command, err := newReadHeaderCommand(nil, policy, key)
- if err != nil {
- return nil, err
- }
-
- if err := command.ExecuteGRPC(clnt); err != nil {
- return nil, err
- }
- return command.GetRecord(), nil
-}
-
-//-------------------------------------------------------
-// Batch Read Operations
-//-------------------------------------------------------
-
-// BatchGet reads multiple record headers and bins for specified keys in one batch request.
-// The returned records are in positional order with the original key array order.
-// If a key is not found, the positional record will be nil.
-// The policy can be used to specify timeouts.
-// If the policy is nil, the default relevant policy will be used.
-func (clnt *ProxyClient) BatchGet(policy *BatchPolicy, keys []*Key, binNames ...string) ([]*Record, Error) {
- policy = clnt.getUsableBatchPolicy(policy)
-
- if len(keys) == 0 {
- return []*Record{}, nil
- }
-
- batchRecordsIfc := make([]BatchRecordIfc, 0, len(keys))
- batchRecords := make([]*BatchRecord, 0, len(keys))
- for _, key := range keys {
- batchRead, batchRecord := newBatchRead(clnt.DefaultBatchReadPolicy, key, binNames)
- batchRecordsIfc = append(batchRecordsIfc, batchRead)
- batchRecords = append(batchRecords, batchRecord)
- }
-
- filteredOut, err := clnt.batchOperate(policy, batchRecordsIfc)
- if filteredOut > 0 {
- err = chainErrors(ErrFilteredOut.err(), err)
- }
-
- records := make([]*Record, 0, len(keys))
- for i := range batchRecords {
- records = append(records, batchRecords[i].Record)
- }
-
- return records, err
-}
-
-// BatchGetOperate reads multiple records for specified keys using read operations in one batch call.
-// The returned records are in positional order with the original key array order.
-// If a key is not found, the positional record will be nil.
-//
-// If a batch request to a node fails, the entire batch is cancelled.
-func (clnt *ProxyClient) BatchGetOperate(policy *BatchPolicy, keys []*Key, ops ...*Operation) ([]*Record, Error) {
- policy = clnt.getUsableBatchPolicy(policy)
-
- if len(keys) == 0 {
- return []*Record{}, nil
- }
-
- batchRecordsIfc := make([]BatchRecordIfc, 0, len(keys))
- batchRecords := make([]*BatchRecord, 0, len(keys))
- for _, key := range keys {
- batchRead, batchRecord := newBatchReadOps(clnt.DefaultBatchReadPolicy, key, ops...)
- batchRecordsIfc = append(batchRecordsIfc, batchRead)
- batchRecords = append(batchRecords, batchRecord)
- }
-
- filteredOut, err := clnt.batchOperate(policy, batchRecordsIfc)
- if filteredOut > 0 {
- err = chainErrors(ErrFilteredOut.err(), err)
- }
-
- records := make([]*Record, 0, len(keys))
- for i := range batchRecords {
- records = append(records, batchRecords[i].Record)
- }
-
- return records, err
-}
-
-// BatchGetComplex reads multiple records for specified batch keys in one batch call.
-// This method allows different namespaces/bins to be requested for each key in the batch.
-// The returned records are located in the same list.
-// If the BatchRead key field is not found, the corresponding record field will be nil.
-// The policy can be used to specify timeouts and maximum concurrent goroutines.
-// This method requires Aerospike Server version >= 3.6.0.
-func (clnt *ProxyClient) BatchGetComplex(policy *BatchPolicy, records []*BatchRead) Error {
- policy = clnt.getUsableBatchPolicy(policy)
- batchRecordsIfc := make([]BatchRecordIfc, 0, len(records))
- for _, record := range records {
- batchRecordsIfc = append(batchRecordsIfc, record)
- }
-
- filteredOut, err := clnt.batchOperate(policy, batchRecordsIfc)
- if filteredOut > 0 {
- err = chainErrors(ErrFilteredOut.err(), err)
- }
-
- return err
-}
-
-// BatchGetHeader reads multiple record header data for specified keys in one batch request.
-// The returned records are in positional order with the original key array order.
-// If a key is not found, the positional record will be nil.
-// The policy can be used to specify timeouts.
-// If the policy is nil, the default relevant policy will be used.
-func (clnt *ProxyClient) BatchGetHeader(policy *BatchPolicy, keys []*Key) ([]*Record, Error) {
- policy = clnt.getUsableBatchPolicy(policy)
-
- if len(keys) == 0 {
- return []*Record{}, nil
- }
-
- batchRecordsIfc := make([]BatchRecordIfc, 0, len(keys))
- for _, key := range keys {
- batchRecordsIfc = append(batchRecordsIfc, NewBatchReadHeader(clnt.DefaultBatchReadPolicy, key))
- }
-
- filteredOut, err := clnt.batchOperate(policy, batchRecordsIfc)
- records := make([]*Record, 0, len(keys))
- for i := range batchRecordsIfc {
- records = append(records, batchRecordsIfc[i].BatchRec().Record)
- }
-
- if filteredOut > 0 {
- err = chainErrors(ErrFilteredOut.err(), err)
- }
-
- return records, err
-}
-
-// BatchDelete deletes records for specified keys. If a key is not found, the corresponding result
-// BatchRecord.ResultCode will be types.KEY_NOT_FOUND_ERROR.
-//
-// Requires server version 6.0+
-func (clnt *ProxyClient) BatchDelete(policy *BatchPolicy, deletePolicy *BatchDeletePolicy, keys []*Key) ([]*BatchRecord, Error) {
- policy = clnt.getUsableBatchPolicy(policy)
-
- if len(keys) == 0 {
- return []*BatchRecord{}, nil
- }
-
- deletePolicy = clnt.getUsableBatchDeletePolicy(deletePolicy)
-
- batchRecordsIfc := make([]BatchRecordIfc, 0, len(keys))
- batchRecords := make([]*BatchRecord, 0, len(keys))
- for _, key := range keys {
- batchDelete, batchRecord := newBatchDelete(deletePolicy, key)
- batchRecordsIfc = append(batchRecordsIfc, batchDelete)
- batchRecords = append(batchRecords, batchRecord)
- }
-
- filteredOut, err := clnt.batchOperate(policy, batchRecordsIfc)
- if filteredOut > 0 {
- err = chainErrors(ErrFilteredOut.err(), err)
- }
- return batchRecords, err
-}
-
-func (clnt *ProxyClient) batchOperate(policy *BatchPolicy, records []BatchRecordIfc) (int, Error) {
- policy = clnt.getUsableBatchPolicy(policy)
-
- batchNode, err := newGrpcBatchOperateListIfc(policy, records)
- if err != nil && policy.RespondAllKeys {
- return 0, err
- }
-
- cmd := newBatchCommandOperate(clnt, batchNode, policy, records)
- return cmd.filteredOutCnt, cmd.ExecuteGRPC(clnt)
-}
-
-// BatchOperate will read/write multiple records for specified batch keys in one batch call.
-// This method allows different namespaces/bins for each key in the batch.
-// The returned records are located in the same list.
-//
-// BatchRecord can be *BatchRead, *BatchWrite, *BatchDelete or *BatchUDF.
-//
-// Requires server version 6.0+
-func (clnt *ProxyClient) BatchOperate(policy *BatchPolicy, records []BatchRecordIfc) Error {
- _, err := clnt.batchOperate(policy, records)
- return err
-}
-
-// BatchExecute will read/write multiple records for specified batch keys in one batch call.
-// This method allows different namespaces/bins for each key in the batch.
-// The returned records are located in the same list.
-//
-// BatchRecord can be *BatchRead, *BatchWrite, *BatchDelete or *BatchUDF.
-//
-// Requires server version 6.0+
-func (clnt *ProxyClient) BatchExecute(policy *BatchPolicy, udfPolicy *BatchUDFPolicy, keys []*Key, packageName string, functionName string, args ...Value) ([]*BatchRecord, Error) {
- if len(keys) == 0 {
- return []*BatchRecord{}, nil
- }
-
- batchRecordsIfc := make([]BatchRecordIfc, 0, len(keys))
- batchRecords := make([]*BatchRecord, 0, len(keys))
- for _, key := range keys {
- batchUDF, batchRecord := newBatchUDF(udfPolicy, key, packageName, functionName, args...)
- batchRecordsIfc = append(batchRecordsIfc, batchUDF)
- batchRecords = append(batchRecords, batchRecord)
- }
-
- filteredOut, err := clnt.batchOperate(policy, batchRecordsIfc)
- if filteredOut > 0 {
- err = chainErrors(ErrFilteredOut.err(), err)
- }
-
- return batchRecords, err
-}
-
-//-------------------------------------------------------
-// Generic Database Operations
-//-------------------------------------------------------
-
-// Operate performs multiple read/write operations on a single key in one batch request.
-// An example would be to add an integer value to an existing record and then
-// read the result, all in one database call.
-//
-// If the policy is nil, the default relevant policy will be used.
-func (clnt *ProxyClient) Operate(policy *WritePolicy, key *Key, operations ...*Operation) (*Record, Error) {
- return clnt.operate(policy, key, false, operations...)
-}
-
-func (clnt *ProxyClient) operate(policy *WritePolicy, key *Key, useOpResults bool, operations ...*Operation) (*Record, Error) {
- policy = clnt.getUsableWritePolicy(policy)
- args, err := newOperateArgs(nil, policy, key, operations)
- if err != nil {
- return nil, err
- }
-
- command, err := newOperateCommand(nil, policy, key, args, useOpResults)
- if err != nil {
- return nil, err
- }
-
- if err := command.ExecuteGRPC(clnt); err != nil {
- return nil, err
- }
- return command.GetRecord(), nil
-}
-
-//-------------------------------------------------------
-// Scan Operations
-//-------------------------------------------------------
-
-// ScanPartitions Read records in specified namespace, set and partition filter.
-// If the policy's concurrentNodes is specified, each server node will be read in
-// parallel. Otherwise, server nodes are read sequentially.
-// If partitionFilter is nil, all partitions will be scanned.
-// If the policy is nil, the default relevant policy will be used.
-// This method is only supported by Aerospike 4.9+ servers.
-func (clnt *ProxyClient) ScanPartitions(apolicy *ScanPolicy, partitionFilter *PartitionFilter, namespace string, setName string, binNames ...string) (*Recordset, Error) {
- policy := *clnt.getUsableScanPolicy(apolicy)
-
- // result recordset
- tracker := newPartitionTracker(&policy.MultiPolicy, partitionFilter, nil)
- res := newRecordset(policy.RecordQueueSize, 1)
- cmd := newGrpcScanPartitionCommand(&policy, tracker, partitionFilter, namespace, setName, binNames, res)
- go cmd.ExecuteGRPC(clnt)
-
- return res, nil
-}
-
-// ScanAll reads all records in specified namespace and set from all nodes.
-// If the policy's concurrentNodes is specified, each server node will be read in
-// parallel. Otherwise, server nodes are read sequentially.
-// If the policy is nil, the default relevant policy will be used.
-func (clnt *ProxyClient) ScanAll(apolicy *ScanPolicy, namespace string, setName string, binNames ...string) (*Recordset, Error) {
- return clnt.ScanPartitions(apolicy, NewPartitionFilterAll(), namespace, setName, binNames...)
-}
-
-// scanNodePartitions reads all records in specified namespace and set for one node only.
-// If the policy is nil, the default relevant policy will be used.
-func (clnt *ProxyClient) scanNodePartitions(apolicy *ScanPolicy, node *Node, namespace string, setName string, binNames ...string) (*Recordset, Error) {
- panic(notSupportedInProxyClient)
-}
-
-// ScanNode reads all records in specified namespace and set for one node only.
-// If the policy is nil, the default relevant policy will be used.
-func (clnt *ProxyClient) ScanNode(apolicy *ScanPolicy, node *Node, namespace string, setName string, binNames ...string) (*Recordset, Error) {
- panic(notSupportedInProxyClient)
-}
-
-//---------------------------------------------------------------
-// User defined functions (Supported by Aerospike 3+ servers only)
-//---------------------------------------------------------------
-
-// RegisterUDFFromFile reads a file from file system and registers
-// the containing a package user defined functions with the server.
-// This asynchronous server call will return before command is complete.
-// The user can optionally wait for command completion by using the returned
-// RegisterTask instance.
-//
-// This method is only supported by Aerospike 3+ servers.
-// If the policy is nil, the default relevant policy will be used.
-func (clnt *ProxyClient) RegisterUDFFromFile(policy *WritePolicy, clientPath string, serverPath string, language Language) (*RegisterTask, Error) {
- panic(notSupportedInProxyClient)
-}
-
-// RegisterUDF registers a package containing user defined functions with server.
-// This asynchronous server call will return before command is complete.
-// The user can optionally wait for command completion by using the returned
-// RegisterTask instance.
-//
-// This method is only supported by Aerospike 3+ servers.
-// If the policy is nil, the default relevant policy will be used.
-func (clnt *ProxyClient) RegisterUDF(policy *WritePolicy, udfBody []byte, serverPath string, language Language) (*RegisterTask, Error) {
- panic(notSupportedInProxyClient)
-}
-
-// RemoveUDF removes a package containing user defined functions in the server.
-// This asynchronous server call will return before command is complete.
-// The user can optionally wait for command completion by using the returned
-// RemoveTask instance.
-//
-// This method is only supported by Aerospike 3+ servers.
-// If the policy is nil, the default relevant policy will be used.
-func (clnt *ProxyClient) RemoveUDF(policy *WritePolicy, udfName string) (*RemoveTask, Error) {
- panic(notSupportedInProxyClient)
-}
-
-// ListUDF lists all packages containing user defined functions in the server.
-// This method is only supported by Aerospike 3+ servers.
-// If the policy is nil, the default relevant policy will be used.
-func (clnt *ProxyClient) ListUDF(policy *BasePolicy) ([]*UDF, Error) {
- panic(notSupportedInProxyClient)
-}
-
-// Execute executes a user defined function on server and return results.
-// The function operates on a single record.
-// The package name is used to locate the udf file location:
-//
-// udf file = /.lua
-//
-// This method is only supported by Aerospike 3+ servers.
-// If the policy is nil, the default relevant policy will be used.
-func (clnt *ProxyClient) Execute(policy *WritePolicy, key *Key, packageName string, functionName string, args ...Value) (interface{}, Error) {
- policy = clnt.getUsableWritePolicy(policy)
-
- command, err := newExecuteCommand(nil, policy, key, packageName, functionName, NewValueArray(args))
- if err != nil {
- return nil, err
- }
-
- if err := command.ExecuteGRPC(clnt); err != nil {
- return nil, err
- }
-
- if rec := command.GetRecord(); rec != nil && rec.Bins != nil {
- return rec.Bins["SUCCESS"], nil
- }
-
- return nil, nil
-}
-
-func (clnt *ProxyClient) execute(policy *WritePolicy, key *Key, packageName string, functionName string, args ...Value) (*Record, Error) {
- return nil, newError(types.UNSUPPORTED_FEATURE)
-}
-
-//----------------------------------------------------------
-// Query/Execute (Supported by Aerospike 3+ servers only)
-//----------------------------------------------------------
-
-// QueryExecute applies operations on records that match the statement filter.
-// Records are not returned to the Grpcclient.
-// This asynchronous server call will return before the command is complete.
-// The user can optionally wait for command completion by using the returned
-// ExecuteTask instance.
-//
-// This method is only supported by Aerospike 3+ servers.
-// If the policy is nil, the default relevant policy will be used.
-func (clnt *ProxyClient) QueryExecute(policy *QueryPolicy,
- writePolicy *WritePolicy,
- statement *Statement,
- ops ...*Operation,
-) (*ExecuteTask, Error) {
- policy = clnt.getUsableQueryPolicy(policy)
- writePolicy = clnt.getUsableWritePolicy(writePolicy)
-
- command := newServerCommand(nil, policy, writePolicy, statement, statement.TaskId, ops)
-
- if err := command.ExecuteGRPC(clnt); err != nil {
- return nil, err
- }
-
- return newGRPCExecuteTask(clnt, statement), nil
-}
-
-// ExecuteUDF applies user defined function on records that match the statement filter.
-// Records are not returned to the Grpcclient.
-// This asynchronous server call will return before command is complete.
-// The user can optionally wait for command completion by using the returned
-// ExecuteTask instance.
-//
-// This method is only supported by Aerospike 3+ servers.
-// If the policy is nil, the default relevant policy will be used.
-func (clnt *ProxyClient) ExecuteUDF(policy *QueryPolicy,
- statement *Statement,
- packageName string,
- functionName string,
- functionArgs ...Value,
-) (*ExecuteTask, Error) {
- policy = clnt.getUsableQueryPolicy(policy)
- wpolicy := clnt.getUsableWritePolicy(nil)
-
- nstatement := *statement
- nstatement.SetAggregateFunction(packageName, functionName, functionArgs, false)
- command := newServerCommand(nil, policy, wpolicy, &nstatement, nstatement.TaskId, nil)
-
- if err := command.ExecuteGRPC(clnt); err != nil {
- return nil, err
- }
-
- return newGRPCExecuteTask(clnt, &nstatement), nil
-}
-
-// ExecuteUDFNode applies user defined function on records that match the statement filter on the specified node.
-// Records are not returned to the Grpcclient.
-// This asynchronous server call will return before command is complete.
-// The user can optionally wait for command completion by using the returned
-// ExecuteTask instance.
-//
-// This method is only supported by Aerospike 3+ servers.
-// If the policy is nil, the default relevant policy will be used.
-func (clnt *ProxyClient) ExecuteUDFNode(policy *QueryPolicy,
- node *Node,
- statement *Statement,
- packageName string,
- functionName string,
- functionArgs ...Value,
-) (*ExecuteTask, Error) {
- panic(notSupportedInProxyClient)
-}
-
-// SetXDRFilter sets XDR filter for given datacenter name and namespace. The expression filter indicates
-// which records XDR should ship to the datacenter.
-// Pass nil as filter to remove the currentl filter on the server.
-func (clnt *ProxyClient) SetXDRFilter(policy *InfoPolicy, datacenter string, namespace string, filter *Expression) Error {
- panic(notSupportedInProxyClient)
-}
-
-//--------------------------------------------------------
-// Query functions (Supported by Aerospike 3+ servers only)
-//--------------------------------------------------------
-
-// QueryPartitions executes a query for specified partitions and returns a recordset.
-// The query executor puts records on the channel from separate goroutines.
-// The caller can concurrently pop records off the channel through the
-// Recordset.Records channel.
-//
-// This method is only supported by Aerospike 4.9+ servers.
-// If the policy is nil, the default relevant policy will be used.
-func (clnt *ProxyClient) QueryPartitions(policy *QueryPolicy, statement *Statement, partitionFilter *PartitionFilter) (*Recordset, Error) {
- policy = clnt.getUsableQueryPolicy(policy)
- // result recordset
- tracker := newPartitionTracker(&policy.MultiPolicy, partitionFilter, nil)
- res := newRecordset(policy.RecordQueueSize, 1)
- cmd := newGrpcQueryPartitionCommand(policy, nil, statement, nil, tracker, partitionFilter, res)
- go cmd.ExecuteGRPC(clnt)
-
- return res, nil
-}
-
-// Query executes a query and returns a Recordset.
-// The query executor puts records on the channel from separate goroutines.
-// The caller can concurrently pop records off the channel through the
-// Recordset.Records channel.
-//
-// This method is only supported by Aerospike 3+ servers.
-// If the policy is nil, the default relevant policy will be used.
-func (clnt *ProxyClient) Query(policy *QueryPolicy, statement *Statement) (*Recordset, Error) {
- return clnt.QueryPartitions(policy, statement, NewPartitionFilterAll())
-}
-
-// QueryNode executes a query on a specific node and returns a recordset.
-// The caller can concurrently pop records off the channel through the
-// record channel.
-//
-// This method is only supported by Aerospike 3+ servers.
-// If the policy is nil, the default relevant policy will be used.
-func (clnt *ProxyClient) QueryNode(policy *QueryPolicy, node *Node, statement *Statement) (*Recordset, Error) {
- panic(notSupportedInProxyClient)
-}
-
-func (clnt *ProxyClient) queryNodePartitions(policy *QueryPolicy, node *Node, statement *Statement) (*Recordset, Error) {
- panic(notSupportedInProxyClient)
-}
-
-//--------------------------------------------------------
-// Index functions (Supported by Aerospike 3+ servers only)
-//--------------------------------------------------------
-
-// CreateIndex creates a secondary index.
-// This asynchronous server call will return before the command is complete.
-// The user can optionally wait for command completion by using the returned
-// IndexTask instance.
-// This method is only supported by Aerospike 3+ servers.
-// If the policy is nil, the default relevant policy will be used.
-func (clnt *ProxyClient) CreateIndex(
- policy *WritePolicy,
- namespace string,
- setName string,
- indexName string,
- binName string,
- indexType IndexType,
-) (*IndexTask, Error) {
- panic(notSupportedInProxyClient)
-}
-
-// CreateComplexIndex creates a secondary index, with the ability to put indexes
-// on bin containing complex data types, e.g: Maps and Lists.
-// This asynchronous server call will return before the command is complete.
-// The user can optionally wait for command completion by using the returned
-// IndexTask instance.
-// This method is only supported by Aerospike 3+ servers.
-// If the policy is nil, the default relevant policy will be used.
-func (clnt *ProxyClient) CreateComplexIndex(
- policy *WritePolicy,
- namespace string,
- setName string,
- indexName string,
- binName string,
- indexType IndexType,
- indexCollectionType IndexCollectionType,
- ctx ...*CDTContext,
-) (*IndexTask, Error) {
- panic(notSupportedInProxyClient)
-}
-
-// DropIndex deletes a secondary index. It will block until index is dropped on all nodes.
-// This method is only supported by Aerospike 3+ servers.
-// If the policy is nil, the default relevant policy will be used.
-func (clnt *ProxyClient) DropIndex(
- policy *WritePolicy,
- namespace string,
- setName string,
- indexName string,
-) Error {
- panic(notSupportedInProxyClient)
-}
-
-// Truncate removes records in specified namespace/set efficiently. This method is many orders of magnitude
-// faster than deleting records one at a time. Works with Aerospike Server versions >= 3.12.
-// This asynchronous server call may return before the truncation is complete. The user can still
-// write new records after the server call returns because new records will have last update times
-// greater than the truncate cutoff (set at the time of truncate call).
-// For more information, See https://www.aerospike.com/docs/reference/info#truncate
-func (clnt *ProxyClient) Truncate(policy *InfoPolicy, namespace, set string, beforeLastUpdate *time.Time) Error {
- panic(notSupportedInProxyClient)
-}
-
-//-------------------------------------------------------
-// User administration
-//-------------------------------------------------------
-
-// CreateUser creates a new user with password and roles. Clear-text password will be hashed using bcrypt
-// before sending to server.
-func (clnt *ProxyClient) CreateUser(policy *AdminPolicy, user string, password string, roles []string) Error {
- panic(notSupportedInProxyClient)
-}
-
-// DropUser removes a user from the cluster.
-func (clnt *ProxyClient) DropUser(policy *AdminPolicy, user string) Error {
- panic(notSupportedInProxyClient)
-}
-
-// ChangePassword changes a user's password. Clear-text password will be hashed using bcrypt before sending to server.
-func (clnt *ProxyClient) ChangePassword(policy *AdminPolicy, user string, password string) Error {
- panic(notSupportedInProxyClient)
-}
-
-// GrantRoles adds roles to user's list of roles.
-func (clnt *ProxyClient) GrantRoles(policy *AdminPolicy, user string, roles []string) Error {
- panic(notSupportedInProxyClient)
-}
-
-// RevokeRoles removes roles from user's list of roles.
-func (clnt *ProxyClient) RevokeRoles(policy *AdminPolicy, user string, roles []string) Error {
- panic(notSupportedInProxyClient)
-}
-
-// QueryUser retrieves roles for a given user.
-func (clnt *ProxyClient) QueryUser(policy *AdminPolicy, user string) (*UserRoles, Error) {
- panic(notSupportedInProxyClient)
-}
-
-// QueryUsers retrieves all users and their roles.
-func (clnt *ProxyClient) QueryUsers(policy *AdminPolicy) ([]*UserRoles, Error) {
- panic(notSupportedInProxyClient)
-}
-
-// QueryRole retrieves privileges for a given role.
-func (clnt *ProxyClient) QueryRole(policy *AdminPolicy, role string) (*Role, Error) {
- panic(notSupportedInProxyClient)
-}
-
-// QueryRoles retrieves all roles and their privileges.
-func (clnt *ProxyClient) QueryRoles(policy *AdminPolicy) ([]*Role, Error) {
- panic(notSupportedInProxyClient)
-}
-
-// CreateRole creates a user-defined role.
-// Quotas require server security configuration "enable-quotas" to be set to true.
-// Pass 0 for quota values for no limit.
-func (clnt *ProxyClient) CreateRole(policy *AdminPolicy, roleName string, privileges []Privilege, whitelist []string, readQuota, writeQuota uint32) Error {
- panic(notSupportedInProxyClient)
-}
-
-// DropRole removes a user-defined role.
-func (clnt *ProxyClient) DropRole(policy *AdminPolicy, roleName string) Error {
- panic(notSupportedInProxyClient)
-}
-
-// GrantPrivileges grant privileges to a user-defined role.
-func (clnt *ProxyClient) GrantPrivileges(policy *AdminPolicy, roleName string, privileges []Privilege) Error {
- panic(notSupportedInProxyClient)
-}
-
-// RevokePrivileges revokes privileges from a user-defined role.
-func (clnt *ProxyClient) RevokePrivileges(policy *AdminPolicy, roleName string, privileges []Privilege) Error {
- panic(notSupportedInProxyClient)
-}
-
-// SetWhitelist sets IP address whitelist for a role. If whitelist is nil or empty, it removes existing whitelist from role.
-func (clnt *ProxyClient) SetWhitelist(policy *AdminPolicy, roleName string, whitelist []string) Error {
- panic(notSupportedInProxyClient)
-}
-
-// SetQuotas sets maximum reads/writes per second limits for a role. If a quota is zero, the limit is removed.
-// Quotas require server security configuration "enable-quotas" to be set to true.
-// Pass 0 for quota values for no limit.
-func (clnt *ProxyClient) SetQuotas(policy *AdminPolicy, roleName string, readQuota, writeQuota uint32) Error {
- panic(notSupportedInProxyClient)
-}
-
-// RequestInfo sends an info command to the server. The proxy server should be configured to have allowed
-// the commands to go through.
-func (clnt *ProxyClient) RequestInfo(policy *InfoPolicy, commands ...string) (map[string]string, Error) {
- policy = clnt.getUsableInfoPolicy(policy)
-
- req := kvs.AerospikeRequestPayload{
- Id: rand.Uint32(),
- Iteration: 1,
- InfoRequest: &kvs.InfoRequest{
- InfoPolicy: policy.grpc(),
- Commands: commands,
- },
- }
-
- conn, err := clnt.grpcConn()
- if err != nil {
- return nil, err
- }
-
- client := kvs.NewInfoClient(conn)
-
- ctx, cancel := policy.grpcDeadlineContext()
- defer cancel()
-
- res, gerr := client.Info(ctx, &req)
- if gerr != nil {
- return nil, newGrpcError(false, gerr, gerr.Error())
- }
-
- defer clnt.returnGrpcConnToPool(conn)
-
- if res.GetStatus() != 0 {
- return nil, newGrpcStatusError(res)
- }
-
- info := info{
- msg: &types.Message{
- Data: res.Payload,
- },
- }
-
- return info.parseMultiResponse()
-}
-
-//-------------------------------------------------------
-// Access Methods
-//-------------------------------------------------------
-
-// Cluster exposes the cluster object to the user
-func (clnt *ProxyClient) Cluster() *Cluster {
- panic(notSupportedInProxyClient)
-}
-
-// String implements the Stringer interface for Grpcclient
-func (clnt *ProxyClient) String() string {
- return ""
-}
-
-// Stats returns internal statistics regarding the inner state of the Grpcclient and the cluster.
-func (clnt *ProxyClient) Stats() (map[string]interface{}, Error) {
- panic(notSupportedInProxyClient)
-}
-
-// WarmUp fills the connection pool with connections for all nodes.
-// This is necessary on startup for high traffic programs.
-// If the count is <= 0, the connection queue will be filled.
-// If the count is more than the size of the pool, the pool will be filled.
-// Note: One connection per node is reserved for tend operations and is not used for transactions.
-func (clnt *ProxyClient) WarmUp(count int) (int, Error) {
- if count <= 0 || count > clnt.clientPolicy.ConnectionQueueSize {
- count = clnt.clientPolicy.ConnectionQueueSize
- }
-
- for i := 0; i < count; i++ {
- conn, err := clnt.createGrpcConn(!clnt.clientPolicy.RequiresAuthentication())
- if err != nil {
- return i, err
- }
- clnt.returnGrpcConnToPool(conn)
- }
-
- return count, nil
-}
-
-//-------------------------------------------------------
-// Internal Methods
-//-------------------------------------------------------
-
-func (clnt *ProxyClient) grpcMode() bool {
- return clnt.grpcConnPool != nil
-}
-
-//-------------------------------------------------------
-// Policy Methods
-//-------------------------------------------------------
-
-func (clnt *ProxyClient) getUsablePolicy(policy *BasePolicy) *BasePolicy {
- if policy == nil {
- if clnt.DefaultPolicy != nil {
- return clnt.DefaultPolicy
- }
- return NewPolicy()
- }
- return policy
-}
-
-func (clnt *ProxyClient) getUsableBatchPolicy(policy *BatchPolicy) *BatchPolicy {
- if policy == nil {
- if clnt.DefaultBatchPolicy != nil {
- return clnt.DefaultBatchPolicy
- }
- return NewBatchPolicy()
- }
- return policy
-}
-
-func (clnt *ProxyClient) getUsableBaseBatchWritePolicy(policy *BatchPolicy) *BatchPolicy {
- if policy == nil {
- if clnt.DefaultBatchPolicy != nil {
- return clnt.DefaultBatchPolicy
- }
- return NewBatchPolicy()
- }
- return policy
-}
-
-func (clnt *ProxyClient) getUsableBatchReadPolicy(policy *BatchReadPolicy) *BatchReadPolicy {
- if policy == nil {
- if clnt.DefaultBatchReadPolicy != nil {
- return clnt.DefaultBatchReadPolicy
- }
- return NewBatchReadPolicy()
- }
- return policy
-}
-
-func (clnt *ProxyClient) getUsableBatchWritePolicy(policy *BatchWritePolicy) *BatchWritePolicy {
- if policy == nil {
- if clnt.DefaultBatchWritePolicy != nil {
- return clnt.DefaultBatchWritePolicy
- }
- return NewBatchWritePolicy()
- }
- return policy
-}
-
-func (clnt *ProxyClient) getUsableBatchDeletePolicy(policy *BatchDeletePolicy) *BatchDeletePolicy {
- if policy == nil {
- if clnt.DefaultBatchDeletePolicy != nil {
- return clnt.DefaultBatchDeletePolicy
- }
- return NewBatchDeletePolicy()
- }
- return policy
-}
-
-func (clnt *ProxyClient) getUsableBatchUDFPolicy(policy *BatchUDFPolicy) *BatchUDFPolicy {
- if policy == nil {
- if clnt.DefaultBatchUDFPolicy != nil {
- return clnt.DefaultBatchUDFPolicy
- }
- return NewBatchUDFPolicy()
- }
- return policy
-}
-
-func (clnt *ProxyClient) getUsableWritePolicy(policy *WritePolicy) *WritePolicy {
- if policy == nil {
- if clnt.DefaultWritePolicy != nil {
- return clnt.DefaultWritePolicy
- }
- return NewWritePolicy(0, 0)
- }
- return policy
-}
-
-func (clnt *ProxyClient) getUsableScanPolicy(policy *ScanPolicy) *ScanPolicy {
- if policy == nil {
- if clnt.DefaultScanPolicy != nil {
- return clnt.DefaultScanPolicy
- }
- return NewScanPolicy()
- }
- return policy
-}
-
-func (clnt *ProxyClient) getUsableQueryPolicy(policy *QueryPolicy) *QueryPolicy {
- if policy == nil {
- if clnt.DefaultQueryPolicy != nil {
- return clnt.DefaultQueryPolicy
- }
- return NewQueryPolicy()
- }
- return policy
-}
-
-func (clnt *ProxyClient) getUsableAdminPolicy(policy *AdminPolicy) *AdminPolicy {
- if policy == nil {
- if clnt.DefaultAdminPolicy != nil {
- return clnt.DefaultAdminPolicy
- }
- return NewAdminPolicy()
- }
- return policy
-}
-
-func (clnt *ProxyClient) getUsableInfoPolicy(policy *InfoPolicy) *InfoPolicy {
- if policy == nil {
- if clnt.DefaultInfoPolicy != nil {
- return clnt.DefaultInfoPolicy
- }
- return NewInfoPolicy()
- }
- return policy
-}
-
-//-------------------------------------------------------
-// Utility Functions
-//-------------------------------------------------------
-
-// grpcConnectionHeap is a non-blocking LIFO heap.
-// If the heap is empty, nil is returned.
-// if the heap is full, offer will return false
-type grpcConnectionHeap struct {
- head, tail uint32
- data []*grpc.ClientConn
- size uint32
- full bool
- mutex sync.Mutex
-}
-
-// newGrpcConnectionHeap creates a new heap with initial size.
-func newGrpcConnectionHeap(size int) *grpcConnectionHeap {
- if size <= 0 {
- panic("Heap size cannot be less than 1")
- }
-
- return &grpcConnectionHeap{
- full: false,
- data: make([]*grpc.ClientConn, uint32(size)),
- size: uint32(size),
- }
-}
-
-func (h *grpcConnectionHeap) cleanup() {
- h.mutex.Lock()
- defer h.mutex.Unlock()
-
- for i := range h.data {
- if h.data[i] != nil {
- h.data[i].Close()
- }
-
- h.data[i] = nil
- }
-
- // make sure offer and poll both fail
- h.data = nil
- h.full = true
- h.head = 0
- h.tail = 0
-}
-
-// Put adds an item to the heap unless the heap is full.
-// In case the heap is full, the item will not be added to the heap
-// and false will be returned
-func (h *grpcConnectionHeap) Put(conn *grpc.ClientConn) bool {
- h.mutex.Lock()
-
- // make sure heap is not full or cleaned up
- if h.full || len(h.data) == 0 {
- h.mutex.Unlock()
- return false
- }
-
- h.head = (h.head + 1) % h.size
- h.full = (h.head == h.tail)
- h.data[h.head] = conn
- h.mutex.Unlock()
- return true
-}
-
-// Poll removes and returns an item from the heap.
-// If the heap is empty, nil will be returned.
-func (h *grpcConnectionHeap) Get() (res *grpc.ClientConn) {
- h.mutex.Lock()
-
- // the heap has been cleaned up
- if len(h.data) == 0 {
- h.mutex.Unlock()
- return nil
- }
-
- // if heap is not empty
- if (h.tail != h.head) || h.full {
- res = h.data[h.head]
- h.data[h.head] = nil
-
- h.full = false
- if h.head == 0 {
- h.head = h.size - 1
- } else {
- h.head--
- }
- }
-
- h.mutex.Unlock()
- return res
-}
diff --git a/proxy_client_app_engine_exclusions.go b/proxy_client_app_engine_exclusions.go
deleted file mode 100644
index f6484ef8..00000000
--- a/proxy_client_app_engine_exclusions.go
+++ /dev/null
@@ -1,28 +0,0 @@
-//go:build !app_engine && as_proxy
-
-// Copyright 2014-2022 Aerospike, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package aerospike
-
-// QueryAggregate executes a Map/Reduce query and returns the results.
-// The query executor puts records on the channel from separate goroutines.
-// The caller can concurrently pop records off the channel through the
-// Recordset.Records channel.
-//
-// This method is only supported by Aerospike 3+ servers.
-// If the policy is nil, the default relevant policy will be used.
-func (clnt *ProxyClient) QueryAggregate(policy *QueryPolicy, statement *Statement, packageName, functionName string, functionArgs ...Value) (*Recordset, Error) {
- panic("NOT SUPPORTED")
-}
diff --git a/proxy_client_reflect.go b/proxy_client_reflect.go
deleted file mode 100644
index 8c656ff8..00000000
--- a/proxy_client_reflect.go
+++ /dev/null
@@ -1,231 +0,0 @@
-//go:build !as_performance && as_proxy
-
-// Copyright 2014-2022 Aerospike, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package aerospike
-
-import (
- "reflect"
-
- "github.com/aerospike/aerospike-client-go/v7/types"
-)
-
-// PutObject writes record bin(s) to the server.
-// The policy specifies the transaction timeout, record expiration and how the transaction is
-// handled when the record already exists.
-// If the policy is nil, the default relevant policy will be used.
-// A struct can be tagged to influence the way the object is put in the database:
-//
-// type Person struct {
-// TTL uint32 `asm:"ttl"`
-// RecGen uint32 `asm:"gen"`
-// Name string `as:"name"`
-// Address string `as:"desc,omitempty"`
-// Age uint8 `as:",omitempty"`
-// Password string `as:"-"`
-// }
-//
-// Tag `as:` denotes Aerospike fields. The first value will be the alias for the field.
-// `,omitempty` (without any spaces between the comma and the word) will act like the
-// json package, and will not send the value of the field to the database if the value is zero value.
-// Tag `asm:` denotes Aerospike Meta fields, and includes ttl and generation values.
-// If a tag is marked with `-`, it will not be sent to the database at all.
-// Note: Tag `as` can be replaced with any other user-defined tag via the function `SetAerospikeTag`.
-func (clnt *ProxyClient) PutObject(policy *WritePolicy, key *Key, obj interface{}) (err Error) {
- policy = clnt.getUsableWritePolicy(policy)
-
- binMap := marshal(obj)
- command, err := newWriteCommand(nil, policy, key, nil, binMap, _WRITE)
- if err != nil {
- return err
- }
-
- res := command.ExecuteGRPC(clnt)
- return res
-}
-
-// GetObject reads a record for specified key and puts the result into the provided object.
-// The policy can be used to specify timeouts.
-// If the policy is nil, the default relevant policy will be used.
-func (clnt *ProxyClient) GetObject(policy *BasePolicy, key *Key, obj interface{}) Error {
- policy = clnt.getUsablePolicy(policy)
-
- rval := reflect.ValueOf(obj)
- binNames := objectMappings.getFields(rval.Type())
-
- command, err := newReadCommand(nil, policy, key, binNames, nil)
- if err != nil {
- return err
- }
-
- command.object = &rval
-
- return command.ExecuteGRPC(clnt)
-}
-
-// BatchGetObjects reads multiple record headers and bins for specified keys in one batch request.
-// The returned objects are in positional order with the original key array order.
-// If a key is not found, the positional object will not change, and the positional found boolean will be false.
-// The policy can be used to specify timeouts.
-// If the policy is nil, the default relevant policy will be used.
-func (clnt *ProxyClient) BatchGetObjects(policy *BatchPolicy, keys []*Key, objects []interface{}) (found []bool, err Error) {
- policy = clnt.getUsableBatchPolicy(policy)
-
- // check the size of key and objects
- if len(keys) != len(objects) {
- return nil, newError(types.PARAMETER_ERROR, "wrong number of arguments to BatchGetObjects: number of keys and objects do not match")
- }
-
- if len(keys) == 0 {
- return nil, newError(types.PARAMETER_ERROR, "wrong number of arguments to BatchGetObjects: keys are empty")
- }
-
- binSet := map[string]struct{}{}
- objectsVal := make([]*reflect.Value, len(objects))
- for i := range objects {
- rval := reflect.ValueOf(objects[i])
- objectsVal[i] = &rval
- for _, bn := range objectMappings.getFields(rval.Type()) {
- binSet[bn] = struct{}{}
- }
- }
-
- binNames := make([]string, 0, len(binSet))
- for binName := range binSet {
- binNames = append(binNames, binName)
- }
-
- batchRecordsIfc := make([]BatchRecordIfc, 0, len(keys))
- batchRecords := make([]*BatchRecord, 0, len(keys))
- for _, key := range keys {
- batchRead, batchRecord := newBatchRead(nil, key, binNames)
- batchRecordsIfc = append(batchRecordsIfc, batchRead)
- batchRecords = append(batchRecords, batchRecord)
- }
-
- batchNode, err := newGrpcBatchOperateListIfc(policy, batchRecordsIfc)
- if err != nil && policy.RespondAllKeys {
- return nil, err
- }
-
- cmd := newBatchCommandOperate(clnt, batchNode, policy, batchRecordsIfc)
-
- objectsFound := make([]bool, len(keys))
- cmd.objects = objectsVal
- cmd.objectsFound = objectsFound
-
- err = cmd.ExecuteGRPC(clnt)
- // if filteredOut > 0 {
- // err = chainErrors(ErrFilteredOut.err(), err)
- // }
-
- return objectsFound, err
-}
-
-// ScanPartitionObjects Reads records in specified namespace, set and partition filter.
-// If the policy's concurrentNodes is specified, each server node will be read in
-// parallel. Otherwise, server nodes are read sequentially.
-// If partitionFilter is nil, all partitions will be scanned.
-// If the policy is nil, the default relevant policy will be used.
-// This method is only supported by Aerospike 4.9+ servers.
-func (clnt *ProxyClient) ScanPartitionObjects(apolicy *ScanPolicy, objChan interface{}, partitionFilter *PartitionFilter, namespace string, setName string, binNames ...string) (*Recordset, Error) {
- policy := *clnt.getUsableScanPolicy(apolicy)
-
- // result recordset
- res := &Recordset{
- objectset: *newObjectset(reflect.ValueOf(objChan), 1),
- }
- tracker := newPartitionTracker(&policy.MultiPolicy, partitionFilter, nil)
- cmd := newGrpcScanPartitionCommand(&policy, tracker, partitionFilter, namespace, setName, binNames, res)
- go cmd.ExecuteGRPC(clnt)
-
- return res, nil
-
-}
-
-// ScanAllObjects reads all records in specified namespace and set from all nodes.
-// If the policy's concurrentNodes is specified, each server node will be read in
-// parallel. Otherwise, server nodes are read sequentially.
-// If the policy is nil, the default relevant policy will be used.
-func (clnt *ProxyClient) ScanAllObjects(apolicy *ScanPolicy, objChan interface{}, namespace string, setName string, binNames ...string) (*Recordset, Error) {
- return clnt.ScanPartitionObjects(apolicy, objChan, NewPartitionFilterAll(), namespace, setName, binNames...)
-}
-
-// scanNodePartitions reads all records in specified namespace and set for one node only.
-// If the policy is nil, the default relevant policy will be used.
-func (clnt *ProxyClient) scanNodePartitionsObjects(apolicy *ScanPolicy, node *Node, objChan interface{}, namespace string, setName string, binNames ...string) (*Recordset, Error) {
- panic(notSupportedInProxyClient)
-}
-
-// ScanNodeObjects reads all records in specified namespace and set for one node only,
-// and marshalls the results into the objects of the provided channel in Recordset.
-// If the policy is nil, the default relevant policy will be used.
-// The resulting records will be marshalled into the objChan.
-// objChan will be closed after all the records are read.
-func (clnt *ProxyClient) ScanNodeObjects(apolicy *ScanPolicy, node *Node, objChan interface{}, namespace string, setName string, binNames ...string) (*Recordset, Error) {
- panic(notSupportedInProxyClient)
-}
-
-// scanNodeObjects reads all records in specified namespace and set for one node only,
-// and marshalls the results into the objects of the provided channel in Recordset.
-// If the policy is nil, the default relevant policy will be used.
-func (clnt *ProxyClient) scanNodeObjects(policy *ScanPolicy, node *Node, recordset *Recordset, namespace string, setName string, binNames ...string) Error {
- panic(notSupportedInProxyClient)
-}
-
-// QueryPartitionObjects executes a query for specified partitions and returns a recordset.
-// The query executor puts records on the channel from separate goroutines.
-// The caller can concurrently pop records off the channel through the
-// Recordset.Records channel.
-//
-// This method is only supported by Aerospike 4.9+ servers.
-// If the policy is nil, the default relevant policy will be used.
-func (clnt *ProxyClient) QueryPartitionObjects(policy *QueryPolicy, statement *Statement, objChan interface{}, partitionFilter *PartitionFilter) (*Recordset, Error) {
- policy = clnt.getUsableQueryPolicy(policy)
-
- // result recordset
- res := &Recordset{
- objectset: *newObjectset(reflect.ValueOf(objChan), 1),
- }
- tracker := newPartitionTracker(&policy.MultiPolicy, partitionFilter, nil)
- cmd := newGrpcQueryPartitionCommand(policy, nil, statement, nil, tracker, partitionFilter, res)
- go cmd.ExecuteGRPC(clnt)
-
- return res, nil
-
-}
-
-// QueryObjects executes a query on all nodes in the cluster and marshals the records into the given channel.
-// The query executor puts records on the channel from separate goroutines.
-// The caller can concurrently pop objects.
-//
-// This method is only supported by Aerospike 3+ servers.
-// If the policy is nil, the default relevant policy will be used.
-func (clnt *ProxyClient) QueryObjects(policy *QueryPolicy, statement *Statement, objChan interface{}) (*Recordset, Error) {
- return clnt.QueryPartitionObjects(policy, statement, objChan, NewPartitionFilterAll())
-}
-
-func (clnt *ProxyClient) queryNodePartitionsObjects(policy *QueryPolicy, node *Node, statement *Statement, objChan interface{}) (*Recordset, Error) {
- panic(notSupportedInProxyClient)
-}
-
-// QueryNodeObjects executes a query on a specific node and marshals the records into the given channel.
-// The caller can concurrently pop records off the channel.
-//
-// This method is only supported by Aerospike 3+ servers.
-// If the policy is nil, the default relevant policy will be used.
-func (clnt *ProxyClient) QueryNodeObjects(policy *QueryPolicy, node *Node, statement *Statement, objChan interface{}) (*Recordset, Error) {
- panic(notSupportedInProxyClient)
-}
diff --git a/proxy_client_test.go b/proxy_client_test.go
deleted file mode 100644
index 9a50ebff..00000000
--- a/proxy_client_test.go
+++ /dev/null
@@ -1,42 +0,0 @@
-//go:build as_proxy
-
-// Copyright 2014-2022 Aerospike, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package aerospike_test
-
-import (
- as "github.com/aerospike/aerospike-client-go/v7"
-
- gg "github.com/onsi/ginkgo/v2"
- gm "github.com/onsi/gomega"
-)
-
-// ALL tests are isolated by SetName and Key, which are 50 random characters
-var _ = gg.Describe("Aerospike Proxy Client", func() {
-
- gg.Describe("Info operations on proxy client", func() {
- gg.BeforeEach(func() {
- if !*proxy {
- gg.Skip("Only supported in grpc environment")
- }
- })
-
- gg.It("must successfully call info command", func() {
- _, err := client.(*as.ProxyClient).RequestInfo(nil)
- gm.Expect(err).ToNot(gm.HaveOccurred())
- })
- })
-
-})
diff --git a/proxy_commands.go b/proxy_commands.go
deleted file mode 100644
index 11cfcfae..00000000
--- a/proxy_commands.go
+++ /dev/null
@@ -1,517 +0,0 @@
-//go:build as_proxy
-
-package aerospike
-
-import (
- "math/rand"
-
- kvs "github.com/aerospike/aerospike-client-go/v7/proto/kvs"
-)
-
-func (cmd *readCommand) ExecuteGRPC(clnt *ProxyClient) Error {
- defer cmd.grpcPutBufferBack()
-
- err := cmd.prepareBuffer(cmd, cmd.policy.deadline())
- if err != nil {
- return err
- }
-
- req := kvs.AerospikeRequestPayload{
- Id: rand.Uint32(),
- Iteration: 1,
- Payload: cmd.dataBuffer[:cmd.dataOffset],
- ReadPolicy: cmd.policy.grpc(),
- }
-
- conn, err := clnt.grpcConn()
- if err != nil {
- return err
- }
-
- client := kvs.NewKVSClient(conn)
-
- ctx, cancel := cmd.policy.grpcDeadlineContext()
- defer cancel()
-
- res, gerr := client.Read(ctx, &req)
- if gerr != nil {
- return newGrpcError(!cmd.isRead(), gerr, gerr.Error())
- }
-
- cmd.commandWasSent = true
-
- defer clnt.returnGrpcConnToPool(conn)
-
- if res.GetStatus() != 0 {
- return newGrpcStatusError(res)
- }
-
- cmd.conn = newGrpcFakeConnection(res.GetPayload(), nil)
- err = cmd.parseResult(cmd, cmd.conn)
- if err != nil {
- return err
- }
-
- return nil
-}
-
-func (cmd *batchCommandOperate) ExecuteGRPC(clnt *ProxyClient) Error {
- defer cmd.grpcPutBufferBack()
-
- err := cmd.prepareBuffer(cmd, cmd.policy.deadline())
- if err != nil {
- return err
- }
-
- req := kvs.AerospikeRequestPayload{
- Id: rand.Uint32(),
- Iteration: 1,
- Payload: cmd.dataBuffer[:cmd.dataOffset],
- ReadPolicy: cmd.policy.grpc(),
- WritePolicy: cmd.policy.grpc_write(),
- }
-
- conn, err := clnt.grpcConn()
- if err != nil {
- return err
- }
-
- client := kvs.NewKVSClient(conn)
-
- ctx, cancel := cmd.policy.grpcDeadlineContext()
- defer cancel()
-
- streamRes, gerr := client.BatchOperate(ctx, &req)
- if gerr != nil {
- return newGrpcError(!cmd.isRead(), gerr, gerr.Error())
- }
-
- cmd.commandWasSent = true
-
- readCallback := func() ([]byte, Error) {
- if cmd.grpcEOS {
- return nil, errGRPCStreamEnd
- }
-
- res, gerr := streamRes.Recv()
- if gerr != nil {
- e := newGrpcError(!cmd.isRead(), gerr)
- return nil, e
- }
-
- if res.GetStatus() != 0 {
- e := newGrpcStatusError(res)
- return res.GetPayload(), e
- }
-
- cmd.grpcEOS = !res.GetHasNext()
-
- return res.GetPayload(), nil
- }
-
- cmd.conn = newGrpcFakeConnection(nil, readCallback)
- err = cmd.parseResult(cmd, cmd.conn)
- if err != nil && err != errGRPCStreamEnd {
- return err
- }
-
- clnt.returnGrpcConnToPool(conn)
-
- return nil
-}
-
-func (cmd *deleteCommand) ExecuteGRPC(clnt *ProxyClient) Error {
- defer cmd.grpcPutBufferBack()
-
- err := cmd.prepareBuffer(cmd, cmd.policy.deadline())
- if err != nil {
- return err
- }
-
- req := kvs.AerospikeRequestPayload{
- Id: rand.Uint32(),
- Iteration: 1,
- Payload: cmd.dataBuffer[:cmd.dataOffset],
- WritePolicy: cmd.policy.grpc(),
- }
-
- conn, err := clnt.grpcConn()
- if err != nil {
- return err
- }
-
- client := kvs.NewKVSClient(conn)
-
- ctx, cancel := cmd.policy.grpcDeadlineContext()
- defer cancel()
-
- res, gerr := client.Delete(ctx, &req)
- if gerr != nil {
- return newGrpcError(!cmd.isRead(), gerr, gerr.Error())
- }
-
- cmd.commandWasSent = true
-
- defer clnt.returnGrpcConnToPool(conn)
-
- if res.GetStatus() != 0 {
- return newGrpcStatusError(res)
- }
-
- cmd.conn = newGrpcFakeConnection(res.GetPayload(), nil)
- err = cmd.parseResult(cmd, cmd.conn)
- if err != nil {
- return err
- }
-
- return nil
-}
-
-func (cmd *executeCommand) ExecuteGRPC(clnt *ProxyClient) Error {
- defer cmd.grpcPutBufferBack()
-
- err := cmd.prepareBuffer(cmd, cmd.policy.deadline())
- if err != nil {
- return err
- }
-
- req := kvs.AerospikeRequestPayload{
- Id: rand.Uint32(),
- Iteration: 1,
- Payload: cmd.dataBuffer[:cmd.dataOffset],
- WritePolicy: cmd.policy.grpc(),
- }
-
- conn, err := clnt.grpcConn()
- if err != nil {
- return err
- }
-
- client := kvs.NewKVSClient(conn)
-
- ctx, cancel := cmd.policy.grpcDeadlineContext()
- defer cancel()
-
- res, gerr := client.Execute(ctx, &req)
- if gerr != nil {
- return newGrpcError(!cmd.isRead(), gerr, gerr.Error())
- }
-
- cmd.commandWasSent = true
-
- defer clnt.returnGrpcConnToPool(conn)
-
- if res.GetStatus() != 0 {
- return newGrpcStatusError(res)
- }
-
- cmd.conn = newGrpcFakeConnection(res.GetPayload(), nil)
- err = cmd.parseResult(cmd, cmd.conn)
- if err != nil {
- return err
- }
-
- return nil
-}
-
-func (cmd *existsCommand) ExecuteGRPC(clnt *ProxyClient) Error {
- defer cmd.grpcPutBufferBack()
-
- err := cmd.prepareBuffer(cmd, cmd.policy.deadline())
- if err != nil {
- return err
- }
-
- req := kvs.AerospikeRequestPayload{
- Id: rand.Uint32(),
- Iteration: 1,
- Payload: cmd.dataBuffer[:cmd.dataOffset],
- ReadPolicy: cmd.policy.grpc(),
- }
-
- conn, err := clnt.grpcConn()
- if err != nil {
- return err
- }
-
- client := kvs.NewKVSClient(conn)
-
- ctx, cancel := cmd.policy.grpcDeadlineContext()
- defer cancel()
-
- res, gerr := client.Exists(ctx, &req)
- if gerr != nil {
- return newGrpcError(!cmd.isRead(), gerr, gerr.Error())
- }
-
- cmd.commandWasSent = true
-
- defer clnt.returnGrpcConnToPool(conn)
-
- if res.GetStatus() != 0 {
- return newGrpcStatusError(res)
- }
-
- cmd.conn = newGrpcFakeConnection(res.GetPayload(), nil)
- err = cmd.parseResult(cmd, cmd.conn)
- if err != nil {
- return err
- }
-
- return nil
-}
-
-func (cmd *operateCommand) ExecuteGRPC(clnt *ProxyClient) Error {
- defer cmd.grpcPutBufferBack()
-
- err := cmd.prepareBuffer(cmd, cmd.policy.deadline())
- if err != nil {
- return err
- }
-
- req := kvs.AerospikeRequestPayload{
- Id: rand.Uint32(),
- Iteration: 1,
- Payload: cmd.dataBuffer[:cmd.dataOffset],
- WritePolicy: cmd.policy.grpc(),
- }
-
- conn, err := clnt.grpcConn()
- if err != nil {
- return err
- }
-
- client := kvs.NewKVSClient(conn)
-
- ctx, cancel := cmd.policy.grpcDeadlineContext()
- defer cancel()
-
- res, gerr := client.Operate(ctx, &req)
- if gerr != nil {
- return newGrpcError(!cmd.isRead(), gerr, gerr.Error())
- }
-
- cmd.commandWasSent = true
-
- defer clnt.returnGrpcConnToPool(conn)
-
- if res.GetStatus() != 0 {
- return newGrpcStatusError(res)
- }
-
- cmd.conn = newGrpcFakeConnection(res.GetPayload(), nil)
- err = cmd.parseResult(cmd, cmd.conn)
- if err != nil {
- return err
- }
-
- return nil
-}
-
-func (cmd *readHeaderCommand) ExecuteGRPC(clnt *ProxyClient) Error {
- defer cmd.grpcPutBufferBack()
-
- err := cmd.prepareBuffer(cmd, cmd.policy.deadline())
- if err != nil {
- return err
- }
-
- req := kvs.AerospikeRequestPayload{
- Id: rand.Uint32(),
- Iteration: 1,
- Payload: cmd.dataBuffer[:cmd.dataOffset],
- ReadPolicy: cmd.policy.grpc(),
- }
-
- conn, err := clnt.grpcConn()
- if err != nil {
- return err
- }
-
- client := kvs.NewKVSClient(conn)
-
- ctx, cancel := cmd.policy.grpcDeadlineContext()
- defer cancel()
-
- res, gerr := client.GetHeader(ctx, &req)
- if gerr != nil {
- return newGrpcError(!cmd.isRead(), gerr, gerr.Error())
- }
-
- cmd.commandWasSent = true
-
- defer clnt.returnGrpcConnToPool(conn)
-
- if res.GetStatus() != 0 {
- return newGrpcStatusError(res)
- }
-
- cmd.conn = newGrpcFakeConnection(res.GetPayload(), nil)
- err = cmd.parseResult(cmd, cmd.conn)
- if err != nil {
- return err
- }
-
- return nil
-}
-
-func (cmd *serverCommand) ExecuteGRPC(clnt *ProxyClient) Error {
- defer cmd.grpcPutBufferBack()
-
- err := cmd.prepareBuffer(cmd, cmd.policy.deadline())
- if err != nil {
- return err
- }
-
- execReq := &kvs.BackgroundExecuteRequest{
- Statement: cmd.statement.grpc(cmd.policy, cmd.operations),
- WritePolicy: cmd.writePolicy.grpc_exec(cmd.policy.FilterExpression),
- }
-
- req := kvs.AerospikeRequestPayload{
- Id: rand.Uint32(),
- Iteration: 1,
- Payload: cmd.dataBuffer[:cmd.dataOffset],
- BackgroundExecuteRequest: execReq,
- }
-
- conn, err := clnt.grpcConn()
- if err != nil {
- return err
- }
-
- client := kvs.NewQueryClient(conn)
-
- ctx, cancel := cmd.policy.grpcDeadlineContext()
- defer cancel()
-
- streamRes, gerr := client.BackgroundExecute(ctx, &req)
- if gerr != nil {
- return newGrpcError(!cmd.isRead(), gerr, gerr.Error())
- }
-
- cmd.commandWasSent = true
-
- readCallback := func() ([]byte, Error) {
- res, gerr := streamRes.Recv()
- if gerr != nil {
- e := newGrpcError(!cmd.isRead(), gerr)
- return nil, e
- }
-
- if res.GetStatus() != 0 {
- e := newGrpcStatusError(res)
- return res.GetPayload(), e
- }
-
- if !res.GetHasNext() {
- return nil, errGRPCStreamEnd
- }
-
- return res.GetPayload(), nil
- }
-
- cmd.conn = newGrpcFakeConnection(nil, readCallback)
- err = cmd.parseResult(cmd, cmd.conn)
- if err != nil && err != errGRPCStreamEnd {
- return err
- }
-
- clnt.returnGrpcConnToPool(conn)
-
- return nil
-}
-
-func (cmd *touchCommand) ExecuteGRPC(clnt *ProxyClient) Error {
- defer cmd.grpcPutBufferBack()
-
- err := cmd.prepareBuffer(cmd, cmd.policy.deadline())
- if err != nil {
- return err
- }
-
- req := kvs.AerospikeRequestPayload{
- Id: rand.Uint32(),
- Iteration: 1,
- Payload: cmd.dataBuffer[:cmd.dataOffset],
- WritePolicy: cmd.policy.grpc(),
- }
-
- conn, err := clnt.grpcConn()
- if err != nil {
- return err
- }
-
- client := kvs.NewKVSClient(conn)
-
- ctx, cancel := cmd.policy.grpcDeadlineContext()
- defer cancel()
-
- res, gerr := client.Touch(ctx, &req)
- if gerr != nil {
- return newGrpcError(!cmd.isRead(), gerr, gerr.Error())
- }
-
- cmd.commandWasSent = true
-
- defer clnt.returnGrpcConnToPool(conn)
-
- if res.GetStatus() != 0 {
- return newGrpcStatusError(res)
- }
-
- cmd.conn = newGrpcFakeConnection(res.GetPayload(), nil)
- err = cmd.parseResult(cmd, cmd.conn)
- if err != nil {
- return err
- }
-
- return nil
-}
-
-func (cmd *writeCommand) ExecuteGRPC(clnt *ProxyClient) Error {
- defer cmd.grpcPutBufferBack()
-
- err := cmd.prepareBuffer(cmd, cmd.policy.deadline())
- if err != nil {
- return err
- }
-
- req := kvs.AerospikeRequestPayload{
- Id: rand.Uint32(),
- Iteration: 1,
- Payload: cmd.dataBuffer[:cmd.dataOffset],
- WritePolicy: cmd.policy.grpc(),
- }
-
- conn, err := clnt.grpcConn()
- if err != nil {
- return err
- }
-
- client := kvs.NewKVSClient(conn)
-
- ctx, cancel := cmd.policy.grpcDeadlineContext()
- defer cancel()
-
- res, gerr := client.Write(ctx, &req)
- if gerr != nil {
- return newGrpcError(!cmd.isRead(), gerr, gerr.Error())
- }
-
- cmd.commandWasSent = true
-
- defer clnt.returnGrpcConnToPool(conn)
-
- if res.GetStatus() != 0 {
- return newGrpcStatusError(res)
- }
-
- cmd.conn = newGrpcFakeConnection(res.GetPayload(), nil)
- err = cmd.parseResult(cmd, cmd.conn)
- if err != nil {
- return err
- }
-
- return nil
-}
diff --git a/proxy_conv.go b/proxy_conv.go
deleted file mode 100644
index ecd4fefc..00000000
--- a/proxy_conv.go
+++ /dev/null
@@ -1,479 +0,0 @@
-//go:build as_proxy
-
-package aerospike
-
-import (
- "context"
- "math/rand"
- "time"
-
- kvs "github.com/aerospike/aerospike-client-go/v7/proto/kvs"
- "github.com/aerospike/aerospike-client-go/v7/types"
-)
-
-func (fltr *Filter) grpc() *kvs.Filter {
- if fltr == nil {
- return nil
- }
-
- res := &kvs.Filter{
- Name: fltr.name,
- ColType: fltr.idxType.grpc(),
- PackedCtx: fltr.grpcPackCtxPayload(),
- ValType: int32(fltr.valueParticleType),
- Begin: grpcValuePacked(fltr.begin),
- End: grpcValuePacked(fltr.end),
- }
-
- return res
-}
-
-///////////////////////////////////////////////////////////////////
-
-var simpleCancelFunc = func() {}
-
-func (p *InfoPolicy) grpcDeadlineContext() (context.Context, context.CancelFunc) {
- timeout := p.timeout()
- if timeout <= 0 {
- return context.Background(), simpleCancelFunc
-
- }
- ctx, cancel := context.WithTimeout(context.Background(), timeout)
- return ctx, cancel
-}
-
-func (p *InfoPolicy) grpc() *kvs.InfoPolicy {
- if p == nil {
- return nil
- }
-
- Timeout := uint32(p.Timeout / time.Millisecond)
- res := &kvs.InfoPolicy{
- Timeout: &Timeout,
- }
-
- return res
-}
-
-///////////////////////////////////////////////////////////////////
-
-func (op *Operation) grpc() *kvs.Operation {
- BinName := op.binName
- return &kvs.Operation{
- Type: op.grpc_op_type(),
- BinName: &BinName,
- Value: grpcValuePacked(op.binValue),
- }
-}
-
-///////////////////////////////////////////////////////////////////
-
-func (pf *PartitionFilter) grpc() *kvs.PartitionFilter {
- begin := uint32(pf.Begin)
- ps := make([]*kvs.PartitionStatus, len(pf.Partitions))
- for i := range pf.Partitions {
- ps[i] = pf.Partitions[i].grpc()
- }
-
- return &kvs.PartitionFilter{
- Begin: &begin,
- Count: uint32(pf.Count),
- Digest: pf.Digest,
- PartitionStatuses: ps,
- Retry: true,
- }
-
-}
-
-///////////////////////////////////////////////////////////////////
-
-func (ps *PartitionStatus) grpc() *kvs.PartitionStatus {
- id := uint32(ps.Id)
- bVal := ps.BVal
- digest := ps.Digest
- return &kvs.PartitionStatus{
- Id: &id,
- BVal: &bVal,
- Digest: digest,
- Retry: ps.Retry,
- }
-}
-
-///////////////////////////////////////////////////////////////////
-
-func (p *BasePolicy) grpc() *kvs.ReadPolicy {
- return &kvs.ReadPolicy{
- Replica: p.ReplicaPolicy.grpc(),
- ReadModeSC: p.ReadModeSC.grpc(),
- ReadModeAP: p.ReadModeAP.grpc(),
- }
-}
-
-func (p *BasePolicy) grpcDeadlineContext() (context.Context, context.CancelFunc) {
- timeout := p.timeout()
- if timeout <= 0 {
- return context.Background(), simpleCancelFunc
-
- }
- ctx, cancel := context.WithTimeout(context.Background(), timeout)
- return ctx, cancel
-}
-
-///////////////////////////////////////////////////////////////////
-
-func (qp *QueryPolicy) grpc() *kvs.QueryPolicy {
- SendKey := qp.SendKey
- TotalTimeout := uint32(qp.TotalTimeout / time.Millisecond)
- RecordQueueSize := uint32(qp.RecordQueueSize)
- MaxConcurrentNodes := uint32(qp.MaxConcurrentNodes)
- IncludeBinData := qp.IncludeBinData
- FailOnClusterChange := false //qp.FailOnClusterChange
- ShortQuery := qp.ShortQuery || qp.ExpectedDuration == SHORT
- InfoTimeout := uint32(qp.SocketTimeout / time.Millisecond)
- ExpectedDuration := qp.ExpectedDuration.grpc()
-
- return &kvs.QueryPolicy{
- Replica: qp.ReplicaPolicy.grpc(),
- ReadModeAP: qp.ReadModeAP.grpc(),
- ReadModeSC: qp.ReadModeSC.grpc(),
- SendKey: &SendKey,
- Compress: qp.UseCompression,
- Expression: qp.FilterExpression.grpc(),
- TotalTimeout: &TotalTimeout,
- MaxConcurrentNodes: &MaxConcurrentNodes,
- RecordQueueSize: &RecordQueueSize,
- IncludeBinData: &IncludeBinData,
- FailOnClusterChange: &FailOnClusterChange,
- ShortQuery: &ShortQuery,
- InfoTimeout: &InfoTimeout,
- ExpectedDuration: &ExpectedDuration,
- }
-}
-
-///////////////////////////////////////////////////////////////////
-
-func (sp *ScanPolicy) grpc() *kvs.ScanPolicy {
- TotalTimeout := uint32(sp.TotalTimeout / time.Millisecond)
- MaxRecords := uint64(sp.MaxRecords)
- RecordsPerSecond := uint32(sp.RecordsPerSecond)
- MaxConcurrentNodes := uint32(sp.MaxConcurrentNodes)
- IncludeBinData := sp.IncludeBinData
- ConcurrentNodes := MaxConcurrentNodes > 1
-
- return &kvs.ScanPolicy{
- Replica: sp.ReplicaPolicy.grpc(),
- ReadModeAP: sp.ReadModeAP.grpc(),
- ReadModeSC: sp.ReadModeSC.grpc(),
- Compress: sp.UseCompression,
- Expression: sp.FilterExpression.grpc(),
- TotalTimeout: &TotalTimeout,
- MaxRecords: &MaxRecords,
- RecordsPerSecond: &RecordsPerSecond,
- ConcurrentNodes: &ConcurrentNodes,
- MaxConcurrentNodes: &MaxConcurrentNodes,
- IncludeBinData: &IncludeBinData,
- }
-}
-
-///////////////////////////////////////////////////////////////////
-
-func (p *WritePolicy) grpc() *kvs.WritePolicy {
- return &kvs.WritePolicy{
- Replica: p.ReplicaPolicy.grpc(),
- ReadModeSC: p.ReadModeSC.grpc(),
- ReadModeAP: p.ReadModeAP.grpc(),
- }
-}
-
-func (p *WritePolicy) grpc_exec(expr *Expression) *kvs.BackgroundExecutePolicy {
- if p == nil {
- return nil
- }
-
- SendKey := p.SendKey
- TotalTimeout := uint32(p.TotalTimeout / time.Millisecond)
- RecordExistsAction := p.RecordExistsAction.grpc()
- GenerationPolicy := p.GenerationPolicy.grpc()
- CommitLevel := p.CommitLevel.grpc()
- Generation := p.Generation
- Expiration := p.Expiration
- RespondAllOps := p.RespondPerEachOp
- DurableDelete := p.DurableDelete
-
- fe := expr
- if fe == nil {
- fe = p.FilterExpression
- }
-
- res := &kvs.BackgroundExecutePolicy{
- Replica: p.ReplicaPolicy.grpc(),
- ReadModeAP: p.ReadModeAP.grpc(),
- ReadModeSC: p.ReadModeSC.grpc(),
- SendKey: &SendKey,
- Compress: p.UseCompression,
- Expression: fe.grpc(),
- TotalTimeout: &TotalTimeout,
-
- Xdr: nil,
-
- RecordExistsAction: &RecordExistsAction,
- GenerationPolicy: &GenerationPolicy,
- CommitLevel: &CommitLevel,
- Generation: &Generation,
- Expiration: &Expiration,
- RespondAllOps: &RespondAllOps,
- DurableDelete: &DurableDelete,
- }
-
- return res
-}
-
-func (p *BatchPolicy) grpc_write() *kvs.WritePolicy {
- return &kvs.WritePolicy{
- Replica: p.ReplicaPolicy.grpc(),
- ReadModeSC: p.ReadModeSC.grpc(),
- ReadModeAP: p.ReadModeAP.grpc(),
- }
-}
-
-func (cl CommitLevel) grpc() kvs.CommitLevel {
- switch cl {
- case COMMIT_ALL:
- return kvs.CommitLevel_COMMIT_ALL
- case COMMIT_MASTER:
- return kvs.CommitLevel_COMMIT_MASTER
- }
- panic(unreachable)
-}
-
-func newGrpcStatusError(res *kvs.AerospikeResponsePayload) Error {
- if res.GetStatus() >= 0 {
- return newError(types.ResultCode(res.GetStatus())).markInDoubt(res.GetInDoubt())
- }
-
- var resultCode = types.OK
- switch res.GetStatus() {
- case -16:
- // BATCH_FAILED
- resultCode = types.BATCH_FAILED
- case -15:
- // NO_RESPONSE
- resultCode = types.NO_RESPONSE
- case -12:
- // MAX_ERROR_RATE
- resultCode = types.MAX_ERROR_RATE
- case -11:
- // MAX_RETRIES_EXCEEDED
- resultCode = types.MAX_RETRIES_EXCEEDED
- case -10:
- // SERIALIZE_ERROR
- resultCode = types.SERIALIZE_ERROR
- case -9:
- // ASYNC_QUEUE_FULL
- // resultCode = types.ASYNC_QUEUE_FULL
- return newError(types.SERVER_ERROR, "Server ASYNC_QUEUE_FULL").markInDoubt(res.GetInDoubt())
- case -8:
- // SERVER_NOT_AVAILABLE
- resultCode = types.SERVER_NOT_AVAILABLE
- case -7:
- // NO_MORE_CONNECTIONS
- resultCode = types.NO_AVAILABLE_CONNECTIONS_TO_NODE
- case -5:
- // QUERY_TERMINATED
- resultCode = types.QUERY_TERMINATED
- case -4:
- // SCAN_TERMINATED
- resultCode = types.SCAN_TERMINATED
- case -3:
- // INVALID_NODE_ERROR
- resultCode = types.INVALID_NODE_ERROR
- case -2:
- // PARSE_ERROR
- resultCode = types.PARSE_ERROR
- case -1:
- // CLIENT_ERROR
- resultCode = types.COMMON_ERROR
- }
-
- return newError(resultCode).markInDoubt(res.GetInDoubt())
-}
-
-func (gp GenerationPolicy) grpc() kvs.GenerationPolicy {
- switch gp {
- case NONE:
- return kvs.GenerationPolicy_NONE
- case EXPECT_GEN_EQUAL:
- return kvs.GenerationPolicy_EXPECT_GEN_EQUAL
- case EXPECT_GEN_GT:
- return kvs.GenerationPolicy_EXPECT_GEN_GT
- }
- panic(unreachable)
-}
-
-func (ict IndexCollectionType) grpc() kvs.IndexCollectionType {
- switch ict {
- // Normal scalar index.
- case ICT_DEFAULT:
- return kvs.IndexCollectionType_DEFAULT
- // Index list elements.
- case ICT_LIST:
- return kvs.IndexCollectionType_LIST
- // Index map keys.
- case ICT_MAPKEYS:
- return kvs.IndexCollectionType_MAPKEYS
- // Index map values.
- case ICT_MAPVALUES:
- return kvs.IndexCollectionType_MAPVALUES
- }
- panic(unreachable)
-}
-
-func (o *Operation) grpc_op_type() kvs.OperationType {
- // case _READ: return kvs.OperationType_READ
- switch o.opType {
- case _READ:
- return kvs.OperationType_READ
- case _READ_HEADER:
- return kvs.OperationType_READ_HEADER
- case _WRITE:
- return kvs.OperationType_WRITE
- case _CDT_READ:
- return kvs.OperationType_CDT_READ
- case _CDT_MODIFY:
- return kvs.OperationType_CDT_MODIFY
- case _MAP_READ:
- return kvs.OperationType_MAP_READ
- case _MAP_MODIFY:
- return kvs.OperationType_MAP_MODIFY
- case _ADD:
- return kvs.OperationType_ADD
- case _EXP_READ:
- return kvs.OperationType_EXP_READ
- case _EXP_MODIFY:
- return kvs.OperationType_EXP_MODIFY
- case _APPEND:
- return kvs.OperationType_APPEND
- case _PREPEND:
- return kvs.OperationType_PREPEND
- case _TOUCH:
- return kvs.OperationType_TOUCH
- case _BIT_READ:
- return kvs.OperationType_BIT_READ
- case _BIT_MODIFY:
- return kvs.OperationType_BIT_MODIFY
- case _DELETE:
- return kvs.OperationType_DELETE
- case _HLL_READ:
- return kvs.OperationType_HLL_READ
- case _HLL_MODIFY:
- return kvs.OperationType_HLL_MODIFY
- }
-
- panic(unreachable)
-}
-
-func (stmt *Statement) grpc(policy *QueryPolicy, ops []*Operation) *kvs.Statement {
- IndexName := stmt.IndexName
- // reset taskID every time
- TaskId := rand.Int63()
- SetName := stmt.SetName
-
- MaxRecords := uint64(policy.MaxRecords)
- RecordsPerSecond := uint32(policy.RecordsPerSecond)
-
- funcArgs := make([][]byte, 0, len(stmt.functionArgs))
- for i := range stmt.functionArgs {
- funcArgs = append(funcArgs, grpcValuePacked(stmt.functionArgs[i]))
- }
-
- return &kvs.Statement{
- Namespace: stmt.Namespace,
- SetName: &SetName,
- IndexName: &IndexName,
- BinNames: stmt.BinNames,
- Filter: stmt.Filter.grpc(),
- PackageName: stmt.packageName,
- FunctionName: stmt.functionName,
- FunctionArgs: funcArgs,
- Operations: grpcOperations(ops),
- TaskId: &TaskId,
- MaxRecords: &MaxRecords,
- RecordsPerSecond: &RecordsPerSecond,
- }
-}
-
-func grpcOperations(ops []*Operation) []*kvs.Operation {
- res := make([]*kvs.Operation, 0, len(ops))
- for i := range ops {
- res = append(res, ops[i].grpc())
- }
- return res
-}
-
-func (qd QueryDuration) grpc() kvs.QueryDuration {
- switch qd {
- case LONG:
- return kvs.QueryDuration(kvs.QueryDuration_LONG)
- case SHORT:
- return kvs.QueryDuration(kvs.QueryDuration_SHORT)
- case LONG_RELAX_AP:
- return kvs.QueryDuration(kvs.QueryDuration_LONG_RELAX_AP)
- }
- panic(unreachable)
-}
-
-func (rm ReadModeAP) grpc() kvs.ReadModeAP {
- switch rm {
- case ReadModeAPOne:
- return kvs.ReadModeAP_ONE
- case ReadModeAPAll:
- return kvs.ReadModeAP_ALL
- }
- panic(unreachable)
-}
-
-func (rm ReadModeSC) grpc() kvs.ReadModeSC {
- switch rm {
- case ReadModeSCSession:
- return kvs.ReadModeSC_SESSION
- case ReadModeSCLinearize:
- return kvs.ReadModeSC_LINEARIZE
- case ReadModeSCAllowReplica:
- return kvs.ReadModeSC_ALLOW_REPLICA
- case ReadModeSCAllowUnavailable:
- return kvs.ReadModeSC_ALLOW_UNAVAILABLE
- }
- panic(unreachable)
-}
-
-func (rea RecordExistsAction) grpc() kvs.RecordExistsAction {
- switch rea {
- case UPDATE:
- return kvs.RecordExistsAction_UPDATE
- case UPDATE_ONLY:
- return kvs.RecordExistsAction_UPDATE_ONLY
- case REPLACE:
- return kvs.RecordExistsAction_REPLACE
- case REPLACE_ONLY:
- return kvs.RecordExistsAction_REPLACE_ONLY
- case CREATE_ONLY:
- return kvs.RecordExistsAction_CREATE_ONLY
- }
- panic(unreachable)
-}
-
-func (rp ReplicaPolicy) grpc() kvs.Replica {
- switch rp {
- case MASTER:
- return kvs.Replica_MASTER
- case MASTER_PROLES:
- return kvs.Replica_MASTER_PROLES
- case RANDOM:
- return kvs.Replica_RANDOM
- case SEQUENCE:
- return kvs.Replica_SEQUENCE
- case PREFER_RACK:
- return kvs.Replica_PREFER_RACK
- }
- panic(unreachable)
-}
diff --git a/proxy_execute_task.go b/proxy_execute_task.go
deleted file mode 100644
index bc89b966..00000000
--- a/proxy_execute_task.go
+++ /dev/null
@@ -1,89 +0,0 @@
-//go:build as_proxy
-
-// Copyright 2014-2022 Aerospike, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package aerospike
-
-import (
- "context"
- "math/rand"
- "time"
-
- kvs "github.com/aerospike/aerospike-client-go/v7/proto/kvs"
-)
-
-// newGRPCExecuteTask initializes task with fields needed to query server nodes.
-func newGRPCExecuteTask(clnt *ProxyClient, statement *Statement) *ExecuteTask {
- return &ExecuteTask{
- baseTask: newTask(nil),
- taskID: statement.TaskId,
- scan: statement.IsScan(),
- clnt: clnt,
- }
-}
-
-func (etsk *ExecuteTask) grpcIsDone() (bool, Error) {
- statusReq := &kvs.BackgroundTaskStatusRequest{
- TaskId: int64(etsk.taskID),
- IsScan: etsk.scan,
- }
-
- req := kvs.AerospikeRequestPayload{
- Id: rand.Uint32(),
- Iteration: 1,
- BackgroundTaskStatusRequest: statusReq,
- }
-
- clnt := etsk.clnt.(*ProxyClient)
- conn, err := clnt.grpcConn()
- if err != nil {
- return false, err
- }
-
- client := kvs.NewQueryClient(conn)
-
- ctx, cancel := context.WithTimeout(context.Background(), NewInfoPolicy().Timeout)
- defer cancel()
-
- streamRes, gerr := client.BackgroundTaskStatus(ctx, &req)
- if gerr != nil {
- return false, newGrpcError(true, gerr, gerr.Error())
- }
-
- for {
- time.Sleep(time.Second)
-
- res, gerr := streamRes.Recv()
- if gerr != nil {
- e := newGrpcError(true, gerr)
- return false, e
- }
-
- if res.GetStatus() != 0 {
- e := newGrpcStatusError(res)
- clnt.returnGrpcConnToPool(conn)
- return false, e
- }
-
- switch res.GetBackgroundTaskStatus() {
- case kvs.BackgroundTaskStatus_COMPLETE:
- clnt.returnGrpcConnToPool(conn)
- return true, nil
- default:
- clnt.returnGrpcConnToPool(conn)
- return false, nil
- }
- }
-}
diff --git a/proxy_query_partition_command.go b/proxy_query_partition_command.go
deleted file mode 100644
index de389261..00000000
--- a/proxy_query_partition_command.go
+++ /dev/null
@@ -1,162 +0,0 @@
-//go:build as_proxy
-
-// Copyright 2014-2022 Aerospike, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package aerospike
-
-import (
- "math/rand"
-
- kvs "github.com/aerospike/aerospike-client-go/v7/proto/kvs"
-)
-
-type grpcQueryPartitionCommand struct {
- baseMultiCommand
-
- policy *QueryPolicy
- writePolicy *WritePolicy
- statement *Statement
- partitionFilter *PartitionFilter
- operations []*Operation
-}
-
-func newGrpcQueryPartitionCommand(
- policy *QueryPolicy,
- writePolicy *WritePolicy,
- statement *Statement,
- operations []*Operation,
- partitionTracker *partitionTracker,
- partitionFilter *PartitionFilter,
- recordset *Recordset,
-) *grpcQueryPartitionCommand {
- cmd := &grpcQueryPartitionCommand{
- baseMultiCommand: *newCorrectStreamingMultiCommand(recordset, statement.Namespace),
- policy: policy,
- writePolicy: writePolicy,
- statement: statement,
- partitionFilter: partitionFilter,
- operations: operations,
- }
- cmd.rawCDT = policy.RawCDT
- cmd.tracker = partitionTracker
- cmd.terminationErrorType = statement.terminationError()
- cmd.nodePartitions = newNodePartitions(nil, _PARTITIONS)
-
- return cmd
-}
-
-func (cmd *grpcQueryPartitionCommand) getPolicy(ifc command) Policy {
- return cmd.policy
-}
-
-func (cmd *grpcQueryPartitionCommand) writeBuffer(ifc command) Error {
- return cmd.setQuery(cmd.policy, cmd.writePolicy, cmd.statement, cmd.recordset.TaskId(), cmd.operations, cmd.writePolicy != nil, nil)
-}
-
-func (cmd *grpcQueryPartitionCommand) shouldRetry(e Error) bool {
- panic(unreachable)
-}
-
-func (cmd *grpcQueryPartitionCommand) transactionType() transactionType {
- return ttQuery
-}
-
-func (cmd *grpcQueryPartitionCommand) Execute() Error {
- panic(unreachable)
-}
-
-func (cmd *grpcQueryPartitionCommand) ExecuteGRPC(clnt *ProxyClient) Error {
- defer cmd.recordset.signalEnd()
-
- defer cmd.grpcPutBufferBack()
-
- err := cmd.prepareBuffer(cmd, cmd.policy.deadline())
- if err != nil {
- return err
- }
-
- queryReq := &kvs.QueryRequest{
- Statement: cmd.statement.grpc(cmd.policy, cmd.operations),
- PartitionFilter: cmd.partitionFilter.grpc(),
- QueryPolicy: cmd.policy.grpc(),
- }
-
- req := kvs.AerospikeRequestPayload{
- Id: rand.Uint32(),
- Iteration: 1,
- Payload: cmd.dataBuffer[:cmd.dataOffset],
- QueryRequest: queryReq,
- }
-
- conn, err := clnt.grpcConn()
- if err != nil {
- return err
- }
-
- client := kvs.NewQueryClient(conn)
-
- ctx, cancel := cmd.policy.grpcDeadlineContext()
- defer cancel()
-
- streamRes, gerr := client.Query(ctx, &req)
- if gerr != nil {
- return newGrpcError(!cmd.isRead(), gerr, gerr.Error())
- }
-
- cmd.commandWasSent = true
-
- readCallback := func() ([]byte, Error) {
- if cmd.grpcEOS {
- return nil, errGRPCStreamEnd
- }
-
- res, gerr := streamRes.Recv()
- if gerr != nil {
- e := newGrpcError(!cmd.isRead(), gerr)
- cmd.recordset.sendError(e)
- return nil, e
- }
-
- if res.GetStatus() != 0 {
- e := newGrpcStatusError(res)
- cmd.recordset.sendError(e)
- return res.GetPayload(), e
- }
-
- cmd.grpcEOS = !res.GetHasNext()
-
- return res.GetPayload(), nil
- }
-
- cmd.conn = newGrpcFakeConnection(nil, readCallback)
- err = cmd.parseResult(cmd, cmd.conn)
- if err != nil && err != errGRPCStreamEnd {
- cmd.recordset.sendError(err)
- return err
- }
-
- done, err := cmd.tracker.isComplete(false, &cmd.policy.BasePolicy, []*nodePartitions{cmd.nodePartitions})
- if !cmd.recordset.IsActive() || done || err != nil {
- // Query is complete.
- if err != nil {
- cmd.tracker.partitionError()
- cmd.recordset.sendError(err)
- }
- }
-
- clnt.returnGrpcConnToPool(conn)
-
- return nil
-}
diff --git a/proxy_scan_command.go b/proxy_scan_command.go
deleted file mode 100644
index 7f0a06c7..00000000
--- a/proxy_scan_command.go
+++ /dev/null
@@ -1,165 +0,0 @@
-//go:build as_proxy
-
-// Copyright 2014-2022 Aerospike, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package aerospike
-
-import (
- "math/rand"
-
- kvs "github.com/aerospike/aerospike-client-go/v7/proto/kvs"
- "github.com/aerospike/aerospike-client-go/v7/types"
-)
-
-type grpcScanPartitionCommand struct {
- baseMultiCommand
-
- policy *ScanPolicy
- namespace string
- setName string
- binNames []string
- partitionFilter *PartitionFilter
-}
-
-func newGrpcScanPartitionCommand(
- policy *ScanPolicy,
- partitionTracker *partitionTracker,
- partitionFilter *PartitionFilter,
- namespace string,
- setName string,
- binNames []string,
- recordset *Recordset,
-) *grpcScanPartitionCommand {
- cmd := &grpcScanPartitionCommand{
- baseMultiCommand: *newCorrectStreamingMultiCommand(recordset, namespace),
- policy: policy,
- namespace: namespace,
- setName: setName,
- binNames: binNames,
- partitionFilter: partitionFilter,
- }
- cmd.rawCDT = policy.RawCDT
- cmd.tracker = partitionTracker
- cmd.terminationErrorType = types.SCAN_TERMINATED
- cmd.nodePartitions = newNodePartitions(nil, _PARTITIONS)
-
- return cmd
-}
-
-func (cmd *grpcScanPartitionCommand) getPolicy(ifc command) Policy {
- return cmd.policy
-}
-
-func (cmd *grpcScanPartitionCommand) writeBuffer(ifc command) Error {
- return cmd.setScan(cmd.policy, &cmd.namespace, &cmd.setName, cmd.binNames, cmd.recordset.taskID, nil)
-}
-
-func (cmd *grpcScanPartitionCommand) shouldRetry(e Error) bool {
- panic(unreachable)
-}
-
-func (cmd *grpcScanPartitionCommand) transactionType() transactionType {
- return ttScan
-}
-
-func (cmd *grpcScanPartitionCommand) Execute() Error {
- panic(unreachable)
-}
-
-func (cmd *grpcScanPartitionCommand) ExecuteGRPC(clnt *ProxyClient) Error {
- defer cmd.recordset.signalEnd()
-
- defer cmd.grpcPutBufferBack()
-
- err := cmd.prepareBuffer(cmd, cmd.policy.deadline())
- if err != nil {
- return err
- }
-
- scanReq := &kvs.ScanRequest{
- Namespace: cmd.namespace,
- SetName: &cmd.setName,
- BinNames: cmd.binNames,
- PartitionFilter: cmd.partitionFilter.grpc(),
- ScanPolicy: cmd.policy.grpc(),
- }
-
- req := kvs.AerospikeRequestPayload{
- Id: rand.Uint32(),
- Iteration: 1,
- Payload: cmd.dataBuffer[:cmd.dataOffset],
- ScanRequest: scanReq,
- }
-
- conn, err := clnt.grpcConn()
- if err != nil {
- return err
- }
-
- client := kvs.NewScanClient(conn)
-
- ctx, cancel := cmd.policy.grpcDeadlineContext()
- defer cancel()
-
- streamRes, gerr := client.Scan(ctx, &req)
- if gerr != nil {
- return newGrpcError(!cmd.isRead(), gerr, gerr.Error())
- }
-
- cmd.commandWasSent = true
-
- readCallback := func() ([]byte, Error) {
- if cmd.grpcEOS {
- return nil, errGRPCStreamEnd
- }
-
- res, gerr := streamRes.Recv()
- if gerr != nil {
- e := newGrpcError(!cmd.isRead(), gerr)
- cmd.recordset.sendError(e)
- return nil, e
- }
-
- cmd.grpcEOS = !res.GetHasNext()
-
- if res.GetStatus() != 0 {
- e := newGrpcStatusError(res)
- cmd.recordset.sendError(e)
- return res.GetPayload(), e
- }
-
- return res.GetPayload(), nil
- }
-
- cmd.conn = newGrpcFakeConnection(nil, readCallback)
- err = cmd.parseResult(cmd, cmd.conn)
- if err != nil && err != errGRPCStreamEnd {
- cmd.recordset.sendError(err)
- return err
- }
-
- done, err := cmd.tracker.isComplete(false, &cmd.policy.BasePolicy, []*nodePartitions{cmd.nodePartitions})
- if !cmd.recordset.IsActive() || done || err != nil {
- // Query is complete.
- if err != nil {
- cmd.tracker.partitionError()
- cmd.recordset.sendError(err)
- }
- }
-
- clnt.returnGrpcConnToPool(conn)
-
- return nil
-}
diff --git a/query_aggregate_command.go b/query_aggregate_command.go
index c4693478..321819ca 100644
--- a/query_aggregate_command.go
+++ b/query_aggregate_command.go
@@ -19,9 +19,9 @@ package aerospike
import (
"fmt"
- "github.com/aerospike/aerospike-client-go/v7/types"
+ "github.com/aerospike/aerospike-client-go/v8/types"
- Buffer "github.com/aerospike/aerospike-client-go/v7/utils/buffer"
+ Buffer "github.com/aerospike/aerospike-client-go/v8/utils/buffer"
lua "github.com/yuin/gopher-lua"
)
diff --git a/query_aggregate_test.go b/query_aggregate_test.go
index adaaac9f..d2429b4b 100644
--- a/query_aggregate_test.go
+++ b/query_aggregate_test.go
@@ -20,14 +20,14 @@ import (
"os"
"sync"
- as "github.com/aerospike/aerospike-client-go/v7"
+ as "github.com/aerospike/aerospike-client-go/v8"
gg "github.com/onsi/ginkgo/v2"
gm "github.com/onsi/gomega"
)
func registerUDFFromFile(path, filename string) {
- regTask, err := nativeClient.RegisterUDFFromFile(nil, path+filename+".lua", filename+".lua", as.LUA)
+ regTask, err := client.RegisterUDFFromFile(nil, path+filename+".lua", filename+".lua", as.LUA)
gm.Expect(err).ToNot(gm.HaveOccurred())
// wait until UDF is created
@@ -35,7 +35,7 @@ func registerUDFFromFile(path, filename string) {
}
func registerUDF(udf, moduleName string) {
- regTask, err := nativeClient.RegisterUDF(nil, []byte(udf), moduleName, as.LUA)
+ regTask, err := client.RegisterUDF(nil, []byte(udf), moduleName, as.LUA)
gm.Expect(err).ToNot(gm.HaveOccurred())
// wait until UDF is created
@@ -43,7 +43,7 @@ func registerUDF(udf, moduleName string) {
}
func removeUDF(moduleName string) {
- remTask, err := nativeClient.RemoveUDF(nil, moduleName)
+ remTask, err := client.RemoveUDF(nil, moduleName)
gm.Expect(err).ToNot(gm.HaveOccurred())
// wait until UDF is created
@@ -73,14 +73,6 @@ var _ = gg.Describe("Query Aggregate operations", func() {
createUDFs := new(sync.Once)
gg.BeforeEach(func() {
- if *dbaas {
- gg.Skip("Not supported in DBAAS environment")
- }
-
- if *proxy {
- gg.Skip("Not supported in Proxy Client")
- }
-
createUDFs.Do(func() {
registerUDFFromFile(luaPath, "sum_single_bin")
registerUDFFromFile(luaPath, "average")
diff --git a/query_command.go b/query_command.go
index b5c2a042..9b2479b8 100644
--- a/query_command.go
+++ b/query_command.go
@@ -48,7 +48,7 @@ func (cmd *queryCommand) parseResult(ifc command, conn *Connection) Error {
return cmd.baseMultiCommand.parseResult(ifc, conn)
}
-func (cmd *queryCommand) transactionType() transactionType {
+func (cmd *queryCommand) commandType() commandType {
return ttQuery
}
diff --git a/query_context_test.go b/query_context_test.go
index 5a18a2ac..8d47ae87 100644
--- a/query_context_test.go
+++ b/query_context_test.go
@@ -15,7 +15,7 @@
package aerospike_test
import (
- as "github.com/aerospike/aerospike-client-go/v7"
+ as "github.com/aerospike/aerospike-client-go/v8"
gg "github.com/onsi/ginkgo/v2"
gm "github.com/onsi/gomega"
@@ -36,10 +36,6 @@ var _ = gg.Describe("Query operations with Context", func() {
var keys map[string]*as.Key
gg.BeforeEach(func() {
- if *dbaas {
- gg.Skip("Not supported in DBAAS environment")
- }
-
keys = make(map[string]*as.Key, keyCount)
set = randString(50)
for i := 0; i < keyCount; i++ {
diff --git a/query_objects_executor.go b/query_objects_executor.go
index 63ad74d4..0a7e4f0d 100644
--- a/query_objects_executor.go
+++ b/query_objects_executor.go
@@ -19,7 +19,7 @@ import (
"sync"
"time"
- "github.com/aerospike/aerospike-client-go/v7/logger"
+ "github.com/aerospike/aerospike-client-go/v8/logger"
"golang.org/x/sync/semaphore"
)
diff --git a/query_partition_command.go b/query_partition_command.go
index abb0523a..7f85e708 100644
--- a/query_partition_command.go
+++ b/query_partition_command.go
@@ -51,7 +51,7 @@ func (cmd *queryPartitionCommand) shouldRetry(e Error) bool {
return cmd.tracker != nil && cmd.tracker.shouldRetry(cmd.nodePartitions, e)
}
-func (cmd *queryPartitionCommand) transactionType() transactionType {
+func (cmd *queryPartitionCommand) commandType() commandType {
return ttQuery
}
diff --git a/query_partitiopn_objects_command.go b/query_partitiopn_objects_command.go
index 4c68a14c..11684760 100644
--- a/query_partitiopn_objects_command.go
+++ b/query_partitiopn_objects_command.go
@@ -50,7 +50,7 @@ func (cmd *queryPartitionObjectsCommand) shouldRetry(e Error) bool {
return cmd.tracker != nil && cmd.tracker.shouldRetry(cmd.nodePartitions, e)
}
-func (cmd *queryPartitionObjectsCommand) transactionType() transactionType {
+func (cmd *queryPartitionObjectsCommand) commandType() commandType {
return ttQuery
}
diff --git a/query_policy.go b/query_policy.go
index 747a06c2..4309bd7b 100644
--- a/query_policy.go
+++ b/query_policy.go
@@ -15,6 +15,8 @@
package aerospike
// QueryPolicy encapsulates parameters for policy attributes used in query operations.
+//
+// Inherited Policy fields Policy.Txn are ignored in query commands.
type QueryPolicy struct {
MultiPolicy
diff --git a/query_test.go b/query_test.go
index b8c1570e..b7ca6d6b 100644
--- a/query_test.go
+++ b/query_test.go
@@ -20,8 +20,8 @@ import (
"math"
"math/rand"
- as "github.com/aerospike/aerospike-client-go/v7"
- ast "github.com/aerospike/aerospike-client-go/v7/types"
+ as "github.com/aerospike/aerospike-client-go/v8"
+ ast "github.com/aerospike/aerospike-client-go/v8/types"
gg "github.com/onsi/ginkgo/v2"
gm "github.com/onsi/gomega"
@@ -91,11 +91,7 @@ var _ = gg.Describe("Query operations", func() {
}
gg.BeforeEach(func() {
- if *dbaas {
- gg.Skip("Not supported in DBAAS environment")
- }
-
- nativeClient.Truncate(nil, ns, set, nil)
+ client.Truncate(nil, ns, set, nil)
keys = make(map[string]*as.Key, keyCount)
set = randString(50)
@@ -124,27 +120,19 @@ var _ = gg.Describe("Query operations", func() {
})
gg.AfterEach(func() {
- if *dbaas {
- gg.Skip("Not supported in DBAAS environment")
- }
-
indexName = set + bin3.Name
- gm.Expect(nativeClient.DropIndex(nil, ns, set, indexName)).ToNot(gm.HaveOccurred())
+ gm.Expect(client.DropIndex(nil, ns, set, indexName)).ToNot(gm.HaveOccurred())
indexName = set + bin6.Name
- gm.Expect(nativeClient.DropIndex(nil, ns, set, indexName)).ToNot(gm.HaveOccurred())
+ gm.Expect(client.DropIndex(nil, ns, set, indexName)).ToNot(gm.HaveOccurred())
indexName = set + bin7.Name
- gm.Expect(nativeClient.DropIndex(nil, ns, set, indexName)).ToNot(gm.HaveOccurred())
+ gm.Expect(client.DropIndex(nil, ns, set, indexName)).ToNot(gm.HaveOccurred())
})
var queryPolicy = as.NewQueryPolicy()
gg.It("must Query and get all records back for a specified node using Results() channel", func() {
- if *proxy {
- gg.Skip("Not Supported for Proxy Client")
- }
-
gm.Expect(len(keys)).To(gm.Equal(keyCount))
stm := as.NewStatement(ns, set)
@@ -395,7 +383,7 @@ var _ = gg.Describe("Query operations", func() {
})
gg.It("must Query a specific range by applying a udf filter and get only relevant records back", func() {
- regTask, err := nativeClient.RegisterUDF(nil, []byte(udfFilter), "udfFilter.lua", as.LUA)
+ regTask, err := client.RegisterUDF(nil, []byte(udfFilter), "udfFilter.lua", as.LUA)
gm.Expect(err).ToNot(gm.HaveOccurred())
// wait until UDF is created
diff --git a/random_operation_test.go b/random_operation_test.go
index 450c384b..d2b32e0e 100644
--- a/random_operation_test.go
+++ b/random_operation_test.go
@@ -19,7 +19,7 @@ import (
"strings"
"time"
- as "github.com/aerospike/aerospike-client-go/v7"
+ as "github.com/aerospike/aerospike-client-go/v8"
gg "github.com/onsi/ginkgo/v2"
gm "github.com/onsi/gomega"
@@ -30,12 +30,6 @@ const RANDOM_OPS_RUNS = 1000
// ALL tests are isolated by SetName and Key, which are 50 random characters
var _ = gg.Describe("Aerospike", func() {
- gg.BeforeEach(func() {
- if *dbaas {
- gg.Skip("Not supported in DBAAS environment")
- }
- })
-
gg.Describe("Random Data Operations", func() {
// connection data
var err error
diff --git a/read_command.go b/read_command.go
index b6a6aaf5..1839e7d5 100644
--- a/read_command.go
+++ b/read_command.go
@@ -15,162 +15,71 @@
package aerospike
import (
- "fmt"
- "reflect"
-
- "github.com/aerospike/aerospike-client-go/v7/logger"
- "github.com/aerospike/aerospike-client-go/v7/types"
-
- Buffer "github.com/aerospike/aerospike-client-go/v7/utils/buffer"
+ "github.com/aerospike/aerospike-client-go/v8/types"
)
type readCommand struct {
- singleCommand
-
- policy *BasePolicy
- binNames []string
- record *Record
-
- // pointer to the object that's going to be unmarshalled
- object *reflect.Value
+ baseReadCommand
- replicaSequence int
+ binNames []string
+ isOperation bool
}
-// this method uses reflection.
-// Will not be set if performance flag is passed for the build.
-var objectParser func(
- cmd *readCommand,
- opCount int,
- fieldCount int,
- generation uint32,
- expiration uint32,
-) Error
-
-func newReadCommand(cluster *Cluster, policy *BasePolicy, key *Key, binNames []string, partition *Partition) (readCommand, Error) {
- var err Error
- if partition == nil {
- if cluster != nil {
- partition, err = PartitionForRead(cluster, policy, key)
- if err != nil {
- return readCommand{}, err
- }
- }
+func newReadCommand(
+ cluster *Cluster,
+ policy *BasePolicy,
+ key *Key,
+ binNames []string,
+) (readCommand, Error) {
+ brc, err := newBaseReadCommand(cluster, policy, key)
+ if err != nil {
+ return readCommand{}, err
}
return readCommand{
- singleCommand: newSingleCommand(cluster, key, partition),
- binNames: binNames,
- policy: policy,
+ baseReadCommand: brc,
+ binNames: binNames,
}, nil
}
-func (cmd *readCommand) getPolicy(ifc command) Policy {
- return cmd.policy
-}
-
func (cmd *readCommand) writeBuffer(ifc command) Error {
return cmd.setRead(cmd.policy, cmd.key, cmd.binNames)
}
-func (cmd *readCommand) getNode(ifc command) (*Node, Error) {
- return cmd.partition.GetNodeRead(cmd.cluster)
-}
-
-func (cmd *readCommand) prepareRetry(ifc command, isTimeout bool) bool {
- cmd.partition.PrepareRetryRead(isTimeout)
- return true
-}
-
func (cmd *readCommand) parseResult(ifc command, conn *Connection) Error {
- // Read proto and check if compressed
- if _, err := conn.Read(cmd.dataBuffer, 8); err != nil {
- logger.Logger.Debug("Connection error reading data for ReadCommand: %s", err.Error())
+ rp, err := newRecordParser(&cmd.baseCommand)
+ if err != nil {
return err
}
- if compressedSize := cmd.compressedSize(); compressedSize > 0 {
- // Read compressed size
- if _, err := conn.Read(cmd.dataBuffer, 8); err != nil {
- logger.Logger.Debug("Connection error reading data for ReadCommand: %s", err.Error())
- return err
- }
-
- if err := cmd.conn.initInflater(true, compressedSize); err != nil {
- return newError(types.PARSE_ERROR, fmt.Sprintf("Error setting up zlib inflater for size `%d`: %s", compressedSize, err.Error()))
- }
-
- // Read header.
- if _, err := conn.Read(cmd.dataBuffer, int(_MSG_TOTAL_HEADER_SIZE)); err != nil {
- logger.Logger.Debug("Connection error reading data for ReadCommand: %s", err.Error())
- return err
- }
- } else {
- // Read header.
- if _, err := conn.Read(cmd.dataBuffer[8:], int(_MSG_TOTAL_HEADER_SIZE)-8); err != nil {
- logger.Logger.Debug("Connection error reading data for ReadCommand: %s", err.Error())
- return err
- }
- }
-
- // A number of these are commented out because we just don't care enough to read
- // that section of the header. If we do care, uncomment and check!
- sz := Buffer.BytesToInt64(cmd.dataBuffer, 0)
-
- // Validate header to make sure we are at the beginning of a message
- if err := cmd.validateHeader(sz); err != nil {
+ if err := rp.parseFields(cmd.policy.Txn, cmd.key, false); err != nil {
return err
}
- headerLength := int(cmd.dataBuffer[8])
- resultCode := types.ResultCode(cmd.dataBuffer[13] & 0xFF)
- generation := Buffer.BytesToUint32(cmd.dataBuffer, 14)
- expiration := types.TTL(Buffer.BytesToUint32(cmd.dataBuffer, 18))
- fieldCount := int(Buffer.BytesToUint16(cmd.dataBuffer, 26)) // almost certainly 0
- opCount := int(Buffer.BytesToUint16(cmd.dataBuffer, 28))
- receiveSize := int((sz & 0xFFFFFFFFFFFF) - int64(headerLength))
-
- // Read remaining message bytes.
- if receiveSize > 0 {
- if err := cmd.sizeBufferSz(receiveSize, false); err != nil {
- return err
- }
- if _, err := conn.Read(cmd.dataBuffer, receiveSize); err != nil {
- logger.Logger.Debug("Connection error reading data for ReadCommand: %s", err.Error())
- return err
- }
-
- }
-
- if resultCode != 0 {
- if resultCode == types.KEY_NOT_FOUND_ERROR {
+ if rp.resultCode != 0 {
+ if rp.resultCode == types.KEY_NOT_FOUND_ERROR {
return ErrKeyNotFound.err()
- } else if resultCode == types.FILTERED_OUT {
+ } else if rp.resultCode == types.FILTERED_OUT {
return ErrFilteredOut.err()
- } else if resultCode == types.UDF_BAD_RESPONSE {
- cmd.record, _ = cmd.parseRecord(ifc, opCount, fieldCount, generation, expiration)
- err := cmd.handleUdfError(resultCode)
- logger.Logger.Debug("UDF execution error: " + err.Error())
- return err
}
- return newError(resultCode)
+ return newError(rp.resultCode)
}
if cmd.object == nil {
- if opCount == 0 {
+ if rp.opCount == 0 {
// data Bin was not returned
- cmd.record = newRecord(cmd.node, cmd.key, nil, generation, expiration)
+ cmd.record = newRecord(cmd.node, cmd.key, nil, rp.generation, rp.expiration)
return nil
}
var err Error
- cmd.record, err = cmd.parseRecord(ifc, opCount, fieldCount, generation, expiration)
+ cmd.record, err = rp.parseRecord(cmd.key, cmd.isOperation)
if err != nil {
return err
}
} else if objectParser != nil {
- if err := objectParser(cmd, opCount, fieldCount, generation, expiration); err != nil {
+ if err := objectParser(&cmd.baseReadCommand, rp.opCount, rp.fieldCount, rp.generation, rp.expiration); err != nil {
return err
}
}
@@ -178,94 +87,6 @@ func (cmd *readCommand) parseResult(ifc command, conn *Connection) Error {
return nil
}
-func (cmd *readCommand) handleUdfError(resultCode types.ResultCode) Error {
- if ret, exists := cmd.record.Bins["FAILURE"]; exists {
- return newError(resultCode, ret.(string))
- }
- return newError(resultCode)
-}
-
-func (cmd *readCommand) parseRecord(
- ifc command,
- opCount int,
- fieldCount int,
- generation uint32,
- expiration uint32,
-) (*Record, Error) {
- var bins BinMap
- receiveOffset := 0
-
- opCmd, isOperate := ifc.(*operateCommand)
- var binNamesSet []string
-
- // There can be fields in the response (setname etc).
- // But for now, ignore them. Expose them to the API if needed in the future.
- //logger.Logger.Debug("field count: %d, databuffer: %v", fieldCount, cmd.dataBuffer)
- if fieldCount > 0 {
- // Just skip over all the fields
- for i := 0; i < fieldCount; i++ {
- //logger.Logger.Debug("%d", receiveOffset)
- fieldSize := int(Buffer.BytesToUint32(cmd.dataBuffer, receiveOffset))
- receiveOffset += (4 + fieldSize)
- }
- }
-
- if opCount > 0 {
- bins = make(BinMap, opCount)
- }
-
- for i := 0; i < opCount; i++ {
- opSize := int(Buffer.BytesToUint32(cmd.dataBuffer, receiveOffset))
- particleType := int(cmd.dataBuffer[receiveOffset+5])
- nameSize := int(cmd.dataBuffer[receiveOffset+7])
- name := string(cmd.dataBuffer[receiveOffset+8 : receiveOffset+8+nameSize])
- receiveOffset += 4 + 4 + nameSize
-
- particleBytesSize := opSize - (4 + nameSize)
- value, _ := bytesToParticle(particleType, cmd.dataBuffer, receiveOffset, particleBytesSize)
- receiveOffset += particleBytesSize
-
- if bins == nil {
- bins = make(BinMap, opCount)
- }
-
- if isOperate {
- // for operate list command results
- if prev, exists := bins[name]; exists {
- if res, ok := prev.(OpResults); ok {
- // List already exists. Add to it.
- bins[name] = append(res, value)
- } else {
- // Make a list to store all values.
- bins[name] = OpResults{prev, value}
- binNamesSet = append(binNamesSet, name)
- }
- } else {
- bins[name] = value
- }
- } else {
- bins[name] = value
- }
- }
-
- // TODO: Remove this in the next major release
- if isOperate && !opCmd.useOpResults {
- for i := range binNamesSet {
- bins[binNamesSet[i]] = []interface{}(bins[binNamesSet[i]].(OpResults))
- }
- }
-
- return newRecord(cmd.node, cmd.key, bins, generation, expiration), nil
-}
-
-func (cmd *readCommand) GetRecord() *Record {
- return cmd.record
-}
-
func (cmd *readCommand) Execute() Error {
return cmd.execute(cmd)
}
-
-func (cmd *readCommand) transactionType() transactionType {
- return ttGet
-}
diff --git a/read_command_reflect.go b/read_command_reflect.go
index ba9d91d5..5e277190 100644
--- a/read_command_reflect.go
+++ b/read_command_reflect.go
@@ -23,8 +23,8 @@ import (
"strings"
"time"
- "github.com/aerospike/aerospike-client-go/v7/types"
- Buffer "github.com/aerospike/aerospike-client-go/v7/utils/buffer"
+ "github.com/aerospike/aerospike-client-go/v8/types"
+ Buffer "github.com/aerospike/aerospike-client-go/v8/utils/buffer"
)
// if this file is included in the build, it will include this method
@@ -33,28 +33,27 @@ func init() {
}
func parseObject(
- cmd *readCommand,
+ brc *baseReadCommand,
opCount int,
fieldCount int,
generation uint32,
expiration uint32,
) Error {
- receiveOffset := 0
// There can be fields in the response (setname etc).
// But for now, ignore them. Expose them to the API if needed in the future.
- //logger.Logger.Debug("field count: %d, databuffer: %v", fieldCount, cmd.dataBuffer)
+ //logger.Logger.Debug("field count: %d, databuffer: %v", fieldCount, brc.dataBuffer)
if fieldCount > 0 {
// Just skip over all the fields
for i := 0; i < fieldCount; i++ {
//logger.Logger.Debug("%d", receiveOffset)
- fieldSize := int(Buffer.BytesToUint32(cmd.dataBuffer, receiveOffset))
- receiveOffset += (4 + fieldSize)
+ fieldSize := int(Buffer.BytesToUint32(brc.dataBuffer, brc.dataOffset))
+ brc.dataOffset += (4 + fieldSize)
}
}
if opCount > 0 {
- rv := *cmd.object
+ rv := *brc.object
if rv.Kind() != reflect.Ptr {
return ErrInvalidObjectType.err()
@@ -78,19 +77,19 @@ func parseObject(
}
for i := 0; i < opCount; i++ {
- opSize := int(Buffer.BytesToUint32(cmd.dataBuffer, receiveOffset))
- particleType := int(cmd.dataBuffer[receiveOffset+5])
- nameSize := int(cmd.dataBuffer[receiveOffset+7])
- name := string(cmd.dataBuffer[receiveOffset+8 : receiveOffset+8+nameSize])
- receiveOffset += 4 + 4 + nameSize
+ opSize := int(Buffer.BytesToUint32(brc.dataBuffer, brc.dataOffset))
+ particleType := int(brc.dataBuffer[brc.dataOffset+5])
+ nameSize := int(brc.dataBuffer[brc.dataOffset+7])
+ name := string(brc.dataBuffer[brc.dataOffset+8 : brc.dataOffset+8+nameSize])
+ brc.dataOffset += 4 + 4 + nameSize
particleBytesSize := opSize - (4 + nameSize)
- value, _ := bytesToParticle(particleType, cmd.dataBuffer, receiveOffset, particleBytesSize)
+ value, _ := bytesToParticle(particleType, brc.dataBuffer, brc.dataOffset, particleBytesSize)
if err := setObjectField(mappings, iobj, name, value); err != nil {
return err
}
- receiveOffset += particleBytesSize
+ brc.dataOffset += particleBytesSize
}
}
diff --git a/read_header_command.go b/read_header_command.go
index f21a09b4..c2f81fc7 100644
--- a/read_header_command.go
+++ b/read_header_command.go
@@ -15,82 +15,53 @@
package aerospike
import (
- "github.com/aerospike/aerospike-client-go/v7/types"
-
- Buffer "github.com/aerospike/aerospike-client-go/v7/utils/buffer"
+ "github.com/aerospike/aerospike-client-go/v8/types"
)
type readHeaderCommand struct {
- singleCommand
-
- policy *BasePolicy
- record *Record
+ baseReadCommand
}
func newReadHeaderCommand(cluster *Cluster, policy *BasePolicy, key *Key) (readHeaderCommand, Error) {
- var err Error
- var partition *Partition
- if cluster != nil {
- partition, err = PartitionForRead(cluster, policy, key)
- if err != nil {
- return readHeaderCommand{}, err
- }
+ brc, err := newBaseReadCommand(cluster, policy, key)
+ if err != nil {
+ return readHeaderCommand{}, err
}
newReadHeaderCmd := readHeaderCommand{
- singleCommand: newSingleCommand(cluster, key, partition),
- policy: policy,
+ baseReadCommand: brc,
}
return newReadHeaderCmd, nil
}
-func (cmd *readHeaderCommand) getPolicy(ifc command) Policy {
- return cmd.policy
-}
-
func (cmd *readHeaderCommand) writeBuffer(ifc command) Error {
return cmd.setReadHeader(cmd.policy, cmd.key)
}
-func (cmd *readHeaderCommand) getNode(ifc command) (*Node, Error) {
- return cmd.partition.GetNodeRead(cmd.cluster)
-}
-
-func (cmd *readHeaderCommand) prepareRetry(ifc command, isTimeout bool) bool {
- cmd.partition.PrepareRetryRead(isTimeout)
- return true
-}
-
func (cmd *readHeaderCommand) parseResult(ifc command, conn *Connection) Error {
- // Read header.
- if _, err := conn.Read(cmd.dataBuffer, int(_MSG_TOTAL_HEADER_SIZE)); err != nil {
+ rp, err := newRecordParser(&cmd.baseCommand)
+ if err != nil {
return err
}
- header := Buffer.BytesToInt64(cmd.dataBuffer, 0)
-
- // Validate header to make sure we are at the beginning of a message
- if err := cmd.validateHeader(header); err != nil {
+ if err := rp.parseFields(cmd.policy.Txn, cmd.key, false); err != nil {
return err
}
- resultCode := cmd.dataBuffer[13] & 0xFF
-
- if resultCode == 0 {
- generation := Buffer.BytesToUint32(cmd.dataBuffer, 14)
- expiration := types.TTL(Buffer.BytesToUint32(cmd.dataBuffer, 18))
- cmd.record = newRecord(cmd.node, cmd.key, nil, generation, expiration)
+ if rp.resultCode == 0 {
+ cmd.record = newRecord(cmd.node, cmd.key, nil, rp.generation, rp.expiration)
} else {
- if types.ResultCode(resultCode) == types.KEY_NOT_FOUND_ERROR {
+ switch rp.resultCode {
+ case types.KEY_NOT_FOUND_ERROR:
cmd.record = nil
- } else if types.ResultCode(resultCode) == types.FILTERED_OUT {
+ case types.FILTERED_OUT:
return ErrFilteredOut.err()
- } else {
- return newError(types.ResultCode(resultCode))
+ default:
+ return newError(rp.resultCode)
}
}
- return cmd.emptySocket(conn)
+ return nil
}
func (cmd *readHeaderCommand) GetRecord() *Record {
@@ -101,6 +72,6 @@ func (cmd *readHeaderCommand) Execute() Error {
return cmd.execute(cmd)
}
-func (cmd *readHeaderCommand) transactionType() transactionType {
+func (cmd *readHeaderCommand) commandType() commandType {
return ttGetHeader
}
diff --git a/record_parser.go b/record_parser.go
new file mode 100644
index 00000000..2b953ff1
--- /dev/null
+++ b/record_parser.go
@@ -0,0 +1,212 @@
+// Copyright 2014-2022 Aerospike, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package aerospike
+
+import (
+ "fmt"
+
+ "github.com/aerospike/aerospike-client-go/v8/logger"
+ "github.com/aerospike/aerospike-client-go/v8/types"
+ Buffer "github.com/aerospike/aerospike-client-go/v8/utils/buffer"
+)
+
+// recordParser parses the header of a server response and exposes the parsed record payload.
+type recordParser struct {
+ resultCode types.ResultCode
+ generation uint32
+ expiration uint32
+ fieldCount int
+ opCount int
+
+ cmd *baseCommand
+}
+
+// newRecordParser reads the response proto from the connection and parses the result code, generation, expiration, field count and op count.
+func newRecordParser(cmd *baseCommand) (*recordParser, Error) {
+ rp := &recordParser{
+ cmd: cmd,
+ }
+
+ // Read proto and check if compressed
+ if _, err := rp.cmd.conn.Read(rp.cmd.dataBuffer, 8); err != nil {
+ logger.Logger.Debug("Connection error reading data for ReadCommand: %s", err.Error())
+ return nil, err
+ }
+
+ rp.cmd.dataOffset = 5
+ if compressedSize := rp.cmd.compressedSize(); compressedSize > 0 {
+ // Read compressed size
+ if _, err := rp.cmd.conn.Read(rp.cmd.dataBuffer, 8); err != nil {
+ logger.Logger.Debug("Connection error reading data for ReadCommand: %s", err.Error())
+ return nil, err
+ }
+
+ if err := rp.cmd.conn.initInflater(true, compressedSize); err != nil {
+ return nil, newError(types.PARSE_ERROR, fmt.Sprintf("Error setting up zlib inflater for size `%d`: %s", compressedSize, err.Error()))
+ }
+ rp.cmd.dataOffset = 13
+ }
+
+ sz := Buffer.BytesToInt64(rp.cmd.dataBuffer, 0)
+
+ // Read remaining message bytes.
+ receiveSize := int((sz & 0xFFFFFFFFFFFF))
+
+ if receiveSize > 0 {
+ if err := rp.cmd.sizeBufferSz(receiveSize, false); err != nil {
+ return rp, err
+ }
+ if _, err := rp.cmd.conn.Read(rp.cmd.dataBuffer, receiveSize); err != nil {
+ logger.Logger.Debug("Connection error reading data for ReadCommand: %s", err.Error())
+ return rp, err
+ }
+ }
+
+ // Validate header to make sure we are at the beginning of a message
+ if err := rp.cmd.validateHeader(sz); err != nil {
+ return nil, err
+ }
+
+ rp.resultCode = types.ResultCode(rp.cmd.dataBuffer[rp.cmd.dataOffset] & 0xFF)
+ rp.cmd.dataOffset++
+ rp.generation = Buffer.BytesToUint32(rp.cmd.dataBuffer, rp.cmd.dataOffset)
+ rp.cmd.dataOffset += 4
+ rp.expiration = types.TTL(Buffer.BytesToUint32(rp.cmd.dataBuffer, rp.cmd.dataOffset))
+ rp.cmd.dataOffset += 8
+ rp.fieldCount = int(Buffer.BytesToUint16(rp.cmd.dataBuffer, rp.cmd.dataOffset))
+ rp.cmd.dataOffset += 2
+ rp.opCount = int(Buffer.BytesToUint16(rp.cmd.dataBuffer, rp.cmd.dataOffset))
+ rp.cmd.dataOffset += 2
+
+ return rp, nil
+}
+
+func (rp *recordParser) parseFields(
+ txn *Txn,
+ key *Key,
+ hasWrite bool,
+) Error {
+ if txn == nil {
+ rp.skipFields()
+ return nil
+ }
+
+ var version *uint64
+
+ for i := 0; i < rp.fieldCount; i++ {
+ len := Buffer.BytesToInt32(rp.cmd.dataBuffer, rp.cmd.dataOffset)
+ rp.cmd.dataOffset += 4
+
+ typ := FieldType(rp.cmd.dataBuffer[rp.cmd.dataOffset])
+ rp.cmd.dataOffset++
+ size := len - 1
+
+ if typ == RECORD_VERSION {
+ if size == 7 {
+ version = Buffer.VersionBytesToUint64(rp.cmd.dataBuffer, rp.cmd.dataOffset)
+ } else {
+ return newError(types.PARSE_ERROR, fmt.Sprintf("Record version field has invalid size: %v", size))
+ }
+ }
+ rp.cmd.dataOffset += int(size)
+ }
+
+ if hasWrite {
+ txn.OnWrite(key, version, rp.resultCode)
+ } else {
+ txn.OnRead(key, version)
+ }
+
+ return nil
+}
+
+func (rp *recordParser) skipFields() {
+ // There can be fields in the response (setname etc).
+ // But for now, ignore them. Expose them to the API if needed in the future.
+ for i := 0; i < rp.fieldCount; i++ {
+ fieldLen := Buffer.BytesToUint32(rp.cmd.dataBuffer, rp.cmd.dataOffset)
+ rp.cmd.dataOffset += 4 + int(fieldLen)
+ }
+}
+
+func (rp *recordParser) parseTranDeadline(txn *Txn) {
+ for i := 0; i < rp.fieldCount; i++ {
+ len := Buffer.BytesToInt32(rp.cmd.dataBuffer, rp.cmd.dataOffset)
+ rp.cmd.dataOffset += 4
+
+ typ := rp.cmd.dataBuffer[rp.cmd.dataOffset]
+ rp.cmd.dataOffset++
+ size := len - 1
+
+ if FieldType(typ) == MRT_DEADLINE {
+ deadline := Buffer.LittleBytesToInt32(rp.cmd.dataBuffer, rp.cmd.dataOffset)
+ txn.deadline = int(deadline)
+ }
+ rp.cmd.dataOffset += int(size)
+ }
+}
+func (rp *recordParser) parseRecord(key *Key, isOperation bool) (*Record, Error) {
+ var bins BinMap
+ receiveOffset := rp.cmd.dataOffset
+
+ // There can be fields in the response (setname etc).
+ // But for now, ignore them. Expose them to the API if needed in the future.
+ if rp.fieldCount > 0 {
+ // Just skip over all the fields
+ for i := 0; i < rp.fieldCount; i++ {
+ fieldSize := int(Buffer.BytesToUint32(rp.cmd.dataBuffer, receiveOffset))
+ receiveOffset += (4 + fieldSize)
+ }
+ }
+
+ if rp.opCount > 0 {
+ bins = make(BinMap, rp.opCount)
+ }
+
+ for i := 0; i < rp.opCount; i++ {
+ opSize := int(Buffer.BytesToUint32(rp.cmd.dataBuffer, receiveOffset))
+ particleType := int(rp.cmd.dataBuffer[receiveOffset+5])
+ nameSize := int(rp.cmd.dataBuffer[receiveOffset+7])
+ name := string(rp.cmd.dataBuffer[receiveOffset+8 : receiveOffset+8+nameSize])
+ receiveOffset += 4 + 4 + nameSize
+
+ particleBytesSize := opSize - (4 + nameSize)
+ value, _ := bytesToParticle(particleType, rp.cmd.dataBuffer, receiveOffset, particleBytesSize)
+ receiveOffset += particleBytesSize
+
+ if bins == nil {
+ bins = make(BinMap, rp.opCount)
+ }
+
+ if isOperation {
+ // for operate list command results
+ if prev, exists := bins[name]; exists {
+ if res, ok := prev.(OpResults); ok {
+ // List already exists. Add to it.
+ bins[name] = append(res, value)
+ } else {
+ // Make a list to store all values.
+ bins[name] = OpResults{prev, value}
+ }
+ } else {
+ bins[name] = value
+ }
+ } else {
+ bins[name] = value
+ }
+ }
+
+ return newRecord(rp.cmd.node, key, bins, rp.generation, rp.expiration), nil
+}
diff --git a/recordset.go b/recordset.go
index 16b85f9e..dcf2c88e 100644
--- a/recordset.go
+++ b/recordset.go
@@ -21,7 +21,7 @@ import (
"runtime"
"sync"
- "github.com/aerospike/aerospike-client-go/v7/internal/atomic"
+ "github.com/aerospike/aerospike-client-go/v8/internal/atomic"
)
// Result is the value returned by Recordset's Results() function.
diff --git a/recordset_test.go b/recordset_test.go
index d0f12426..773db9b9 100644
--- a/recordset_test.go
+++ b/recordset_test.go
@@ -17,7 +17,7 @@ package aerospike
import (
"time"
- "github.com/aerospike/aerospike-client-go/v7/types"
+ "github.com/aerospike/aerospike-client-go/v8/types"
gg "github.com/onsi/ginkgo/v2"
gm "github.com/onsi/gomega"
diff --git a/scan_objects_executor.go b/scan_objects_executor.go
index 0ee92a36..2cccf718 100644
--- a/scan_objects_executor.go
+++ b/scan_objects_executor.go
@@ -19,7 +19,7 @@ import (
"sync"
"time"
- "github.com/aerospike/aerospike-client-go/v7/logger"
+ "github.com/aerospike/aerospike-client-go/v8/logger"
"golang.org/x/sync/semaphore"
)
diff --git a/scan_partition_command.go b/scan_partition_command.go
index 6727c693..c34366ad 100644
--- a/scan_partition_command.go
+++ b/scan_partition_command.go
@@ -14,7 +14,7 @@
package aerospike
-import "github.com/aerospike/aerospike-client-go/v7/types"
+import "github.com/aerospike/aerospike-client-go/v8/types"
type scanPartitionCommand struct {
baseMultiCommand
@@ -62,7 +62,7 @@ func (cmd *scanPartitionCommand) shouldRetry(e Error) bool {
return cmd.tracker != nil && cmd.tracker.shouldRetry(cmd.nodePartitions, e)
}
-func (cmd *scanPartitionCommand) transactionType() transactionType {
+func (cmd *scanPartitionCommand) commandType() commandType {
return ttScan
}
diff --git a/scan_partition_objects_command.go b/scan_partition_objects_command.go
index 6fc51234..054a5ffb 100644
--- a/scan_partition_objects_command.go
+++ b/scan_partition_objects_command.go
@@ -14,7 +14,7 @@
package aerospike
-import "github.com/aerospike/aerospike-client-go/v7/types"
+import "github.com/aerospike/aerospike-client-go/v8/types"
type scanPartitionObjectsCommand struct {
baseMultiCommand
@@ -66,7 +66,7 @@ func (cmd *scanPartitionObjectsCommand) shouldRetry(e Error) bool {
return cmd.tracker != nil && cmd.tracker.shouldRetry(cmd.nodePartitions, e)
}
-func (cmd *scanPartitionObjectsCommand) transactionType() transactionType {
+func (cmd *scanPartitionObjectsCommand) commandType() commandType {
return ttScan
}
diff --git a/scan_policy.go b/scan_policy.go
index 400850db..cbb1f267 100644
--- a/scan_policy.go
+++ b/scan_policy.go
@@ -15,6 +15,8 @@
package aerospike
// ScanPolicy encapsulates parameters used in scan operations.
+//
+// Inherited Policy fields Policy.Txn are ignored in scan commands.
type ScanPolicy struct {
MultiPolicy
}
diff --git a/scan_test.go b/scan_test.go
index 7c56c0c0..c4d7aead 100644
--- a/scan_test.go
+++ b/scan_test.go
@@ -19,9 +19,9 @@ import (
"math"
"math/rand"
- as "github.com/aerospike/aerospike-client-go/v7"
- ast "github.com/aerospike/aerospike-client-go/v7/types"
- particleType "github.com/aerospike/aerospike-client-go/v7/types/particle_type"
+ as "github.com/aerospike/aerospike-client-go/v8"
+ ast "github.com/aerospike/aerospike-client-go/v8/types"
+ particleType "github.com/aerospike/aerospike-client-go/v8/types/particle_type"
gg "github.com/onsi/ginkgo/v2"
gm "github.com/onsi/gomega"
@@ -252,10 +252,6 @@ var _ = gg.Describe("Scan operations", func() {
})
gg.It("must Scan and get all records back for a specified node using Results() channel", func() {
- if *proxy {
- gg.Skip("Not supported in Proxy Client")
- }
-
gm.Expect(len(keys)).To(gm.Equal(keyCount))
counter := 0
@@ -283,10 +279,6 @@ var _ = gg.Describe("Scan operations", func() {
})
gg.It("must Scan and get all records back for a specified node", func() {
- if *proxy {
- gg.Skip("Not supported in Proxy Client")
- }
-
gm.Expect(len(keys)).To(gm.Equal(keyCount))
for _, node := range client.GetNodes() {
diff --git a/security_test.go b/security_test.go
index b53c5327..8400968e 100644
--- a/security_test.go
+++ b/security_test.go
@@ -18,7 +18,7 @@ import (
"fmt"
"time"
- as "github.com/aerospike/aerospike-client-go/v7"
+ as "github.com/aerospike/aerospike-client-go/v8"
gg "github.com/onsi/ginkgo/v2"
gm "github.com/onsi/gomega"
@@ -34,10 +34,6 @@ var _ = gg.Describe("Security tests", func() {
var err error
gg.BeforeEach(func() {
- if *proxy {
- gg.Skip("Not supported in Proxy Client")
- }
-
if !securityEnabled() {
gg.Skip("Security Tests are not supported in the Community Edition, or when security is not enabled for the Enterprise Edition.")
}
diff --git a/server_command.go b/server_command.go
index 73f8371f..6595e5e6 100644
--- a/server_command.go
+++ b/server_command.go
@@ -15,8 +15,8 @@
package aerospike
import (
- "github.com/aerospike/aerospike-client-go/v7/types"
- Buffer "github.com/aerospike/aerospike-client-go/v7/utils/buffer"
+ "github.com/aerospike/aerospike-client-go/v8/types"
+ Buffer "github.com/aerospike/aerospike-client-go/v8/utils/buffer"
)
type serverCommand struct {
diff --git a/single_command.go b/single_command.go
index 9739288a..9a1ea0f6 100644
--- a/single_command.go
+++ b/single_command.go
@@ -15,7 +15,7 @@
package aerospike
import (
- Buffer "github.com/aerospike/aerospike-client-go/v7/utils/buffer"
+ Buffer "github.com/aerospike/aerospike-client-go/v8/utils/buffer"
)
type singleCommand struct {
diff --git a/statement.go b/statement.go
index a9d3857d..1f077265 100644
--- a/statement.go
+++ b/statement.go
@@ -18,7 +18,7 @@ import (
"fmt"
"math/rand"
- "github.com/aerospike/aerospike-client-go/v7/types"
+ "github.com/aerospike/aerospike-client-go/v8/types"
)
// Statement encapsulates query statement parameters.
diff --git a/task.go b/task.go
index c019ad4a..a6d9631b 100644
--- a/task.go
+++ b/task.go
@@ -17,8 +17,8 @@ package aerospike
import (
"time"
- "github.com/aerospike/aerospike-client-go/v7/internal/atomic"
- "github.com/aerospike/aerospike-client-go/v7/types"
+ "github.com/aerospike/aerospike-client-go/v8/internal/atomic"
+ "github.com/aerospike/aerospike-client-go/v8/types"
)
// Task interface defines methods for asynchronous tasks.
diff --git a/tools/asinfo/asinfo.go b/tools/asinfo/asinfo.go
index 3288cbcb..49670379 100644
--- a/tools/asinfo/asinfo.go
+++ b/tools/asinfo/asinfo.go
@@ -20,7 +20,7 @@ import (
"os"
"strings"
- as "github.com/aerospike/aerospike-client-go/v7"
+ as "github.com/aerospike/aerospike-client-go/v8"
)
var (
diff --git a/tools/benchmark/benchmark.go b/tools/benchmark/benchmark.go
index 07ab1450..0998678b 100644
--- a/tools/benchmark/benchmark.go
+++ b/tools/benchmark/benchmark.go
@@ -32,9 +32,9 @@ import (
"sync/atomic"
"time"
- as "github.com/aerospike/aerospike-client-go/v7"
- asl "github.com/aerospike/aerospike-client-go/v7/logger"
- ast "github.com/aerospike/aerospike-client-go/v7/types"
+ as "github.com/aerospike/aerospike-client-go/v8"
+ asl "github.com/aerospike/aerospike-client-go/v8/logger"
+ ast "github.com/aerospike/aerospike-client-go/v8/types"
)
// TStats is a goroutine's statistics values
@@ -76,10 +76,10 @@ var keyFilePassphrase = flag.String("keyFilePass", "", `Passphrase for encrypted
var binDef = flag.String("o", "I", "Bin object specification.\n\tI\t: Read/write integer bin.\n\tB:200\t: Read/write byte array bin of length 200.\n\tS:50\t: Read/write string bin of length 50.")
var concurrency = flag.Int("c", 32, "Number of goroutines to generate load.")
var workloadDef = flag.String("w", "I:100", "Desired workload.\n\tI:60\t: Linear 'insert' workload initializing 60% of the keys.\n\tRU:80\t: Random read/update workload with 80% reads and 20% writes.")
-var latency = flag.String("L", "", "Latency ,.\n\tShow transaction latency percentages using elapsed time ranges.\n\t Number of elapsed time ranges.\n\t Power of 2 multiple between each range starting at column 3.")
+var latency = flag.String("L", "", "Latency ,.\n\tShow command latency percentages using elapsed time ranges.\n\t Number of elapsed time ranges.\n\t Power of 2 multiple between each range starting at column 3.")
var throughput = flag.Int64("g", 0, "Throttle transactions per second to a maximum value.\n\tIf tps is zero, do not throttle throughput.")
var timeout = flag.Int("T", 0, "Read/Write timeout in milliseconds.")
-var maxRetries = flag.Int("maxRetries", 2, "Maximum number of retries before aborting the current transaction.")
+var maxRetries = flag.Int("maxRetries", 2, "Maximum number of retries before aborting the current command.")
var connQueueSize = flag.Int("queueSize", 128, "Maximum number of connections to pool.")
var maxErrorRate = flag.Int("maxErrorRate", 50, "Maximum Error Rate for the Circuit-Breaker to trigger.")
var errorRateWindow = flag.Int("errorRateWindow", 1, "Error Rate Window for the Circuit-Breaker to trigger.")
@@ -91,7 +91,6 @@ var minConnsPerNode = flag.Int("minConnsPerNode", 0, "Minimum connections to mai
var randBinData = flag.Bool("R", false, "Use dynamically generated random bin values instead of default static fixed bin values.")
var useMarshalling = flag.Bool("M", false, "Use marshaling a struct instead of simple key/value operations")
var debugMode = flag.Bool("d", false, "Run benchmarks in debug mode.")
-var grpc = flag.Bool("grpc", false, "Enable GRPC mode.")
var profileMode = flag.Bool("profile", false, "Run benchmarks with profiler active on port 6060 by default.")
var profilePort = flag.Int("profilePort", 6060, "Profile port.")
var showUsage = flag.Bool("u", false, "Show usage information.")
@@ -171,27 +170,15 @@ func main() {
dbHost := as.NewHost(*host, *port)
dbHost.TLSName = *tlsName
- var client as.ClientIfc
- if *grpc {
- gclient, err := as.NewProxyClientWithPolicyAndHost(clientPolicy, dbHost)
- if err != nil {
- logger.Fatal(err)
- }
-
- client = gclient
- } else {
- nclient, err := as.NewClientWithPolicyAndHost(clientPolicy, dbHost)
- if err != nil {
- logger.Fatal(err)
- }
-
- cc, _ := nclient.WarmUp(*warmUp)
- logger.Printf("Warm-up conns.:\t%d", cc)
- logger.Println("Nodes Found:", nclient.GetNodeNames())
-
- client = nclient
+ client, err := as.NewClientWithPolicyAndHost(clientPolicy, dbHost)
+ if err != nil {
+ logger.Fatal(err)
}
+ cc, _ := client.WarmUp(*warmUp)
+ logger.Printf("Warm-up conns.:\t%d", cc)
+ logger.Println("Nodes Found:", client.GetNodeNames())
+
go reporter()
switch workloadType {
@@ -420,7 +407,7 @@ func incOnError(op, timeout *int, err error) {
}
}
-func runBench_I(client as.ClientIfc, ident int, times int) {
+func runBench_I(client *as.Client, ident int, times int) {
defer wg.Done()
xr := NewXorRand()
@@ -529,7 +516,7 @@ func runBench_I(client as.ClientIfc, ident int, times int) {
countReportChan <- &TStats{false, WCount, 0, writeErr, 0, writeTOErr, 0, wMinLat, wMaxLat, 0, 0, wLatTotal, 0, wLatList, nil}
}
-func runBench_RU(client as.ClientIfc, ident int, times int) {
+func runBench_RU(client *as.Client, ident int, times int) {
defer wg.Done()
xr := NewXorRand()
@@ -694,7 +681,7 @@ func min(a, b int64) int64 {
return b
}
-// listens to transaction report channel, and print them out on intervals
+// listens to command report channel, and print them out on intervals
func reporter() {
var totalWCount, totalRCount int
var totalWErrCount, totalRErrCount int
diff --git a/tools/cli/cli.go b/tools/cli/cli.go
index a2507928..eed5ee75 100644
--- a/tools/cli/cli.go
+++ b/tools/cli/cli.go
@@ -21,8 +21,8 @@ import (
"runtime"
"strings"
- as "github.com/aerospike/aerospike-client-go/v7"
- asl "github.com/aerospike/aerospike-client-go/v7/logger"
+ as "github.com/aerospike/aerospike-client-go/v8"
+ asl "github.com/aerospike/aerospike-client-go/v8/logger"
)
// flag information
diff --git a/touch_command.go b/touch_command.go
index fe0723ab..4fcd2389 100644
--- a/touch_command.go
+++ b/touch_command.go
@@ -15,119 +15,49 @@
package aerospike
import (
- "fmt"
-
- "github.com/aerospike/aerospike-client-go/v7/logger"
- "github.com/aerospike/aerospike-client-go/v7/types"
-
- Buffer "github.com/aerospike/aerospike-client-go/v7/utils/buffer"
+ "github.com/aerospike/aerospike-client-go/v8/types"
)
// guarantee touchCommand implements command interface
var _ command = &touchCommand{}
type touchCommand struct {
- singleCommand
-
- policy *WritePolicy
+ baseWriteCommand
}
func newTouchCommand(cluster *Cluster, policy *WritePolicy, key *Key) (touchCommand, Error) {
- var err Error
- var partition *Partition
- if cluster != nil {
- partition, err = PartitionForWrite(cluster, &policy.BasePolicy, key)
- if err != nil {
- return touchCommand{}, err
- }
+ bwc, err := newBaseWriteCommand(cluster, policy, key)
+ if err != nil {
+ return touchCommand{}, err
}
newTouchCmd := touchCommand{
- singleCommand: newSingleCommand(cluster, key, partition),
- policy: policy,
+ baseWriteCommand: bwc,
}
return newTouchCmd, nil
}
-func (cmd *touchCommand) getPolicy(ifc command) Policy {
- return cmd.policy
-}
-
func (cmd *touchCommand) writeBuffer(ifc command) Error {
return cmd.setTouch(cmd.policy, cmd.key)
}
-func (cmd *touchCommand) getNode(ifc command) (*Node, Error) {
- return cmd.partition.GetNodeWrite(cmd.cluster)
-}
-
-func (cmd *touchCommand) prepareRetry(ifc command, isTimeout bool) bool {
- cmd.partition.PrepareRetryWrite(isTimeout)
- return true
-}
-
func (cmd *touchCommand) parseResult(ifc command, conn *Connection) Error {
- // Read header.
- _, err := conn.Read(cmd.dataBuffer, 8)
+ resultCode, err := cmd.parseHeader()
if err != nil {
- return err
+ return newCustomNodeError(cmd.node, err.resultCode())
}
- if compressedSize := cmd.compressedSize(); compressedSize > 0 {
- // Read compressed size
- _, err = conn.Read(cmd.dataBuffer, compressedSize)
- if err != nil {
- logger.Logger.Debug("Connection error reading data for TouchCommand: %s", err.Error())
- return err
- }
-
- // Read compressed size
- _, err = conn.Read(cmd.dataBuffer, 8)
- if err != nil {
- logger.Logger.Debug("Connection error reading data for TouchCommand: %s", err.Error())
- return err
- }
-
- if err = cmd.conn.initInflater(true, compressedSize); err != nil {
- return newError(types.PARSE_ERROR, fmt.Sprintf("Error setting up zlib inflater for size `%d`: %s", compressedSize, err.Error()))
- }
-
- // Read header.
- _, err = conn.Read(cmd.dataBuffer, int(_MSG_TOTAL_HEADER_SIZE))
- if err != nil {
- logger.Logger.Debug("Connection error reading data for TouchCommand: %s", err.Error())
- return err
- }
- } else {
- // Read header.
- _, err = conn.Read(cmd.dataBuffer[8:], int(_MSG_TOTAL_HEADER_SIZE)-8)
- if err != nil {
- logger.Logger.Debug("Connection error reading data for TouchCommand: %s", err.Error())
- return err
- }
- }
-
- // Read header.
- header := Buffer.BytesToInt64(cmd.dataBuffer, 0)
-
- // Validate header to make sure we are at the beginning of a message
- if err := cmd.validateHeader(header); err != nil {
- return err
- }
-
- resultCode := cmd.dataBuffer[13] & 0xFF
-
- if resultCode != 0 {
- if resultCode == byte(types.KEY_NOT_FOUND_ERROR) {
- return ErrKeyNotFound.err()
- } else if types.ResultCode(resultCode) == types.FILTERED_OUT {
- return ErrFilteredOut.err()
- }
-
+ switch resultCode {
+ case types.OK:
+ return nil
+ case types.KEY_NOT_FOUND_ERROR:
+ return ErrKeyNotFound.err()
+ case types.FILTERED_OUT:
+ return ErrFilteredOut.err()
+ default:
return newError(types.ResultCode(resultCode))
}
- return cmd.emptySocket(conn)
}
func (cmd *touchCommand) isRead() bool {
@@ -138,6 +68,6 @@ func (cmd *touchCommand) Execute() Error {
return cmd.execute(cmd)
}
-func (cmd *touchCommand) transactionType() transactionType {
+func (cmd *touchCommand) commandType() commandType {
return ttPut
}
diff --git a/truncate_test.go b/truncate_test.go
index 94ed72fe..d1929e20 100644
--- a/truncate_test.go
+++ b/truncate_test.go
@@ -19,7 +19,7 @@ import (
"math/rand"
"time"
- as "github.com/aerospike/aerospike-client-go/v7"
+ as "github.com/aerospike/aerospike-client-go/v8"
gg "github.com/onsi/ginkgo/v2"
gm "github.com/onsi/gomega"
@@ -28,12 +28,6 @@ import (
// ALL tests are isolated by SetName and Key, which are 50 random characters
var _ = gg.Describe("Truncate operations test", func() {
- gg.BeforeEach(func() {
- if *proxy {
- gg.Skip("Not supported in Proxy Client")
- }
- })
-
gg.Context("Truncate", func() {
var err error
var ns = *namespace
diff --git a/txn.go b/txn.go
new file mode 100644
index 00000000..0d986454
--- /dev/null
+++ b/txn.go
@@ -0,0 +1,323 @@
+// Copyright 2014-2024 Aerospike, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package aerospike
+
+import (
+ "fmt"
+ "sync/atomic"
+ "time"
+
+ sm "github.com/aerospike/aerospike-client-go/v8/internal/atomic/map"
+ "github.com/aerospike/aerospike-client-go/v8/types"
+)
+
+// MRT state.
+type TxnState byte
+
+const (
+ TxnStateOpen TxnState = iota
+ TxnStateVerified
+ TxnStateCommitted
+ TxnStateAborted
+)
+
+var txnRandomState atomic.Int64
+
+func init() {
+ txnRandomState.Store(time.Now().UnixNano())
+}
+
+// Multi-record transaction (MRT). Each command in the MRT must use the same namespace.
+type Txn struct {
+ id int64
+ reads sm.Map[*Key, *uint64]
+ writes sm.Map[*Key, struct{}]
+ namespace *string
+ timeout int
+ deadline int
+ monitorInDoubt bool
+ inDoubt bool
+ rollAttempted bool
+ state TxnState
+}
+
+// Create MRT, assign random transaction id and initialize reads/writes hashmaps with default capacities.
+//
+// The default client MRT timeout is zero. This means use the server configuration mrt-duration
+// as the MRT timeout. The default mrt-duration is 10 seconds.
+func NewTxn() *Txn {
+ return &Txn{
+ id: createTxnId(),
+ reads: *sm.New[*Key, *uint64](16),
+ writes: *sm.New[*Key, struct{}](16),
+ timeout: 0,
+ state: TxnStateOpen,
+ }
+}
+
+// Create MRT, assign random transaction id and initialize reads/writes hashmaps with given capacities.
+//
+// readsCapacity expected number of record reads in the MRT. Minimum value is 16.
+// writesCapacity expected number of record writes in the MRT. Minimum value is 16.
+func NewTxnWithCapacity(readsCapacity, writesCapacity int) *Txn {
+ if readsCapacity < 16 {
+ readsCapacity = 16
+ }
+
+ if writesCapacity < 16 {
+ writesCapacity = 16
+ }
+
+ return &Txn{
+ id: createTxnId(),
+ reads: *sm.New[*Key, *uint64](readsCapacity),
+ writes: *sm.New[*Key, struct{}](writesCapacity),
+ timeout: 0,
+ state: TxnStateOpen,
+ }
+}
+
+func createTxnId() int64 {
+ // xorshift64* doesn't generate zeroes.
+ var oldState, newState int64
+
+ oldState = txnRandomState.Load()
+ newState = oldState
+ newState ^= newState >> 12
+ newState ^= newState << 25
+ newState ^= newState >> 27
+
+ for !txnRandomState.CompareAndSwap(oldState, newState) {
+ oldState = txnRandomState.Load()
+ newState = oldState
+ newState ^= newState >> 12
+ newState ^= newState << 25
+ newState ^= newState >> 27
+ }
+
+	return newState // xorshift64* would also multiply by 0x2545F4914F6CDD1D; this variant omits it
+}
+
+// Return MRT ID.
+func (txn *Txn) Id() int64 {
+ return txn.id
+}
+
+// Return MRT state.
+func (txn *Txn) State() TxnState {
+ return txn.state
+}
+
+// Set MRT state.
+func (txn *Txn) SetState(state TxnState) {
+ txn.state = state
+}
+
+// Process the results of a record read. For internal use only.
+func (txn *Txn) OnRead(key *Key, version *uint64) {
+ if version != nil {
+ txn.reads.Set(key, version)
+ }
+}
+
+// Get record version for a given key.
+func (txn *Txn) GetReadVersion(key *Key) *uint64 {
+ return txn.reads.Get(key)
+}
+
+// Report whether a read version is tracked for the given key.
+func (txn *Txn) ReadExistsForKey(key *Key) bool {
+ return txn.reads.Exists(key)
+}
+
+// Get all read keys and their versions.
+func (txn *Txn) GetReads() map[*Key]*uint64 {
+ return txn.reads.Clone()
+}
+
+// Process the results of a record write. For internal use only.
+func (txn *Txn) OnWrite(key *Key, version *uint64, resultCode types.ResultCode) {
+ if version != nil {
+ txn.reads.Set(key, version)
+ } else if resultCode == 0 {
+ txn.reads.Delete(key)
+ txn.writes.Set(key, struct{}{})
+ }
+}
+
+// Add key to write hash when write command is in doubt (usually caused by timeout).
+func (txn *Txn) OnWriteInDoubt(key *Key) {
+ txn.reads.Delete(key)
+ txn.writes.Set(key, struct{}{})
+}
+
+// Get all write keys.
+func (txn *Txn) GetWrites() []*Key {
+ return txn.writes.Keys()
+}
+
+// Report whether the given key is tracked in the write set.
+func (txn *Txn) WriteExistsForKey(key *Key) bool {
+ return txn.writes.Exists(key)
+}
+
+// Return MRT namespace. NOTE: panics if no namespace has been set yet.
+func (txn *Txn) GetNamespace() string {
+ return *txn.namespace
+}
+
+// Verify current MRT state and namespace for a future read command.
+func (txn *Txn) prepareRead(ns string) Error {
+ if err := txn.VerifyCommand(); err != nil {
+ return err
+ }
+ return txn.SetNamespace(ns)
+}
+
+// Verify current MRT state and namespaces for a future batch read command.
+func (txn *Txn) prepareReadForKeys(keys []*Key) Error {
+ if err := txn.VerifyCommand(); err != nil {
+ return err
+ }
+ return txn.setNamespaceForKeys(keys)
+}
+
+// Verify current MRT state and namespaces for a future batch read command.
+func (txn *Txn) prepareBatchReads(records []*BatchRead) Error {
+ if err := txn.VerifyCommand(); err != nil {
+ return err
+ }
+ return txn.setNamespaceForBatchReads(records)
+}
+
+// Verify current MRT state and namespaces for a future batch read command.
+func (txn *Txn) prepareReadForBatchRecordsIfc(records []BatchRecordIfc) Error {
+ if err := txn.VerifyCommand(); err != nil {
+ return err
+ }
+ return txn.setNamespaceForBatchRecordsIfc(records)
+}
+
+// Verify that the MRT state allows future commands.
+func (txn *Txn) VerifyCommand() Error {
+ if txn.state != TxnStateOpen {
+ return newError(types.FAIL_FORBIDDEN, fmt.Sprintf("Command not allowed in current MRT state: %#v", txn.state))
+ }
+ return nil
+}
+
+// Set MRT namespace only if doesn't already exist.
+// If namespace already exists, verify new namespace is the same.
+func (txn *Txn) SetNamespace(ns string) Error {
+ if txn.namespace == nil {
+ txn.namespace = &ns
+ } else if *txn.namespace != ns {
+ return newError(types.COMMON_ERROR, "Namespace must be the same for all commands in the MRT. orig: "+
+ *txn.namespace+" new: "+ns)
+ }
+ return nil
+}
+
+// Set MRT namespaces for each key only if doesn't already exist.
+// If namespace already exists, verify new namespace is the same.
+func (txn *Txn) setNamespaceForKeys(keys []*Key) Error {
+ for _, key := range keys {
+ if err := txn.SetNamespace(key.namespace); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// Set MRT namespaces for each key only if doesn't already exist.
+// If namespace already exists, verify new namespace is the same.
+func (txn *Txn) setNamespaceForBatchReads(records []*BatchRead) Error {
+ for _, br := range records {
+ if err := txn.SetNamespace(br.key().namespace); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// Set MRT namespaces for each key only if doesn't already exist.
+// If namespace already exists, verify new namespace is the same.
+func (txn *Txn) setNamespaceForBatchRecordsIfc(records []BatchRecordIfc) Error {
+ for _, br := range records {
+ if err := txn.SetNamespace(br.key().namespace); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// Get MRT timeout.
+func (txn *Txn) GetTimeout() time.Duration {
+ return time.Duration(txn.timeout) * time.Second
+}
+
+// Set MRT timeout in seconds.
+// The timer starts when the MRT monitor record is created, which occurs
+// when the first command in the MRT is executed. If the timeout is reached
+// before a commit or abort is called, the server will expire and rollback
+// the MRT.
+//
+// If the MRT timeout is zero, the server configuration mrt-duration is used.
+// The default mrt-duration is 10 seconds.
+func (txn *Txn) SetTimeout(timeout time.Duration) {
+ txn.timeout = int(timeout / time.Second)
+}
+
+// Get MRT inDoubt.
+func (txn *Txn) GetInDoubt() bool {
+ return txn.inDoubt
+}
+
+// Set MRT inDoubt. For internal use only.
+func (txn *Txn) SetInDoubt(inDoubt bool) {
+ txn.inDoubt = inDoubt
+}
+
+// Set that the MRT monitor existence is in doubt.
+func (txn *Txn) SetMonitorInDoubt() {
+ txn.monitorInDoubt = true
+}
+
+// Does MRT monitor record exist or is in doubt.
+func (txn *Txn) MonitorMightExist() bool {
+ return txn.deadline != 0 || txn.monitorInDoubt
+}
+
+// Does MRT monitor record exist.
+func (txn *Txn) MonitorExists() bool {
+ return txn.deadline != 0
+}
+
+// Verify that commit/abort is only attempted once. For internal use only.
+func (txn *Txn) SetRollAttempted() bool {
+ if txn.rollAttempted {
+ return false
+ }
+ txn.rollAttempted = true
+ return true
+}
+
+// Clear MRT. Remove all tracked keys.
+func (txn *Txn) Clear() {
+ txn.namespace = nil
+ txn.deadline = 0
+ txn.reads.Clear()
+ txn.writes.Clear()
+}
diff --git a/txn_add_keys_command.go b/txn_add_keys_command.go
new file mode 100644
index 00000000..0959e06f
--- /dev/null
+++ b/txn_add_keys_command.go
@@ -0,0 +1,73 @@
+// Copyright 2014-2022 Aerospike, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package aerospike
+
+import (
+ "github.com/aerospike/aerospike-client-go/v8/types"
+)
+
+// guarantee txnAddKeysCommand implements command interface
+var _ command = &txnAddKeysCommand{}
+
+type txnAddKeysCommand struct {
+ baseWriteCommand
+
+ args operateArgs
+}
+
+func newTxnAddKeysCommand(
+ cluster *Cluster,
+ key *Key,
+ args operateArgs,
+) (txnAddKeysCommand, Error) {
+ bwc, err := newBaseWriteCommand(cluster, args.writePolicy, key)
+ if err != nil {
+ return txnAddKeysCommand{}, err
+ }
+
+ newTxnAddKeysCmd := txnAddKeysCommand{
+ baseWriteCommand: bwc,
+ args: args,
+ }
+
+ return newTxnAddKeysCmd, nil
+}
+
+func (cmd *txnAddKeysCommand) writeBuffer(ifc command) Error {
+ return cmd.setTxnAddKeys(cmd.policy, cmd.key, cmd.args)
+}
+
+func (cmd *txnAddKeysCommand) parseResult(ifc command, conn *Connection) Error {
+ rp, err := newRecordParser(&cmd.baseCommand)
+ if err != nil {
+ return err
+ }
+ rp.parseTranDeadline(cmd.policy.Txn)
+
+ if rp.resultCode != types.OK {
+ return newCustomNodeError(cmd.node, rp.resultCode)
+ }
+
+ return nil
+}
+
+func (cmd *txnAddKeysCommand) onInDoubt() {
+ // The MRT monitor record might exist if TxnAddKeys command is inDoubt.
+ cmd.txn.SetMonitorInDoubt()
+}
+
+func (cmd *txnAddKeysCommand) Execute() Error {
+ return cmd.execute(cmd)
+}
diff --git a/txn_close.go b/txn_close.go
new file mode 100644
index 00000000..1d7e5cad
--- /dev/null
+++ b/txn_close.go
@@ -0,0 +1,72 @@
+// Copyright 2014-2022 Aerospike, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package aerospike
+
+import (
+ "github.com/aerospike/aerospike-client-go/v8/types"
+)
+
+// guarantee txnCloseCommand implements command interface
+var _ command = &txnCloseCommand{}
+
+type txnCloseCommand struct {
+ baseWriteCommand
+
+ txn *Txn
+}
+
+func newTxnCloseCommand(
+ cluster *Cluster,
+ txn *Txn,
+ writePolicy *WritePolicy,
+ key *Key,
+) (txnCloseCommand, Error) {
+ bwc, err := newBaseWriteCommand(cluster, writePolicy, key)
+ if err != nil {
+ return txnCloseCommand{}, err
+ }
+
+ newTxnCloseCmd := txnCloseCommand{
+ baseWriteCommand: bwc,
+ txn: txn,
+ }
+
+ return newTxnCloseCmd, nil
+}
+
+func (cmd *txnCloseCommand) writeBuffer(ifc command) Error {
+ return cmd.setTxnClose(cmd.txn, cmd.key)
+}
+
+func (cmd *txnCloseCommand) parseResult(ifc command, conn *Connection) Error {
+ resultCode, err := cmd.parseHeader()
+ if err != nil {
+ return newCustomNodeError(cmd.node, err.resultCode())
+ }
+
+ if resultCode == 0 || resultCode == types.KEY_NOT_FOUND_ERROR {
+ return nil
+ }
+
+ return newCustomNodeError(cmd.node, types.ResultCode(resultCode))
+}
+
+func (cmd *txnCloseCommand) Execute() Error {
+ return cmd.execute(cmd)
+}
+
+func (cmd *txnCloseCommand) onInDoubt() {
+ return
+}
diff --git a/txn_error.go b/txn_error.go
new file mode 100644
index 00000000..f6d65f4f
--- /dev/null
+++ b/txn_error.go
@@ -0,0 +1,55 @@
+// Copyright 2014-2022 Aerospike, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package aerospike
+
+import "github.com/aerospike/aerospike-client-go/v8/types"
+
+// TxnError implements Error interface for aerospike multi-record transaction specific errors.
+type TxnError struct {
+ AerospikeError
+
+ // Error status of the attempted commit.
+ CommitError CommitError
+
+ // Verify result for each read key in the MRT. May be nil if failure occurred before verify.
+ VerifyRecords []BatchRecordIfc
+
+ // Roll forward/backward result for each write key in the MRT. May be nil if failure occurred before
+ // roll forward/backward.
+ RollRecords []BatchRecordIfc
+}
+
+var _ error = &TxnError{}
+var _ Error = &TxnError{}
+
+// func NewTxnCommitError(err CommitError, verifyRecords, rollRecords []BatchRecordIfc, cause Error) Error {
+func NewTxnCommitError(err CommitError, cause Error) Error {
+ if cause == nil {
+ res := newError(types.TXN_FAILED, string(err))
+ return &TxnError{
+ AerospikeError: *(res.(*AerospikeError)),
+ CommitError: err,
+ // VerifyRecords: verifyRecords,
+ // RollRecords: rollRecords,
+ }
+ }
+
+ return &TxnError{
+ AerospikeError: *(cause.(*AerospikeError)),
+ CommitError: err,
+ // VerifyRecords: verifyRecords,
+ // RollRecords: rollRecords,
+ }
+}
diff --git a/txn_mark_roll_forward.go b/txn_mark_roll_forward.go
new file mode 100644
index 00000000..9e58c239
--- /dev/null
+++ b/txn_mark_roll_forward.go
@@ -0,0 +1,64 @@
+// Copyright 2014-2022 Aerospike, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package aerospike
+
+import (
+ "github.com/aerospike/aerospike-client-go/v8/types"
+)
+
+// guarantee txnMarkRollForwardCommand implements command interface
+var _ command = &txnMarkRollForwardCommand{}
+
+type txnMarkRollForwardCommand struct {
+ baseWriteCommand
+}
+
+func newTxnMarkRollForwardCommand(
+ cluster *Cluster,
+ writePolicy *WritePolicy,
+ key *Key,
+) (txnMarkRollForwardCommand, Error) {
+ bwc, err := newBaseWriteCommand(cluster, writePolicy, key)
+ if err != nil {
+ return txnMarkRollForwardCommand{}, err
+ }
+
+ newMarkRollForwardCmd := txnMarkRollForwardCommand{
+ baseWriteCommand: bwc,
+ }
+
+ return newMarkRollForwardCmd, nil
+}
+
+func (cmd *txnMarkRollForwardCommand) writeBuffer(ifc command) Error {
+ return cmd.setTxnMarkRollForward(cmd.key)
+}
+
+func (cmd *txnMarkRollForwardCommand) parseResult(ifc command, conn *Connection) Error {
+ resultCode, err := cmd.parseHeader()
+ if err != nil {
+ return newCustomNodeError(cmd.node, err.resultCode())
+ }
+
+ if resultCode == 0 || resultCode == types.MRT_COMMITTED {
+ return nil
+ }
+
+ return newCustomNodeError(cmd.node, types.ResultCode(resultCode))
+}
+
+func (cmd *txnMarkRollForwardCommand) Execute() Error {
+ return cmd.execute(cmd)
+}
diff --git a/txn_monitor.go b/txn_monitor.go
new file mode 100644
index 00000000..dc3cf977
--- /dev/null
+++ b/txn_monitor.go
@@ -0,0 +1,150 @@
+// Copyright 2014-2024 Aerospike, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package aerospike
+
+type TxnMonitor struct{}
+
+var txnMonitor = new(TxnMonitor)
+
+var txnOrderedListPolicy = &ListPolicy{
+ attributes: ListOrderOrdered,
+ flags: ListWriteFlagsAddUnique | ListWriteFlagsNoFail | ListWriteFlagsPartial,
+}
+
+const binNameId = "id"
+const binNameDigests = "keyds"
+
+func (tm *TxnMonitor) addKey(cluster *Cluster, policy *WritePolicy, cmdKey *Key) Error {
+ txn := policy.Txn
+
+ if txn.WriteExistsForKey(cmdKey) {
+ // Transaction monitor already contains this key.
+ return nil
+ }
+
+ ops := tm.getTranOps(txn, cmdKey)
+ return tm.addWriteKeys(cluster, policy.GetBasePolicy(), ops)
+}
+
+func (tm *TxnMonitor) addKeys(cluster *Cluster, policy *BatchPolicy, keys []*Key) Error {
+ ops := tm.getTranOpsFromKeys(policy.Txn, keys)
+ return tm.addWriteKeys(cluster, policy.GetBasePolicy(), ops)
+}
+
+func (tm *TxnMonitor) addKeysFromRecords(cluster *Cluster, policy *BatchPolicy, records []BatchRecordIfc) Error {
+ ops := tm.getTranOpsFromBatchRecords(policy.Txn, records)
+
+ if len(ops) > 0 {
+ return tm.addWriteKeys(cluster, policy.GetBasePolicy(), ops)
+ }
+ return nil
+}
+
+func (tm *TxnMonitor) getTranOps(txn *Txn, cmdKey *Key) []*Operation {
+ txn.SetNamespace(cmdKey.namespace)
+
+ if txn.MonitorExists() {
+ return []*Operation{
+ ListAppendWithPolicyOp(txnOrderedListPolicy, binNameDigests, cmdKey.Digest()),
+ }
+ } else {
+ return []*Operation{
+ PutOp(NewBin(binNameId, txn.Id())),
+ ListAppendWithPolicyOp(txnOrderedListPolicy, binNameDigests, cmdKey.Digest()),
+ }
+ }
+}
+
+func (tm *TxnMonitor) getTranOpsFromKeys(txn *Txn, keys []*Key) []*Operation {
+ list := make([]interface{}, 0, len(keys))
+
+ for _, key := range keys {
+ txn.SetNamespace(key.namespace)
+ list = append(list, NewBytesValue(key.Digest()))
+ }
+ return tm.getTranOpsFromValueList(txn, list)
+}
+
+func (tm *TxnMonitor) getTranOpsFromBatchRecords(txn *Txn, records []BatchRecordIfc) []*Operation {
+ list := make([]interface{}, 0, len(records))
+
+ for _, br := range records {
+ txn.SetNamespace(br.key().namespace)
+
+ if br.BatchRec().hasWrite {
+ list = append(list, br.key().Digest())
+ }
+ }
+
+ if len(list) == 0 {
+ // Readonly batch does not need to add key digests.
+ return nil
+ }
+ return tm.getTranOpsFromValueList(txn, list)
+}
+
+func (tm *TxnMonitor) getTranOpsFromValueList(txn *Txn, list []interface{}) []*Operation {
+ if txn.MonitorExists() {
+ return []*Operation{
+ ListAppendWithPolicyOp(txnOrderedListPolicy, binNameDigests, list...),
+ }
+ } else {
+ return []*Operation{
+ PutOp(NewBin(binNameId, txn.Id())),
+ ListAppendWithPolicyOp(txnOrderedListPolicy, binNameDigests, list...),
+ }
+ }
+}
+
+func (tm *TxnMonitor) addWriteKeys(cluster *Cluster, policy *BasePolicy, ops []*Operation) Error {
+ txnKey := getTxnMonitorKey(policy.Txn)
+ wp := tm.copyTimeoutPolicy(policy)
+ args, err := newOperateArgs(cluster, wp, txnKey, ops)
+ if err != nil {
+ return err
+ }
+
+ cmd, err := newTxnAddKeysCommand(cluster, txnKey, args)
+ if err != nil {
+ return err
+ }
+ return cmd.Execute()
+}
+
+func (tm *TxnMonitor) copyTimeoutPolicy(policy *BasePolicy) *WritePolicy {
+ // Inherit some fields from the original command's policy.
+ wp := NewWritePolicy(0, 0)
+ wp.Txn = policy.Txn
+ // wp.ConnectTimeout = policy.ConnectTimeout
+ wp.SocketTimeout = policy.SocketTimeout
+ wp.TotalTimeout = policy.TotalTimeout
+ // wp.TimeoutDelay = policy.TimeoutDelay
+ wp.MaxRetries = policy.MaxRetries
+ wp.SleepBetweenRetries = policy.SleepBetweenRetries
+ wp.UseCompression = policy.UseCompression
+ wp.RespondPerEachOp = true
+
+ // Note that the server only accepts the timeout on MRT monitor record create.
+ // The server ignores the MRT timeout field on successive MRT monitor record
+ // updates.
+ wp.Expiration = uint32(policy.Txn.timeout)
+
+ return wp
+}
+
+func getTxnMonitorKey(txn *Txn) *Key {
+ key, _ := NewKey(txn.GetNamespace(), "", 1))
+ }) // it
+
+ gg.It("must support touch and abort", func() {
+ key, _ := as.NewKey(ns, set, randString(50))
+
+ err = client.PutBins(nil, key, as.NewBin(binName, "val1"))
+ gm.Expect(err).ToNot(gm.HaveOccurred())
+
+ txn := as.NewTxn()
+ wp := as.NewWritePolicy(0, 0)
+ wp.Txn = txn
+
+ err := client.Touch(wp, key)
+ gm.Expect(err).ToNot(gm.HaveOccurred())
+
+ status, err := client.Abort(txn)
+ gm.Expect(err).ToNot(gm.HaveOccurred())
+ gm.Expect(status).To(gm.Equal(as.AbortStatusOK))
+
+ record, err := client.Get(nil, key)
+ gm.Expect(err).ToNot(gm.HaveOccurred())
+ gm.Expect(record.Bins[binName]).To(gm.Equal("val1"))
+ gm.Expect(record.Generation).To(gm.Equal(uint32(3)))
+ }) // it
+
+ gg.It("must support operate write", func() {
+ key, _ := as.NewKey(ns, set, randString(50))
+
+ err := client.PutBins(nil, key, as.NewBin(binName, "val1"), as.NewBin("bin2", "bal1"))
+ gm.Expect(err).ToNot(gm.HaveOccurred())
+
+ txn := as.NewTxn()
+
+ wp := as.NewWritePolicy(0, 0)
+ wp.Txn = txn
+
+ record, err := client.Operate(wp, key,
+ as.PutOp(as.NewBin(binName, "val2")),
+ as.GetBinOp("bin2"),
+ )
+ gm.Expect(err).ToNot(gm.HaveOccurred())
+ gm.Expect(record.Bins["bin2"]).To(gm.Equal("bal1"))
+
+ status, err := client.Commit(txn)
+ gm.Expect(err).ToNot(gm.HaveOccurred())
+ gm.Expect(status).To(gm.Equal(as.CommitStatusOK))
+
+ record, err = client.Get(nil, key)
+ gm.Expect(err).ToNot(gm.HaveOccurred())
+ gm.Expect(record.Bins[binName]).To(gm.Equal("val2"))
+ }) // it
+
+ gg.It("must support operate write abort", func() {
+ key, _ := as.NewKey(ns, set, randString(50))
+
+ err := client.PutBins(nil, key, as.NewBin(binName, "val1"), as.NewBin("bin2", "bal1"))
+ gm.Expect(err).ToNot(gm.HaveOccurred())
+
+ txn := as.NewTxn()
+
+ wp := as.NewWritePolicy(0, 0)
+ wp.Txn = txn
+
+ record, err := client.Operate(wp, key,
+ as.PutOp(as.NewBin(binName, "val2")),
+ as.GetBinOp("bin2"),
+ )
+ gm.Expect(err).ToNot(gm.HaveOccurred())
+ gm.Expect(record.Bins["bin2"]).To(gm.Equal("bal1"))
+
+ status, err := client.Abort(txn)
+ gm.Expect(err).ToNot(gm.HaveOccurred())
+ gm.Expect(status).To(gm.Equal(as.AbortStatusOK))
+
+ record, err = client.Get(nil, key)
+ gm.Expect(err).ToNot(gm.HaveOccurred())
+ gm.Expect(record.Bins[binName]).To(gm.Equal("val1"))
+ }) // it
+
+ gg.It("must support UDF", func() {
+ key, _ := as.NewKey(ns, set, randString(50))
+
+ err := client.PutBins(nil, key, as.NewBin(binName, "val1"), as.NewBin("bin2", "bal1"))
+ gm.Expect(err).ToNot(gm.HaveOccurred())
+
+ txn := as.NewTxn()
+
+ wp := as.NewWritePolicy(0, 0)
+ wp.Txn = txn
+
+ _, err = client.Execute(
+ wp,
+ key,
+ "record_example",
+ "writeBin",
+ as.NewValue(binName),
+ as.NewValue("val2"),
+ )
+ gm.Expect(err).ToNot(gm.HaveOccurred())
+
+ status, err := client.Commit(txn)
+ gm.Expect(err).ToNot(gm.HaveOccurred())
+ gm.Expect(status).To(gm.Equal(as.CommitStatusOK))
+
+ record, err := client.Get(nil, key)
+ gm.Expect(err).ToNot(gm.HaveOccurred())
+ gm.Expect(record.Bins[binName]).To(gm.Equal("val2"))
+ }) // it
+
+ gg.It("must support UDF and abort", func() {
+ key, _ := as.NewKey(ns, set, randString(50))
+
+ err := client.PutBins(nil, key, as.NewBin(binName, "val1"), as.NewBin("bin2", "bal1"))
+ gm.Expect(err).ToNot(gm.HaveOccurred())
+
+ txn := as.NewTxn()
+
+ wp := as.NewWritePolicy(0, 0)
+ wp.Txn = txn
+
+ _, err = client.Execute(
+ wp,
+ key,
+ "record_example",
+ "writeBin",
+ as.NewValue(binName),
+ as.NewValue("val2"),
+ )
+ gm.Expect(err).ToNot(gm.HaveOccurred())
+
+ status, err := client.Abort(txn)
+ gm.Expect(err).ToNot(gm.HaveOccurred())
+ gm.Expect(status).To(gm.Equal(as.AbortStatusOK))
+
+ record, err := client.Get(nil, key)
+ gm.Expect(err).ToNot(gm.HaveOccurred())
+ gm.Expect(record.Bins[binName]).To(gm.Equal("val1"))
+ }) // it
+
+ gg.It("must support batchDelete", func() {
+ bin := as.NewBin(binName, 1)
+ keys := make([]*as.Key, 10)
+
+ for i := range keys {
+ key, _ := as.NewKey(ns, set, i)
+ keys[i] = key
+
+ err := client.PutBins(nil, key, bin)
+ gm.Expect(err).ToNot(gm.HaveOccurred())
+ }
+
+ records, err := client.BatchGet(nil, keys)
+ gm.Expect(err).ToNot(gm.HaveOccurred())
+ for i := range records {
+ gm.Expect(records[i]).ToNot(gm.BeNil())
+ gm.Expect(records[i].Bins[binName]).To(gm.Equal(1))
+ }
+
+ txn := as.NewTxn()
+
+ bin = as.NewBin(binName, 2)
+
+ bp := as.NewBatchPolicy()
+ bp.Txn = txn
+
+ dp := as.NewBatchDeletePolicy()
+ dp.DurableDelete = true
+
+ _, err = client.BatchDelete(bp, dp, keys)
+ gm.Expect(err).ToNot(gm.HaveOccurred())
+
+ records, err = client.BatchGet(bp, keys)
+ gm.Expect(err).ToNot(gm.HaveOccurred())
+ for i := range records {
+ gm.Expect(records[i]).To(gm.BeNil())
+ }
+
+ status, err := client.Commit(txn)
+ gm.Expect(err).ToNot(gm.HaveOccurred())
+ gm.Expect(status).To(gm.Equal(as.CommitStatusOK))
+
+ records, err = client.BatchGet(nil, keys)
+ gm.Expect(err).ToNot(gm.HaveOccurred())
+ for i := range records {
+ gm.Expect(records[i]).To(gm.BeNil())
+ }
+ }) // it
+
+ gg.It("must support batchDelete and abort", func() {
+ bin := as.NewBin(binName, 1)
+ keys := make([]*as.Key, 10)
+
+ for i := range keys {
+ key, _ := as.NewKey(ns, set, i)
+ keys[i] = key
+
+ err := client.PutBins(nil, key, bin)
+ gm.Expect(err).ToNot(gm.HaveOccurred())
+ }
+
+ records, err := client.BatchGet(nil, keys)
+ gm.Expect(err).ToNot(gm.HaveOccurred())
+ for i := range records {
+ gm.Expect(records[i]).ToNot(gm.BeNil())
+ gm.Expect(records[i].Bins[binName]).To(gm.Equal(1))
+ }
+
+ txn := as.NewTxn()
+
+ bp := as.NewBatchPolicy()
+ bp.Txn = txn
+
+ dp := as.NewBatchDeletePolicy()
+ dp.DurableDelete = true
+
+ _, err = client.BatchDelete(bp, dp, keys)
+ gm.Expect(err).ToNot(gm.HaveOccurred())
+
+ status, err := client.Abort(txn)
+ gm.Expect(err).ToNot(gm.HaveOccurred())
+ gm.Expect(status).To(gm.Equal(as.AbortStatusOK))
+
+ records, err = client.BatchGet(nil, keys)
+ gm.Expect(err).ToNot(gm.HaveOccurred())
+ for i := range records {
+ gm.Expect(records[i]).ToNot(gm.BeNil())
+ gm.Expect(records[i].Bins[binName]).To(gm.Equal(1))
+ }
+ }) // it
+
+ gg.It("must support batch", func() {
+ bin := as.NewBin(binName, 1)
+ keys := make([]*as.Key, 10)
+
+ for i := range keys {
+ key, _ := as.NewKey(ns, set, i)
+ keys[i] = key
+
+ err := client.PutBins(nil, key, bin)
+ gm.Expect(err).ToNot(gm.HaveOccurred())
+ }
+
+ records, err := client.BatchGet(nil, keys)
+ gm.Expect(err).ToNot(gm.HaveOccurred())
+ for i := range records {
+ gm.Expect(records[i]).ToNot(gm.BeNil())
+ gm.Expect(records[i].Bins[binName]).To(gm.Equal(1))
+ }
+
+ txn := as.NewTxn()
+
+ bin = as.NewBin(binName, 2)
+
+ bp := as.NewBatchPolicy()
+ bp.Txn = txn
+
+ brecs := make([]as.BatchRecordIfc, len(keys))
+ for i := range brecs {
+ brecs[i] = as.NewBatchWrite(nil, keys[i], as.PutOp(bin))
+ }
+
+ err = client.BatchOperate(bp, brecs)
+ gm.Expect(err).ToNot(gm.HaveOccurred())
+
+ records, err = client.BatchGet(bp, keys)
+ gm.Expect(err).ToNot(gm.HaveOccurred())
+ for i := range records {
+ gm.Expect(records[i]).ToNot(gm.BeNil())
+ gm.Expect(records[i].Bins[binName]).To(gm.Equal(2))
+ }
+
+ status, err := client.Commit(txn)
+ gm.Expect(err).ToNot(gm.HaveOccurred())
+ gm.Expect(status).To(gm.Equal(as.CommitStatusOK))
+
+ records, err = client.BatchGet(nil, keys)
+ gm.Expect(err).ToNot(gm.HaveOccurred())
+ for i := range records {
+ gm.Expect(records[i]).ToNot(gm.BeNil())
+ gm.Expect(records[i].Bins[binName]).To(gm.Equal(2))
+ }
+ }) // it
+
+ gg.It("must support batch and abort", func() {
+ bin := as.NewBin(binName, 1)
+ keys := make([]*as.Key, 10)
+
+ for i := range keys {
+ key, _ := as.NewKey(ns, set, i)
+ keys[i] = key
+ err := client.PutBins(nil, key, bin)
+ gm.Expect(err).ToNot(gm.HaveOccurred())
+ }
+
+ records, err := client.BatchGet(nil, keys)
+ gm.Expect(err).ToNot(gm.HaveOccurred())
+ for i := range records {
+ gm.Expect(records[i]).ToNot(gm.BeNil())
+ gm.Expect(records[i].Bins[binName]).To(gm.Equal(1))
+ }
+
+ txn := as.NewTxn()
+
+ bin = as.NewBin(binName, 2)
+
+ pp := as.NewBatchPolicy()
+ pp.Txn = txn
+
+ brecs := make([]as.BatchRecordIfc, len(keys))
+ for i := range brecs {
+ brecs[i] = as.NewBatchWrite(nil, keys[i], as.PutOp(bin))
+ }
+
+ err = client.BatchOperate(pp, brecs)
+ gm.Expect(err).ToNot(gm.HaveOccurred())
+
+ status, err := client.Abort(txn)
+ gm.Expect(err).ToNot(gm.HaveOccurred())
+ gm.Expect(status).To(gm.Equal(as.AbortStatusOK))
+
+ records, err = client.BatchGet(nil, keys)
+ gm.Expect(err).ToNot(gm.HaveOccurred())
+ for i := range records {
+ gm.Expect(records[i]).ToNot(gm.BeNil())
+ gm.Expect(records[i].Bins[binName]).To(gm.Equal(1))
+ }
+ }) // it
+ }) // describe
+})
diff --git a/txn_verify_batch.go b/txn_verify_batch.go
new file mode 100644
index 00000000..025590a1
--- /dev/null
+++ b/txn_verify_batch.go
@@ -0,0 +1,188 @@
+// Copyright 2014-2024 Aerospike, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package aerospike
+
+import (
+ "reflect"
+
+ "github.com/aerospike/aerospike-client-go/v8/types"
+ Buffer "github.com/aerospike/aerospike-client-go/v8/utils/buffer"
+)
+
+type txnBatchVerifyCommand struct {
+ batchCommand
+
+ keys []*Key
+ versions []*uint64
+ records []*BatchRecord
+}
+
+func newTxnBatchVerifyCommand(
+ client *Client,
+ batch *batchNode,
+ policy *BatchPolicy,
+ keys []*Key,
+ versions []*uint64,
+ records []*BatchRecord,
+) *txnBatchVerifyCommand {
+ var node *Node
+ if batch != nil {
+ node = batch.Node
+ }
+
+ res := &txnBatchVerifyCommand{
+ batchCommand: batchCommand{
+ client: client,
+ baseMultiCommand: *newMultiCommand(node, nil, false),
+ policy: policy,
+ batch: batch,
+ },
+ keys: keys,
+ versions: versions,
+ records: records,
+ }
+ return res
+}
+
+func (cmd *txnBatchVerifyCommand) cloneBatchCommand(batch *batchNode) batcher {
+ res := *cmd
+ res.node = batch.Node
+ res.batch = batch
+
+ return &res
+}
+
+func (cmd *txnBatchVerifyCommand) buf() []byte {
+ return cmd.dataBuffer
+}
+
+func (cmd *txnBatchVerifyCommand) object(index int) *reflect.Value {
+ return nil
+}
+
+func (cmd *txnBatchVerifyCommand) writeBuffer(ifc command) Error {
+ return cmd.setBatchTxnVerifyForBatchNode(cmd.policy, cmd.keys, cmd.versions, cmd.batch)
+}
+
+// Parse all results in the batch. Add records to shared list.
+// If the record was not found, the bins will be nil.
+func (cmd *txnBatchVerifyCommand) parseRecordResults(ifc command, receiveSize int) (bool, Error) {
+ //Parse each message response and add it to the result array
+ cmd.dataOffset = 0
+
+ for cmd.dataOffset < receiveSize {
+ if err := cmd.readBytes(int(_MSG_REMAINING_HEADER_SIZE)); err != nil {
+ return false, err
+ }
+ resultCode := types.ResultCode(cmd.dataBuffer[5] & 0xFF)
+
+ // The only valid server return codes are "ok" and "not found" and "filtered out".
+ // If other return codes are received, then abort the batch.
+ if resultCode != 0 && resultCode != types.KEY_NOT_FOUND_ERROR {
+ if resultCode == types.FILTERED_OUT {
+ cmd.filteredOutCnt++
+ } else {
+ return false, newCustomNodeError(cmd.node, resultCode)
+ }
+ }
+
+ info3 := int(cmd.dataBuffer[3])
+
+ // If cmd is the end marker of the response, do not proceed further
+ if (info3 & _INFO3_LAST) == _INFO3_LAST {
+ return false, nil
+ }
+
+ // generation := Buffer.BytesToUint32(cmd.dataBuffer, 6)
+ // expiration := types.TTL(Buffer.BytesToUint32(cmd.dataBuffer, 10))
+ batchIndex := int(Buffer.BytesToUint32(cmd.dataBuffer, 14))
+ fieldCount := int(Buffer.BytesToUint16(cmd.dataBuffer, 18))
+ // opCount := int(Buffer.BytesToUint16(cmd.dataBuffer, 20))
+ err := cmd.skipKey(fieldCount)
+ if err != nil {
+ return false, err
+ }
+
+ record := cmd.records[batchIndex]
+
+ if resultCode == types.OK {
+ record.ResultCode = resultCode
+ } else {
+ record.setError(cmd.node, resultCode, false)
+ }
+ }
+ return true, nil
+}
+
+// Parses the given byte buffer and populate the result object.
+// Returns the number of bytes that were parsed from the given buffer.
+func (cmd *txnBatchVerifyCommand) parseRecord(key *Key, opCount int, generation, expiration uint32) (*Record, Error) {
+ bins := make(BinMap, opCount)
+
+ for i := 0; i < opCount; i++ {
+ if err := cmd.readBytes(8); err != nil {
+ return nil, err
+ }
+ opSize := int(Buffer.BytesToUint32(cmd.dataBuffer, 0))
+ particleType := int(cmd.dataBuffer[5])
+ nameSize := int(cmd.dataBuffer[7])
+
+ if err := cmd.readBytes(nameSize); err != nil {
+ return nil, err
+ }
+ name := string(cmd.dataBuffer[:nameSize])
+
+ particleBytesSize := opSize - (4 + nameSize)
+ if err := cmd.readBytes(particleBytesSize); err != nil {
+ return nil, err
+ }
+ value, err := bytesToParticle(particleType, cmd.dataBuffer, 0, particleBytesSize)
+ if err != nil {
+ return nil, err
+ }
+
+ if cmd.isOperation {
+ if prev, ok := bins[name]; ok {
+ if prev2, ok := prev.(OpResults); ok {
+ bins[name] = append(prev2, value)
+ } else {
+ bins[name] = OpResults{prev, value}
+ }
+ } else {
+ bins[name] = value
+ }
+ } else {
+ bins[name] = value
+ }
+ }
+
+ return newRecord(cmd.node, key, bins, generation, expiration), nil
+}
+
+func (cmd *txnBatchVerifyCommand) commandType() commandType {
+ return ttBatchRead
+}
+
+func (cmd *txnBatchVerifyCommand) executeSingle(client *Client) Error {
+ panic(unreachable)
+}
+
+func (cmd *txnBatchVerifyCommand) Execute() Error {
+ return cmd.execute(cmd)
+}
+
+func (cmd *txnBatchVerifyCommand) generateBatchNodes(cluster *Cluster) ([]*batchNode, Error) {
+ return newBatchNodeListKeys(cluster, cmd.policy, cmd.keys, cmd.records, cmd.sequenceAP, cmd.sequenceSC, cmd.batch, false)
+}
diff --git a/txn_verify_batch_single.go b/txn_verify_batch_single.go
new file mode 100644
index 00000000..6ec9e2cd
--- /dev/null
+++ b/txn_verify_batch_single.go
@@ -0,0 +1,175 @@
+// Copyright 2014-2022 Aerospike, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package aerospike
+
+import (
+ "fmt"
+
+ "github.com/aerospike/aerospike-client-go/v8/logger"
+ "github.com/aerospike/aerospike-client-go/v8/types"
+
+ Buffer "github.com/aerospike/aerospike-client-go/v8/utils/buffer"
+)
+
+// guarantee batchSingleTxnVerifyCommand implements command interface
+var _ command = &batchSingleTxnVerifyCommand{}
+
+type batchSingleTxnVerifyCommand struct {
+ singleCommand
+
+ record *BatchRecord
+ policy *BatchPolicy
+ version *uint64
+}
+
+func newBatchSingleTxnVerifyCommand(
+ client *Client,
+ policy *BatchPolicy,
+ version *uint64,
+ record *BatchRecord,
+ node *Node,
+) (batchSingleTxnVerifyCommand, Error) {
+ var partition *Partition
+ var err Error
+ if client.cluster != nil {
+ partition, err = PartitionForRead(client.cluster, &policy.BasePolicy, record.Key)
+ if err != nil {
+ return batchSingleTxnVerifyCommand{}, err
+ }
+ }
+
+ res := batchSingleTxnVerifyCommand{
+ singleCommand: newSingleCommand(client.cluster, record.Key, partition),
+ record: record,
+ policy: policy,
+ version: version,
+ }
+ res.node = node
+
+ return res, nil
+}
+
+func (cmd *batchSingleTxnVerifyCommand) getPolicy(ifc command) Policy {
+ return cmd.policy
+}
+
+func (cmd *batchSingleTxnVerifyCommand) writeBuffer(ifc command) Error {
+ return cmd.setTxnVerify(&cmd.policy.BasePolicy, cmd.key, *cmd.version)
+}
+
+func (cmd *batchSingleTxnVerifyCommand) getNode(ifc command) (*Node, Error) {
+ return cmd.node, nil
+}
+
+func (cmd *batchSingleTxnVerifyCommand) prepareRetry(ifc command, isTimeout bool) bool {
+ cmd.partition.PrepareRetryRead(isTimeout)
+ node, err := cmd.partition.GetNodeRead(cmd.cluster)
+ if err != nil {
+ return false
+ }
+
+ cmd.node = node
+ return true
+}
+
+func (cmd *batchSingleTxnVerifyCommand) parseResult(ifc command, conn *Connection) Error {
+ // Read proto and check if compressed
+ if _, err := conn.Read(cmd.dataBuffer, 8); err != nil {
+ logger.Logger.Debug("Connection error reading data for ReadCommand: %s", err.Error())
+ return err
+ }
+
+ if compressedSize := cmd.compressedSize(); compressedSize > 0 {
+ // Read compressed size
+ if _, err := conn.Read(cmd.dataBuffer, 8); err != nil {
+ logger.Logger.Debug("Connection error reading data for ReadCommand: %s", err.Error())
+ return err
+ }
+
+ if err := cmd.conn.initInflater(true, compressedSize); err != nil {
+ return newError(types.PARSE_ERROR, fmt.Sprintf("Error setting up zlib inflater for size `%d`: %s", compressedSize, err.Error()))
+ }
+
+ // Read header.
+ if _, err := conn.Read(cmd.dataBuffer, int(_MSG_TOTAL_HEADER_SIZE)); err != nil {
+ logger.Logger.Debug("Connection error reading data for ReadCommand: %s", err.Error())
+ return err
+ }
+ } else {
+ // Read header.
+ if _, err := conn.Read(cmd.dataBuffer[8:], int(_MSG_TOTAL_HEADER_SIZE)-8); err != nil {
+ logger.Logger.Debug("Connection error reading data for ReadCommand: %s", err.Error())
+ return err
+ }
+ }
+
+ // A number of these are commented out because we just don't care enough to read
+ // that section of the header. If we do care, uncomment and check!
+ sz := Buffer.BytesToInt64(cmd.dataBuffer, 0)
+
+ // Validate header to make sure we are at the beginning of a message
+ if err := cmd.validateHeader(sz); err != nil {
+ return err
+ }
+
+ headerLength := int(cmd.dataBuffer[8])
+ resultCode := types.ResultCode(cmd.dataBuffer[13] & 0xFF)
+ // generation := Buffer.BytesToUint32(cmd.dataBuffer, 14)
+ // expiration := types.TTL(Buffer.BytesToUint32(cmd.dataBuffer, 18))
+ // fieldCount := int(Buffer.BytesToUint16(cmd.dataBuffer, 26)) // almost certainly 0
+ // opCount := int(Buffer.BytesToUint16(cmd.dataBuffer, 28))
+ receiveSize := int((sz & 0xFFFFFFFFFFFF) - int64(headerLength))
+
+ // Read remaining message bytes.
+ if receiveSize > 0 {
+ if err := cmd.sizeBufferSz(receiveSize, false); err != nil {
+ return err
+ }
+ if _, err := conn.Read(cmd.dataBuffer, receiveSize); err != nil {
+ logger.Logger.Debug("Connection error reading data for ReadCommand: %s", err.Error())
+ return err
+ }
+
+ }
+
+	if resultCode != 0 {
+		err := newError(resultCode)
+		err.setInDoubt(cmd.isRead(), cmd.commandSentCounter)
+		return err
+	}
+
+	cmd.record.ResultCode = types.OK
+	return nil
+}
+
+func (cmd *batchSingleTxnVerifyCommand) setInDoubt() bool {
+ if cmd.record.ResultCode == types.NO_RESPONSE {
+ cmd.record.InDoubt = true
+ return true
+ }
+ return false
+}
+
+func (cmd *batchSingleTxnVerifyCommand) isRead() bool {
+ return true
+}
+
+func (cmd *batchSingleTxnVerifyCommand) Execute() Error {
+ return cmd.execute(cmd)
+}
+
+func (cmd *batchSingleTxnVerifyCommand) commandType() commandType {
+ return ttPut
+}
diff --git a/txn_verify_policy.go b/txn_verify_policy.go
new file mode 100644
index 00000000..f41d7b02
--- /dev/null
+++ b/txn_verify_policy.go
@@ -0,0 +1,37 @@
+// Copyright 2014-2024 Aerospike, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package aerospike
+
+import "time"
+
+// TxnVerifyPolicy defines multi-record transaction (MRT) policy fields used to batch verify record versions on commit.
+// Used as a placeholder for now as there are no additional fields beyond BatchPolicy.
+type TxnVerifyPolicy struct {
+ BatchPolicy
+}
+
+// NewTxnVerifyPolicy creates a new TxnVerifyPolicy instance with default values.
+func NewTxnVerifyPolicy() *TxnVerifyPolicy {
+ mp := *NewBatchPolicy()
+ mp.ReadModeSC = ReadModeSCLinearize
+ mp.ReplicaPolicy = MASTER
+ mp.MaxRetries = 5
+ mp.TotalTimeout = 10 * time.Millisecond
+ mp.SleepBetweenRetries = 1 * time.Millisecond
+
+ return &TxnVerifyPolicy{
+ BatchPolicy: mp,
+ }
+}
diff --git a/types/histogram/bench_histogram_test.go b/types/histogram/bench_histogram_test.go
index 48ea8824..6c42ebb7 100644
--- a/types/histogram/bench_histogram_test.go
+++ b/types/histogram/bench_histogram_test.go
@@ -17,7 +17,7 @@ package histogram_test
import (
"testing"
- "github.com/aerospike/aerospike-client-go/v7/types/histogram"
+ "github.com/aerospike/aerospike-client-go/v8/types/histogram"
)
var (
diff --git a/types/histogram/histogram_test.go b/types/histogram/histogram_test.go
index 6b231ee2..79d75701 100644
--- a/types/histogram/histogram_test.go
+++ b/types/histogram/histogram_test.go
@@ -17,7 +17,7 @@ package histogram_test
import (
"testing"
- "github.com/aerospike/aerospike-client-go/v7/types/histogram"
+ "github.com/aerospike/aerospike-client-go/v8/types/histogram"
gg "github.com/onsi/ginkgo/v2"
gm "github.com/onsi/gomega"
diff --git a/types/pool.go b/types/pool.go
index ddcfd71c..5737464c 100644
--- a/types/pool.go
+++ b/types/pool.go
@@ -15,7 +15,7 @@
package types
import (
- "github.com/aerospike/aerospike-client-go/v7/internal/atomic"
+ "github.com/aerospike/aerospike-client-go/v8/internal/atomic"
)
// Pool implements a general purpose fixed-size pool.
diff --git a/types/result_code.go b/types/result_code.go
index df79029e..49bea226 100644
--- a/types/result_code.go
+++ b/types/result_code.go
@@ -21,7 +21,10 @@ import "fmt"
type ResultCode int
const (
- // GRPC_ERROR is wrapped and directly returned from the grpc library
+ // Multi-record transaction failed.
+ TXN_FAILED ResultCode = -22
+
+ // GRPC_ERROR is wrapped and directly returned from the grpc library.
GRPC_ERROR ResultCode = -21
// BATCH_FAILED means one or more keys failed in a batch.
@@ -153,10 +156,10 @@ const (
// FAIL_FORBIDDEN defines operation not allowed at this time.
FAIL_FORBIDDEN ResultCode = 22
- // FAIL_ELEMENT_NOT_FOUND defines element Not Found in CDT
+ // FAIL_ELEMENT_NOT_FOUND defines element Not Found in CDT.
FAIL_ELEMENT_NOT_FOUND ResultCode = 23
- // FAIL_ELEMENT_EXISTS defines element Already Exists in CDT
+ // FAIL_ELEMENT_EXISTS defines element Already Exists in CDT.
FAIL_ELEMENT_EXISTS ResultCode = 24
// ENTERPRISE_ONLY defines attempt to use an Enterprise feature on a Community server or a server
@@ -166,14 +169,14 @@ const (
// OP_NOT_APPLICABLE defines the operation cannot be applied to the current bin value on the server.
OP_NOT_APPLICABLE ResultCode = 26
- // FILTERED_OUT defines the transaction was not performed because the filter was false.
+ // FILTERED_OUT defines the command was not performed because the filter was false.
FILTERED_OUT ResultCode = 27
// LOST_CONFLICT defines write command loses conflict to XDR.
- LOST_CONFLICT = 28
+ LOST_CONFLICT ResultCode = 28
// Write can't complete until XDR finishes shipping.
- XDR_KEY_BUSY = 32
+ XDR_KEY_BUSY ResultCode = 32
// QUERY_END defines there are no more records left for query.
QUERY_END ResultCode = 50
@@ -208,7 +211,7 @@ const (
// EXPIRED_PASSWORD defines security credential is invalid.
EXPIRED_PASSWORD ResultCode = 63
- // FORBIDDEN_PASSWORD defines forbidden password (e.g. recently used)
+ // FORBIDDEN_PASSWORD defines forbidden password (e.g. recently used).
FORBIDDEN_PASSWORD ResultCode = 64
// INVALID_CREDENTIAL defines security credential is invalid.
@@ -226,14 +229,14 @@ const (
// INVALID_PRIVILEGE defines privilege is invalid.
INVALID_PRIVILEGE ResultCode = 72
- // INVALID_WHITELIST defines invalid IP address whiltelist
- INVALID_WHITELIST = 73
+ // INVALID_WHITELIST defines invalid IP address whitelist.
+ INVALID_WHITELIST ResultCode = 73
// QUOTAS_NOT_ENABLED defines Quotas not enabled on server.
- QUOTAS_NOT_ENABLED = 74
+ QUOTAS_NOT_ENABLED ResultCode = 74
// INVALID_QUOTA defines invalid quota value.
- INVALID_QUOTA = 75
+ INVALID_QUOTA ResultCode = 75
// NOT_AUTHENTICATED defines user must be authentication before performing database operations.
NOT_AUTHENTICATED ResultCode = 80
@@ -242,14 +245,33 @@ const (
ROLE_VIOLATION ResultCode = 81
// NOT_WHITELISTED defines command not allowed because sender IP address not whitelisted.
- NOT_WHITELISTED = 82
+ NOT_WHITELISTED ResultCode = 82
// QUOTA_EXCEEDED defines Quota exceeded.
- QUOTA_EXCEEDED = 83
+ QUOTA_EXCEEDED ResultCode = 83
// UDF_BAD_RESPONSE defines a user defined function returned an error code.
UDF_BAD_RESPONSE ResultCode = 100
+ // MRT record blocked by a different transaction.
+ MRT_BLOCKED ResultCode = 120
+
+ // MRT read version mismatch identified during commit.
+ // Some other command changed the record outside of the transaction.
+ MRT_VERSION_MISMATCH ResultCode = 121
+
+ // MRT deadline reached without a successful commit or abort.
+ MRT_EXPIRED ResultCode = 122
+
+ // MRT write command limit (4096) exceeded.
+ MRT_TOO_MANY_WRITES ResultCode = 123
+
+ // MRT was already committed.
+ MRT_COMMITTED ResultCode = 124
+
+ // MRT was already aborted.
+ MRT_ABORTED ResultCode = 125
+
// BATCH_DISABLED defines batch functionality has been disabled.
BATCH_DISABLED ResultCode = 150
@@ -311,6 +333,9 @@ const (
// ResultCodeToString returns a human readable errors message based on the result code.
func ResultCodeToString(resultCode ResultCode) string {
switch ResultCode(resultCode) {
+ case TXN_FAILED:
+ return "Multi-record transaction failed"
+
case GRPC_ERROR:
return "GRPC error"
case BATCH_FAILED:
@@ -535,6 +560,24 @@ func ResultCodeToString(resultCode ResultCode) string {
case UDF_BAD_RESPONSE:
return "UDF returned error"
+ case MRT_BLOCKED:
+ return "MRT record blocked by a different transaction"
+
+ case MRT_VERSION_MISMATCH:
+ return "MRT read version mismatch identified during commit. Some other command changed the record outside of the transaction"
+
+ case MRT_EXPIRED:
+ return "MRT deadline reached without a successful commit or abort"
+
+ case MRT_TOO_MANY_WRITES:
+ return "MRT write command limit (4096) exceeded"
+
+ case MRT_COMMITTED:
+ return "MRT was already committed"
+
+ case MRT_ABORTED:
+ return "MRT was already aborted"
+
case BATCH_DISABLED:
return "Batch functionality has been disabled"
@@ -599,8 +642,10 @@ func ResultCodeToString(resultCode ResultCode) string {
func (rc ResultCode) String() string {
switch rc {
+ case TXN_FAILED:
+ return "TXN_FAILED"
case GRPC_ERROR:
- return "GRPC error"
+ return "GRPC_ERROR"
case BATCH_FAILED:
return "BATCH_FAILED"
case NO_RESPONSE:
@@ -749,6 +794,18 @@ func (rc ResultCode) String() string {
return "QUOTA_EXCEEDED"
case UDF_BAD_RESPONSE:
return "UDF_BAD_RESPONSE"
+ case MRT_BLOCKED:
+ return "MRT_BLOCKED"
+ case MRT_VERSION_MISMATCH:
+ return "MRT_VERSION_MISMATCH"
+ case MRT_EXPIRED:
+ return "MRT_EXPIRED"
+ case MRT_TOO_MANY_WRITES:
+ return "MRT_TOO_MANY_WRITES"
+ case MRT_COMMITTED:
+ return "MRT_COMMITTED"
+ case MRT_ABORTED:
+ return "MRT_ABORTED"
case BATCH_DISABLED:
return "BATCH_DISABLED"
case BATCH_MAX_REQUESTS_EXCEEDED:
diff --git a/udf_test.go b/udf_test.go
index 29067377..1afdca31 100644
--- a/udf_test.go
+++ b/udf_test.go
@@ -23,7 +23,7 @@ import (
"sync"
"time"
- as "github.com/aerospike/aerospike-client-go/v7"
+ as "github.com/aerospike/aerospike-client-go/v8"
gg "github.com/onsi/ginkgo/v2"
gm "github.com/onsi/gomega"
@@ -83,16 +83,6 @@ var _ = gg.Describe("UDF/Query tests", func() {
bin1 := as.NewBin("bin1", rand.Intn(math.MaxInt16))
bin2 := as.NewBin("bin2", 1)
- gg.BeforeEach(func() {
- if *dbaas {
- gg.Skip("Not supported in DBAAS environment")
- }
-
- if *proxy {
- gg.Skip("Not supported in proxy environment")
- }
- })
-
gg.It("must Register a UDF", func() {
regTask, err := client.RegisterUDF(wpolicy, []byte(udfBody), "udf1.lua", as.LUA)
gm.Expect(err).ToNot(gm.HaveOccurred())
diff --git a/unpacker.go b/unpacker.go
index e7a35a1b..4cb377fa 100644
--- a/unpacker.go
+++ b/unpacker.go
@@ -19,10 +19,10 @@ import (
"math"
"reflect"
- "github.com/aerospike/aerospike-client-go/v7/types"
- ParticleType "github.com/aerospike/aerospike-client-go/v7/types/particle_type"
+ "github.com/aerospike/aerospike-client-go/v8/types"
+ ParticleType "github.com/aerospike/aerospike-client-go/v8/types/particle_type"
- Buffer "github.com/aerospike/aerospike-client-go/v7/utils/buffer"
+ Buffer "github.com/aerospike/aerospike-client-go/v8/utils/buffer"
)
type unpacker struct {
diff --git a/user_roles.go b/user_roles.go
index 61f8d151..1c4ccdde 100644
--- a/user_roles.go
+++ b/user_roles.go
@@ -27,7 +27,7 @@ type UserRoles struct {
// Current statistics by offset are:
//
// 0: read quota in records per second
- // 1: single record read transaction rate (TPS)
+ // 1: single record read command rate (TPS)
// 2: read scan/query record per second rate (RPS)
// 3: number of limitless read scans/queries
//
@@ -38,7 +38,7 @@ type UserRoles struct {
// Current statistics by offset are:
//
// 0: write quota in records per second
- // 1: single record write transaction rate (TPS)
+ // 1: single record write command rate (TPS)
// 2: write scan/query record per second rate (RPS)
// 3: number of limitless write scans/queries
//
diff --git a/utils/buffer/buffer.go b/utils/buffer/buffer.go
index e37f623f..774fe224 100644
--- a/utils/buffer/buffer.go
+++ b/utils/buffer/buffer.go
@@ -101,6 +101,36 @@ func VarBytesToInt64(buf []byte, offset int, len int) int64 {
return val
}
+// Uint64ToVersionBytes converts a uint64 to a 7-byte record version for MRT.
+func Uint64ToVersionBytes(v uint64, buf []byte, offset int) {
+ buf[offset] = (byte)(v >> 0)
+ offset++
+ buf[offset] = (byte)(v >> 8)
+ offset++
+ buf[offset] = (byte)(v >> 16)
+ offset++
+ buf[offset] = (byte)(v >> 24)
+ offset++
+ buf[offset] = (byte)(v >> 32)
+ offset++
+ buf[offset] = (byte)(v >> 40)
+ offset++
+ buf[offset] = (byte)(v >> 48)
+}
+
+// VersionBytesToUint64 converts a 7-byte record version to a uint64 for MRT.
+func VersionBytesToUint64(buf []byte, offset int) *uint64 {
+ res := (((uint64(buf[offset]) & 0xFF) << 0) |
+ ((uint64(buf[offset+1]) & 0xFF) << 8) |
+ ((uint64(buf[offset+2]) & 0xFF) << 16) |
+ ((uint64(buf[offset+3]) & 0xFF) << 24) |
+ ((uint64(buf[offset+4]) & 0xFF) << 32) |
+ ((uint64(buf[offset+5]) & 0xFF) << 40) |
+ ((uint64(buf[offset+6]) & 0xFF) << 48))
+
+ return &res
+}
+
// BytesToInt32 converts a slice into int32; only maximum of 4 bytes will be used
func BytesToInt32(buf []byte, offset int) int32 {
return int32(binary.BigEndian.Uint32(buf[offset : offset+uint32sz]))
diff --git a/value.go b/value.go
index 297581a3..7a51ff65 100644
--- a/value.go
+++ b/value.go
@@ -19,10 +19,10 @@ import (
"reflect"
"strconv"
- "github.com/aerospike/aerospike-client-go/v7/types"
- ParticleType "github.com/aerospike/aerospike-client-go/v7/types/particle_type"
+ "github.com/aerospike/aerospike-client-go/v8/types"
+ ParticleType "github.com/aerospike/aerospike-client-go/v8/types/particle_type"
- Buffer "github.com/aerospike/aerospike-client-go/v7/utils/buffer"
+ Buffer "github.com/aerospike/aerospike-client-go/v8/utils/buffer"
)
// this function will be set in value_slow file if included
@@ -704,7 +704,6 @@ type LongValue int64
// NewLongValue generates a LongValue instance.
func NewLongValue(value int64) LongValue {
- // TODO: Remove this type, it's not necessary next to IntegerValue
return LongValue(value)
}
@@ -1300,19 +1299,3 @@ func unwrapValue(v interface{}) interface{} {
return v
}
-
-func grpcValuePacked(v Value) []byte {
- if v == nil {
- return nil
- }
-
- sz, err := v.pack(nil)
- if err != nil {
- panic(err)
- }
- buf := newBuffer(sz)
- if _, err := v.pack(buf); err != nil {
- panic(err)
- }
- return buf.Bytes()
-}
diff --git a/value_test.go b/value_test.go
index 7c92487d..2d16eb5b 100644
--- a/value_test.go
+++ b/value_test.go
@@ -21,8 +21,8 @@ import (
gg "github.com/onsi/ginkgo/v2"
gm "github.com/onsi/gomega"
- ParticleType "github.com/aerospike/aerospike-client-go/v7/types/particle_type"
- "github.com/aerospike/aerospike-client-go/v7/utils/buffer"
+ ParticleType "github.com/aerospike/aerospike-client-go/v8/types/particle_type"
+ "github.com/aerospike/aerospike-client-go/v8/utils/buffer"
)
type testBLOB struct {
diff --git a/werrgroup.go b/werrgroup.go
index 7e36d334..1cc7e18f 100644
--- a/werrgroup.go
+++ b/werrgroup.go
@@ -20,7 +20,7 @@ import (
"golang.org/x/sync/semaphore"
- "github.com/aerospike/aerospike-client-go/v7/logger"
+ "github.com/aerospike/aerospike-client-go/v8/logger"
)
type werrGroup struct {
diff --git a/write_command.go b/write_command.go
index 9d23432a..15d61176 100644
--- a/write_command.go
+++ b/write_command.go
@@ -15,102 +15,66 @@
package aerospike
import (
- "github.com/aerospike/aerospike-client-go/v7/types"
-
- Buffer "github.com/aerospike/aerospike-client-go/v7/utils/buffer"
+ "github.com/aerospike/aerospike-client-go/v8/types"
)
// guarantee writeCommand implements command interface
var _ command = &writeCommand{}
type writeCommand struct {
- singleCommand
+ baseWriteCommand
- policy *WritePolicy
bins []*Bin
binMap BinMap
operation OperationType
}
-func newWriteCommand(cluster *Cluster,
+func newWriteCommand(
+ cluster *Cluster,
policy *WritePolicy,
key *Key,
bins []*Bin,
binMap BinMap,
- operation OperationType) (writeCommand, Error) {
-
- var partition *Partition
- var err Error
- if cluster != nil {
- partition, err = PartitionForWrite(cluster, &policy.BasePolicy, key)
- if err != nil {
- return writeCommand{}, err
- }
+ operation OperationType,
+) (writeCommand, Error) {
+ bwc, err := newBaseWriteCommand(cluster, policy, key)
+ if err != nil {
+ return writeCommand{}, err
}
newWriteCmd := writeCommand{
- singleCommand: newSingleCommand(cluster, key, partition),
- policy: policy,
- bins: bins,
- binMap: binMap,
- operation: operation,
+ baseWriteCommand: bwc,
+ bins: bins,
+ binMap: binMap,
+ operation: operation,
}
return newWriteCmd, nil
}
-func (cmd *writeCommand) getPolicy(ifc command) Policy {
- return cmd.policy
-}
-
func (cmd *writeCommand) writeBuffer(ifc command) Error {
return cmd.setWrite(cmd.policy, cmd.operation, cmd.key, cmd.bins, cmd.binMap)
}
-func (cmd *writeCommand) getNode(ifc command) (*Node, Error) {
- return cmd.partition.GetNodeWrite(cmd.cluster)
-}
-
-func (cmd *writeCommand) prepareRetry(ifc command, isTimeout bool) bool {
- cmd.partition.PrepareRetryWrite(isTimeout)
- return true
-}
-
func (cmd *writeCommand) parseResult(ifc command, conn *Connection) Error {
- // Read header.
- if _, err := conn.Read(cmd.dataBuffer, int(_MSG_TOTAL_HEADER_SIZE)); err != nil {
- return err
- }
-
- header := Buffer.BytesToInt64(cmd.dataBuffer, 0)
-
- // Validate header to make sure we are at the beginning of a message
- if err := cmd.validateHeader(header); err != nil {
- return err
+ resultCode, err := cmd.parseHeader()
+ if err != nil {
+ return newCustomNodeError(cmd.node, err.resultCode())
}
- resultCode := cmd.dataBuffer[13] & 0xFF
-
- if resultCode != 0 {
- if resultCode == byte(types.KEY_NOT_FOUND_ERROR) {
+ if resultCode != types.OK {
+ if resultCode == types.KEY_NOT_FOUND_ERROR {
return ErrKeyNotFound.err()
- } else if types.ResultCode(resultCode) == types.FILTERED_OUT {
+ } else if resultCode == types.FILTERED_OUT {
return ErrFilteredOut.err()
}
return newCustomNodeError(cmd.node, types.ResultCode(resultCode))
}
- return cmd.emptySocket(conn)
-}
-func (cmd *writeCommand) isRead() bool {
- return false
+ return nil
}
func (cmd *writeCommand) Execute() Error {
return cmd.execute(cmd)
}
-
-func (cmd *writeCommand) transactionType() transactionType {
- return ttPut
-}
diff --git a/write_payload_command.go b/write_payload_command.go
new file mode 100644
index 00000000..aacbc33d
--- /dev/null
+++ b/write_payload_command.go
@@ -0,0 +1,120 @@
+// Copyright 2014-2022 Aerospike, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package aerospike
+
+import (
+ "github.com/aerospike/aerospike-client-go/v8/types"
+
+ Buffer "github.com/aerospike/aerospike-client-go/v8/utils/buffer"
+)
+
+// guarantee writePayloadCommand implements command interface
+var _ command = &writePayloadCommand{}
+
+type writePayloadCommand struct {
+ singleCommand
+
+ policy *WritePolicy
+ payload []byte
+}
+
+func newWritePayloadCommand(
+ cluster *Cluster,
+ policy *WritePolicy,
+ key *Key,
+ payload []byte,
+) (writePayloadCommand, Error) {
+
+ var partition *Partition
+ var err Error
+ if cluster != nil {
+ partition, err = PartitionForWrite(cluster, &policy.BasePolicy, key)
+ if err != nil {
+ return writePayloadCommand{}, err
+ }
+ }
+
+ newWriteCmd := writePayloadCommand{
+ singleCommand: newSingleCommand(cluster, key, partition),
+ policy: policy,
+ payload: payload,
+ }
+
+ return newWriteCmd, nil
+}
+
+func (cmd *writePayloadCommand) getPolicy(ifc command) Policy {
+ return cmd.policy
+}
+
+func (cmd *writePayloadCommand) writeBuffer(ifc command) Error {
+ cmd.dataBuffer = cmd.payload
+ cmd.dataOffset = len(cmd.payload)
+ return nil
+}
+
+func (cmd *writePayloadCommand) getNode(ifc command) (*Node, Error) {
+ return cmd.partition.GetNodeWrite(cmd.cluster)
+}
+
+func (cmd *writePayloadCommand) prepareRetry(ifc command, isTimeout bool) bool {
+ cmd.partition.PrepareRetryWrite(isTimeout)
+ return true
+}
+
+func (cmd *writePayloadCommand) parseResult(ifc command, conn *Connection) Error {
+ // make sure the payload is not put back in the buffer pool
+ defer func() {
+ cmd.dataBuffer = cmd.conn.origDataBuffer
+ cmd.dataOffset = 0
+ }()
+
+ // Read header.
+ if _, err := conn.Read(cmd.dataBuffer, int(_MSG_TOTAL_HEADER_SIZE)); err != nil {
+ return err
+ }
+
+ header := Buffer.BytesToInt64(cmd.dataBuffer, 0)
+
+ // Validate header to make sure we are at the beginning of a message
+ if err := cmd.validateHeader(header); err != nil {
+ return err
+ }
+
+ resultCode := cmd.dataBuffer[13] & 0xFF
+
+ if resultCode != 0 {
+ if resultCode == byte(types.KEY_NOT_FOUND_ERROR) {
+ return ErrKeyNotFound.err()
+ } else if types.ResultCode(resultCode) == types.FILTERED_OUT {
+ return ErrFilteredOut.err()
+ }
+
+ return newCustomNodeError(cmd.node, types.ResultCode(resultCode))
+ }
+ return cmd.emptySocket(conn)
+}
+
+func (cmd *writePayloadCommand) isRead() bool {
+ return false
+}
+
+func (cmd *writePayloadCommand) Execute() Error {
+ return cmd.execute(cmd)
+}
+
+func (cmd *writePayloadCommand) commandType() commandType {
+ return ttPut
+}
diff --git a/write_policy.go b/write_policy.go
index 492fc384..32a96cf5 100644
--- a/write_policy.go
+++ b/write_policy.go
@@ -39,7 +39,7 @@ type WritePolicy struct {
// indicates that the generation is not used to restrict writes.
GenerationPolicy GenerationPolicy //= GenerationPolicy.NONE;
- // Desired consistency guarantee when committing a transaction on the server. The default
+ // Desired consistency guarantee when committing a command on the server. The default
// (COMMIT_ALL) indicates that the server should wait for master and all replica commits to
// be successful before returning success to the client.
CommitLevel CommitLevel //= COMMIT_ALL
@@ -70,7 +70,7 @@ type WritePolicy struct {
// by default.
RespondPerEachOp bool
- // DurableDelete leaves a tombstone for the record if the transaction results in a record deletion.
+ // DurableDelete leaves a tombstone for the record if the command results in a record deletion.
// This prevents deleted records from reappearing after node failures.
// Valid for Aerospike Server Enterprise Edition 3.10+ only.
DurableDelete bool