Preallocate buffers to object uploads #835

Merged
2 commits merged on Oct 9, 2023
17 changes: 15 additions & 2 deletions internal/neofs/neofs.go
@@ -8,6 +8,7 @@
"io"
"math"
"strconv"
"sync"
"time"

"github.com/nspcc-dev/neofs-s3-gw/api"
@@ -46,16 +47,24 @@
anonSigner user.Signer
cfg Config
epochGetter EpochGetter
buffers *sync.Pool
}

// NewNeoFS creates new NeoFS using provided pool.Pool.
func NewNeoFS(p *pool.Pool, signer user.Signer, anonSigner user.Signer, cfg Config, epochGetter EpochGetter) *NeoFS {
buffers := sync.Pool{}
buffers.New = func() any {
b := make([]byte, cfg.MaxObjectSize)
return &b
}

return &NeoFS{
pool: p,
gateSigner: signer,
anonSigner: anonSigner,
cfg: cfg,
epochGetter: epochGetter,
buffers: &buffers,
}
}

@@ -278,26 +287,26 @@
prm.Payload = bytes.NewReader(obj.Payload())
}

if x.cfg.IsSlicerEnabled {
opts := slicer.Options{}
opts.SetObjectPayloadLimit(uint64(x.cfg.MaxObjectSize))
opts.SetCopiesNumber(prm.CopiesNumber)
opts.SetCurrentNeoFSEpoch(x.epochGetter.CurrentEpoch())

if x.cfg.IsHomomorphicEnabled {
opts.CalculateHomomorphicChecksum()
}

if prm.BearerToken != nil {
opts.SetBearerToken(*prm.BearerToken)
}

objID, err := slicer.Put(ctx, x.pool, obj, x.signer(ctx), prm.Payload, opts)
if err != nil {
return oid.ID{}, fmt.Errorf("slicer put: %w", err)
}

return objID, nil
}

var prmObjPutInit client.PrmObjectPutInit
@@ -316,8 +325,12 @@
return oid.ID{}, fmt.Errorf("save object via connection pool: %w", err)
}

chunk := make([]byte, x.cfg.MaxObjectSize)
_, err = io.CopyBuffer(writer, prm.Payload, chunk)
data := x.buffers.Get()
chunk := data.(*[]byte)
Member: Likely some Go 1.2X will bring a generic-based Pool to us.

Contributor (author): It would be great, but 1.21 still has the any-based version.
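
As an illustration of what such a typed pool could look like today, here is a minimal sketch of a generics-based wrapper over sync.Pool. It is hypothetical and not part of this PR or the Go standard library; the package, type, and function names are made up for the example.

package typedpool

import "sync"

// Pool is a hypothetical typed wrapper around sync.Pool, which still
// exposes an any-based API as of Go 1.21.
type Pool[T any] struct {
    inner sync.Pool
}

// New returns a Pool whose Get falls back to newFn when the pool is empty.
func New[T any](newFn func() T) *Pool[T] {
    p := &Pool[T]{}
    p.inner.New = func() any { return newFn() }
    return p
}

// Get takes a value from the pool, creating one via newFn if necessary.
func (p *Pool[T]) Get() T { return p.inner.Get().(T) }

// Put hands a value back to the pool for later reuse.
func (p *Pool[T]) Put(v T) { p.inner.Put(v) }

With such a wrapper, the gateway could hold a typed pool of *[]byte and drop the type assertion below, but that is purely speculative.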


_, err = io.CopyBuffer(writer, prm.Payload, *chunk)
x.buffers.Put(chunk)

if err != nil {
return oid.ID{}, fmt.Errorf("read payload chunk: %w", err)
}
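
For reference, the buffer-reuse pattern this PR introduces can be reduced to the following self-contained sketch; the size constant is an assumption for illustration, not taken from the gateway configuration. The pool stores *[]byte rather than []byte because putting a slice value into an interface would itself allocate on every Put (the pattern staticcheck flags as SA6002).

package main

import (
    "bytes"
    "io"
    "sync"
)

// maxObjectSize stands in for cfg.MaxObjectSize; the value is an
// illustrative assumption, not taken from this PR.
const maxObjectSize = 64 * 1024 * 1024

var buffers = sync.Pool{
    New: func() any {
        b := make([]byte, maxObjectSize)
        return &b
    },
}

// copyWithPooledBuffer copies src to dst through a reused buffer,
// mirroring the Get / io.CopyBuffer / Put sequence above.
func copyWithPooledBuffer(dst io.Writer, src io.Reader) (int64, error) {
    chunk := buffers.Get().(*[]byte)
    defer buffers.Put(chunk) // return the buffer even if the copy fails
    return io.CopyBuffer(dst, src, *chunk)
}

func main() {
    var out bytes.Buffer
    _, _ = copyWithPooledBuffer(&out, bytes.NewReader([]byte("payload")))
}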
211 changes: 211 additions & 0 deletions internal/neofs/neofs_test.go
@@ -1,11 +1,28 @@
package neofs

import (
"bytes"
"context"
"crypto/rand"
"fmt"
"io"
"runtime"
"strconv"
"sync"
"testing"
"time"

"github.com/nspcc-dev/neo-go/pkg/crypto/keys"
"github.com/nspcc-dev/neofs-s3-gw/api/layer"
"github.com/nspcc-dev/neofs-sdk-go/client"
apistatus "github.com/nspcc-dev/neofs-sdk-go/client/status"
"github.com/nspcc-dev/neofs-sdk-go/container"
"github.com/nspcc-dev/neofs-sdk-go/container/acl"
cid "github.com/nspcc-dev/neofs-sdk-go/container/id"
"github.com/nspcc-dev/neofs-sdk-go/netmap"
"github.com/nspcc-dev/neofs-sdk-go/pool"
"github.com/nspcc-dev/neofs-sdk-go/user"
"github.com/nspcc-dev/neofs-sdk-go/waiter"
"github.com/stretchr/testify/require"
)

@@ -23,3 +40,197 @@ func TestErrorChecking(t *testing.T) {
require.ErrorIs(t, wrappedError, layer.ErrAccessDenied)
require.Contains(t, wrappedError.Error(), reason)
}

func Benchmark(b *testing.B) {
b.Skip("Required connection to NeoFS cluster")

ctx := context.Background()

pk, err := keys.NewPrivateKey()
require.NoError(b, err)
signer := user.NewAutoIDSignerRFC6979(pk.PrivateKey)

anonPk, err := keys.NewPrivateKey()
require.NoError(b, err)
anonSigner := user.NewAutoIDSignerRFC6979(anonPk.PrivateKey)

var prm pool.InitParameters
prm.SetSigner(signer)
prm.AddNode(pool.NewNodeParam(1, "localhost:8080", 1))

p, err := pool.NewPool(prm)
require.NoError(b, err)

require.NoError(b, p.Dial(ctx))

ni, err := p.NetworkInfo(ctx, client.PrmNetworkInfo{})
require.NoError(b, err)

neofsCfg := Config{
MaxObjectSize: int64(ni.MaxObjectSize()),
IsSlicerEnabled: false,
IsHomomorphicEnabled: !ni.HomomorphicHashingDisabled(),
}

neo := NewNeoFS(p, signer, anonSigner, neofsCfg, ni)

var createParams layer.PrmObjectCreate
createParams.Creator = signer.UserID()

for i := 128; i <= 512; i += 128 {
b.Run("object upload "+strconv.Itoa(i), func(b *testing.B) {
b.StopTimer()
payload := make([]byte, i*1024)
_, err = rand.Read(payload)
require.NoError(b, err)

id, err := createContainer(ctx, signer, p)
require.NoError(b, err)
createParams.Container = id

defer func() {
_ = deleteContainer(ctx, id, signer, p)
}()

b.ReportAllocs()
b.ResetTimer()
b.StartTimer()

for i := 0; i < b.N; i++ {
b.StopTimer()
createParams.Payload = bytes.NewReader(payload)
createParams.CreationTime = time.Now()
b.StartTimer()

_, err = neo.CreateObject(ctx, createParams)
b.StopTimer()
require.NoError(b, err)
b.StartTimer()
}
})
}
}

func createContainer(ctx context.Context, signer user.Signer, p *pool.Pool) (cid.ID, error) {
var cnr container.Container
cnr.Init()
cnr.SetOwner(signer.UserID())

var rd netmap.ReplicaDescriptor
rd.SetNumberOfObjects(1)

var pp netmap.PlacementPolicy
pp.SetContainerBackupFactor(1)
pp.AddReplicas(rd)

cnr.SetPlacementPolicy(pp)
cnr.SetBasicACL(acl.PublicRW)

var prm client.PrmContainerPut

w := waiter.NewContainerPutWaiter(p, waiter.DefaultPollInterval)
return w.ContainerPut(ctx, cnr, signer, prm)
}

func deleteContainer(ctx context.Context, id cid.ID, signer user.Signer, p *pool.Pool) error {
var prm client.PrmContainerDelete
return p.ContainerDelete(ctx, id, signer, prm)
}

func TestConcurrencyAndConsistency(t *testing.T) {
t.Skip("Required connection to NeoFS cluster")

ctx, cancel := context.WithCancel(context.Background())

pk, err := keys.NewPrivateKey()
require.NoError(t, err)
signer := user.NewAutoIDSignerRFC6979(pk.PrivateKey)

anonPk, err := keys.NewPrivateKey()
require.NoError(t, err)
anonSigner := user.NewAutoIDSignerRFC6979(anonPk.PrivateKey)

var prm pool.InitParameters
prm.SetSigner(signer)
prm.AddNode(pool.NewNodeParam(1, "localhost:8080", 1))

p, err := pool.NewPool(prm)
require.NoError(t, err)

require.NoError(t, p.Dial(ctx))

ni, err := p.NetworkInfo(ctx, client.PrmNetworkInfo{})
require.NoError(t, err)

goroutines := runtime.GOMAXPROCS(0)

neofsCfg := Config{
MaxObjectSize: int64(ni.MaxObjectSize()),
IsSlicerEnabled: false,
IsHomomorphicEnabled: !ni.HomomorphicHashingDisabled(),
}

neo := NewNeoFS(p, signer, anonSigner, neofsCfg, ni)

var createParams layer.PrmObjectCreate
createParams.Creator = signer.UserID()

wg := sync.WaitGroup{}
wg.Add(goroutines)

for i := 0; i < goroutines; i++ {
go func() {
uploadDownload(ctx, t, neo, p, signer, createParams, &wg)
}()
}

<-time.After(30 * time.Second)
cancel()
wg.Wait()
}

func uploadDownload(ctx context.Context, t *testing.T, neo *NeoFS, p *pool.Pool, signer user.Signer, createParams layer.PrmObjectCreate, wg *sync.WaitGroup) {
defer wg.Done()

payload := make([]byte, 32*1024)

id, err := createContainer(ctx, signer, p)
require.NoError(t, err)
createParams.Container = id

defer func() {
_ = deleteContainer(ctx, id, signer, p)
}()

// Use a separate context for operations so that cancellation is caught only in the select below.
opContext := context.Background()

for {
select {
case <-ctx.Done():
return
default:
}

_, err = rand.Read(payload)
require.NoError(t, err)

createParams.Payload = bytes.NewReader(payload)
createParams.CreationTime = time.Now()

objID, err := neo.CreateObject(opContext, createParams)
require.NoError(t, err)

var objReadPrm layer.PrmObjectRead
objReadPrm.Object = objID
objReadPrm.Container = id

op, err := neo.ReadObject(opContext, objReadPrm)
require.NoError(t, err)

pl, err := io.ReadAll(op.Payload)
require.NoError(t, err)

require.True(t, bytes.Equal(payload, pl))
}
}