Skip to content

Commit

Permalink
Increase default multipart size to 128MiB (#1112)
Browse files · Browse the repository at this point in the history
  • Loading branch information
harshavardhana authored May 28, 2019
1 parent da91b3b commit c5ac824
Show file tree
Hide file tree
Showing 12 changed files with 114 additions and 52 deletions.
17 changes: 13 additions & 4 deletions .travis.yml
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,6 @@ os:

env:
- ARCH=x86_64
- ARCH=i686

go:
- 1.12.x
Expand All @@ -22,7 +21,17 @@ addons:
packages:
- devscripts

before_install:
- curl -O https://dl.minio.io/server/minio/release/linux-amd64/minio && chmod +x ./minio
- sudo cp testcerts/public.crt /usr/local/share/ca-certificates/
- sudo update-ca-certificates
- MINIO_ACCESS_KEY=minio MINIO_SECRET_KEY=minio123 ./minio server --compat --quiet --certs-dir testcerts data 2>&1 > minio.log &

script:
- diff -au <(gofmt -d .) <(printf "")
- diff -au <(licensecheck --check '.go$' --recursive --lines 0 * | grep -v -w 'Apache (v2.0)') <(printf "")
- make

addons:
artifacts:
- minio.log
4 changes: 2 additions & 2 deletions Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@ vet:
@GO111MODULE=on go vet ./...

test:
@GO111MODULE=on SERVER_ENDPOINT=play.min.io:9000 ACCESS_KEY=Q3AM3UQ867SPQQA43P2F SECRET_KEY=zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG ENABLE_HTTPS=1 MINT_MODE=full go test -race -v ./...
@GO111MODULE=on SERVER_ENDPOINT=localhost:9000 ACCESS_KEY=minio SECRET_KEY=minio123 ENABLE_HTTPS=1 MINT_MODE=full go test -race -v ./...

examples:
@mkdir -p /tmp/examples && for i in $(echo examples/s3/*); do go build -o /tmp/examples/$(basename ${i:0:-3}) ${i}; done
Expand All @@ -17,4 +17,4 @@ docs:
@(cd docs; GO111MODULE=on go build validator.go && ./validator -m ../docs/API.md -t checker.go.tpl)

functional-test:
@GO111MODULE=on SERVER_ENDPOINT=play.min.io:9000 ACCESS_KEY=Q3AM3UQ867SPQQA43P2F SECRET_KEY=zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG ENABLE_HTTPS=1 MINT_MODE=full go run functional_tests.go
@GO111MODULE=on SERVER_ENDPOINT=localhost:9000 ACCESS_KEY=minio SECRET_KEY=minio123 ENABLE_HTTPS=1 MINT_MODE=full go run functional_tests.go
2 changes: 1 addition & 1 deletion api-put-object-common.go
Original file line number Diff line number Diff line change
Expand Up @@ -64,7 +64,7 @@ func isReadAt(reader io.Reader) (ok bool) {
// object storage it will have the following parameters as constants.
//
// maxPartsCount - 10000
// minPartSize - 64MiB
// minPartSize - 128MiB
// maxMultipartPutObjectSize - 5TiB
//
func optimalPartInfo(objectSize int64, configuredPartSize uint64) (totalPartsCount int, partSize int64, lastPartSize int64, err error) {
Expand Down
2 changes: 1 addition & 1 deletion api-put-object-streaming.go
Original file line number Diff line number Diff line change
Expand Up @@ -75,7 +75,7 @@ type uploadPartReq struct {
Part *ObjectPart // Size of the part uploaded.
}

// putObjectMultipartFromReadAt - Uploads files bigger than 64MiB.
// putObjectMultipartFromReadAt - Uploads files bigger than 128MiB.
// Supports all readers which implements io.ReaderAt interface
// (ReadAt method).
//
Expand Down
6 changes: 3 additions & 3 deletions api-put-object.go
Original file line number Diff line number Diff line change
Expand Up @@ -124,9 +124,9 @@ func (a completedParts) Less(i, j int) bool { return a[i].PartNumber < a[j].Part
//
// You must have WRITE permissions on a bucket to create an object.
//
// - For size smaller than 64MiB PutObject automatically does a
// - For size smaller than 128MiB PutObject automatically does a
// single atomic Put operation.
// - For size larger than 64MiB PutObject automatically does a
// - For size larger than 128MiB PutObject automatically does a
// multipart Put operation.
// - For size input as -1 PutObject does a multipart Put operation
// until input stream reaches EOF. Maximum object size that can
Expand Down Expand Up @@ -167,7 +167,7 @@ func (c Client) putObjectCommon(ctx context.Context, bucketName, objectName stri
return c.putObjectNoChecksum(ctx, bucketName, objectName, reader, size, opts)
}

// For all sizes greater than 64MiB do multipart.
// For all sizes greater than 128MiB do multipart.
return c.putObjectMultipartStream(ctx, bucketName, objectName, reader, size, opts)
}

Expand Down
20 changes: 10 additions & 10 deletions api_unit_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -120,11 +120,11 @@ func TestPartSize(t *testing.T) {
if err != nil {
t.Fatal("Error: ", err)
}
if totalPartsCount != 79 {
t.Fatalf("Error: expecting total parts count of 79: got %v instead", totalPartsCount)
if totalPartsCount != 40 {
t.Fatalf("Error: expecting total parts count of 40: got %v instead", totalPartsCount)
}
if partSize != 67108864 {
t.Fatalf("Error: expecting part size of 67108864: got %v instead", partSize)
if partSize != 134217728 {
t.Fatalf("Error: expecting part size of 134217728: got %v instead", partSize)
}
if lastPartSize != 9437184 {
t.Fatalf("Error: expecting last part size of 9437184: got %v instead", lastPartSize)
Expand All @@ -146,14 +146,14 @@ func TestPartSize(t *testing.T) {
if err != nil {
t.Fatal("Error:", err)
}
if totalPartsCount != 9103 {
t.Fatalf("Error: expecting total parts count of 9103: got %v instead", totalPartsCount)
if totalPartsCount != 8192 {
t.Fatalf("Error: expecting total parts count of 8192: got %v instead", totalPartsCount)
}
if partSize != 603979776 {
t.Fatalf("Error: expecting part size of 603979776: got %v instead", partSize)
if partSize != 671088640 {
t.Fatalf("Error: expecting part size of 671088640: got %v instead", partSize)
}
if lastPartSize != 134217728 {
t.Fatalf("Error: expecting last part size of 134217728: got %v instead", lastPartSize)
if lastPartSize != 671088640 {
t.Fatalf("Error: expecting last part size of 671088640: got %v instead", lastPartSize)
}
}

Expand Down
4 changes: 2 additions & 2 deletions constants.go
Original file line number Diff line number Diff line change
Expand Up @@ -23,9 +23,9 @@ package minio
// a part in a multipart upload may not be uploaded.
const absMinPartSize = 1024 * 1024 * 5

// minPartSize - minimum part size 64MiB per object after which
// minPartSize - minimum part size 128MiB per object after which
// putObject behaves internally as multipart.
const minPartSize = 1024 * 1024 * 64
const minPartSize = 1024 * 1024 * 128

// maxPartsCount - maximum number of parts for a single multipart session.
const maxPartsCount = 10000
Expand Down
4 changes: 2 additions & 2 deletions docs/API.md
Original file line number Diff line number Diff line change
Expand Up @@ -528,7 +528,7 @@ if err != nil {

<a name="PutObject"></a>
### PutObject(bucketName, objectName string, reader io.Reader, objectSize int64,opts PutObjectOptions) (n int, err error)
Uploads objects that are less than 64MiB in a single PUT operation. For objects that are greater than 64MiB in size, PutObject seamlessly uploads the object as parts of 64MiB or more depending on the actual file size. The max upload size for an object is 5TB.
Uploads objects that are less than 128MiB in a single PUT operation. For objects that are greater than 128MiB in size, PutObject seamlessly uploads the object as parts of 128MiB or more depending on the actual file size. The max upload size for an object is 5TB.

__Parameters__

Expand Down Expand Up @@ -869,7 +869,7 @@ if err != nil {
### FPutObject(bucketName, objectName, filePath, opts PutObjectOptions) (length int64, err error)
Uploads contents from a file to objectName.

FPutObject uploads objects that are less than 64MiB in a single PUT operation. For objects that are greater than the 64MiB in size, FPutObject seamlessly uploads the object in chunks of 64MiB or more depending on the actual file size. The max upload size for an object is 5TB.
FPutObject uploads objects that are less than 128MiB in a single PUT operation. For objects that are greater than the 128MiB in size, FPutObject seamlessly uploads the object in chunks of 128MiB or more depending on the actual file size. The max upload size for an object is 5TB.

__Parameters__

Expand Down
4 changes: 2 additions & 2 deletions docs/zh_CN/API.md
Original file line number Diff line number Diff line change
Expand Up @@ -550,7 +550,7 @@ if err != nil {

<a name="PutObject"></a>
### PutObject(bucketName, objectName string, reader io.Reader, objectSize int64,opts PutObjectOptions) (n int, err error)
当对象小于64MiB时,直接在一次PUT请求里进行上传。当大于64MiB时,根据文件的实际大小,PutObject会自动地将对象进行拆分成64MiB一块或更大一些进行上传。对象的最大大小是5TB。
当对象小于128MiB时,直接在一次PUT请求里进行上传。当大于128MiB时,根据文件的实际大小,PutObject会自动地将对象进行拆分成128MiB一块或更大一些进行上传。对象的最大大小是5TB。

__参数__

Expand Down Expand Up @@ -889,7 +889,7 @@ if err != nil {
### FPutObject(bucketName, objectName, filePath, opts PutObjectOptions) (length int64, err error)
将filePath对应的文件内容上传到一个对象中。

当对象小于64MiB时,FPutObject直接在一次PUT请求里进行上传。当大于64MiB时,根据文件的实际大小,FPutObject会自动地将对象进行拆分成64MiB一块或更大一些进行上传。对象的最大大小是5TB。
当对象小于128MiB时,FPutObject直接在一次PUT请求里进行上传。当大于128MiB时,根据文件的实际大小,FPutObject会自动地将对象进行拆分成128MiB一块或更大一些进行上传。对象的最大大小是5TB。

__参数__

Expand Down
50 changes: 25 additions & 25 deletions functional_tests.go
Original file line number Diff line number Diff line change
Expand Up @@ -264,7 +264,7 @@ var dataFileMap = map[string]int{
"datafile-5-MB": 5 * humanize.MiByte,
"datafile-6-MB": 6 * humanize.MiByte,
"datafile-11-MB": 11 * humanize.MiByte,
"datafile-65-MB": 65 * humanize.MiByte,
"datafile-129-MB": 129 * humanize.MiByte,
}

func isFullMode() bool {
Expand Down Expand Up @@ -531,8 +531,8 @@ func testPutObjectReadAt() {
return
}

bufSize := dataFileMap["datafile-65-MB"]
var reader = getDataReader("datafile-65-MB")
bufSize := dataFileMap["datafile-129-MB"]
var reader = getDataReader("datafile-129-MB")
defer reader.Close()

// Save the data
Expand Down Expand Up @@ -641,8 +641,8 @@ func testPutObjectWithMetadata() {
return
}

bufSize := dataFileMap["datafile-65-MB"]
var reader = getDataReader("datafile-65-MB")
bufSize := dataFileMap["datafile-129-MB"]
var reader = getDataReader("datafile-129-MB")
defer reader.Close()

// Save the data
Expand Down Expand Up @@ -1308,7 +1308,7 @@ func testFPutObjectMultipart() {
}

// Upload 4 parts to utilize all 3 'workers' in multipart and still have a part to upload.
var fileName = getMintDataDirFilePath("datafile-65-MB")
var fileName = getMintDataDirFilePath("datafile-129-MB")
if fileName == "" {
// Make a temp file with minPartSize bytes of data.
file, err := ioutil.TempFile(os.TempDir(), "FPutObjectTest")
Expand All @@ -1317,7 +1317,7 @@ func testFPutObjectMultipart() {
return
}
// Upload 2 parts to utilize all 3 'workers' in multipart and still have a part to upload.
if _, err = io.Copy(file, getDataReader("datafile-65-MB")); err != nil {
if _, err = io.Copy(file, getDataReader("datafile-129-MB")); err != nil {
logError(testName, function, args, startTime, "", "Copy failed", err)
return
}
Expand All @@ -1328,7 +1328,7 @@ func testFPutObjectMultipart() {
fileName = file.Name()
args["fileName"] = fileName
}
totalSize := dataFileMap["datafile-65-MB"]
totalSize := dataFileMap["datafile-129-MB"]
// Set base object name
objectName := bucketName + "FPutObject" + "-standard"
args["objectName"] = objectName
Expand Down Expand Up @@ -1426,7 +1426,7 @@ func testFPutObject() {

// Upload 3 parts worth of data to use all 3 of multiparts 'workers' and have an extra part.
// Use different data in part for multipart tests to check parts are uploaded in correct order.
var fName = getMintDataDirFilePath("datafile-65-MB")
var fName = getMintDataDirFilePath("datafile-129-MB")
if fName == "" {
// Make a temp file with minPartSize bytes of data.
file, err := ioutil.TempFile(os.TempDir(), "FPutObjectTest")
Expand All @@ -1436,7 +1436,7 @@ func testFPutObject() {
}

// Upload 3 parts to utilize all 3 'workers' in multipart and still have a part to upload.
if _, err = io.Copy(file, getDataReader("datafile-65-MB")); err != nil {
if _, err = io.Copy(file, getDataReader("datafile-129-MB")); err != nil {
logError(testName, function, args, startTime, "", "File copy failed", err)
return
}
Expand All @@ -1448,7 +1448,7 @@ func testFPutObject() {
defer os.Remove(file.Name())
fName = file.Name()
}
totalSize := dataFileMap["datafile-65-MB"]
totalSize := dataFileMap["datafile-129-MB"]

// Set base object name
function = "FPutObject(bucketName, objectName, fileName, opts)"
Expand Down Expand Up @@ -2706,9 +2706,9 @@ func testSSECEncryptedGetObjectReadSeekFunctional() {
}
}()

// Generate 65MiB of data.
bufSize := dataFileMap["datafile-65-MB"]
var reader = getDataReader("datafile-65-MB")
// Generate 129MiB of data.
bufSize := dataFileMap["datafile-129-MB"]
var reader = getDataReader("datafile-129-MB")
defer reader.Close()

objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
Expand Down Expand Up @@ -2894,9 +2894,9 @@ func testSSES3EncryptedGetObjectReadSeekFunctional() {
}
}()

// Generate 65MiB of data.
bufSize := dataFileMap["datafile-65-MB"]
var reader = getDataReader("datafile-65-MB")
// Generate 129MiB of data.
bufSize := dataFileMap["datafile-129-MB"]
var reader = getDataReader("datafile-129-MB")
defer reader.Close()

objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
Expand Down Expand Up @@ -3072,9 +3072,9 @@ func testSSECEncryptedGetObjectReadAtFunctional() {
return
}

// Generate 65MiB of data.
bufSize := dataFileMap["datafile-65-MB"]
var reader = getDataReader("datafile-65-MB")
// Generate 129MiB of data.
bufSize := dataFileMap["datafile-129-MB"]
var reader = getDataReader("datafile-129-MB")
defer reader.Close()

objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
Expand Down Expand Up @@ -3263,9 +3263,9 @@ func testSSES3EncryptedGetObjectReadAtFunctional() {
return
}

// Generate 65MiB of data.
bufSize := dataFileMap["datafile-65-MB"]
var reader = getDataReader("datafile-65-MB")
// Generate 129MiB of data.
bufSize := dataFileMap["datafile-129-MB"]
var reader = getDataReader("datafile-129-MB")
defer reader.Close()

objectName := randString(60, rand.NewSource(time.Now().UnixNano()), "")
Expand Down Expand Up @@ -8561,8 +8561,8 @@ func testPutObjectNoLengthV2() {
objectName := bucketName + "unique"
args["objectName"] = objectName

bufSize := dataFileMap["datafile-65-MB"]
var reader = getDataReader("datafile-65-MB")
bufSize := dataFileMap["datafile-129-MB"]
var reader = getDataReader("datafile-129-MB")
defer reader.Close()
args["size"] = bufSize

Expand Down
28 changes: 28 additions & 0 deletions testcerts/private.key
Original file line number Diff line number Diff line change
@@ -0,0 +1,28 @@
-----BEGIN PRIVATE KEY-----
MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCwUyKC2VOXy2+8
gMQkRrDJ4aA7K5pgj6LHWu25GeY93x+8DLFyQ9BhoaMcAbs2Cmw91rONDrZ0gNql
yi5JX8t+iiVH8o6dcq6W8jNLnOw0GMNJ2/E1Ckfe5ktkn9synSSwMdnFp3cDk7Hb
2j6IiWrb+PXb7VGL47kDrG59iKQ350MiB3PNpd1ulHbi2m2ZC3WyoTTzlgeTXiXa
zhBIX4wsGVYs6RzS1bTZFBq05dIPNMJCRDVBSBYAAVuBxKjh4xvhC6j0rTCCK8uJ
752KioW4Y0VAEv6yUC4Ht6D9Jcj7gODTgb2irWSCNXFH+pZaI6wWlS8pPiL6iljY
P3kBeFiLAgMBAAECggEAKM20SM9+FryPSPILcdGiC7XY3JiEix/yLWwPYyxpKZw+
vce6MJUc3dsH4e1Mo37Z+Z17w4LKGj/PWVpmR7iRYOEbK4EoG6t0V54I3NCdoJiy
aJ8rPHj6lMx6WfjcQuQ2n0eJ+8F7OyqsmBHzMqmKPwln69MJcfPq1rzKfOZoCj9p
0oZ+3Iv3roC4uH8peZFooCDUlzJL+8KiybVlemNfklKsHfRmL2vOdFBt+qvit6N/
9JgBTX1mRx1+vqECj+TlVP//k3BTEPNfpIvsLCRN0eBbQcXYzu/gZfHwGnsy5Lxy
HaHNJnmLZMWSCc4iyCK7uN/BHXNUSSh3qqp4wqz0IQKBgQDdGbOuOVdJW4J1yYua
nDLAu2RQqvZTnoz1/b8jcrjS7tS1la5x1IN0Z9/VqTmkfxyHOK9ab1iVlklkIzjP
CmHnadUwr8vrLcdicFpjVLQU3O4ZqGrgiSGIPAotvOfAOuuzMs+r5ElW/MrGq0Pa
/3tGCTIx8JscZZjGhffUNoIGeQKBgQDMKB+flQB9Ajeo1JM4y3DtHbMJ5D2+/qoe
IkM7kN5K85EEpNwA2PMNKL2qthgM9YFU3K6Dj0gxPNsUKg3W7Ff2r+gaj8K+VjU0
VbdhTZANbou8hU551swDUCUgquassMtZJIdZnQ7puwLGK67sZwWlOS6Pe1aqaNc5
nY/MRbemIwKBgEySfykCkNlGCPuUDnZATE91VrudSewRyA3VkGHNdHcQ4bf1m9Gu
YMxqwRl1HxJ6Nz4ZgplWYJ6FyusUS7NgjCGiBIR1DbFoTFoqQROPnUJwdUGLk2Ap
/eP5ryjB+J0ZitGn8kY8rK2kpPGDFN/+hQnvW2PySTXfdbajZP4o1oU5AoGAMiT0
x3yQlyPRSf2Uf5Gwlf0Ceb5+0Ae6/xXJT7sgbmZuyyY3B1pCMIw+MczyEVTHxHFD
x/qMb9OTt9swdQauAGBqcQO4gImqHcWj+hlT9Yied9qCUPjKOVIZHHH9oJL4D1gi
iodCH3SYlNYr69LOFyv5XLKdsdN4caVaqYDCP+MCgYEAwXyCmSml5oCxJeAOrEDC
Yg3vq3Ul9JO1wc8VDXn9+DtnFsuRHm0fTIxBelmis8AjIIq5DcObpk6wGYZwUiTU
LYQU7v0/Azujv9cl10GI8wzYKiRvExZDTn0sp6OKnau735qBUZvsRDqEQQ5n7waZ
xjlGmZyfah17laYZV9aJoHk=
-----END PRIVATE KEY-----
25 changes: 25 additions & 0 deletions testcerts/public.crt
Original file line number Diff line number Diff line change
@@ -0,0 +1,25 @@
-----BEGIN CERTIFICATE-----
MIIEKjCCApKgAwIBAgIRAPVKnAiFmDti207oQPs2VfUwDQYJKoZIhvcNAQELBQAw
VTEeMBwGA1UEChMVbWtjZXJ0IGRldmVsb3BtZW50IENBMRUwEwYDVQQLDAxoYXJz
aGFAcHJ0c2MxHDAaBgNVBAMME21rY2VydCBoYXJzaGFAcHJ0c2MwHhcNMTkwMTA3
MTE1ODE2WhcNMjkwMTA3MTE1ODE2WjBEMScwJQYDVQQKEx5ta2NlcnQgZGV2ZWxv
cG1lbnQgY2VydGlmaWNhdGUxGTAXBgNVBAsMEGhhcnNoYUBiYWNrc3BhY2UwggEi
MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCwUyKC2VOXy2+8gMQkRrDJ4aA7
K5pgj6LHWu25GeY93x+8DLFyQ9BhoaMcAbs2Cmw91rONDrZ0gNqlyi5JX8t+iiVH
8o6dcq6W8jNLnOw0GMNJ2/E1Ckfe5ktkn9synSSwMdnFp3cDk7Hb2j6IiWrb+PXb
7VGL47kDrG59iKQ350MiB3PNpd1ulHbi2m2ZC3WyoTTzlgeTXiXazhBIX4wsGVYs
6RzS1bTZFBq05dIPNMJCRDVBSBYAAVuBxKjh4xvhC6j0rTCCK8uJ752KioW4Y0VA
Ev6yUC4Ht6D9Jcj7gODTgb2irWSCNXFH+pZaI6wWlS8pPiL6iljYP3kBeFiLAgMB
AAGjgYUwgYIwDgYDVR0PAQH/BAQDAgWgMBMGA1UdJQQMMAoGCCsGAQUFBwMBMAwG
A1UdEwEB/wQCMAAwHwYDVR0jBBgwFoAUD575sRLoRt9dCxSRqbVctoEHt3MwLAYD
VR0RBCUwI4IJbG9jYWxob3N0hwR/AAABhxAAAAAAAAAAAAAAAAAAAAABMA0GCSqG
SIb3DQEBCwUAA4IBgQC7qDRDNAHtfGtQs1UmvqWvHPI7qcBQgAibYq/Fox6X9ia1
weQBfNWEoNOsk97wzbTz81ifXIQ0oV11kWE8EdsbXOf9xeFe9FmDn10d4bGjuMLd
+N3OtGKxLWry2xDYEsVHJZxVxwrf5GK6AJSJj/S837Nil6uRuwjvBVTbxmh1q0nV
x63V8Ag65rLS0fu8msSb64N5UHMCQk6IE+BFHY2gh0lBfZHMdtP4IbeCm756K78/
WMeqjavGA3bqzVTixCHnJ9S2VLk/oQUS6mL869jM8+tN5VeE6Qsr1/Q5h+NaFCJg
Ed5xjT9mmnc3BLsOHflb1dg+rA90Zz9wphgebXbJhRNuuDRv81dtRPTzM+evGRGM
iRKtiDpog+K0HulfX2g4ZQ1dItEjYz+JYgUFJG+yCvBlNZ/WsTrIVcUCFKaG5rUC
aNqvKrSXfbzKQx7V/TtUAeSfRk7TBRn5qh8Pl+MmQQsB0L9hwTdnqTNn057tghu4
3/yIIBpzdWPhQ5uv7Vc=
-----END CERTIFICATE-----

0 comments on commit c5ac824

Please sign in to comment.