Commit 1a13d0f

Add bench for S3 storage Save
1 parent 8724d6b commit 1a13d0f

File tree

1 file changed: +55 −8 lines changed


pbm/storage/s3/s3_test.go

Lines changed: 55 additions & 8 deletions
@@ -2,6 +2,7 @@ package s3
 
 import (
 	"context"
+	"flag"
 	"io"
 	"net/http"
 	"path"
@@ -19,6 +20,7 @@ import (
 	"github.com/testcontainers/testcontainers-go/modules/minio"
 
 	"github.com/percona/percona-backup-mongodb/pbm/errors"
+	"github.com/percona/percona-backup-mongodb/pbm/log"
 	"github.com/percona/percona-backup-mongodb/pbm/storage"
 )
 
@@ -245,12 +247,15 @@ func TestToClientLogMode(t *testing.T) {
 	}
 }
 
-func BenchmarkAWSUpload(b *testing.B) {
-	fsize := int64(500 * 1024 * 1024)
+var (
+	fileSize = flag.Int64("file-size", 500, "file size that will be uploaded")
+	partSize = flag.Int64("part-size", 10, "part size that will be used to upload file")
+)
+
+func BenchmarkS3Upload(b *testing.B) {
 	numThreds := max(runtime.GOMAXPROCS(0), 1)
-	partSize := defaultPartSize
-	// partSize := int64(50 * 1024 * 1024)
-	// partSize := int64(100 * 1024 * 1024)
+	fsize := *fileSize * 1024 * 1024
+	pSize := *partSize * 1024 * 1024
 
 	region := "eu-central-1"
 	bucket := ""
@@ -270,8 +275,8 @@ func BenchmarkAWSUpload(b *testing.B) {
 		b.Fatalf("load default aws config: %v", err)
 	}
 	s3Client := s3.NewFromConfig(awsCfg)
-	b.Logf("aws s3 client: file size=%d bytes; part size=%d bytes; NumThreads=%d",
-		fsize, partSize, numThreds)
+	b.Logf("aws s3 client: file size=%s; part size=%s; NumThreads=%d",
+		storage.PrettySize(fsize), storage.PrettySize(pSize), numThreds)
 
 	b.ResetTimer()
 	b.SetBytes(fsize)
@@ -293,7 +298,7 @@ func BenchmarkAWSUpload(b *testing.B) {
 
 		b.StartTimer()
 		_, err := manager.NewUploader(s3Client, func(u *manager.Uploader) {
-			u.PartSize = partSize
+			u.PartSize = pSize
 			u.LeavePartsOnError = true
 			u.Concurrency = numThreds
 		}).Upload(context.Background(), putInput)
@@ -303,6 +308,48 @@ func BenchmarkAWSUpload(b *testing.B) {
 	}
 }
 
+func BenchmarkS3StorageSave(b *testing.B) {
+	numThreds := max(runtime.GOMAXPROCS(0), 1)
+	fsize := *fileSize * 1024 * 1024
+	pSize := *partSize * 1024 * 1024
+
+	cfg := &Config{
+		Region: "eu-central-1",
+		Bucket: "",
+		Prefix: "",
+		Credentials: Credentials{
+			AccessKeyID:     "",
+			SecretAccessKey: "",
+		},
+		UploadPartSize: int(pSize),
+	}
+
+	s, err := New(cfg, "", log.DiscardEvent)
+	if err != nil {
+		b.Fatalf("s3 storage creation: %v", err)
+	}
+	b.Logf("aws s3 client: file size=%s; part size=%s; NumThreads=%d",
+		storage.PrettySize(fsize), storage.PrettySize(pSize), numThreds)
+
+	b.ResetTimer()
+	b.SetBytes(fsize)
+
+	for b.Loop() {
+		b.StopTimer()
+		infR := NewInfiniteCustomReader()
+		r := io.LimitReader(infR, fsize)
+
+		fname := time.Now().Format("2006-01-02T15:04:05")
+		b.Logf("uploading file: %s ....", fname)
+
+		b.StartTimer()
+		err := s.Save(fname, r)
+		if err != nil {
+			b.Fatalf("save %s: %v", fname, err)
+		}
+	}
+}
+
 type InfiniteCustomReader struct {
 	pattern      []byte
 	patternIndex int
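
The new -file-size and -part-size flags are ordinary flag package flags interpreted as mebibytes (both benchmarks multiply them by 1024 * 1024, with defaults of 500 and 10). One possible way to run the new benchmark with custom sizes, sketched under the assumption that the empty Bucket and Credentials fields in the benchmark config are first filled in (or resolvable from the environment), and with 1024 and 32 as purely illustrative values:

    go test ./pbm/storage/s3 -run '^$' -bench BenchmarkS3StorageSave -benchtime 1x -args -file-size 1024 -part-size 32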

0 commit comments
