forked from dvassallo/s3-benchmark
-
Notifications
You must be signed in to change notification settings - Fork 2
/
main.go
424 lines (366 loc) · 13.6 KB
/
main.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
package main
import (
"crypto/sha1"
"encoding/json"
"flag"
"fmt"
"io/ioutil"
log2 "log"
"math"
"os"
"path/filepath"
"runtime"
"strings"
"time"
"github.com/iternity-dotcom/storage-benchmark/sbmark"
uuid "github.com/satori/go.uuid"
)
// bucketNamePrefix is the common prefix for the auto-generated bucket name.
const bucketNamePrefix = "storage-benchmark"
// defaultBucketName is derived from a SHA-1 of the hostname so each machine gets a stable, unique bucket name.
var defaultBucketName = fmt.Sprintf("%s-%x", bucketNamePrefix, sha1.Sum([]byte(getHostname())))
// use go build -ldflags "-X main.buildstamp=`date -u '+%Y-%m-%dT%I:%M:%S'` -X main.githash=`git rev-parse HEAD`"
// to bake the build time and commit hash into the binary; the values below are the dev-build defaults.
var buildstamp = "TODAY"
var githash = "DEV"
// whether to display the version information
var showVersion bool
// if true, all given .json output files will be parsed and fixed to match the current version of the model.
var fix bool
// if true, all given .json output files will be parsed and printed to the console using the standard console output format.
var print bool
// if not empty, the results of the test are saved as .csv file
var csvFileName string
// holds a path to a .json file or a path where to find multiple .json files
var jsonPath string
// whether to create a bucket on startup
var createBucket bool
// directory path where the log file is written (defaults to the current working directory)
var logPath string
// the context for this benchmark includes everything that's needed to run the benchmark
var ctx *sbmark.BenchmarkContext
// main is the program entry point: it parses the CLI flags and dispatches to
// exactly one of the supported actions (version info, report fixing/printing,
// or running the benchmark).
func main() {
	parseFlags()

	switch {
	case showVersion:
		displayVersion()
	case fix || print:
		fixJsonFiles()
	default:
		if createBucket {
			createBenchmarkBucket()
		}
		runBenchmark()
	}
}
// parseFlags parses the command line arguments, populates the global
// configuration variables and initializes the global benchmark context.
// It panics if the benchmark context cannot be started.
func parseFlags() {
	versionArg := flag.Bool("version", false, "Displays the version information.")
	descriptionArg := flag.String("description", "", "The description of your test run will be added to the .json report.")
	threadsMinArg := flag.Int("threads-min", 1, "The minimum number of threads to use when fetching objects.")
	threadsMaxArg := flag.Int("threads-max", 16, "The maximum number of threads to use when fetching objects.")
	payloadsMinArg := flag.Int("payloads-min", 1, "The minimum object size to test, with 1 = 1 KB, and every increment is a double of the previous value.")
	payloadsMaxArg := flag.Int("payloads-max", 18, "The maximum object size to test, with 1 = 1 KB, and every increment is a double of the previous value.")
	samplesArg := flag.Int("samples", 50, "The number of samples to collect for each test of a single object size and thread count. Default is 50. Minimum value is 4.")
	bucketNameArg := flag.String("bucket-name", defaultBucketName, "The target bucket or folder to be used.")
	regionArg := flag.String("region", "", "Sets the AWS region to use for the S3 bucket. Only applies if the bucket doesn't already exist.")
	endpointArg := flag.String("endpoint", "", "Sets the endpoint to use. Might be any URI.")
	csvArg := flag.String("csv", "", "Saves the results as .csv file.")
	jsonArg := flag.String("json", "", "Saves the results as .json file.")
	operationArg := flag.String("operation", "read", "Specify if you want to measure 'read' or 'write'. Default is 'read'")
	createBucketArg := flag.Bool("create-bucket", false, "create new bucket(default false)")
	logPathArg := flag.String("log-path", "", "Specify the path of the log file. Default is 'currentDir'")
	modeArg := flag.String("mode", "latency", "What do you want to measure? Choose 'latency' or 'burst'. Default is 'latency'")
	fixArg := flag.Bool("fix", false, "If set all .json reports given with the -json option will be parsed and fixed, so that they match the current version of storage-benchmark.")
	printArg := flag.Bool("print", false, "If set all .json reports given with -json option will be printed using the standard console output format.")
	keepAliveArg := flag.String("keepalive", "mode", "Use 'enabled' or 'disabled' to explicitly enable or disable connection pooling to your endpoint. Use 'mode' to use the default of the specified benchmark mode. Default is 'mode'.")

	// parse the arguments and set all the global variables accordingly
	flag.Parse()

	showVersion = *versionArg
	fix = *fixArg
	print = *printArg
	jsonPath = *jsonArg
	csvFileName = *csvArg
	createBucket = *createBucketArg

	// default the log directory to the current working directory
	if *logPathArg == "" {
		logPath, _ = os.Getwd()
	} else {
		logPath = *logPathArg
	}

	ctx = &sbmark.BenchmarkContext{
		Description:   *descriptionArg,
		ModeName:      *modeArg,
		OperationName: *operationArg,
		Endpoint:      *endpointArg,
		Region:        *regionArg,
		Path:          *bucketNameArg,
		PayloadsMin:   *payloadsMinArg,
		PayloadsMax:   *payloadsMaxArg,
		ThreadsMin:    *threadsMinArg,
		ThreadsMax:    *threadsMaxArg,
		Samples:       *samplesArg,
		NumberOfRuns:  0,
		Hostname:      getHostname(),
		InfoLogger:    createLogger("INFO "),
		WarningLogger: createLogger("WARNING "),
		ErrorLogger:   createLogger("ERROR "),
		KeepAlive:     *keepAliveArg,
	}

	// NOTE(review): Start() is executed even for -version/-fix/-print runs that
	// never use the benchmark context — consider deferring it to runBenchmark.
	err := ctx.Start()
	if err != nil {
		panic(err)
	}
}
// displayVersion prints the application name and version to stdout.
func displayVersion() {
	// terminate the line so the shell prompt doesn't get glued to the output
	fmt.Printf("%s version %s\n", getAppName(), getVersion())
}
// Try to transform existing storage-benchmark .json files to the current model.
// The function creates a new .json file, if the existing file can be transformed.
// The function is idempotent, so it can be safely executed multiple times.
// It will ignore files that do not end with .json or that don't seem to be a valid storage-benchmark result.
func fixJsonFiles() {
	fileList := make([]string, 0)
	e := filepath.Walk(jsonPath, func(path string, f os.FileInfo, err error) error {
		// propagate walk errors before touching the path
		if err != nil {
			return err
		}
		// skip files that were already fixed (they carry the githash suffix)
		if filepath.Ext(path) == ".json" && !strings.HasSuffix(path, githash+".json") {
			fileList = append(fileList, path)
		}
		return nil
	})
	if e != nil {
		panic(e)
	}
	for _, file := range fileList {
		// Parse in a generic way and try to transform the model
		var genericModel map[string]interface{}
		jsonData, err := ioutil.ReadFile(file)
		if err != nil {
			fmt.Printf("Couldn't read file %s. Error: %v\n", file, err)
			continue
		}
		err = json.Unmarshal(jsonData, &genericModel)
		if err != nil {
			fmt.Printf("Couldn't unmarshal file %s. Error: %v\n", file, err)
			continue
		}
		// Verify if this is a storage-benchmark result file
		_, hostnameExists := genericModel["hostname"]
		_, endpointExists := genericModel["endpoint"]
		_, operationExists := genericModel["operation"]
		_, reportExists := genericModel["report"]
		if !hostnameExists || !endpointExists || !operationExists || !reportExists {
			fmt.Printf("Skipping %s. This doesn't seem to be a storage-benchmark result file.\n", file)
			continue
		}
		// Remove ignored interface{} values that can conflict with primitive values.
		// delete is a no-op for absent keys, so no presence check is needed.
		delete(genericModel, "Mode")
		delete(genericModel, "Operation")
		// Marshal fixed model to []byte
		jsonData, err = json.Marshal(genericModel)
		if err != nil {
			fmt.Printf("Couldn't marshal content of file %s. Error: %v\n", file, err)
			continue
		}
		// Unmarshal to existing BenchmarkContext model.
		// BUG FIX: the error must be checked BEFORE using jsonCtx — the original
		// called jsonCtx.Start() first, which dereferences a nil context on a
		// parse failure. The Start() error is no longer silently discarded either.
		jsonCtx, err := sbmark.FromJsonByteArray(jsonData)
		if err != nil {
			fmt.Printf("Couldn't unmarshal modified content of file %s. Error: %v\n", file, err)
			continue
		}
		if err := jsonCtx.Start(); err != nil {
			fmt.Printf("Couldn't initialize context of file %s. Error: %v\n", file, err)
			continue
		}
		if fix {
			fixJson(jsonCtx, file)
		}
		if print {
			printJson(jsonCtx, ctx)
		}
	}
}
// fixJson transforms a parsed legacy report (ctx) into the current model and
// writes the result to a new file next to the original one. The original file
// is never modified.
func fixJson(ctx *sbmark.BenchmarkContext, file string) {
	// Create Uuid if missing
	if ctx.Report.Uuid == "" {
		reportUuid := uuid.NewV4().String()
		// Prefer the UUID of an already transformed .json file, if one exists,
		// so repeated runs remain idempotent.
		previousJson, readErr := ioutil.ReadFile(getFixedJsonFileName(file))
		if readErr == nil {
			previousCtx, parseErr := sbmark.FromJsonByteArray(previousJson)
			if parseErr == nil {
				reportUuid = previousCtx.Report.Uuid
			}
		}
		ctx.Report.Uuid = reportUuid
	}
	// Move description from BenchmarkContext to BenchmarkReport
	if ctx.Description != "" && ctx.Report.Description == "" {
		ctx.Report.Description = ctx.Description
	}
	// Backfill record fields that newer model versions introduced.
	for i := range ctx.Report.Records {
		record := &ctx.Report.Records[i]
		if record.ObjectSizeBytes > 0 && record.TotalBytes == 0 {
			record.TotalBytes = record.ObjectSizeBytes
		}
		if record.ObjectsCount == 0 {
			record.ObjectsCount = uint64(ctx.Samples)
		}
		if record.SingleObjectSize == 0 && record.ObjectSizeBytes > 0 && ctx.Samples > 0 {
			record.SingleObjectSize = record.ObjectSizeBytes / uint64(ctx.Samples)
		}
	}
	jsonData, err := sbmark.ToJson(ctx)
	if err != nil {
		fmt.Printf("Couldn't marshal modified content of file %s. Error: %v\n", file, err)
		return
	}
	// Create a new file containing the transformed model
	newFile := getFixedJsonFileName(file)
	if writeErr := os.WriteFile(newFile, jsonData, 0644); writeErr != nil {
		fmt.Printf("Couldn't create file %s. Error: %v\n", newFile, writeErr)
		return
	}
	fmt.Printf("Processed %s. Created %s\n", file, newFile)
}
// printJson renders a report loaded from a .json file (jsonCtx) to the
// console, using the payload/thread range of the current CLI invocation
// (cliCtx) to filter which records are shown.
func printJson(jsonCtx *sbmark.BenchmarkContext, cliCtx *sbmark.BenchmarkContext) {
	jsonCtx.PrintSettings()
	jsonCtx.Mode.PrintHeader(jsonCtx.OperationName)
	// payloadSize tracks the object size of the current output section;
	// 0 means no section has been opened yet.
	payloadSize := uint64(0)
	minPayloadSize := getPayloadSize(cliCtx.PayloadsMin)
	maxPayloadSize := getPayloadSize(cliCtx.PayloadsMax)
	minThreads := cliCtx.ThreadsMin
	maxThreads := cliCtx.ThreadsMax
	// Records appear grouped by object size — presumably sorted ascending;
	// whenever the size changes, the previous section is closed and a new
	// one is opened. TODO(review): confirm the records are ordered by size.
	for _, r := range jsonCtx.Report.Records {
		if payloadSize != 0 && payloadSize != r.SingleObjectSize {
			// stop entirely once the section for the maximum size is finished
			if payloadSize == maxPayloadSize {
				break
			}
			// close the section only if it was actually printed
			if payloadSize >= minPayloadSize {
				jsonCtx.Mode.PrintPayloadFooter()
			}
		}
		if payloadSize != r.SingleObjectSize {
			payloadSize = r.SingleObjectSize
			if payloadSize >= minPayloadSize {
				jsonCtx.Mode.PrintPayloadHeader(payloadSize, jsonCtx.OperationName)
			}
		}
		// only print records within the requested payload/thread ranges
		if payloadSize >= minPayloadSize && r.Threads >= minThreads && r.Threads <= maxThreads {
			jsonCtx.Mode.PrintRecord(r)
		}
	}
	jsonCtx.Mode.PrintPayloadFooter()
	jsonCtx.Mode.PrintFooter()
}
// getFixedJsonFileName returns the output file name for a transformed report:
// the original file name with its extension replaced by ".<githash>.json".
func getFixedJsonFileName(origFile string) string {
	// TrimSuffix strips only the trailing extension. The original
	// strings.Replace(..., 1) removed the FIRST occurrence of the extension
	// anywhere in the path (e.g. inside a directory named "x.json"), which
	// could mangle the path.
	origFileWithoutExt := strings.TrimSuffix(origFile, filepath.Ext(origFile))
	return fmt.Sprintf("%s.%s.json", origFileWithoutExt, githash)
}
// createLogger returns a logger with the given prefix that writes to
// "storage-benchmark.log" inside logPath. If the log file cannot be opened,
// the logger falls back to stderr instead of silently discarding all output
// (the original ignored the OpenFile error and logged into a nil file).
func createLogger(prefix string) *log2.Logger {
	// filepath.Join builds an OS-correct path instead of string concatenation
	flags := log2.Ldate | log2.Ltime | log2.Lshortfile | log2.Lmsgprefix
	file, err := os.OpenFile(filepath.Join(logPath, "storage-benchmark.log"), os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0777)
	if err != nil {
		return log2.New(os.Stderr, prefix, flags)
	}
	return log2.New(file, prefix, flags)
}
// createBenchmarkBucket creates the target bucket/folder for the benchmark.
// A "BucketAlreadyOwnedByYou" error is ignored so repeated runs are safe.
func createBenchmarkBucket() {
	fmt.Print("\n--- SETUP --------------------------------------------------------------------------------------------------------------------\n\n")
	_, err := ctx.Client.CreateBucket(ctx.Path)
	// if the error is because the bucket already exists, ignore the error
	if err != nil && !strings.Contains(err.Error(), "BucketAlreadyOwnedByYou:") {
		panic("Failed to create bucket: " + err.Error())
	}
	// BUG FIX: report success only after the error has been checked — the
	// original printed this line even when bucket creation failed.
	fmt.Printf("Created target path %s\n\n", getTargetPath())
}
// runBenchmark executes the configured benchmark for every payload size in
// the requested range and writes the results to the console and, optionally,
// to a .csv and/or .json file.
func runBenchmark() {
	// Init the final report
	ctx.Report = sbmark.Report{
		Uuid:        uuid.NewV4().String(),
		Description: ctx.Description,
		ClientEnv:   fmt.Sprintf("Application: %s, Version: %s, Host: %s, OS: %s", getAppName(), getVersion(), getHostname(), runtime.GOOS),
		ServerEnv:   ctx.Endpoint, // How can we get some informations about the ServerEnv? Or should this be a CLI param?
		DateTimeUTC: time.Now().UTC().String(),
		Records:     []sbmark.Record{},
	}
	// an object size iterator that starts from 1 KB and doubles the size on every iteration
	generatePayload := payloadSizeGenerator()
	ctx.PrintSettings()
	ctx.Mode.PrintHeader(ctx.OperationName)
	// loop over every payload size (we need to start at p := 1 because of the generatePayload() function)
	for p := 1; p <= ctx.PayloadsMax; p++ {
		// get an object size from the iterator
		payloadSize := generatePayload()
		// ignore payloads smaller than the min argument; the generator still
		// has to be advanced each iteration, hence continue instead of
		// starting the loop at PayloadsMin
		if p < ctx.PayloadsMin {
			continue
		}
		// create test data, run the measurement, then clean up per payload size
		ctx.Mode.EnsureTestdata(ctx, payloadSize)
		ctx.Mode.PrintPayloadHeader(payloadSize, ctx.OperationName)
		ctx.Mode.ExecuteBenchmark(ctx, payloadSize)
		ctx.Mode.PrintPayloadFooter()
		ctx.Mode.CleanupTestdata(ctx, payloadSize)
	}
	ctx.Mode.PrintFooter()
	// if the csv option is set, save the report as .csv
	if csvFileName != "" {
		csvReport, err := sbmark.ToCsv(ctx.Report)
		if err != nil {
			panic("Failed to create .csv output: " + err.Error())
		}
		err = os.WriteFile(csvFileName, csvReport, 0644)
		if err != nil {
			panic("Failed to create .csv output: " + err.Error())
		}
		fmt.Printf("CSV results were written to %s\n", csvFileName)
	}
	// if the json option is set, save the report as .json
	if jsonPath != "" {
		jsonReport, err := sbmark.ToJson(ctx)
		if err != nil {
			panic("Failed to create .json output: " + err.Error())
		}
		err = os.WriteFile(jsonPath, jsonReport, 0644)
		if err != nil {
			panic("Failed to create .json output: " + err.Error())
		}
		fmt.Printf("JSON results were written to %s\n", jsonPath)
	}
}
// getHostname returns the name of the machine running the benchmark. It
// panics if the operating system cannot report the hostname, because both
// the default bucket name and the report depend on it.
func getHostname() string {
	name, err := os.Hostname()
	if err != nil {
		panic(err)
	}
	return name
}
func getVersion() string {
return fmt.Sprintf("%s.%s", githash, buildstamp)
}
// getAppName returns the base name of the currently running executable.
func getAppName() string {
	executable := os.Args[0]
	return filepath.Base(executable)
}
func getTargetPath() string {
return fmt.Sprintf("%s/%s", ctx.Endpoint, ctx.Path)
}
// returns an object size iterator: the first call yields 1 KB and every
// subsequent call doubles the previous value.
func payloadSizeGenerator() func() uint64 {
	size := uint64(1024)
	return func() uint64 {
		current := size
		size <<= 1
		return current
	}
}
// returns the payload size of the i-th iteration.
// i=1 => 1024
// i=2 => 2048
// i=3 => 4096
// ...
// Values of i below 1 map to 0.
func getPayloadSize(i int) uint64 {
	if i < 1 {
		return 0
	}
	kilobytes := math.Pow(2, float64(i-1))
	return uint64(kilobytes * 1024)
}