From 6957c9d534c155e5df8d4fa3191056eba6840a08 Mon Sep 17 00:00:00 2001
From: Steven Normore
Date: Wed, 9 Apr 2014 11:40:46 +0000
Subject: initial bench and generate command structure

---
 cmd/bolt/bench.go | 43 +++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 43 insertions(+)
 create mode 100644 cmd/bolt/bench.go
(limited to 'cmd/bolt/bench.go')

diff --git a/cmd/bolt/bench.go b/cmd/bolt/bench.go
new file mode 100644
index 0000000..c35e747
--- /dev/null
+++ b/cmd/bolt/bench.go
@@ -0,0 +1,43 @@
+package main
+
+import (
+	"os"
+
+	"github.com/boltdb/bolt"
+)
+
+// Run benchmarks on a given dataset.
+func Bench() {
+	path := "bench"
+	if _, err := os.Stat(path); os.IsNotExist(err) {
+		fatal(err)
+		return
+	}
+
+	db, err := bolt.Open(path, 0600)
+	if err != nil {
+		fatal(err)
+		return
+	}
+	defer db.Close()
+
+	bucketName := "widgets"
+	key := "key1"
+	value := "value1"
+
+	err = db.Update(func(tx *bolt.Tx) error {
+		// Find bucket.
+		b := tx.Bucket(bucketName)
+		if b == nil {
+			fatalf("bucket not found: %s", bucketName)
+			return nil
+		}
+
+		// Set value for a given key.
+		return b.Put([]byte(key), []byte(value))
+	})
+	if err != nil {
+		fatal(err)
+		return
+	}
+}
-- cgit v1.2.3


From fdde1bcb0624ce5232bf0f3226a2f47fd5a24cb3 Mon Sep 17 00:00:00 2001
From: Steven Normore
Date: Fri, 11 Apr 2014 13:55:14 +0000
Subject: moar bench package

---
 Makefile | 3 ++-
 bench.go | 55 ++++++++++++++++++++++++++++++++++++++++++
 bench/bench.go | 42 ++++++++++++++++++++++++++++++++
 cmd/bolt/bench.go | 70 +++++++++++++++++++++++++++---------------------------
 cmd/bolt/import.go | 5 ++++
 cmd/bolt/keys.go | 32 +++++++++++++++++--------
 cmd/bolt/main.go | 3 ++-
 cmd/bolt/set.go | 6 +++++
 tx_test.go | 41 ++++++++++++++++++++++++++++++++
 9 files changed, 210 insertions(+), 47 deletions(-)
 create mode 100644 bench.go
 create mode 100644 bench/bench.go
(limited to 'cmd/bolt/bench.go')

diff --git a/Makefile b/Makefile
index f4964a6..81beac3 100644
--- a/Makefile
+++ b/Makefile
@@ -5,7 +5,7 @@ BRANCH=`git rev-parse --abbrev-ref HEAD`
 COMMIT=`git rev-parse --short HEAD`
 GOLDFLAGS="-X main.branch $(BRANCH) -X main.commit $(COMMIT)"
 
-bench: benchpreq
+bench:
 	go test -v -test.bench=$(BENCH)
 
 # http://cloc.sourceforge.net/
@@ -35,6 +35,7 @@ get:
 build: get
 	@mkdir -p bin
 	@go build -ldflags=$(GOLDFLAGS) -a -o bin/bolt-`git rev-parse --short HEAD` ./cmd/bolt
+	@echo "writing bin/bolt-`git rev-parse --short HEAD`"
 
 test: fmt errcheck
 	@go get github.com/stretchr/testify/assert
diff --git a/bench.go b/bench.go
new file mode 100644
index 0000000..16e4d7a
--- /dev/null
+++ b/bench.go
@@ -0,0 +1,55 @@
+package bolt
+
+import (
+	"sync"
+	"testing"
+)
+
+const (
+	BenchReadMode            = "read"
+	BenchWriteMode           = "write"
+	BenchSequentialTraversal = "sequential"
+	BenchRandomTraversal     = "random"
+)
+
+type Benchmark struct {
+	InputPath        string
+	ReadWriteMode    string
+	TraversalPattern string
+	Parallelism      int
+}
+
+func NewBenchmark(inputPath, readWriteMode, traversalPattern string, parallelism int) *Benchmark {
+	return &Benchmark{inputPath, readWriteMode, traversalPattern, parallelism}
+}
+
+func (bm *Benchmark) Run(b *testing.B) {
+
+	// Open the database.
+	db, err := Open(bm.InputPath, 0600)
+	if err != nil {
+		panic(err)
+		return
+	}
+	defer db.Close()
+
+	b.ResetTimer()
+
+	// Keep running a fixed number of parallel reads until we run out of time.
+	for i := 0; i < b.N; i++ {
+		var wg sync.WaitGroup
+		for j := 0; j < bm.Parallelism; j++ {
+			wg.Add(1)
+			go func() {
+				if bm.TraversalPattern == BenchRandomTraversal {
+					// Perform all reads in random order.
+					// indexes := rand.Perm(total)
+				} else {
+					// Perform all reads in sequential order.
+				}
+				wg.Done()
+			}()
+		}
+		wg.Wait()
+	}
+}
diff --git a/bench/bench.go b/bench/bench.go
new file mode 100644
index 0000000..3bdd43a
--- /dev/null
+++ b/bench/bench.go
@@ -0,0 +1,42 @@
+package bench
+
+import (
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"os"
+)
+
+type bucketItems map[string]string
+type buckets map[string]bucketItems
+
+type Benchmark struct {
+	buckets buckets
+}
+
+func New(filePath string) (*Benchmark, error) {
+	data := readFromFile(filePath)
+}
+
+func readFromFile(filePath string) (*Benchmark, error) {
+	if _, err := os.Stat(filePath); os.IsNotExist(err) {
+		return nil, err
+	}
+
+	file, err := ioutil.ReadFile(filePath)
+	if err != nil {
+		return nil, err
+	}
+
+	b := new(Benchmark)
+	if err := json.Unmarshal(file, &b.buckets); err != nil {
+		return nil, err
+	}
+
+	return b, nil
+}
+
+func (b *Benchmark) Run() error {
+	fmt.Println("Do things, run benchmarks, tell people...")
+	return nil
+}
diff --git a/cmd/bolt/bench.go b/cmd/bolt/bench.go
index c35e747..28cbbc3 100644
--- a/cmd/bolt/bench.go
+++ b/cmd/bolt/bench.go
@@ -1,43 +1,43 @@
 package main
 
 import (
-	"os"
+	"testing"
 
 	"github.com/boltdb/bolt"
 )
 
-// Run benchmarks on a given dataset.
-func Bench() {
-	path := "bench"
-	if _, err := os.Stat(path); os.IsNotExist(err) {
-		fatal(err)
-		return
-	}
-
-	db, err := bolt.Open(path, 0600)
-	if err != nil {
-		fatal(err)
-		return
-	}
-	defer db.Close()
-
-	bucketName := "widgets"
-	key := "key1"
-	value := "value1"
-
-	err = db.Update(func(tx *bolt.Tx) error {
-		// Find bucket.
-		b := tx.Bucket(bucketName)
-		if b == nil {
-			fatalf("bucket not found: %s", bucketName)
-			return nil
-		}
-
-		// Set value for a given key.
-		return b.Put([]byte(key), []byte(value))
-	})
-	if err != nil {
-		fatal(err)
-		return
-	}
+// Import converts an exported database dump into a new database.
+// parallelism: integer representing number of concurrent reads/writes
+// readWriteMode: 'read' or 'write'
+// traversalPattern: 'sequentrial' or 'random'
+func Bench(inputPath string, readWriteMode string, traversalPattern string, parallelism int) {
+
+	// cursor/sequential reads
+	// random reads
+
+	// sequential writes
+	// random writes
+
+	// reading from many buckets
+	// writing to many buckets
+
+	// read from many paths
+	// writing to many paths
+
+	// bucket size/messages
+	// bucket depth
+
+	// concurrency
+
+	// chart/graph
+
+	// profile
+
+	// benchmarks for getting all keys
+
+	b := bolt.NewBenchmark(inputPath, readWriteMode, traversalPattern, parallelism)
+
+	result := testing.Benchmark(b.Run)
+
+	println(result)
 }
diff --git a/cmd/bolt/import.go b/cmd/bolt/import.go
index bcb7d2e..0988f32 100644
--- a/cmd/bolt/import.go
+++ b/cmd/bolt/import.go
@@ -23,6 +23,11 @@ func Import(path string, input string) {
 		fatal(err)
 	}
 
+	// Import all of the buckets.
+	importBuckets(path, root)
+}
+
+func importBuckets(path string, root []*rawMessage) {
 	// Open the database.
 	db, err := bolt.Open(path, 0600)
 	if err != nil {
diff --git a/cmd/bolt/keys.go b/cmd/bolt/keys.go
index 6affefe..65b717f 100644
--- a/cmd/bolt/keys.go
+++ b/cmd/bolt/keys.go
@@ -1,6 +1,8 @@
 package main
 
 import (
+	"errors"
+	"fmt"
 	"os"
 
 	"github.com/boltdb/bolt"
@@ -8,34 +10,44 @@ import (
 
 // Keys retrieves a list of keys for a given bucket.
 func Keys(path, name string) {
-	if _, err := os.Stat(path); os.IsNotExist(err) {
+	keys, err := keys(path, name)
+
+	if err != nil {
 		fatal(err)
 		return
 	}
 
+	for _, key := range keys {
+		println(key)
+	}
+}
+
+func keys(path, name string) ([]string, error) {
+	if _, err := os.Stat(path); os.IsNotExist(err) {
+		return nil, err
+	}
+
 	db, err := bolt.Open(path, 0600)
 	if err != nil {
-		fatal(err)
-		return
+		return nil, err
 	}
 	defer db.Close()
 
+	keys := []string{}
+
 	err = db.View(func(tx *bolt.Tx) error {
 		// Find bucket.
 		b := tx.Bucket([]byte(name))
 		if b == nil {
-			fatalf("bucket not found: %s", name)
-			return nil
+			return errors.New(fmt.Sprintf("bucket %+v not found", b))
 		}
 
 		// Iterate over each key.
 		return b.ForEach(func(key, _ []byte) error {
-			println(string(key))
+			keys = append(keys, string(key))
 			return nil
 		})
 	})
-	if err != nil {
-		fatal(err)
-		return
-	}
+
+	return keys, err
 }
diff --git a/cmd/bolt/main.go b/cmd/bolt/main.go
index 009bdc5..19df59f 100644
--- a/cmd/bolt/main.go
+++ b/cmd/bolt/main.go
@@ -107,7 +107,8 @@ func NewApp() *cli.App {
 			Name:   "bench",
 			Usage:  "Run benchmarks on a given dataset",
 			Action: func(c *cli.Context) {
-				Bench()
+				srcPath := c.Args().Get(0)
+				Bench(srcPath, "read", "sequential", 1)
 			},
 		},
 	}
diff --git a/cmd/bolt/set.go b/cmd/bolt/set.go
index 9761f44..c757d27 100644
--- a/cmd/bolt/set.go
+++ b/cmd/bolt/set.go
@@ -21,6 +21,12 @@ func Set(path, name, key, value string) {
 	defer db.Close()
 
 	err = db.Update(func(tx *bolt.Tx) error {
+		// Create the bucket if it doesn't exist.
+		if err := tx.CreateBucketIfNotExists([]byte(name)); err != nil {
+			fatalf("create bucket: %s", err)
+			return nil
+		}
+
 		// Find bucket.
 		b := tx.Bucket([]byte(name))
 		if b == nil {
diff --git a/tx_test.go b/tx_test.go
index 53ec53e..ff0e1e4 100644
--- a/tx_test.go
+++ b/tx_test.go
@@ -365,6 +365,47 @@ func benchmarkTxPutSequential(b *testing.B, total int) {
 	})
 }
 
+// func BenchmarkParallel_1items_1threads(b *testing.B) { benchmarkParallel(1, 1) }
+// func BenchmarkParallel_1items_10threads(b *testing.B) { benchmarkParallel(1, 10) }
+// func BenchmarkParallel_1items_100threads(b *testing.B) { benchmarkParallel(1, 100) }
+// func BenchmarkParallel_1items_1000threads(b *testing.B) { benchmarkParallel(1, 1000) }
+
+// func BenchmarkParallel_10items_1threads(b *testing.B) { benchmarkParallel(10, 1) }
+// func BenchmarkParallel_10items_10threads(b *testing.B) { benchmarkParallel(10, 10) }
+// func BenchmarkParallel_10items_100threads(b *testing.B) { benchmarkParallel(10, 100) }
+// func BenchmarkParallel_10items_1000threads(b *testing.B) { benchmarkParallel(10, 1000) }
+
+// func BenchmarkParallel_100items_1threads(b *testing.B) { benchmarkParallel(100, 1) }
+// func BenchmarkParallel_100items_10threads(b *testing.B) { benchmarkParallel(100, 10) }
+// func BenchmarkParallel_100items_100threads(b *testing.B) { benchmarkParallel(100, 100) }
+// func BenchmarkParallel_100items_1000threads(b *testing.B) { benchmarkParallel(100, 1000) }
+
+// func BenchmarkParallel_1000items_1threads(b *testing.B) { benchmarkParallel(1000, 1) }
+// func BenchmarkParallel_1000items_10threads(b *testing.B) { benchmarkParallel(1000, 10) }
+// func BenchmarkParallel_1000items_100threads(b *testing.B) { benchmarkParallel(1000, 100) }
+// func BenchmarkParallel_1000items_1000threads(b *testing.B) { benchmarkParallel(1000, 1000) }
+
+// func benchmarkParallel(b *testing.B, itemCount, parallelism int) {
+// 	// Setup database.
+// 	for i := 0; i < itemCount; i++ {
+// 		// ... insert key/values here ...
+// 	}
+// 	b.ResetTimer()
+
+// 	// Keep running a fixed number of parallel reads until we run out of time.
+// 	for i := 0; i < b.N; i++ {
+// 		var wg sync.WaitGroup
+// 		for j := 0; j < parallelism; j++ {
+// 			wg.Add(1)
+// 			go func() {
+// 				// ... execute read here ...
+// 				wg.Done()
+// 			}()
+// 		}
+// 		wg.Wait()
+// 	}
+// }
+
 func ExampleTx_Rollback() {
 	// Open the database.
db, _ := Open(tempfile(), 0666) -- cgit v1.2.3 From 97bd718b0231814f119299a460d2b7d2b15590ce Mon Sep 17 00:00:00 2001 From: Steven Normore Date: Sun, 13 Apr 2014 19:57:47 +0000 Subject: add benchmarks using Benchmark framework --- bench.go | 83 ++++++++++++++++-------- bench/bench.go | 42 ------------ cmd/bolt/bench.go | 10 ++- cmd/bolt/generate.go | 61 ++++++++++++------ cmd/bolt/generate_test.go | 1 - cmd/bolt/keys.go | 32 +++------- cmd/bolt/main.go | 10 ++- cmd/bolt/set.go | 5 -- tx_test.go | 158 ++++++++++++++++++++++++++++++++++------------ 9 files changed, 240 insertions(+), 162 deletions(-) delete mode 100644 bench/bench.go delete mode 100644 cmd/bolt/generate_test.go (limited to 'cmd/bolt/bench.go') diff --git a/bench.go b/bench.go index a93a27c..e41c260 100644 --- a/bench.go +++ b/bench.go @@ -3,7 +3,6 @@ package bolt import ( "errors" "fmt" - "os" "sync" "testing" ) @@ -16,29 +15,31 @@ const ( ) type Benchmark struct { - InputPath string + db *DB ReadWriteMode string TraversalPattern string Parallelism int } -func NewBenchmark(inputPath, readWriteMode, traversalPattern string, parallelism int) *Benchmark { - return &Benchmark{inputPath, readWriteMode, traversalPattern, parallelism} +func NewBenchmark(db *DB, readWriteMode, traversalPattern string, parallelism int) *Benchmark { + return &Benchmark{db, readWriteMode, traversalPattern, parallelism} } func (bm *Benchmark) Run(b *testing.B) { - // Open the database. - db, err := Open(bm.InputPath, 0600) + // Read buckets and keys before benchmark begins so we don't knew the + // results. + buckets, err := buckets(bm.db) if err != nil { b.Fatalf("error: %+v", err) - return } - defer db.Close() - - buckets, err := buckets(db, bm.InputPath) - if err != nil { - b.Fatalf("error: %+v", err) + bucketsWithKeys := make(map[string][]string) + for _, bucket := range buckets { + keys, err := keys(bm.db, bucket) + if err != nil { + b.Fatalf("error: %+v", err) + } + bucketsWithKeys[bucket] = keys } b.ResetTimer() @@ -50,7 +51,7 @@ func (bm *Benchmark) Run(b *testing.B) { wg.Add(1) go func() { defer wg.Done() - if err := bm.runBuckets(b, db, buckets); err != nil { + if err := bm.runBuckets(b, bm.db, bucketsWithKeys); err != nil { b.Fatalf("error: %+v", err) } }() @@ -60,30 +61,39 @@ func (bm *Benchmark) Run(b *testing.B) { } // Run benchmark(s) for each of the given buckets. 
-func (bm *Benchmark) runBuckets(b *testing.B, db *DB, buckets []string) error { +func (bm *Benchmark) runBuckets(b *testing.B, db *DB, bucketsWithKeys map[string][]string) error { return db.View(func(tx *Tx) error { - bucketsCount := len(buckets) - for _, bucket := range buckets { - c := tx.Bucket([]byte(bucket)).Cursor() - count := 0 - for k, _ := c.First(); k != nil; k, _ = c.Next() { - count++ - } - if count != bucketsCount { - return errors.New(fmt.Sprintf("wrong count: %d; expected: %d", count, bucketsCount)) + bucketsCount := len(bucketsWithKeys) + count := 0 + for bucket, keys := range bucketsWithKeys { + bucket := tx.Bucket([]byte(bucket)) + if err := bm.runKeys(b, bucket, keys); err != nil { + return err } + count++ + } + if count != bucketsCount { + return errors.New(fmt.Sprintf("wrong count: %d; expected: %d", count, bucketsCount)) } return nil }) } -func buckets(db *DB, path string) ([]string, error) { - if _, err := os.Stat(path); os.IsNotExist(err) { - return nil, err +func (bm *Benchmark) runKeys(b *testing.B, bucket *Bucket, keys []string) error { + c := bucket.Cursor() + keysCount := len(keys) + count := 0 + for k, _ := c.First(); k != nil; k, _ = c.Next() { + count++ + } + if count != keysCount { + return errors.New(fmt.Sprintf("wrong count: %d; expected: %d", count, keysCount)) } + return nil +} +func buckets(db *DB) ([]string, error) { buckets := []string{} - err := db.View(func(tx *Tx) error { // Iterate over each bucket. return tx.ForEach(func(name []byte, _ *Bucket) error { @@ -91,6 +101,23 @@ func buckets(db *DB, path string) ([]string, error) { return nil }) }) - return buckets, err } + +func keys(db *DB, bucket string) ([]string, error) { + keys := []string{} + err := db.View(func(tx *Tx) error { + // Find bucket. + b := tx.Bucket([]byte(bucket)) + if b == nil { + return errors.New(fmt.Sprintf("bucket %+v not found", b)) + } + + // Iterate over each key. + return b.ForEach(func(key, _ []byte) error { + keys = append(keys, string(key)) + return nil + }) + }) + return keys, err +} diff --git a/bench/bench.go b/bench/bench.go deleted file mode 100644 index 3bdd43a..0000000 --- a/bench/bench.go +++ /dev/null @@ -1,42 +0,0 @@ -package bench - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "os" -) - -type bucketItems map[string]string -type buckets map[string]bucketItems - -type Benchmark struct { - buckets buckets -} - -func New(filePath string) (*Benchmark, error) { - data := readFromFile(filePath) -} - -func readFromFile(filePath string) (*Benchmark, error) { - if _, err := os.Stat(filePath); os.IsNotExist(err) { - return nil, err - } - - file, err := ioutil.ReadFile(filePath) - if err != nil { - return nil, err - } - - b := new(Benchmark) - if err := json.Unmarshal(file, &b.buckets); err != nil { - return nil, err - } - - return b, nil -} - -func (b *Benchmark) Run() error { - fmt.Println("Do things, run benchmarks, tell people...") - return nil -} diff --git a/cmd/bolt/bench.go b/cmd/bolt/bench.go index 28cbbc3..f894a70 100644 --- a/cmd/bolt/bench.go +++ b/cmd/bolt/bench.go @@ -35,7 +35,15 @@ func Bench(inputPath string, readWriteMode string, traversalPattern string, para // benchmarks for getting all keys - b := bolt.NewBenchmark(inputPath, readWriteMode, traversalPattern, parallelism) + // Open the database. 
+ db, err := bolt.Open(inputPath, 0600) + if err != nil { + fatalf("error: %+v", err) + return + } + defer db.Close() + + b := bolt.NewBenchmark(db, readWriteMode, traversalPattern, parallelism) result := testing.Benchmark(b.Run) diff --git a/cmd/bolt/generate.go b/cmd/bolt/generate.go index ecd391e..15edb27 100644 --- a/cmd/bolt/generate.go +++ b/cmd/bolt/generate.go @@ -1,32 +1,55 @@ package main import ( - "bufio" "fmt" - "os" - "strings" + + "github.com/boltdb/bolt" ) // Generate data for benchmarks. -func Generate(numEvents int, destPath string) { - f, err := os.Create(destPath) +func Generate(destPath string, numBuckets, numItems int) { + + // Open the database. + db, err := bolt.Open(destPath, 0600) if err != nil { - fatal(err) - } - defer func() { - if err := f.Close(); err != nil { - fatal(err) - } - }() - w := bufio.NewWriter(f) - - for i := 0; i < numEvents; i++ { - if _, err := w.Write([]byte(fmt.Sprintf("key%d:%s\n", i, strings.Repeat("0", 64)))); err != nil { - fatal(err) - } + fatalf("open db:", err) + return } + defer db.Close() + + for bucketIndex := 0; bucketIndex < numBuckets; bucketIndex++ { + bucketName := fmt.Sprintf("bucket%03d", bucketIndex) + + err = db.Update(func(tx *bolt.Tx) error { - if err = w.Flush(); err != nil { + // Create the bucket if it doesn't exist. + if err := tx.CreateBucketIfNotExists([]byte(bucketName)); err != nil { + fatalf("create bucket: %s", err) + return nil + } + + // Find bucket. + b := tx.Bucket([]byte(bucketName)) + if b == nil { + fatalf("bucket not found: %s", bucketName) + return nil + } + + for i := 0; i < numItems; i++ { + key := fmt.Sprintf("key%03d", i) + value := fmt.Sprintf("value%03d", i) + + // Set value for a given key. + if err := b.Put([]byte(key), []byte(value)); err != nil { + return err + } + } + + return nil + }) + } + if err != nil { fatal(err) + return } } diff --git a/cmd/bolt/generate_test.go b/cmd/bolt/generate_test.go deleted file mode 100644 index 06ab7d0..0000000 --- a/cmd/bolt/generate_test.go +++ /dev/null @@ -1 +0,0 @@ -package main diff --git a/cmd/bolt/keys.go b/cmd/bolt/keys.go index 65b717f..6affefe 100644 --- a/cmd/bolt/keys.go +++ b/cmd/bolt/keys.go @@ -1,8 +1,6 @@ package main import ( - "errors" - "fmt" "os" "github.com/boltdb/bolt" @@ -10,44 +8,34 @@ import ( // Keys retrieves a list of keys for a given bucket. func Keys(path, name string) { - keys, err := keys(path, name) - - if err != nil { + if _, err := os.Stat(path); os.IsNotExist(err) { fatal(err) return } - for _, key := range keys { - println(key) - } -} - -func keys(path, name string) ([]string, error) { - if _, err := os.Stat(path); os.IsNotExist(err) { - return nil, err - } - db, err := bolt.Open(path, 0600) if err != nil { - return nil, err + fatal(err) + return } defer db.Close() - keys := []string{} - err = db.View(func(tx *bolt.Tx) error { // Find bucket. b := tx.Bucket([]byte(name)) if b == nil { - return errors.New(fmt.Sprintf("bucket %+v not found", b)) + fatalf("bucket not found: %s", name) + return nil } // Iterate over each key. 
return b.ForEach(func(key, _ []byte) error { - keys = append(keys, string(key)) + println(string(key)) return nil }) }) - - return keys, err + if err != nil { + fatal(err) + return + } } diff --git a/cmd/bolt/main.go b/cmd/bolt/main.go index 19df59f..ac71631 100644 --- a/cmd/bolt/main.go +++ b/cmd/bolt/main.go @@ -95,12 +95,16 @@ func NewApp() *cli.App { Name: "generate", Usage: "Generate data for benchmarks", Action: func(c *cli.Context) { - numEvents, err := strconv.Atoi(c.Args().Get(0)) + destPath := c.Args().Get(0) + numBuckets, err := strconv.Atoi(c.Args().Get(1)) if err != nil { fatal(err) } - destPath := c.Args().Get(1) - Generate(numEvents, destPath) + numItems, err := strconv.Atoi(c.Args().Get(2)) + if err != nil { + fatal(err) + } + Generate(destPath, numBuckets, numItems) }, }, { diff --git a/cmd/bolt/set.go b/cmd/bolt/set.go index c757d27..f4a4696 100644 --- a/cmd/bolt/set.go +++ b/cmd/bolt/set.go @@ -21,11 +21,6 @@ func Set(path, name, key, value string) { defer db.Close() err = db.Update(func(tx *bolt.Tx) error { - // Create the bucket if it doesn't exist. - if err := tx.CreateBucketIfNotExists([]byte(name)); err != nil { - fatalf("create bucket: %s", err) - return nil - } // Find bucket. b := tx.Bucket([]byte(name)) diff --git a/tx_test.go b/tx_test.go index ff0e1e4..17498ed 100644 --- a/tx_test.go +++ b/tx_test.go @@ -266,6 +266,123 @@ func TestTx_OnCommit_Rollback(t *testing.T) { assert.Equal(t, 0, x) } +func BenchmarkReadSequential_1Buckets_1Items_1Concurrency(b *testing.B) { + benchmarkReadSequential(b, 1, 1, 1) +} +func BenchmarkReadSequential_1Buckets_10Items_1Concurrency(b *testing.B) { + benchmarkReadSequential(b, 1, 10, 1) +} +func BenchmarkReadSequential_1Buckets_100Items_1Concurrency(b *testing.B) { + benchmarkReadSequential(b, 1, 100, 1) +} +func BenchmarkReadSequential_1Buckets_1000Items_1Concurrency(b *testing.B) { + benchmarkReadSequential(b, 1, 1000, 1) +} +func BenchmarkReadSequential_1Buckets_10000Items_1Concurrency(b *testing.B) { + benchmarkReadSequential(b, 1, 10000, 1) +} + +func BenchmarkReadSequential_1Buckets_1Items_10Concurrency(b *testing.B) { + benchmarkReadSequential(b, 1, 1, 10) +} +func BenchmarkReadSequential_1Buckets_10Items_10Concurrency(b *testing.B) { + benchmarkReadSequential(b, 1, 10, 10) +} +func BenchmarkReadSequential_1Buckets_100Items_10Concurrency(b *testing.B) { + benchmarkReadSequential(b, 1, 100, 10) +} +func BenchmarkReadSequential_1Buckets_1000Items_10Concurrency(b *testing.B) { + benchmarkReadSequential(b, 1, 1000, 10) +} +func BenchmarkReadSequential_1Buckets_10000Items_10Concurrency(b *testing.B) { + benchmarkReadSequential(b, 1, 10000, 10) +} + +func BenchmarkReadSequential_1Buckets_1Items_100Concurrency(b *testing.B) { + benchmarkReadSequential(b, 1, 1, 100) +} +func BenchmarkReadSequential_1Buckets_10Items_100Concurrency(b *testing.B) { + benchmarkReadSequential(b, 1, 10, 100) +} +func BenchmarkReadSequential_1Buckets_100Items_100Concurrency(b *testing.B) { + benchmarkReadSequential(b, 1, 100, 100) +} +func BenchmarkReadSequential_1Buckets_1000Items_100Concurrency(b *testing.B) { + benchmarkReadSequential(b, 1, 1000, 100) +} +func BenchmarkReadSequential_1Buckets_10000Items_100Concurrency(b *testing.B) { + benchmarkReadSequential(b, 1, 10000, 100) +} + +func BenchmarkReadSequential_1Buckets_1Items_1000Concurrency(b *testing.B) { + benchmarkReadSequential(b, 1, 1, 1000) +} +func BenchmarkReadSequential_1Buckets_10Items_1000Concurrency(b *testing.B) { + benchmarkReadSequential(b, 1, 10, 1000) +} +func 
BenchmarkReadSequential_1Buckets_100Items_1000Concurrency(b *testing.B) { + benchmarkReadSequential(b, 1, 100, 1000) +} +func BenchmarkReadSequential_1Buckets_1000Items_1000Concurrency(b *testing.B) { + benchmarkReadSequential(b, 1, 1000, 1000) +} +func BenchmarkReadSequential_1Buckets_10000Items_1000Concurrency(b *testing.B) { + benchmarkReadSequential(b, 1, 10000, 1000) +} + +func BenchmarkReadSequential_1Buckets_1Items_10000Concurrency(b *testing.B) { + benchmarkReadSequential(b, 1, 1, 10000) +} +func BenchmarkReadSequential_1Buckets_10Items_10000Concurrency(b *testing.B) { + benchmarkReadSequential(b, 1, 10, 10000) +} +func BenchmarkReadSequential_1Buckets_100Items_10000Concurrency(b *testing.B) { + benchmarkReadSequential(b, 1, 100, 10000) +} +func BenchmarkReadSequential_1Buckets_1000Items_10000Concurrency(b *testing.B) { + benchmarkReadSequential(b, 1, 1000, 10000) +} +func BenchmarkReadSequential_1Buckets_10000Items_10000Concurrency(b *testing.B) { + benchmarkReadSequential(b, 1, 10000, 10000) +} + +func benchmark(b *testing.B, readWriteMode, traversalPattern string, numBuckets, numItemsPerBucket, parallelism int) { + withOpenDB(func(db *DB, path string) { + if err := generateDB(db, numBuckets, numItemsPerBucket); err != nil { + b.Fatal(err) + } + NewBenchmark(db, readWriteMode, traversalPattern, parallelism).Run(b) + }) +} + +func benchmarkRead(b *testing.B, traversalPattern string, numBuckets, numItemsPerBucket, parallelism int) { + benchmark(b, BenchReadMode, traversalPattern, numBuckets, numItemsPerBucket, parallelism) +} + +func benchmarkReadSequential(b *testing.B, numBuckets, numItemsPerBucket, parallelism int) { + benchmark(b, BenchReadMode, BenchSequentialTraversal, numBuckets, numItemsPerBucket, parallelism) +} + +func benchmarkReadRandom(b *testing.B, numBuckets, numItemsPerBucket, parallelism int) { + benchmark(b, BenchReadMode, BenchRandomTraversal, numBuckets, numItemsPerBucket, parallelism) +} + +// Generate and write data to specified number of buckets/items. +func generateDB(db *DB, numBuckets, numItemsPerBucket int) error { + return db.Update(func(tx *Tx) error { + for bucketIndex := 0; bucketIndex < numBuckets; bucketIndex++ { + bucketName := fmt.Sprintf("bucket%08d") + tx.CreateBucket([]byte(bucketName)) + bucket := tx.Bucket([]byte(bucketName)) + for i := 0; i < numItemsPerBucket; i++ { + value := []byte(strings.Repeat("0", 100)) + bucket.Put([]byte(fmt.Sprintf("key%08d", i)), value) + } + } + return nil + }) +} + // Benchmark the performance iterating over a cursor. 
func BenchmarkTxCursor1(b *testing.B) { benchmarkTxCursor(b, 1) } func BenchmarkTxCursor10(b *testing.B) { benchmarkTxCursor(b, 10) } @@ -365,47 +482,6 @@ func benchmarkTxPutSequential(b *testing.B, total int) { }) } -// func BenchmarkParallel_1items_1threads(b *testing.B) { benchmarkParallel(1, 1) } -// func BenchmarkParallel_1items_10threads(b *testing.B) { benchmarkParallel(1, 10) } -// func BenchmarkParallel_1items_100threads(b *testing.B) { benchmarkParallel(1, 100) } -// func BenchmarkParallel_1items_1000threads(b *testing.B) { benchmarkParallel(1, 1000) } - -// func BenchmarkParallel_10items_1threads(b *testing.B) { benchmarkParallel(10, 1) } -// func BenchmarkParallel_10items_10threads(b *testing.B) { benchmarkParallel(10, 10) } -// func BenchmarkParallel_10items_100threads(b *testing.B) { benchmarkParallel(10, 100) } -// func BenchmarkParallel_10items_1000threads(b *testing.B) { benchmarkParallel(10, 1000) } - -// func BenchmarkParallel_100items_1threads(b *testing.B) { benchmarkParallel(100, 1) } -// func BenchmarkParallel_100items_10threads(b *testing.B) { benchmarkParallel(100, 10) } -// func BenchmarkParallel_100items_100threads(b *testing.B) { benchmarkParallel(100, 100) } -// func BenchmarkParallel_100items_1000threads(b *testing.B) { benchmarkParallel(100, 1000) } - -// func BenchmarkParallel_1000items_1threads(b *testing.B) { benchmarkParallel(1000, 1) } -// func BenchmarkParallel_1000items_10threads(b *testing.B) { benchmarkParallel(1000, 10) } -// func BenchmarkParallel_1000items_100threads(b *testing.B) { benchmarkParallel(1000, 100) } -// func BenchmarkParallel_1000items_1000threads(b *testing.B) { benchmarkParallel(1000, 1000) } - -// func benchmarkParallel(b *testing.B, itemCount, parallelism int) { -// // Setup database. -// for i := 0; i < itemCount; i++ { -// // ... insert key/values here ... -// } -// b.ResetTimer() - -// // Keep running a fixed number of parallel reads until we run out of time. -// for i := 0; i < b.N; i++ { -// var wg sync.WaitGroup -// for j := 0; j < parallelism; j++ { -// wg.Add(1) -// go func() { -// // ... execute read here ... -// wg.Done() -// }() -// } -// wg.Wait() -// } -// } - func ExampleTx_Rollback() { // Open the database. db, _ := Open(tempfile(), 0666) -- cgit v1.2.3 From 105fece47a108f73bd18128af0403acbe8228d36 Mon Sep 17 00:00:00 2001 From: Steven Normore Date: Mon, 14 Apr 2014 15:45:44 +0000 Subject: add bench sub-package --- Makefile | 4 +- bench.go | 123 ---------------------------------------------------- bench/bench.go | 126 ++++++++++++++++++++++++++++++++++++++++++++++++++++++ bench/config.go | 7 +++ bench/generate.go | 24 +++++++++++ cmd/bolt/bench.go | 36 ++++------------ tx_test.go | 81 +++++++++++++++-------------------- 7 files changed, 202 insertions(+), 199 deletions(-) delete mode 100644 bench.go create mode 100644 bench/bench.go create mode 100644 bench/config.go create mode 100644 bench/generate.go (limited to 'cmd/bolt/bench.go') diff --git a/Makefile b/Makefile index 81beac3..0ed5996 100644 --- a/Makefile +++ b/Makefile @@ -6,7 +6,7 @@ COMMIT=`git rev-parse --short HEAD` GOLDFLAGS="-X main.branch $(BRANCH) -X main.commit $(COMMIT)" bench: - go test -v -test.bench=$(BENCH) + go test -v -test.run=NOTHINCONTAINSTHIS -test.bench=$(BENCH) # http://cloc.sourceforge.net/ cloc: @@ -24,7 +24,7 @@ cpuprofile: fmt # go get github.com/kisielk/errcheck errcheck: @echo "=== errcheck ===" - @errcheck github.com/boltdb/bolt + @.go/bin/errcheck github.com/boltdb/bolt fmt: @go fmt ./... 
diff --git a/bench.go b/bench.go deleted file mode 100644 index e41c260..0000000 --- a/bench.go +++ /dev/null @@ -1,123 +0,0 @@ -package bolt - -import ( - "errors" - "fmt" - "sync" - "testing" -) - -const ( - BenchReadMode = "read" - BenchWriteMode = "write" - BenchSequentialTraversal = "sequential" - BenchRandomTraversal = "random" -) - -type Benchmark struct { - db *DB - ReadWriteMode string - TraversalPattern string - Parallelism int -} - -func NewBenchmark(db *DB, readWriteMode, traversalPattern string, parallelism int) *Benchmark { - return &Benchmark{db, readWriteMode, traversalPattern, parallelism} -} - -func (bm *Benchmark) Run(b *testing.B) { - - // Read buckets and keys before benchmark begins so we don't knew the - // results. - buckets, err := buckets(bm.db) - if err != nil { - b.Fatalf("error: %+v", err) - } - bucketsWithKeys := make(map[string][]string) - for _, bucket := range buckets { - keys, err := keys(bm.db, bucket) - if err != nil { - b.Fatalf("error: %+v", err) - } - bucketsWithKeys[bucket] = keys - } - - b.ResetTimer() - - // Keep running a fixed number of parallel reads until we run out of time. - for i := 0; i < b.N; i++ { - var wg sync.WaitGroup - for j := 0; j < bm.Parallelism; j++ { - wg.Add(1) - go func() { - defer wg.Done() - if err := bm.runBuckets(b, bm.db, bucketsWithKeys); err != nil { - b.Fatalf("error: %+v", err) - } - }() - } - wg.Wait() - } -} - -// Run benchmark(s) for each of the given buckets. -func (bm *Benchmark) runBuckets(b *testing.B, db *DB, bucketsWithKeys map[string][]string) error { - return db.View(func(tx *Tx) error { - bucketsCount := len(bucketsWithKeys) - count := 0 - for bucket, keys := range bucketsWithKeys { - bucket := tx.Bucket([]byte(bucket)) - if err := bm.runKeys(b, bucket, keys); err != nil { - return err - } - count++ - } - if count != bucketsCount { - return errors.New(fmt.Sprintf("wrong count: %d; expected: %d", count, bucketsCount)) - } - return nil - }) -} - -func (bm *Benchmark) runKeys(b *testing.B, bucket *Bucket, keys []string) error { - c := bucket.Cursor() - keysCount := len(keys) - count := 0 - for k, _ := c.First(); k != nil; k, _ = c.Next() { - count++ - } - if count != keysCount { - return errors.New(fmt.Sprintf("wrong count: %d; expected: %d", count, keysCount)) - } - return nil -} - -func buckets(db *DB) ([]string, error) { - buckets := []string{} - err := db.View(func(tx *Tx) error { - // Iterate over each bucket. - return tx.ForEach(func(name []byte, _ *Bucket) error { - buckets = append(buckets, string(name)) - return nil - }) - }) - return buckets, err -} - -func keys(db *DB, bucket string) ([]string, error) { - keys := []string{} - err := db.View(func(tx *Tx) error { - // Find bucket. - b := tx.Bucket([]byte(bucket)) - if b == nil { - return errors.New(fmt.Sprintf("bucket %+v not found", b)) - } - - // Iterate over each key. 
- return b.ForEach(func(key, _ []byte) error { - keys = append(keys, string(key)) - return nil - }) - }) - return keys, err -} diff --git a/bench/bench.go b/bench/bench.go new file mode 100644 index 0000000..df584f2 --- /dev/null +++ b/bench/bench.go @@ -0,0 +1,126 @@ +package bench + +import ( + "errors" + "fmt" + "sync" + "testing" + + "github.com/boltdb/bolt" +) + +const ( + BenchReadMode = "read" + BenchWriteMode = "write" + BenchSequentialTraversal = "sequential" + BenchRandomTraversal = "random" +) + +type Benchmark struct { + db *bolt.DB + config *Config +} + +func New(db *bolt.DB, config *Config) *Benchmark { + b := new(Benchmark) + b.db = db + b.config = config + return b +} + +func (bm *Benchmark) Run(b *testing.B) { + + // Read buckets and keys before benchmark begins so we don't knew the + // results. + buckets, err := buckets(bm.db) + if err != nil { + b.Fatalf("error: %+v", err) + } + bucketsWithKeys := make(map[string][]string) + for _, bucket := range buckets { + keys, err := keys(bm.db, bucket) + if err != nil { + b.Fatalf("error: %+v", err) + } + bucketsWithKeys[bucket] = keys + } + + b.ResetTimer() + + // Keep running a fixed number of parallel reads until we run out of time. + for i := 0; i < b.N; i++ { + var wg sync.WaitGroup + for j := 0; j < bm.config.Parallelism; j++ { + wg.Add(1) + go func() { + defer wg.Done() + if err := bm.readBuckets(b, bm.db, bucketsWithKeys); err != nil { + b.Fatalf("error: %+v", err) + } + }() + } + wg.Wait() + } +} + +// Run benchmark(s) for each of the given buckets. +func (bm *Benchmark) readBuckets(b *testing.B, db *bolt.DB, bucketsWithKeys map[string][]string) error { + return db.View(func(tx *bolt.Tx) error { + bucketsCount := len(bucketsWithKeys) + count := 0 + for bucket, keys := range bucketsWithKeys { + bucket := tx.Bucket([]byte(bucket)) + if err := bm.readKeys(b, bucket, keys); err != nil { + return err + } + count++ + } + if count != bucketsCount { + return errors.New(fmt.Sprintf("wrong count: %d; expected: %d", count, bucketsCount)) + } + return nil + }) +} + +func (bm *Benchmark) readKeys(b *testing.B, bucket *bolt.Bucket, keys []string) error { + c := bucket.Cursor() + keysCount := len(keys) + count := 0 + for k, _ := c.First(); k != nil; k, _ = c.Next() { + count++ + } + if count != keysCount { + return errors.New(fmt.Sprintf("wrong count: %d; expected: %d", count, keysCount)) + } + return nil +} + +func buckets(db *bolt.DB) ([]string, error) { + buckets := []string{} + err := db.View(func(tx *bolt.Tx) error { + // Iterate over each bucket. + return tx.ForEach(func(name []byte, _ *bolt.Bucket) error { + buckets = append(buckets, string(name)) + return nil + }) + }) + return buckets, err +} + +func keys(db *bolt.DB, bucket string) ([]string, error) { + keys := []string{} + err := db.View(func(tx *bolt.Tx) error { + // Find bucket. + b := tx.Bucket([]byte(bucket)) + if b == nil { + return errors.New(fmt.Sprintf("bucket %+v not found", b)) + } + + // Iterate over each key. 
+ return b.ForEach(func(key, _ []byte) error { + keys = append(keys, string(key)) + return nil + }) + }) + return keys, err +} diff --git a/bench/config.go b/bench/config.go new file mode 100644 index 0000000..dea08fd --- /dev/null +++ b/bench/config.go @@ -0,0 +1,7 @@ +package bench + +type Config struct { + ReadWriteMode string + TraversalPattern string + Parallelism int +} diff --git a/bench/generate.go b/bench/generate.go new file mode 100644 index 0000000..8c5554d --- /dev/null +++ b/bench/generate.go @@ -0,0 +1,24 @@ +package bench + +import ( + "fmt" + "strings" + + "github.com/boltdb/bolt" +) + +// Generate and write data to specified number of buckets/items. +func GenerateDB(db *bolt.DB, numBuckets, numItemsPerBucket int) error { + return db.Update(func(tx *bolt.Tx) error { + for bucketIndex := 0; bucketIndex < numBuckets; bucketIndex++ { + bucketName := fmt.Sprintf("bucket%08d") + tx.CreateBucket([]byte(bucketName)) + bucket := tx.Bucket([]byte(bucketName)) + for i := 0; i < numItemsPerBucket; i++ { + value := []byte(strings.Repeat("0", 100)) + bucket.Put([]byte(fmt.Sprintf("key%08d", i)), value) + } + } + return nil + }) +} diff --git a/cmd/bolt/bench.go b/cmd/bolt/bench.go index f894a70..2b6ff8a 100644 --- a/cmd/bolt/bench.go +++ b/cmd/bolt/bench.go @@ -4,37 +4,15 @@ import ( "testing" "github.com/boltdb/bolt" + "github.com/boltdb/bolt/bench" ) // Import converts an exported database dump into a new database. -// parallelism: integer representing number of concurrent reads/writes // readWriteMode: 'read' or 'write' // traversalPattern: 'sequentrial' or 'random' +// parallelism: integer representing number of concurrent reads/writes func Bench(inputPath string, readWriteMode string, traversalPattern string, parallelism int) { - // cursor/sequential reads - // random reads - - // sequential writes - // random writes - - // reading from many buckets - // writing to many buckets - - // read from many paths - // writing to many paths - - // bucket size/messages - // bucket depth - - // concurrency - - // chart/graph - - // profile - - // benchmarks for getting all keys - // Open the database. 
db, err := bolt.Open(inputPath, 0600) if err != nil { @@ -43,9 +21,11 @@ func Bench(inputPath string, readWriteMode string, traversalPattern string, para } defer db.Close() - b := bolt.NewBenchmark(db, readWriteMode, traversalPattern, parallelism) - - result := testing.Benchmark(b.Run) + b := bench.New(db, &bench.Config{ + ReadWriteMode: readWriteMode, + TraversalPattern: traversalPattern, + Parallelism: parallelism, + }) - println(result) + println(testing.Benchmark(b.Run)) } diff --git a/tx_test.go b/tx_test.go index 17498ed..61810d3 100644 --- a/tx_test.go +++ b/tx_test.go @@ -9,6 +9,7 @@ import ( "strings" "testing" + "github.com/boltdb/bolt/bench" "github.com/stretchr/testify/assert" ) @@ -266,121 +267,109 @@ func TestTx_OnCommit_Rollback(t *testing.T) { assert.Equal(t, 0, x) } -func BenchmarkReadSequential_1Buckets_1Items_1Concurrency(b *testing.B) { +func BenchmarkReadSequential_1Concurrency_1Buckets_1Items(b *testing.B) { benchmarkReadSequential(b, 1, 1, 1) } -func BenchmarkReadSequential_1Buckets_10Items_1Concurrency(b *testing.B) { +func BenchmarkReadSequential_1Concurrency_1Buckets_10Items(b *testing.B) { benchmarkReadSequential(b, 1, 10, 1) } -func BenchmarkReadSequential_1Buckets_100Items_1Concurrency(b *testing.B) { +func BenchmarkReadSequential_1Concurrency_1Buckets_100Items(b *testing.B) { benchmarkReadSequential(b, 1, 100, 1) } -func BenchmarkReadSequential_1Buckets_1000Items_1Concurrency(b *testing.B) { +func BenchmarkReadSequential_1Concurrency_1Buckets_1000Items(b *testing.B) { benchmarkReadSequential(b, 1, 1000, 1) } -func BenchmarkReadSequential_1Buckets_10000Items_1Concurrency(b *testing.B) { +func BenchmarkReadSequential_1Concurrency_1Buckets_10000Items(b *testing.B) { benchmarkReadSequential(b, 1, 10000, 1) } -func BenchmarkReadSequential_1Buckets_1Items_10Concurrency(b *testing.B) { +func BenchmarkReadSequential_10Concurrency_1Buckets_1Items(b *testing.B) { benchmarkReadSequential(b, 1, 1, 10) } -func BenchmarkReadSequential_1Buckets_10Items_10Concurrency(b *testing.B) { +func BenchmarkReadSequential_10Concurrency_1Buckets_10Items(b *testing.B) { benchmarkReadSequential(b, 1, 10, 10) } -func BenchmarkReadSequential_1Buckets_100Items_10Concurrency(b *testing.B) { +func BenchmarkReadSequential_10Concurrency_1Buckets_100Items(b *testing.B) { benchmarkReadSequential(b, 1, 100, 10) } -func BenchmarkReadSequential_1Buckets_1000Items_10Concurrency(b *testing.B) { +func BenchmarkReadSequential_10Concurrency_1Buckets_1000Items(b *testing.B) { benchmarkReadSequential(b, 1, 1000, 10) } -func BenchmarkReadSequential_1Buckets_10000Items_10Concurrency(b *testing.B) { +func BenchmarkReadSequential_10Concurrency_1Buckets_10000Items(b *testing.B) { benchmarkReadSequential(b, 1, 10000, 10) } -func BenchmarkReadSequential_1Buckets_1Items_100Concurrency(b *testing.B) { +func BenchmarkReadSequential_100Concurrency_1Buckets_1Items(b *testing.B) { benchmarkReadSequential(b, 1, 1, 100) } -func BenchmarkReadSequential_1Buckets_10Items_100Concurrency(b *testing.B) { +func BenchmarkReadSequential_100Concurrency_1Buckets_10Items(b *testing.B) { benchmarkReadSequential(b, 1, 10, 100) } -func BenchmarkReadSequential_1Buckets_100Items_100Concurrency(b *testing.B) { +func BenchmarkReadSequential_100Concurrency_1Buckets_100Items(b *testing.B) { benchmarkReadSequential(b, 1, 100, 100) } -func BenchmarkReadSequential_1Buckets_1000Items_100Concurrency(b *testing.B) { +func BenchmarkReadSequential_100Concurrency_1Buckets_1000Items(b *testing.B) { benchmarkReadSequential(b, 1, 1000, 100) } -func 
BenchmarkReadSequential_1Buckets_10000Items_100Concurrency(b *testing.B) { +func BenchmarkReadSequential_100Concurrency_1Buckets_10000Items(b *testing.B) { benchmarkReadSequential(b, 1, 10000, 100) } -func BenchmarkReadSequential_1Buckets_1Items_1000Concurrency(b *testing.B) { +func BenchmarkReadSequential_1000Concurrency_1Buckets_1Items(b *testing.B) { benchmarkReadSequential(b, 1, 1, 1000) } -func BenchmarkReadSequential_1Buckets_10Items_1000Concurrency(b *testing.B) { +func BenchmarkReadSequential_1000Concurrency_1Buckets_10Items(b *testing.B) { benchmarkReadSequential(b, 1, 10, 1000) } -func BenchmarkReadSequential_1Buckets_100Items_1000Concurrency(b *testing.B) { +func BenchmarkReadSequential_1000Concurrency_1Buckets_100Items(b *testing.B) { benchmarkReadSequential(b, 1, 100, 1000) } -func BenchmarkReadSequential_1Buckets_1000Items_1000Concurrency(b *testing.B) { +func BenchmarkReadSequential_1000Concurrency_1Buckets_1000Items(b *testing.B) { benchmarkReadSequential(b, 1, 1000, 1000) } -func BenchmarkReadSequential_1Buckets_10000Items_1000Concurrency(b *testing.B) { +func BenchmarkReadSequential_1000Concurrency_1Buckets_10000Items(b *testing.B) { benchmarkReadSequential(b, 1, 10000, 1000) } -func BenchmarkReadSequential_1Buckets_1Items_10000Concurrency(b *testing.B) { +func BenchmarkReadSequential_10000Concurrency_1Buckets_1Items(b *testing.B) { benchmarkReadSequential(b, 1, 1, 10000) } -func BenchmarkReadSequential_1Buckets_10Items_10000Concurrency(b *testing.B) { +func BenchmarkReadSequential_10000Concurrency_1Buckets_10Items(b *testing.B) { benchmarkReadSequential(b, 1, 10, 10000) } -func BenchmarkReadSequential_1Buckets_100Items_10000Concurrency(b *testing.B) { +func BenchmarkReadSequential_10000Concurrency_1Buckets_100Items(b *testing.B) { benchmarkReadSequential(b, 1, 100, 10000) } -func BenchmarkReadSequential_1Buckets_1000Items_10000Concurrency(b *testing.B) { +func BenchmarkReadSequential_10000Concurrency_1Buckets_1000Items(b *testing.B) { benchmarkReadSequential(b, 1, 1000, 10000) } -func BenchmarkReadSequential_1Buckets_10000Items_10000Concurrency(b *testing.B) { +func BenchmarkReadSequential_10000Concurrency_1Buckets_10000Items(b *testing.B) { benchmarkReadSequential(b, 1, 10000, 10000) } func benchmark(b *testing.B, readWriteMode, traversalPattern string, numBuckets, numItemsPerBucket, parallelism int) { withOpenDB(func(db *DB, path string) { - if err := generateDB(db, numBuckets, numItemsPerBucket); err != nil { + if err := bench.GenerateDB(db, numBuckets, numItemsPerBucket); err != nil { b.Fatal(err) } - NewBenchmark(db, readWriteMode, traversalPattern, parallelism).Run(b) + bench.New(db, &bench.Config{ + ReadWriteMode: readWriteMode, + TraversalPattern: traversalPattern, + Parallelism: parallelism, + }).Run(b) }) } func benchmarkRead(b *testing.B, traversalPattern string, numBuckets, numItemsPerBucket, parallelism int) { - benchmark(b, BenchReadMode, traversalPattern, numBuckets, numItemsPerBucket, parallelism) + benchmark(b, bench.BenchReadMode, traversalPattern, numBuckets, numItemsPerBucket, parallelism) } func benchmarkReadSequential(b *testing.B, numBuckets, numItemsPerBucket, parallelism int) { - benchmark(b, BenchReadMode, BenchSequentialTraversal, numBuckets, numItemsPerBucket, parallelism) + benchmark(b, bench.BenchReadMode, bench.BenchSequentialTraversal, numBuckets, numItemsPerBucket, parallelism) } func benchmarkReadRandom(b *testing.B, numBuckets, numItemsPerBucket, parallelism int) { - benchmark(b, BenchReadMode, BenchRandomTraversal, numBuckets, 
numItemsPerBucket, parallelism) -} - -// Generate and write data to specified number of buckets/items. -func generateDB(db *DB, numBuckets, numItemsPerBucket int) error { - return db.Update(func(tx *Tx) error { - for bucketIndex := 0; bucketIndex < numBuckets; bucketIndex++ { - bucketName := fmt.Sprintf("bucket%08d") - tx.CreateBucket([]byte(bucketName)) - bucket := tx.Bucket([]byte(bucketName)) - for i := 0; i < numItemsPerBucket; i++ { - value := []byte(strings.Repeat("0", 100)) - bucket.Put([]byte(fmt.Sprintf("key%08d", i)), value) - } - } - return nil - }) + benchmark(b, bench.BenchReadMode, bench.BenchRandomTraversal, numBuckets, numItemsPerBucket, parallelism) } // Benchmark the performance iterating over a cursor. -- cgit v1.2.3 From a42d74da7e6b3162701ae17d59647a6880ccb6bf Mon Sep 17 00:00:00 2001 From: Ben Johnson Date: Fri, 18 Apr 2014 21:37:45 -0500 Subject: Add 'bolt bench'. This commit adds a flexible benchmarking tool to the 'bolt' CLI. It allows the user to separately specify the write mode and read mode (e.g. sequential random, etc). It also allows the user to isolate profiling to either the read or the writes. Currently the bench tool only supports "seq" read and write modes. It also does not support streaming of Bolt counters yet. Fixes #95. /cc @snormore --- Makefile | 5 +- cmd/bolt/bench.go | 270 ++++++++++++++++++++++++++++++++++++++++++--- cmd/bolt/bench/bench.go | 126 --------------------- cmd/bolt/bench/config.go | 7 -- cmd/bolt/bench/generate.go | 24 ---- cmd/bolt/generate.go | 55 --------- cmd/bolt/main.go | 46 ++++---- db_test.go | 36 ------ tx_test.go | 208 ---------------------------------- 9 files changed, 281 insertions(+), 496 deletions(-) delete mode 100644 cmd/bolt/bench/bench.go delete mode 100644 cmd/bolt/bench/config.go delete mode 100644 cmd/bolt/bench/generate.go delete mode 100644 cmd/bolt/generate.go (limited to 'cmd/bolt/bench.go') diff --git a/Makefile b/Makefile index 0ed5996..2c0dc97 100644 --- a/Makefile +++ b/Makefile @@ -24,7 +24,7 @@ cpuprofile: fmt # go get github.com/kisielk/errcheck errcheck: @echo "=== errcheck ===" - @.go/bin/errcheck github.com/boltdb/bolt + @errcheck github.com/boltdb/bolt fmt: @go fmt ./... @@ -34,8 +34,7 @@ get: build: get @mkdir -p bin - @go build -ldflags=$(GOLDFLAGS) -a -o bin/bolt-`git rev-parse --short HEAD` ./cmd/bolt - @echo "writing bin/bolt-`git rev-parse --short HEAD`" + @go build -ldflags=$(GOLDFLAGS) -a -o bin/bolt ./cmd/bolt test: fmt errcheck @go get github.com/stretchr/testify/assert diff --git a/cmd/bolt/bench.go b/cmd/bolt/bench.go index 2b6ff8a..72144b8 100644 --- a/cmd/bolt/bench.go +++ b/cmd/bolt/bench.go @@ -1,31 +1,271 @@ package main import ( - "testing" + "encoding/binary" + "errors" + "fmt" + "io/ioutil" + "os" + "runtime" + "runtime/pprof" + "time" "github.com/boltdb/bolt" - "github.com/boltdb/bolt/bench" ) -// Import converts an exported database dump into a new database. -// readWriteMode: 'read' or 'write' -// traversalPattern: 'sequentrial' or 'random' -// parallelism: integer representing number of concurrent reads/writes -func Bench(inputPath string, readWriteMode string, traversalPattern string, parallelism int) { +// File handlers for the various profiles. +var cpuprofile, memprofile, blockprofile *os.File - // Open the database. - db, err := bolt.Open(inputPath, 0600) +var benchBucketName = []byte("bench") + +// Bench executes a customizable, synthetic benchmark against Bolt. +func Bench(options *BenchOptions) { + var results BenchResults + + // Find temporary location. 
+ path := tempfile() + defer os.Remove(path) + + // Create database. + db, err := bolt.Open(path, 0600) if err != nil { - fatalf("error: %+v", err) + fatal(err) return } defer db.Close() - b := bench.New(db, &bench.Config{ - ReadWriteMode: readWriteMode, - TraversalPattern: traversalPattern, - Parallelism: parallelism, + // Start profiling for writes. + if options.ProfileMode == "rw" || options.ProfileMode == "w" { + benchStartProfiling(options) + } + + // Write to the database. + if err := benchWrite(db, options, &results); err != nil { + fatal("bench: write: ", err) + } + + // Stop profiling for writes only. + if options.ProfileMode == "w" { + benchStopProfiling() + } + + // Start profiling for reads. + if options.ProfileMode == "r" { + benchStartProfiling(options) + } + + // Read from the database. + if err := benchRead(db, options, &results); err != nil { + fatal("bench: read: ", err) + } + + // Stop profiling for writes only. + if options.ProfileMode == "rw" || options.ProfileMode == "r" { + benchStopProfiling() + } + + // Print results. + fmt.Printf("# Write\t%v\t(%v/op)\t(%v op/sec)\n", results.WriteDuration, results.WriteOpDuration(), results.WriteOpsPerSecond()) + fmt.Printf("# Read\t%v\t(%v/op)\t(%v op/sec)\n", results.ReadDuration, results.ReadOpDuration(), results.ReadOpsPerSecond()) + fmt.Println("") +} + +// Writes to the database. +func benchWrite(db *bolt.DB, options *BenchOptions, results *BenchResults) error { + var err error + var t = time.Now() + + switch options.WriteMode { + case "seq": + err = benchWriteSequential(db, options, results) + default: + return fmt.Errorf("invalid write mode: %s", options.WriteMode) + } + + results.WriteDuration = time.Since(t) + + return err +} + +func benchWriteSequential(db *bolt.DB, options *BenchOptions, results *BenchResults) error { + results.WriteOps = options.Iterations + + return db.Update(func(tx *bolt.Tx) error { + b, _ := tx.CreateBucketIfNotExists(benchBucketName) + + for i := 0; i < options.Iterations; i++ { + var key = make([]byte, options.KeySize) + var value = make([]byte, options.ValueSize) + binary.BigEndian.PutUint32(key, uint32(i)) + if err := b.Put(key, value); err != nil { + return err + } + } + + return nil + }) +} + +// Reads from the database. +func benchRead(db *bolt.DB, options *BenchOptions, results *BenchResults) error { + var err error + var t = time.Now() + + switch options.ReadMode { + case "seq": + err = benchReadSequential(db, options, results) + default: + return fmt.Errorf("invalid read mode: %s", options.ReadMode) + } + + results.ReadDuration = time.Since(t) + + return err +} + +func benchReadSequential(db *bolt.DB, options *BenchOptions, results *BenchResults) error { + return db.View(func(tx *bolt.Tx) error { + var t = time.Now() + + for { + c := tx.Bucket(benchBucketName).Cursor() + var count int + for k, v := c.First(); k != nil; k, v = c.Next() { + if v == nil { + return errors.New("invalid value") + } + count++ + } + + if count != options.Iterations { + return fmt.Errorf("read seq: iter mismatch: expected %d, got %d", options.Iterations, count) + } + + results.ReadOps += count + + // Make sure we do this for at least a second. + if time.Since(t) >= time.Second { + break + } + } + + return nil }) +} + +// Starts all profiles set on the options. +func benchStartProfiling(options *BenchOptions) { + var err error + + // Start CPU profiling. 
+ if options.CPUProfile != "" { + cpuprofile, err = os.Create(options.CPUProfile) + if err != nil { + fatal("bench: could not create cpu profile %q: %v", options.CPUProfile, err) + } + pprof.StartCPUProfile(cpuprofile) + } + + // Start memory profiling. + if options.MemProfile != "" { + memprofile, err = os.Create(options.MemProfile) + if err != nil { + fatal("bench: could not create memory profile %q: %v", options.MemProfile, err) + } + runtime.MemProfileRate = 4096 + } + + // Start fatal profiling. + if options.BlockProfile != "" { + blockprofile, err = os.Create(options.BlockProfile) + if err != nil { + fatal("bench: could not create block profile %q: %v", options.BlockProfile, err) + } + runtime.SetBlockProfileRate(1) + } +} + +// Stops all profiles. +func benchStopProfiling() { + if cpuprofile != nil { + pprof.StopCPUProfile() + cpuprofile.Close() + cpuprofile = nil + } + + if memprofile != nil { + pprof.Lookup("heap").WriteTo(memprofile, 0) + memprofile.Close() + memprofile = nil + } + + if blockprofile != nil { + pprof.Lookup("block").WriteTo(blockprofile, 0) + blockprofile.Close() + blockprofile = nil + runtime.SetBlockProfileRate(0) + } +} + +// BenchOptions represents the set of options that can be passed to Bench(). +type BenchOptions struct { + ProfileMode string + WriteMode string + ReadMode string + Iterations int + KeySize int + ValueSize int + CPUProfile string + MemProfile string + BlockProfile string +} + +// BenchResults represents the performance results of the benchmark. +type BenchResults struct { + WriteOps int + WriteDuration time.Duration + ReadOps int + ReadDuration time.Duration +} + +// Returns the duration for a single write operation. +func (r *BenchResults) WriteOpDuration() time.Duration { + if r.WriteOps == 0 { + return 0 + } + return r.WriteDuration / time.Duration(r.WriteOps) +} + +// Returns average number of write operations that can be performed per second. +func (r *BenchResults) WriteOpsPerSecond() int { + var op = r.WriteOpDuration() + if op == 0 { + return 0 + } + return int(time.Second) / int(op) +} + +// Returns the duration for a single read operation. +func (r *BenchResults) ReadOpDuration() time.Duration { + if r.ReadOps == 0 { + return 0 + } + return r.ReadDuration / time.Duration(r.ReadOps) +} + +// Returns average number of read operations that can be performed per second. +func (r *BenchResults) ReadOpsPerSecond() int { + var op = r.ReadOpDuration() + if op == 0 { + return 0 + } + return int(time.Second) / int(op) +} - println(testing.Benchmark(b.Run)) +// tempfile returns a temporary file path. +func tempfile() string { + f, _ := ioutil.TempFile("", "bolt-bench-") + f.Close() + os.Remove(f.Name()) + return f.Name() } diff --git a/cmd/bolt/bench/bench.go b/cmd/bolt/bench/bench.go deleted file mode 100644 index df584f2..0000000 --- a/cmd/bolt/bench/bench.go +++ /dev/null @@ -1,126 +0,0 @@ -package bench - -import ( - "errors" - "fmt" - "sync" - "testing" - - "github.com/boltdb/bolt" -) - -const ( - BenchReadMode = "read" - BenchWriteMode = "write" - BenchSequentialTraversal = "sequential" - BenchRandomTraversal = "random" -) - -type Benchmark struct { - db *bolt.DB - config *Config -} - -func New(db *bolt.DB, config *Config) *Benchmark { - b := new(Benchmark) - b.db = db - b.config = config - return b -} - -func (bm *Benchmark) Run(b *testing.B) { - - // Read buckets and keys before benchmark begins so we don't knew the - // results. 
- buckets, err := buckets(bm.db) - if err != nil { - b.Fatalf("error: %+v", err) - } - bucketsWithKeys := make(map[string][]string) - for _, bucket := range buckets { - keys, err := keys(bm.db, bucket) - if err != nil { - b.Fatalf("error: %+v", err) - } - bucketsWithKeys[bucket] = keys - } - - b.ResetTimer() - - // Keep running a fixed number of parallel reads until we run out of time. - for i := 0; i < b.N; i++ { - var wg sync.WaitGroup - for j := 0; j < bm.config.Parallelism; j++ { - wg.Add(1) - go func() { - defer wg.Done() - if err := bm.readBuckets(b, bm.db, bucketsWithKeys); err != nil { - b.Fatalf("error: %+v", err) - } - }() - } - wg.Wait() - } -} - -// Run benchmark(s) for each of the given buckets. -func (bm *Benchmark) readBuckets(b *testing.B, db *bolt.DB, bucketsWithKeys map[string][]string) error { - return db.View(func(tx *bolt.Tx) error { - bucketsCount := len(bucketsWithKeys) - count := 0 - for bucket, keys := range bucketsWithKeys { - bucket := tx.Bucket([]byte(bucket)) - if err := bm.readKeys(b, bucket, keys); err != nil { - return err - } - count++ - } - if count != bucketsCount { - return errors.New(fmt.Sprintf("wrong count: %d; expected: %d", count, bucketsCount)) - } - return nil - }) -} - -func (bm *Benchmark) readKeys(b *testing.B, bucket *bolt.Bucket, keys []string) error { - c := bucket.Cursor() - keysCount := len(keys) - count := 0 - for k, _ := c.First(); k != nil; k, _ = c.Next() { - count++ - } - if count != keysCount { - return errors.New(fmt.Sprintf("wrong count: %d; expected: %d", count, keysCount)) - } - return nil -} - -func buckets(db *bolt.DB) ([]string, error) { - buckets := []string{} - err := db.View(func(tx *bolt.Tx) error { - // Iterate over each bucket. - return tx.ForEach(func(name []byte, _ *bolt.Bucket) error { - buckets = append(buckets, string(name)) - return nil - }) - }) - return buckets, err -} - -func keys(db *bolt.DB, bucket string) ([]string, error) { - keys := []string{} - err := db.View(func(tx *bolt.Tx) error { - // Find bucket. - b := tx.Bucket([]byte(bucket)) - if b == nil { - return errors.New(fmt.Sprintf("bucket %+v not found", b)) - } - - // Iterate over each key. - return b.ForEach(func(key, _ []byte) error { - keys = append(keys, string(key)) - return nil - }) - }) - return keys, err -} diff --git a/cmd/bolt/bench/config.go b/cmd/bolt/bench/config.go deleted file mode 100644 index dea08fd..0000000 --- a/cmd/bolt/bench/config.go +++ /dev/null @@ -1,7 +0,0 @@ -package bench - -type Config struct { - ReadWriteMode string - TraversalPattern string - Parallelism int -} diff --git a/cmd/bolt/bench/generate.go b/cmd/bolt/bench/generate.go deleted file mode 100644 index 8c5554d..0000000 --- a/cmd/bolt/bench/generate.go +++ /dev/null @@ -1,24 +0,0 @@ -package bench - -import ( - "fmt" - "strings" - - "github.com/boltdb/bolt" -) - -// Generate and write data to specified number of buckets/items. 
-func GenerateDB(db *bolt.DB, numBuckets, numItemsPerBucket int) error { - return db.Update(func(tx *bolt.Tx) error { - for bucketIndex := 0; bucketIndex < numBuckets; bucketIndex++ { - bucketName := fmt.Sprintf("bucket%08d") - tx.CreateBucket([]byte(bucketName)) - bucket := tx.Bucket([]byte(bucketName)) - for i := 0; i < numItemsPerBucket; i++ { - value := []byte(strings.Repeat("0", 100)) - bucket.Put([]byte(fmt.Sprintf("key%08d", i)), value) - } - } - return nil - }) -} diff --git a/cmd/bolt/generate.go b/cmd/bolt/generate.go deleted file mode 100644 index 15edb27..0000000 --- a/cmd/bolt/generate.go +++ /dev/null @@ -1,55 +0,0 @@ -package main - -import ( - "fmt" - - "github.com/boltdb/bolt" -) - -// Generate data for benchmarks. -func Generate(destPath string, numBuckets, numItems int) { - - // Open the database. - db, err := bolt.Open(destPath, 0600) - if err != nil { - fatalf("open db:", err) - return - } - defer db.Close() - - for bucketIndex := 0; bucketIndex < numBuckets; bucketIndex++ { - bucketName := fmt.Sprintf("bucket%03d", bucketIndex) - - err = db.Update(func(tx *bolt.Tx) error { - - // Create the bucket if it doesn't exist. - if err := tx.CreateBucketIfNotExists([]byte(bucketName)); err != nil { - fatalf("create bucket: %s", err) - return nil - } - - // Find bucket. - b := tx.Bucket([]byte(bucketName)) - if b == nil { - fatalf("bucket not found: %s", bucketName) - return nil - } - - for i := 0; i < numItems; i++ { - key := fmt.Sprintf("key%03d", i) - value := fmt.Sprintf("value%03d", i) - - // Set value for a given key. - if err := b.Put([]byte(key), []byte(value)); err != nil { - return err - } - } - - return nil - }) - } - if err != nil { - fatal(err) - return - } -} diff --git a/cmd/bolt/main.go b/cmd/bolt/main.go index ac71631..719bf00 100644 --- a/cmd/bolt/main.go +++ b/cmd/bolt/main.go @@ -6,7 +6,6 @@ import ( "fmt" "log" "os" - "strconv" "github.com/codegangsta/cli" ) @@ -91,31 +90,34 @@ func NewApp() *cli.App { Check(path) }, }, - { - Name: "generate", - Usage: "Generate data for benchmarks", - Action: func(c *cli.Context) { - destPath := c.Args().Get(0) - numBuckets, err := strconv.Atoi(c.Args().Get(1)) - if err != nil { - fatal(err) - } - numItems, err := strconv.Atoi(c.Args().Get(2)) - if err != nil { - fatal(err) - } - Generate(destPath, numBuckets, numItems) - }, - }, { Name: "bench", - Usage: "Run benchmarks on a given dataset", + Usage: "Performs a synthetic benchmark", + Flags: []cli.Flag{ + &cli.StringFlag{Name: "profile-mode", Value: "rw", Usage: "Profile mode"}, + &cli.StringFlag{Name: "write-mode", Value: "seq", Usage: "Write mode"}, + &cli.StringFlag{Name: "read-mode", Value: "seq", Usage: "Read mode"}, + &cli.IntFlag{Name: "count", Value: 1000, Usage: "Item count"}, + &cli.IntFlag{Name: "key-size", Value: 8, Usage: "Key size"}, + &cli.IntFlag{Name: "value-size", Value: 32, Usage: "Value size"}, + &cli.StringFlag{Name: "cpuprofile", Usage: "CPU profile output path"}, + &cli.StringFlag{Name: "memprofile", Usage: "Memory profile output path"}, + &cli.StringFlag{Name: "blockprofile", Usage: "Block profile output path"}, + }, Action: func(c *cli.Context) { - srcPath := c.Args().Get(0) - Bench(srcPath, "read", "sequential", 1) + Bench(&BenchOptions{ + ProfileMode: c.String("profile-mode"), + WriteMode: c.String("write-mode"), + ReadMode: c.String("read-mode"), + Iterations: c.Int("count"), + KeySize: c.Int("key-size"), + ValueSize: c.Int("value-size"), + CPUProfile: c.String("cpuprofile"), + MemProfile: c.String("memprofile"), + BlockProfile: 
c.String("blockprofile"), + }) }, - }, - } + }} return app } diff --git a/db_test.go b/db_test.go index baef98e..57c356e 100644 --- a/db_test.go +++ b/db_test.go @@ -5,11 +5,8 @@ import ( "flag" "fmt" "io/ioutil" - "math/rand" "os" "regexp" - "strconv" - "strings" "testing" "time" "unsafe" @@ -356,39 +353,6 @@ func TestDBStats_Sub(t *testing.T) { assert.Equal(t, 7, diff.TxStats.PageCount) } -// Benchmark the performance of single put transactions in random order. -func BenchmarkDB_Put_Sequential(b *testing.B) { - value := []byte(strings.Repeat("0", 64)) - withOpenDB(func(db *DB, path string) { - db.Update(func(tx *Tx) error { - _, err := tx.CreateBucket([]byte("widgets")) - return err - }) - for i := 0; i < b.N; i++ { - db.Update(func(tx *Tx) error { - return tx.Bucket([]byte("widgets")).Put([]byte(strconv.Itoa(i)), value) - }) - } - }) -} - -// Benchmark the performance of single put transactions in random order. -func BenchmarkDB_Put_Random(b *testing.B) { - indexes := rand.Perm(b.N) - value := []byte(strings.Repeat("0", 64)) - withOpenDB(func(db *DB, path string) { - db.Update(func(tx *Tx) error { - _, err := tx.CreateBucket([]byte("widgets")) - return err - }) - for i := 0; i < b.N; i++ { - db.Update(func(tx *Tx) error { - return tx.Bucket([]byte("widgets")).Put([]byte(strconv.Itoa(indexes[i])), value) - }) - } - }) -} - func ExampleDB_Update() { // Open the database. db, _ := Open(tempfile(), 0666) diff --git a/tx_test.go b/tx_test.go index 178d5aa..f630e97 100644 --- a/tx_test.go +++ b/tx_test.go @@ -3,13 +3,9 @@ package bolt import ( "errors" "fmt" - "math/rand" "os" - "strconv" - "strings" "testing" - "github.com/boltdb/bolt/bench" "github.com/stretchr/testify/assert" ) @@ -267,210 +263,6 @@ func TestTx_OnCommit_Rollback(t *testing.T) { assert.Equal(t, 0, x) } -// func BenchmarkReadSequential_1Concurrency_1Buckets_1Items(b *testing.B) { -// benchmarkReadSequential(b, 1, 1, 1) -// } -// func BenchmarkReadSequential_1Concurrency_1Buckets_10Items(b *testing.B) { -// benchmarkReadSequential(b, 1, 10, 1) -// } -// func BenchmarkReadSequential_1Concurrency_1Buckets_100Items(b *testing.B) { -// benchmarkReadSequential(b, 1, 100, 1) -// } -// func BenchmarkReadSequential_1Concurrency_1Buckets_1000Items(b *testing.B) { -// benchmarkReadSequential(b, 1, 1000, 1) -// } -// func BenchmarkReadSequential_1Concurrency_1Buckets_10000Items(b *testing.B) { -// benchmarkReadSequential(b, 1, 10000, 1) -// } - -// func BenchmarkReadSequential_10Concurrency_1Buckets_1Items(b *testing.B) { -// benchmarkReadSequential(b, 1, 1, 10) -// } -// func BenchmarkReadSequential_10Concurrency_1Buckets_10Items(b *testing.B) { -// benchmarkReadSequential(b, 1, 10, 10) -// } -// func BenchmarkReadSequential_10Concurrency_1Buckets_100Items(b *testing.B) { -// benchmarkReadSequential(b, 1, 100, 10) -// } -// func BenchmarkReadSequential_10Concurrency_1Buckets_1000Items(b *testing.B) { -// benchmarkReadSequential(b, 1, 1000, 10) -// } -// func BenchmarkReadSequential_10Concurrency_1Buckets_10000Items(b *testing.B) { -// benchmarkReadSequential(b, 1, 10000, 10) -// } - -// func BenchmarkReadSequential_100Concurrency_1Buckets_1Items(b *testing.B) { -// benchmarkReadSequential(b, 1, 1, 100) -// } -// func BenchmarkReadSequential_100Concurrency_1Buckets_10Items(b *testing.B) { -// benchmarkReadSequential(b, 1, 10, 100) -// } -// func BenchmarkReadSequential_100Concurrency_1Buckets_100Items(b *testing.B) { -// benchmarkReadSequential(b, 1, 100, 100) -// } -// func BenchmarkReadSequential_100Concurrency_1Buckets_1000Items(b 
*testing.B) { -// benchmarkReadSequential(b, 1, 1000, 100) -// } -// func BenchmarkReadSequential_100Concurrency_1Buckets_10000Items(b *testing.B) { -// benchmarkReadSequential(b, 1, 10000, 100) -// } - -// func BenchmarkReadSequential_1000Concurrency_1Buckets_1Items(b *testing.B) { -// benchmarkReadSequential(b, 1, 1, 1000) -// } -// func BenchmarkReadSequential_1000Concurrency_1Buckets_10Items(b *testing.B) { -// benchmarkReadSequential(b, 1, 10, 1000) -// } -// func BenchmarkReadSequential_1000Concurrency_1Buckets_100Items(b *testing.B) { -// benchmarkReadSequential(b, 1, 100, 1000) -// } -// func BenchmarkReadSequential_1000Concurrency_1Buckets_1000Items(b *testing.B) { -// benchmarkReadSequential(b, 1, 1000, 1000) -// } -// func BenchmarkReadSequential_1000Concurrency_1Buckets_10000Items(b *testing.B) { -// benchmarkReadSequential(b, 1, 10000, 1000) -// } - -// func BenchmarkReadSequential_10000Concurrency_1Buckets_1Items(b *testing.B) { -// benchmarkReadSequential(b, 1, 1, 10000) -// } -// func BenchmarkReadSequential_10000Concurrency_1Buckets_10Items(b *testing.B) { -// benchmarkReadSequential(b, 1, 10, 10000) -// } -// func BenchmarkReadSequential_10000Concurrency_1Buckets_100Items(b *testing.B) { -// benchmarkReadSequential(b, 1, 100, 10000) -// } -// func BenchmarkReadSequential_10000Concurrency_1Buckets_1000Items(b *testing.B) { -// benchmarkReadSequential(b, 1, 1000, 10000) -// } -// func BenchmarkReadSequential_10000Concurrency_1Buckets_10000Items(b *testing.B) { -// benchmarkReadSequential(b, 1, 10000, 10000) -// } - -// func benchmark(b *testing.B, readWriteMode, traversalPattern string, numBuckets, numItemsPerBucket, parallelism int) { -// withOpenDB(func(db *DB, path string) { -// if err := bench.GenerateDB(db, numBuckets, numItemsPerBucket); err != nil { -// b.Fatal(err) -// } -// bench.New(db, &bench.Config{ -// ReadWriteMode: readWriteMode, -// TraversalPattern: traversalPattern, -// Parallelism: parallelism, -// }).Run(b) -// }) -// } - -// func benchmarkRead(b *testing.B, traversalPattern string, numBuckets, numItemsPerBucket, parallelism int) { -// benchmark(b, bench.BenchReadMode, traversalPattern, numBuckets, numItemsPerBucket, parallelism) -// } - -// func benchmarkReadSequential(b *testing.B, numBuckets, numItemsPerBucket, parallelism int) { -// benchmark(b, bench.BenchReadMode, bench.BenchSequentialTraversal, numBuckets, numItemsPerBucket, parallelism) -// } - -// func benchmarkReadRandom(b *testing.B, numBuckets, numItemsPerBucket, parallelism int) { -// benchmark(b, bench.BenchReadMode, bench.BenchRandomTraversal, numBuckets, numItemsPerBucket, parallelism) -// } - -// Benchmark the performance iterating over a cursor. -func BenchmarkTxCursor1(b *testing.B) { benchmarkTxCursor(b, 1) } -func BenchmarkTxCursor10(b *testing.B) { benchmarkTxCursor(b, 10) } -func BenchmarkTxCursor100(b *testing.B) { benchmarkTxCursor(b, 100) } -func BenchmarkTxCursor1000(b *testing.B) { benchmarkTxCursor(b, 1000) } -func BenchmarkTxCursor10000(b *testing.B) { benchmarkTxCursor(b, 10000) } - -func benchmarkTxCursor(b *testing.B, total int) { - indexes := rand.Perm(total) - value := []byte(strings.Repeat("0", 100)) - - withOpenDB(func(db *DB, path string) { - // Write data to bucket. - db.Update(func(tx *Tx) error { - tx.CreateBucket([]byte("widgets")) - bucket := tx.Bucket([]byte("widgets")) - for i := 0; i < total; i++ { - bucket.Put([]byte(fmt.Sprintf("%016d", indexes[i])), value) - } - return nil - }) - b.ResetTimer() - - // Iterate over bucket using cursor. 
- for i := 0; i < b.N; i++ { - db.View(func(tx *Tx) error { - count := 0 - c := tx.Bucket([]byte("widgets")).Cursor() - for k, _ := c.First(); k != nil; k, _ = c.Next() { - count++ - } - if count != total { - b.Fatalf("wrong count: %d; expected: %d", count, total) - } - return nil - }) - } - }) -} - -// Benchmark the performance of bulk put transactions in random order. -func BenchmarkTxPutRandom1(b *testing.B) { benchmarkTxPutRandom(b, 1) } -func BenchmarkTxPutRandom10(b *testing.B) { benchmarkTxPutRandom(b, 10) } -func BenchmarkTxPutRandom100(b *testing.B) { benchmarkTxPutRandom(b, 100) } -func BenchmarkTxPutRandom1000(b *testing.B) { benchmarkTxPutRandom(b, 1000) } -func BenchmarkTxPutRandom10000(b *testing.B) { benchmarkTxPutRandom(b, 10000) } - -func benchmarkTxPutRandom(b *testing.B, total int) { - indexes := rand.Perm(total) - value := []byte(strings.Repeat("0", 64)) - withOpenDB(func(db *DB, path string) { - db.Update(func(tx *Tx) error { - _, err := tx.CreateBucket([]byte("widgets")) - return err - }) - var tx *Tx - var bucket *Bucket - for j := 0; j < b.N; j++ { - for i := 0; i < total; i++ { - if i%1000 == 0 { - if tx != nil { - tx.Commit() - } - tx, _ = db.Begin(true) - bucket = tx.Bucket([]byte("widgets")) - } - bucket.Put([]byte(strconv.Itoa(indexes[i])), value) - } - } - tx.Commit() - }) -} - -// Benchmark the performance of bulk put transactions in sequential order. -func BenchmarkTxPutSequential1(b *testing.B) { benchmarkTxPutSequential(b, 1) } -func BenchmarkTxPutSequential10(b *testing.B) { benchmarkTxPutSequential(b, 10) } -func BenchmarkTxPutSequential100(b *testing.B) { benchmarkTxPutSequential(b, 100) } -func BenchmarkTxPutSequential1000(b *testing.B) { benchmarkTxPutSequential(b, 1000) } -func BenchmarkTxPutSequential10000(b *testing.B) { benchmarkTxPutSequential(b, 10000) } - -func benchmarkTxPutSequential(b *testing.B, total int) { - value := []byte(strings.Repeat("0", 64)) - withOpenDB(func(db *DB, path string) { - db.Update(func(tx *Tx) error { - _, err := tx.CreateBucket([]byte("widgets")) - return err - }) - db.Update(func(tx *Tx) error { - bucket := tx.Bucket([]byte("widgets")) - for j := 0; j < b.N; j++ { - for i := 0; i < total; i++ { - bucket.Put([]byte(strconv.Itoa(i)), value) - } - } - return nil - }) - }) -} - func ExampleTx_Rollback() { // Open the database. db, _ := Open(tempfile(), 0666) -- cgit v1.2.3
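Editor's note: the cpuprofile and memprofile flags wired into the new bench command follow the standard runtime/pprof bracket, start the CPU profile before the workload runs and write the heap profile out once it finishes, which is what the profiling setup and the benchStopProfiling teardown in the patch above do. The sketch below is illustrative only; runProfiled and the cpu.pprof/mem.pprof file names are placeholders, not part of the patch, and the block profile is omitted (it works the same way via runtime.SetBlockProfileRate and pprof.Lookup("block")). An invocation of the new command would presumably look something like: bolt bench --count 10000 --write-mode seq --cpuprofile cpu.pprof --memprofile mem.pprof

package main

import (
	"log"
	"os"
	"runtime/pprof"
)

// runProfiled brackets fn with CPU and heap profiling, following the same
// runtime/pprof pattern as the profiling helpers in the patch above.
func runProfiled(cpuPath, memPath string, fn func()) error {
	// Start the CPU profile before the workload begins.
	cpu, err := os.Create(cpuPath)
	if err != nil {
		return err
	}
	defer cpu.Close()
	if err := pprof.StartCPUProfile(cpu); err != nil {
		return err
	}
	defer pprof.StopCPUProfile()

	fn()

	// The heap profile is written once, after the workload has finished.
	mem, err := os.Create(memPath)
	if err != nil {
		return err
	}
	defer mem.Close()
	return pprof.Lookup("heap").WriteTo(mem, 0)
}

func main() {
	err := runProfiled("cpu.pprof", "mem.pprof", func() {
		// Benchmark workload would go here.
	})
	if err != nil {
		log.Fatal(err)
	}
}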
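Editor's note: the BenchResults arithmetic added to cmd/bolt/bench.go can be sanity-checked by hand: 1,000 writes completing in 2 seconds work out to 2ms per operation and 500 operations per second. The self-contained sketch below duplicates the write-side methods only so the numbers can be verified in isolation; the lower-case benchResults type and the main function are illustrative copies, not part of the patch.

package main

import (
	"fmt"
	"time"
)

// benchResults mirrors the write-side fields of the BenchResults type above.
type benchResults struct {
	WriteOps      int
	WriteDuration time.Duration
}

// writeOpDuration returns the average duration of a single write operation.
func (r *benchResults) writeOpDuration() time.Duration {
	if r.WriteOps == 0 {
		return 0
	}
	return r.WriteDuration / time.Duration(r.WriteOps)
}

// writeOpsPerSecond converts the per-operation duration into throughput.
func (r *benchResults) writeOpsPerSecond() int {
	op := r.writeOpDuration()
	if op == 0 {
		return 0
	}
	return int(time.Second) / int(op)
}

func main() {
	// 1,000 writes completed in 2s -> 2ms per op -> 500 ops/sec.
	r := &benchResults{WriteOps: 1000, WriteDuration: 2 * time.Second}
	fmt.Println(r.writeOpDuration())   // 2ms
	fmt.Println(r.writeOpsPerSecond()) // 500
}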