author     Ben Johnson <benbjohnson@yahoo.com>    2014-06-02 15:26:58 -0600
committer  Ben Johnson <benbjohnson@yahoo.com>    2014-06-02 15:26:58 -0600
commit     a96185e8b69725985e48926b7b28747ddbbfe9b6 (patch)
tree       538e1f759c900a5a368d04a57a09af07f8d32099 /bucket_test.go
parent     Add ipxed to README. (diff)
Allow split nodes to be merged with the next node.
This commit changes the node.split() functionality to check whether the next node has
available space and, if so, merge the newly split keys into that node.
Previously, keys could be continually inserted into the left side of a split, causing the
first half to repeatedly split off small right-side nodes. This was especially problematic
for databases with a high fill percent.
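To illustrate the idea described above, here is a minimal, hypothetical Go sketch of splitting a node and merging the spilled keys into the next sibling when it has room. This is not the repository's actual node.split() implementation: the node type, the maxKeysPerNode threshold, and the splitMergingRight helper are invented for illustration only.

package main

import "fmt"

// node is a toy stand-in for a B+tree page; it is NOT the node type used by
// this repository, only an illustration of the split/merge behaviour from
// the commit message.
type node struct {
	keys []string
}

// maxKeysPerNode is a hypothetical capacity threshold.
const maxKeysPerNode = 4

// splitMergingRight splits n when it is over capacity. Instead of always
// creating a brand-new right-hand node, the spilled keys are merged into
// next when next has available space, so repeated inserts into the left
// side no longer produce many tiny right-side nodes. The spilled keys sort
// after n's remaining keys and before next's keys, so prepending preserves
// order. Returns a newly created node only when the merge is not possible.
func splitMergingRight(n, next *node) *node {
	if len(n.keys) <= maxKeysPerNode {
		return nil // nothing to split
	}
	spill := n.keys[maxKeysPerNode:]
	n.keys = n.keys[:maxKeysPerNode]

	// If the next sibling has room, merge the spilled keys into it.
	if next != nil && len(next.keys)+len(spill) <= maxKeysPerNode {
		next.keys = append(append([]string{}, spill...), next.keys...)
		return nil
	}

	// Otherwise fall back to creating a new right-hand node.
	return &node{keys: spill}
}

func main() {
	left := &node{keys: []string{"a", "b", "c", "d", "e"}}
	next := &node{keys: []string{"f"}}
	if created := splitMergingRight(left, next); created == nil {
		fmt.Println("spilled keys merged into next node:", next.keys)
	}
}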
Diffstat (limited to 'bucket_test.go')
-rw-r--r--  bucket_test.go  78
1 file changed, 61 insertions, 17 deletions
diff --git a/bucket_test.go b/bucket_test.go
index 9ed63ee..803e043 100644
--- a/bucket_test.go
+++ b/bucket_test.go
@@ -4,6 +4,7 @@ import (
 	"bytes"
 	"errors"
 	"fmt"
+	"math/rand"
 	"os"
 	"strconv"
 	"strings"
@@ -560,18 +561,19 @@ func TestBucket_Put_KeyTooLarge(t *testing.T) {
 // Ensure a bucket can calculate stats.
 func TestBucket_Stats(t *testing.T) {
 	withOpenDB(func(db *DB, path string) {
+		// Add bucket with fewer keys but one big value.
 		big_key := []byte("really-big-value")
+		for i := 0; i < 500; i++ {
+			db.Update(func(tx *Tx) error {
+				b, _ := tx.CreateBucketIfNotExists([]byte("woojits"))
+				return b.Put([]byte(fmt.Sprintf("%03d", i)), []byte(strconv.Itoa(i)))
+			})
+		}
 		db.Update(func(tx *Tx) error {
-			// Add bucket with fewer keys but one big value.
-			b, err := tx.CreateBucket([]byte("woojits"))
-			assert.NoError(t, err)
-			for i := 0; i < 500; i++ {
-				b.Put([]byte(fmt.Sprintf("%03d", i)), []byte(strconv.Itoa(i)))
-			}
-			b.Put(big_key, []byte(strings.Repeat("*", 10000)))
-
-			return nil
+			b, _ := tx.CreateBucketIfNotExists([]byte("woojits"))
+			return b.Put(big_key, []byte(strings.Repeat("*", 10000)))
 		})
+		mustCheck(db)
 
 		db.View(func(tx *Tx) error {
 			b := tx.Bucket([]byte("woojits"))
@@ -608,6 +610,48 @@ func TestBucket_Stats(t *testing.T) {
 	})
 }
 
+// Ensure a bucket with random insertion utilizes fill percentage correctly.
+func TestBucket_Stats_RandomFill(t *testing.T) {
+	if os.Getpagesize() != 4096 {
+		t.Skip("invalid page size for test")
+	}
+
+	withOpenDB(func(db *DB, path string) {
+		db.FillPercent = 0.9
+
+		// Add a set of values in random order. It will be the same random
+		// order so we can maintain consistency between test runs.
+		r := rand.New(rand.NewSource(42))
+		for _, i := range r.Perm(1000) {
+			db.Update(func(tx *Tx) error {
+				b, _ := tx.CreateBucketIfNotExists([]byte("woojits"))
+				for _, j := range r.Perm(100) {
+					index := (j * 10000) + i
+					b.Put([]byte(fmt.Sprintf("%d000000000000000", index)), []byte("0000000000"))
+				}
+				return nil
+			})
+			mustCheck(db)
+		}
+
+		db.View(func(tx *Tx) error {
+			s := tx.Bucket([]byte("woojits")).Stats()
+			assert.Equal(t, 100000, s.KeyN, "KeyN")
+
+			assert.Equal(t, 22, s.BranchPageN, "BranchPageN")
+			assert.Equal(t, 0, s.BranchOverflowN, "BranchOverflowN")
+			assert.Equal(t, 62963, s.BranchInuse, "BranchInuse")
+			assert.Equal(t, 90112, s.BranchAlloc, "BranchAlloc")
+
+			assert.Equal(t, 1677, s.LeafPageN, "LeafPageN")
+			assert.Equal(t, 0, s.LeafOverflowN, "LeafOverflowN")
+			assert.Equal(t, 4714722, s.LeafInuse, "LeafInuse")
+			assert.Equal(t, 6868992, s.LeafAlloc, "LeafAlloc")
+			return nil
+		})
+	})
+}
+
 // Ensure a bucket can calculate stats.
 func TestBucket_Stats_Small(t *testing.T) {
@@ -750,11 +794,11 @@ func TestBucket_Stats_Large(t *testing.T) {
 	withOpenDB(func(db *DB, path string) {
 		var index int
-		for i := 0; i < 1000; i++ {
+		for i := 0; i < 10000; i++ {
 			db.Update(func(tx *Tx) error {
 				// Add bucket with lots of keys.
 				b, _ := tx.CreateBucketIfNotExists([]byte("widgets"))
-				for i := 0; i < 100; i++ {
+				for i := 0; i < 10; i++ {
 					b.Put([]byte(strconv.Itoa(index)), []byte(strconv.Itoa(index)))
 					index++
 				}
@@ -766,18 +810,18 @@ func TestBucket_Stats_Large(t *testing.T) {
 		db.View(func(tx *Tx) error {
 			b := tx.Bucket([]byte("widgets"))
 			stats := b.Stats()
-			assert.Equal(t, 19, stats.BranchPageN, "BranchPageN")
+			assert.Equal(t, 13, stats.BranchPageN, "BranchPageN")
 			assert.Equal(t, 0, stats.BranchOverflowN, "BranchOverflowN")
-			assert.Equal(t, 1291, stats.LeafPageN, "LeafPageN")
+			assert.Equal(t, 1195, stats.LeafPageN, "LeafPageN")
 			assert.Equal(t, 0, stats.LeafOverflowN, "LeafOverflowN")
 			assert.Equal(t, 100000, stats.KeyN, "KeyN")
 			assert.Equal(t, 3, stats.Depth, "Depth")
-			assert.Equal(t, 27007, stats.BranchInuse, "BranchInuse")
-			assert.Equal(t, 2598436, stats.LeafInuse, "LeafInuse")
+			assert.Equal(t, 25208, stats.BranchInuse, "BranchInuse")
+			assert.Equal(t, 2596900, stats.LeafInuse, "LeafInuse")
 			if os.Getpagesize() == 4096 {
 				// Incompatible page size
-				assert.Equal(t, 77824, stats.BranchAlloc, "BranchAlloc")
-				assert.Equal(t, 5287936, stats.LeafAlloc, "LeafAlloc")
+				assert.Equal(t, 53248, stats.BranchAlloc, "BranchAlloc")
+				assert.Equal(t, 4894720, stats.LeafAlloc, "LeafAlloc")
 			}
 			assert.Equal(t, 1, stats.BucketN, "BucketN")
 			assert.Equal(t, 0, stats.InlineBucketN, "InlineBucketN")