about summary refs log tree commit diff
path: root/tests/dedo.go
diff options
context:
space:
mode:
Diffstat (limited to 'tests/dedo.go')
-rw-r--r--tests/dedo.go6021
1 files changed, 6021 insertions, 0 deletions
diff --git a/tests/dedo.go b/tests/dedo.go
new file mode 100644
index 0000000..8426271
--- /dev/null
+++ b/tests/dedo.go
@@ -0,0 +1,6021 @@
+package dedo
+
+import (
+ crypto "crypto/rand"
+ "bytes"
+ "io"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "log"
+ "math/rand"
+ "os"
+ "strconv"
+ "strings"
+ "testing"
+ "testing/quick"
+ "reflect"
+ "sort"
+ "flag"
+ "hash/fnv"
+ "io/ioutil"
+ "path/filepath"
+ "regexp"
+ "sync"
+ "time"
+ "unsafe"
+)
+
+// Ensure that a bucket that gets a non-existent key returns nil.
+func TestBucket_Get_NonExistent(t *testing.T) {
+ db := MustOpenDB()
+ defer db.MustClose()
+
+ if err := db.Update(func(tx *Tx) error {
+ b, err := tx.CreateBucket([]byte("widgets"))
+ if err != nil {
+ t.Fatal(err)
+ }
+ if v := b.Get([]byte("foo")); v != nil {
+ t.Fatal("expected nil value")
+ }
+ return nil
+ }); err != nil {
+ t.Fatal(err)
+ }
+}
+
+// Ensure that a bucket can read a value that is not flushed yet.
+func TestBucket_Get_FromNode(t *testing.T) {
+ db := MustOpenDB()
+ defer db.MustClose()
+
+ if err := db.Update(func(tx *Tx) error {
+ b, err := tx.CreateBucket([]byte("widgets"))
+ if err != nil {
+ t.Fatal(err)
+ }
+ if err := b.Put([]byte("foo"), []byte("bar")); err != nil {
+ t.Fatal(err)
+ }
+ if v := b.Get([]byte("foo")); !bytes.Equal(v, []byte("bar")) {
+ t.Fatalf("unexpected value: %v", v)
+ }
+ return nil
+ }); err != nil {
+ t.Fatal(err)
+ }
+}
+
// Ensure that a bucket retrieved via Get() returns a nil.
//
// "foo" is created as a nested bucket; fetching it with Get (which is for
// plain values) must yield nil rather than the bucket's serialized form.
func TestBucket_Get_IncompatibleValue(t *testing.T) {
	db := MustOpenDB()
	defer db.MustClose()
	if err := db.Update(func(tx *Tx) error {
		_, err := tx.CreateBucket([]byte("widgets"))
		if err != nil {
			t.Fatal(err)
		}

		// Create a sub-bucket keyed "foo" inside "widgets".
		if _, err := tx.Bucket([]byte("widgets")).CreateBucket([]byte("foo")); err != nil {
			t.Fatal(err)
		}

		// Get on a key that holds a bucket must return nil.
		if tx.Bucket([]byte("widgets")).Get([]byte("foo")) != nil {
			t.Fatal("expected nil value")
		}
		return nil
	}); err != nil {
		t.Fatal(err)
	}
}
+
// Ensure that a slice returned from a bucket has a capacity equal to its length.
// This also allows slices to be appended to since it will require a realloc by Go.
//
// https://github.com/boltdb/bolt/issues/544
func TestBucket_Get_Capacity(t *testing.T) {
	db := MustOpenDB()
	defer db.MustClose()

	// Write key to a bucket.
	if err := db.Update(func(tx *Tx) error {
		b, err := tx.CreateBucket([]byte("bucket"))
		if err != nil {
			return err
		}
		return b.Put([]byte("key"), []byte("val"))
	}); err != nil {
		t.Fatal(err)
	}

	// Retrieve value and attempt to append to it.
	if err := db.Update(func(tx *Tx) error {
		k, v := tx.Bucket([]byte("bucket")).Cursor().First()

		// Verify capacity: len == cap forces any append to reallocate.
		if len(k) != cap(k) {
			t.Fatalf("unexpected key slice capacity: %d", cap(k))
		} else if len(v) != cap(v) {
			t.Fatalf("unexpected value slice capacity: %d", cap(v))
		}

		// Ensure slice can be appended to without a segfault.
		// (The appends must copy rather than write past the slice into
		// the page the cursor returned a view of.)
		k = append(k, []byte("123")...)
		v = append(v, []byte("123")...)

		return nil
	}); err != nil {
		t.Fatal(err)
	}
}
+
+// Ensure that a bucket can write a key/value.
+func TestBucket_Put(t *testing.T) {
+ db := MustOpenDB()
+ defer db.MustClose()
+ if err := db.Update(func(tx *Tx) error {
+ b, err := tx.CreateBucket([]byte("widgets"))
+ if err != nil {
+ t.Fatal(err)
+ }
+ if err := b.Put([]byte("foo"), []byte("bar")); err != nil {
+ t.Fatal(err)
+ }
+
+ v := tx.Bucket([]byte("widgets")).Get([]byte("foo"))
+ if !bytes.Equal([]byte("bar"), v) {
+ t.Fatalf("unexpected value: %v", v)
+ }
+ return nil
+ }); err != nil {
+ t.Fatal(err)
+ }
+}
+
+// Ensure that a bucket can rewrite a key in the same transaction.
+func TestBucket_Put_Repeat(t *testing.T) {
+ db := MustOpenDB()
+ defer db.MustClose()
+ if err := db.Update(func(tx *Tx) error {
+ b, err := tx.CreateBucket([]byte("widgets"))
+ if err != nil {
+ t.Fatal(err)
+ }
+ if err := b.Put([]byte("foo"), []byte("bar")); err != nil {
+ t.Fatal(err)
+ }
+ if err := b.Put([]byte("foo"), []byte("baz")); err != nil {
+ t.Fatal(err)
+ }
+
+ value := tx.Bucket([]byte("widgets")).Get([]byte("foo"))
+ if !bytes.Equal([]byte("baz"), value) {
+ t.Fatalf("unexpected value: %v", value)
+ }
+ return nil
+ }); err != nil {
+ t.Fatal(err)
+ }
+}
+
// Ensure that a bucket can write a bunch of large values.
//
// Key i is i*factor bytes of '0' and its value is (count-i)*factor bytes of
// 'X', so each pair is large while total size stays constant across keys.
func TestBucket_Put_Large(t *testing.T) {
	db := MustOpenDB()
	defer db.MustClose()

	count, factor := 100, 200
	if err := db.Update(func(tx *Tx) error {
		b, err := tx.CreateBucket([]byte("widgets"))
		if err != nil {
			t.Fatal(err)
		}
		for i := 1; i < count; i++ {
			if err := b.Put([]byte(strings.Repeat("0", i*factor)), []byte(strings.Repeat("X", (count-i)*factor))); err != nil {
				t.Fatal(err)
			}
		}
		return nil
	}); err != nil {
		t.Fatal(err)
	}

	// Read every key back in a separate read-only transaction.
	if err := db.View(func(tx *Tx) error {
		b := tx.Bucket([]byte("widgets"))
		for i := 1; i < count; i++ {
			value := b.Get([]byte(strings.Repeat("0", i*factor)))
			if !bytes.Equal(value, []byte(strings.Repeat("X", (count-i)*factor))) {
				t.Fatalf("unexpected value: %v", value)
			}
		}
		return nil
	}); err != nil {
		t.Fatal(err)
	}
}
+
// Ensure that a database can perform multiple large appends safely.
func TestDB_Put_VeryLarge(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping test in short mode.")
	}

	// 400k keys written across two 200k-key transactions.
	n, batchN := 400000, 200000
	ksize, vsize := 8, 500

	db := MustOpenDB()
	defer db.MustClose()

	for i := 0; i < n; i += batchN {
		if err := db.Update(func(tx *Tx) error {
			b, err := tx.CreateBucketIfNotExists([]byte("widgets"))
			if err != nil {
				t.Fatal(err)
			}
			for j := 0; j < batchN; j++ {
				k, v := make([]byte, ksize), make([]byte, vsize)
				// The sequence number fills the first 4 bytes of the
				// 8-byte key; the remaining bytes stay zero.
				binary.BigEndian.PutUint32(k, uint32(i+j))
				if err := b.Put(k, v); err != nil {
					t.Fatal(err)
				}
			}
			return nil
		}); err != nil {
			t.Fatal(err)
		}
	}
}
+
// Ensure that setting a value on a key that holds a bucket returns an error.
func TestBucket_Put_IncompatibleValue(t *testing.T) {
	db := MustOpenDB()
	defer db.MustClose()

	if err := db.Update(func(tx *Tx) error {
		b0, err := tx.CreateBucket([]byte("widgets"))
		if err != nil {
			t.Fatal(err)
		}

		// "foo" now names a sub-bucket inside "widgets".
		if _, err := tx.Bucket([]byte("widgets")).CreateBucket([]byte("foo")); err != nil {
			t.Fatal(err)
		}
		// Overwriting a bucket key with a plain value must be rejected.
		if err := b0.Put([]byte("foo"), []byte("bar")); err != ErrIncompatibleValue {
			t.Fatalf("unexpected error: %s", err)
		}
		return nil
	}); err != nil {
		t.Fatal(err)
	}
}
+
+// Ensure that a setting a value while the transaction is closed returns an error.
+func TestBucket_Put_Closed(t *testing.T) {
+ db := MustOpenDB()
+ defer db.MustClose()
+ tx, err := db.Begin(true)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ b, err := tx.CreateBucket([]byte("widgets"))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if err := tx.Rollback(); err != nil {
+ t.Fatal(err)
+ }
+
+ if err := b.Put([]byte("foo"), []byte("bar")); err != ErrTxClosed {
+ t.Fatalf("unexpected error: %s", err)
+ }
+}
+
// Ensure that setting a value on a read-only bucket returns an error.
func TestBucket_Put_ReadOnly(t *testing.T) {
	db := MustOpenDB()
	defer db.MustClose()

	// Create the bucket in a writable transaction first.
	if err := db.Update(func(tx *Tx) error {
		if _, err := tx.CreateBucket([]byte("widgets")); err != nil {
			t.Fatal(err)
		}
		return nil
	}); err != nil {
		t.Fatal(err)
	}

	// A write inside a View (read-only) transaction must fail.
	if err := db.View(func(tx *Tx) error {
		b := tx.Bucket([]byte("widgets"))
		if err := b.Put([]byte("foo"), []byte("bar")); err != ErrTxNotWritable {
			t.Fatalf("unexpected error: %s", err)
		}
		return nil
	}); err != nil {
		t.Fatal(err)
	}
}
+
+// Ensure that a bucket can delete an existing key.
+func TestBucket_Delete(t *testing.T) {
+ db := MustOpenDB()
+ defer db.MustClose()
+
+ if err := db.Update(func(tx *Tx) error {
+ b, err := tx.CreateBucket([]byte("widgets"))
+ if err != nil {
+ t.Fatal(err)
+ }
+ if err := b.Put([]byte("foo"), []byte("bar")); err != nil {
+ t.Fatal(err)
+ }
+ if err := b.Delete([]byte("foo")); err != nil {
+ t.Fatal(err)
+ }
+ if v := b.Get([]byte("foo")); v != nil {
+ t.Fatalf("unexpected value: %v", v)
+ }
+ return nil
+ }); err != nil {
+ t.Fatal(err)
+ }
+}
+
// Ensure that deleting a large set of keys will work correctly.
func TestBucket_Delete_Large(t *testing.T) {
	db := MustOpenDB()
	defer db.MustClose()

	// Seed 100 keys, each with a 1KB value, so the bucket spans pages.
	if err := db.Update(func(tx *Tx) error {
		b, err := tx.CreateBucket([]byte("widgets"))
		if err != nil {
			t.Fatal(err)
		}

		for i := 0; i < 100; i++ {
			if err := b.Put([]byte(strconv.Itoa(i)), []byte(strings.Repeat("*", 1024))); err != nil {
				t.Fatal(err)
			}
		}

		return nil
	}); err != nil {
		t.Fatal(err)
	}

	// Delete every key in a second writable transaction.
	if err := db.Update(func(tx *Tx) error {
		b := tx.Bucket([]byte("widgets"))
		for i := 0; i < 100; i++ {
			if err := b.Delete([]byte(strconv.Itoa(i))); err != nil {
				t.Fatal(err)
			}
		}
		return nil
	}); err != nil {
		t.Fatal(err)
	}

	// Verify in a read-only transaction that nothing remains.
	if err := db.View(func(tx *Tx) error {
		b := tx.Bucket([]byte("widgets"))
		for i := 0; i < 100; i++ {
			if v := b.Get([]byte(strconv.Itoa(i))); v != nil {
				t.Fatalf("unexpected value: %v, i=%d", v, i)
			}
		}
		return nil
	}); err != nil {
		t.Fatal(err)
	}
}
+
// Deleting a very large list of keys will cause the freelist to use overflow.
func TestBucket_Delete_FreelistOverflow(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping test in short mode.")
	}

	db := MustOpenDB()
	defer db.MustClose()

	// Insert 10M keys (10k transactions of 1k keys each) so the delete
	// below frees more pages than fit on a single freelist page.
	k := make([]byte, 16)
	for i := uint64(0); i < 10000; i++ {
		if err := db.Update(func(tx *Tx) error {
			b, err := tx.CreateBucketIfNotExists([]byte("0"))
			if err != nil {
				t.Fatalf("bucket error: %s", err)
			}

			for j := uint64(0); j < 1000; j++ {
				// Key is the (i, j) pair as two big-endian uint64s.
				binary.BigEndian.PutUint64(k[:8], i)
				binary.BigEndian.PutUint64(k[8:], j)
				if err := b.Put(k, nil); err != nil {
					t.Fatalf("put error: %s", err)
				}
			}

			return nil
		}); err != nil {
			t.Fatal(err)
		}
	}

	// Delete all of them in one large transaction
	if err := db.Update(func(tx *Tx) error {
		b := tx.Bucket([]byte("0"))
		c := b.Cursor()
		for k, _ := c.First(); k != nil; k, _ = c.Next() {
			if err := c.Delete(); err != nil {
				t.Fatal(err)
			}
		}
		return nil
	}); err != nil {
		t.Fatal(err)
	}
}
+
// Ensure that accessing and updating nested buckets is ok across transactions.
//
// Exercises a mix of nested-bucket creation, sibling key updates, a page
// split, and a nested write, running the DB consistency check after each
// transaction.
func TestBucket_Nested(t *testing.T) {
	db := MustOpenDB()
	defer db.MustClose()

	if err := db.Update(func(tx *Tx) error {
		// Create a widgets bucket.
		b, err := tx.CreateBucket([]byte("widgets"))
		if err != nil {
			t.Fatal(err)
		}

		// Create a widgets/foo bucket.
		_, err = b.CreateBucket([]byte("foo"))
		if err != nil {
			t.Fatal(err)
		}

		// Create a widgets/bar key.
		if err := b.Put([]byte("bar"), []byte("0000")); err != nil {
			t.Fatal(err)
		}

		return nil
	}); err != nil {
		t.Fatal(err)
	}
	db.MustCheck()

	// Update widgets/bar.
	if err := db.Update(func(tx *Tx) error {
		b := tx.Bucket([]byte("widgets"))
		if err := b.Put([]byte("bar"), []byte("xxxx")); err != nil {
			t.Fatal(err)
		}
		return nil
	}); err != nil {
		t.Fatal(err)
	}
	db.MustCheck()

	// Cause a split: 10k keys pushes widgets beyond one page.
	if err := db.Update(func(tx *Tx) error {
		var b = tx.Bucket([]byte("widgets"))
		for i := 0; i < 10000; i++ {
			if err := b.Put([]byte(strconv.Itoa(i)), []byte(strconv.Itoa(i))); err != nil {
				t.Fatal(err)
			}
		}
		return nil
	}); err != nil {
		t.Fatal(err)
	}
	db.MustCheck()

	// Insert into widgets/foo/baz.
	if err := db.Update(func(tx *Tx) error {
		var b = tx.Bucket([]byte("widgets"))
		if err := b.Bucket([]byte("foo")).Put([]byte("baz"), []byte("yyyy")); err != nil {
			t.Fatal(err)
		}
		return nil
	}); err != nil {
		t.Fatal(err)
	}
	db.MustCheck()

	// Verify the nested key, the updated key, and all keys from the split.
	if err := db.View(func(tx *Tx) error {
		var b = tx.Bucket([]byte("widgets"))
		if v := b.Bucket([]byte("foo")).Get([]byte("baz")); !bytes.Equal(v, []byte("yyyy")) {
			t.Fatalf("unexpected value: %v", v)
		}
		if v := b.Get([]byte("bar")); !bytes.Equal(v, []byte("xxxx")) {
			t.Fatalf("unexpected value: %v", v)
		}
		for i := 0; i < 10000; i++ {
			if v := b.Get([]byte(strconv.Itoa(i))); !bytes.Equal(v, []byte(strconv.Itoa(i))) {
				t.Fatalf("unexpected value: %v", v)
			}
		}
		return nil
	}); err != nil {
		t.Fatal(err)
	}
}
+
+// Ensure that deleting a bucket using Delete() returns an error.
+func TestBucket_Delete_Bucket(t *testing.T) {
+ db := MustOpenDB()
+ defer db.MustClose()
+ if err := db.Update(func(tx *Tx) error {
+ b, err := tx.CreateBucket([]byte("widgets"))
+ if err != nil {
+ t.Fatal(err)
+ }
+ if _, err := b.CreateBucket([]byte("foo")); err != nil {
+ t.Fatal(err)
+ }
+ if err := b.Delete([]byte("foo")); err != ErrIncompatibleValue {
+ t.Fatalf("unexpected error: %s", err)
+ }
+ return nil
+ }); err != nil {
+ t.Fatal(err)
+ }
+}
+
// Ensure that deleting a key on a read-only bucket returns an error.
func TestBucket_Delete_ReadOnly(t *testing.T) {
	db := MustOpenDB()
	defer db.MustClose()

	// Create the bucket in a writable transaction first.
	if err := db.Update(func(tx *Tx) error {
		if _, err := tx.CreateBucket([]byte("widgets")); err != nil {
			t.Fatal(err)
		}
		return nil
	}); err != nil {
		t.Fatal(err)
	}

	// A delete inside a View (read-only) transaction must fail.
	if err := db.View(func(tx *Tx) error {
		if err := tx.Bucket([]byte("widgets")).Delete([]byte("foo")); err != ErrTxNotWritable {
			t.Fatalf("unexpected error: %s", err)
		}
		return nil
	}); err != nil {
		t.Fatal(err)
	}
}
+
+// Ensure that a deleting value while the transaction is closed returns an error.
+func TestBucket_Delete_Closed(t *testing.T) {
+ db := MustOpenDB()
+ defer db.MustClose()
+
+ tx, err := db.Begin(true)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ b, err := tx.CreateBucket([]byte("widgets"))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if err := tx.Rollback(); err != nil {
+ t.Fatal(err)
+ }
+ if err := b.Delete([]byte("foo")); err != ErrTxClosed {
+ t.Fatalf("unexpected error: %s", err)
+ }
+}
+
// Ensure that deleting a bucket causes nested buckets to be deleted.
func TestBucket_DeleteBucket_Nested(t *testing.T) {
	db := MustOpenDB()
	defer db.MustClose()

	if err := db.Update(func(tx *Tx) error {
		// Build a widgets/foo/bar hierarchy holding one key.
		widgets, err := tx.CreateBucket([]byte("widgets"))
		if err != nil {
			t.Fatal(err)
		}

		foo, err := widgets.CreateBucket([]byte("foo"))
		if err != nil {
			t.Fatal(err)
		}

		bar, err := foo.CreateBucket([]byte("bar"))
		if err != nil {
			t.Fatal(err)
		}
		if err := bar.Put([]byte("baz"), []byte("bat")); err != nil {
			t.Fatal(err)
		}
		// Deleting "foo" must also remove its descendant "bar".
		if err := tx.Bucket([]byte("widgets")).DeleteBucket([]byte("foo")); err != nil {
			t.Fatal(err)
		}
		return nil
	}); err != nil {
		t.Fatal(err)
	}
}
+
// Ensure that deleting a bucket causes nested buckets to be deleted after they have been committed.
func TestBucket_DeleteBucket_Nested2(t *testing.T) {
	db := MustOpenDB()
	defer db.MustClose()

	// Build and commit the widgets/foo/bar hierarchy with one key.
	if err := db.Update(func(tx *Tx) error {
		widgets, err := tx.CreateBucket([]byte("widgets"))
		if err != nil {
			t.Fatal(err)
		}

		foo, err := widgets.CreateBucket([]byte("foo"))
		if err != nil {
			t.Fatal(err)
		}

		bar, err := foo.CreateBucket([]byte("bar"))
		if err != nil {
			t.Fatal(err)
		}

		if err := bar.Put([]byte("baz"), []byte("bat")); err != nil {
			t.Fatal(err)
		}
		return nil
	}); err != nil {
		t.Fatal(err)
	}

	// In a second transaction, verify the committed hierarchy and then
	// delete the root bucket.
	if err := db.Update(func(tx *Tx) error {
		widgets := tx.Bucket([]byte("widgets"))
		if widgets == nil {
			t.Fatal("expected widgets bucket")
		}

		foo := widgets.Bucket([]byte("foo"))
		if foo == nil {
			t.Fatal("expected foo bucket")
		}

		bar := foo.Bucket([]byte("bar"))
		if bar == nil {
			t.Fatal("expected bar bucket")
		}

		if v := bar.Get([]byte("baz")); !bytes.Equal(v, []byte("bat")) {
			t.Fatalf("unexpected value: %v", v)
		}
		if err := tx.DeleteBucket([]byte("widgets")); err != nil {
			t.Fatal(err)
		}
		return nil
	}); err != nil {
		t.Fatal(err)
	}

	// The root (and implicitly everything nested under it) must be gone.
	if err := db.View(func(tx *Tx) error {
		if tx.Bucket([]byte("widgets")) != nil {
			t.Fatal("expected bucket to be deleted")
		}
		return nil
	}); err != nil {
		t.Fatal(err)
	}
}
+
// Ensure that deleting a child bucket with multiple pages causes all pages to get collected.
// NOTE: Consistency check in bolt_test.DB.Close() will panic if pages not freed properly.
func TestBucket_DeleteBucket_Large(t *testing.T) {
	db := MustOpenDB()
	defer db.MustClose()

	// Fill widgets/foo with 1000 keys so the child spans multiple pages.
	if err := db.Update(func(tx *Tx) error {
		widgets, err := tx.CreateBucket([]byte("widgets"))
		if err != nil {
			t.Fatal(err)
		}

		foo, err := widgets.CreateBucket([]byte("foo"))
		if err != nil {
			t.Fatal(err)
		}

		for i := 0; i < 1000; i++ {
			if err := foo.Put([]byte(fmt.Sprintf("%d", i)), []byte(fmt.Sprintf("%0100d", i))); err != nil {
				t.Fatal(err)
			}
		}
		return nil
	}); err != nil {
		t.Fatal(err)
	}

	// Deleting the root must free every page of the child bucket; the
	// deferred MustClose consistency check catches any leak.
	if err := db.Update(func(tx *Tx) error {
		if err := tx.DeleteBucket([]byte("widgets")); err != nil {
			t.Fatal(err)
		}
		return nil
	}); err != nil {
		t.Fatal(err)
	}
}
+
// Ensure that a simple value retrieved via Bucket() returns a nil.
func TestBucket_Bucket_IncompatibleValue(t *testing.T) {
	db := MustOpenDB()
	defer db.MustClose()

	if err := db.Update(func(tx *Tx) error {
		widgets, err := tx.CreateBucket([]byte("widgets"))
		if err != nil {
			t.Fatal(err)
		}

		// "foo" holds a plain value, so looking it up as a sub-bucket
		// must yield nil.
		if err := widgets.Put([]byte("foo"), []byte("bar")); err != nil {
			t.Fatal(err)
		}
		if b := tx.Bucket([]byte("widgets")).Bucket([]byte("foo")); b != nil {
			t.Fatal("expected nil bucket")
		}
		return nil
	}); err != nil {
		t.Fatal(err)
	}
}
+
// Ensure that creating a bucket on an existing non-bucket key returns an error.
func TestBucket_CreateBucket_IncompatibleValue(t *testing.T) {
	db := MustOpenDB()
	defer db.MustClose()
	if err := db.Update(func(tx *Tx) error {
		widgets, err := tx.CreateBucket([]byte("widgets"))
		if err != nil {
			t.Fatal(err)
		}

		// "foo" already holds a plain value; creating a sub-bucket under
		// the same key must fail with ErrIncompatibleValue.
		if err := widgets.Put([]byte("foo"), []byte("bar")); err != nil {
			t.Fatal(err)
		}
		if _, err := widgets.CreateBucket([]byte("foo")); err != ErrIncompatibleValue {
			t.Fatalf("unexpected error: %s", err)
		}
		return nil
	}); err != nil {
		t.Fatal(err)
	}
}
+
// Ensure that deleting a bucket on an existing non-bucket key returns an error.
func TestBucket_DeleteBucket_IncompatibleValue(t *testing.T) {
	db := MustOpenDB()
	defer db.MustClose()

	if err := db.Update(func(tx *Tx) error {
		widgets, err := tx.CreateBucket([]byte("widgets"))
		if err != nil {
			t.Fatal(err)
		}
		if err := widgets.Put([]byte("foo"), []byte("bar")); err != nil {
			t.Fatal(err)
		}
		// "foo" holds a plain value; DeleteBucket on it must fail.
		if err := tx.Bucket([]byte("widgets")).DeleteBucket([]byte("foo")); err != ErrIncompatibleValue {
			t.Fatalf("unexpected error: %s", err)
		}
		return nil
	}); err != nil {
		t.Fatal(err)
	}
}
+
// Ensure bucket can set and update its sequence number.
func TestBucket_Sequence(t *testing.T) {
	db := MustOpenDB()
	defer db.MustClose()

	if err := db.Update(func(tx *Tx) error {
		bkt, err := tx.CreateBucket([]byte("0"))
		if err != nil {
			t.Fatal(err)
		}

		// Retrieve sequence: a fresh bucket starts at 0.
		if v := bkt.Sequence(); v != 0 {
			t.Fatalf("unexpected sequence: %d", v)
		}

		// Update sequence.
		if err := bkt.SetSequence(1000); err != nil {
			t.Fatal(err)
		}

		// Read sequence again.
		if v := bkt.Sequence(); v != 1000 {
			t.Fatalf("unexpected sequence: %d", v)
		}

		return nil
	}); err != nil {
		t.Fatal(err)
	}

	// Verify sequence in separate transaction (i.e. it was committed).
	if err := db.View(func(tx *Tx) error {
		if v := tx.Bucket([]byte("0")).Sequence(); v != 1000 {
			t.Fatalf("unexpected sequence: %d", v)
		}
		return nil
	}); err != nil {
		t.Fatal(err)
	}
}
+
+// Ensure that a bucket can return an autoincrementing sequence.
+func TestBucket_NextSequence(t *testing.T) {
+ db := MustOpenDB()
+ defer db.MustClose()
+
+ if err := db.Update(func(tx *Tx) error {
+ widgets, err := tx.CreateBucket([]byte("widgets"))
+ if err != nil {
+ t.Fatal(err)
+ }
+ woojits, err := tx.CreateBucket([]byte("woojits"))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Make sure sequence increments.
+ if seq, err := widgets.NextSequence(); err != nil {
+ t.Fatal(err)
+ } else if seq != 1 {
+ t.Fatalf("unexpecte sequence: %d", seq)
+ }
+
+ if seq, err := widgets.NextSequence(); err != nil {
+ t.Fatal(err)
+ } else if seq != 2 {
+ t.Fatalf("unexpected sequence: %d", seq)
+ }
+
+ // Buckets should be separate.
+ if seq, err := woojits.NextSequence(); err != nil {
+ t.Fatal(err)
+ } else if seq != 1 {
+ t.Fatalf("unexpected sequence: %d", 1)
+ }
+
+ return nil
+ }); err != nil {
+ t.Fatal(err)
+ }
+}
+
// Ensure that a bucket will persist an autoincrementing sequence even if its
// the only thing updated on the bucket.
// https://github.com/boltdb/bolt/issues/296
func TestBucket_NextSequence_Persist(t *testing.T) {
	db := MustOpenDB()
	defer db.MustClose()

	// Create the bucket.
	if err := db.Update(func(tx *Tx) error {
		if _, err := tx.CreateBucket([]byte("widgets")); err != nil {
			t.Fatal(err)
		}
		return nil
	}); err != nil {
		t.Fatal(err)
	}

	// Bump the sequence and nothing else; the bump alone must be committed.
	if err := db.Update(func(tx *Tx) error {
		if _, err := tx.Bucket([]byte("widgets")).NextSequence(); err != nil {
			t.Fatal(err)
		}
		return nil
	}); err != nil {
		t.Fatal(err)
	}

	// If the previous bump persisted, the next value is 2.
	if err := db.Update(func(tx *Tx) error {
		seq, err := tx.Bucket([]byte("widgets")).NextSequence()
		if err != nil {
			t.Fatalf("unexpected error: %s", err)
		} else if seq != 2 {
			t.Fatalf("unexpected sequence: %d", seq)
		}
		return nil
	}); err != nil {
		t.Fatal(err)
	}
}
+
// Ensure that retrieving the next sequence on a read-only bucket returns an error.
func TestBucket_NextSequence_ReadOnly(t *testing.T) {
	db := MustOpenDB()
	defer db.MustClose()

	// Create the bucket in a writable transaction first.
	if err := db.Update(func(tx *Tx) error {
		if _, err := tx.CreateBucket([]byte("widgets")); err != nil {
			t.Fatal(err)
		}
		return nil
	}); err != nil {
		t.Fatal(err)
	}

	// NextSequence mutates the bucket, so it must fail inside View.
	if err := db.View(func(tx *Tx) error {
		_, err := tx.Bucket([]byte("widgets")).NextSequence()
		if err != ErrTxNotWritable {
			t.Fatalf("unexpected error: %s", err)
		}
		return nil
	}); err != nil {
		t.Fatal(err)
	}
}
+
+// Ensure that retrieving the next sequence for a bucket on a closed database return an error.
+func TestBucket_NextSequence_Closed(t *testing.T) {
+ db := MustOpenDB()
+ defer db.MustClose()
+ tx, err := db.Begin(true)
+ if err != nil {
+ t.Fatal(err)
+ }
+ b, err := tx.CreateBucket([]byte("widgets"))
+ if err != nil {
+ t.Fatal(err)
+ }
+ if err := tx.Rollback(); err != nil {
+ t.Fatal(err)
+ }
+ if _, err := b.NextSequence(); err != ErrTxClosed {
+ t.Fatal(err)
+ }
+}
+
// Ensure a user can loop over all key/value pairs in a bucket.
//
// Keys are inserted out of order (foo, baz, bar); ForEach must visit them
// in sorted key order (bar, baz, foo).
func TestBucket_ForEach(t *testing.T) {
	db := MustOpenDB()
	defer db.MustClose()

	if err := db.Update(func(tx *Tx) error {
		b, err := tx.CreateBucket([]byte("widgets"))
		if err != nil {
			t.Fatal(err)
		}
		if err := b.Put([]byte("foo"), []byte("0000")); err != nil {
			t.Fatal(err)
		}
		if err := b.Put([]byte("baz"), []byte("0001")); err != nil {
			t.Fatal(err)
		}
		if err := b.Put([]byte("bar"), []byte("0002")); err != nil {
			t.Fatal(err)
		}

		// Check each visited pair against the expected sorted position.
		var index int
		if err := b.ForEach(func(k, v []byte) error {
			switch index {
			case 0:
				if !bytes.Equal(k, []byte("bar")) {
					t.Fatalf("unexpected key: %v", k)
				} else if !bytes.Equal(v, []byte("0002")) {
					t.Fatalf("unexpected value: %v", v)
				}
			case 1:
				if !bytes.Equal(k, []byte("baz")) {
					t.Fatalf("unexpected key: %v", k)
				} else if !bytes.Equal(v, []byte("0001")) {
					t.Fatalf("unexpected value: %v", v)
				}
			case 2:
				if !bytes.Equal(k, []byte("foo")) {
					t.Fatalf("unexpected key: %v", k)
				} else if !bytes.Equal(v, []byte("0000")) {
					t.Fatalf("unexpected value: %v", v)
				}
			}
			index++
			return nil
		}); err != nil {
			t.Fatal(err)
		}

		// All three pairs must have been visited exactly once.
		if index != 3 {
			t.Fatalf("unexpected index: %d", index)
		}

		return nil
	}); err != nil {
		t.Fatal(err)
	}
}
+
// Ensure a database can stop iteration early.
//
// Returning a non-nil error from the ForEach callback must abort iteration
// and be propagated back to the caller.
func TestBucket_ForEach_ShortCircuit(t *testing.T) {
	db := MustOpenDB()
	defer db.MustClose()
	if err := db.Update(func(tx *Tx) error {
		b, err := tx.CreateBucket([]byte("widgets"))
		if err != nil {
			t.Fatal(err)
		}
		if err := b.Put([]byte("bar"), []byte("0000")); err != nil {
			t.Fatal(err)
		}
		if err := b.Put([]byte("baz"), []byte("0000")); err != nil {
			t.Fatal(err)
		}
		if err := b.Put([]byte("foo"), []byte("0000")); err != nil {
			t.Fatal(err)
		}

		// "baz" is the second key in sorted order, so iteration must stop
		// after exactly two callback invocations.
		var index int
		if err := tx.Bucket([]byte("widgets")).ForEach(func(k, v []byte) error {
			index++
			if bytes.Equal(k, []byte("baz")) {
				return errors.New("marker")
			}
			return nil
		}); err == nil || err.Error() != "marker" {
			t.Fatalf("unexpected error: %s", err)
		}
		if index != 2 {
			t.Fatalf("unexpected index: %d", index)
		}

		return nil
	}); err != nil {
		t.Fatal(err)
	}
}
+
+// Ensure that looping over a bucket on a closed database returns an error.
+func TestBucket_ForEach_Closed(t *testing.T) {
+ db := MustOpenDB()
+ defer db.MustClose()
+
+ tx, err := db.Begin(true)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ b, err := tx.CreateBucket([]byte("widgets"))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if err := tx.Rollback(); err != nil {
+ t.Fatal(err)
+ }
+
+ if err := b.ForEach(func(k, v []byte) error { return nil }); err != ErrTxClosed {
+ t.Fatalf("unexpected error: %s", err)
+ }
+}
+
+// Ensure that an error is returned when inserting with an empty key.
+func TestBucket_Put_EmptyKey(t *testing.T) {
+ db := MustOpenDB()
+ defer db.MustClose()
+
+ if err := db.Update(func(tx *Tx) error {
+ b, err := tx.CreateBucket([]byte("widgets"))
+ if err != nil {
+ t.Fatal(err)
+ }
+ if err := b.Put([]byte(""), []byte("bar")); err != ErrKeyRequired {
+ t.Fatalf("unexpected error: %s", err)
+ }
+ if err := b.Put(nil, []byte("bar")); err != ErrKeyRequired {
+ t.Fatalf("unexpected error: %s", err)
+ }
+ return nil
+ }); err != nil {
+ t.Fatal(err)
+ }
+}
+
+// Ensure that an error is returned when inserting with a key that's too large.
+func TestBucket_Put_KeyTooLarge(t *testing.T) {
+ db := MustOpenDB()
+ defer db.MustClose()
+ if err := db.Update(func(tx *Tx) error {
+ b, err := tx.CreateBucket([]byte("widgets"))
+ if err != nil {
+ t.Fatal(err)
+ }
+ if err := b.Put(make([]byte, 32769), []byte("bar")); err != ErrKeyTooLarge {
+ t.Fatalf("unexpected error: %s", err)
+ }
+ return nil
+ }); err != nil {
+ t.Fatal(err)
+ }
+}
+
// Ensure that an error is returned when inserting a value that's too large.
func TestBucket_Put_ValueTooLarge(t *testing.T) {
	// Skip this test on DroneCI because the machine is resource constrained.
	if os.Getenv("DRONE") == "true" {
		t.Skip("not enough RAM for test")
	}

	db := MustOpenDB()
	defer db.MustClose()

	if err := db.Update(func(tx *Tx) error {
		b, err := tx.CreateBucket([]byte("widgets"))
		if err != nil {
			t.Fatal(err)
		}
		// One byte past MaxValueSize must be rejected.
		if err := b.Put([]byte("foo"), make([]byte, MaxValueSize+1)); err != ErrValueTooLarge {
			t.Fatalf("unexpected error: %s", err)
		}
		return nil
	}); err != nil {
		t.Fatal(err)
	}
}
+
// Ensure a bucket can calculate stats.
//
// 500 small keys plus one 10KB value ("really-big-value") produce a
// two-level tree with leaf overflow pages; the expected page counts and
// in-use byte totals below are derived from that layout.
func TestBucket_Stats(t *testing.T) {
	db := MustOpenDB()
	defer db.MustClose()

	// Add bucket with fewer keys but one big value.
	bigKey := []byte("really-big-value")
	for i := 0; i < 500; i++ {
		if err := db.Update(func(tx *Tx) error {
			b, err := tx.CreateBucketIfNotExists([]byte("woojits"))
			if err != nil {
				t.Fatal(err)
			}

			if err := b.Put([]byte(fmt.Sprintf("%03d", i)), []byte(strconv.Itoa(i))); err != nil {
				t.Fatal(err)
			}
			return nil
		}); err != nil {
			t.Fatal(err)
		}
	}
	// One 10KB value, which requires overflow pages.
	if err := db.Update(func(tx *Tx) error {
		if err := tx.Bucket([]byte("woojits")).Put(bigKey, []byte(strings.Repeat("*", 10000))); err != nil {
			t.Fatal(err)
		}
		return nil
	}); err != nil {
		t.Fatal(err)
	}

	db.MustCheck()

	if err := db.View(func(tx *Tx) error {
		stats := tx.Bucket([]byte("woojits")).Stats()
		if stats.BranchPageN != 1 {
			t.Fatalf("unexpected BranchPageN: %d", stats.BranchPageN)
		} else if stats.BranchOverflowN != 0 {
			t.Fatalf("unexpected BranchOverflowN: %d", stats.BranchOverflowN)
		} else if stats.LeafPageN != 7 {
			t.Fatalf("unexpected LeafPageN: %d", stats.LeafPageN)
		} else if stats.LeafOverflowN != 2 {
			t.Fatalf("unexpected LeafOverflowN: %d", stats.LeafOverflowN)
		} else if stats.KeyN != 501 {
			t.Fatalf("unexpected KeyN: %d", stats.KeyN)
		} else if stats.Depth != 2 {
			t.Fatalf("unexpected Depth: %d", stats.Depth)
		}

		branchInuse := 16     // branch page header
		branchInuse += 7 * 16 // branch elements
		branchInuse += 7 * 3  // branch keys (7 3-byte keys)
		if stats.BranchInuse != branchInuse {
			t.Fatalf("unexpected BranchInuse: %d", stats.BranchInuse)
		}

		leafInuse := 7 * 16                      // leaf page headers (7 pages)
		leafInuse += 501 * 16                    // leaf elements
		leafInuse += 500*3 + len(bigKey)         // leaf keys
		leafInuse += 1*10 + 2*90 + 3*400 + 10000 // leaf values
		if stats.LeafInuse != leafInuse {
			t.Fatalf("unexpected LeafInuse: %d", stats.LeafInuse)
		}

		// Only check allocations for 4KB pages.
		if os.Getpagesize() == 4096 {
			if stats.BranchAlloc != 4096 {
				t.Fatalf("unexpected BranchAlloc: %d", stats.BranchAlloc)
			} else if stats.LeafAlloc != 36864 {
				t.Fatalf("unexpected LeafAlloc: %d", stats.LeafAlloc)
			}
		}

		if stats.BucketN != 1 {
			t.Fatalf("unexpected BucketN: %d", stats.BucketN)
		} else if stats.InlineBucketN != 0 {
			t.Fatalf("unexpected InlineBucketN: %d", stats.InlineBucketN)
		} else if stats.InlineBucketInuse != 0 {
			t.Fatalf("unexpected InlineBucketInuse: %d", stats.InlineBucketInuse)
		}

		return nil
	}); err != nil {
		t.Fatal(err)
	}
}
+
// Ensure a bucket with random insertion utilizes fill percentage correctly.
//
// The expected page/byte counts are only valid for 4KB pages, hence the
// page-size guard.
func TestBucket_Stats_RandomFill(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping test in short mode.")
	} else if os.Getpagesize() != 4096 {
		t.Skip("invalid page size for test")
	}

	db := MustOpenDB()
	defer db.MustClose()

	// Add a set of values in random order. It will be the same random
	// order so we can maintain consistency between test runs.
	// (The fixed seed makes all the exact stat checks below deterministic.)
	var count int
	rand := rand.New(rand.NewSource(42))
	for _, i := range rand.Perm(1000) {
		if err := db.Update(func(tx *Tx) error {
			b, err := tx.CreateBucketIfNotExists([]byte("woojits"))
			if err != nil {
				t.Fatal(err)
			}
			// Use a high fill percent so split pages are packed tighter.
			b.FillPercent = 0.9
			for _, j := range rand.Perm(100) {
				index := (j * 10000) + i
				if err := b.Put([]byte(fmt.Sprintf("%d000000000000000", index)), []byte("0000000000")); err != nil {
					t.Fatal(err)
				}
				count++
			}
			return nil
		}); err != nil {
			t.Fatal(err)
		}
	}

	db.MustCheck()

	if err := db.View(func(tx *Tx) error {
		stats := tx.Bucket([]byte("woojits")).Stats()
		if stats.KeyN != 100000 {
			t.Fatalf("unexpected KeyN: %d", stats.KeyN)
		}

		if stats.BranchPageN != 98 {
			t.Fatalf("unexpected BranchPageN: %d", stats.BranchPageN)
		} else if stats.BranchOverflowN != 0 {
			t.Fatalf("unexpected BranchOverflowN: %d", stats.BranchOverflowN)
		} else if stats.BranchInuse != 130984 {
			t.Fatalf("unexpected BranchInuse: %d", stats.BranchInuse)
		} else if stats.BranchAlloc != 401408 {
			t.Fatalf("unexpected BranchAlloc: %d", stats.BranchAlloc)
		}

		if stats.LeafPageN != 3412 {
			t.Fatalf("unexpected LeafPageN: %d", stats.LeafPageN)
		} else if stats.LeafOverflowN != 0 {
			t.Fatalf("unexpected LeafOverflowN: %d", stats.LeafOverflowN)
		} else if stats.LeafInuse != 4742482 {
			t.Fatalf("unexpected LeafInuse: %d", stats.LeafInuse)
		} else if stats.LeafAlloc != 13975552 {
			t.Fatalf("unexpected LeafAlloc: %d", stats.LeafAlloc)
		}
		return nil
	}); err != nil {
		t.Fatal(err)
	}
}
+
// Ensure a bucket can calculate stats.
//
// A single tiny key/value stays inline in the parent's leaf page, so every
// page-based stat is zero and only the inline-bucket counters are non-zero.
func TestBucket_Stats_Small(t *testing.T) {
	db := MustOpenDB()
	defer db.MustClose()

	if err := db.Update(func(tx *Tx) error {
		// Add a bucket that fits on a single root leaf.
		b, err := tx.CreateBucket([]byte("whozawhats"))
		if err != nil {
			t.Fatal(err)
		}
		if err := b.Put([]byte("foo"), []byte("bar")); err != nil {
			t.Fatal(err)
		}

		return nil
	}); err != nil {
		t.Fatal(err)
	}

	db.MustCheck()

	if err := db.View(func(tx *Tx) error {
		b := tx.Bucket([]byte("whozawhats"))
		stats := b.Stats()
		if stats.BranchPageN != 0 {
			t.Fatalf("unexpected BranchPageN: %d", stats.BranchPageN)
		} else if stats.BranchOverflowN != 0 {
			t.Fatalf("unexpected BranchOverflowN: %d", stats.BranchOverflowN)
		} else if stats.LeafPageN != 0 {
			t.Fatalf("unexpected LeafPageN: %d", stats.LeafPageN)
		} else if stats.LeafOverflowN != 0 {
			t.Fatalf("unexpected LeafOverflowN: %d", stats.LeafOverflowN)
		} else if stats.KeyN != 1 {
			t.Fatalf("unexpected KeyN: %d", stats.KeyN)
		} else if stats.Depth != 1 {
			t.Fatalf("unexpected Depth: %d", stats.Depth)
		} else if stats.BranchInuse != 0 {
			t.Fatalf("unexpected BranchInuse: %d", stats.BranchInuse)
		} else if stats.LeafInuse != 0 {
			t.Fatalf("unexpected LeafInuse: %d", stats.LeafInuse)
		}

		if os.Getpagesize() == 4096 {
			if stats.BranchAlloc != 0 {
				t.Fatalf("unexpected BranchAlloc: %d", stats.BranchAlloc)
			} else if stats.LeafAlloc != 0 {
				t.Fatalf("unexpected LeafAlloc: %d", stats.LeafAlloc)
			}
		}

		// Inline bucket usage: page header (16) + one element (16) + key
		// "foo" plus value "bar" (6).
		if stats.BucketN != 1 {
			t.Fatalf("unexpected BucketN: %d", stats.BucketN)
		} else if stats.InlineBucketN != 1 {
			t.Fatalf("unexpected InlineBucketN: %d", stats.InlineBucketN)
		} else if stats.InlineBucketInuse != 16+16+6 {
			t.Fatalf("unexpected InlineBucketInuse: %d", stats.InlineBucketInuse)
		}

		return nil
	}); err != nil {
		t.Fatal(err)
	}
}
+
// TestBucket_Stats_EmptyBucket ensures stats are correct for a bucket that
// exists but holds no keys: it stays inline and uses only header space.
func TestBucket_Stats_EmptyBucket(t *testing.T) {
	db := MustOpenDB()
	defer db.MustClose()

	if err := db.Update(func(tx *Tx) error {
		// Add a bucket that fits on a single root leaf.
		if _, err := tx.CreateBucket([]byte("whozawhats")); err != nil {
			t.Fatal(err)
		}
		return nil
	}); err != nil {
		t.Fatal(err)
	}

	db.MustCheck()

	if err := db.View(func(tx *Tx) error {
		b := tx.Bucket([]byte("whozawhats"))
		stats := b.Stats()
		// An inline bucket owns no pages, so every page/byte counter
		// below must be zero; Depth is 1 (the root itself).
		if stats.BranchPageN != 0 {
			t.Fatalf("unexpected BranchPageN: %d", stats.BranchPageN)
		} else if stats.BranchOverflowN != 0 {
			t.Fatalf("unexpected BranchOverflowN: %d", stats.BranchOverflowN)
		} else if stats.LeafPageN != 0 {
			t.Fatalf("unexpected LeafPageN: %d", stats.LeafPageN)
		} else if stats.LeafOverflowN != 0 {
			t.Fatalf("unexpected LeafOverflowN: %d", stats.LeafOverflowN)
		} else if stats.KeyN != 0 {
			t.Fatalf("unexpected KeyN: %d", stats.KeyN)
		} else if stats.Depth != 1 {
			t.Fatalf("unexpected Depth: %d", stats.Depth)
		} else if stats.BranchInuse != 0 {
			t.Fatalf("unexpected BranchInuse: %d", stats.BranchInuse)
		} else if stats.LeafInuse != 0 {
			t.Fatalf("unexpected LeafInuse: %d", stats.LeafInuse)
		}

		// Alloc figures depend on the OS page size; only assert on 4KB pages.
		if os.Getpagesize() == 4096 {
			if stats.BranchAlloc != 0 {
				t.Fatalf("unexpected BranchAlloc: %d", stats.BranchAlloc)
			} else if stats.LeafAlloc != 0 {
				t.Fatalf("unexpected LeafAlloc: %d", stats.LeafAlloc)
			}
		}

		// 16 bytes: presumably the inline page header alone — TODO confirm
		// against the page layout constants.
		if stats.BucketN != 1 {
			t.Fatalf("unexpected BucketN: %d", stats.BucketN)
		} else if stats.InlineBucketN != 1 {
			t.Fatalf("unexpected InlineBucketN: %d", stats.InlineBucketN)
		} else if stats.InlineBucketInuse != 16 {
			t.Fatalf("unexpected InlineBucketInuse: %d", stats.InlineBucketInuse)
		}

		return nil
	}); err != nil {
		t.Fatal(err)
	}
}
+
// Ensure a bucket can calculate stats.
// Builds foo -> bar -> baz (baz remains inline) with a known number of keys
// at each level, then checks the aggregated stats including a hand-computed
// LeafInuse.
func TestBucket_Stats_Nested(t *testing.T) {
	db := MustOpenDB()
	defer db.MustClose()

	if err := db.Update(func(tx *Tx) error {
		b, err := tx.CreateBucket([]byte("foo"))
		if err != nil {
			t.Fatal(err)
		}
		// 100 two-byte keys/values in the top-level bucket.
		for i := 0; i < 100; i++ {
			if err := b.Put([]byte(fmt.Sprintf("%02d", i)), []byte(fmt.Sprintf("%02d", i))); err != nil {
				t.Fatal(err)
			}
		}

		// 10 one-byte keys/values in the nested "bar" bucket.
		bar, err := b.CreateBucket([]byte("bar"))
		if err != nil {
			t.Fatal(err)
		}
		for i := 0; i < 10; i++ {
			if err := bar.Put([]byte(strconv.Itoa(i)), []byte(strconv.Itoa(i))); err != nil {
				t.Fatal(err)
			}
		}

		// 10 one-byte keys/values in the doubly-nested "baz" bucket.
		baz, err := bar.CreateBucket([]byte("baz"))
		if err != nil {
			t.Fatal(err)
		}
		for i := 0; i < 10; i++ {
			if err := baz.Put([]byte(strconv.Itoa(i)), []byte(strconv.Itoa(i))); err != nil {
				t.Fatal(err)
			}
		}

		return nil
	}); err != nil {
		t.Fatal(err)
	}

	db.MustCheck()

	if err := db.View(func(tx *Tx) error {
		b := tx.Bucket([]byte("foo"))
		stats := b.Stats()
		// KeyN counts keys plus sub-bucket entries across all levels:
		// 100 + (10 + "bar") + (10 + "baz") = 122.
		if stats.BranchPageN != 0 {
			t.Fatalf("unexpected BranchPageN: %d", stats.BranchPageN)
		} else if stats.BranchOverflowN != 0 {
			t.Fatalf("unexpected BranchOverflowN: %d", stats.BranchOverflowN)
		} else if stats.LeafPageN != 2 {
			t.Fatalf("unexpected LeafPageN: %d", stats.LeafPageN)
		} else if stats.LeafOverflowN != 0 {
			t.Fatalf("unexpected LeafOverflowN: %d", stats.LeafOverflowN)
		} else if stats.KeyN != 122 {
			t.Fatalf("unexpected KeyN: %d", stats.KeyN)
		} else if stats.Depth != 3 {
			t.Fatalf("unexpected Depth: %d", stats.Depth)
		} else if stats.BranchInuse != 0 {
			t.Fatalf("unexpected BranchInuse: %d", stats.BranchInuse)
		}

		// Expected leaf usage from the on-disk layout. The 16-byte page
		// header and 16-byte leaf element sizes are presumed — TODO
		// confirm against the page layout constants.
		foo := 16            // foo (pghdr)
		foo += 101 * 16      // foo leaf elements
		foo += 100*2 + 100*2 // foo leaf key/values
		foo += 3 + 16        // foo -> bar key/value

		bar := 16      // bar (pghdr)
		bar += 11 * 16 // bar leaf elements
		bar += 10 + 10 // bar leaf key/values
		bar += 3 + 16  // bar -> baz key/value

		baz := 16      // baz (inline) (pghdr)
		baz += 10 * 16 // baz leaf elements
		baz += 10 + 10 // baz leaf key/values

		if stats.LeafInuse != foo+bar+baz {
			t.Fatalf("unexpected LeafInuse: %d", stats.LeafInuse)
		}

		// Alloc figures depend on the OS page size; only assert on 4KB pages.
		if os.Getpagesize() == 4096 {
			if stats.BranchAlloc != 0 {
				t.Fatalf("unexpected BranchAlloc: %d", stats.BranchAlloc)
			} else if stats.LeafAlloc != 8192 {
				t.Fatalf("unexpected LeafAlloc: %d", stats.LeafAlloc)
			}
		}

		if stats.BucketN != 3 {
			t.Fatalf("unexpected BucketN: %d", stats.BucketN)
		} else if stats.InlineBucketN != 1 {
			t.Fatalf("unexpected InlineBucketN: %d", stats.InlineBucketN)
		} else if stats.InlineBucketInuse != baz {
			t.Fatalf("unexpected InlineBucketInuse: %d", stats.InlineBucketInuse)
		}

		return nil
	}); err != nil {
		t.Fatal(err)
	}
}
+
// Ensure a large bucket can calculate stats.
// Inserts 100k sequential keys across 100 transactions; the exact page and
// byte counts below are tied to this workload and — presumably — to the
// default fill percent and 4KB page size; TODO confirm.
func TestBucket_Stats_Large(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping test in short mode.")
	}

	db := MustOpenDB()
	defer db.MustClose()

	var index int
	for i := 0; i < 100; i++ {
		// Add bucket with lots of keys.
		if err := db.Update(func(tx *Tx) error {
			b, err := tx.CreateBucketIfNotExists([]byte("widgets"))
			if err != nil {
				t.Fatal(err)
			}
			for i := 0; i < 1000; i++ {
				if err := b.Put([]byte(strconv.Itoa(index)), []byte(strconv.Itoa(index))); err != nil {
					t.Fatal(err)
				}
				index++
			}
			return nil
		}); err != nil {
			t.Fatal(err)
		}
	}

	db.MustCheck()

	if err := db.View(func(tx *Tx) error {
		stats := tx.Bucket([]byte("widgets")).Stats()
		if stats.BranchPageN != 13 {
			t.Fatalf("unexpected BranchPageN: %d", stats.BranchPageN)
		} else if stats.BranchOverflowN != 0 {
			t.Fatalf("unexpected BranchOverflowN: %d", stats.BranchOverflowN)
		} else if stats.LeafPageN != 1196 {
			t.Fatalf("unexpected LeafPageN: %d", stats.LeafPageN)
		} else if stats.LeafOverflowN != 0 {
			t.Fatalf("unexpected LeafOverflowN: %d", stats.LeafOverflowN)
		} else if stats.KeyN != 100000 {
			t.Fatalf("unexpected KeyN: %d", stats.KeyN)
		} else if stats.Depth != 3 {
			t.Fatalf("unexpected Depth: %d", stats.Depth)
		} else if stats.BranchInuse != 25257 {
			t.Fatalf("unexpected BranchInuse: %d", stats.BranchInuse)
		} else if stats.LeafInuse != 2596916 {
			t.Fatalf("unexpected LeafInuse: %d", stats.LeafInuse)
		}

		// Alloc figures depend on the OS page size; only assert on 4KB pages.
		if os.Getpagesize() == 4096 {
			if stats.BranchAlloc != 53248 {
				t.Fatalf("unexpected BranchAlloc: %d", stats.BranchAlloc)
			} else if stats.LeafAlloc != 4898816 {
				t.Fatalf("unexpected LeafAlloc: %d", stats.LeafAlloc)
			}
		}

		// The bucket is far too large to remain inline.
		if stats.BucketN != 1 {
			t.Fatalf("unexpected BucketN: %d", stats.BucketN)
		} else if stats.InlineBucketN != 0 {
			t.Fatalf("unexpected InlineBucketN: %d", stats.InlineBucketN)
		} else if stats.InlineBucketInuse != 0 {
			t.Fatalf("unexpected InlineBucketInuse: %d", stats.InlineBucketInuse)
		}

		return nil
	}); err != nil {
		t.Fatal(err)
	}
}
+
+// Ensure that a bucket can write random keys and values across multiple transactions.
+func TestBucket_Put_Single(t *testing.T) {
+ if testing.Short() {
+ t.Skip("skipping test in short mode.")
+ }
+
+ index := 0
+ if err := quick.Check(func(items testdata) bool {
+ db := MustOpenDB()
+ defer db.MustClose()
+
+ m := make(map[string][]byte)
+
+ if err := db.Update(func(tx *Tx) error {
+ if _, err := tx.CreateBucket([]byte("widgets")); err != nil {
+ t.Fatal(err)
+ }
+ return nil
+ }); err != nil {
+ t.Fatal(err)
+ }
+
+ for _, item := range items {
+ if err := db.Update(func(tx *Tx) error {
+ if err := tx.Bucket([]byte("widgets")).Put(item.Key, item.Value); err != nil {
+ panic("put error: " + err.Error())
+ }
+ m[string(item.Key)] = item.Value
+ return nil
+ }); err != nil {
+ t.Fatal(err)
+ }
+
+ // Verify all key/values so far.
+ if err := db.View(func(tx *Tx) error {
+ i := 0
+ for k, v := range m {
+ value := tx.Bucket([]byte("widgets")).Get([]byte(k))
+ if !bytes.Equal(value, v) {
+ t.Logf("value mismatch [run %d] (%d of %d):\nkey: %x\ngot: %x\nexp: %x", index, i, len(m), []byte(k), value, v)
+ db.CopyTempFile()
+ t.FailNow()
+ }
+ i++
+ }
+ return nil
+ }); err != nil {
+ t.Fatal(err)
+ }
+ }
+
+ index++
+ return true
+ }, nil); err != nil {
+ t.Error(err)
+ }
+}
+
+// Ensure that a transaction can insert multiple key/value pairs at once.
+func TestBucket_Put_Multiple(t *testing.T) {
+ if testing.Short() {
+ t.Skip("skipping test in short mode.")
+ }
+
+ if err := quick.Check(func(items testdata) bool {
+ db := MustOpenDB()
+ defer db.MustClose()
+
+ // Bulk insert all values.
+ if err := db.Update(func(tx *Tx) error {
+ if _, err := tx.CreateBucket([]byte("widgets")); err != nil {
+ t.Fatal(err)
+ }
+ return nil
+ }); err != nil {
+ t.Fatal(err)
+ }
+
+ if err := db.Update(func(tx *Tx) error {
+ b := tx.Bucket([]byte("widgets"))
+ for _, item := range items {
+ if err := b.Put(item.Key, item.Value); err != nil {
+ t.Fatal(err)
+ }
+ }
+ return nil
+ }); err != nil {
+ t.Fatal(err)
+ }
+
+ // Verify all items exist.
+ if err := db.View(func(tx *Tx) error {
+ b := tx.Bucket([]byte("widgets"))
+ for _, item := range items {
+ value := b.Get(item.Key)
+ if !bytes.Equal(item.Value, value) {
+ db.CopyTempFile()
+ t.Fatalf("exp=%x; got=%x", item.Value, value)
+ }
+ }
+ return nil
+ }); err != nil {
+ t.Fatal(err)
+ }
+
+ return true
+ }, qconfig()); err != nil {
+ t.Error(err)
+ }
+}
+
+// Ensure that a transaction can delete all key/value pairs and return to a single leaf page.
+func TestBucket_Delete_Quick(t *testing.T) {
+ if testing.Short() {
+ t.Skip("skipping test in short mode.")
+ }
+
+ if err := quick.Check(func(items testdata) bool {
+ db := MustOpenDB()
+ defer db.MustClose()
+
+ // Bulk insert all values.
+ if err := db.Update(func(tx *Tx) error {
+ if _, err := tx.CreateBucket([]byte("widgets")); err != nil {
+ t.Fatal(err)
+ }
+ return nil
+ }); err != nil {
+ t.Fatal(err)
+ }
+
+ if err := db.Update(func(tx *Tx) error {
+ b := tx.Bucket([]byte("widgets"))
+ for _, item := range items {
+ if err := b.Put(item.Key, item.Value); err != nil {
+ t.Fatal(err)
+ }
+ }
+ return nil
+ }); err != nil {
+ t.Fatal(err)
+ }
+
+ // Remove items one at a time and check consistency.
+ for _, item := range items {
+ if err := db.Update(func(tx *Tx) error {
+ return tx.Bucket([]byte("widgets")).Delete(item.Key)
+ }); err != nil {
+ t.Fatal(err)
+ }
+ }
+
+ // Anything before our deletion index should be nil.
+ if err := db.View(func(tx *Tx) error {
+ if err := tx.Bucket([]byte("widgets")).ForEach(func(k, v []byte) error {
+ t.Fatalf("bucket should be empty; found: %06x", trunc(k, 3))
+ return nil
+ }); err != nil {
+ t.Fatal(err)
+ }
+ return nil
+ }); err != nil {
+ t.Fatal(err)
+ }
+
+ return true
+ }, qconfig()); err != nil {
+ t.Error(err)
+ }
+}
+
+func ExampleBucket_Put() {
+ // Open the database.
+ db, err := Open(tempfile(), 0666, nil)
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer os.Remove(db.Path())
+
+ // Start a write transaction.
+ if err := db.Update(func(tx *Tx) error {
+ // Create a bucket.
+ b, err := tx.CreateBucket([]byte("widgets"))
+ if err != nil {
+ return err
+ }
+
+ // Set the value "bar" for the key "foo".
+ if err := b.Put([]byte("foo"), []byte("bar")); err != nil {
+ return err
+ }
+ return nil
+ }); err != nil {
+ log.Fatal(err)
+ }
+
+ // Read value back in a different read-only transaction.
+ if err := db.View(func(tx *Tx) error {
+ value := tx.Bucket([]byte("widgets")).Get([]byte("foo"))
+ fmt.Printf("The value of 'foo' is: %s\n", value)
+ return nil
+ }); err != nil {
+ log.Fatal(err)
+ }
+
+ // Close database to release file lock.
+ if err := db.Close(); err != nil {
+ log.Fatal(err)
+ }
+
+ // Output:
+ // The value of 'foo' is: bar
+}
+
+func ExampleBucket_Delete() {
+ // Open the database.
+ db, err := Open(tempfile(), 0666, nil)
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer os.Remove(db.Path())
+
+ // Start a write transaction.
+ if err := db.Update(func(tx *Tx) error {
+ // Create a bucket.
+ b, err := tx.CreateBucket([]byte("widgets"))
+ if err != nil {
+ return err
+ }
+
+ // Set the value "bar" for the key "foo".
+ if err := b.Put([]byte("foo"), []byte("bar")); err != nil {
+ return err
+ }
+
+ // Retrieve the key back from the database and verify it.
+ value := b.Get([]byte("foo"))
+ fmt.Printf("The value of 'foo' was: %s\n", value)
+
+ return nil
+ }); err != nil {
+ log.Fatal(err)
+ }
+
+ // Delete the key in a different write transaction.
+ if err := db.Update(func(tx *Tx) error {
+ return tx.Bucket([]byte("widgets")).Delete([]byte("foo"))
+ }); err != nil {
+ log.Fatal(err)
+ }
+
+ // Retrieve the key again.
+ if err := db.View(func(tx *Tx) error {
+ value := tx.Bucket([]byte("widgets")).Get([]byte("foo"))
+ if value == nil {
+ fmt.Printf("The value of 'foo' is now: nil\n")
+ }
+ return nil
+ }); err != nil {
+ log.Fatal(err)
+ }
+
+ // Close database to release file lock.
+ if err := db.Close(); err != nil {
+ log.Fatal(err)
+ }
+
+ // Output:
+ // The value of 'foo' was: bar
+ // The value of 'foo' is now: nil
+}
+
+func ExampleBucket_ForEach() {
+ // Open the database.
+ db, err := Open(tempfile(), 0666, nil)
+ if err != nil {
+ log.Fatal(err)
+ }
+ defer os.Remove(db.Path())
+
+ // Insert data into a bucket.
+ if err := db.Update(func(tx *Tx) error {
+ b, err := tx.CreateBucket([]byte("animals"))
+ if err != nil {
+ return err
+ }
+
+ if err := b.Put([]byte("dog"), []byte("fun")); err != nil {
+ return err
+ }
+ if err := b.Put([]byte("cat"), []byte("lame")); err != nil {
+ return err
+ }
+ if err := b.Put([]byte("liger"), []byte("awesome")); err != nil {
+ return err
+ }
+
+ // Iterate over items in sorted key order.
+ if err := b.ForEach(func(k, v []byte) error {
+ fmt.Printf("A %s is %s.\n", k, v)
+ return nil
+ }); err != nil {
+ return err
+ }
+
+ return nil
+ }); err != nil {
+ log.Fatal(err)
+ }
+
+ // Close database to release file lock.
+ if err := db.Close(); err != nil {
+ log.Fatal(err)
+ }
+
+ // Output:
+ // A cat is lame.
+ // A dog is fun.
+ // A liger is awesome.
+}
+
+// Ensure that a cursor can return a reference to the bucket that created it.
+func TestCursor_Bucket(t *testing.T) {
+ db := MustOpenDB()
+ defer db.MustClose()
+ if err := db.Update(func(tx *Tx) error {
+ b, err := tx.CreateBucket([]byte("widgets"))
+ if err != nil {
+ t.Fatal(err)
+ }
+ if cb := b.Cursor().Bucket(); !reflect.DeepEqual(cb, b) {
+ t.Fatal("cursor bucket mismatch")
+ }
+ return nil
+ }); err != nil {
+ t.Fatal(err)
+ }
+}
+
+// Ensure that a Tx cursor can seek to the appropriate keys.
+func TestCursor_Seek(t *testing.T) {
+ db := MustOpenDB()
+ defer db.MustClose()
+ if err := db.Update(func(tx *Tx) error {
+ b, err := tx.CreateBucket([]byte("widgets"))
+ if err != nil {
+ t.Fatal(err)
+ }
+ if err := b.Put([]byte("foo"), []byte("0001")); err != nil {
+ t.Fatal(err)
+ }
+ if err := b.Put([]byte("bar"), []byte("0002")); err != nil {
+ t.Fatal(err)
+ }
+ if err := b.Put([]byte("baz"), []byte("0003")); err != nil {
+ t.Fatal(err)
+ }
+
+ if _, err := b.CreateBucket([]byte("bkt")); err != nil {
+ t.Fatal(err)
+ }
+ return nil
+ }); err != nil {
+ t.Fatal(err)
+ }
+
+ if err := db.View(func(tx *Tx) error {
+ c := tx.Bucket([]byte("widgets")).Cursor()
+
+ // Exact match should go to the key.
+ if k, v := c.Seek([]byte("bar")); !bytes.Equal(k, []byte("bar")) {
+ t.Fatalf("unexpected key: %v", k)
+ } else if !bytes.Equal(v, []byte("0002")) {
+ t.Fatalf("unexpected value: %v", v)
+ }
+
+ // Inexact match should go to the next key.
+ if k, v := c.Seek([]byte("bas")); !bytes.Equal(k, []byte("baz")) {
+ t.Fatalf("unexpected key: %v", k)
+ } else if !bytes.Equal(v, []byte("0003")) {
+ t.Fatalf("unexpected value: %v", v)
+ }
+
+ // Low key should go to the first key.
+ if k, v := c.Seek([]byte("")); !bytes.Equal(k, []byte("bar")) {
+ t.Fatalf("unexpected key: %v", k)
+ } else if !bytes.Equal(v, []byte("0002")) {
+ t.Fatalf("unexpected value: %v", v)
+ }
+
+ // High key should return no key.
+ if k, v := c.Seek([]byte("zzz")); k != nil {
+ t.Fatalf("expected nil key: %v", k)
+ } else if v != nil {
+ t.Fatalf("expected nil value: %v", v)
+ }
+
+ // Buckets should return their key but no value.
+ if k, v := c.Seek([]byte("bkt")); !bytes.Equal(k, []byte("bkt")) {
+ t.Fatalf("unexpected key: %v", k)
+ } else if v != nil {
+ t.Fatalf("expected nil value: %v", v)
+ }
+
+ return nil
+ }); err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestCursor_Delete(t *testing.T) {
+ db := MustOpenDB()
+ defer db.MustClose()
+
+ const count = 1000
+
+ // Insert every other key between 0 and $count.
+ if err := db.Update(func(tx *Tx) error {
+ b, err := tx.CreateBucket([]byte("widgets"))
+ if err != nil {
+ t.Fatal(err)
+ }
+ for i := 0; i < count; i += 1 {
+ k := make([]byte, 8)
+ binary.BigEndian.PutUint64(k, uint64(i))
+ if err := b.Put(k, make([]byte, 100)); err != nil {
+ t.Fatal(err)
+ }
+ }
+ if _, err := b.CreateBucket([]byte("sub")); err != nil {
+ t.Fatal(err)
+ }
+ return nil
+ }); err != nil {
+ t.Fatal(err)
+ }
+
+ if err := db.Update(func(tx *Tx) error {
+ c := tx.Bucket([]byte("widgets")).Cursor()
+ bound := make([]byte, 8)
+ binary.BigEndian.PutUint64(bound, uint64(count/2))
+ for key, _ := c.First(); bytes.Compare(key, bound) < 0; key, _ = c.Next() {
+ if err := c.Delete(); err != nil {
+ t.Fatal(err)
+ }
+ }
+
+ c.Seek([]byte("sub"))
+ if err := c.Delete(); err != ErrIncompatibleValue {
+ t.Fatalf("unexpected error: %s", err)
+ }
+
+ return nil
+ }); err != nil {
+ t.Fatal(err)
+ }
+
+ if err := db.View(func(tx *Tx) error {
+ stats := tx.Bucket([]byte("widgets")).Stats()
+ if stats.KeyN != count/2+1 {
+ t.Fatalf("unexpected KeyN: %d", stats.KeyN)
+ }
+ return nil
+ }); err != nil {
+ t.Fatal(err)
+ }
+}
+
// Ensure that a Tx cursor can seek to the appropriate keys when there are a
// large number of keys. This test also checks that seek will always move
// forward to the next key.
//
// Related: https://github.com/boltdb/bolt/pull/187
func TestCursor_Seek_Large(t *testing.T) {
	db := MustOpenDB()
	defer db.MustClose()

	var count = 10000

	// Insert every other key between 0 and $count.
	if err := db.Update(func(tx *Tx) error {
		b, err := tx.CreateBucket([]byte("widgets"))
		if err != nil {
			t.Fatal(err)
		}

		// Keys are 8-byte big-endian integers; only even values are stored.
		for i := 0; i < count; i += 100 {
			for j := i; j < i+100; j += 2 {
				k := make([]byte, 8)
				binary.BigEndian.PutUint64(k, uint64(j))
				if err := b.Put(k, make([]byte, 100)); err != nil {
					t.Fatal(err)
				}
			}
		}
		return nil
	}); err != nil {
		t.Fatal(err)
	}

	if err := db.View(func(tx *Tx) error {
		c := tx.Bucket([]byte("widgets")).Cursor()
		for i := 0; i < count; i++ {
			seek := make([]byte, 8)
			binary.BigEndian.PutUint64(seek, uint64(i))

			k, _ := c.Seek(seek)

			// The last seek is beyond the end of the range so
			// it should return nil.
			if i == count-1 {
				if k != nil {
					t.Fatal("expected nil key")
				}
				continue
			}

			// Otherwise we should seek to the exact key (even i)
			// or forward to the next stored key (odd i).
			num := binary.BigEndian.Uint64(k)
			if i%2 == 0 {
				if num != uint64(i) {
					t.Fatalf("unexpected num: %d", num)
				}
			} else {
				if num != uint64(i+1) {
					t.Fatalf("unexpected num: %d", num)
				}
			}
		}

		return nil
	}); err != nil {
		t.Fatal(err)
	}
}
+
+// Ensure that a cursor can iterate over an empty bucket without error.
+func TestCursor_EmptyBucket(t *testing.T) {
+ db := MustOpenDB()
+ defer db.MustClose()
+ if err := db.Update(func(tx *Tx) error {
+ _, err := tx.CreateBucket([]byte("widgets"))
+ return err
+ }); err != nil {
+ t.Fatal(err)
+ }
+
+ if err := db.View(func(tx *Tx) error {
+ c := tx.Bucket([]byte("widgets")).Cursor()
+ k, v := c.First()
+ if k != nil {
+ t.Fatalf("unexpected key: %v", k)
+ } else if v != nil {
+ t.Fatalf("unexpected value: %v", v)
+ }
+ return nil
+ }); err != nil {
+ t.Fatal(err)
+ }
+}
+
+// Ensure that a Tx cursor can reverse iterate over an empty bucket without error.
+func TestCursor_EmptyBucketReverse(t *testing.T) {
+ db := MustOpenDB()
+ defer db.MustClose()
+
+ if err := db.Update(func(tx *Tx) error {
+ _, err := tx.CreateBucket([]byte("widgets"))
+ return err
+ }); err != nil {
+ t.Fatal(err)
+ }
+ if err := db.View(func(tx *Tx) error {
+ c := tx.Bucket([]byte("widgets")).Cursor()
+ k, v := c.Last()
+ if k != nil {
+ t.Fatalf("unexpected key: %v", k)
+ } else if v != nil {
+ t.Fatalf("unexpected value: %v", v)
+ }
+ return nil
+ }); err != nil {
+ t.Fatal(err)
+ }
+}
+
+// Ensure that a Tx cursor can iterate over a single root with a couple elements.
+func TestCursor_Iterate_Leaf(t *testing.T) {
+ db := MustOpenDB()
+ defer db.MustClose()
+
+ if err := db.Update(func(tx *Tx) error {
+ b, err := tx.CreateBucket([]byte("widgets"))
+ if err != nil {
+ t.Fatal(err)
+ }
+ if err := b.Put([]byte("baz"), []byte{}); err != nil {
+ t.Fatal(err)
+ }
+ if err := b.Put([]byte("foo"), []byte{0}); err != nil {
+ t.Fatal(err)
+ }
+ if err := b.Put([]byte("bar"), []byte{1}); err != nil {
+ t.Fatal(err)
+ }
+ return nil
+ }); err != nil {
+ t.Fatal(err)
+ }
+ tx, err := db.Begin(false)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer func() { _ = tx.Rollback() }()
+
+ c := tx.Bucket([]byte("widgets")).Cursor()
+
+ k, v := c.First()
+ if !bytes.Equal(k, []byte("bar")) {
+ t.Fatalf("unexpected key: %v", k)
+ } else if !bytes.Equal(v, []byte{1}) {
+ t.Fatalf("unexpected value: %v", v)
+ }
+
+ k, v = c.Next()
+ if !bytes.Equal(k, []byte("baz")) {
+ t.Fatalf("unexpected key: %v", k)
+ } else if !bytes.Equal(v, []byte{}) {
+ t.Fatalf("unexpected value: %v", v)
+ }
+
+ k, v = c.Next()
+ if !bytes.Equal(k, []byte("foo")) {
+ t.Fatalf("unexpected key: %v", k)
+ } else if !bytes.Equal(v, []byte{0}) {
+ t.Fatalf("unexpected value: %v", v)
+ }
+
+ k, v = c.Next()
+ if k != nil {
+ t.Fatalf("expected nil key: %v", k)
+ } else if v != nil {
+ t.Fatalf("expected nil value: %v", v)
+ }
+
+ k, v = c.Next()
+ if k != nil {
+ t.Fatalf("expected nil key: %v", k)
+ } else if v != nil {
+ t.Fatalf("expected nil value: %v", v)
+ }
+
+ if err := tx.Rollback(); err != nil {
+ t.Fatal(err)
+ }
+}
+
+// Ensure that a Tx cursor can iterate in reverse over a single root with a couple elements.
+func TestCursor_LeafRootReverse(t *testing.T) {
+ db := MustOpenDB()
+ defer db.MustClose()
+
+ if err := db.Update(func(tx *Tx) error {
+ b, err := tx.CreateBucket([]byte("widgets"))
+ if err != nil {
+ t.Fatal(err)
+ }
+ if err := b.Put([]byte("baz"), []byte{}); err != nil {
+ t.Fatal(err)
+ }
+ if err := b.Put([]byte("foo"), []byte{0}); err != nil {
+ t.Fatal(err)
+ }
+ if err := b.Put([]byte("bar"), []byte{1}); err != nil {
+ t.Fatal(err)
+ }
+ return nil
+ }); err != nil {
+ t.Fatal(err)
+ }
+ tx, err := db.Begin(false)
+ if err != nil {
+ t.Fatal(err)
+ }
+ c := tx.Bucket([]byte("widgets")).Cursor()
+
+ if k, v := c.Last(); !bytes.Equal(k, []byte("foo")) {
+ t.Fatalf("unexpected key: %v", k)
+ } else if !bytes.Equal(v, []byte{0}) {
+ t.Fatalf("unexpected value: %v", v)
+ }
+
+ if k, v := c.Prev(); !bytes.Equal(k, []byte("baz")) {
+ t.Fatalf("unexpected key: %v", k)
+ } else if !bytes.Equal(v, []byte{}) {
+ t.Fatalf("unexpected value: %v", v)
+ }
+
+ if k, v := c.Prev(); !bytes.Equal(k, []byte("bar")) {
+ t.Fatalf("unexpected key: %v", k)
+ } else if !bytes.Equal(v, []byte{1}) {
+ t.Fatalf("unexpected value: %v", v)
+ }
+
+ if k, v := c.Prev(); k != nil {
+ t.Fatalf("expected nil key: %v", k)
+ } else if v != nil {
+ t.Fatalf("expected nil value: %v", v)
+ }
+
+ if k, v := c.Prev(); k != nil {
+ t.Fatalf("expected nil key: %v", k)
+ } else if v != nil {
+ t.Fatalf("expected nil value: %v", v)
+ }
+
+ if err := tx.Rollback(); err != nil {
+ t.Fatal(err)
+ }
+}
+
+// Ensure that a Tx cursor can restart from the beginning.
+func TestCursor_Restart(t *testing.T) {
+ db := MustOpenDB()
+ defer db.MustClose()
+
+ if err := db.Update(func(tx *Tx) error {
+ b, err := tx.CreateBucket([]byte("widgets"))
+ if err != nil {
+ t.Fatal(err)
+ }
+ if err := b.Put([]byte("bar"), []byte{}); err != nil {
+ t.Fatal(err)
+ }
+ if err := b.Put([]byte("foo"), []byte{}); err != nil {
+ t.Fatal(err)
+ }
+ return nil
+ }); err != nil {
+ t.Fatal(err)
+ }
+
+ tx, err := db.Begin(false)
+ if err != nil {
+ t.Fatal(err)
+ }
+ c := tx.Bucket([]byte("widgets")).Cursor()
+
+ if k, _ := c.First(); !bytes.Equal(k, []byte("bar")) {
+ t.Fatalf("unexpected key: %v", k)
+ }
+ if k, _ := c.Next(); !bytes.Equal(k, []byte("foo")) {
+ t.Fatalf("unexpected key: %v", k)
+ }
+
+ if k, _ := c.First(); !bytes.Equal(k, []byte("bar")) {
+ t.Fatalf("unexpected key: %v", k)
+ }
+ if k, _ := c.Next(); !bytes.Equal(k, []byte("foo")) {
+ t.Fatalf("unexpected key: %v", k)
+ }
+
+ if err := tx.Rollback(); err != nil {
+ t.Fatal(err)
+ }
+}
+
+// Ensure that a cursor can skip over empty pages that have been deleted.
+func TestCursor_First_EmptyPages(t *testing.T) {
+ db := MustOpenDB()
+ defer db.MustClose()
+
+ // Create 1000 keys in the "widgets" bucket.
+ if err := db.Update(func(tx *Tx) error {
+ b, err := tx.CreateBucket([]byte("widgets"))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ for i := 0; i < 1000; i++ {
+ if err := b.Put(u64tob(uint64(i)), []byte{}); err != nil {
+ t.Fatal(err)
+ }
+ }
+
+ return nil
+ }); err != nil {
+ t.Fatal(err)
+ }
+
+ // Delete half the keys and then try to iterate.
+ if err := db.Update(func(tx *Tx) error {
+ b := tx.Bucket([]byte("widgets"))
+ for i := 0; i < 600; i++ {
+ if err := b.Delete(u64tob(uint64(i))); err != nil {
+ t.Fatal(err)
+ }
+ }
+
+ c := b.Cursor()
+ var n int
+ for k, _ := c.First(); k != nil; k, _ = c.Next() {
+ n++
+ }
+ if n != 400 {
+ t.Fatalf("unexpected key count: %d", n)
+ }
+
+ return nil
+ }); err != nil {
+ t.Fatal(err)
+ }
+}
+
+// Ensure that a Tx can iterate over all elements in a bucket.
+func TestCursor_QuickCheck(t *testing.T) {
+ f := func(items testdata) bool {
+ db := MustOpenDB()
+ defer db.MustClose()
+
+ // Bulk insert all values.
+ tx, err := db.Begin(true)
+ if err != nil {
+ t.Fatal(err)
+ }
+ b, err := tx.CreateBucket([]byte("widgets"))
+ if err != nil {
+ t.Fatal(err)
+ }
+ for _, item := range items {
+ if err := b.Put(item.Key, item.Value); err != nil {
+ t.Fatal(err)
+ }
+ }
+ if err := tx.Commit(); err != nil {
+ t.Fatal(err)
+ }
+
+ // Sort test data.
+ sort.Sort(items)
+
+ // Iterate over all items and check consistency.
+ var index = 0
+ tx, err = db.Begin(false)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ c := tx.Bucket([]byte("widgets")).Cursor()
+ for k, v := c.First(); k != nil && index < len(items); k, v = c.Next() {
+ if !bytes.Equal(k, items[index].Key) {
+ t.Fatalf("unexpected key: %v", k)
+ } else if !bytes.Equal(v, items[index].Value) {
+ t.Fatalf("unexpected value: %v", v)
+ }
+ index++
+ }
+ if len(items) != index {
+ t.Fatalf("unexpected item count: %v, expected %v", len(items), index)
+ }
+
+ if err := tx.Rollback(); err != nil {
+ t.Fatal(err)
+ }
+
+ return true
+ }
+ if err := quick.Check(f, qconfig()); err != nil {
+ t.Error(err)
+ }
+}
+
+// Ensure that a transaction can iterate over all elements in a bucket in reverse.
+func TestCursor_QuickCheck_Reverse(t *testing.T) {
+ f := func(items testdata) bool {
+ db := MustOpenDB()
+ defer db.MustClose()
+
+ // Bulk insert all values.
+ tx, err := db.Begin(true)
+ if err != nil {
+ t.Fatal(err)
+ }
+ b, err := tx.CreateBucket([]byte("widgets"))
+ if err != nil {
+ t.Fatal(err)
+ }
+ for _, item := range items {
+ if err := b.Put(item.Key, item.Value); err != nil {
+ t.Fatal(err)
+ }
+ }
+ if err := tx.Commit(); err != nil {
+ t.Fatal(err)
+ }
+
+ // Sort test data.
+ sort.Sort(revtestdata(items))
+
+ // Iterate over all items and check consistency.
+ var index = 0
+ tx, err = db.Begin(false)
+ if err != nil {
+ t.Fatal(err)
+ }
+ c := tx.Bucket([]byte("widgets")).Cursor()
+ for k, v := c.Last(); k != nil && index < len(items); k, v = c.Prev() {
+ if !bytes.Equal(k, items[index].Key) {
+ t.Fatalf("unexpected key: %v", k)
+ } else if !bytes.Equal(v, items[index].Value) {
+ t.Fatalf("unexpected value: %v", v)
+ }
+ index++
+ }
+ if len(items) != index {
+ t.Fatalf("unexpected item count: %v, expected %v", len(items), index)
+ }
+
+ if err := tx.Rollback(); err != nil {
+ t.Fatal(err)
+ }
+
+ return true
+ }
+ if err := quick.Check(f, qconfig()); err != nil {
+ t.Error(err)
+ }
+}
+
+// Ensure that a Tx cursor can iterate over subbuckets.
+func TestCursor_QuickCheck_BucketsOnly(t *testing.T) {
+ db := MustOpenDB()
+ defer db.MustClose()
+
+ if err := db.Update(func(tx *Tx) error {
+ b, err := tx.CreateBucket([]byte("widgets"))
+ if err != nil {
+ t.Fatal(err)
+ }
+ if _, err := b.CreateBucket([]byte("foo")); err != nil {
+ t.Fatal(err)
+ }
+ if _, err := b.CreateBucket([]byte("bar")); err != nil {
+ t.Fatal(err)
+ }
+ if _, err := b.CreateBucket([]byte("baz")); err != nil {
+ t.Fatal(err)
+ }
+ return nil
+ }); err != nil {
+ t.Fatal(err)
+ }
+
+ if err := db.View(func(tx *Tx) error {
+ var names []string
+ c := tx.Bucket([]byte("widgets")).Cursor()
+ for k, v := c.First(); k != nil; k, v = c.Next() {
+ names = append(names, string(k))
+ if v != nil {
+ t.Fatalf("unexpected value: %v", v)
+ }
+ }
+ if !reflect.DeepEqual(names, []string{"bar", "baz", "foo"}) {
+ t.Fatalf("unexpected names: %+v", names)
+ }
+ return nil
+ }); err != nil {
+ t.Fatal(err)
+ }
+}
+
+// Ensure that a Tx cursor can reverse iterate over subbuckets.
+func TestCursor_QuickCheck_BucketsOnly_Reverse(t *testing.T) {
+ db := MustOpenDB()
+ defer db.MustClose()
+
+ if err := db.Update(func(tx *Tx) error {
+ b, err := tx.CreateBucket([]byte("widgets"))
+ if err != nil {
+ t.Fatal(err)
+ }
+ if _, err := b.CreateBucket([]byte("foo")); err != nil {
+ t.Fatal(err)
+ }
+ if _, err := b.CreateBucket([]byte("bar")); err != nil {
+ t.Fatal(err)
+ }
+ if _, err := b.CreateBucket([]byte("baz")); err != nil {
+ t.Fatal(err)
+ }
+ return nil
+ }); err != nil {
+ t.Fatal(err)
+ }
+
+ if err := db.View(func(tx *Tx) error {
+ var names []string
+ c := tx.Bucket([]byte("widgets")).Cursor()
+ for k, v := c.Last(); k != nil; k, v = c.Prev() {
+ names = append(names, string(k))
+ if v != nil {
+ t.Fatalf("unexpected value: %v", v)
+ }
+ }
+ if !reflect.DeepEqual(names, []string{"foo", "baz", "bar"}) {
+ t.Fatalf("unexpected names: %+v", names)
+ }
+ return nil
+ }); err != nil {
+ t.Fatal(err)
+ }
+}
+
// ExampleCursor demonstrates forward iteration over a bucket's
// key/value pairs using a Cursor.
func ExampleCursor() {
	// Open the database.
	db, err := Open(tempfile(), 0666, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer os.Remove(db.Path())

	// Start a read-write transaction.
	if err := db.Update(func(tx *Tx) error {
		// Create a new bucket.
		b, err := tx.CreateBucket([]byte("animals"))
		if err != nil {
			return err
		}

		// Insert data into a bucket.
		if err := b.Put([]byte("dog"), []byte("fun")); err != nil {
			log.Fatal(err)
		}
		if err := b.Put([]byte("cat"), []byte("lame")); err != nil {
			log.Fatal(err)
		}
		if err := b.Put([]byte("liger"), []byte("awesome")); err != nil {
			log.Fatal(err)
		}

		// Create a cursor for iteration.
		c := b.Cursor()

		// Iterate over items in sorted key order. This starts from the
		// first key/value pair and updates the k/v variables to the
		// next key/value on each iteration.
		//
		// The loop finishes at the end of the cursor when a nil key is returned.
		for k, v := c.First(); k != nil; k, v = c.Next() {
			fmt.Printf("A %s is %s.\n", k, v)
		}

		return nil
	}); err != nil {
		log.Fatal(err)
	}

	// Close the database to release the file lock.
	if err := db.Close(); err != nil {
		log.Fatal(err)
	}

	// Output:
	// A cat is lame.
	// A dog is fun.
	// A liger is awesome.
}
+
// ExampleCursor_reverse demonstrates backward iteration over a bucket's
// key/value pairs using a Cursor; note the Output is in reverse key order.
func ExampleCursor_reverse() {
	// Open the database.
	db, err := Open(tempfile(), 0666, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer os.Remove(db.Path())

	// Start a read-write transaction.
	if err := db.Update(func(tx *Tx) error {
		// Create a new bucket.
		b, err := tx.CreateBucket([]byte("animals"))
		if err != nil {
			return err
		}

		// Insert data into a bucket.
		if err := b.Put([]byte("dog"), []byte("fun")); err != nil {
			log.Fatal(err)
		}
		if err := b.Put([]byte("cat"), []byte("lame")); err != nil {
			log.Fatal(err)
		}
		if err := b.Put([]byte("liger"), []byte("awesome")); err != nil {
			log.Fatal(err)
		}

		// Create a cursor for iteration.
		c := b.Cursor()

		// Iterate over items in reverse sorted key order. This starts
		// from the last key/value pair and updates the k/v variables to
		// the previous key/value on each iteration.
		//
		// The loop finishes at the beginning of the cursor when a nil key
		// is returned.
		for k, v := c.Last(); k != nil; k, v = c.Prev() {
			fmt.Printf("A %s is %s.\n", k, v)
		}

		return nil
	}); err != nil {
		log.Fatal(err)
	}

	// Close the database to release the file lock.
	if err := db.Close(); err != nil {
		log.Fatal(err)
	}

	// Output:
	// A liger is awesome.
	// A dog is fun.
	// A cat is lame.
}
+
// statsFlag, when set via `go test -stats`, makes WDB.Close print
// per-database performance statistics before closing.
var statsFlag = flag.Bool("stats", false, "show performance stats")

// pageSize is the size of one page in the data file.
const pageSize = 4096
+
+// Ensure that a database can be opened without error.
+func TestOpen(t *testing.T) {
+ path := tempfile()
+ db, err := Open(path, 0666, nil)
+ if err != nil {
+ t.Fatal(err)
+ } else if db == nil {
+ t.Fatal("expected db")
+ }
+
+ if s := db.Path(); s != path {
+ t.Fatalf("unexpected path: %s", s)
+ }
+
+ if err := db.Close(); err != nil {
+ t.Fatal(err)
+ }
+}
+
+// Ensure that opening a database with a blank path returns an error.
+func TestOpen_ErrPathRequired(t *testing.T) {
+ _, err := Open("", 0666, nil)
+ if err == nil {
+ t.Fatalf("expected error")
+ }
+}
+
+// Ensure that opening a database with a bad path returns an error.
+func TestOpen_ErrNotExists(t *testing.T) {
+ _, err := Open(filepath.Join(tempfile(), "bad-path"), 0666, nil)
+ if err == nil {
+ t.Fatal("expected error")
+ }
+}
+
+// Ensure that opening a file that is not a Bolt database returns ErrInvalid.
+func TestOpen_ErrInvalid(t *testing.T) {
+ path := tempfile()
+
+ f, err := os.Create(path)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if _, err := fmt.Fprintln(f, "this is not a bolt database"); err != nil {
+ t.Fatal(err)
+ }
+ if err := f.Close(); err != nil {
+ t.Fatal(err)
+ }
+ defer os.Remove(path)
+
+ if _, err := Open(path, 0666, nil); err != ErrInvalid {
+ t.Fatalf("unexpected error: %s", err)
+ }
+}
+
+// Ensure that opening a file with two invalid versions returns ErrVersionMismatch.
// Ensure that opening a file with two invalid versions returns ErrVersionMismatch.
func TestOpen_ErrVersionMismatch(t *testing.T) {
	// The raw-byte meta offsets below assume the test's pageSize
	// constant matches the OS page size used by the database.
	if pageSize != os.Getpagesize() {
		t.Skip("page size mismatch")
	}

	// Create empty database.
	db := MustOpenDB()
	path := db.Path()
	defer db.MustClose()

	// Close database.
	if err := db.DB.Close(); err != nil {
		t.Fatal(err)
	}

	// Read data file.
	buf, err := ioutil.ReadFile(path)
	if err != nil {
		t.Fatal(err)
	}

	// Rewrite meta pages: bump the version field in BOTH meta pages so
	// neither copy can be used as a fallback.
	meta0 := (*meta)(unsafe.Pointer(&buf[pageHeaderSize]))
	meta0.version++
	meta1 := (*meta)(unsafe.Pointer(&buf[pageSize+pageHeaderSize]))
	meta1.version++
	if err := ioutil.WriteFile(path, buf, 0666); err != nil {
		t.Fatal(err)
	}

	// Reopen data file.
	if _, err := Open(path, 0666, nil); err != ErrVersionMismatch {
		t.Fatalf("unexpected error: %s", err)
	}
}
+
+// Ensure that opening a file with two invalid checksums returns ErrChecksum.
// Ensure that opening a file with two invalid checksums returns ErrChecksum.
func TestOpen_ErrChecksum(t *testing.T) {
	// The raw-byte meta offsets below assume the test's pageSize
	// constant matches the OS page size used by the database.
	if pageSize != os.Getpagesize() {
		t.Skip("page size mismatch")
	}

	// Create empty database.
	db := MustOpenDB()
	path := db.Path()
	defer db.MustClose()

	// Close database.
	if err := db.DB.Close(); err != nil {
		t.Fatal(err)
	}

	// Read data file.
	buf, err := ioutil.ReadFile(path)
	if err != nil {
		t.Fatal(err)
	}

	// Rewrite meta pages: mutate a field without updating the stored
	// checksum, in BOTH meta pages, so validation fails on each.
	meta0 := (*meta)(unsafe.Pointer(&buf[pageHeaderSize]))
	meta0.pgid++
	meta1 := (*meta)(unsafe.Pointer(&buf[pageSize+pageHeaderSize]))
	meta1.pgid++
	if err := ioutil.WriteFile(path, buf, 0666); err != nil {
		t.Fatal(err)
	}

	// Reopen data file.
	if _, err := Open(path, 0666, nil); err != ErrChecksum {
		t.Fatalf("unexpected error: %s", err)
	}
}
+
+// Ensure that opening a database does not increase its size.
+// https://github.com/boltdb/bolt/issues/291
// Ensure that opening a database does not increase its size.
// https://github.com/boltdb/bolt/issues/291
func TestOpen_Size(t *testing.T) {
	// Open a data file.
	db := MustOpenDB()
	path := db.Path()
	defer db.MustClose()

	pagesize := db.Info().PageSize

	// Insert until we get above the minimum 4MB size.
	if err := db.Update(func(tx *Tx) error {
		b, _ := tx.CreateBucketIfNotExists([]byte("data"))
		for i := 0; i < 10000; i++ {
			if err := b.Put([]byte(fmt.Sprintf("%04d", i)), make([]byte, 1000)); err != nil {
				t.Fatal(err)
			}
		}
		return nil
	}); err != nil {
		t.Fatal(err)
	}

	// Close database and grab the size.
	if err := db.DB.Close(); err != nil {
		t.Fatal(err)
	}
	sz := fileSize(path)
	if sz == 0 {
		t.Fatalf("unexpected new file size: %d", sz)
	}

	// Reopen database, update, and check size again.
	db0, err := Open(path, 0666, nil)
	if err != nil {
		t.Fatal(err)
	}
	if err := db0.Update(func(tx *Tx) error {
		if err := tx.Bucket([]byte("data")).Put([]byte{0}, []byte{0}); err != nil {
			t.Fatal(err)
		}
		return nil
	}); err != nil {
		t.Fatal(err)
	}
	if err := db0.Close(); err != nil {
		t.Fatal(err)
	}
	newSz := fileSize(path)
	if newSz == 0 {
		t.Fatalf("unexpected new file size: %d", newSz)
	}

	// Compare the original size with the new size.
	// db size might increase by a few page sizes due to the new small update.
	// A growth of more than 5 pages indicates the reopen itself grew the file.
	if sz < newSz-5*int64(pagesize) {
		t.Fatalf("unexpected file growth: %d => %d", sz, newSz)
	}
}
+
+// Ensure that opening a database beyond the max step size does not increase its size.
+// https://github.com/boltdb/bolt/issues/303
// Ensure that opening a database beyond the max step size does not increase its size.
// https://github.com/boltdb/bolt/issues/303
func TestOpen_Size_Large(t *testing.T) {
	// This test writes ~1GB of data; skip it under `go test -short`.
	if testing.Short() {
		t.Skip("short mode")
	}

	// Open a data file.
	db := MustOpenDB()
	path := db.Path()
	defer db.MustClose()

	pagesize := db.Info().PageSize

	// Insert until we get above the minimum 4MB size.
	// Batched into 10000 transactions of 1000 puts each.
	var index uint64
	for i := 0; i < 10000; i++ {
		if err := db.Update(func(tx *Tx) error {
			b, _ := tx.CreateBucketIfNotExists([]byte("data"))
			for j := 0; j < 1000; j++ {
				if err := b.Put(u64tob(index), make([]byte, 50)); err != nil {
					t.Fatal(err)
				}
				index++
			}
			return nil
		}); err != nil {
			t.Fatal(err)
		}
	}

	// Close database and grab the size.
	if err := db.DB.Close(); err != nil {
		t.Fatal(err)
	}
	sz := fileSize(path)
	if sz == 0 {
		t.Fatalf("unexpected new file size: %d", sz)
	} else if sz < (1 << 30) {
		t.Fatalf("expected larger initial size: %d", sz)
	}

	// Reopen database, update, and check size again.
	db0, err := Open(path, 0666, nil)
	if err != nil {
		t.Fatal(err)
	}
	if err := db0.Update(func(tx *Tx) error {
		return tx.Bucket([]byte("data")).Put([]byte{0}, []byte{0})
	}); err != nil {
		t.Fatal(err)
	}
	if err := db0.Close(); err != nil {
		t.Fatal(err)
	}

	newSz := fileSize(path)
	if newSz == 0 {
		t.Fatalf("unexpected new file size: %d", newSz)
	}

	// Compare the original size with the new size.
	// db size might increase by a few page sizes due to the new small update.
	if sz < newSz-5*int64(pagesize) {
		t.Fatalf("unexpected file growth: %d => %d", sz, newSz)
	}
}
+
+// Ensure that a re-opened database is consistent.
+func TestOpen_Check(t *testing.T) {
+ path := tempfile()
+
+ db, err := Open(path, 0666, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if err := db.View(func(tx *Tx) error { return <-tx.Check() }); err != nil {
+ t.Fatal(err)
+ }
+ if err := db.Close(); err != nil {
+ t.Fatal(err)
+ }
+
+ db, err = Open(path, 0666, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if err := db.View(func(tx *Tx) error { return <-tx.Check() }); err != nil {
+ t.Fatal(err)
+ }
+ if err := db.Close(); err != nil {
+ t.Fatal(err)
+ }
+}
+
+// Ensure that write errors to the meta file handler during initialization are returned.
// Ensure that write errors to the meta file handler during initialization are returned.
// Skipped: needs a way to inject write failures into the meta writer —
// TODO implement fault injection and unskip.
func TestOpen_MetaInitWriteError(t *testing.T) {
	t.Skip("pending")
}
+
+// Ensure that a database that is too small returns an error.
// Ensure that a database that is too small returns an error.
func TestOpen_FileTooSmall(t *testing.T) {
	path := tempfile()

	// Create a valid database, then close it.
	db, err := Open(path, 0666, nil)
	if err != nil {
		t.Fatal(err)
	}
	if err := db.Close(); err != nil {
		t.Fatal(err)
	}

	// corrupt the database by truncating it below the minimum two meta
	// pages (one OS page is too small to hold them)
	if err := os.Truncate(path, int64(os.Getpagesize())); err != nil {
		t.Fatal(err)
	}

	// The error is matched by message because no sentinel is exported
	// for this condition.
	db, err = Open(path, 0666, nil)
	if err == nil || err.Error() != "file size too small" {
		t.Fatalf("unexpected error: %s", err)
	}
}
+
+// TestDB_Open_InitialMmapSize tests if having InitialMmapSize large enough
+// to hold data from concurrent write transaction resolves the issue that
+// read transaction blocks the write transaction and causes deadlock.
+// This is a very hacky test since the mmap size is not exposed.
+func TestDB_Open_InitialMmapSize(t *testing.T) {
+ path := tempfile()
+ defer os.Remove(path)
+
+ initMmapSize := 1 << 31 // 2GB
+ testWriteSize := 1 << 27 // 134MB
+
+ db, err := Open(path, 0666, &Options{InitialMmapSize: initMmapSize})
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // create a long-running read transaction
+ // that never gets closed while writing
+ rtx, err := db.Begin(false)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // create a write transaction
+ wtx, err := db.Begin(true)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ b, err := wtx.CreateBucket([]byte("test"))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // and commit a large write
+ err = b.Put([]byte("foo"), make([]byte, testWriteSize))
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ done := make(chan struct{})
+
+ go func() {
+ if err := wtx.Commit(); err != nil {
+ t.Fatal(err)
+ }
+ done <- struct{}{}
+ }()
+
+ select {
+ case <-time.After(5 * time.Second):
+ t.Errorf("unexpected that the reader blocks writer")
+ case <-done:
+ }
+
+ if err := rtx.Rollback(); err != nil {
+ t.Fatal(err)
+ }
+}
+
+// Ensure that a database cannot open a transaction when it's not open.
+func TestDB_Begin_ErrDatabaseNotOpen(t *testing.T) {
+ var db DB
+ if _, err := db.Begin(false); err != ErrDatabaseNotOpen {
+ t.Fatalf("unexpected error: %s", err)
+ }
+}
+
+// Ensure that a read-write transaction can be retrieved.
// Ensure that a read-write transaction can be retrieved.
func TestDB_BeginRW(t *testing.T) {
	db := MustOpenDB()
	defer db.MustClose()

	tx, err := db.Begin(true)
	if err != nil {
		t.Fatal(err)
	} else if tx == nil {
		t.Fatal("expected tx")
	}

	// The transaction must point back at the DB it came from and must
	// be writable since Begin(true) was requested.
	if tx.DB() != db.DB {
		t.Fatal("unexpected tx database")
	} else if !tx.Writable() {
		t.Fatal("expected writable tx")
	}

	if err := tx.Commit(); err != nil {
		t.Fatal(err)
	}
}
+
+// Ensure that opening a transaction while the DB is closed returns an error.
+func TestDB_BeginRW_Closed(t *testing.T) {
+ var db DB
+ if _, err := db.Begin(true); err != ErrDatabaseNotOpen {
+ t.Fatalf("unexpected error: %s", err)
+ }
+}
+
// Wrappers running testDB_Close_PendingTx with a read-write and a
// read-only pending transaction respectively.
func TestDB_Close_PendingTx_RW(t *testing.T) { testDB_Close_PendingTx(t, true) }
func TestDB_Close_PendingTx_RO(t *testing.T) { testDB_Close_PendingTx(t, false) }
+
+// Ensure that a database cannot close while transactions are open.
+func testDB_Close_PendingTx(t *testing.T, writable bool) {
+ db := MustOpenDB()
+ defer db.MustClose()
+
+ // Start transaction.
+ tx, err := db.Begin(true)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // Open update in separate goroutine.
+ done := make(chan struct{})
+ go func() {
+ if err := db.Close(); err != nil {
+ t.Fatal(err)
+ }
+ close(done)
+ }()
+
+ // Ensure database hasn't closed.
+ time.Sleep(100 * time.Millisecond)
+ select {
+ case <-done:
+ t.Fatal("database closed too early")
+ default:
+ }
+
+ // Commit transaction.
+ if err := tx.Commit(); err != nil {
+ t.Fatal(err)
+ }
+
+ // Ensure database closed now.
+ time.Sleep(100 * time.Millisecond)
+ select {
+ case <-done:
+ default:
+ t.Fatal("database did not close")
+ }
+}
+
+// Ensure a database can provide a transactional block.
// Ensure a database can provide a transactional block.
func TestDB_Update(t *testing.T) {
	db := MustOpenDB()
	defer db.MustClose()
	// Put two keys then delete one, all within a single transaction.
	if err := db.Update(func(tx *Tx) error {
		b, err := tx.CreateBucket([]byte("widgets"))
		if err != nil {
			t.Fatal(err)
		}
		if err := b.Put([]byte("foo"), []byte("bar")); err != nil {
			t.Fatal(err)
		}
		if err := b.Put([]byte("baz"), []byte("bat")); err != nil {
			t.Fatal(err)
		}
		if err := b.Delete([]byte("foo")); err != nil {
			t.Fatal(err)
		}
		return nil
	}); err != nil {
		t.Fatal(err)
	}
	// Verify the deletion and the surviving key are both visible to a
	// subsequent read-only transaction.
	if err := db.View(func(tx *Tx) error {
		b := tx.Bucket([]byte("widgets"))
		if v := b.Get([]byte("foo")); v != nil {
			t.Fatalf("expected nil value, got: %v", v)
		}
		if v := b.Get([]byte("baz")); !bytes.Equal(v, []byte("bat")) {
			t.Fatalf("unexpected value: %v", v)
		}
		return nil
	}); err != nil {
		t.Fatal(err)
	}
}
+
+// Ensure a closed database returns an error while running a transaction block
+func TestDB_Update_Closed(t *testing.T) {
+ var db DB
+ if err := db.Update(func(tx *Tx) error {
+ if _, err := tx.CreateBucket([]byte("widgets")); err != nil {
+ t.Fatal(err)
+ }
+ return nil
+ }); err != ErrDatabaseNotOpen {
+ t.Fatalf("unexpected error: %s", err)
+ }
+}
+
+// Ensure a panic occurs while trying to commit a managed transaction.
// Ensure a panic occurs while trying to commit a managed transaction.
func TestDB_Update_ManualCommit(t *testing.T) {
	db := MustOpenDB()
	defer db.MustClose()

	var panicked bool
	if err := db.Update(func(tx *Tx) error {
		// Run the manual Commit inside a nested func so its deferred
		// recover only guards that call, not the whole Update block.
		func() {
			defer func() {
				if r := recover(); r != nil {
					panicked = true
				}
			}()

			if err := tx.Commit(); err != nil {
				t.Fatal(err)
			}
		}()
		return nil
	}); err != nil {
		t.Fatal(err)
	} else if !panicked {
		t.Fatal("expected panic")
	}
}
+
+// Ensure a panic occurs while trying to rollback a managed transaction.
// Ensure a panic occurs while trying to rollback a managed transaction.
func TestDB_Update_ManualRollback(t *testing.T) {
	db := MustOpenDB()
	defer db.MustClose()

	var panicked bool
	if err := db.Update(func(tx *Tx) error {
		// Run the manual Rollback inside a nested func so its deferred
		// recover only guards that call, not the whole Update block.
		func() {
			defer func() {
				if r := recover(); r != nil {
					panicked = true
				}
			}()

			if err := tx.Rollback(); err != nil {
				t.Fatal(err)
			}
		}()
		return nil
	}); err != nil {
		t.Fatal(err)
	} else if !panicked {
		t.Fatal("expected panic")
	}
}
+
+// Ensure a panic occurs while trying to commit a managed transaction.
// Ensure a panic occurs while trying to commit a managed transaction.
func TestDB_View_ManualCommit(t *testing.T) {
	db := MustOpenDB()
	defer db.MustClose()

	var panicked bool
	if err := db.View(func(tx *Tx) error {
		// Run the manual Commit inside a nested func so its deferred
		// recover only guards that call, not the whole View block.
		func() {
			defer func() {
				if r := recover(); r != nil {
					panicked = true
				}
			}()

			if err := tx.Commit(); err != nil {
				t.Fatal(err)
			}
		}()
		return nil
	}); err != nil {
		t.Fatal(err)
	} else if !panicked {
		t.Fatal("expected panic")
	}
}
+
+// Ensure a panic occurs while trying to rollback a managed transaction.
// Ensure a panic occurs while trying to rollback a managed transaction.
func TestDB_View_ManualRollback(t *testing.T) {
	db := MustOpenDB()
	defer db.MustClose()

	var panicked bool
	if err := db.View(func(tx *Tx) error {
		// Run the manual Rollback inside a nested func so its deferred
		// recover only guards that call, not the whole View block.
		func() {
			defer func() {
				if r := recover(); r != nil {
					panicked = true
				}
			}()

			if err := tx.Rollback(); err != nil {
				t.Fatal(err)
			}
		}()
		return nil
	}); err != nil {
		t.Fatal(err)
	} else if !panicked {
		t.Fatal("expected panic")
	}
}
+
+// Ensure a write transaction that panics does not hold open locks.
// Ensure a write transaction that panics does not hold open locks.
func TestDB_Update_Panic(t *testing.T) {
	db := MustOpenDB()
	defer db.MustClose()

	// Panic during update but recover.
	func() {
		defer func() {
			if r := recover(); r != nil {
				t.Log("recover: update", r)
			}
		}()

		if err := db.Update(func(tx *Tx) error {
			if _, err := tx.CreateBucket([]byte("widgets")); err != nil {
				t.Fatal(err)
			}
			panic("omg")
		}); err != nil {
			t.Fatal(err)
		}
	}()

	// Verify we can update again.
	// Note: CreateBucket succeeding here shows the first (panicked)
	// transaction was rolled back rather than committed.
	if err := db.Update(func(tx *Tx) error {
		if _, err := tx.CreateBucket([]byte("widgets")); err != nil {
			t.Fatal(err)
		}
		return nil
	}); err != nil {
		t.Fatal(err)
	}

	// Verify that our change persisted.
	if err := db.Update(func(tx *Tx) error {
		if tx.Bucket([]byte("widgets")) == nil {
			t.Fatal("expected bucket")
		}
		return nil
	}); err != nil {
		t.Fatal(err)
	}
}
+
+// Ensure a database can return an error through a read-only transactional block.
+func TestDB_View_Error(t *testing.T) {
+ db := MustOpenDB()
+ defer db.MustClose()
+
+ if err := db.View(func(tx *Tx) error {
+ return errors.New("xxx")
+ }); err == nil || err.Error() != "xxx" {
+ t.Fatalf("unexpected error: %s", err)
+ }
+}
+
+// Ensure a read transaction that panics does not hold open locks.
// Ensure a read transaction that panics does not hold open locks.
func TestDB_View_Panic(t *testing.T) {
	db := MustOpenDB()
	defer db.MustClose()

	// Seed a bucket for the read transactions to find.
	if err := db.Update(func(tx *Tx) error {
		if _, err := tx.CreateBucket([]byte("widgets")); err != nil {
			t.Fatal(err)
		}
		return nil
	}); err != nil {
		t.Fatal(err)
	}

	// Panic during view transaction but recover.
	func() {
		defer func() {
			if r := recover(); r != nil {
				t.Log("recover: view", r)
			}
		}()

		if err := db.View(func(tx *Tx) error {
			if tx.Bucket([]byte("widgets")) == nil {
				t.Fatal("expected bucket")
			}
			panic("omg")
		}); err != nil {
			t.Fatal(err)
		}
	}()

	// Verify that we can still use read transactions.
	if err := db.View(func(tx *Tx) error {
		if tx.Bucket([]byte("widgets")) == nil {
			t.Fatal("expected bucket")
		}
		return nil
	}); err != nil {
		t.Fatal(err)
	}
}
+
+// Ensure that DB stats can be returned.
// Ensure that DB stats can be returned.
func TestDB_Stats(t *testing.T) {
	db := MustOpenDB()
	defer db.MustClose()
	if err := db.Update(func(tx *Tx) error {
		_, err := tx.CreateBucket([]byte("widgets"))
		return err
	}); err != nil {
		t.Fatal(err)
	}

	// The exact page counts below encode the expected cost of a single
	// bucket-creating transaction on a fresh database.
	stats := db.Stats()
	if stats.TxStats.PageCount != 2 {
		t.Fatalf("unexpected TxStats.PageCount: %d", stats.TxStats.PageCount)
	} else if stats.FreePageN != 0 {
		t.Fatalf("unexpected FreePageN != 0: %d", stats.FreePageN)
	} else if stats.PendingPageN != 2 {
		t.Fatalf("unexpected PendingPageN != 2: %d", stats.PendingPageN)
	}
}
+
+// Ensure that database pages are in expected order and type.
+func TestDB_Consistency(t *testing.T) {
+ db := MustOpenDB()
+ defer db.MustClose()
+ if err := db.Update(func(tx *Tx) error {
+ _, err := tx.CreateBucket([]byte("widgets"))
+ return err
+ }); err != nil {
+ t.Fatal(err)
+ }
+
+ for i := 0; i < 10; i++ {
+ if err := db.Update(func(tx *Tx) error {
+ if err := tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar")); err != nil {
+ t.Fatal(err)
+ }
+ return nil
+ }); err != nil {
+ t.Fatal(err)
+ }
+ }
+
+ if err := db.Update(func(tx *Tx) error {
+ if p, _ := tx.Page(0); p == nil {
+ t.Fatal("expected page")
+ } else if p.Type != "meta" {
+ t.Fatalf("unexpected page type: %s", p.Type)
+ }
+
+ if p, _ := tx.Page(1); p == nil {
+ t.Fatal("expected page")
+ } else if p.Type != "meta" {
+ t.Fatalf("unexpected page type: %s", p.Type)
+ }
+
+ if p, _ := tx.Page(2); p == nil {
+ t.Fatal("expected page")
+ } else if p.Type != "free" {
+ t.Fatalf("unexpected page type: %s", p.Type)
+ }
+
+ if p, _ := tx.Page(3); p == nil {
+ t.Fatal("expected page")
+ } else if p.Type != "free" {
+ t.Fatalf("unexpected page type: %s", p.Type)
+ }
+
+ if p, _ := tx.Page(4); p == nil {
+ t.Fatal("expected page")
+ } else if p.Type != "leaf" {
+ t.Fatalf("unexpected page type: %s", p.Type)
+ }
+
+ if p, _ := tx.Page(5); p == nil {
+ t.Fatal("expected page")
+ } else if p.Type != "freelist" {
+ t.Fatalf("unexpected page type: %s", p.Type)
+ }
+
+ if p, _ := tx.Page(6); p != nil {
+ t.Fatal("unexpected page")
+ }
+ return nil
+ }); err != nil {
+ t.Fatal(err)
+ }
+}
+
+// Ensure that DB stats can be subtracted from one another.
+func TestDBStats_Sub(t *testing.T) {
+ var a, b Stats
+ a.TxStats.PageCount = 3
+ a.FreePageN = 4
+ b.TxStats.PageCount = 10
+ b.FreePageN = 14
+ diff := b.Sub(&a)
+ if diff.TxStats.PageCount != 7 {
+ t.Fatalf("unexpected TxStats.PageCount: %d", diff.TxStats.PageCount)
+ }
+
+ // free page stats are copied from the receiver and not subtracted
+ if diff.FreePageN != 14 {
+ t.Fatalf("unexpected FreePageN: %d", diff.FreePageN)
+ }
+}
+
+// Ensure two functions can perform updates in a single batch.
// Ensure two functions can perform updates in a single batch.
func TestDB_Batch(t *testing.T) {
	db := MustOpenDB()
	defer db.MustClose()

	if err := db.Update(func(tx *Tx) error {
		if _, err := tx.CreateBucket([]byte("widgets")); err != nil {
			t.Fatal(err)
		}
		return nil
	}); err != nil {
		t.Fatal(err)
	}

	// Iterate over multiple updates in separate goroutines.
	// Each goroutine reports its Batch result over the channel so all
	// failures are checked on the test goroutine.
	n := 2
	ch := make(chan error)
	for i := 0; i < n; i++ {
		go func(i int) {
			ch <- db.Batch(func(tx *Tx) error {
				return tx.Bucket([]byte("widgets")).Put(u64tob(uint64(i)), []byte{})
			})
		}(i)
	}

	// Check all responses to make sure there's no error.
	for i := 0; i < n; i++ {
		if err := <-ch; err != nil {
			t.Fatal(err)
		}
	}

	// Ensure data is correct.
	if err := db.View(func(tx *Tx) error {
		b := tx.Bucket([]byte("widgets"))
		for i := 0; i < n; i++ {
			if v := b.Get(u64tob(uint64(i))); v == nil {
				t.Errorf("key not found: %d", i)
			}
		}
		return nil
	}); err != nil {
		t.Fatal(err)
	}
}
+
// Ensure that a panic raised inside a batched function propagates to the
// Batch caller instead of being converted into an error.
func TestDB_Batch_Panic(t *testing.T) {
	db := MustOpenDB()
	defer db.MustClose()

	// bork is a unique pointer value so the recovered panic payload can
	// be compared by identity.
	var sentinel int
	var bork = &sentinel
	var problem interface{}
	var err error

	// Execute a function inside a batch that panics.
	func() {
		defer func() {
			if p := recover(); p != nil {
				problem = p
			}
		}()
		err = db.Batch(func(tx *Tx) error {
			panic(bork)
		})
	}()

	// Verify there is no error.
	if g, e := err, error(nil); g != e {
		t.Fatalf("wrong error: %v != %v", g, e)
	}
	// Verify the panic was captured.
	if g, e := problem, bork; g != e {
		t.Fatalf("wrong error: %v != %v", g, e)
	}
}
+
// Ensure a batch is triggered by reaching MaxBatchSize rather than by the
// (deliberately huge) MaxBatchDelay timer.
func TestDB_BatchFull(t *testing.T) {
	db := MustOpenDB()
	defer db.MustClose()
	if err := db.Update(func(tx *Tx) error {
		_, err := tx.CreateBucket([]byte("widgets"))
		return err
	}); err != nil {
		t.Fatal(err)
	}

	const size = 3
	// buffered so we never leak goroutines
	ch := make(chan error, size)
	put := func(i int) {
		ch <- db.Batch(func(tx *Tx) error {
			return tx.Bucket([]byte("widgets")).Put(u64tob(uint64(i)), []byte{})
		})
	}

	db.MaxBatchSize = size
	// high enough to never trigger here
	db.MaxBatchDelay = 1 * time.Hour

	go put(1)
	go put(2)

	// Give the batch a chance to exhibit bugs.
	time.Sleep(10 * time.Millisecond)

	// not triggered yet: only 2 of the 3 required calls have been made
	select {
	case <-ch:
		t.Fatalf("batch triggered too early")
	default:
	}

	// The third call fills the batch and must trigger it.
	go put(3)

	// Check all responses to make sure there's no error.
	for i := 0; i < size; i++ {
		if err := <-ch; err != nil {
			t.Fatal(err)
		}
	}

	// Ensure data is correct.
	if err := db.View(func(tx *Tx) error {
		b := tx.Bucket([]byte("widgets"))
		for i := 1; i <= size; i++ {
			if v := b.Get(u64tob(uint64(i))); v == nil {
				t.Errorf("key not found: %d", i)
			}
		}
		return nil
	}); err != nil {
		t.Fatal(err)
	}
}
+
// Ensure a batch is triggered by MaxBatchDelay alone, with MaxBatchSize
// set too high to ever fill.
func TestDB_BatchTime(t *testing.T) {
	db := MustOpenDB()
	defer db.MustClose()
	if err := db.Update(func(tx *Tx) error {
		_, err := tx.CreateBucket([]byte("widgets"))
		return err
	}); err != nil {
		t.Fatal(err)
	}

	const size = 1
	// buffered so we never leak goroutines
	ch := make(chan error, size)
	put := func(i int) {
		ch <- db.Batch(func(tx *Tx) error {
			return tx.Bucket([]byte("widgets")).Put(u64tob(uint64(i)), []byte{})
		})
	}

	// Size can never be reached with a single call; a zero delay forces
	// the timer path.
	db.MaxBatchSize = 1000
	db.MaxBatchDelay = 0

	go put(1)

	// Batch must trigger by time alone.

	// Check all responses to make sure there's no error.
	for i := 0; i < size; i++ {
		if err := <-ch; err != nil {
			t.Fatal(err)
		}
	}

	// Ensure data is correct.
	if err := db.View(func(tx *Tx) error {
		b := tx.Bucket([]byte("widgets"))
		for i := 1; i <= size; i++ {
			if v := b.Get(u64tob(uint64(i))); v == nil {
				t.Errorf("key not found: %d", i)
			}
		}
		return nil
	}); err != nil {
		t.Fatal(err)
	}
}
+
// ExampleDB_Update demonstrates writing and reading back a key using the
// managed Update/View transaction helpers.
func ExampleDB_Update() {
	// Open the database.
	db, err := Open(tempfile(), 0666, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer os.Remove(db.Path())

	// Execute several commands within a read-write transaction.
	if err := db.Update(func(tx *Tx) error {
		b, err := tx.CreateBucket([]byte("widgets"))
		if err != nil {
			return err
		}
		if err := b.Put([]byte("foo"), []byte("bar")); err != nil {
			return err
		}
		return nil
	}); err != nil {
		log.Fatal(err)
	}

	// Read the value back from a separate read-only transaction.
	if err := db.View(func(tx *Tx) error {
		value := tx.Bucket([]byte("widgets")).Get([]byte("foo"))
		fmt.Printf("The value of 'foo' is: %s\n", value)
		return nil
	}); err != nil {
		log.Fatal(err)
	}

	// Close database to release the file lock.
	if err := db.Close(); err != nil {
		log.Fatal(err)
	}

	// Output:
	// The value of 'foo' is: bar
}
+
// ExampleDB_View demonstrates reading data inside a read-only View block.
func ExampleDB_View() {
	// Open the database.
	db, err := Open(tempfile(), 0666, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer os.Remove(db.Path())

	// Insert data into a bucket.
	if err := db.Update(func(tx *Tx) error {
		b, err := tx.CreateBucket([]byte("people"))
		if err != nil {
			return err
		}
		if err := b.Put([]byte("john"), []byte("doe")); err != nil {
			return err
		}
		if err := b.Put([]byte("susy"), []byte("que")); err != nil {
			return err
		}
		return nil
	}); err != nil {
		log.Fatal(err)
	}

	// Access data from within a read-only transactional block.
	if err := db.View(func(tx *Tx) error {
		v := tx.Bucket([]byte("people")).Get([]byte("john"))
		fmt.Printf("John's last name is %s.\n", v)
		return nil
	}); err != nil {
		log.Fatal(err)
	}

	// Close database to release the file lock.
	if err := db.Close(); err != nil {
		log.Fatal(err)
	}

	// Output:
	// John's last name is doe.
}
+
// ExampleDB_Begin_ReadOnly demonstrates manually managed transactions:
// a writable Begin/Commit followed by a read-only Begin/Rollback.
func ExampleDB_Begin_ReadOnly() {
	// Open the database.
	db, err := Open(tempfile(), 0666, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer os.Remove(db.Path())

	// Create a bucket using a read-write transaction.
	if err := db.Update(func(tx *Tx) error {
		_, err := tx.CreateBucket([]byte("widgets"))
		return err
	}); err != nil {
		log.Fatal(err)
	}

	// Create several keys in a transaction.
	tx, err := db.Begin(true)
	if err != nil {
		log.Fatal(err)
	}
	b := tx.Bucket([]byte("widgets"))
	if err := b.Put([]byte("john"), []byte("blue")); err != nil {
		log.Fatal(err)
	}
	if err := b.Put([]byte("abby"), []byte("red")); err != nil {
		log.Fatal(err)
	}
	if err := b.Put([]byte("zephyr"), []byte("purple")); err != nil {
		log.Fatal(err)
	}
	if err := tx.Commit(); err != nil {
		log.Fatal(err)
	}

	// Iterate over the values in sorted key order.
	tx, err = db.Begin(false)
	if err != nil {
		log.Fatal(err)
	}
	c := tx.Bucket([]byte("widgets")).Cursor()
	for k, v := c.First(); k != nil; k, v = c.Next() {
		fmt.Printf("%s likes %s\n", k, v)
	}

	// A read-only transaction is finished with Rollback, not Commit.
	if err := tx.Rollback(); err != nil {
		log.Fatal(err)
	}

	if err := db.Close(); err != nil {
		log.Fatal(err)
	}

	// Output:
	// abby likes red
	// john likes blue
	// zephyr likes purple
}
+
// BenchmarkDBBatchAutomatic measures 1000 concurrent single-key writes
// per iteration coalesced automatically via DB.Batch.
func BenchmarkDBBatchAutomatic(b *testing.B) {
	db := MustOpenDB()
	defer db.MustClose()
	if err := db.Update(func(tx *Tx) error {
		_, err := tx.CreateBucket([]byte("bench"))
		return err
	}); err != nil {
		b.Fatal(err)
	}

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		// start gates all goroutines so they contend simultaneously.
		start := make(chan struct{})
		var wg sync.WaitGroup

		for round := 0; round < 1000; round++ {
			wg.Add(1)

			go func(id uint32) {
				defer wg.Done()
				<-start

				// Derive a key from the goroutine id via FNV-1a.
				h := fnv.New32a()
				buf := make([]byte, 4)
				binary.LittleEndian.PutUint32(buf, id)
				_, _ = h.Write(buf[:])
				k := h.Sum(nil)
				insert := func(tx *Tx) error {
					b := tx.Bucket([]byte("bench"))
					return b.Put(k, []byte("filler"))
				}
				if err := db.Batch(insert); err != nil {
					b.Error(err)
					return
				}
			}(uint32(round))
		}
		close(start)
		wg.Wait()
	}

	b.StopTimer()
	validateBatchBench(b, db)
}
+
// BenchmarkDBBatchSingle is the baseline for BenchmarkDBBatchAutomatic:
// the same 1000 concurrent writes, each in its own Update transaction.
func BenchmarkDBBatchSingle(b *testing.B) {
	db := MustOpenDB()
	defer db.MustClose()
	if err := db.Update(func(tx *Tx) error {
		_, err := tx.CreateBucket([]byte("bench"))
		return err
	}); err != nil {
		b.Fatal(err)
	}

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		// start gates all goroutines so they contend simultaneously.
		start := make(chan struct{})
		var wg sync.WaitGroup

		for round := 0; round < 1000; round++ {
			wg.Add(1)
			go func(id uint32) {
				defer wg.Done()
				<-start

				// Derive a key from the goroutine id via FNV-1a.
				h := fnv.New32a()
				buf := make([]byte, 4)
				binary.LittleEndian.PutUint32(buf, id)
				_, _ = h.Write(buf[:])
				k := h.Sum(nil)
				insert := func(tx *Tx) error {
					b := tx.Bucket([]byte("bench"))
					return b.Put(k, []byte("filler"))
				}
				if err := db.Update(insert); err != nil {
					b.Error(err)
					return
				}
			}(uint32(round))
		}
		close(start)
		wg.Wait()
	}

	b.StopTimer()
	validateBatchBench(b, db)
}
+
+func BenchmarkDBBatchManual10x100(b *testing.B) {
+ db := MustOpenDB()
+ defer db.MustClose()
+ if err := db.Update(func(tx *Tx) error {
+ _, err := tx.CreateBucket([]byte("bench"))
+ return err
+ }); err != nil {
+ b.Fatal(err)
+ }
+
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ start := make(chan struct{})
+ var wg sync.WaitGroup
+
+ for major := 0; major < 10; major++ {
+ wg.Add(1)
+ go func(id uint32) {
+ defer wg.Done()
+ <-start
+
+ insert100 := func(tx *Tx) error {
+ h := fnv.New32a()
+ buf := make([]byte, 4)
+ for minor := uint32(0); minor < 100; minor++ {
+ binary.LittleEndian.PutUint32(buf, uint32(id*100+minor))
+ h.Reset()
+ _, _ = h.Write(buf[:])
+ k := h.Sum(nil)
+ b := tx.Bucket([]byte("bench"))
+ if err := b.Put(k, []byte("filler")); err != nil {
+ return err
+ }
+ }
+ return nil
+ }
+ if err := db.Update(insert100); err != nil {
+ b.Fatal(err)
+ }
+ }(uint32(major))
+ }
+ close(start)
+ wg.Wait()
+ }
+
+ b.StopTimer()
+ validateBatchBench(b, db)
+}
+
// validateBatchBench verifies that a DBBatch benchmark wrote exactly the
// 1000 expected keys into the "bench" bucket, then rolls the check
// transaction back (via a sentinel error) so the bucket is left intact
// for the next b.N iteration's deletes to be discarded too.
func validateBatchBench(b *testing.B, db *WDB) {
	var rollback = errors.New("sentinel error to cause rollback")
	validate := func(tx *Tx) error {
		bucket := tx.Bucket([]byte("bench"))
		h := fnv.New32a()
		buf := make([]byte, 4)
		for id := uint32(0); id < 1000; id++ {
			// Recompute the FNV-1a key the benchmark goroutines used.
			binary.LittleEndian.PutUint32(buf, id)
			h.Reset()
			_, _ = h.Write(buf[:])
			k := h.Sum(nil)
			v := bucket.Get(k)
			if v == nil {
				b.Errorf("not found id=%d key=%x", id, k)
				continue
			}
			if g, e := v, []byte("filler"); !bytes.Equal(g, e) {
				b.Errorf("bad value for id=%d key=%x: %s != %q", id, k, g, e)
			}
			if err := bucket.Delete(k); err != nil {
				return err
			}
		}
		// should be empty now
		c := bucket.Cursor()
		for k, v := c.First(); k != nil; k, v = c.Next() {
			b.Errorf("unexpected key: %x = %q", k, v)
		}
		return rollback
	}
	if err := db.Update(validate); err != nil && err != rollback {
		b.Error(err)
	}
}
+
+// DB is a test wrapper for DB.
+type WDB struct {
+ *DB
+}
+
+// MustOpenDB returns a new, open DB at a temporary location.
+func MustOpenDB() *WDB {
+ db, err := Open(tempfile(), 0666, nil)
+ if err != nil {
+ panic(err)
+ }
+ return &WDB{db}
+}
+
+// Close closes the database and deletes the underlying file.
+func (db *WDB) Close() error {
+ // Log statistics.
+ if *statsFlag {
+ db.PrintStats()
+ }
+
+ // Check database consistency after every test.
+ db.MustCheck()
+
+ // Close database and remove file.
+ defer os.Remove(db.Path())
+ return db.DB.Close()
+}
+
+// MustClose closes the database and deletes the underlying file. Panic on error.
+func (db *WDB) MustClose() {
+ if err := db.Close(); err != nil {
+ panic(err)
+ }
+}
+
// PrintStats prints the database's transaction statistics to stdout:
// page, cursor and node counters on the first line; rebalance, spill and
// write counts with their (truncated) durations on the second.
func (db *WDB) PrintStats() {
	var stats = db.Stats()
	fmt.Printf("[db] %-20s %-20s %-20s\n",
		fmt.Sprintf("pg(%d/%d)", stats.TxStats.PageCount, stats.TxStats.PageAlloc),
		fmt.Sprintf("cur(%d)", stats.TxStats.CursorCount),
		fmt.Sprintf("node(%d/%d)", stats.TxStats.NodeCount, stats.TxStats.NodeDeref),
	)
	fmt.Printf("     %-20s %-20s %-20s\n",
		fmt.Sprintf("rebal(%d/%v)", stats.TxStats.Rebalance, truncDuration(stats.TxStats.RebalanceTime)),
		fmt.Sprintf("spill(%d/%v)", stats.TxStats.Spill, truncDuration(stats.TxStats.SpillTime)),
		fmt.Sprintf("w(%d/%v)", stats.TxStats.Write, truncDuration(stats.TxStats.WriteTime)),
	)
}
+
+// MustCheck runs a consistency check on the database and panics if any errors are found.
+func (db *WDB) MustCheck() {
+ if err := db.Update(func(tx *Tx) error {
+ // Collect all the errors.
+ var errors []error
+ for err := range tx.Check() {
+ errors = append(errors, err)
+ if len(errors) > 10 {
+ break
+ }
+ }
+
+ // If errors occurred, copy the DB and print the errors.
+ if len(errors) > 0 {
+ var path = tempfile()
+ if err := tx.CopyFile(path, 0600); err != nil {
+ panic(err)
+ }
+
+ // Print errors.
+ fmt.Print("\n\n")
+ fmt.Printf("consistency check failed (%d errors)\n", len(errors))
+ for _, err := range errors {
+ fmt.Println(err)
+ }
+ fmt.Println("")
+ fmt.Println("db saved to:")
+ fmt.Println(path)
+ fmt.Print("\n\n")
+ os.Exit(-1)
+ }
+
+ return nil
+ }); err != nil && err != ErrDatabaseNotOpen {
+ panic(err)
+ }
+}
+
+// CopyTempFile copies a database to a temporary file.
+func (db *WDB) CopyTempFile() {
+ path := tempfile()
+ if err := db.View(func(tx *Tx) error {
+ return tx.CopyFile(path, 0600)
+ }); err != nil {
+ panic(err)
+ }
+ fmt.Println("db copied to: ", path)
+}
+
+// tempfile returns a temporary file path.
+func tempfile() string {
+ f, err := ioutil.TempFile("", "bolt-")
+ if err != nil {
+ panic(err)
+ }
+ if err := f.Close(); err != nil {
+ panic(err)
+ }
+ if err := os.Remove(f.Name()); err != nil {
+ panic(err)
+ }
+ return f.Name()
+}
+
+// mustContainKeys checks that a bucket contains a given set of keys.
+func mustContainKeys(b *Bucket, m map[string]string) {
+ found := make(map[string]string)
+ if err := b.ForEach(func(k, _ []byte) error {
+ found[string(k)] = ""
+ return nil
+ }); err != nil {
+ panic(err)
+ }
+
+ // Check for keys found in bucket that shouldn't be there.
+ var keys []string
+ for k, _ := range found {
+ if _, ok := m[string(k)]; !ok {
+ keys = append(keys, k)
+ }
+ }
+ if len(keys) > 0 {
+ sort.Strings(keys)
+ panic(fmt.Sprintf("keys found(%d): %s", len(keys), strings.Join(keys, ",")))
+ }
+
+ // Check for keys not found in bucket that should be there.
+ for k, _ := range m {
+ if _, ok := found[string(k)]; !ok {
+ keys = append(keys, k)
+ }
+ }
+ if len(keys) > 0 {
+ sort.Strings(keys)
+ panic(fmt.Sprintf("keys not found(%d): %s", len(keys), strings.Join(keys, ",")))
+ }
+}
+
// trunc limits b to at most length bytes; b is returned unchanged when it
// is already short enough.
func trunc(b []byte, length int) []byte {
	if len(b) <= length {
		return b
	}
	return b[:length]
}
+
+func truncDuration(d time.Duration) string {
+ return regexp.MustCompile(`^(\d+)(\.\d+)`).ReplaceAllString(d.String(), "$1")
+}
+
+func fileSize(path string) int64 {
+ fi, err := os.Stat(path)
+ if err != nil {
+ return 0
+ }
+ return fi.Size()
+}
+
+// u64tob converts a uint64 into an 8-byte slice.
+func u64tob(v uint64) []byte {
+ b := make([]byte, 8)
+ binary.BigEndian.PutUint64(b, v)
+ return b
+}
+
// btou64 decodes an 8-byte big-endian slice into a uint64.
func btou64(b []byte) uint64 {
	return binary.BigEndian.Uint64(b)
}
+
// Ensure that a page is added to a transaction's freelist.
func TestFreelist_free(t *testing.T) {
	f := newFreelist()
	// Freeing page 12 under txid 100 should record it in that tx's pending set.
	f.free(100, &page{id: 12})
	if !reflect.DeepEqual([]pgid{12}, f.pending[100]) {
		t.Fatalf("exp=%v; got=%v", []pgid{12}, f.pending[100])
	}
}
+
// Ensure that a page and its overflow is added to a transaction's freelist.
func TestFreelist_free_overflow(t *testing.T) {
	f := newFreelist()
	// overflow: 3 means the entry spans pages 12..15 inclusive.
	f.free(100, &page{id: 12, overflow: 3})
	if exp := []pgid{12, 13, 14, 15}; !reflect.DeepEqual(exp, f.pending[100]) {
		t.Fatalf("exp=%v; got=%v", exp, f.pending[100])
	}
}
+
// Ensure that a transaction's free pages can be released.
func TestFreelist_release(t *testing.T) {
	f := newFreelist()
	f.free(100, &page{id: 12, overflow: 1})
	f.free(100, &page{id: 9})
	f.free(102, &page{id: 39})
	// Releasing txids 100-101 moves only tx 100's pages into the sorted free list.
	f.release(100)
	f.release(101)
	if exp := []pgid{9, 12, 13}; !reflect.DeepEqual(exp, f.ids) {
		t.Fatalf("exp=%v; got=%v", exp, f.ids)
	}

	// Releasing txid 102 folds in its pending page as well.
	f.release(102)
	if exp := []pgid{9, 12, 13, 39}; !reflect.DeepEqual(exp, f.ids) {
		t.Fatalf("exp=%v; got=%v", exp, f.ids)
	}
}
+
// Ensure that a freelist can find contiguous blocks of pages.
// allocate(n) returns the first id of a contiguous run of n free pages
// (0 when no run is long enough) and removes the run from f.ids.
func TestFreelist_allocate(t *testing.T) {
	f := &freelist{ids: []pgid{3, 4, 5, 6, 7, 9, 12, 13, 18}}
	if id := int(f.allocate(3)); id != 3 {
		t.Fatalf("exp=3; got=%v", id)
	}
	if id := int(f.allocate(1)); id != 6 {
		t.Fatalf("exp=6; got=%v", id)
	}
	// No run of 3 contiguous pages remains, so allocation fails with 0.
	if id := int(f.allocate(3)); id != 0 {
		t.Fatalf("exp=0; got=%v", id)
	}
	if id := int(f.allocate(2)); id != 12 {
		t.Fatalf("exp=12; got=%v", id)
	}
	if id := int(f.allocate(1)); id != 7 {
		t.Fatalf("exp=7; got=%v", id)
	}
	// Zero-page requests always return 0 and leave the list untouched.
	if id := int(f.allocate(0)); id != 0 {
		t.Fatalf("exp=0; got=%v", id)
	}
	if id := int(f.allocate(0)); id != 0 {
		t.Fatalf("exp=0; got=%v", id)
	}
	if exp := []pgid{9, 18}; !reflect.DeepEqual(exp, f.ids) {
		t.Fatalf("exp=%v; got=%v", exp, f.ids)
	}

	if id := int(f.allocate(1)); id != 9 {
		t.Fatalf("exp=9; got=%v", id)
	}
	if id := int(f.allocate(1)); id != 18 {
		t.Fatalf("exp=18; got=%v", id)
	}
	if id := int(f.allocate(1)); id != 0 {
		t.Fatalf("exp=0; got=%v", id)
	}
	if exp := []pgid{}; !reflect.DeepEqual(exp, f.ids) {
		t.Fatalf("exp=%v; got=%v", exp, f.ids)
	}
}
+
// Ensure that a freelist can deserialize from a freelist page.
func TestFreelist_read(t *testing.T) {
	// Create a page. page.count tells read() how many ids to consume.
	var buf [4096]byte
	page := (*page)(unsafe.Pointer(&buf[0]))
	page.flags = freelistPageFlag
	page.count = 2

	// Insert 2 page ids directly after the page header.
	ids := (*[3]pgid)(unsafe.Pointer(&page.ptr))
	ids[0] = 23
	ids[1] = 50

	// Deserialize page into a freelist.
	f := newFreelist()
	f.read(page)

	// Ensure that there are two page ids in the freelist.
	if exp := []pgid{23, 50}; !reflect.DeepEqual(exp, f.ids) {
		t.Fatalf("exp=%v; got=%v", exp, f.ids)
	}
}
+
// Ensure that a freelist can serialize into a freelist page.
// write() must persist both the free ids and every pending id, sorted.
func TestFreelist_write(t *testing.T) {
	// Create a freelist and write it to a page.
	var buf [4096]byte
	f := &freelist{ids: []pgid{12, 39}, pending: make(map[txid][]pgid)}
	f.pending[100] = []pgid{28, 11}
	f.pending[101] = []pgid{3}
	p := (*page)(unsafe.Pointer(&buf[0]))
	if err := f.write(p); err != nil {
		t.Fatal(err)
	}

	// Read the page back out.
	f2 := newFreelist()
	f2.read(p)

	// Ensure that the freelist is correct.
	// All pages should be present and in sorted order.
	if exp := []pgid{3, 11, 12, 28, 39}; !reflect.DeepEqual(exp, f2.ids) {
		t.Fatalf("exp=%v; got=%v", exp, f2.ids)
	}
}
+
// Benchmarks for releasing a pending set into free lists of various sizes.
func Benchmark_FreelistRelease10K(b *testing.B)    { benchmark_FreelistRelease(b, 10000) }
func Benchmark_FreelistRelease100K(b *testing.B)   { benchmark_FreelistRelease(b, 100000) }
func Benchmark_FreelistRelease1000K(b *testing.B)  { benchmark_FreelistRelease(b, 1000000) }
func Benchmark_FreelistRelease10000K(b *testing.B) { benchmark_FreelistRelease(b, 10000000) }

// benchmark_FreelistRelease measures f.release merging a pending list
// (1/400 the size of the free list) into `size` free ids. A fresh freelist
// is built each iteration since release consumes the pending entry.
func benchmark_FreelistRelease(b *testing.B, size int) {
	ids := randomPgids(size)
	pending := randomPgids(len(ids) / 400)
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		f := &freelist{ids: ids, pending: map[txid][]pgid{1: pending}}
		f.release(1)
	}
}
+
// randomPgids returns n sorted pseudo-random page ids. The global math/rand
// source is reseeded with a fixed value so benchmark inputs are reproducible.
func randomPgids(n int) []pgid {
	rand.Seed(42)
	pgids := make(pgids, n)
	for i := range pgids {
		pgids[i] = pgid(rand.Int63())
	}
	sort.Sort(pgids)
	return pgids
}
+
// Ensure that a node can insert a key/value, keep inodes sorted by key,
// and replace the value and flags when a key is put twice.
func TestNode_put(t *testing.T) {
	n := &node{inodes: make(inodes, 0), bucket: &Bucket{tx: &Tx{meta: &meta{pgid: 1}}}}
	n.put([]byte("baz"), []byte("baz"), []byte("2"), 0, 0)
	n.put([]byte("foo"), []byte("foo"), []byte("0"), 0, 0)
	n.put([]byte("bar"), []byte("bar"), []byte("1"), 0, 0)
	// Re-putting "foo" must overwrite its value and flags, not add a duplicate.
	n.put([]byte("foo"), []byte("foo"), []byte("3"), 0, leafPageFlag)

	if len(n.inodes) != 3 {
		t.Fatalf("exp=3; got=%d", len(n.inodes))
	}
	if k, v := n.inodes[0].key, n.inodes[0].value; string(k) != "bar" || string(v) != "1" {
		t.Fatalf("exp=<bar,1>; got=<%s,%s>", k, v)
	}
	if k, v := n.inodes[1].key, n.inodes[1].value; string(k) != "baz" || string(v) != "2" {
		t.Fatalf("exp=<baz,2>; got=<%s,%s>", k, v)
	}
	if k, v := n.inodes[2].key, n.inodes[2].value; string(k) != "foo" || string(v) != "3" {
		t.Fatalf("exp=<foo,3>; got=<%s,%s>", k, v)
	}
	if n.inodes[2].flags != uint32(leafPageFlag) {
		t.Fatalf("not a leaf: %d", n.inodes[2].flags)
	}
}
+
// Ensure that a node can deserialize from a leaf page. The page is built by
// hand: element headers first, then key/value bytes, with each element's
// pos offset relative to the element header itself.
func TestNode_read_LeafPage(t *testing.T) {
	// Create a page.
	var buf [4096]byte
	page := (*page)(unsafe.Pointer(&buf[0]))
	page.flags = leafPageFlag
	page.count = 2

	// Insert 2 elements at the beginning. sizeof(leafPageElement) == 16
	nodes := (*[3]leafPageElement)(unsafe.Pointer(&page.ptr))
	nodes[0] = leafPageElement{flags: 0, pos: 32, ksize: 3, vsize: 4}  // pos = sizeof(leafPageElement) * 2
	nodes[1] = leafPageElement{flags: 0, pos: 23, ksize: 10, vsize: 3} // pos = sizeof(leafPageElement) + 3 + 4
	// Write data for the nodes at the end.
	data := (*[4096]byte)(unsafe.Pointer(&nodes[2]))
	copy(data[:], []byte("barfooz"))
	copy(data[7:], []byte("helloworldbye"))

	// Deserialize page into a leaf.
	n := &node{}
	n.read(page)

	// Check that there are two inodes with correct data.
	if !n.isLeaf {
		t.Fatal("expected leaf")
	}
	if len(n.inodes) != 2 {
		t.Fatalf("exp=2; got=%d", len(n.inodes))
	}
	if k, v := n.inodes[0].key, n.inodes[0].value; string(k) != "bar" || string(v) != "fooz" {
		t.Fatalf("exp=<bar,fooz>; got=<%s,%s>", k, v)
	}
	if k, v := n.inodes[1].key, n.inodes[1].value; string(k) != "helloworld" || string(v) != "bye" {
		t.Fatalf("exp=<helloworld,bye>; got=<%s,%s>", k, v)
	}
}
+
// Ensure that a node can serialize into a leaf page and be read back,
// with entries sorted by key regardless of insertion order.
func TestNode_write_LeafPage(t *testing.T) {
	// Create a node.
	n := &node{isLeaf: true, inodes: make(inodes, 0), bucket: &Bucket{tx: &Tx{db: &DB{}, meta: &meta{pgid: 1}}}}
	n.put([]byte("susy"), []byte("susy"), []byte("que"), 0, 0)
	n.put([]byte("ricki"), []byte("ricki"), []byte("lake"), 0, 0)
	n.put([]byte("john"), []byte("john"), []byte("johnson"), 0, 0)

	// Write it to a page.
	var buf [4096]byte
	p := (*page)(unsafe.Pointer(&buf[0]))
	n.write(p)

	// Read the page back in.
	n2 := &node{}
	n2.read(p)

	// Check that the two pages are the same.
	if len(n2.inodes) != 3 {
		t.Fatalf("exp=3; got=%d", len(n2.inodes))
	}
	if k, v := n2.inodes[0].key, n2.inodes[0].value; string(k) != "john" || string(v) != "johnson" {
		t.Fatalf("exp=<john,johnson>; got=<%s,%s>", k, v)
	}
	if k, v := n2.inodes[1].key, n2.inodes[1].value; string(k) != "ricki" || string(v) != "lake" {
		t.Fatalf("exp=<ricki,lake>; got=<%s,%s>", k, v)
	}
	if k, v := n2.inodes[2].key, n2.inodes[2].value; string(k) != "susy" || string(v) != "que" {
		t.Fatalf("exp=<susy,que>; got=<%s,%s>", k, v)
	}
}
+
// Ensure that a node can split into appropriate subgroups when the page
// size is too small to hold all inodes.
func TestNode_split(t *testing.T) {
	// Create a node.
	n := &node{inodes: make(inodes, 0), bucket: &Bucket{tx: &Tx{db: &DB{}, meta: &meta{pgid: 1}}}}
	n.put([]byte("00000001"), []byte("00000001"), []byte("0123456701234567"), 0, 0)
	n.put([]byte("00000002"), []byte("00000002"), []byte("0123456701234567"), 0, 0)
	n.put([]byte("00000003"), []byte("00000003"), []byte("0123456701234567"), 0, 0)
	n.put([]byte("00000004"), []byte("00000004"), []byte("0123456701234567"), 0, 0)
	n.put([]byte("00000005"), []byte("00000005"), []byte("0123456701234567"), 0, 0)

	// Split between 2 & 3 for a 100-byte page size.
	n.split(100)

	// Splitting creates a parent holding the two resulting child nodes.
	var parent = n.parent
	if len(parent.children) != 2 {
		t.Fatalf("exp=2; got=%d", len(parent.children))
	}
	if len(parent.children[0].inodes) != 2 {
		t.Fatalf("exp=2; got=%d", len(parent.children[0].inodes))
	}
	if len(parent.children[1].inodes) != 3 {
		t.Fatalf("exp=3; got=%d", len(parent.children[1].inodes))
	}
}
+
// Ensure that a page with the minimum number of inodes just returns a single
// node: a split is never performed below the minimum key count, even when
// the requested page size (20) is tiny.
func TestNode_split_MinKeys(t *testing.T) {
	// Create a node.
	n := &node{inodes: make(inodes, 0), bucket: &Bucket{tx: &Tx{db: &DB{}, meta: &meta{pgid: 1}}}}
	n.put([]byte("00000001"), []byte("00000001"), []byte("0123456701234567"), 0, 0)
	n.put([]byte("00000002"), []byte("00000002"), []byte("0123456701234567"), 0, 0)

	// Split.
	n.split(20)
	if n.parent != nil {
		t.Fatalf("expected nil parent")
	}
}
+
// Ensure that a node that has keys that all fit on a page just returns one
// leaf: no parent is created when the page size (4096) is large enough.
func TestNode_split_SinglePage(t *testing.T) {
	// Create a node.
	n := &node{inodes: make(inodes, 0), bucket: &Bucket{tx: &Tx{db: &DB{}, meta: &meta{pgid: 1}}}}
	n.put([]byte("00000001"), []byte("00000001"), []byte("0123456701234567"), 0, 0)
	n.put([]byte("00000002"), []byte("00000002"), []byte("0123456701234567"), 0, 0)
	n.put([]byte("00000003"), []byte("00000003"), []byte("0123456701234567"), 0, 0)
	n.put([]byte("00000004"), []byte("00000004"), []byte("0123456701234567"), 0, 0)
	n.put([]byte("00000005"), []byte("00000005"), []byte("0123456701234567"), 0, 0)

	// Split.
	n.split(4096)
	if n.parent != nil {
		t.Fatalf("expected nil parent")
	}
}
+
// Ensure that the page type can be returned in human readable format,
// with unknown flag values rendered as "unknown<hex>".
func TestPage_typ(t *testing.T) {
	if typ := (&page{flags: branchPageFlag}).typ(); typ != "branch" {
		t.Fatalf("exp=branch; got=%v", typ)
	}
	if typ := (&page{flags: leafPageFlag}).typ(); typ != "leaf" {
		t.Fatalf("exp=leaf; got=%v", typ)
	}
	if typ := (&page{flags: metaPageFlag}).typ(); typ != "meta" {
		t.Fatalf("exp=meta; got=%v", typ)
	}
	if typ := (&page{flags: freelistPageFlag}).typ(); typ != "freelist" {
		t.Fatalf("exp=freelist; got=%v", typ)
	}
	if typ := (&page{flags: 20000}).typ(); typ != "unknown<4e20>" {
		t.Fatalf("exp=unknown<4e20>; got=%v", typ)
	}
}
+
// Ensure that the hexdump debugging function doesn't blow up.
// Only exercised for absence of panics; output is not checked.
func TestPage_dump(t *testing.T) {
	(&page{id: 256}).hexdump(16)
}
+
// TestPgids_merge checks that merging two sorted pgid lists yields a single
// sorted list containing all elements of both.
func TestPgids_merge(t *testing.T) {
	a := pgids{4, 5, 6, 10, 11, 12, 13, 27}
	b := pgids{1, 3, 8, 9, 25, 30}
	c := a.merge(b)
	if !reflect.DeepEqual(c, pgids{1, 3, 4, 5, 6, 8, 9, 10, 11, 12, 13, 25, 27, 30}) {
		t.Errorf("mismatch: %v", c)
	}

	a = pgids{4, 5, 6, 10, 11, 12, 13, 27, 35, 36}
	b = pgids{8, 9, 25, 30}
	c = a.merge(b)
	if !reflect.DeepEqual(c, pgids{4, 5, 6, 8, 9, 10, 11, 12, 13, 25, 27, 30, 35, 36}) {
		t.Errorf("mismatch: %v", c)
	}
}
+
// TestPgids_merge_quick property-tests merge: for random sorted inputs, the
// result must equal the concatenation of both lists, sorted.
func TestPgids_merge_quick(t *testing.T) {
	if err := quick.Check(func(a, b pgids) bool {
		// Sort incoming lists.
		sort.Sort(a)
		sort.Sort(b)

		// Merge the two lists together.
		got := a.merge(b)

		// The expected value should be the two lists combined and sorted.
		exp := append(a, b...)
		sort.Sort(exp)

		if !reflect.DeepEqual(exp, got) {
			t.Errorf("\nexp=%+v\ngot=%+v\n", exp, got)
			return false
		}

		return true
	}, nil); err != nil {
		t.Fatal(err)
	}
}
+
+// testing/quick defaults to 5 iterations and a random seed.
+// You can override these settings from the command line:
+//
+// -quick.count The number of iterations to perform.
+// -quick.seed The seed to use for randomizing.
+// -quick.maxitems The maximum number of items to insert into a DB.
+// -quick.maxksize The maximum size of a key.
+// -quick.maxvsize The maximum size of a value.
+//
+
// Command-line knobs for the testing/quick-based tests (see the usage note
// above): iteration count, RNG seed, and size limits for items/keys/values.
var qcount, qseed, qmaxitems, qmaxksize, qmaxvsize int

// init registers the -quick.* flags, parses the command line, and logs the
// effective seed/settings to stderr so failures can be reproduced.
// NOTE(review): flag.Parse from init only works because this is a test
// binary — confirm it does not conflict with other flag registrations.
func init() {
	flag.IntVar(&qcount, "quick.count", 5, "")
	flag.IntVar(&qseed, "quick.seed", int(time.Now().UnixNano())%100000, "")
	flag.IntVar(&qmaxitems, "quick.maxitems", 1000, "")
	flag.IntVar(&qmaxksize, "quick.maxksize", 1024, "")
	flag.IntVar(&qmaxvsize, "quick.maxvsize", 1024, "")
	flag.Parse()
	fmt.Fprintln(os.Stderr, "seed:", qseed)
	fmt.Fprintf(os.Stderr, "quick settings: count=%v, items=%v, ksize=%v, vsize=%v\n", qcount, qmaxitems, qmaxksize, qmaxvsize)
}
+
+func qconfig() *quick.Config {
+ return &quick.Config{
+ MaxCount: qcount,
+ Rand: rand.New(rand.NewSource(int64(qseed))),
+ }
+}
+
+type testdata []testdataitem
+
+func (t testdata) Len() int { return len(t) }
+func (t testdata) Swap(i, j int) { t[i], t[j] = t[j], t[i] }
+func (t testdata) Less(i, j int) bool { return bytes.Compare(t[i].Key, t[j].Key) == -1 }
+
+func (t testdata) Generate(rand *rand.Rand, size int) reflect.Value {
+ n := rand.Intn(qmaxitems-1) + 1
+ items := make(testdata, n)
+ used := make(map[string]bool)
+ for i := 0; i < n; i++ {
+ item := &items[i]
+ // Ensure that keys are unique by looping until we find one that we have not already used.
+ for {
+ item.Key = randByteSlice(rand, 1, qmaxksize)
+ if !used[string(item.Key)] {
+ used[string(item.Key)] = true
+ break
+ }
+ }
+ item.Value = randByteSlice(rand, 0, qmaxvsize)
+ }
+ return reflect.ValueOf(items)
+}
+
// revtestdata is testdata that sorts in reverse (descending) key order.
type revtestdata []testdataitem

func (t revtestdata) Len() int           { return len(t) }
func (t revtestdata) Swap(i, j int)      { t[i], t[j] = t[j], t[i] }
func (t revtestdata) Less(i, j int) bool { return bytes.Compare(t[i].Key, t[j].Key) == 1 }
+
// testdataitem is a single random key/value pair generated for quick tests.
type testdataitem struct {
	Key   []byte
	Value []byte
}
+
+func randByteSlice(rand *rand.Rand, minSize, maxSize int) []byte {
+ n := rand.Intn(maxSize-minSize) + minSize
+ b := make([]byte, n)
+ for i := 0; i < n; i++ {
+ b[i] = byte(rand.Intn(255))
+ }
+ return b
+}
+
// Simulation matrix: testSimulate(t, operationCount, parallelism).
func TestSimulate_1op_1p(t *testing.T)     { testSimulate(t, 1, 1) }
func TestSimulate_10op_1p(t *testing.T)    { testSimulate(t, 10, 1) }
func TestSimulate_100op_1p(t *testing.T)   { testSimulate(t, 100, 1) }
func TestSimulate_1000op_1p(t *testing.T)  { testSimulate(t, 1000, 1) }
func TestSimulate_10000op_1p(t *testing.T) { testSimulate(t, 10000, 1) }

func TestSimulate_10op_10p(t *testing.T)    { testSimulate(t, 10, 10) }
func TestSimulate_100op_10p(t *testing.T)   { testSimulate(t, 100, 10) }
func TestSimulate_1000op_10p(t *testing.T)  { testSimulate(t, 1000, 10) }
func TestSimulate_10000op_10p(t *testing.T) { testSimulate(t, 10000, 10) }

func TestSimulate_100op_100p(t *testing.T)   { testSimulate(t, 100, 100) }
func TestSimulate_1000op_100p(t *testing.T)  { testSimulate(t, 1000, 100) }
func TestSimulate_10000op_100p(t *testing.T) { testSimulate(t, 10000, 100) }

func TestSimulate_10000op_1000p(t *testing.T) { testSimulate(t, 10000, 1000) }
+
+// Randomly generate operations on a given database with multiple clients to ensure consistency and thread safety.
+func testSimulate(t *testing.T, threadCount, parallelism int) {
+ if testing.Short() {
+ t.Skip("skipping test in short mode.")
+ }
+
+ rand.Seed(int64(qseed))
+
+ // A list of operations that readers and writers can perform.
+ var readerHandlers = []simulateHandler{simulateGetHandler}
+ var writerHandlers = []simulateHandler{simulateGetHandler, simulatePutHandler}
+
+ var versions = make(map[int]*QuickDB)
+ versions[1] = NewQuickDB()
+
+ db := MustOpenDB()
+ defer db.MustClose()
+
+ var mutex sync.Mutex
+
+ // Run n threads in parallel, each with their own operation.
+ var wg sync.WaitGroup
+ var threads = make(chan bool, parallelism)
+ var i int
+ for {
+ threads <- true
+ wg.Add(1)
+ writable := ((rand.Int() % 100) < 20) // 20% writers
+
+ // Choose an operation to execute.
+ var handler simulateHandler
+ if writable {
+ handler = writerHandlers[rand.Intn(len(writerHandlers))]
+ } else {
+ handler = readerHandlers[rand.Intn(len(readerHandlers))]
+ }
+
+ // Execute a thread for the given operation.
+ go func(writable bool, handler simulateHandler) {
+ defer wg.Done()
+
+ // Start transaction.
+ tx, err := db.Begin(writable)
+ if err != nil {
+ t.Fatal("tx begin: ", err)
+ }
+
+ // Obtain current state of the dataset.
+ mutex.Lock()
+ var qdb = versions[tx.ID()]
+ if writable {
+ qdb = versions[tx.ID()-1].Copy()
+ }
+ mutex.Unlock()
+
+ // Make sure we commit/rollback the tx at the end and update the state.
+ if writable {
+ defer func() {
+ mutex.Lock()
+ versions[tx.ID()] = qdb
+ mutex.Unlock()
+
+ if err := tx.Commit(); err != nil {
+ t.Fatal(err)
+ }
+ }()
+ } else {
+ defer func() { _ = tx.Rollback() }()
+ }
+
+ // Ignore operation if we don't have data yet.
+ if qdb == nil {
+ return
+ }
+
+ // Execute handler.
+ handler(tx, qdb)
+
+ // Release a thread back to the scheduling loop.
+ <-threads
+ }(writable, handler)
+
+ i++
+ if i > threadCount {
+ break
+ }
+ }
+
+ // Wait until all threads are done.
+ wg.Wait()
+}
+
// simulateHandler is one operation run by a simulation client inside an
// open transaction, against both the real DB (tx) and the in-memory model (qdb).
type simulateHandler func(tx *Tx, qdb *QuickDB)
+
// simulateGetHandler retrieves a random key path from the in-memory model
// and verifies that the real database returns the same value; any
// structural mismatch panics to abort the simulation loudly.
func simulateGetHandler(tx *Tx, qdb *QuickDB) {
	// Randomly retrieve an existing key path.
	keys := qdb.Rand()
	if len(keys) == 0 {
		return
	}

	// Retrieve root bucket.
	b := tx.Bucket(keys[0])
	if b == nil {
		panic(fmt.Sprintf("bucket[0] expected: %08x\n", trunc(keys[0], 4)))
	}

	// Drill into nested buckets.
	for _, key := range keys[1 : len(keys)-1] {
		b = b.Bucket(key)
		if b == nil {
			panic(fmt.Sprintf("bucket[n] expected: %v -> %v\n", keys, key))
		}
	}

	// Verify key/value on the final bucket.
	expected := qdb.Get(keys)
	actual := b.Get(keys[len(keys)-1])
	if !bytes.Equal(actual, expected) {
		fmt.Println("=== EXPECTED ===")
		fmt.Println(expected)
		fmt.Println("=== ACTUAL ===")
		fmt.Println(actual)
		fmt.Println("=== END ===")
		panic("value mismatch")
	}
}
+
// simulatePutHandler inserts a random key path and value into the real
// database (creating intermediate buckets as needed) and mirrors the same
// write into the in-memory model so later gets can verify it.
func simulatePutHandler(tx *Tx, qdb *QuickDB) {
	var err error
	keys, value := randKeys(), randValue()

	// Retrieve root bucket.
	b := tx.Bucket(keys[0])
	if b == nil {
		b, err = tx.CreateBucket(keys[0])
		if err != nil {
			panic("create bucket: " + err.Error())
		}
	}

	// Create nested buckets, if necessary.
	for _, key := range keys[1 : len(keys)-1] {
		child := b.Bucket(key)
		if child != nil {
			b = child
		} else {
			b, err = b.CreateBucket(key)
			if err != nil {
				panic("create bucket: " + err.Error())
			}
		}
	}

	// Insert into database.
	if err := b.Put(keys[len(keys)-1], value); err != nil {
		panic("put: " + err.Error())
	}

	// Insert into in-memory database.
	qdb.Put(keys, value)
}
+
// QuickDB is an in-memory database that replicates the functionality of the
// Bolt DB type except that it is entirely in-memory. It is meant for testing
// that the Bolt database is consistent. Nested buckets are nested maps;
// leaf values are []byte. All access is guarded by the embedded RWMutex.
type QuickDB struct {
	sync.RWMutex
	m map[string]interface{}
}
+
+// NewQuickDB returns an instance of QuickDB.
+func NewQuickDB() *QuickDB {
+ return &QuickDB{m: make(map[string]interface{})}
+}
+
// Get retrieves the value at a key path, or nil when any intermediate key
// is missing or resolves to a plain value rather than a nested bucket.
func (db *QuickDB) Get(keys [][]byte) []byte {
	db.RLock()
	defer db.RUnlock()

	m := db.m
	for _, key := range keys[:len(keys)-1] {
		value := m[string(key)]
		if value == nil {
			return nil
		}
		switch value := value.(type) {
		case map[string]interface{}:
			m = value // descend into the nested bucket
		case []byte:
			return nil // key path runs through a plain value
		}
	}

	// Only return if it's a simple value.
	if value, ok := m[string(keys[len(keys)-1])].([]byte); ok {
		return value
	}
	return nil
}
+
// Put inserts a value into a key path, creating intermediate bucket maps
// as needed. The write is silently dropped when the path crosses an
// existing plain value, mirroring how the real DB would refuse it.
func (db *QuickDB) Put(keys [][]byte, value []byte) {
	db.Lock()
	defer db.Unlock()

	// Build buckets all the way down the key path.
	m := db.m
	for _, key := range keys[:len(keys)-1] {
		if _, ok := m[string(key)].([]byte); ok {
			return // Keypath intersects with a simple value. Do nothing.
		}

		if m[string(key)] == nil {
			m[string(key)] = make(map[string]interface{})
		}
		m = m[string(key)].(map[string]interface{})
	}

	// Insert value into the last key.
	m[string(keys[len(keys)-1])] = value
}
+
// Rand returns a random key path that points to a simple value, or nil
// when the database is empty. The walk recurses through nested buckets.
func (db *QuickDB) Rand() [][]byte {
	db.RLock()
	defer db.RUnlock()
	if len(db.m) == 0 {
		return nil
	}
	var keys [][]byte
	db.rand(db.m, &keys)
	return keys
}
+
// rand appends one randomly chosen key from m to *keys, recursing while
// that key holds a nested bucket. Callers must hold the read lock.
// Relies on Go's randomized map iteration only for ordering within one
// walk; the actual pick uses rand.Intn.
func (db *QuickDB) rand(m map[string]interface{}, keys *[][]byte) {
	i, index := 0, rand.Intn(len(m))
	for k, v := range m {
		if i == index {
			*keys = append(*keys, []byte(k))
			if v, ok := v.(map[string]interface{}); ok {
				db.rand(v, keys)
			}
			return
		}
		i++
	}
	panic("quickdb rand: out-of-range")
}
+
+// Copy copies the entire database.
+func (db *QuickDB) Copy() *QuickDB {
+ db.RLock()
+ defer db.RUnlock()
+ return &QuickDB{m: db.copy(db.m)}
+}
+
+func (db *QuickDB) copy(m map[string]interface{}) map[string]interface{} {
+ clone := make(map[string]interface{}, len(m))
+ for k, v := range m {
+ switch v := v.(type) {
+ case map[string]interface{}:
+ clone[k] = db.copy(v)
+ default:
+ clone[k] = v
+ }
+ }
+ return clone
+}
+
+func randKey() []byte {
+ var min, max = 1, 1024
+ n := rand.Intn(max-min) + min
+ b := make([]byte, n)
+ for i := 0; i < n; i++ {
+ b[i] = byte(rand.Intn(255))
+ }
+ return b
+}
+
+func randKeys() [][]byte {
+ var keys [][]byte
+ var count = rand.Intn(2) + 2
+ for i := 0; i < count; i++ {
+ keys = append(keys, randKey())
+ }
+ return keys
+}
+
+func randValue() []byte {
+ n := rand.Intn(8192)
+ b := make([]byte, n)
+ for i := 0; i < n; i++ {
+ b[i] = byte(rand.Intn(255))
+ }
+ return b
+}
+
// Ensure that committing a closed transaction returns ErrTxClosed.
func TestTx_Commit_ErrTxClosed(t *testing.T) {
	db := MustOpenDB()
	defer db.MustClose()
	tx, err := db.Begin(true)
	if err != nil {
		t.Fatal(err)
	}

	if _, err := tx.CreateBucket([]byte("foo")); err != nil {
		t.Fatal(err)
	}

	// First commit succeeds and closes the transaction.
	if err := tx.Commit(); err != nil {
		t.Fatal(err)
	}

	// Second commit must fail with the closed-transaction sentinel.
	if err := tx.Commit(); err != ErrTxClosed {
		t.Fatalf("unexpected error: %s", err)
	}
}
+
// Ensure that rolling back a closed transaction returns ErrTxClosed.
func TestTx_Rollback_ErrTxClosed(t *testing.T) {
	db := MustOpenDB()
	defer db.MustClose()

	tx, err := db.Begin(true)
	if err != nil {
		t.Fatal(err)
	}

	// First rollback succeeds and closes the transaction.
	if err := tx.Rollback(); err != nil {
		t.Fatal(err)
	}
	if err := tx.Rollback(); err != ErrTxClosed {
		t.Fatalf("unexpected error: %s", err)
	}
}
+
+// Ensure that committing a read-only transaction returns an error.
+func TestTx_Commit_ErrTxNotWritable(t *testing.T) {
+ db := MustOpenDB()
+ defer db.MustClose()
+ tx, err := db.Begin(false)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if err := tx.Commit(); err != ErrTxNotWritable {
+ t.Fatal(err)
+ }
+}
+
// Ensure that a transaction can retrieve a cursor on the root bucket and
// iterate the top-level bucket names in order; bucket entries yield nil values.
func TestTx_Cursor(t *testing.T) {
	db := MustOpenDB()
	defer db.MustClose()
	if err := db.Update(func(tx *Tx) error {
		if _, err := tx.CreateBucket([]byte("widgets")); err != nil {
			t.Fatal(err)
		}

		if _, err := tx.CreateBucket([]byte("woojits")); err != nil {
			t.Fatal(err)
		}

		c := tx.Cursor()
		if k, v := c.First(); !bytes.Equal(k, []byte("widgets")) {
			t.Fatalf("unexpected key: %v", k)
		} else if v != nil {
			t.Fatalf("unexpected value: %v", v)
		}

		if k, v := c.Next(); !bytes.Equal(k, []byte("woojits")) {
			t.Fatalf("unexpected key: %v", k)
		} else if v != nil {
			t.Fatalf("unexpected value: %v", v)
		}

		// Past the last bucket the cursor yields nil/nil.
		if k, v := c.Next(); k != nil {
			t.Fatalf("unexpected key: %v", k)
		} else if v != nil {
			t.Fatalf("unexpected value: %v", k)
		}

		return nil
	}); err != nil {
		t.Fatal(err)
	}
}
+
// Ensure that creating a bucket with a read-only transaction returns
// ErrTxNotWritable.
func TestTx_CreateBucket_ErrTxNotWritable(t *testing.T) {
	db := MustOpenDB()
	defer db.MustClose()
	if err := db.View(func(tx *Tx) error {
		_, err := tx.CreateBucket([]byte("foo"))
		if err != ErrTxNotWritable {
			t.Fatalf("unexpected error: %s", err)
		}
		return nil
	}); err != nil {
		t.Fatal(err)
	}
}
+
// Ensure that creating a bucket on a closed transaction returns ErrTxClosed.
func TestTx_CreateBucket_ErrTxClosed(t *testing.T) {
	db := MustOpenDB()
	defer db.MustClose()
	tx, err := db.Begin(true)
	if err != nil {
		t.Fatal(err)
	}
	// Committing closes the transaction before the create attempt below.
	if err := tx.Commit(); err != nil {
		t.Fatal(err)
	}

	if _, err := tx.CreateBucket([]byte("foo")); err != ErrTxClosed {
		t.Fatalf("unexpected error: %s", err)
	}
}
+
// Ensure that a Tx can retrieve a bucket it created in the same transaction.
func TestTx_Bucket(t *testing.T) {
	db := MustOpenDB()
	defer db.MustClose()
	if err := db.Update(func(tx *Tx) error {
		if _, err := tx.CreateBucket([]byte("widgets")); err != nil {
			t.Fatal(err)
		}
		if tx.Bucket([]byte("widgets")) == nil {
			t.Fatal("expected bucket")
		}
		return nil
	}); err != nil {
		t.Fatal(err)
	}
}
+
// Ensure that a Tx retrieving a non-existent key returns nil, even when
// other keys exist in the bucket.
func TestTx_Get_NotFound(t *testing.T) {
	db := MustOpenDB()
	defer db.MustClose()
	if err := db.Update(func(tx *Tx) error {
		b, err := tx.CreateBucket([]byte("widgets"))
		if err != nil {
			t.Fatal(err)
		}

		if err := b.Put([]byte("foo"), []byte("bar")); err != nil {
			t.Fatal(err)
		}
		if b.Get([]byte("no_such_key")) != nil {
			t.Fatal("expected nil value")
		}
		return nil
	}); err != nil {
		t.Fatal(err)
	}
}
+
// Ensure that a bucket can be created in one transaction and retrieved in a
// later one, i.e. that the creation is durably committed.
func TestTx_CreateBucket(t *testing.T) {
	db := MustOpenDB()
	defer db.MustClose()

	// Create a bucket.
	if err := db.Update(func(tx *Tx) error {
		b, err := tx.CreateBucket([]byte("widgets"))
		if err != nil {
			t.Fatal(err)
		} else if b == nil {
			t.Fatal("expected bucket")
		}
		return nil
	}); err != nil {
		t.Fatal(err)
	}

	// Read the bucket through a separate transaction.
	if err := db.View(func(tx *Tx) error {
		if tx.Bucket([]byte("widgets")) == nil {
			t.Fatal("expected bucket")
		}
		return nil
	}); err != nil {
		t.Fatal(err)
	}
}
+
// Ensure that CreateBucketIfNotExists creates a missing bucket and is a
// no-op success when the bucket already exists.
func TestTx_CreateBucketIfNotExists(t *testing.T) {
	db := MustOpenDB()
	defer db.MustClose()
	if err := db.Update(func(tx *Tx) error {
		// Create bucket.
		if b, err := tx.CreateBucketIfNotExists([]byte("widgets")); err != nil {
			t.Fatal(err)
		} else if b == nil {
			t.Fatal("expected bucket")
		}

		// Create bucket again; must succeed rather than return ErrBucketExists.
		if b, err := tx.CreateBucketIfNotExists([]byte("widgets")); err != nil {
			t.Fatal(err)
		} else if b == nil {
			t.Fatal("expected bucket")
		}

		return nil
	}); err != nil {
		t.Fatal(err)
	}

	// Read the bucket through a separate transaction.
	if err := db.View(func(tx *Tx) error {
		if tx.Bucket([]byte("widgets")) == nil {
			t.Fatal("expected bucket")
		}
		return nil
	}); err != nil {
		t.Fatal(err)
	}
}
+
// Ensure transaction returns ErrBucketNameRequired when creating an unnamed
// bucket, for both an empty slice and nil.
func TestTx_CreateBucketIfNotExists_ErrBucketNameRequired(t *testing.T) {
	db := MustOpenDB()
	defer db.MustClose()
	if err := db.Update(func(tx *Tx) error {
		if _, err := tx.CreateBucketIfNotExists([]byte{}); err != ErrBucketNameRequired {
			t.Fatalf("unexpected error: %s", err)
		}

		if _, err := tx.CreateBucketIfNotExists(nil); err != ErrBucketNameRequired {
			t.Fatalf("unexpected error: %s", err)
		}

		return nil
	}); err != nil {
		t.Fatal(err)
	}
}
+
// Ensure that a bucket cannot be created twice: the second create, in a
// later transaction, must fail with ErrBucketExists.
func TestTx_CreateBucket_ErrBucketExists(t *testing.T) {
	db := MustOpenDB()
	defer db.MustClose()

	// Create a bucket.
	if err := db.Update(func(tx *Tx) error {
		if _, err := tx.CreateBucket([]byte("widgets")); err != nil {
			t.Fatal(err)
		}
		return nil
	}); err != nil {
		t.Fatal(err)
	}

	// Create the same bucket again.
	if err := db.Update(func(tx *Tx) error {
		if _, err := tx.CreateBucket([]byte("widgets")); err != ErrBucketExists {
			t.Fatalf("unexpected error: %s", err)
		}
		return nil
	}); err != nil {
		t.Fatal(err)
	}
}
+
// Ensure that creating a bucket with a nil name fails with
// ErrBucketNameRequired.
func TestTx_CreateBucket_ErrBucketNameRequired(t *testing.T) {
	db := MustOpenDB()
	defer db.MustClose()
	if err := db.Update(func(tx *Tx) error {
		if _, err := tx.CreateBucket(nil); err != ErrBucketNameRequired {
			t.Fatalf("unexpected error: %s", err)
		}
		return nil
	}); err != nil {
		t.Fatal(err)
	}
}
+
+// Ensure that a bucket can be deleted.
+func TestTx_DeleteBucket(t *testing.T) {
+ db := MustOpenDB()
+ defer db.MustClose()
+
+ // Create a bucket and add a value.
+ if err := db.Update(func(tx *Tx) error {
+ b, err := tx.CreateBucket([]byte("widgets"))
+ if err != nil {
+ t.Fatal(err)
+ }
+ if err := b.Put([]byte("foo"), []byte("bar")); err != nil {
+ t.Fatal(err)
+ }
+ return nil
+ }); err != nil {
+ t.Fatal(err)
+ }
+
+ // Delete the bucket and make sure we can't get the value.
+ if err := db.Update(func(tx *Tx) error {
+ if err := tx.DeleteBucket([]byte("widgets")); err != nil {
+ t.Fatal(err)
+ }
+ if tx.Bucket([]byte("widgets")) != nil {
+ t.Fatal("unexpected bucket")
+ }
+ return nil
+ }); err != nil {
+ t.Fatal(err)
+ }
+
+ if err := db.Update(func(tx *Tx) error {
+ // Create the bucket again and make sure there's not a phantom value.
+ b, err := tx.CreateBucket([]byte("widgets"))
+ if err != nil {
+ t.Fatal(err)
+ }
+ if v := b.Get([]byte("foo")); v != nil {
+ t.Fatalf("unexpected phantom value: %v", v)
+ }
+ return nil
+ }); err != nil {
+ t.Fatal(err)
+ }
+}
+
+// Ensure that deleting a bucket on a closed transaction returns an error.
+func TestTx_DeleteBucket_ErrTxClosed(t *testing.T) {
+ db := MustOpenDB()
+ defer db.MustClose()
+ tx, err := db.Begin(true)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if err := tx.Commit(); err != nil {
+ t.Fatal(err)
+ }
+ if err := tx.DeleteBucket([]byte("foo")); err != ErrTxClosed {
+ t.Fatalf("unexpected error: %s", err)
+ }
+}
+
+// Ensure that deleting a bucket with a read-only transaction returns an error.
+func TestTx_DeleteBucket_ReadOnly(t *testing.T) {
+ db := MustOpenDB()
+ defer db.MustClose()
+ if err := db.View(func(tx *Tx) error {
+ if err := tx.DeleteBucket([]byte("foo")); err != ErrTxNotWritable {
+ t.Fatalf("unexpected error: %s", err)
+ }
+ return nil
+ }); err != nil {
+ t.Fatal(err)
+ }
+}
+
+// Ensure that nothing happens when deleting a bucket that doesn't exist.
+func TestTx_DeleteBucket_NotFound(t *testing.T) {
+ db := MustOpenDB()
+ defer db.MustClose()
+ if err := db.Update(func(tx *Tx) error {
+ if err := tx.DeleteBucket([]byte("widgets")); err != ErrBucketNotFound {
+ t.Fatalf("unexpected error: %s", err)
+ }
+ return nil
+ }); err != nil {
+ t.Fatal(err)
+ }
+}
+
+// Ensure that no error is returned when a tx.ForEach function does not return
+// an error.
+func TestTx_ForEach_NoError(t *testing.T) {
+ db := MustOpenDB()
+ defer db.MustClose()
+ if err := db.Update(func(tx *Tx) error {
+ b, err := tx.CreateBucket([]byte("widgets"))
+ if err != nil {
+ t.Fatal(err)
+ }
+ if err := b.Put([]byte("foo"), []byte("bar")); err != nil {
+ t.Fatal(err)
+ }
+
+ if err := tx.ForEach(func(name []byte, b *Bucket) error {
+ return nil
+ }); err != nil {
+ t.Fatal(err)
+ }
+ return nil
+ }); err != nil {
+ t.Fatal(err)
+ }
+}
+
+// Ensure that an error is returned when a tx.ForEach function returns an error.
+func TestTx_ForEach_WithError(t *testing.T) {
+ db := MustOpenDB()
+ defer db.MustClose()
+ if err := db.Update(func(tx *Tx) error {
+ b, err := tx.CreateBucket([]byte("widgets"))
+ if err != nil {
+ t.Fatal(err)
+ }
+ if err := b.Put([]byte("foo"), []byte("bar")); err != nil {
+ t.Fatal(err)
+ }
+
+ marker := errors.New("marker")
+ if err := tx.ForEach(func(name []byte, b *Bucket) error {
+ return marker
+ }); err != marker {
+ t.Fatalf("unexpected error: %s", err)
+ }
+ return nil
+ }); err != nil {
+ t.Fatal(err)
+ }
+}
+
+// Ensure that Tx commit handlers are called after a transaction successfully commits.
+func TestTx_OnCommit(t *testing.T) {
+ db := MustOpenDB()
+ defer db.MustClose()
+
+ var x int
+ if err := db.Update(func(tx *Tx) error {
+ tx.OnCommit(func() { x += 1 })
+ tx.OnCommit(func() { x += 2 })
+ if _, err := tx.CreateBucket([]byte("widgets")); err != nil {
+ t.Fatal(err)
+ }
+ return nil
+ }); err != nil {
+ t.Fatal(err)
+ } else if x != 3 {
+ t.Fatalf("unexpected x: %d", x)
+ }
+}
+
+// Ensure that Tx commit handlers are NOT called after a transaction rolls back.
+func TestTx_OnCommit_Rollback(t *testing.T) {
+ db := MustOpenDB()
+ defer db.MustClose()
+
+ var x int
+ if err := db.Update(func(tx *Tx) error {
+ tx.OnCommit(func() { x += 1 })
+ tx.OnCommit(func() { x += 2 })
+ if _, err := tx.CreateBucket([]byte("widgets")); err != nil {
+ t.Fatal(err)
+ }
+ return errors.New("rollback this commit")
+ }); err == nil || err.Error() != "rollback this commit" {
+ t.Fatalf("unexpected error: %s", err)
+ } else if x != 0 {
+ t.Fatalf("unexpected x: %d", x)
+ }
+}
+
+// Ensure that the database can be copied to a file path.
+func TestTx_CopyFile(t *testing.T) {
+ db := MustOpenDB()
+ defer db.MustClose()
+
+ path := tempfile()
+ if err := db.Update(func(tx *Tx) error {
+ b, err := tx.CreateBucket([]byte("widgets"))
+ if err != nil {
+ t.Fatal(err)
+ }
+ if err := b.Put([]byte("foo"), []byte("bar")); err != nil {
+ t.Fatal(err)
+ }
+ if err := b.Put([]byte("baz"), []byte("bat")); err != nil {
+ t.Fatal(err)
+ }
+ return nil
+ }); err != nil {
+ t.Fatal(err)
+ }
+
+ if err := db.View(func(tx *Tx) error {
+ return tx.CopyFile(path, 0600)
+ }); err != nil {
+ t.Fatal(err)
+ }
+
+ db2, err := Open(path, 0600, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if err := db2.View(func(tx *Tx) error {
+ if v := tx.Bucket([]byte("widgets")).Get([]byte("foo")); !bytes.Equal(v, []byte("bar")) {
+ t.Fatalf("unexpected value: %v", v)
+ }
+ if v := tx.Bucket([]byte("widgets")).Get([]byte("baz")); !bytes.Equal(v, []byte("bat")) {
+ t.Fatalf("unexpected value: %v", v)
+ }
+ return nil
+ }); err != nil {
+ t.Fatal(err)
+ }
+
+ if err := db2.Close(); err != nil {
+ t.Fatal(err)
+ }
+}
+
// failWriterError is the sentinel error injected by failWriter.
type failWriterError struct{}

func (failWriterError) Error() string {
	return "error injected for tests"
}

// failWriter is an io.Writer that accepts only a fixed number of bytes
// before reporting failWriterError.
type failWriter struct {
	// fail after this many bytes
	After int
}

// Write consumes up to w.After bytes of p; any excess triggers the injected
// error and truncates the reported byte count. Subsequent writes fail with
// n == 0 once the budget is exhausted.
func (w *failWriter) Write(p []byte) (int, error) {
	var err error
	n := len(p)
	if n > w.After {
		n = w.After
		err = failWriterError{}
	}
	w.After -= n
	return n, err
}
+
+// Ensure that Copy handles write errors right.
+func TestTx_CopyFile_Error_Meta(t *testing.T) {
+ db := MustOpenDB()
+ defer db.MustClose()
+ if err := db.Update(func(tx *Tx) error {
+ b, err := tx.CreateBucket([]byte("widgets"))
+ if err != nil {
+ t.Fatal(err)
+ }
+ if err := b.Put([]byte("foo"), []byte("bar")); err != nil {
+ t.Fatal(err)
+ }
+ if err := b.Put([]byte("baz"), []byte("bat")); err != nil {
+ t.Fatal(err)
+ }
+ return nil
+ }); err != nil {
+ t.Fatal(err)
+ }
+
+ if err := db.View(func(tx *Tx) error {
+ return tx.Copy(&failWriter{})
+ }); err == nil || err.Error() != "meta 0 copy: error injected for tests" {
+ t.Fatalf("unexpected error: %v", err)
+ }
+}
+
+// Ensure that Copy handles write errors right.
+func TestTx_CopyFile_Error_Normal(t *testing.T) {
+ db := MustOpenDB()
+ defer db.MustClose()
+ if err := db.Update(func(tx *Tx) error {
+ b, err := tx.CreateBucket([]byte("widgets"))
+ if err != nil {
+ t.Fatal(err)
+ }
+ if err := b.Put([]byte("foo"), []byte("bar")); err != nil {
+ t.Fatal(err)
+ }
+ if err := b.Put([]byte("baz"), []byte("bat")); err != nil {
+ t.Fatal(err)
+ }
+ return nil
+ }); err != nil {
+ t.Fatal(err)
+ }
+
+ if err := db.View(func(tx *Tx) error {
+ return tx.Copy(&failWriter{3 * db.Info().PageSize})
+ }); err == nil || err.Error() != "error injected for tests" {
+ t.Fatalf("unexpected error: %v", err)
+ }
+}
+
// ExampleTx_Rollback demonstrates that changes made in a manually-managed
// transaction are discarded when the transaction is rolled back instead of
// committed. The printed output must match the Output comment exactly for
// the example to pass under `go test`.
func ExampleTx_Rollback() {
	// Open the database.
	db, err := Open(tempfile(), 0666, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer os.Remove(db.Path())

	// Create a bucket.
	if err := db.Update(func(tx *Tx) error {
		_, err := tx.CreateBucket([]byte("widgets"))
		return err
	}); err != nil {
		log.Fatal(err)
	}

	// Set a value for a key.
	if err := db.Update(func(tx *Tx) error {
		return tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar"))
	}); err != nil {
		log.Fatal(err)
	}

	// Update the key but rollback the transaction so it never saves.
	tx, err := db.Begin(true)
	if err != nil {
		log.Fatal(err)
	}
	b := tx.Bucket([]byte("widgets"))
	if err := b.Put([]byte("foo"), []byte("baz")); err != nil {
		log.Fatal(err)
	}
	if err := tx.Rollback(); err != nil {
		log.Fatal(err)
	}

	// Ensure that our original value is still set.
	if err := db.View(func(tx *Tx) error {
		value := tx.Bucket([]byte("widgets")).Get([]byte("foo"))
		fmt.Printf("The value for 'foo' is still: %s\n", value)
		return nil
	}); err != nil {
		log.Fatal(err)
	}

	// Close database to release file lock.
	if err := db.Close(); err != nil {
		log.Fatal(err)
	}

	// Output:
	// The value for 'foo' is still: bar
}
+
// ExampleTx_CopyFile demonstrates cloning a live database to another file
// with Tx.CopyFile and reading the clone back. The printed output must match
// the Output comment exactly for the example to pass under `go test`.
func ExampleTx_CopyFile() {
	// Open the database.
	db, err := Open(tempfile(), 0666, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer os.Remove(db.Path())

	// Create a bucket and a key.
	if err := db.Update(func(tx *Tx) error {
		b, err := tx.CreateBucket([]byte("widgets"))
		if err != nil {
			return err
		}
		if err := b.Put([]byte("foo"), []byte("bar")); err != nil {
			return err
		}
		return nil
	}); err != nil {
		log.Fatal(err)
	}

	// Copy the database to another file.
	toFile := tempfile()
	if err := db.View(func(tx *Tx) error {
		return tx.CopyFile(toFile, 0666)
	}); err != nil {
		log.Fatal(err)
	}
	defer os.Remove(toFile)

	// Open the cloned database.
	db2, err := Open(toFile, 0666, nil)
	if err != nil {
		log.Fatal(err)
	}

	// Ensure that the key exists in the copy.
	if err := db2.View(func(tx *Tx) error {
		value := tx.Bucket([]byte("widgets")).Get([]byte("foo"))
		fmt.Printf("The value for 'foo' in the clone is: %s\n", value)
		return nil
	}); err != nil {
		log.Fatal(err)
	}

	// Close database to release file lock.
	if err := db.Close(); err != nil {
		log.Fatal(err)
	}

	if err := db2.Close(); err != nil {
		log.Fatal(err)
	}

	// Output:
	// The value for 'foo' in the clone is: bar
}
+
+// Ensure the "info" command can print information about a database.
+func TestInfoCommand_Run(t *testing.T) {
+ db := MustOpen2(0666, nil)
+ db.DB.Close()
+ defer db.Close()
+
+ // Run the info command.
+ m := NewMain()
+ if err := m.Run("info", db.Path); err != nil {
+ t.Fatal(err)
+ }
+}
+
// Ensure the "stats" command executes correctly with an empty database.
func TestStatsCommand_Run_EmptyDatabase(t *testing.T) {
	// The expected output below hard-codes byte counts derived from a 4KB
	// page layout; skip on systems with a different page size.
	if os.Getpagesize() != 4096 {
		t.Skip("system does not use 4KB page size")
	}

	db := MustOpen2(0666, nil)
	defer db.Close()
	// Close the handle so the stats command can open the file itself.
	db.DB.Close()

	// Generate expected result.
	exp := "Aggregate statistics for 0 buckets\n\n" +
		"Page count statistics\n" +
		"\tNumber of logical branch pages: 0\n" +
		"\tNumber of physical branch overflow pages: 0\n" +
		"\tNumber of logical leaf pages: 0\n" +
		"\tNumber of physical leaf overflow pages: 0\n" +
		"Tree statistics\n" +
		"\tNumber of keys/value pairs: 0\n" +
		"\tNumber of levels in B+tree: 0\n" +
		"Page size utilization\n" +
		"\tBytes allocated for physical branch pages: 0\n" +
		"\tBytes actually used for branch data: 0 (0%)\n" +
		"\tBytes allocated for physical leaf pages: 0\n" +
		"\tBytes actually used for leaf data: 0 (0%)\n" +
		"Bucket statistics\n" +
		"\tTotal number of buckets: 0\n" +
		"\tTotal number on inlined buckets: 0 (0%)\n" +
		"\tBytes used for inlined buckets: 0 (0%)\n"

	// Run the command and compare its captured stdout byte-for-byte.
	m := NewMainTW()
	if err := m.Run("stats", db.Path); err != nil {
		t.Fatal(err)
	} else if m.Stdout.String() != exp {
		t.Fatalf("unexpected stdout:\n\n%s", m.Stdout.String())
	}
}
+
// Ensure the "stats" command can execute correctly.
func TestStatsCommand_Run(t *testing.T) {
	// The expected output below hard-codes byte counts derived from a 4KB
	// page layout; skip on systems with a different page size.
	if os.Getpagesize() != 4096 {
		t.Skip("system does not use 4KB page size")
	}

	db := MustOpen2(0666, nil)
	defer db.Close()

	// Populate three buckets; key counts (10 + 100 + 1 = 111) and sizes
	// determine the exact stats expected below, so insertion must not change.
	if err := db.Update(func(tx *Tx) error {
		// Create "foo" bucket.
		b, err := tx.CreateBucket([]byte("foo"))
		if err != nil {
			return err
		}
		for i := 0; i < 10; i++ {
			if err := b.Put([]byte(strconv.Itoa(i)), []byte(strconv.Itoa(i))); err != nil {
				return err
			}
		}

		// Create "bar" bucket.
		b, err = tx.CreateBucket([]byte("bar"))
		if err != nil {
			return err
		}
		for i := 0; i < 100; i++ {
			if err := b.Put([]byte(strconv.Itoa(i)), []byte(strconv.Itoa(i))); err != nil {
				return err
			}
		}

		// Create "baz" bucket.
		b, err = tx.CreateBucket([]byte("baz"))
		if err != nil {
			return err
		}
		if err := b.Put([]byte("key"), []byte("value")); err != nil {
			return err
		}

		return nil
	}); err != nil {
		t.Fatal(err)
	}
	// Close the handle so the stats command can open the file itself.
	db.DB.Close()

	// Generate expected result.
	exp := "Aggregate statistics for 3 buckets\n\n" +
		"Page count statistics\n" +
		"\tNumber of logical branch pages: 0\n" +
		"\tNumber of physical branch overflow pages: 0\n" +
		"\tNumber of logical leaf pages: 1\n" +
		"\tNumber of physical leaf overflow pages: 0\n" +
		"Tree statistics\n" +
		"\tNumber of keys/value pairs: 111\n" +
		"\tNumber of levels in B+tree: 1\n" +
		"Page size utilization\n" +
		"\tBytes allocated for physical branch pages: 0\n" +
		"\tBytes actually used for branch data: 0 (0%)\n" +
		"\tBytes allocated for physical leaf pages: 4096\n" +
		"\tBytes actually used for leaf data: 1996 (48%)\n" +
		"Bucket statistics\n" +
		"\tTotal number of buckets: 3\n" +
		"\tTotal number on inlined buckets: 2 (66%)\n" +
		"\tBytes used for inlined buckets: 236 (11%)\n"

	// Run the command and compare its captured stdout byte-for-byte.
	m := NewMainTW()
	if err := m.Run("stats", db.Path); err != nil {
		t.Fatal(err)
	} else if m.Stdout.String() != exp {
		t.Fatalf("unexpected stdout:\n\n%s", m.Stdout.String())
	}
}
+
// MainTW is a test wrapper for main.Main that redirects its standard streams
// into in-memory buffers so command output can be asserted.
type MainTW struct {
	*MainT
	Stdin bytes.Buffer
	Stdout bytes.Buffer
	Stderr bytes.Buffer
}
+
// NewMainTW returns a new MainTW whose wrapped MainT reads stdin from and
// writes stdout/stderr to the wrapper's in-memory buffers.
func NewMainTW() *MainTW {
	m := &MainTW{MainT: NewMain()}
	m.MainT.Stdin = &m.Stdin
	m.MainT.Stdout = &m.Stdout
	m.MainT.Stderr = &m.Stderr
	return m
}
+
+// MustOpen creates a Bolt database in a temporary location.
+func MustOpen2(mode os.FileMode, options *Options) *WDB2 {
+ // Create temporary path.
+ f, _ := ioutil.TempFile("", "bolt-")
+ f.Close()
+ os.Remove(f.Name())
+
+ db, err := Open(f.Name(), mode, options)
+ if err != nil {
+ panic(err.Error())
+ }
+ return &WDB2{DB: db, Path: f.Name()}
+}
+
// WDB2 is a test wrapper for bolt.DB that remembers the on-disk path so the
// backing file can be removed when the database is closed.
type WDB2 struct {
	*DB
	Path string
}
+
+// Close closes and removes the database.
+func (db *WDB2) Close() error {
+ defer os.Remove(db.Path)
+ return db.DB.Close()
+}
+
// TestCompactCommand_Run fills a database with random buckets, bloats it with
// large values that are then deleted (creating free pages), and verifies that
// the "compact" command produces an equivalent database without modifying the
// source.
func TestCompactCommand_Run(t *testing.T) {
	// Seed math/rand from crypto/rand so each run exercises a different
	// random layout.
	var s int64
	if err := binary.Read(crypto.Reader, binary.BigEndian, &s); err != nil {
		t.Fatal(err)
	}
	rand.Seed(s)

	// Reserve a destination path for the compacted copy.
	dstdb := MustOpen2(0666, nil)
	dstdb.Close()

	// fill the db
	db := MustOpen2(0666, nil)
	if err := db.Update(func(tx *Tx) error {
		n := 2 + rand.Intn(5)
		for i := 0; i < n; i++ {
			k := []byte(fmt.Sprintf("b%d", i))
			b, err := tx.CreateBucketIfNotExists(k)
			if err != nil {
				return err
			}
			// Give each bucket a distinct sequence so chkdb can detect
			// sequence loss during compaction.
			if err := b.SetSequence(uint64(i)); err != nil {
				return err
			}
			if err := fillBucket(b, append(k, '.')); err != nil {
				return err
			}
		}
		return nil
	}); err != nil {
		db.Close()
		t.Fatal(err)
	}

	// make the db grow by adding large values, and delete them.
	if err := db.Update(func(tx *Tx) error {
		b, err := tx.CreateBucketIfNotExists([]byte("large_vals"))
		if err != nil {
			return err
		}
		n := 5 + rand.Intn(5)
		for i := 0; i < n; i++ {
			v := make([]byte, 1000*1000*(1+rand.Intn(5)))
			_, err := crypto.Read(v)
			if err != nil {
				return err
			}
			if err := b.Put([]byte(fmt.Sprintf("l%d", i)), v); err != nil {
				return err
			}
		}
		return nil
	}); err != nil {
		db.Close()
		t.Fatal(err)
	}
	if err := db.Update(func(tx *Tx) error {
		c := tx.Bucket([]byte("large_vals")).Cursor()
		for k, _ := c.First(); k != nil; k, _ = c.Next() {
			if err := c.Delete(); err != nil {
				return err
			}
		}
		return tx.DeleteBucket([]byte("large_vals"))
	}); err != nil {
		db.Close()
		t.Fatal(err)
	}
	// Release the handle so the compact command can open the file; the
	// deferred Closes only remove the temp files afterwards.
	db.DB.Close()
	defer db.Close()
	defer dstdb.Close()

	// Checksum the source contents before compacting.
	dbChk, err := chkdb(db.Path)
	if err != nil {
		t.Fatal(err)
	}

	m := NewMainTW()
	if err := m.Run("compact", "-o", dstdb.Path, db.Path); err != nil {
		t.Fatal(err)
	}

	// The source must be untouched and the copy must carry identical data.
	dbChkAfterCompact, err := chkdb(db.Path)
	if err != nil {
		t.Fatal(err)
	}

	dstdbChk, err := chkdb(dstdb.Path)
	if err != nil {
		t.Fatal(err)
	}

	if !bytes.Equal(dbChk, dbChkAfterCompact) {
		t.Error("the original db has been touched")
	}
	if !bytes.Equal(dbChk, dstdbChk) {
		t.Error("the compacted db data isn't the same than the original db")
	}
}
+
+func fillBucket(b *Bucket, prefix []byte) error {
+ n := 10 + rand.Intn(50)
+ for i := 0; i < n; i++ {
+ v := make([]byte, 10*(1+rand.Intn(4)))
+ _, err := crypto.Read(v)
+ if err != nil {
+ return err
+ }
+ k := append(prefix, []byte(fmt.Sprintf("k%d", i))...)
+ if err := b.Put(k, v); err != nil {
+ return err
+ }
+ }
+ // limit depth of subbuckets
+ s := 2 + rand.Intn(4)
+ if len(prefix) > (2*s + 1) {
+ return nil
+ }
+ n = 1 + rand.Intn(3)
+ for i := 0; i < n; i++ {
+ k := append(prefix, []byte(fmt.Sprintf("b%d", i))...)
+ sb, err := b.CreateBucket(k)
+ if err != nil {
+ return err
+ }
+ if err := fillBucket(sb, append(k, '.')); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func chkdb(path string) ([]byte, error) {
+ db, err := Open(path, 0666, nil)
+ if err != nil {
+ return nil, err
+ }
+ defer db.Close()
+ var buf bytes.Buffer
+ err = db.View(func(tx *Tx) error {
+ return tx.ForEach(func(name []byte, b *Bucket) error {
+ return walkBucket(b, name, nil, &buf)
+ })
+ })
+ if err != nil {
+ return nil, err
+ }
+ return buf.Bytes(), nil
+}
+
+func walkBucket(parent *Bucket, k []byte, v []byte, w io.Writer) error {
+ if _, err := fmt.Fprintf(w, "%d:%x=%x\n", parent.Sequence(), k, v); err != nil {
+ return err
+ }
+
+ // not a bucket, exit.
+ if v != nil {
+ return nil
+ }
+ return parent.ForEach(func(k, v []byte) error {
+ if v == nil {
+ return walkBucket(parent.Bucket(k), k, nil, w)
+ }
+ return walkBucket(parent, k, v, w)
+ })
+}
+
+
// MainTest does nothing. It appears to be a placeholder hook for test
// tooling; its intended purpose is not evident from this file — confirm
// before removing.
func MainTest() {
}