package dedo import ( "bytes" crypto "crypto/rand" "encoding/binary" "errors" "flag" "fmt" "hash/fnv" "io" "io/ioutil" "log" "math/rand" "os" "path/filepath" "reflect" "regexp" "sort" "strconv" "strings" "sync" "testing" "testing/internal/testdeps" "testing/quick" "time" "unsafe" g "gobang" ) func test_newDB() { g.TestStart("newDB()") g.Testing("different path are given back", func() { db1 := newDB("path1", nil) db2 := newDB("path2", nil) g.TAssertEqual(db1.path, "path1") g.TAssertEqual(db2.path, "path2") }) g.Testing("diferent *os.File pointers", func() { f1 := new(os.File) f2 := new(os.File) db1 := newDB("path", f1) db2 := newDB("path", f2) g.TAssertEqual(db1 == db2, false) }) } func test_openFile() { g.TestStart("openFile()") g.Testing("error when parent directory does not exist", func() { path := tempfile() + tempfile() defer os.Remove(path) _, err := openFile(path) g.TErrorNil(err) }) g.Testing("success when file doesn't exist yet", func() { path := tempfile() defer os.Remove(path) file, err := openFile(path) defer file.Close() g.TErrorIf(err) }) g.Testing("success when file is read/write", func() { path := tempfile() defer os.Remove(path) file, err := os.OpenFile(path, os.O_CREATE, 0o600) g.TErrorIf(err) file.Close() _, err = openFile(path) g.TErrorIf(err) }) g.Testing("success when file is read-only", func() { path := tempfile() defer os.Remove(path) file, err := os.OpenFile(path, os.O_CREATE, 0o400) g.TErrorIf(err) file.Close() _, err = openFile(path) g.TErrorIf(err) }) } func test_readPageSize() { g.TestStart("readPageSize()") g.Testing("empty page gives us EOF", func() { }) g.Testing("page smaller than 4KiB also gives us EOF", func() { }) g.Testing("zeroed 4KiB page gives us ErrInvalid", func() { }) } func test_initDB() { g.TestStart("initDB()") } func test_Open() { g.TestStart("Open()") } func test_commands() { g.TestStart("commands") g.Testing("ensuremap key and name are in sync", func() { for key, command := range commands { 
g.TAssertEqual(command.name, key) } }) } // Ensure that opening a file that is not a Bolt database returns ErrInvalid. func TestOpen_ErrInvalid(t *testing.T) { return } // Ensure that a bucket that gets a non-existent key returns nil. func TestBucket_Get_NonExistent(t *testing.T) { db := MustOpenDB() defer db.MustClose() defer os.Remove(db.Path()) err := db.Update(func(tx *Tx) error { b, err := tx.CreateBucket([]byte("widgets")) if err != nil { t.Fatal(err) } v := b.Get([]byte("foo")) if v != nil { t.Fatal("expected nil value") } return nil }) if err != nil { t.Fatal(err) } } // Ensure that a bucket can read a value that is not flushed yet. func TestBucket_Get_FromNode(t *testing.T) { db := MustOpenDB() defer db.MustClose() defer os.Remove(db.Path()) err := db.Update(func(tx *Tx) error { b, err := tx.CreateBucket([]byte("widgets")) if err != nil { t.Fatal(err) } err = b.Put([]byte("foo"), []byte("bar")) if err != nil { t.Fatal(err) } v := b.Get([]byte("foo")) if !bytes.Equal(v, []byte("bar")) { t.Fatalf("unexpected value: %v", v) } return nil }) if err != nil { t.Fatal(err) } } // Ensure that a bucket retrieved via Get() returns a nil. func TestBucket_Get_IncompatibleValue(t *testing.T) { db := MustOpenDB() defer db.MustClose() defer os.Remove(db.Path()) err := db.Update(func(tx *Tx) error { _, err := tx.CreateBucket([]byte("widgets")) if err != nil { t.Fatal(err) } _, err = tx.Bucket([]byte("widgets")).CreateBucket([ ]byte("foo"), ) if err != nil { t.Fatal(err) } if tx.Bucket([]byte("widgets")).Get([]byte("foo")) != nil { t.Fatal("expected nil value") } return nil }) if err != nil { t.Fatal(err) } } // Ensure that a slice returned from a bucket has a capacity equal to its // length. This also allows slices to be appended to since it will require a // realloc by Go. // // https://github.com/boltdb/bolt/issues/544 func TestBucket_Get_Capacity(t *testing.T) { db := MustOpenDB() defer db.MustClose() defer os.Remove(db.Path()) // Write key to a bucket. 
err := db.Update(func(tx *Tx) error { b, err := tx.CreateBucket([]byte("bucket")) if err != nil { return err } return b.Put([]byte("key"), []byte("val")) }) if err != nil { t.Fatal(err) } // Retrieve value and attempt to append to it. err = db.Update(func(tx *Tx) error { k, v := tx.Bucket([]byte("bucket")).Cursor().First() // Verify capacity. if len(k) != cap(k) { t.Fatalf("unexpected key slice capacity: %d", cap(k)) } else if len(v) != cap(v) { t.Fatalf("unexpected value slice capacity: %d", cap(v)) } // Ensure slice can be appended to without a segfault. k = append(k, []byte("123")...) v = append(v, []byte("123")...) return nil }) if err != nil { t.Fatal(err) } } // Ensure that a bucket can write a key/value. func TestBucket_Put(t *testing.T) { db := MustOpenDB() defer db.MustClose() defer os.Remove(db.Path()) err := db.Update(func(tx *Tx) error { b, err := tx.CreateBucket([]byte("widgets")) if err != nil { t.Fatal(err) } err = b.Put([]byte("foo"), []byte("bar")) if err != nil { t.Fatal(err) } v := tx.Bucket([]byte("widgets")).Get([]byte("foo")) if !bytes.Equal([]byte("bar"), v) { t.Fatalf("unexpected value: %v", v) } return nil }) if err != nil { t.Fatal(err) } } // Ensure that a bucket can rewrite a key in the same transaction. func TestBucket_Put_Repeat(t *testing.T) { db := MustOpenDB() defer db.MustClose() defer os.Remove(db.Path()) err := db.Update(func(tx *Tx) error { b, err := tx.CreateBucket([]byte("widgets")) if err != nil { t.Fatal(err) } err = b.Put([]byte("foo"), []byte("bar")) if err != nil { t.Fatal(err) } err = b.Put([]byte("foo"), []byte("baz")) if err != nil { t.Fatal(err) } value := tx.Bucket([]byte("widgets")).Get([]byte("foo")) if !bytes.Equal([]byte("baz"), value) { t.Fatalf("unexpected value: %v", value) } return nil }) if err != nil { t.Fatal(err) } } // Ensure that a bucket can write a bunch of large values. 
func TestBucket_Put_Large(t *testing.T) { db := MustOpenDB() defer db.MustClose() defer os.Remove(db.Path()) count, factor := 100, 200 err := db.Update(func(tx *Tx) error { b, err := tx.CreateBucket([]byte("widgets")) if err != nil { t.Fatal(err) } for i := 1; i < count; i++ { key := strings.Repeat("0", i * factor) value := strings.Repeat("X", (count - i) * factor) err := b.Put([]byte(key), []byte(value)) if err != nil { t.Fatal(err) } } return nil }) if err != nil { t.Fatal(err) } err = db.View(func(tx *Tx) error { b := tx.Bucket([]byte("widgets")) for i := 1; i < count; i++ { value := b.Get([]byte(strings.Repeat("0", i*factor))) expected := []byte(strings.Repeat( "X", (count - i) * factor, )) if !bytes.Equal(value, expected) { t.Fatalf("unexpected value: %v", value) } } return nil }) if err != nil { t.Fatal(err) } } // Ensure that a database can perform multiple large appends safely. func TestDB_Put_VeryLarge(t *testing.T) { const ( n = 2000 // FIXME: was too slow batchN = 1000 ksize = 8 vsize = 500 ) db := MustOpenDB() defer db.MustClose() defer os.Remove(db.Path()) for i := 0; i < n; i += batchN { err := db.Update(func(tx *Tx) error { b, err := tx.CreateBucketIfNotExists([]byte("widgets")) if err != nil { t.Fatal(err) } for j := 0; j < batchN; j++ { k, v := make([]byte, ksize), make([]byte, vsize) binary.BigEndian.PutUint32(k, uint32(i+j)) err := b.Put(k, v) if err != nil { t.Fatal(err) } } return nil }) if err != nil { t.Fatal(err) } } } // Ensure that a setting a value on a key with a bucket value returns an error. 
func TestBucket_Put_IncompatibleValue(t *testing.T) { db := MustOpenDB() defer db.MustClose() defer os.Remove(db.Path()) err := db.Update(func(tx *Tx) error { b0, err := tx.CreateBucket([]byte("widgets")) if err != nil { t.Fatal(err) } _, err = tx.Bucket([]byte("widgets")).CreateBucket( []byte("foo"), ) if err != nil { t.Fatal(err) } err = b0.Put([]byte("foo"), []byte("bar")) if err != ErrIncompatibleValue { t.Fatalf("unexpected error: %s", err) } return nil }) if err != nil { t.Fatal(err) } } // Ensure that a setting a value while the transaction is closed returns an // error. func TestBucket_Put_Closed(t *testing.T) { db := MustOpenDB() defer db.MustClose() defer os.Remove(db.Path()) tx, err := db.Begin(true) if err != nil { t.Fatal(err) } b, err := tx.CreateBucket([]byte("widgets")) if err != nil { t.Fatal(err) } err = tx.Rollback() if err != nil { t.Fatal(err) } err = b.Put([]byte("foo"), []byte("bar")) if err != ErrTxClosed { t.Fatalf("unexpected error: %s", err) } } // Ensure that setting a value on a read-only bucket returns an error. func TestBucket_Put_ReadOnly(t *testing.T) { db := MustOpenDB() defer db.MustClose() defer os.Remove(db.Path()) err := db.Update(func(tx *Tx) error { _, err := tx.CreateBucket([]byte("widgets")) if err != nil { t.Fatal(err) } return nil }) if err != nil { t.Fatal(err) } err = db.View(func(tx *Tx) error { b := tx.Bucket([]byte("widgets")) err := b.Put([]byte("foo"), []byte("bar")) if err != ErrTxNotWritable { t.Fatalf("unexpected error: %s", err) } return nil }) if err != nil { t.Fatal(err) } } // Ensure that a bucket can delete an existing key. 
func TestBucket_Delete(t *testing.T) { db := MustOpenDB() defer db.MustClose() defer os.Remove(db.Path()) err := db.Update(func(tx *Tx) error { b, err := tx.CreateBucket([]byte("widgets")) if err != nil { t.Fatal(err) } err = b.Put([]byte("foo"), []byte("bar")) if err != nil { t.Fatal(err) } err = b.Delete([]byte("foo")) if err != nil { t.Fatal(err) } v := b.Get([]byte("foo")) if v != nil { t.Fatalf("unexpected value: %v", v) } return nil }) if err != nil { t.Fatal(err) } } // Ensure that deleting a large set of keys will work correctly. func TestBucket_Delete_Large(t *testing.T) { db := MustOpenDB() defer db.MustClose() defer os.Remove(db.Path()) err := db.Update(func(tx *Tx) error { b, err := tx.CreateBucket([]byte("widgets")) if err != nil { t.Fatal(err) } for i := 0; i < 100; i++ { err := b.Put( []byte(strconv.Itoa(i)), []byte(strings.Repeat("*", 1024)), ) if err != nil { t.Fatal(err) } } return nil }) if err != nil { t.Fatal(err) } err = db.Update(func(tx *Tx) error { b := tx.Bucket([]byte("widgets")) for i := 0; i < 100; i++ { err := b.Delete([]byte(strconv.Itoa(i))) if err != nil { t.Fatal(err) } } return nil }) if err != nil { t.Fatal(err) } err = db.View(func(tx *Tx) error { b := tx.Bucket([]byte("widgets")) for i := 0; i < 100; i++ { v := b.Get([]byte(strconv.Itoa(i))) if v != nil { t.Fatalf("unexpected value: %v, i=%d", v, i) } } return nil }) if err != nil { t.Fatal(err) } } // Deleting a very large list of keys will cause the freelist to use overflow. 
func TestBucket_Delete_FreelistOverflow(t *testing.T) { const ( n1 = 20 // FIXME: was too slow n2 = 2 // FIXME: was too slow ) db := MustOpenDB() defer db.MustClose() defer os.Remove(db.Path()) k := make([]byte, 16) for i := uint64(0); i < n1; i++ { err := db.Update(func(tx *Tx) error { b, err := tx.CreateBucketIfNotExists([]byte("0")) if err != nil { t.Fatalf("bucket error: %s", err) } for j := uint64(0); j < n2; j++ { binary.BigEndian.PutUint64(k[:8], i) binary.BigEndian.PutUint64(k[8:], j) err := b.Put(k, nil) if err != nil { t.Fatalf("put error: %s", err) } } return nil }) if err != nil { t.Fatal(err) } } // Delete all of them in one large transaction err := db.Update(func(tx *Tx) error { b := tx.Bucket([]byte("0")) c := b.Cursor() for k, _ := c.First(); k != nil; k, _ = c.Next() { err := c.Delete() if err != nil { t.Fatal(err) } } return nil }) if err != nil { t.Fatal(err) } } // Ensure that accessing and updating nested buckets is ok across transactions. func TestBucket_Nested(t *testing.T) { db := MustOpenDB() defer db.MustClose() defer os.Remove(db.Path()) err := db.Update(func(tx *Tx) error { // Create a widgets bucket. b, err := tx.CreateBucket([]byte("widgets")) if err != nil { t.Fatal(err) } // Create a widgets/foo bucket. _, err = b.CreateBucket([]byte("foo")) if err != nil { t.Fatal(err) } // Create a widgets/bar key. err = b.Put([]byte("bar"), []byte("0000")) if err != nil { t.Fatal(err) } return nil }) if err != nil { t.Fatal(err) } db.MustCheck() // Update widgets/bar. err = db.Update(func(tx *Tx) error { b := tx.Bucket([]byte("widgets")) err := b.Put([]byte("bar"), []byte("xxxx")) if err != nil { t.Fatal(err) } return nil }) if err != nil { t.Fatal(err) } db.MustCheck() // Cause a split. 
err = db.Update(func(tx *Tx) error { b := tx.Bucket([]byte("widgets")) for i := 0; i < 10000; i++ { err := b.Put( []byte(strconv.Itoa(i)), []byte(strconv.Itoa(i)), ) if err != nil { t.Fatal(err) } } return nil }) if err != nil { t.Fatal(err) } db.MustCheck() // Insert into widgets/foo/baz. err = db.Update(func(tx *Tx) error { b := tx.Bucket([]byte("widgets")) err := b.Bucket([]byte("foo")).Put( []byte("baz"), []byte("yyyy"), ) if err != nil { t.Fatal(err) } return nil }) if err != nil { t.Fatal(err) } db.MustCheck() // Verify. err = db.View(func(tx *Tx) error { b := tx.Bucket([]byte("widgets")) v := b.Bucket([]byte("foo")).Get([]byte("baz")) if !bytes.Equal(v, []byte("yyyy")) { t.Fatalf("unexpected value: %v", v) } v = b.Get([]byte("bar")) if !bytes.Equal(v, []byte("xxxx")) { t.Fatalf("unexpected value: %v", v) } for i := 0; i < 10000; i++ { v := b.Get([]byte(strconv.Itoa(i))) if !bytes.Equal(v, []byte(strconv.Itoa(i))) { t.Fatalf("unexpected value: %v", v) } } return nil }) if err != nil { t.Fatal(err) } } // Ensure that deleting a bucket using Delete() returns an error. func TestBucket_Delete_Bucket(t *testing.T) { db := MustOpenDB() defer db.MustClose() defer os.Remove(db.Path()) err := db.Update(func(tx *Tx) error { b, err := tx.CreateBucket([]byte("widgets")) if err != nil { t.Fatal(err) } _, err = b.CreateBucket([]byte("foo")) if err != nil { t.Fatal(err) } err = b.Delete([]byte("foo")) if err != ErrIncompatibleValue { t.Fatalf("unexpected error: %s", err) } return nil }) if err != nil { t.Fatal(err) } } // Ensure that deleting a key on a read-only bucket returns an error. 
func TestBucket_Delete_ReadOnly(t *testing.T) { db := MustOpenDB() defer db.MustClose() defer os.Remove(db.Path()) err := db.Update(func(tx *Tx) error { _, err := tx.CreateBucket([]byte("widgets")) if err != nil { t.Fatal(err) } return nil }) if err != nil { t.Fatal(err) } err = db.View(func(tx *Tx) error { err := tx.Bucket([]byte("widgets")).Delete([]byte("foo")) if err != ErrTxNotWritable { t.Fatalf("unexpected error: %s", err) } return nil }) if err != nil { t.Fatal(err) } } // Ensure that a deleting value while the transaction is closed returns an // error. func TestBucket_Delete_Closed(t *testing.T) { db := MustOpenDB() defer db.MustClose() defer os.Remove(db.Path()) tx, err := db.Begin(true) if err != nil { t.Fatal(err) } b, err := tx.CreateBucket([]byte("widgets")) if err != nil { t.Fatal(err) } err = tx.Rollback() if err != nil { t.Fatal(err) } err = b.Delete([]byte("foo")) if err != ErrTxClosed { t.Fatalf("unexpected error: %s", err) } } // Ensure that deleting a bucket causes nested buckets to be deleted. func TestBucket_DeleteBucket_Nested(t *testing.T) { db := MustOpenDB() defer db.MustClose() defer os.Remove(db.Path()) err := db.Update(func(tx *Tx) error { widgets, err := tx.CreateBucket([]byte("widgets")) if err != nil { t.Fatal(err) } foo, err := widgets.CreateBucket([]byte("foo")) if err != nil { t.Fatal(err) } bar, err := foo.CreateBucket([]byte("bar")) if err != nil { t.Fatal(err) } err = bar.Put([]byte("baz"), []byte("bat")) if err != nil { t.Fatal(err) } err = tx.Bucket([]byte("widgets")).DeleteBucket([]byte("foo")) if err != nil { t.Fatal(err) } return nil }) if err != nil { t.Fatal(err) } } // Ensure that deleting a bucket causes nested buckets to be deleted after they // have been committed. 
func TestBucket_DeleteBucket_Nested2(t *testing.T) { db := MustOpenDB() defer db.MustClose() defer os.Remove(db.Path()) err := db.Update(func(tx *Tx) error { widgets, err := tx.CreateBucket([]byte("widgets")) if err != nil { t.Fatal(err) } foo, err := widgets.CreateBucket([]byte("foo")) if err != nil { t.Fatal(err) } bar, err := foo.CreateBucket([]byte("bar")) if err != nil { t.Fatal(err) } err = bar.Put([]byte("baz"), []byte("bat")) if err != nil { t.Fatal(err) } return nil }) if err != nil { t.Fatal(err) } err = db.Update(func(tx *Tx) error { widgets := tx.Bucket([]byte("widgets")) if widgets == nil { t.Fatal("expected widgets bucket") } foo := widgets.Bucket([]byte("foo")) if foo == nil { t.Fatal("expected foo bucket") } bar := foo.Bucket([]byte("bar")) if bar == nil { t.Fatal("expected bar bucket") } v := bar.Get([]byte("baz")) if !bytes.Equal(v, []byte("bat")) { t.Fatalf("unexpected value: %v", v) } err := tx.DeleteBucket([]byte("widgets")) if err != nil { t.Fatal(err) } return nil }) if err != nil { t.Fatal(err) } err = db.View(func(tx *Tx) error { if tx.Bucket([]byte("widgets")) != nil { t.Fatal("expected bucket to be deleted") } return nil }) if err != nil { t.Fatal(err) } } // Ensure that deleting a child bucket with multiple pages causes all pages to // get collected. NOTE: Consistency check in bolt_test.DB.Close() will panic if // pages not freed properly. 
func TestBucket_DeleteBucket_Large(t *testing.T) { db := MustOpenDB() defer db.MustClose() defer os.Remove(db.Path()) err := db.Update(func(tx *Tx) error { widgets, err := tx.CreateBucket([]byte("widgets")) if err != nil { t.Fatal(err) } foo, err := widgets.CreateBucket([]byte("foo")) if err != nil { t.Fatal(err) } for i := 0; i < 1000; i++ { err := foo.Put( []byte(fmt.Sprintf("%d", i)), []byte(fmt.Sprintf("%0100d", i)), ) if err != nil { t.Fatal(err) } } return nil }) if err != nil { t.Fatal(err) } err = db.Update(func(tx *Tx) error { err := tx.DeleteBucket([]byte("widgets")) if err != nil { t.Fatal(err) } return nil }) if err != nil { t.Fatal(err) } } // Ensure that a simple value retrieved via Bucket() returns a nil. func TestBucket_Bucket_IncompatibleValue(t *testing.T) { db := MustOpenDB() defer db.MustClose() defer os.Remove(db.Path()) err := db.Update(func(tx *Tx) error { widgets, err := tx.CreateBucket([]byte("widgets")) if err != nil { t.Fatal(err) } err = widgets.Put([]byte("foo"), []byte("bar")) if err != nil { t.Fatal(err) } b := tx.Bucket([]byte("widgets")).Bucket([]byte("foo")) if b != nil { t.Fatal("expected nil bucket") } return nil }) if err != nil { t.Fatal(err) } } // Ensure that creating a bucket on an existing non-bucket key returns an error. func TestBucket_CreateBucket_IncompatibleValue(t *testing.T) { db := MustOpenDB() defer db.MustClose() defer os.Remove(db.Path()) err := db.Update(func(tx *Tx) error { widgets, err := tx.CreateBucket([]byte("widgets")) if err != nil { t.Fatal(err) } err = widgets.Put([]byte("foo"), []byte("bar")) if err != nil { t.Fatal(err) } _, err = widgets.CreateBucket([]byte("foo")) if err != ErrIncompatibleValue { t.Fatalf("unexpected error: %s", err) } return nil }) if err != nil { t.Fatal(err) } } // Ensure that deleting a bucket on an existing non-bucket key returns an error. 
func TestBucket_DeleteBucket_IncompatibleValue(t *testing.T) { db := MustOpenDB() defer db.MustClose() defer os.Remove(db.Path()) err := db.Update(func(tx *Tx) error { widgets, err := tx.CreateBucket([]byte("widgets")) if err != nil { t.Fatal(err) } err = widgets.Put([]byte("foo"), []byte("bar")) if err != nil { t.Fatal(err) } err = tx.Bucket([]byte("widgets")).DeleteBucket([]byte("foo")) if err != ErrIncompatibleValue { t.Fatalf("unexpected error: %s", err) } return nil }) if err != nil { t.Fatal(err) } } // Ensure bucket can set and update its sequence number. func TestBucket_Sequence(t *testing.T) { db := MustOpenDB() defer db.MustClose() defer os.Remove(db.Path()) err := db.Update(func(tx *Tx) error { bkt, err := tx.CreateBucket([]byte("0")) if err != nil { t.Fatal(err) } // Retrieve sequence. v := bkt.Sequence() if v != 0 { t.Fatalf("unexpected sequence: %d", v) } // Update sequence. err = bkt.SetSequence(1000) if err != nil { t.Fatal(err) } // Read sequence again. v = bkt.Sequence() if v != 1000 { t.Fatalf("unexpected sequence: %d", v) } return nil }) if err != nil { t.Fatal(err) } // Verify sequence in separate transaction. err = db.View(func(tx *Tx) error { v := tx.Bucket([]byte("0")).Sequence() if v != 1000 { t.Fatalf("unexpected sequence: %d", v) } return nil }) if err != nil { t.Fatal(err) } } // Ensure that a bucket can return an autoincrementing sequence. func TestBucket_NextSequence(t *testing.T) { db := MustOpenDB() defer db.MustClose() defer os.Remove(db.Path()) err := db.Update(func(tx *Tx) error { widgets, err := tx.CreateBucket([]byte("widgets")) if err != nil { t.Fatal(err) } woojits, err := tx.CreateBucket([]byte("woojits")) if err != nil { t.Fatal(err) } // Make sure sequence increments. 
seq, err := widgets.NextSequence() if err != nil { t.Fatal(err) } else if seq != 1 { t.Fatalf("unexpecte sequence: %d", seq) } seq, err = widgets.NextSequence() if err != nil { t.Fatal(err) } else if seq != 2 { t.Fatalf("unexpected sequence: %d", seq) } // Buckets should be separate. seq, err = woojits.NextSequence() if err != nil { t.Fatal(err) } else if seq != 1 { t.Fatalf("unexpected sequence: %d", 1) } return nil }) if err != nil { t.Fatal(err) } } // Ensure that a bucket will persist an autoincrementing sequence even if its // the only thing updated on the bucket. // https://github.com/boltdb/bolt/issues/296 func TestBucket_NextSequence_Persist(t *testing.T) { db := MustOpenDB() defer db.MustClose() defer os.Remove(db.Path()) err := db.Update(func(tx *Tx) error { _, err := tx.CreateBucket([]byte("widgets")) if err != nil { t.Fatal(err) } return nil }) if err != nil { t.Fatal(err) } err = db.Update(func(tx *Tx) error { _, err := tx.Bucket([]byte("widgets")).NextSequence() if err != nil { t.Fatal(err) } return nil }) if err != nil { t.Fatal(err) } err = db.Update(func(tx *Tx) error { seq, err := tx.Bucket([]byte("widgets")).NextSequence() if err != nil { t.Fatalf("unexpected error: %s", err) } else if seq != 2 { t.Fatalf("unexpected sequence: %d", seq) } return nil }) if err != nil { t.Fatal(err) } } // Ensure that retrieving the next sequence on a read-only bucket returns an // error. func TestBucket_NextSequence_ReadOnly(t *testing.T) { db := MustOpenDB() defer db.MustClose() defer os.Remove(db.Path()) err := db.Update(func(tx *Tx) error { _, err := tx.CreateBucket([]byte("widgets")) if err != nil { t.Fatal(err) } return nil }) if err != nil { t.Fatal(err) } err = db.View(func(tx *Tx) error { _, err := tx.Bucket([]byte("widgets")).NextSequence() if err != ErrTxNotWritable { t.Fatalf("unexpected error: %s", err) } return nil }) if err != nil { t.Fatal(err) } } // Ensure that retrieving the next sequence for a bucket on a closed database // return an error. 
func TestBucket_NextSequence_Closed(t *testing.T) { db := MustOpenDB() defer db.MustClose() defer os.Remove(db.Path()) tx, err := db.Begin(true) if err != nil { t.Fatal(err) } b, err := tx.CreateBucket([]byte("widgets")) if err != nil { t.Fatal(err) } err = tx.Rollback() if err != nil { t.Fatal(err) } _, err = b.NextSequence() if err != ErrTxClosed { t.Fatal(err) } } // Ensure a user can loop over all key/value pairs in a bucket. func TestBucket_ForEach(t *testing.T) { db := MustOpenDB() defer db.MustClose() defer os.Remove(db.Path()) err := db.Update(func(tx *Tx) error { b, err := tx.CreateBucket([]byte("widgets")) if err != nil { t.Fatal(err) } err = b.Put([]byte("foo"), []byte("0000")) if err != nil { t.Fatal(err) } err = b.Put([]byte("baz"), []byte("0001")) if err != nil { t.Fatal(err) } err = b.Put([]byte("bar"), []byte("0002")) if err != nil { t.Fatal(err) } var index int err = b.ForEach(func(k, v []byte) error { switch index { case 0: if !bytes.Equal(k, []byte("bar")) { t.Fatalf("unexpected key: %v", k) } else if !bytes.Equal(v, []byte("0002")) { t.Fatalf("unexpected value: %v", v) } case 1: if !bytes.Equal(k, []byte("baz")) { t.Fatalf("unexpected key: %v", k) } else if !bytes.Equal(v, []byte("0001")) { t.Fatalf("unexpected value: %v", v) } case 2: if !bytes.Equal(k, []byte("foo")) { t.Fatalf("unexpected key: %v", k) } else if !bytes.Equal(v, []byte("0000")) { t.Fatalf("unexpected value: %v", v) } } index++ return nil }) if err != nil { t.Fatal(err) } if index != 3 { t.Fatalf("unexpected index: %d", index) } return nil }) if err != nil { t.Fatal(err) } } // Ensure a database can stop iteration early. 
func TestBucket_ForEach_ShortCircuit(t *testing.T) { db := MustOpenDB() defer db.MustClose() defer os.Remove(db.Path()) err := db.Update(func(tx *Tx) error { b, err := tx.CreateBucket([]byte("widgets")) if err != nil { t.Fatal(err) } err = b.Put([]byte("bar"), []byte("0000")) if err != nil { t.Fatal(err) } err = b.Put([]byte("baz"), []byte("0000")) if err != nil { t.Fatal(err) } err = b.Put([]byte("foo"), []byte("0000")) if err != nil { t.Fatal(err) } var index int err = tx.Bucket( []byte("widgets"), ).ForEach(func(k, v []byte) error { index++ if bytes.Equal(k, []byte("baz")) { return errors.New("marker") } return nil }) if err == nil || err.Error() != "marker" { t.Fatalf("unexpected error: %s", err) } if index != 2 { t.Fatalf("unexpected index: %d", index) } return nil }) if err != nil { t.Fatal(err) } } // Ensure that looping over a bucket on a closed database returns an error. func TestBucket_ForEach_Closed(t *testing.T) { db := MustOpenDB() defer db.MustClose() defer os.Remove(db.Path()) tx, err := db.Begin(true) if err != nil { t.Fatal(err) } b, err := tx.CreateBucket([]byte("widgets")) if err != nil { t.Fatal(err) } err = tx.Rollback() if err != nil { t.Fatal(err) } err = b.ForEach(func(k, v []byte) error { return nil }) if err != ErrTxClosed { t.Fatalf("unexpected error: %s", err) } } // Ensure that an error is returned when inserting with an empty key. func TestBucket_Put_EmptyKey(t *testing.T) { db := MustOpenDB() defer db.MustClose() defer os.Remove(db.Path()) err := db.Update(func(tx *Tx) error { b, err := tx.CreateBucket([]byte("widgets")) if err != nil { t.Fatal(err) } err = b.Put([]byte(""), []byte("bar")) if err != ErrKeyRequired { t.Fatalf("unexpected error: %s", err) } err = b.Put(nil, []byte("bar")) if err != ErrKeyRequired { t.Fatalf("unexpected error: %s", err) } return nil }) if err != nil { t.Fatal(err) } } // Ensure that an error is returned when inserting with a key that's too large. 
func TestBucket_Put_KeyTooLarge(t *testing.T) { db := MustOpenDB() defer db.MustClose() defer os.Remove(db.Path()) err := db.Update(func(tx *Tx) error { b, err := tx.CreateBucket([]byte("widgets")) if err != nil { t.Fatal(err) } err = b.Put(make([]byte, 32769), []byte("bar")) if err != ErrKeyTooLarge { t.Fatalf("unexpected error: %s", err) } return nil }) if err != nil { t.Fatal(err) } } // Ensure that an error is returned when inserting a value that's too large. func TestBucket_Put_ValueTooLarge(t *testing.T) { db := MustOpenDB() defer db.MustClose() defer os.Remove(db.Path()) err := db.Update(func(tx *Tx) error { b, err := tx.CreateBucket([]byte("widgets")) if err != nil { t.Fatal(err) } err = b.Put([]byte("foo"), make([]byte, MaxValueSize+1)) if err != ErrValueTooLarge { t.Fatalf("unexpected error: %s", err) } return nil }) if err != nil { t.Fatal(err) } } // Ensure that a bucket can write random keys and values across multiple // transactions. func TestBucket_Put_Single(t *testing.T) { if testing.Short() { t.Skip("skipping test in short mode.") } index := 0 err := quick.Check(func(items testdata) bool { db := MustOpenDB() defer db.MustClose() defer os.Remove(db.Path()) m := make(map[string][]byte) err := db.Update(func(tx *Tx) error { _, err := tx.CreateBucket([]byte("widgets")) if err != nil { t.Fatal(err) } return nil }) if err != nil { t.Fatal(err) } for _, item := range items { err := db.Update(func(tx *Tx) error { err := tx.Bucket([]byte("widgets")).Put( item.Key, item.Value, ) if err != nil { panic("put error: " + err.Error()) } m[string(item.Key)] = item.Value return nil }) if err != nil { t.Fatal(err) } const errmsg = "value mismatch [run %d] (%d of %d):\n" + "key: %x\ngot: %x\nexp: %x" // Verify all key/values so far. 
err = db.View(func(tx *Tx) error { i := 0 for k, v := range m { value := tx.Bucket( []byte("widgets"), ).Get([]byte(k)) if !bytes.Equal(value, v) { t.Logf( errmsg, index, i, len(m), []byte(k), value, v, ) db.CopyTempFile() t.FailNow() } i++ } return nil }) if err != nil { t.Fatal(err) } } index++ return true }, nil) if err != nil { t.Error(err) } } // Ensure that a transaction can insert multiple key/value pairs at once. func TestBucket_Put_Multiple(t *testing.T) { if testing.Short() { t.Skip("skipping test in short mode.") } err := quick.Check(func(items testdata) bool { db := MustOpenDB() defer db.MustClose() defer os.Remove(db.Path()) // Bulk insert all values. err := db.Update(func(tx *Tx) error { _, err := tx.CreateBucket([]byte("widgets")) if err != nil { t.Fatal(err) } return nil }) if err != nil { t.Fatal(err) } err = db.Update(func(tx *Tx) error { b := tx.Bucket([]byte("widgets")) for _, item := range items { err := b.Put(item.Key, item.Value) if err != nil { t.Fatal(err) } } return nil }) if err != nil { t.Fatal(err) } // Verify all items exist. err = db.View(func(tx *Tx) error { b := tx.Bucket([]byte("widgets")) for _, item := range items { value := b.Get(item.Key) if !bytes.Equal(item.Value, value) { db.CopyTempFile() t.Fatalf( "exp=%x, got=%x", item.Value, value, ) } } return nil }) if err != nil { t.Fatal(err) } return true }, qconfig()) if err != nil { t.Error(err) } } // Ensure that a transaction can delete all key/value pairs and return to a // single leaf page. func TestBucket_Delete_Quick(t *testing.T) { if testing.Short() { t.Skip("skipping test in short mode.") } err := quick.Check(func(items testdata) bool { db := MustOpenDB() defer db.MustClose() defer os.Remove(db.Path()) // Bulk insert all values. 
err := db.Update(func(tx *Tx) error { _, err := tx.CreateBucket([]byte("widgets")) if err != nil { t.Fatal(err) } return nil }) if err != nil { t.Fatal(err) } err = db.Update(func(tx *Tx) error { b := tx.Bucket([]byte("widgets")) for _, item := range items { err := b.Put(item.Key, item.Value) if err != nil { t.Fatal(err) } } return nil }) if err != nil { t.Fatal(err) } // Remove items one at a time and check consistency. for _, item := range items { err := db.Update(func(tx *Tx) error { return tx.Bucket([]byte("widgets")).Delete( item.Key, ) }) if err != nil { t.Fatal(err) } } // Anything before our deletion index should be nil. err = db.View(func(tx *Tx) error { err := tx.Bucket( []byte("widgets"), ).ForEach(func(k, v []byte) error { t.Fatalf( "bucket should be empty; found: %06x", trunc(k, 3), ) return nil }) if err != nil { t.Fatal(err) } return nil }) if err != nil { t.Fatal(err) } return true }, qconfig()) if err != nil { t.Error(err) } } func ExampleBucket_Put() { // Open the database. db, err := Open(tempfile()) if err != nil { log.Fatal(err) } defer os.Remove(db.Path()) // Start a write transaction. err = db.Update(func(tx *Tx) error { // Create a bucket. b, err := tx.CreateBucket([]byte("widgets")) if err != nil { return err } // Set the value "bar" for the key "foo". err = b.Put([]byte("foo"), []byte("bar")) if err != nil { return err } return nil }) if err != nil { log.Fatal(err) } // Read value back in a different read-only transaction. err = db.View(func(tx *Tx) error { value := tx.Bucket([]byte("widgets")).Get([]byte("foo")) fmt.Printf("The value of 'foo' is: %s\n", value) return nil }) if err != nil { log.Fatal(err) } // Close database to release file lock. err = db.Close() if err != nil { log.Fatal(err) } // Output: // The value of 'foo' is: bar } func ExampleBucket_Delete() { // Open the database. db, err := Open(tempfile()) if err != nil { log.Fatal(err) } defer os.Remove(db.Path()) // Start a write transaction. 
err = db.Update(func(tx *Tx) error { // Create a bucket. b, err := tx.CreateBucket([]byte("widgets")) if err != nil { return err } // Set the value "bar" for the key "foo". err = b.Put([]byte("foo"), []byte("bar")) if err != nil { return err } // Retrieve the key back from the database and verify it. value := b.Get([]byte("foo")) fmt.Printf("The value of 'foo' was: %s\n", value) return nil }) if err != nil { log.Fatal(err) } // Delete the key in a different write transaction. err = db.Update(func(tx *Tx) error { return tx.Bucket([]byte("widgets")).Delete([]byte("foo")) }) if err != nil { log.Fatal(err) } // Retrieve the key again. err = db.View(func(tx *Tx) error { value := tx.Bucket([]byte("widgets")).Get([]byte("foo")) if value == nil { fmt.Printf("The value of 'foo' is now: nil\n") } return nil }) if err != nil { log.Fatal(err) } // Close database to release file lock. err = db.Close() if err != nil { log.Fatal(err) } // Output: // The value of 'foo' was: bar // The value of 'foo' is now: nil } func ExampleBucket_ForEach() { // Open the database. db, err := Open(tempfile()) if err != nil { log.Fatal(err) } defer os.Remove(db.Path()) // Insert data into a bucket. err = db.Update(func(tx *Tx) error { b, err := tx.CreateBucket([]byte("animals")) if err != nil { return err } err = b.Put([]byte("dog"), []byte("fun")) if err != nil { return err } err = b.Put([]byte("cat"), []byte("lame")) if err != nil { return err } err = b.Put([]byte("liger"), []byte("awesome")) if err != nil { return err } // Iterate over items in sorted key order. err = b.ForEach(func(k, v []byte) error { fmt.Printf("A %s is %s.\n", k, v) return nil }) if err != nil { return err } return nil }) if err != nil { log.Fatal(err) } // Close database to release file lock. err = db.Close() if err != nil { log.Fatal(err) } // Output: // A cat is lame. // A dog is fun. // A liger is awesome. } // Ensure that a cursor can return a reference to the bucket that created it. 
func TestCursor_Bucket(t *testing.T) {
	db := MustOpenDB()
	defer db.MustClose()
	defer os.Remove(db.Path())

	err := db.Update(func(tx *Tx) error {
		b, err := tx.CreateBucket([]byte("widgets"))
		if err != nil {
			t.Fatal(err)
		}

		// Cursor().Bucket() must round-trip back to the bucket
		// that created the cursor.
		cb := b.Cursor().Bucket()
		if !reflect.DeepEqual(cb, b) {
			t.Fatal("cursor bucket mismatch")
		}
		return nil
	})
	if err != nil {
		t.Fatal(err)
	}
}

// Ensure that a Tx cursor can seek to the appropriate keys.
func TestCursor_Seek(t *testing.T) {
	db := MustOpenDB()
	defer db.MustClose()
	defer os.Remove(db.Path())

	// Seed three keys plus one nested bucket so Seek() can be
	// exercised against both leaf values and bucket entries.
	err := db.Update(func(tx *Tx) error {
		b, err := tx.CreateBucket([]byte("widgets"))
		if err != nil {
			t.Fatal(err)
		}

		err = b.Put([]byte("foo"), []byte("0001"))
		if err != nil {
			t.Fatal(err)
		}

		err = b.Put([]byte("bar"), []byte("0002"))
		if err != nil {
			t.Fatal(err)
		}

		err = b.Put([]byte("baz"), []byte("0003"))
		if err != nil {
			t.Fatal(err)
		}

		_, err = b.CreateBucket([]byte("bkt"))
		if err != nil {
			t.Fatal(err)
		}
		return nil
	})
	if err != nil {
		t.Fatal(err)
	}

	err = db.View(func(tx *Tx) error {
		c := tx.Bucket([]byte("widgets")).Cursor()

		// Exact match should go to the key.
		k, v := c.Seek([]byte("bar"))
		if !bytes.Equal(k, []byte("bar")) {
			t.Fatalf("unexpected key: %v", k)
		} else if !bytes.Equal(v, []byte("0002")) {
			t.Fatalf("unexpected value: %v", v)
		}

		// Inexact match should go to the next key.
		k, v = c.Seek([]byte("bas"))
		if !bytes.Equal(k, []byte("baz")) {
			t.Fatalf("unexpected key: %v", k)
		} else if !bytes.Equal(v, []byte("0003")) {
			t.Fatalf("unexpected value: %v", v)
		}

		// Low key should go to the first key.
		k, v = c.Seek([]byte(""))
		if !bytes.Equal(k, []byte("bar")) {
			t.Fatalf("unexpected key: %v", k)
		} else if !bytes.Equal(v, []byte("0002")) {
			t.Fatalf("unexpected value: %v", v)
		}

		// High key should return no key.
		k, v = c.Seek([]byte("zzz"))
		if k != nil {
			t.Fatalf("expected nil key: %v", k)
		} else if v != nil {
			t.Fatalf("expected nil value: %v", v)
		}

		// Buckets should return their key but no value.
		k, v = c.Seek([]byte("bkt"))
		if !bytes.Equal(k, []byte("bkt")) {
			t.Fatalf("unexpected key: %v", k)
		} else if v != nil {
			t.Fatalf("expected nil value: %v", v)
		}
		return nil
	})
	if err != nil {
		t.Fatal(err)
	}
}

func TestCursor_Delete(t *testing.T) {
	db := MustOpenDB()
	defer db.MustClose()
	defer os.Remove(db.Path())

	const count = 1000

	// Insert keys 0..count-1 plus one nested bucket named "sub".
	err := db.Update(func(tx *Tx) error {
		b, err := tx.CreateBucket([]byte("widgets"))
		if err != nil {
			t.Fatal(err)
		}

		for i := 0; i < count; i += 1 {
			k := make([]byte, 8)
			binary.BigEndian.PutUint64(k, uint64(i))
			err := b.Put(k, make([]byte, 100))
			if err != nil {
				t.Fatal(err)
			}
		}

		_, err = b.CreateBucket([]byte("sub"))
		if err != nil {
			t.Fatal(err)
		}
		return nil
	})
	if err != nil {
		t.Fatal(err)
	}

	err = db.Update(func(tx *Tx) error {
		c := tx.Bucket([]byte("widgets")).Cursor()

		// Delete the first half of the keys through the cursor.
		bound := make([]byte, 8)
		binary.BigEndian.PutUint64(bound, uint64(count/2))
		for key, _ := c.First(); bytes.Compare(key, bound) < 0; key, _ = c.Next() {
			err := c.Delete()
			if err != nil {
				t.Fatal(err)
			}
		}

		// Deleting a bucket entry through a cursor is rejected.
		c.Seek([]byte("sub"))
		err = c.Delete()
		if err != ErrIncompatibleValue {
			t.Fatalf("unexpected error: %s", err)
		}

		return nil
	})
	if err != nil {
		t.Fatal(err)
	}
}

// Ensure that a Tx cursor can seek to the appropriate keys when there are a
// large number of keys.  This test also checks that seek will always move
// forward to the next key.
//
// Related: https://github.com/boltdb/bolt/pull/187
func TestCursor_Seek_Large(t *testing.T) {
	db := MustOpenDB()
	defer db.MustClose()
	defer os.Remove(db.Path())

	const count = 10000

	// Insert every other key between 0 and $count.
	err := db.Update(func(tx *Tx) error {
		b, err := tx.CreateBucket([]byte("widgets"))
		if err != nil {
			t.Fatal(err)
		}

		for i := 0; i < count; i += 100 {
			for j := i; j < i+100; j += 2 {
				k := make([]byte, 8)
				binary.BigEndian.PutUint64(k, uint64(j))
				err := b.Put(k, make([]byte, 100))
				if err != nil {
					t.Fatal(err)
				}
			}
		}
		return nil
	})
	if err != nil {
		t.Fatal(err)
	}

	err = db.View(func(tx *Tx) error {
		c := tx.Bucket([]byte("widgets")).Cursor()
		for i := 0; i < count; i++ {
			seek := make([]byte, 8)
			binary.BigEndian.PutUint64(seek, uint64(i))

			k, _ := c.Seek(seek)

			// The last seek is beyond the end of the range so
			// it should return nil.
			if i == count-1 {
				if k != nil {
					t.Fatal("expected nil key")
				}
				continue
			}

			// Otherwise we should seek to the exact key or the next
			// key.
			num := binary.BigEndian.Uint64(k)
			if i%2 == 0 {
				if num != uint64(i) {
					t.Fatalf("unexpected num: %d", num)
				}
			} else {
				if num != uint64(i+1) {
					t.Fatalf("unexpected num: %d", num)
				}
			}
		}

		return nil
	})
	if err != nil {
		t.Fatal(err)
	}
}

// Ensure that a cursor can iterate over an empty bucket without error.
func TestCursor_EmptyBucket(t *testing.T) {
	db := MustOpenDB()
	defer db.MustClose()
	defer os.Remove(db.Path())

	err := db.Update(func(tx *Tx) error {
		_, err := tx.CreateBucket([]byte("widgets"))
		return err
	})
	if err != nil {
		t.Fatal(err)
	}

	// First() on an empty bucket yields a nil key and a nil value.
	err = db.View(func(tx *Tx) error {
		c := tx.Bucket([]byte("widgets")).Cursor()
		k, v := c.First()
		if k != nil {
			t.Fatalf("unexpected key: %v", k)
		} else if v != nil {
			t.Fatalf("unexpected value: %v", v)
		}
		return nil
	})
	if err != nil {
		t.Fatal(err)
	}
}

// Ensure that a Tx cursor can reverse iterate over an empty bucket without
// error.
func TestCursor_EmptyBucketReverse(t *testing.T) {
	db := MustOpenDB()
	defer db.MustClose()
	defer os.Remove(db.Path())

	err := db.Update(func(tx *Tx) error {
		_, err := tx.CreateBucket([]byte("widgets"))
		return err
	})
	if err != nil {
		t.Fatal(err)
	}

	// Last() on an empty bucket yields a nil key and a nil value.
	err = db.View(func(tx *Tx) error {
		c := tx.Bucket([]byte("widgets")).Cursor()
		k, v := c.Last()
		if k != nil {
			t.Fatalf("unexpected key: %v", k)
		} else if v != nil {
			t.Fatalf("unexpected value: %v", v)
		}
		return nil
	})
	if err != nil {
		t.Fatal(err)
	}
}

// Ensure that a Tx cursor can iterate over a single root with a couple
// elements.
func TestCursor_Iterate_Leaf(t *testing.T) {
	db := MustOpenDB()
	defer db.MustClose()
	defer os.Remove(db.Path())

	// Insert three keys; iteration order is expected to be sorted
	// (bar, baz, foo) regardless of insertion order.
	err := db.Update(func(tx *Tx) error {
		b, err := tx.CreateBucket([]byte("widgets"))
		if err != nil {
			t.Fatal(err)
		}

		err = b.Put([]byte("baz"), []byte{})
		if err != nil {
			t.Fatal(err)
		}

		err = b.Put([]byte("foo"), []byte{0})
		if err != nil {
			t.Fatal(err)
		}

		err = b.Put([]byte("bar"), []byte{1})
		if err != nil {
			t.Fatal(err)
		}
		return nil
	})
	if err != nil {
		t.Fatal(err)
	}

	tx, err := db.Begin(false)
	if err != nil {
		t.Fatal(err)
	}
	// Also roll back on any t.Fatal exit path; the explicit Rollback
	// below handles the happy path.
	defer func() { _ = tx.Rollback() }()

	c := tx.Bucket([]byte("widgets")).Cursor()

	k, v := c.First()
	if !bytes.Equal(k, []byte("bar")) {
		t.Fatalf("unexpected key: %v", k)
	} else if !bytes.Equal(v, []byte{1}) {
		t.Fatalf("unexpected value: %v", v)
	}

	k, v = c.Next()
	if !bytes.Equal(k, []byte("baz")) {
		t.Fatalf("unexpected key: %v", k)
	} else if !bytes.Equal(v, []byte{}) {
		t.Fatalf("unexpected value: %v", v)
	}

	k, v = c.Next()
	if !bytes.Equal(k, []byte("foo")) {
		t.Fatalf("unexpected key: %v", k)
	} else if !bytes.Equal(v, []byte{0}) {
		t.Fatalf("unexpected value: %v", v)
	}

	// Iterating past the end keeps returning nil/nil.
	k, v = c.Next()
	if k != nil {
		t.Fatalf("expected nil key: %v", k)
	} else if v != nil {
		t.Fatalf("expected nil value: %v", v)
	}

	k, v = c.Next()
	if k != nil {
		t.Fatalf("expected nil key: %v", k)
	} else if v != nil {
		t.Fatalf("expected nil value: %v", v)
	}

	err = tx.Rollback()
	if err != nil {
		t.Fatal(err)
	}
}

// Ensure that a Tx cursor can iterate in reverse over a single root with a
// couple elements.
func TestCursor_LeafRootReverse(t *testing.T) {
	db := MustOpenDB()
	defer db.MustClose()
	defer os.Remove(db.Path())

	err := db.Update(func(tx *Tx) error {
		b, err := tx.CreateBucket([]byte("widgets"))
		if err != nil {
			t.Fatal(err)
		}

		err = b.Put([]byte("baz"), []byte{})
		if err != nil {
			t.Fatal(err)
		}

		err = b.Put([]byte("foo"), []byte{0})
		if err != nil {
			t.Fatal(err)
		}

		err = b.Put([]byte("bar"), []byte{1})
		if err != nil {
			t.Fatal(err)
		}
		return nil
	})
	if err != nil {
		t.Fatal(err)
	}

	// NOTE(review): unlike TestCursor_Iterate_Leaf there is no deferred
	// Rollback here, so a t.Fatal below leaks the read transaction.
	tx, err := db.Begin(false)
	if err != nil {
		t.Fatal(err)
	}

	c := tx.Bucket([]byte("widgets")).Cursor()

	// Reverse iteration: foo, baz, bar, then nil/nil past the start.
	k, v := c.Last()
	if !bytes.Equal(k, []byte("foo")) {
		t.Fatalf("unexpected key: %v", k)
	} else if !bytes.Equal(v, []byte{0}) {
		t.Fatalf("unexpected value: %v", v)
	}

	k, v = c.Prev()
	if !bytes.Equal(k, []byte("baz")) {
		t.Fatalf("unexpected key: %v", k)
	} else if !bytes.Equal(v, []byte{}) {
		t.Fatalf("unexpected value: %v", v)
	}

	k, v = c.Prev()
	if !bytes.Equal(k, []byte("bar")) {
		t.Fatalf("unexpected key: %v", k)
	} else if !bytes.Equal(v, []byte{1}) {
		t.Fatalf("unexpected value: %v", v)
	}

	k, v = c.Prev()
	if k != nil {
		t.Fatalf("expected nil key: %v", k)
	} else if v != nil {
		t.Fatalf("expected nil value: %v", v)
	}

	k, v = c.Prev()
	if k != nil {
		t.Fatalf("expected nil key: %v", k)
	} else if v != nil {
		t.Fatalf("expected nil value: %v", v)
	}

	err = tx.Rollback()
	if err != nil {
		t.Fatal(err)
	}
}

// Ensure that a Tx cursor can restart from the beginning.
func TestCursor_Restart(t *testing.T) {
	db := MustOpenDB()
	defer db.MustClose()
	defer os.Remove(db.Path())

	err := db.Update(func(tx *Tx) error {
		b, err := tx.CreateBucket([]byte("widgets"))
		if err != nil {
			t.Fatal(err)
		}

		err = b.Put([]byte("bar"), []byte{})
		if err != nil {
			t.Fatal(err)
		}

		err = b.Put([]byte("foo"), []byte{})
		if err != nil {
			t.Fatal(err)
		}
		return nil
	})
	if err != nil {
		t.Fatal(err)
	}

	tx, err := db.Begin(false)
	if err != nil {
		t.Fatal(err)
	}

	c := tx.Bucket([]byte("widgets")).Cursor()

	// First pass over the bucket.
	k, _ := c.First()
	if !bytes.Equal(k, []byte("bar")) {
		t.Fatalf("unexpected key: %v", k)
	}

	k, _ = c.Next()
	if !bytes.Equal(k, []byte("foo")) {
		t.Fatalf("unexpected key: %v", k)
	}

	// Calling First() again must restart iteration from the start.
	k, _ = c.First()
	if !bytes.Equal(k, []byte("bar")) {
		t.Fatalf("unexpected key: %v", k)
	}

	k, _ = c.Next()
	if !bytes.Equal(k, []byte("foo")) {
		t.Fatalf("unexpected key: %v", k)
	}

	err = tx.Rollback()
	if err != nil {
		t.Fatal(err)
	}
}

// Ensure that a cursor can skip over empty pages that have been deleted.
func TestCursor_First_EmptyPages(t *testing.T) {
	db := MustOpenDB()
	defer db.MustClose()
	defer os.Remove(db.Path())

	// Create 1000 keys in the "widgets" bucket.
	err := db.Update(func(tx *Tx) error {
		b, err := tx.CreateBucket([]byte("widgets"))
		if err != nil {
			t.Fatal(err)
		}

		for i := 0; i < 1000; i++ {
			err := b.Put(u64tob(uint64(i)), []byte{})
			if err != nil {
				t.Fatal(err)
			}
		}
		return nil
	})
	if err != nil {
		t.Fatal(err)
	}

	// Delete half the keys and then try to iterate.
	err = db.Update(func(tx *Tx) error {
		b := tx.Bucket([]byte("widgets"))
		for i := 0; i < 600; i++ {
			err := b.Delete(u64tob(uint64(i)))
			if err != nil {
				t.Fatal(err)
			}
		}

		// Exactly the 400 surviving keys must be visited.
		c := b.Cursor()
		var n int
		for k, _ := c.First(); k != nil; k, _ = c.Next() {
			n++
		}
		if n != 400 {
			t.Fatalf("unexpected key count: %d", n)
		}
		return nil
	})
	if err != nil {
		t.Fatal(err)
	}
}

// Ensure that a Tx can iterate over all elements in a bucket.
func TestCursor_QuickCheck(t *testing.T) {
	f := func(items testdata) bool {
		db := MustOpenDB()
		defer db.MustClose()
		defer os.Remove(db.Path())

		// Bulk insert all values.
		tx, err := db.Begin(true)
		if err != nil {
			t.Fatal(err)
		}

		b, err := tx.CreateBucket([]byte("widgets"))
		if err != nil {
			t.Fatal(err)
		}

		for _, item := range items {
			err := b.Put(item.Key, item.Value)
			if err != nil {
				t.Fatal(err)
			}
		}

		err = tx.Commit()
		if err != nil {
			t.Fatal(err)
		}

		// Sort test data.
		sort.Sort(items)

		// Iterate over all items and check consistency.
		index := 0
		tx, err = db.Begin(false)
		if err != nil {
			t.Fatal(err)
		}

		c := tx.Bucket([]byte("widgets")).Cursor()
		for k, v := c.First(); k != nil && index < len(items); k, v = c.Next() {
			if !bytes.Equal(k, items[index].Key) {
				t.Fatalf("unexpected key: %v", k)
			} else if !bytes.Equal(v, items[index].Value) {
				t.Fatalf("unexpected value: %v", v)
			}
			index++
		}

		// Every inserted item must have been visited.
		if len(items) != index {
			t.Fatalf(
				"unexpected item count: %v, expected %v",
				len(items),
				index,
			)
		}

		err = tx.Rollback()
		if err != nil {
			t.Fatal(err)
		}

		return true
	}
	err := quick.Check(f, qconfig())
	if err != nil {
		t.Error(err)
	}
}

// Ensure that a transaction can iterate over all elements in a bucket in
// reverse.
func TestCursor_QuickCheck_Reverse(t *testing.T) {
	f := func(items testdata) bool {
		db := MustOpenDB()
		defer db.MustClose()
		defer os.Remove(db.Path())

		// Bulk insert all values.
		tx, err := db.Begin(true)
		if err != nil {
			t.Fatal(err)
		}

		b, err := tx.CreateBucket([]byte("widgets"))
		if err != nil {
			t.Fatal(err)
		}

		for _, item := range items {
			err := b.Put(item.Key, item.Value)
			if err != nil {
				t.Fatal(err)
			}
		}

		err = tx.Commit()
		if err != nil {
			t.Fatal(err)
		}

		// Sort test data in descending order to match reverse
		// cursor iteration.
		sort.Sort(revtestdata(items))

		// Iterate over all items and check consistency.
		index := 0
		tx, err = db.Begin(false)
		if err != nil {
			t.Fatal(err)
		}

		c := tx.Bucket([]byte("widgets")).Cursor()
		for k, v := c.Last(); k != nil && index < len(items); k, v = c.Prev() {
			if !bytes.Equal(k, items[index].Key) {
				t.Fatalf("unexpected key: %v", k)
			} else if !bytes.Equal(v, items[index].Value) {
				t.Fatalf("unexpected value: %v", v)
			}
			index++
		}

		if len(items) != index {
			t.Fatalf(
				"unexpected item count: %v, expected %v",
				len(items),
				index,
			)
		}

		err = tx.Rollback()
		if err != nil {
			t.Fatal(err)
		}

		return true
	}
	err := quick.Check(f, qconfig())
	if err != nil {
		t.Error(err)
	}
}

// Ensure that a Tx cursor can iterate over subbuckets.
func TestCursor_QuickCheck_BucketsOnly(t *testing.T) {
	db := MustOpenDB()
	defer db.MustClose()
	defer os.Remove(db.Path())

	err := db.Update(func(tx *Tx) error {
		b, err := tx.CreateBucket([]byte("widgets"))
		if err != nil {
			t.Fatal(err)
		}

		_, err = b.CreateBucket([]byte("foo"))
		if err != nil {
			t.Fatal(err)
		}

		_, err = b.CreateBucket([]byte("bar"))
		if err != nil {
			t.Fatal(err)
		}

		_, err = b.CreateBucket([]byte("baz"))
		if err != nil {
			t.Fatal(err)
		}
		return nil
	})
	if err != nil {
		t.Fatal(err)
	}

	// Bucket entries are visited in sorted key order with nil values.
	err = db.View(func(tx *Tx) error {
		names := []string{}
		c := tx.Bucket([]byte("widgets")).Cursor()
		for k, v := c.First(); k != nil; k, v = c.Next() {
			names = append(names, string(k))
			if v != nil {
				t.Fatalf("unexpected value: %v", v)
			}
		}
		if !reflect.DeepEqual(names, []string{"bar", "baz", "foo"}) {
			t.Fatalf("unexpected names: %+v", names)
		}
		return nil
	})
	if err != nil {
		t.Fatal(err)
	}
}

// Ensure that a Tx cursor can reverse iterate over subbuckets.
func TestCursor_QuickCheck_BucketsOnly_Reverse(t *testing.T) {
	db := MustOpenDB()
	defer db.MustClose()
	defer os.Remove(db.Path())

	err := db.Update(func(tx *Tx) error {
		b, err := tx.CreateBucket([]byte("widgets"))
		if err != nil {
			t.Fatal(err)
		}

		_, err = b.CreateBucket([]byte("foo"))
		if err != nil {
			t.Fatal(err)
		}

		_, err = b.CreateBucket([]byte("bar"))
		if err != nil {
			t.Fatal(err)
		}

		_, err = b.CreateBucket([]byte("baz"))
		if err != nil {
			t.Fatal(err)
		}
		return nil
	})
	if err != nil {
		t.Fatal(err)
	}

	// Reverse iteration visits bucket entries in descending key order
	// with nil values.
	err = db.View(func(tx *Tx) error {
		names := []string{}
		c := tx.Bucket([]byte("widgets")).Cursor()
		for k, v := c.Last(); k != nil; k, v = c.Prev() {
			names = append(names, string(k))
			if v != nil {
				t.Fatalf("unexpected value: %v", v)
			}
		}
		if !reflect.DeepEqual(names, []string{"foo", "baz", "bar"}) {
			t.Fatalf("unexpected names: %+v", names)
		}
		return nil
	})
	if err != nil {
		t.Fatal(err)
	}
}

func ExampleCursor() {
	// Open the database.
	db, err := Open(tempfile())
	if err != nil {
		log.Fatal(err)
	}
	defer os.Remove(db.Path())

	// Start a read-write transaction.
	err = db.Update(func(tx *Tx) error {
		// Create a new bucket.
		b, err := tx.CreateBucket([]byte("animals"))
		if err != nil {
			return err
		}

		// Insert data into a bucket.
		err = b.Put([]byte("dog"), []byte("fun"))
		if err != nil {
			log.Fatal(err)
		}

		err = b.Put([]byte("cat"), []byte("lame"))
		if err != nil {
			log.Fatal(err)
		}

		err = b.Put([]byte("liger"), []byte("awesome"))
		if err != nil {
			log.Fatal(err)
		}

		// Create a cursor for iteration.
		c := b.Cursor()

		// Iterate over items in sorted key order. This starts from the
		// first key/value pair and updates the k/v variables to the
		// next key/value on each iteration.
		//
		// The loop finishes at the end of the cursor when a nil key is
		// returned.
		for k, v := c.First(); k != nil; k, v = c.Next() {
			fmt.Printf("A %s is %s.\n", k, v)
		}

		return nil
	})
	if err != nil {
		log.Fatal(err)
	}

	err = db.Close()
	if err != nil {
		log.Fatal(err)
	}

	// Output:
	// A cat is lame.
	// A dog is fun.
	// A liger is awesome.
}

func ExampleCursor_reverse() {
	// Open the database.
	db, err := Open(tempfile())
	if err != nil {
		log.Fatal(err)
	}
	defer os.Remove(db.Path())

	// Start a read-write transaction.
	err = db.Update(func(tx *Tx) error {
		// Create a new bucket.
		b, err := tx.CreateBucket([]byte("animals"))
		if err != nil {
			return err
		}

		// Insert data into a bucket.
		err = b.Put([]byte("dog"), []byte("fun"))
		if err != nil {
			log.Fatal(err)
		}

		err = b.Put([]byte("cat"), []byte("lame"))
		if err != nil {
			log.Fatal(err)
		}

		err = b.Put([]byte("liger"), []byte("awesome"))
		if err != nil {
			log.Fatal(err)
		}

		// Create a cursor for iteration.
		c := b.Cursor()

		// Iterate over items in reverse sorted key order. This starts
		// from the last key/value pair and updates the k/v variables to
		// the previous key/value on each iteration.
		//
		// The loop finishes at the beginning of the cursor when a nil
		// key is returned.
		for k, v := c.Last(); k != nil; k, v = c.Prev() {
			fmt.Printf("A %s is %s.\n", k, v)
		}

		return nil
	})
	if err != nil {
		log.Fatal(err)
	}

	// Close the database to release the file lock.
	err = db.Close()
	if err != nil {
		log.Fatal(err)
	}

	// Output:
	// A liger is awesome.
	// A dog is fun.
	// A cat is lame.
}

// pageSize is the size of one page in the data file.
const pageSize = 4096

// Ensure that a database can be opened without error.
func TestOpen(t *testing.T) {
	path := tempfile()

	db, err := Open(path)
	if err != nil {
		t.Fatal(err)
	} else if db == nil {
		t.Fatal("expected db")
	}
	defer os.Remove(db.Path())

	// The opened database must report the path it was opened with.
	s := db.Path()
	if s != path {
		t.Fatalf("unexpected path: %s", s)
	}

	err = db.Close()
	if err != nil {
		t.Fatal(err)
	}
}

// Ensure that opening a database with a blank path returns an error.
func TestOpen_ErrPathRequired(t *testing.T) {
	_, err := Open("")
	if err == nil {
		t.Fatalf("expected error")
	}
}

// Ensure that opening a database with a bad path returns an error.
func TestOpen_ErrNotExists(t *testing.T) {
	// tempfile() names a file that does not exist, so using it as a
	// directory component yields a path that cannot be created.
	_, err := Open(filepath.Join(tempfile(), "bad-path"))
	if err == nil {
		t.Fatal("expected error")
	}
}

// Ensure that opening a file with two invalid versions returns
// ErrVersionMismatch.
func TestOpen_ErrVersionMismatch(t *testing.T) {
	// The meta-page offsets below assume the build-time page size
	// matches the OS page size.
	if pageSize != os.Getpagesize() {
		t.Skip("page size mismatch")
	}

	// Create empty database.
	db := MustOpenDB()
	defer db.MustClose()
	defer os.Remove(db.Path())
	path := db.Path()

	// Close database.
	err := db.DB.Close()
	if err != nil {
		t.Fatal(err)
	}

	// Read data file.
	buf, err := ioutil.ReadFile(path)
	if err != nil {
		t.Fatal(err)
	}

	// Rewrite meta pages: corrupt the version field in both metas so
	// neither can be used as a fallback.
	meta0 := (*meta)(unsafe.Pointer(&buf[pageHeaderSize]))
	meta0.version++
	meta1 := (*meta)(unsafe.Pointer(&buf[pageSize+pageHeaderSize]))
	meta1.version++
	err = ioutil.WriteFile(path, buf, 0666)
	if err != nil {
		t.Fatal(err)
	}

	// Reopen data file.
	_, err = Open(path)
	if err != ErrVersionMismatch {
		t.Fatalf("unexpected error: %s", err)
	}
}

// Ensure that opening a file with two invalid checksums returns ErrChecksum.
func TestOpen_ErrChecksum(t *testing.T) {
	if pageSize != os.Getpagesize() {
		t.Skip("page size mismatch")
	}

	// Create empty database.
	db := MustOpenDB()
	defer db.MustClose()
	defer os.Remove(db.Path())
	path := db.Path()

	// Close database.
	err := db.DB.Close()
	if err != nil {
		t.Fatal(err)
	}

	// Read data file.
	buf, err := ioutil.ReadFile(path)
	if err != nil {
		t.Fatal(err)
	}

	// Rewrite meta pages: change a checksummed field without updating
	// the stored checksum, invalidating both metas.
	meta0 := (*meta)(unsafe.Pointer(&buf[pageHeaderSize]))
	meta0.pgid++
	meta1 := (*meta)(unsafe.Pointer(&buf[pageSize+pageHeaderSize]))
	meta1.pgid++
	err = ioutil.WriteFile(path, buf, 0666)
	if err != nil {
		t.Fatal(err)
	}

	// Reopen data file.
	_, err = Open(path)
	if err != ErrChecksum {
		t.Fatalf("unexpected error: %s", err)
	}
}

// Ensure that opening a database does not increase its size.
// https://github.com/boltdb/bolt/issues/291
func TestOpen_Size(t *testing.T) {
	// Open a data file.
	db := MustOpenDB()
	defer db.MustClose()
	defer os.Remove(db.Path())
	path := db.Path()

	pagesize := db.pageSize

	// Insert until we get above the minimum 4MB size.
	err := db.Update(func(tx *Tx) error {
		b, _ := tx.CreateBucketIfNotExists([]byte("data"))
		for i := 0; i < 10000; i++ {
			err := b.Put(
				[]byte(fmt.Sprintf("%04d", i)),
				make([]byte, 1000),
			)
			if err != nil {
				t.Fatal(err)
			}
		}
		return nil
	})
	if err != nil {
		t.Fatal(err)
	}

	// Close database and grab the size.
	err = db.DB.Close()
	if err != nil {
		t.Fatal(err)
	}

	sz := fileSize(path)
	if sz == 0 {
		t.Fatalf("unexpected new file size: %d", sz)
	}

	// Reopen database, update, and check size again.
	db0, err := Open(path)
	if err != nil {
		t.Fatal(err)
	}

	err = db0.Update(func(tx *Tx) error {
		err := tx.Bucket([]byte("data")).Put([]byte{0}, []byte{0})
		if err != nil {
			t.Fatal(err)
		}
		return nil
	})
	if err != nil {
		t.Fatal(err)
	}

	err = db0.Close()
	if err != nil {
		t.Fatal(err)
	}

	newSz := fileSize(path)
	if newSz == 0 {
		t.Fatalf("unexpected new file size: %d", newSz)
	}

	// Compare the original size with the new size.  Database size might
	// increase by a few page sizes due to the new small update.
	if sz < newSz-5*int64(pagesize) {
		t.Fatalf("unexpected file growth: %d => %d", sz, newSz)
	}
}

// Ensure that opening a database beyond the max step size does not increase its
// size.
// https://github.com/boltdb/bolt/issues/303
func TestOpen_Size_Large(t *testing.T) {
	return // FIXME: way too slow

	// Open a data file.
	db := MustOpenDB()
	defer db.MustClose()
	defer os.Remove(db.Path())
	path := db.Path()

	pagesize := db.pageSize

	const (
		n1 = 10000
		n2 = 1000
	)

	// Insert until we get above the minimum 4MB size.
	var index uint64
	for i := 0; i < n1; i++ {
		err := db.Update(func(tx *Tx) error {
			b, _ := tx.CreateBucketIfNotExists([]byte("data"))
			for j := 0; j < n2; j++ {
				err := b.Put(u64tob(index), make([]byte, 50))
				if err != nil {
					t.Fatal(err)
				}
				index++
			}
			return nil
		})
		if err != nil {
			t.Fatal(err)
		}
	}

	// Close database and grab the size.
	err := db.DB.Close()
	if err != nil {
		t.Fatal(err)
	}

	sz := fileSize(path)
	if sz == 0 {
		t.Fatalf("unexpected new file size: %d", sz)
	} else if sz < (1 << 30) {
		t.Fatalf("expected larger initial size: %d", sz)
	}

	// Reopen database, update, and check size again.
	db0, err := Open(path)
	if err != nil {
		t.Fatal(err)
	}

	err = db0.Update(func(tx *Tx) error {
		return tx.Bucket([]byte("data")).Put([]byte{0}, []byte{0})
	})
	if err != nil {
		t.Fatal(err)
	}

	err = db0.Close()
	if err != nil {
		t.Fatal(err)
	}

	newSz := fileSize(path)
	if newSz == 0 {
		t.Fatalf("unexpected new file size: %d", newSz)
	}

	// Compare the original size with the new size.  Database size might
	// increase by a few page sizes due to the new small update.
	if sz < newSz-(5*int64(pagesize)) {
		t.Fatalf("unexpected file growth: %d => %d", sz, newSz)
	}
}

// Ensure that a re-opened database is consistent.
func TestOpen_Check(t *testing.T) {
	path := tempfile()
	defer os.Remove(path)

	// Check consistency right after creation...
	db, err := Open(path)
	if err != nil {
		t.Fatal(err)
	}

	err = db.View(func(tx *Tx) error { return <-tx.Check() })
	if err != nil {
		t.Fatal(err)
	}

	err = db.Close()
	if err != nil {
		t.Fatal(err)
	}

	// ...and again after a close/reopen cycle.
	db, err = Open(path)
	if err != nil {
		t.Fatal(err)
	}

	err = db.View(func(tx *Tx) error { return <-tx.Check() })
	if err != nil {
		t.Fatal(err)
	}

	err = db.Close()
	if err != nil {
		t.Fatal(err)
	}
}

// Ensure that write errors to the meta file handler during initialization are
// returned.
func TestOpen_MetaInitWriteError(t *testing.T) {
	// FIXME
	// t.Skip("pending")
}

// Ensure that a database that is too small returns an error.
func TestOpen_FileTooSmall(t *testing.T) { path := tempfile() defer os.Remove(path) db, err := Open(path) if err != nil { t.Fatal(err) } err = db.Close() if err != nil { t.Fatal(err) } // corrupt the database err = os.Truncate(path, int64(os.Getpagesize())) if err != nil { t.Fatal(err) } db, err = Open(path) if err == nil || err.Error() != "file size too small" { t.Fatalf("unexpected error: %s", err) } } // TestDB_Open_InitialMmapSize tests if having InitialMmapSize large enough // to hold data from concurrent write transaction resolves the issue that // read transaction blocks the write transaction and causes deadlock. // This is a very hacky test since the mmap size is not exposed. func TestDB_Open_InitialMmapSize(t *testing.T) { // FIXME: bad test that uses time to detect reader block return path := tempfile() defer os.Remove(path) testWriteSize := 1 << 27 // 134MB db, err := Open(path) if err != nil { t.Fatal(err) } // create a long-running read transaction // that never gets closed while writing rtx, err := db.Begin(false) if err != nil { t.Fatal(err) } // create a write transaction wtx, err := db.Begin(true) if err != nil { t.Fatal(err) } b, err := wtx.CreateBucket([]byte("test")) if err != nil { t.Fatal(err) } // and commit a large write err = b.Put([]byte("foo"), make([]byte, testWriteSize)) if err != nil { t.Fatal(err) } done := make(chan struct{}) go func() { err := wtx.Commit() if err != nil { t.Fatal(err) } done <- struct{}{} }() select { case <-time.After(5 * time.Second): t.Errorf("unexpected that the reader blocks writer") case <-done: } err = rtx.Rollback() if err != nil { t.Fatal(err) } } // Ensure that a database cannot open a transaction when it's not open. func TestDB_Begin_ErrDatabaseNotOpen(t *testing.T) { var db DB _, err := db.Begin(false) if err != ErrDatabaseNotOpen { t.Fatalf("unexpected error: %s", err) } } // Ensure that a read-write transaction can be retrieved. 
func TestDB_BeginRW(t *testing.T) { db := MustOpenDB() defer db.MustClose() defer os.Remove(db.Path()) tx, err := db.Begin(true) if err != nil { t.Fatal(err) } else if tx == nil { t.Fatal("expected tx") } if tx.DB() != db.DB { t.Fatal("unexpected tx database") } else if !tx.Writable() { t.Fatal("expected writable tx") } err = tx.Commit() if err != nil { t.Fatal(err) } } // Ensure that opening a transaction while the DB is closed returns an error. func TestDB_BeginRW_Closed(t *testing.T) { var db DB _, err := db.Begin(true) if err != ErrDatabaseNotOpen { t.Fatalf("unexpected error: %s", err) } } func TestDB_Close_PendingTx_RW(t *testing.T) { testDB_Close_PendingTx(t, true) } func TestDB_Close_PendingTx_RO(t *testing.T) { testDB_Close_PendingTx(t, false) } // Ensure that a database cannot close while transactions are open. func testDB_Close_PendingTx(t *testing.T, writable bool) { // FIXME: bad test that uses time to detect reader block return db := MustOpenDB() defer db.MustClose() defer os.Remove(db.Path()) // Start transaction. tx, err := db.Begin(true) if err != nil { t.Fatal(err) } // Open update in separate goroutine. done := make(chan struct{}) go func() { err := db.Close() if err != nil { t.Fatal(err) } close(done) }() // Ensure database hasn't closed. time.Sleep(100 * time.Millisecond) select { case <-done: t.Fatal("database closed too early") default: } // Commit transaction. err = tx.Commit() if err != nil { t.Fatal(err) } // Ensure database closed now. time.Sleep(100 * time.Millisecond) select { case <-done: default: t.Fatal("database did not close") } } // Ensure a database can provide a transactional block. 
func TestDB_Update(t *testing.T) {
	db := MustOpenDB()
	defer db.MustClose()
	defer os.Remove(db.Path())

	// Put two keys and delete one inside a single write transaction.
	err := db.Update(func(tx *Tx) error {
		b, err := tx.CreateBucket([]byte("widgets"))
		if err != nil {
			t.Fatal(err)
		}

		err = b.Put([]byte("foo"), []byte("bar"))
		if err != nil {
			t.Fatal(err)
		}

		err = b.Put([]byte("baz"), []byte("bat"))
		if err != nil {
			t.Fatal(err)
		}

		err = b.Delete([]byte("foo"))
		if err != nil {
			t.Fatal(err)
		}
		return nil
	})
	if err != nil {
		t.Fatal(err)
	}

	// The deletion and the surviving key must both be visible after
	// commit.
	err = db.View(func(tx *Tx) error {
		b := tx.Bucket([]byte("widgets"))
		v := b.Get([]byte("foo"))
		if v != nil {
			t.Fatalf("expected nil value, got: %v", v)
		}

		v = b.Get([]byte("baz"))
		if !bytes.Equal(v, []byte("bat")) {
			t.Fatalf("unexpected value: %v", v)
		}
		return nil
	})
	if err != nil {
		t.Fatal(err)
	}
}

// Ensure a closed database returns an error while running a transaction block
func TestDB_Update_Closed(t *testing.T) {
	var db DB
	err := db.Update(func(tx *Tx) error {
		_, err := tx.CreateBucket([]byte("widgets"))
		if err != nil {
			t.Fatal(err)
		}
		return nil
	})
	if err != ErrDatabaseNotOpen {
		t.Fatalf("unexpected error: %s", err)
	}
}

// Ensure a panic occurs while trying to commit a managed transaction.
func TestDB_Update_ManualCommit(t *testing.T) {
	db := MustOpenDB()
	defer db.MustClose()
	defer os.Remove(db.Path())

	// Calling Commit directly on a tx managed by Update must panic;
	// capture the panic with recover inside the block.
	var panicked bool
	err := db.Update(func(tx *Tx) error {
		func() {
			defer func() {
				r := recover()
				if r != nil {
					panicked = true
				}
			}()

			err := tx.Commit()
			if err != nil {
				t.Fatal(err)
			}
		}()
		return nil
	})
	if err != nil {
		t.Fatal(err)
	} else if !panicked {
		t.Fatal("expected panic")
	}
}

// Ensure a panic occurs while trying to rollback a managed transaction.
func TestDB_Update_ManualRollback(t *testing.T) {
	db := MustOpenDB()
	defer db.MustClose()
	defer os.Remove(db.Path())

	// Calling Rollback directly on a tx managed by Update must panic.
	var panicked bool
	err := db.Update(func(tx *Tx) error {
		func() {
			defer func() {
				r := recover()
				if r != nil {
					panicked = true
				}
			}()

			err := tx.Rollback()
			if err != nil {
				t.Fatal(err)
			}
		}()
		return nil
	})
	if err != nil {
		t.Fatal(err)
	} else if !panicked {
		t.Fatal("expected panic")
	}
}

// Ensure a panic occurs while trying to commit a managed transaction.
func TestDB_View_ManualCommit(t *testing.T) {
	db := MustOpenDB()
	defer db.MustClose()
	defer os.Remove(db.Path())

	// Calling Commit directly on a tx managed by View must panic.
	var panicked bool
	err := db.View(func(tx *Tx) error {
		func() {
			defer func() {
				r := recover()
				if r != nil {
					panicked = true
				}
			}()

			err := tx.Commit()
			if err != nil {
				t.Fatal(err)
			}
		}()
		return nil
	})
	if err != nil {
		t.Fatal(err)
	} else if !panicked {
		t.Fatal("expected panic")
	}
}

// Ensure a panic occurs while trying to rollback a managed transaction.
func TestDB_View_ManualRollback(t *testing.T) {
	db := MustOpenDB()
	defer db.MustClose()
	defer os.Remove(db.Path())

	// Calling Rollback directly on a tx managed by View must panic.
	var panicked bool
	err := db.View(func(tx *Tx) error {
		func() {
			defer func() {
				r := recover()
				if r != nil {
					panicked = true
				}
			}()

			err := tx.Rollback()
			if err != nil {
				t.Fatal(err)
			}
		}()
		return nil
	})
	if err != nil {
		t.Fatal(err)
	} else if !panicked {
		t.Fatal("expected panic")
	}
}

// Ensure a write transaction that panics does not hold open locks.
func TestDB_Update_Panic(t *testing.T) {
	db := MustOpenDB()
	defer db.MustClose()
	defer os.Remove(db.Path())

	// Panic during update but recover.
	func() {
		defer func() {
			r := recover()
			if r != nil {
				// t.Log("recover: update", r)
			}
		}()

		err := db.Update(func(tx *Tx) error {
			_, err := tx.CreateBucket([]byte("widgets"))
			if err != nil {
				t.Fatal(err)
			}
			panic("omg")
		})
		if err != nil {
			t.Fatal(err)
		}
	}()

	// Verify we can update again.  CreateBucket succeeding here shows
	// the panicked transaction was rolled back and its lock released.
	err := db.Update(func(tx *Tx) error {
		_, err := tx.CreateBucket([]byte("widgets"))
		if err != nil {
			t.Fatal(err)
		}
		return nil
	})
	if err != nil {
		t.Fatal(err)
	}

	// Verify that our change persisted.
	err = db.Update(func(tx *Tx) error {
		if tx.Bucket([]byte("widgets")) == nil {
			t.Fatal("expected bucket")
		}
		return nil
	})
	if err != nil {
		t.Fatal(err)
	}
}

// Ensure a database can return an error through a read-only transactional
// block.
func TestDB_View_Error(t *testing.T) {
	db := MustOpenDB()
	defer db.MustClose()
	defer os.Remove(db.Path())

	// The error returned by the block must surface unchanged from View.
	err := db.View(func(tx *Tx) error {
		return errors.New("xxx")
	})
	if err == nil || err.Error() != "xxx" {
		t.Fatalf("unexpected error: %s", err)
	}
}

// Ensure a read transaction that panics does not hold open locks.
func TestDB_View_Panic(t *testing.T) {
	db := MustOpenDB()
	defer db.MustClose()
	defer os.Remove(db.Path())

	err := db.Update(func(tx *Tx) error {
		_, err := tx.CreateBucket([]byte("widgets"))
		if err != nil {
			t.Fatal(err)
		}
		return nil
	})
	if err != nil {
		t.Fatal(err)
	}

	// Panic during view transaction but recover.
	func() {
		defer func() {
			r := recover()
			if r != nil {
				// t.Log("recover: view", r)
			}
		}()

		err := db.View(func(tx *Tx) error {
			if tx.Bucket([]byte("widgets")) == nil {
				t.Fatal("expected bucket")
			}
			panic("omg")
		})
		if err != nil {
			t.Fatal(err)
		}
	}()

	// Verify that we can still use read transactions.
	err = db.View(func(tx *Tx) error {
		if tx.Bucket([]byte("widgets")) == nil {
			t.Fatal("expected bucket")
		}
		return nil
	})
	if err != nil {
		t.Fatal(err)
	}
}

// Ensure that database pages are in expected order and type.
func TestDB_Consistency(t *testing.T) { db := MustOpenDB() defer db.MustClose() defer os.Remove(db.Path()) err := db.Update(func(tx *Tx) error { _, err := tx.CreateBucket([]byte("widgets")) return err }) if err != nil { t.Fatal(err) } for i := 0; i < 10; i++ { err := db.Update(func(tx *Tx) error { err := tx.Bucket([]byte("widgets")).Put( []byte("foo"), []byte("bar"), ) if err != nil { t.Fatal(err) } return nil }) if err != nil { t.Fatal(err) } } err = db.Update(func(tx *Tx) error { p, _ := tx.Page(0) if p == nil { t.Fatal("expected page") } else if p.Type != "meta" { t.Fatalf("unexpected page type: %s", p.Type) } p, _ = tx.Page(1) if p == nil { t.Fatal("expected page") } else if p.Type != "meta" { t.Fatalf("unexpected page type: %s", p.Type) } p, _ = tx.Page(2) if p == nil { t.Fatal("expected page") } else if p.Type != "free" { t.Fatalf("unexpected page type: %s", p.Type) } p, _ = tx.Page(3) if p == nil { t.Fatal("expected page") } else if p.Type != "free" { t.Fatalf("unexpected page type: %s", p.Type) } p, _ = tx.Page(4) if p == nil { t.Fatal("expected page") } else if p.Type != "leaf" { t.Fatalf("unexpected page type: %s", p.Type) } p, _ = tx.Page(5) if p == nil { t.Fatal("expected page") } else if p.Type != "freelist" { t.Fatalf("unexpected page type: %s", p.Type) } p, _ = tx.Page(6) if p != nil { t.Fatal("unexpected page") } return nil }) if err != nil { t.Fatal(err) } } // Ensure two functions can perform updates in a single batch. func TestDB_Batch(t *testing.T) { db := MustOpenDB() defer db.MustClose() defer os.Remove(db.Path()) err := db.Update(func(tx *Tx) error { _, err := tx.CreateBucket([]byte("widgets")) if err != nil { t.Fatal(err) } return nil }) if err != nil { t.Fatal(err) } // Iterate over multiple updates in separate goroutines. 
n := 2 ch := make(chan error) for i := 0; i < n; i++ { go func(i int) { ch <- db.Batch(func(tx *Tx) error { return tx.Bucket([]byte("widgets")).Put( u64tob(uint64(i)), []byte{}, ) }) }(i) } // Check all responses to make sure there's no error. for i := 0; i < n; i++ { err := <-ch if err != nil { t.Fatal(err) } } // Ensure data is correct. err = db.View(func(tx *Tx) error { b := tx.Bucket([]byte("widgets")) for i := 0; i < n; i++ { v := b.Get(u64tob(uint64(i))) if v == nil { t.Errorf("key not found: %d", i) } } return nil }) if err != nil { t.Fatal(err) } } func TestDB_Batch_Panic(t *testing.T) { db := MustOpenDB() defer db.MustClose() defer os.Remove(db.Path()) var ( sentinel int problem interface{} err error ) bork := &sentinel // Execute a function inside a batch that panics. func() { defer func() { p := recover() if p != nil { problem = p } }() err = db.Batch(func(tx *Tx) error { panic(bork) }) }() // Verify there is no error. g, e := err, error(nil) if g != e { t.Fatalf("wrong error: %v != %v", g, e) } // Verify the panic was captured. p, b := problem, bork if p != b { t.Fatalf("wrong error: %v != %v", p, b) } } func TestDB_BatchFull(t *testing.T) { db := MustOpenDB() defer db.MustClose() defer os.Remove(db.Path()) err := db.Update(func(tx *Tx) error { _, err := tx.CreateBucket([]byte("widgets")) return err }) if err != nil { t.Fatal(err) } const size = 3 // buffered so we never leak goroutines ch := make(chan error, size) put := func(i int) { ch <- db.Batch(func(tx *Tx) error { return tx.Bucket([]byte("widgets")).Put( u64tob(uint64(i)), []byte{}, ) }) } db.MaxBatchSize = size // high enough to never trigger here db.MaxBatchDelay = 1 * time.Hour go put(1) go put(2) // Give the batch a chance to exhibit bugs. time.Sleep(10 * time.Millisecond) // not triggered yet select { case <-ch: t.Fatalf("batch triggered too early") default: } go put(3) // Check all responses to make sure there's no error. 
for i := 0; i < size; i++ { err := <-ch if err != nil { t.Fatal(err) } } // Ensure data is correct. err = db.View(func(tx *Tx) error { b := tx.Bucket([]byte("widgets")) for i := 1; i <= size; i++ { v := b.Get(u64tob(uint64(i))) if v == nil { t.Errorf("key not found: %d", i) } } return nil }) if err != nil { t.Fatal(err) } } func TestDB_BatchTime(t *testing.T) { db := MustOpenDB() defer db.MustClose() defer os.Remove(db.Path()) err := db.Update(func(tx *Tx) error { _, err := tx.CreateBucket([]byte("widgets")) return err }) if err != nil { t.Fatal(err) } const size = 1 // buffered so we never leak goroutines ch := make(chan error, size) put := func(i int) { ch <- db.Batch(func(tx *Tx) error { return tx.Bucket([]byte("widgets")).Put( u64tob(uint64(i)), []byte{}, ) }) } db.MaxBatchSize = 1000 db.MaxBatchDelay = 0 go put(1) // Batch must trigger by time alone. // Check all responses to make sure there's no error. for i := 0; i < size; i++ { if err := <-ch; err != nil { t.Fatal(err) } } // Ensure data is correct. err = db.View(func(tx *Tx) error { b := tx.Bucket([]byte("widgets")) for i := 1; i <= size; i++ { if v := b.Get(u64tob(uint64(i))); v == nil { t.Errorf("key not found: %d", i) } } return nil }) if err != nil { t.Fatal(err) } } func ExampleDB_Update() { // Open the database. db, err := Open(tempfile()) if err != nil { log.Fatal(err) } defer os.Remove(db.Path()) // Execute several commands within a read-write transaction. err = db.Update(func(tx *Tx) error { b, err := tx.CreateBucket([]byte("widgets")) if err != nil { return err } err = b.Put([]byte("foo"), []byte("bar")) if err != nil { return err } return nil }) if err != nil { log.Fatal(err) } // Read the value back from a separate read-only transaction. err = db.View(func(tx *Tx) error { value := tx.Bucket([]byte("widgets")).Get([]byte("foo")) fmt.Printf("The value of 'foo' is: %s\n", value) return nil }) if err != nil { log.Fatal(err) } // Close database to release the file lock. 
err = db.Close() if err != nil { log.Fatal(err) } // Output: // The value of 'foo' is: bar } func ExampleDB_View() { // Open the database. db, err := Open(tempfile()) if err != nil { log.Fatal(err) } defer os.Remove(db.Path()) // Insert data into a bucket. err = db.Update(func(tx *Tx) error { b, err := tx.CreateBucket([]byte("people")) if err != nil { return err } err = b.Put([]byte("john"), []byte("doe")) if err != nil { return err } err = b.Put([]byte("susy"), []byte("que")) if err != nil { return err } return nil }) if err != nil { log.Fatal(err) } // Access data from within a read-only transactional block. err = db.View(func(tx *Tx) error { v := tx.Bucket([]byte("people")).Get([]byte("john")) fmt.Printf("John's last name is %s.\n", v) return nil }) if err != nil { log.Fatal(err) } // Close database to release the file lock. err = db.Close() if err != nil { log.Fatal(err) } // Output: // John's last name is doe. } func ExampleDB_Begin_ReadOnly() { // Open the database. db, err := Open(tempfile()) if err != nil { log.Fatal(err) } defer os.Remove(db.Path()) // Create a bucket using a read-write transaction. err = db.Update(func(tx *Tx) error { _, err := tx.CreateBucket([]byte("widgets")) return err }) if err != nil { log.Fatal(err) } // Create several keys in a transaction. tx, err := db.Begin(true) if err != nil { log.Fatal(err) } b := tx.Bucket([]byte("widgets")) err = b.Put([]byte("john"), []byte("blue")) if err != nil { log.Fatal(err) } err = b.Put([]byte("abby"), []byte("red")) if err != nil { log.Fatal(err) } err = b.Put([]byte("zephyr"), []byte("purple")) if err != nil { log.Fatal(err) } err = tx.Commit() if err != nil { log.Fatal(err) } // Iterate over the values in sorted key order. 
tx, err = db.Begin(false) if err != nil { log.Fatal(err) } c := tx.Bucket([]byte("widgets")).Cursor() for k, v := c.First(); k != nil; k, v = c.Next() { fmt.Printf("%s likes %s\n", k, v) } err = tx.Rollback() if err != nil { log.Fatal(err) } err = db.Close() if err != nil { log.Fatal(err) } // Output: // abby likes red // john likes blue // zephyr likes purple } func BenchmarkDBBatchAutomatic(b *testing.B) { db := MustOpenDB() defer db.MustClose() defer os.Remove(db.Path()) err := db.Update(func(tx *Tx) error { _, err := tx.CreateBucket([]byte("bench")) return err }) if err != nil { b.Fatal(err) } b.ResetTimer() for i := 0; i < b.N; i++ { start := make(chan struct{}) var wg sync.WaitGroup for round := 0; round < 1000; round++ { wg.Add(1) go func(id uint32) { defer wg.Done() <-start h := fnv.New32a() buf := make([]byte, 4) binary.LittleEndian.PutUint32(buf, id) _, _ = h.Write(buf[:]) k := h.Sum(nil) insert := func(tx *Tx) error { b := tx.Bucket([]byte("bench")) return b.Put(k, []byte("filler")) } err := db.Batch(insert) if err != nil { b.Error(err) return } }(uint32(round)) } close(start) wg.Wait() } b.StopTimer() validateBatchBench(b, db) } func BenchmarkDBBatchSingle(b *testing.B) { db := MustOpenDB() defer db.MustClose() defer os.Remove(db.Path()) err := db.Update(func(tx *Tx) error { _, err := tx.CreateBucket([]byte("bench")) return err }) if err != nil { b.Fatal(err) } b.ResetTimer() for i := 0; i < b.N; i++ { start := make(chan struct{}) var wg sync.WaitGroup for round := 0; round < 1000; round++ { wg.Add(1) go func(id uint32) { defer wg.Done() <-start h := fnv.New32a() buf := make([]byte, 4) binary.LittleEndian.PutUint32(buf, id) _, _ = h.Write(buf[:]) k := h.Sum(nil) insert := func(tx *Tx) error { b := tx.Bucket([]byte("bench")) return b.Put(k, []byte("filler")) } err := db.Update(insert) if err != nil { b.Error(err) return } }(uint32(round)) } close(start) wg.Wait() } b.StopTimer() validateBatchBench(b, db) } func BenchmarkDBBatchManual10x100(b *testing.B) 
{ db := MustOpenDB() defer db.MustClose() defer os.Remove(db.Path()) err := db.Update(func(tx *Tx) error { _, err := tx.CreateBucket([]byte("bench")) return err }) if err != nil { b.Fatal(err) } b.ResetTimer() for i := 0; i < b.N; i++ { start := make(chan struct{}) var wg sync.WaitGroup for major := 0; major < 10; major++ { wg.Add(1) go func(id uint32) { defer wg.Done() <-start insert100 := func(tx *Tx) error { h := fnv.New32a() buf := make([]byte, 4) for minor := 0; minor < 100; minor++ { n := (id * 100) + uint32(minor) binary.LittleEndian.PutUint32( buf, n, ) h.Reset() _, _ = h.Write(buf[:]) k := h.Sum(nil) b := tx.Bucket([]byte("bench")) err := b.Put( k, []byte("filler"), ) if err != nil { return err } } return nil } err := db.Update(insert100) if err != nil { b.Fatal(err) } }(uint32(major)) } close(start) wg.Wait() } b.StopTimer() validateBatchBench(b, db) } func validateBatchBench(b *testing.B, db *WDB) { rollback := errors.New("sentinel error to cause rollback") validate := func(tx *Tx) error { bucket := tx.Bucket([]byte("bench")) h := fnv.New32a() buf := make([]byte, 4) for id := uint32(0); id < 1000; id++ { binary.LittleEndian.PutUint32(buf, id) h.Reset() _, _ = h.Write(buf[:]) k := h.Sum(nil) v := bucket.Get(k) if v == nil { b.Errorf("not found id=%d key=%x", id, k) continue } g, e := v, []byte("filler") if !bytes.Equal(g, e) { b.Errorf( "bad value for id=%d key=%x: %s != %q", id, k, g, e, ) } err := bucket.Delete(k) if err != nil { return err } } // should be empty now c := bucket.Cursor() for k, v := c.First(); k != nil; k, v = c.Next() { b.Errorf("unexpected key: %x = %q", k, v) } return rollback } err := db.Update(validate) if err != nil && err != rollback { b.Error(err) } } // DB is a test wrapper for DB. type WDB struct { *DB } // MustOpenDB returns a new, open DB at a temporary location. func MustOpenDB() *WDB { db, err := Open(tempfile()) if err != nil { panic(err) } return &WDB{db} } // Close closes the database and deletes the underlying file. 
func (db *WDB) Close() error { defer os.Remove(db.Path()) // Check database consistency after every test. db.MustCheck() // Close database and remove file. return db.DB.Close() } // MustClose closes the database and deletes the underlying file. Panic on // error. func (db *WDB) MustClose() { err := db.Close() if err != nil { panic(err) } os.Remove(db.Path()) } // MustCheck runs a consistency check on the database and panics if any errors // are found. func (db *WDB) MustCheck() { err := db.Update(func(tx *Tx) error { // Collect all the errors. errors := []error{} for err := range tx.Check() { errors = append(errors, err) if len(errors) > 10 { break } } // If errors occurred, copy the DB and print the errors. if len(errors) > 0 { path := tempfile() defer os.Remove(path) err := tx.CopyFile(path, 0600) if err != nil { panic(err) } // Print errors. fmt.Print("\n\n") fmt.Printf( "consistency check failed (%d errors)\n", len(errors), ) for _, err := range errors { fmt.Println(err) } fmt.Println("") fmt.Println("db saved to:") fmt.Println(path) fmt.Print("\n\n") os.Exit(-1) } return nil }) if err != nil && err != ErrDatabaseNotOpen { panic(err) } } // CopyTempFile copies a database to a temporary file. func (db *WDB) CopyTempFile() { path := tempfile() err := db.View(func(tx *Tx) error { return tx.CopyFile(path, 0600) }) if err != nil { panic(err) } fmt.Println("db copied to: ", path) } // tempfile returns a temporary file path. 
var tempdir string func getTempdir() string { const ( shm = "/dev/shm" tmp = "/tmp" ) dir := os.Getenv("XDG_RUNTIME_DIR") if dir != "" { return dir } _, err := os.Stat(shm) if err != nil { return shm } dir = os.Getenv("TMPDIR") if dir != "" { return dir } return tmp } func tempfile() string { f, err := ioutil.TempFile(tempdir, "dedo-") if err != nil { panic(err) } err = f.Close() if err != nil { panic(err) } err = os.Remove(f.Name()) if err != nil { panic(err) } return f.Name() } // mustContainKeys checks that a bucket contains a given set of keys. func mustContainKeys(b *Bucket, m map[string]string) { found := make(map[string]string) err := b.ForEach(func(k, _ []byte) error { found[string(k)] = "" return nil }) if err != nil { panic(err) } // Check for keys found in bucket that shouldn't be there. keys := []string{} for k, _ := range found { _, ok := m[string(k)] if !ok { keys = append(keys, k) } } if len(keys) > 0 { sort.Strings(keys) panic(fmt.Sprintf( "keys found(%d): %s", len(keys), strings.Join(keys, ","), )) } // Check for keys not found in bucket that should be there. for k, _ := range m { _, ok := found[string(k)] if !ok { keys = append(keys, k) } } if len(keys) > 0 { sort.Strings(keys) panic(fmt.Sprintf( "keys not found(%d): %s", len(keys), strings.Join(keys, ","), )) } } func trunc(b []byte, length int) []byte { if length < len(b) { return b[:length] } return b } func truncDuration(d time.Duration) string { return regexp.MustCompile(`^(\d+)(\.\d+)`).ReplaceAllString( d.String(), "$1", ) } func fileSize(path string) int64 { fi, err := os.Stat(path) if err != nil { return 0 } return fi.Size() } // u64tob converts a uint64 into an 8-byte slice. func u64tob(v uint64) []byte { b := make([]byte, 8) binary.BigEndian.PutUint64(b, v) return b } // btou64 converts an 8-byte slice into an uint64. func btou64(b []byte) uint64 { return binary.BigEndian.Uint64(b) } // Ensure that a page is added to a transaction's freelist. 
func TestFreelist_free(t *testing.T) { f := newFreelist() f.free(100, &page{id: 12}) if !reflect.DeepEqual([]pgid{12}, f.pending[100]) { t.Fatalf("exp=%v, got=%v", []pgid{12}, f.pending[100]) } } // Ensure that a page and its overflow is added to a transaction's freelist. func TestFreelist_free_overflow(t *testing.T) { f := newFreelist() f.free(100, &page{id: 12, overflow: 3}) exp := []pgid{12, 13, 14, 15} if !reflect.DeepEqual(exp, f.pending[100]) { t.Fatalf("exp=%v, got=%v", exp, f.pending[100]) } } // Ensure that a transaction's free pages can be released. func TestFreelist_release(t *testing.T) { f := newFreelist() f.free(100, &page{id: 12, overflow: 1}) f.free(100, &page{id: 9}) f.free(102, &page{id: 39}) f.release(100) f.release(101) exp := []pgid{9, 12, 13} if !reflect.DeepEqual(exp, f.ids) { t.Fatalf("exp=%v, got=%v", exp, f.ids) } f.release(102) exp = []pgid{9, 12, 13, 39} if !reflect.DeepEqual(exp, f.ids) { t.Fatalf("exp=%v, got=%v", exp, f.ids) } } // Ensure that a freelist can find contiguous blocks of pages. 
func TestFreelist_allocate(t *testing.T) { f := &freelist{ids: []pgid{3, 4, 5, 6, 7, 9, 12, 13, 18}} id := int(f.allocate(3)) if id != 3 { t.Fatalf("exp=3, got=%v", id) } id = int(f.allocate(1)) if id != 6 { t.Fatalf("exp=6, got=%v", id) } id = int(f.allocate(3)) if id != 0 { t.Fatalf("exp=0, got=%v", id) } id = int(f.allocate(2)) if id != 12 { t.Fatalf("exp=12, got=%v", id) } id = int(f.allocate(1)) if id != 7 { t.Fatalf("exp=7, got=%v", id) } id = int(f.allocate(0)) if id != 0 { t.Fatalf("exp=0, got=%v", id) } id = int(f.allocate(0)) if id != 0 { t.Fatalf("exp=0, got=%v", id) } exp := []pgid{9, 18} if !reflect.DeepEqual(exp, f.ids) { t.Fatalf("exp=%v, got=%v", exp, f.ids) } id = int(f.allocate(1)) if id != 9 { t.Fatalf("exp=9, got=%v", id) } id = int(f.allocate(1)) if id != 18 { t.Fatalf("exp=18, got=%v", id) } id = int(f.allocate(1)) if id != 0 { t.Fatalf("exp=0, got=%v", id) } exp = []pgid{} if !reflect.DeepEqual(exp, f.ids) { t.Fatalf("exp=%v, got=%v", exp, f.ids) } } // Ensure that a freelist can deserialize from a freelist page. func TestFreelist_read(t *testing.T) { // Create a page. var buf [4096]byte page := (*page)(unsafe.Pointer(&buf[0])) page.flags = freelistPageFlag page.count = 2 // Insert 2 page ids. ids := (*[3]pgid)(unsafe.Pointer(&page.ptr)) ids[0] = 23 ids[1] = 50 // Deserialize page into a freelist. f := newFreelist() f.read(page) // Ensure that there are two page ids in the freelist. exp := []pgid{23, 50} if !reflect.DeepEqual(exp, f.ids) { t.Fatalf("exp=%v, got=%v", exp, f.ids) } } // Ensure that a freelist can serialize into a freelist page. func TestFreelist_write(t *testing.T) { // Create a freelist and write it to a page. var buf [4096]byte f := &freelist{ids: []pgid{12, 39}, pending: make(map[txid][]pgid)} f.pending[100] = []pgid{28, 11} f.pending[101] = []pgid{3} p := (*page)(unsafe.Pointer(&buf[0])) err := f.write(p) if err != nil { t.Fatal(err) } // Read the page back out. 
f2 := newFreelist() f2.read(p) // Ensure that the freelist is correct. // All pages should be present and in reverse order. exp := []pgid{3, 11, 12, 28, 39} if !reflect.DeepEqual(exp, f2.ids) { t.Fatalf("exp=%v, got=%v", exp, f2.ids) } } func Benchmark_FreelistRelease10K(b *testing.B) { benchmark_FreelistRelease(b, 10000) } func Benchmark_FreelistRelease100K(b *testing.B) { benchmark_FreelistRelease(b, 100000) } func Benchmark_FreelistRelease1000K(b *testing.B) { benchmark_FreelistRelease(b, 1000000) } func Benchmark_FreelistRelease10000K(b *testing.B) { benchmark_FreelistRelease(b, 10000000) } func benchmark_FreelistRelease(b *testing.B, size int) { ids := randomPgids(size) pending := randomPgids(len(ids) / 400) b.ResetTimer() for i := 0; i < b.N; i++ { f := &freelist{ids: ids, pending: map[txid][]pgid{1: pending}} f.release(1) } } func randomPgids(n int) []pgid { rand.Seed(42) pgids := make(pgids, n) for i := range pgids { pgids[i] = pgid(rand.Int63()) } sort.Sort(pgids) return pgids } func puts( n *node, oldKey string, newKey string, value string, pgid pgid, flags uint32, ) { n.put([]byte(oldKey), []byte(newKey), []byte(value), pgid, flags) } // Ensure that a node can insert a key/value. 
func TestNode_put(t *testing.T) { n := &node{ inodes: make(inodes, 0), bucket: &Bucket{ tx: &Tx{ meta: &meta{ pgid: 1, }, }, }, } puts(n, "baz", "baz", "2", 0, 0) puts(n, "foo", "foo", "0", 0, 0) puts(n, "bar", "bar", "1", 0, 0) puts(n, "foo", "foo", "3", 0, leafPageFlag) if len(n.inodes) != 3 { t.Fatalf("exp=3, got=%d", len(n.inodes)) } k, v := n.inodes[0].key, n.inodes[0].value if string(k) != "bar" || string(v) != "1" { t.Fatalf("exp=, got=<%s,%s>", k, v) } k, v = n.inodes[1].key, n.inodes[1].value if string(k) != "baz" || string(v) != "2" { t.Fatalf("exp=, got=<%s,%s>", k, v) } k, v = n.inodes[2].key, n.inodes[2].value if string(k) != "foo" || string(v) != "3" { t.Fatalf("exp=, got=<%s,%s>", k, v) } if n.inodes[2].flags != uint32(leafPageFlag) { t.Fatalf("not a leaf: %d", n.inodes[2].flags) } } // Ensure that a node can deserialize from a leaf page. func TestNode_read_LeafPage(t *testing.T) { // Create a page. var buf [4096]byte page := (*page)(unsafe.Pointer(&buf[0])) page.flags = leafPageFlag page.count = 2 // Insert 2 elements at the beginning. sizeof(leafPageElement) == 16 nodes := (*[3]leafPageElement)(unsafe.Pointer(&page.ptr)) // pos = sizeof(leafPageElement) * 2 nodes[0] = leafPageElement{flags: 0, pos: 32, ksize: 3, vsize: 4} // pos = sizeof(leafPageElement) + 3 + 4 nodes[1] = leafPageElement{flags: 0, pos: 23, ksize: 10, vsize: 3} // Write data for the nodes at the end. data := (*[4096]byte)(unsafe.Pointer(&nodes[2])) copy(data[:], []byte("barfooz")) copy(data[7:], []byte("helloworldbye")) // Deserialize page into a leaf. n := &node{} n.read(page) // Check that there are two inodes with correct data. 
if !n.isLeaf { t.Fatal("expected leaf") } if len(n.inodes) != 2 { t.Fatalf("exp=2, got=%d", len(n.inodes)) } k, v := n.inodes[0].key, n.inodes[0].value if string(k) != "bar" || string(v) != "fooz" { t.Fatalf("exp=, got=<%s,%s>", k, v) } k, v = n.inodes[1].key, n.inodes[1].value if string(k) != "helloworld" || string(v) != "bye" { t.Fatalf("exp=, got=<%s,%s>", k, v) } } // Ensure that a node can serialize into a leaf page. func TestNode_write_LeafPage(t *testing.T) { // Create a node. n := &node{ isLeaf: true, inodes: make(inodes, 0), bucket: &Bucket{ tx: &Tx{ db: &DB{}, meta: &meta{ pgid: 1, }, }, }, } puts(n, "susy", "susy", "que", 0, 0) puts(n, "ricki", "ricki", "lake", 0, 0) puts(n, "john", "john", "johnson", 0, 0) // Write it to a page. var buf [4096]byte p := (*page)(unsafe.Pointer(&buf[0])) n.write(p) // Read the page back in. n2 := &node{} n2.read(p) // Check that the two pages are the same. if len(n2.inodes) != 3 { t.Fatalf("exp=3, got=%d", len(n2.inodes)) } k, v := n2.inodes[0].key, n2.inodes[0].value if string(k) != "john" || string(v) != "johnson" { t.Fatalf("exp=, got=<%s,%s>", k, v) } k, v = n2.inodes[1].key, n2.inodes[1].value if string(k) != "ricki" || string(v) != "lake" { t.Fatalf("exp=, got=<%s,%s>", k, v) } k, v = n2.inodes[2].key, n2.inodes[2].value if string(k) != "susy" || string(v) != "que" { t.Fatalf("exp=, got=<%s,%s>", k, v) } } // Ensure that a node can split into appropriate subgroups. func TestNode_split(t *testing.T) { // Create a node. n := &node{ inodes: make(inodes, 0), bucket: &Bucket{ tx: &Tx{ db: &DB{}, meta: &meta{ pgid: 1, }, }, }, } puts(n, "00000001", "00000001", "0123456701234567", 0, 0) puts(n, "00000002", "00000002", "0123456701234567", 0, 0) puts(n, "00000003", "00000003", "0123456701234567", 0, 0) puts(n, "00000004", "00000004", "0123456701234567", 0, 0) puts(n, "00000005", "00000005", "0123456701234567", 0, 0) // Split between 2 & 3. 
n.split(100) parent := n.parent if len(parent.children) != 2 { t.Fatalf("exp=2, got=%d", len(parent.children)) } if len(parent.children[0].inodes) != 2 { t.Fatalf("exp=2, got=%d", len(parent.children[0].inodes)) } if len(parent.children[1].inodes) != 3 { t.Fatalf("exp=3, got=%d", len(parent.children[1].inodes)) } } // Ensure that a page with the minimum number of inodes just returns a single // node. func TestNode_split_MinKeys(t *testing.T) { // Create a node. n := &node{ inodes: make(inodes, 0), bucket: &Bucket{ tx: &Tx{ db: &DB{}, meta: &meta{ pgid: 1, }, }, }, } puts(n, "00000001", "00000001", "0123456701234567", 0, 0) puts(n, "00000002", "00000002", "0123456701234567", 0, 0) // Split. n.split(20) if n.parent != nil { t.Fatalf("expected nil parent") } } // Ensure that a node that has keys that all fit on a page just returns one // leaf. func TestNode_split_SinglePage(t *testing.T) { // Create a node. n := &node{ inodes: make(inodes, 0), bucket: &Bucket{ tx: &Tx{ db: &DB{}, meta: &meta{ pgid: 1, }, }, }, } puts(n, "00000001", "00000001", "0123456701234567", 0, 0) puts(n, "00000002", "00000002", "0123456701234567", 0, 0) puts(n, "00000003", "00000003", "0123456701234567", 0, 0) puts(n, "00000004", "00000004", "0123456701234567", 0, 0) puts(n, "00000005", "00000005", "0123456701234567", 0, 0) // Split. n.split(4096) if n.parent != nil { t.Fatalf("expected nil parent") } } // Ensure that the page type can be returned in human readable format. 
func TestPage_typ(t *testing.T) { typ := (&page{flags: branchPageFlag}).typ() if typ != "branch" { t.Fatalf("exp=branch, got=%v", typ) } typ = (&page{flags: leafPageFlag}).typ() if typ != "leaf" { t.Fatalf("exp=leaf, got=%v", typ) } typ = (&page{flags: metaPageFlag}).typ() if typ != "meta" { t.Fatalf("exp=meta, got=%v", typ) } typ = (&page{flags: freelistPageFlag}).typ() if typ != "freelist" { t.Fatalf("exp=freelist, got=%v", typ) } typ = (&page{flags: 20000}).typ() if typ != "unknown<4e20>" { t.Fatalf("exp=unknown<4e20>, got=%v", typ) } } func TestPgids_merge(t *testing.T) { a := pgids{4, 5, 6, 10, 11, 12, 13, 27} b := pgids{1, 3, 8, 9, 25, 30} c := a.merge(b) expected := pgids{1, 3, 4, 5, 6, 8, 9, 10, 11, 12, 13, 25, 27, 30} if !reflect.DeepEqual(c, expected) { t.Errorf("mismatch: %v", c) } a = pgids{4, 5, 6, 10, 11, 12, 13, 27, 35, 36} b = pgids{8, 9, 25, 30} c = a.merge(b) expected = pgids{4, 5, 6, 8, 9, 10, 11, 12, 13, 25, 27, 30, 35, 36} if !reflect.DeepEqual(c, expected) { t.Errorf("mismatch: %v", c) } } func TestPgids_merge_quick(t *testing.T) { err := quick.Check(func(a, b pgids) bool { // Sort incoming lists. sort.Sort(a) sort.Sort(b) // Merge the two lists together. got := a.merge(b) // The expected value should be the two lists combined and // sorted. exp := append(a, b...) sort.Sort(exp) if !reflect.DeepEqual(exp, got) { t.Errorf("\nexp=%+v\ngot=%+v\n", exp, got) return false } return true }, nil) if err != nil { t.Fatal(err) } } // testing/quick defaults to 5 iterations and a random seed. // You can override these settings from the command line: // // -quick.count The number of iterations to perform. // -quick.seed The seed to use for randomizing. // -quick.maxitems The maximum number of items to insert into a DB. // -quick.maxksize The maximum size of a key. // -quick.maxvsize The maximum size of a value. 
// var ( qcount int qseed int qmaxitems int qmaxksize int qmaxvsize int ) func _init() { flag.IntVar(&qcount, "quick.count", 5, "") flag.IntVar(&qseed, "quick.seed", int(time.Now().UnixNano())%100000, "") flag.IntVar(&qmaxitems, "quick.maxitems", 1000, "") flag.IntVar(&qmaxksize, "quick.maxksize", 1024, "") flag.IntVar(&qmaxvsize, "quick.maxvsize", 1024, "") flag.Parse() fmt.Fprintln(os.Stderr, "seed:", qseed) fmt.Fprintf( os.Stderr, "quick settings: count=%v, items=%v, ksize=%v, vsize=%v\n", qcount, qmaxitems, qmaxksize, qmaxvsize, ) } func qconfig() *quick.Config { return &quick.Config{ MaxCount: qcount, Rand: rand.New(rand.NewSource(int64(qseed))), } } type testdata []testdataitem func (t testdata) Len() int { return len(t) } func (t testdata) Swap(i, j int) { t[i], t[j] = t[j], t[i] } func (t testdata) Less(i, j int) bool { return bytes.Compare(t[i].Key, t[j].Key) == -1 } func (t testdata) Generate(rand *rand.Rand, size int) reflect.Value { n := rand.Intn(qmaxitems-1) + 1 items := make(testdata, n) used := make(map[string]bool) for i := 0; i < n; i++ { item := &items[i] // Ensure that keys are unique by looping until we find one that // we have not already used. 
for { item.Key = randByteSlice(rand, 1, qmaxksize) if !used[string(item.Key)] { used[string(item.Key)] = true break } } item.Value = randByteSlice(rand, 0, qmaxvsize) } return reflect.ValueOf(items) } type revtestdata []testdataitem func (t revtestdata) Len() int { return len(t) } func (t revtestdata) Swap(i, j int) { t[i], t[j] = t[j], t[i] } func (t revtestdata) Less(i, j int) bool { return bytes.Compare(t[i].Key, t[j].Key) == 1 } type testdataitem struct { Key []byte Value []byte } func randByteSlice(rand *rand.Rand, minSize, maxSize int) []byte { n := rand.Intn(maxSize-minSize) + minSize b := make([]byte, n) for i := 0; i < n; i++ { b[i] = byte(rand.Intn(255)) } return b } func TestSimulate_1op_1p(t *testing.T) { testSimulate(t, 1, 1) } func TestSimulate_10op_1p(t *testing.T) { testSimulate(t, 10, 1) } func TestSimulate_100op_1p(t *testing.T) { testSimulate(t, 100, 1) } func TestSimulate_1000op_1p(t *testing.T) { testSimulate(t, 1000, 1) } func TestSimulate_10000op_1p(t *testing.T) { return // FIXME: too slow testSimulate(t, 10000, 1) } func TestSimulate_10op_10p(t *testing.T) { testSimulate(t, 10, 10) } func TestSimulate_100op_10p(t *testing.T) { testSimulate(t, 100, 10) } func TestSimulate_1000op_10p(t *testing.T) { testSimulate(t, 1000, 10) } func TestSimulate_10000op_10p(t *testing.T) { return // FIXME: too slow testSimulate(t, 10000, 10) } func TestSimulate_100op_100p(t *testing.T) { testSimulate(t, 100, 100) } func TestSimulate_1000op_100p(t *testing.T) { testSimulate(t, 1000, 100) } func TestSimulate_10000op_100p(t *testing.T) { return // FIXME: too slow testSimulate(t, 10000, 100) } func TestSimulate_10000op_1000p(t *testing.T) { return // FIXME: too slow testSimulate(t, 10000, 1000) } // Randomly generate operations on a given database with multiple clients to // ensure consistency and thread safety. 
func testSimulate(t *testing.T, threadCount, parallelism int) { if testing.Short() { t.Skip("skipping test in short mode.") } rand.Seed(int64(qseed)) // A list of operations that readers and writers can perform. readerHandlers := []simulateHandler{ simulateGetHandler, } writerHandlers := []simulateHandler{ simulateGetHandler, simulatePutHandler, } versions := make(map[int]*QuickDB) versions[1] = NewQuickDB() db := MustOpenDB() defer db.MustClose() defer os.Remove(db.Path()) var mutex sync.Mutex // Run n threads in parallel, each with their own operation. var wg sync.WaitGroup threads := make(chan bool, parallelism) var i int for { threads <- true wg.Add(1) writable := ((rand.Int() % 100) < 20) // 20% writers // Choose an operation to execute. var handler simulateHandler if writable { handler = writerHandlers[rand.Intn(len(writerHandlers))] } else { handler = readerHandlers[rand.Intn(len(readerHandlers))] } // Execute a thread for the given operation. go func(writable bool, handler simulateHandler) { defer wg.Done() // Start transaction. tx, err := db.Begin(writable) if err != nil { t.Fatal("tx begin: ", err) } // Obtain current state of the dataset. mutex.Lock() var qdb = versions[tx.ID()] if writable { qdb = versions[tx.ID()-1].Copy() } mutex.Unlock() // Make sure we commit/rollback the tx at the end and // update the state. if writable { defer func() { mutex.Lock() versions[tx.ID()] = qdb mutex.Unlock() err := tx.Commit() if err != nil { t.Fatal(err) } }() } else { defer func() { _ = tx.Rollback() }() } // Ignore operation if we don't have data yet. if qdb == nil { return } // Execute handler. handler(tx, qdb) // Release a thread back to the scheduling loop. <-threads }(writable, handler) i++ if i > threadCount { break } } // Wait until all threads are done. wg.Wait() } type simulateHandler func(tx *Tx, qdb *QuickDB) // Retrieves a key from the database and verifies that it is what is expected. 
func simulateGetHandler(tx *Tx, qdb *QuickDB) { // Randomly retrieve an existing exist. keys := qdb.Rand() if len(keys) == 0 { return } // Retrieve root bucket. b := tx.Bucket(keys[0]) if b == nil { panic(fmt.Sprintf( "bucket[0] expected: %08x\n", trunc(keys[0], 4), )) } // Drill into nested buckets. for _, key := range keys[1 : len(keys)-1] { b = b.Bucket(key) if b == nil { panic(fmt.Sprintf( "bucket[n] expected: %v -> %v\n", keys, key, )) } } // Verify key/value on the final bucket. expected := qdb.Get(keys) actual := b.Get(keys[len(keys)-1]) if !bytes.Equal(actual, expected) { fmt.Println("=== EXPECTED ===") fmt.Println(expected) fmt.Println("=== ACTUAL ===") fmt.Println(actual) fmt.Println("=== END ===") panic("value mismatch") } } // Inserts a key into the database. func simulatePutHandler(tx *Tx, qdb *QuickDB) { var err error keys, value := randKeys(), randValue() // Retrieve root bucket. b := tx.Bucket(keys[0]) if b == nil { b, err = tx.CreateBucket(keys[0]) if err != nil { panic("create bucket: " + err.Error()) } } // Create nested buckets, if necessary. for _, key := range keys[1 : len(keys)-1] { child := b.Bucket(key) if child != nil { b = child } else { b, err = b.CreateBucket(key) if err != nil { panic("create bucket: " + err.Error()) } } } // Insert into database. err = b.Put(keys[len(keys)-1], value) if err != nil { panic("put: " + err.Error()) } // Insert into in-memory database. qdb.Put(keys, value) } // QuickDB is an in-memory database that replicates the functionality of the // Bolt DB type except that it is entirely in-memory. It is meant for testing // that the Bolt database is consistent. type QuickDB struct { sync.RWMutex m map[string]interface{} } // NewQuickDB returns an instance of QuickDB. func NewQuickDB() *QuickDB { return &QuickDB{m: make(map[string]interface{})} } // Get retrieves the value at a key path. 
func (db *QuickDB) Get(keys [][]byte) []byte { db.RLock() defer db.RUnlock() m := db.m for _, key := range keys[:len(keys)-1] { value := m[string(key)] if value == nil { return nil } switch value := value.(type) { case map[string]interface{}: m = value case []byte: return nil } } // Only return if it's a simple value. value, ok := m[string(keys[len(keys)-1])].([]byte) if ok { return value } return nil } // Put inserts a value into a key path. func (db *QuickDB) Put(keys [][]byte, value []byte) { db.Lock() defer db.Unlock() // Build buckets all the way down the key path. m := db.m for _, key := range keys[:len(keys)-1] { _, ok := m[string(key)].([]byte) if ok { // Keypath intersects with a simple value. Do nothing. return } if m[string(key)] == nil { m[string(key)] = make(map[string]interface{}) } m = m[string(key)].(map[string]interface{}) } // Insert value into the last key. m[string(keys[len(keys)-1])] = value } // Rand returns a random key path that points to a simple value. func (db *QuickDB) Rand() [][]byte { db.RLock() defer db.RUnlock() if len(db.m) == 0 { return nil } var keys [][]byte db.rand(db.m, &keys) return keys } func (db *QuickDB) rand(m map[string]interface{}, keys *[][]byte) { i, index := 0, rand.Intn(len(m)) for k, v := range m { if i == index { *keys = append(*keys, []byte(k)) v, ok := v.(map[string]interface{}) if ok { db.rand(v, keys) } return } i++ } panic("quickdb rand: out-of-range") } // Copy copies the entire database. 
func (db *QuickDB) Copy() *QuickDB { db.RLock() defer db.RUnlock() return &QuickDB{m: db.copy(db.m)} } func (db *QuickDB) copy(m map[string]interface{}) map[string]interface{} { clone := make(map[string]interface{}, len(m)) for k, v := range m { switch v := v.(type) { case map[string]interface{}: clone[k] = db.copy(v) default: clone[k] = v } } return clone } func randKey() []byte { const ( min = 1 max = 1024 ) n := rand.Intn(max-min) + min b := make([]byte, n) for i := 0; i < n; i++ { b[i] = byte(rand.Intn(255)) } return b } func randKeys() [][]byte { keys := [][]byte{} count := rand.Intn(2) + 2 for i := 0; i < count; i++ { keys = append(keys, randKey()) } return keys } func randValue() []byte { n := rand.Intn(8192) b := make([]byte, n) for i := 0; i < n; i++ { b[i] = byte(rand.Intn(255)) } return b } // Ensure that committing a closed transaction returns an error. func TestTx_Commit_ErrTxClosed(t *testing.T) { db := MustOpenDB() defer db.MustClose() defer os.Remove(db.Path()) tx, err := db.Begin(true) if err != nil { t.Fatal(err) } _, err = tx.CreateBucket([]byte("foo")) if err != nil { t.Fatal(err) } err = tx.Commit() if err != nil { t.Fatal(err) } err = tx.Commit() if err != ErrTxClosed { t.Fatalf("unexpected error: %s", err) } } // Ensure that rolling back a closed transaction returns an error. func TestTx_Rollback_ErrTxClosed(t *testing.T) { db := MustOpenDB() defer db.MustClose() defer os.Remove(db.Path()) tx, err := db.Begin(true) if err != nil { t.Fatal(err) } err = tx.Rollback() if err != nil { t.Fatal(err) } err = tx.Rollback() if err != ErrTxClosed { t.Fatalf("unexpected error: %s", err) } } // Ensure that committing a read-only transaction returns an error. 
func TestTx_Commit_ErrTxNotWritable(t *testing.T) { db := MustOpenDB() defer db.MustClose() defer os.Remove(db.Path()) tx, err := db.Begin(false) if err != nil { t.Fatal(err) } err = tx.Commit() if err != ErrTxNotWritable { t.Fatal(err) } } // Ensure that a transaction can retrieve a cursor on the root bucket. func TestTx_Cursor(t *testing.T) { db := MustOpenDB() defer db.MustClose() defer os.Remove(db.Path()) err := db.Update(func(tx *Tx) error { _, err := tx.CreateBucket([]byte("widgets")) if err != nil { t.Fatal(err) } _, err = tx.CreateBucket([]byte("woojits")) if err != nil { t.Fatal(err) } c := tx.Cursor() k, v := c.First() if !bytes.Equal(k, []byte("widgets")) { t.Fatalf("unexpected key: %v", k) } else if v != nil { t.Fatalf("unexpected value: %v", v) } k, v = c.Next() if !bytes.Equal(k, []byte("woojits")) { t.Fatalf("unexpected key: %v", k) } else if v != nil { t.Fatalf("unexpected value: %v", v) } k, v = c.Next() if k != nil { t.Fatalf("unexpected key: %v", k) } else if v != nil { t.Fatalf("unexpected value: %v", k) } return nil }) if err != nil { t.Fatal(err) } } // Ensure that creating a bucket with a read-only transaction returns an error. func TestTx_CreateBucket_ErrTxNotWritable(t *testing.T) { db := MustOpenDB() defer db.MustClose() defer os.Remove(db.Path()) err := db.View(func(tx *Tx) error { _, err := tx.CreateBucket([]byte("foo")) if err != ErrTxNotWritable { t.Fatalf("unexpected error: %s", err) } return nil }) if err != nil { t.Fatal(err) } } // Ensure that creating a bucket on a closed transaction returns an error. func TestTx_CreateBucket_ErrTxClosed(t *testing.T) { db := MustOpenDB() defer db.MustClose() defer os.Remove(db.Path()) tx, err := db.Begin(true) if err != nil { t.Fatal(err) } err = tx.Commit() if err != nil { t.Fatal(err) } _, err = tx.CreateBucket([]byte("foo")) if err != ErrTxClosed { t.Fatalf("unexpected error: %s", err) } } // Ensure that a Tx can retrieve a bucket. 
func TestTx_Bucket(t *testing.T) {
	db := MustOpenDB()
	defer db.MustClose()
	defer os.Remove(db.Path())

	err := db.Update(func(tx *Tx) error {
		_, err := tx.CreateBucket([]byte("widgets"))
		if err != nil {
			t.Fatal(err)
		}
		// The bucket just created must be visible within the same tx.
		if tx.Bucket([]byte("widgets")) == nil {
			t.Fatal("expected bucket")
		}
		return nil
	})
	if err != nil {
		t.Fatal(err)
	}
}

// Ensure that a Tx retrieving a non-existent key returns nil.
func TestTx_Get_NotFound(t *testing.T) {
	db := MustOpenDB()
	defer db.MustClose()
	defer os.Remove(db.Path())

	err := db.Update(func(tx *Tx) error {
		b, err := tx.CreateBucket([]byte("widgets"))
		if err != nil {
			t.Fatal(err)
		}
		err = b.Put([]byte("foo"), []byte("bar"))
		if err != nil {
			t.Fatal(err)
		}
		// A different, absent key must come back nil even though
		// the bucket contains data.
		if b.Get([]byte("no_such_key")) != nil {
			t.Fatal("expected nil value")
		}
		return nil
	})
	if err != nil {
		t.Fatal(err)
	}
}

// Ensure that a bucket can be created and retrieved.
func TestTx_CreateBucket(t *testing.T) {
	db := MustOpenDB()
	defer db.MustClose()
	defer os.Remove(db.Path())

	// Create a bucket.
	err := db.Update(func(tx *Tx) error {
		b, err := tx.CreateBucket([]byte("widgets"))
		if err != nil {
			t.Fatal(err)
		} else if b == nil {
			t.Fatal("expected bucket")
		}
		return nil
	})
	if err != nil {
		t.Fatal(err)
	}

	// Read the bucket through a separate transaction.
	err = db.View(func(tx *Tx) error {
		if tx.Bucket([]byte("widgets")) == nil {
			t.Fatal("expected bucket")
		}
		return nil
	})
	if err != nil {
		t.Fatal(err)
	}
}

// Ensure that a bucket can be created if it doesn't already exist.
func TestTx_CreateBucketIfNotExists(t *testing.T) {
	db := MustOpenDB()
	defer db.MustClose()
	defer os.Remove(db.Path())

	err := db.Update(func(tx *Tx) error {
		// Create bucket.
		b, err := tx.CreateBucketIfNotExists([]byte("widgets"))
		if err != nil {
			t.Fatal(err)
		} else if b == nil {
			t.Fatal("expected bucket")
		}

		// Create bucket again: must succeed and return the
		// existing bucket rather than an ErrBucketExists error.
		b, err = tx.CreateBucketIfNotExists([]byte("widgets"))
		if err != nil {
			t.Fatal(err)
		} else if b == nil {
			t.Fatal("expected bucket")
		}
		return nil
	})
	if err != nil {
		t.Fatal(err)
	}

	// Read the bucket through a separate transaction.
	err = db.View(func(tx *Tx) error {
		if tx.Bucket([]byte("widgets")) == nil {
			t.Fatal("expected bucket")
		}
		return nil
	})
	if err != nil {
		t.Fatal(err)
	}
}

// Ensure transaction returns an error if creating an unnamed bucket.
func TestTx_CreateBucketIfNotExists_ErrBucketNameRequired(t *testing.T) {
	db := MustOpenDB()
	defer db.MustClose()
	defer os.Remove(db.Path())

	err := db.Update(func(tx *Tx) error {
		// Both the empty (non-nil) name and the nil name must be
		// rejected the same way.
		_, err := tx.CreateBucketIfNotExists([]byte{})
		if err != ErrBucketNameRequired {
			t.Fatalf("unexpected error: %s", err)
		}

		_, err = tx.CreateBucketIfNotExists(nil)
		if err != ErrBucketNameRequired {
			t.Fatalf("unexpected error: %s", err)
		}
		return nil
	})
	if err != nil {
		t.Fatal(err)
	}
}

// Ensure that a bucket cannot be created twice.
func TestTx_CreateBucket_ErrBucketExists(t *testing.T) {
	db := MustOpenDB()
	defer db.MustClose()
	defer os.Remove(db.Path())

	// Create a bucket.
	err := db.Update(func(tx *Tx) error {
		_, err := tx.CreateBucket([]byte("widgets"))
		if err != nil {
			t.Fatal(err)
		}
		return nil
	})
	if err != nil {
		t.Fatal(err)
	}

	// Create the same bucket again.
	err = db.Update(func(tx *Tx) error {
		_, err := tx.CreateBucket([]byte("widgets"))
		if err != ErrBucketExists {
			t.Fatalf("unexpected error: %s", err)
		}
		return nil
	})
	if err != nil {
		t.Fatal(err)
	}
}

// Ensure that a bucket is created with a non-blank name.
func TestTx_CreateBucket_ErrBucketNameRequired(t *testing.T) {
	db := MustOpenDB()
	defer db.MustClose()
	defer os.Remove(db.Path())

	err := db.Update(func(tx *Tx) error {
		_, err := tx.CreateBucket(nil)
		if err != ErrBucketNameRequired {
			t.Fatalf("unexpected error: %s", err)
		}
		return nil
	})
	if err != nil {
		t.Fatal(err)
	}
}

// Ensure that a bucket can be deleted.
func TestTx_DeleteBucket(t *testing.T) { db := MustOpenDB() defer db.MustClose() defer os.Remove(db.Path()) // Create a bucket and add a value. err := db.Update(func(tx *Tx) error { b, err := tx.CreateBucket([]byte("widgets")) if err != nil { t.Fatal(err) } err = b.Put([]byte("foo"), []byte("bar")) if err != nil { t.Fatal(err) } return nil }) if err != nil { t.Fatal(err) } // Delete the bucket and make sure we can't get the value. err = db.Update(func(tx *Tx) error { err := tx.DeleteBucket([]byte("widgets")) if err != nil { t.Fatal(err) } if tx.Bucket([]byte("widgets")) != nil { t.Fatal("unexpected bucket") } return nil }) if err != nil { t.Fatal(err) } err = db.Update(func(tx *Tx) error { // Create the bucket again and make sure there's not a phantom // value. b, err := tx.CreateBucket([]byte("widgets")) if err != nil { t.Fatal(err) } v := b.Get([]byte("foo")) if v != nil { t.Fatalf("unexpected phantom value: %v", v) } return nil }) if err != nil { t.Fatal(err) } } // Ensure that deleting a bucket on a closed transaction returns an error. func TestTx_DeleteBucket_ErrTxClosed(t *testing.T) { db := MustOpenDB() defer db.MustClose() defer os.Remove(db.Path()) tx, err := db.Begin(true) if err != nil { t.Fatal(err) } err = tx.Commit() if err != nil { t.Fatal(err) } err = tx.DeleteBucket([]byte("foo")) if err != ErrTxClosed { t.Fatalf("unexpected error: %s", err) } } // Ensure that deleting a bucket with a read-only transaction returns an error. func TestTx_DeleteBucket_ReadOnly(t *testing.T) { db := MustOpenDB() defer db.MustClose() defer os.Remove(db.Path()) err := db.View(func(tx *Tx) error { err := tx.DeleteBucket([]byte("foo")) if err != ErrTxNotWritable { t.Fatalf("unexpected error: %s", err) } return nil }) if err != nil { t.Fatal(err) } } // Ensure that nothing happens when deleting a bucket that doesn't exist. 
func TestTx_DeleteBucket_NotFound(t *testing.T) { db := MustOpenDB() defer db.MustClose() defer os.Remove(db.Path()) err := db.Update(func(tx *Tx) error { err := tx.DeleteBucket([]byte("widgets")) if err != ErrBucketNotFound { t.Fatalf("unexpected error: %s", err) } return nil }) if err != nil { t.Fatal(err) } } // Ensure that no error is returned when a tx.ForEach function does not return // an error. func TestTx_ForEach_NoError(t *testing.T) { db := MustOpenDB() defer db.MustClose() defer os.Remove(db.Path()) err := db.Update(func(tx *Tx) error { b, err := tx.CreateBucket([]byte("widgets")) if err != nil { t.Fatal(err) } err = b.Put([]byte("foo"), []byte("bar")) if err != nil { t.Fatal(err) } err = tx.ForEach(func(name []byte, b *Bucket) error { return nil }) if err != nil { t.Fatal(err) } return nil }) if err != nil { t.Fatal(err) } } // Ensure that an error is returned when a tx.ForEach function returns an error. func TestTx_ForEach_WithError(t *testing.T) { db := MustOpenDB() defer db.MustClose() defer os.Remove(db.Path()) err := db.Update(func(tx *Tx) error { b, err := tx.CreateBucket([]byte("widgets")) if err != nil { t.Fatal(err) } err = b.Put([]byte("foo"), []byte("bar")) if err != nil { t.Fatal(err) } marker := errors.New("marker") err = tx.ForEach(func(name []byte, b *Bucket) error { return marker }) if err != marker { t.Fatalf("unexpected error: %s", err) } return nil }) if err != nil { t.Fatal(err) } } // Ensure that Tx commit handlers are called after a transaction successfully // commits. func TestTx_OnCommit(t *testing.T) { db := MustOpenDB() defer db.MustClose() defer os.Remove(db.Path()) var x int err := db.Update(func(tx *Tx) error { tx.OnCommit(func() { x += 1 }) tx.OnCommit(func() { x += 2 }) _, err := tx.CreateBucket([]byte("widgets")) if err != nil { t.Fatal(err) } return nil }) if err != nil { t.Fatal(err) } else if x != 3 { t.Fatalf("unexpected x: %d", x) } } // Ensure that Tx commit handlers are NOT called after a transaction rolls back. 
func TestTx_OnCommit_Rollback(t *testing.T) { db := MustOpenDB() defer db.MustClose() defer os.Remove(db.Path()) var x int err := db.Update(func(tx *Tx) error { tx.OnCommit(func() { x += 1 }) tx.OnCommit(func() { x += 2 }) _, err := tx.CreateBucket([]byte("widgets")) if err != nil { t.Fatal(err) } return errors.New("rollback this commit") }) if err == nil || err.Error() != "rollback this commit" { t.Fatalf("unexpected error: %s", err) } else if x != 0 { t.Fatalf("unexpected x: %d", x) } } // Ensure that the database can be copied to a file path. func TestTx_CopyFile(t *testing.T) { db := MustOpenDB() defer db.MustClose() defer os.Remove(db.Path()) path := tempfile() defer os.Remove(path) err := db.Update(func(tx *Tx) error { b, err := tx.CreateBucket([]byte("widgets")) if err != nil { t.Fatal(err) } err = b.Put([]byte("foo"), []byte("bar")) if err != nil { t.Fatal(err) } err = b.Put([]byte("baz"), []byte("bat")) if err != nil { t.Fatal(err) } return nil }) if err != nil { t.Fatal(err) } err = db.View(func(tx *Tx) error { return tx.CopyFile(path, 0600) }) if err != nil { t.Fatal(err) } db2, err := Open(path) if err != nil { t.Fatal(err) } err = db2.View(func(tx *Tx) error { v := tx.Bucket([]byte("widgets")).Get([]byte("foo")) if !bytes.Equal(v, []byte("bar")) { t.Fatalf("unexpected value: %v", v) } v = tx.Bucket([]byte("widgets")).Get([]byte("baz")) if !bytes.Equal(v, []byte("bat")) { t.Fatalf("unexpected value: %v", v) } return nil }) if err != nil { t.Fatal(err) } err = db2.Close() if err != nil { t.Fatal(err) } } type failWriterError struct{} func (failWriterError) Error() string { return "error injected for tests" } type failWriter struct { // fail after this many bytes After int } func (f *failWriter) Write(p []byte) (n int, err error) { n = len(p) if n > f.After { n = f.After err = failWriterError{} } f.After -= n return n, err } // Ensure that Copy handles write errors right. 
func TestTx_CopyFile_Error_Meta(t *testing.T) {
	db := MustOpenDB()
	defer db.MustClose()
	defer os.Remove(db.Path())

	err := db.Update(func(tx *Tx) error {
		b, err := tx.CreateBucket([]byte("widgets"))
		if err != nil {
			t.Fatal(err)
		}
		err = b.Put([]byte("foo"), []byte("bar"))
		if err != nil {
			t.Fatal(err)
		}
		err = b.Put([]byte("baz"), []byte("bat"))
		if err != nil {
			t.Fatal(err)
		}
		return nil
	})
	if err != nil {
		t.Fatal(err)
	}

	// A zero-valued failWriter fails on the very first byte, so the
	// copy must die while writing meta page 0.
	const expectedMsg = "meta 0 copy: error injected for tests"
	err = db.View(func(tx *Tx) error {
		_, err := tx.WriteTo(&failWriter{})
		return err
	})
	if err == nil || err.Error() != expectedMsg {
		t.Fatalf("unexpected error: %v", err)
	}
}

// Ensure that Copy handles write errors right.
func TestTx_CopyFile_Error_Normal(t *testing.T) {
	db := MustOpenDB()
	defer db.MustClose()
	defer os.Remove(db.Path())

	err := db.Update(func(tx *Tx) error {
		b, err := tx.CreateBucket([]byte("widgets"))
		if err != nil {
			t.Fatal(err)
		}
		err = b.Put([]byte("foo"), []byte("bar"))
		if err != nil {
			t.Fatal(err)
		}
		err = b.Put([]byte("baz"), []byte("bat"))
		if err != nil {
			t.Fatal(err)
		}
		return nil
	})
	if err != nil {
		t.Fatal(err)
	}

	// Allow three pages through so the failure lands past the meta
	// pages, in the normal data-copy phase.
	err = db.View(func(tx *Tx) error {
		_, err := tx.WriteTo(&failWriter{3 * db.pageSize})
		return err
	})
	if err == nil || err.Error() != "error injected for tests" {
		t.Fatalf("unexpected error: %v", err)
	}
}

func ExampleTx_Rollback() {
	// Open the database.
	db, err := Open(tempfile())
	if err != nil {
		log.Fatal(err)
	}
	defer os.Remove(db.Path())

	// Create a bucket.
	err = db.Update(func(tx *Tx) error {
		_, err := tx.CreateBucket([]byte("widgets"))
		return err
	})
	if err != nil {
		log.Fatal(err)
	}

	// Set a value for a key.
	err = db.Update(func(tx *Tx) error {
		return tx.Bucket([]byte("widgets")).Put(
			[]byte("foo"),
			[]byte("bar"),
		)
	})
	if err != nil {
		log.Fatal(err)
	}

	// Update the key but rollback the transaction so it never saves.
	tx, err := db.Begin(true)
	if err != nil {
		log.Fatal(err)
	}
	b := tx.Bucket([]byte("widgets"))
	err = b.Put([]byte("foo"), []byte("baz"))
	if err != nil {
		log.Fatal(err)
	}
	err = tx.Rollback()
	if err != nil {
		log.Fatal(err)
	}

	// Ensure that our original value is still set.
	err = db.View(func(tx *Tx) error {
		value := tx.Bucket([]byte("widgets")).Get([]byte("foo"))
		fmt.Printf("The value for 'foo' is still: %s\n", value)
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}

	// Close database to release file lock.
	err = db.Close()
	if err != nil {
		log.Fatal(err)
	}
	// Output:
	// The value for 'foo' is still: bar
}

func ExampleTx_CopyFile() {
	// Open the database.
	db, err := Open(tempfile())
	if err != nil {
		log.Fatal(err)
	}
	defer os.Remove(db.Path())

	// Create a bucket and a key.
	err = db.Update(func(tx *Tx) error {
		b, err := tx.CreateBucket([]byte("widgets"))
		if err != nil {
			return err
		}
		err = b.Put([]byte("foo"), []byte("bar"))
		if err != nil {
			return err
		}
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}

	// Copy the database to another file.
	toFile := tempfile()
	defer os.Remove(toFile)
	err = db.View(func(tx *Tx) error {
		return tx.CopyFile(toFile, 0666)
	})
	if err != nil {
		log.Fatal(err)
	}

	// Open the cloned database.
	db2, err := Open(toFile)
	if err != nil {
		log.Fatal(err)
	}

	// Ensure that the key exists in the copy.
	err = db2.View(func(tx *Tx) error {
		value := tx.Bucket([]byte("widgets")).Get([]byte("foo"))
		fmt.Printf("The value for 'foo' in the clone is: %s\n", value)
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}

	// Close database to release file lock.
	err = db.Close()
	if err != nil {
		log.Fatal(err)
	}

	err = db2.Close()
	if err != nil {
		log.Fatal(err)
	}

	// Output:
	// The value for 'foo' in the clone is: bar
}

// MustOpen creates a Bolt database in a temporary location.
func MustOpen2() *WDB2 {
	// Create temporary path.
	// NOTE(review): ioutil.TempFile is deprecated since Go 1.16 in
	// favor of os.CreateTemp — consider migrating the whole file.
	// The create/close/remove dance only reserves a unique name; the
	// removal leaves a small race window before Open recreates it.
	f, _ := ioutil.TempFile("", "bolt-")
	f.Close()
	os.Remove(f.Name())

	db, err := Open(f.Name())
	if err != nil {
		panic(err.Error())
	}
	return &WDB2{DB: db, Path: f.Name()}
}

// DB is a test wrapper for bolt.DB.
type WDB2 struct {
	*DB
	// Path is the temporary file backing the database; removed on Close.
	Path string
}

// Close closes and removes the database.
func (db *WDB2) Close() error {
	defer os.Remove(db.Path)
	return db.DB.Close()
}

// fillBucket populates b with 10-59 random key/value pairs and then
// recurses into 1-3 freshly created sub-buckets, bounding recursion by
// prefix length. Keys are prefixed so paths stay unique.
func fillBucket(b *Bucket, prefix []byte) error {
	n := 10 + rand.Intn(50)
	for i := 0; i < n; i++ {
		v := make([]byte, 10*(1+rand.Intn(4)))
		_, err := crypto.Read(v)
		if err != nil {
			return err
		}
		// NOTE(review): append may reuse prefix's backing array
		// across iterations, so successive k values can alias each
		// other — presumably safe only if Put copies the key; TODO
		// confirm, otherwise build k with a fresh slice.
		k := append(prefix, []byte(fmt.Sprintf("k%d", i))...)
		err = b.Put(k, v)
		if err != nil {
			return err
		}
	}
	// limit depth of subbuckets
	s := 2 + rand.Intn(4)
	if len(prefix) > (2*s + 1) {
		return nil
	}
	n = 1 + rand.Intn(3)
	for i := 0; i < n; i++ {
		k := append(prefix, []byte(fmt.Sprintf("b%d", i))...)
		sb, err := b.CreateBucket(k)
		if err != nil {
			return err
		}
		err = fillBucket(sb, append(k, '.'))
		if err != nil {
			return err
		}
	}
	return nil
}

// walkBucket writes one "sequence:key=value" line for the given entry and
// then, if the entry is a bucket (v == nil), recurses over its contents.
func walkBucket(parent *Bucket, k []byte, v []byte, w io.Writer) error {
	_, err := fmt.Fprintf(w, "%d:%x=%x\n", parent.Sequence(), k, v)
	if err != nil {
		return err
	}

	// not a bucket, exit.
	if v != nil {
		return nil
	}
	return parent.ForEach(func(k, v []byte) error {
		if v == nil {
			// A nil value marks a nested bucket.
			return walkBucket(parent.Bucket(k), k, nil, w)
		}
		return walkBucket(parent, k, v, w)
	})
}

// MainTest is the package's test entry point: it runs the lightweight
// gobang-style checks directly, then hands the classic Test* functions to
// the testing machinery via testing.MainStart. Exits with the run's status.
// NOTE(review): test_commands() is defined in this file but never invoked
// here — verify whether it was intentionally left out.
func MainTest() {
	tempdir = getTempdir()

	test_newDB()
	test_openFile()
	test_readPageSize()
	test_initDB()
	test_Open()

	// Explicit registration list; keep entry names in sync with the
	// function identifiers. Commented-out entries are quick-check
	// tests that are currently disabled.
	tests := []testing.InternalTest{
		{"TestBucket_Get_NonExistent", TestBucket_Get_NonExistent},
		{"TestBucket_Get_FromNode", TestBucket_Get_FromNode},
		{"TestBucket_Get_IncompatibleValue", TestBucket_Get_IncompatibleValue},
		{"TestBucket_Get_Capacity", TestBucket_Get_Capacity},
		{"TestBucket_Put", TestBucket_Put},
		{"TestBucket_Put_Repeat", TestBucket_Put_Repeat},
		{"TestBucket_Put_Large", TestBucket_Put_Large},
		{"TestDB_Put_VeryLarge", TestDB_Put_VeryLarge},
		{"TestBucket_Put_IncompatibleValue", TestBucket_Put_IncompatibleValue},
		{"TestBucket_Put_Closed", TestBucket_Put_Closed},
		{"TestBucket_Put_ReadOnly", TestBucket_Put_ReadOnly},
		{"TestBucket_Delete", TestBucket_Delete},
		{"TestBucket_Delete_Large", TestBucket_Delete_Large},
		{"TestBucket_Delete_FreelistOverflow", TestBucket_Delete_FreelistOverflow},
		{"TestBucket_Nested", TestBucket_Nested},
		{"TestBucket_Delete_Bucket", TestBucket_Delete_Bucket},
		{"TestBucket_Delete_ReadOnly", TestBucket_Delete_ReadOnly},
		{"TestBucket_Delete_Closed", TestBucket_Delete_Closed},
		{"TestBucket_DeleteBucket_Nested", TestBucket_DeleteBucket_Nested},
		{"TestBucket_DeleteBucket_Nested2", TestBucket_DeleteBucket_Nested2},
		{"TestBucket_DeleteBucket_Large", TestBucket_DeleteBucket_Large},
		{"TestBucket_Bucket_IncompatibleValue", TestBucket_Bucket_IncompatibleValue},
		{"TestBucket_CreateBucket_IncompatibleValue", TestBucket_CreateBucket_IncompatibleValue},
		{"TestBucket_DeleteBucket_IncompatibleValue", TestBucket_DeleteBucket_IncompatibleValue},
		{"TestBucket_Sequence", TestBucket_Sequence},
		{"TestBucket_NextSequence", TestBucket_NextSequence},
		{"TestBucket_NextSequence_Persist", TestBucket_NextSequence_Persist},
		{"TestBucket_NextSequence_ReadOnly", TestBucket_NextSequence_ReadOnly},
		{"TestBucket_NextSequence_Closed", TestBucket_NextSequence_Closed},
		{"TestBucket_ForEach", TestBucket_ForEach},
		{"TestBucket_ForEach_ShortCircuit", TestBucket_ForEach_ShortCircuit},
		{"TestBucket_ForEach_Closed", TestBucket_ForEach_Closed},
		{"TestBucket_Put_EmptyKey", TestBucket_Put_EmptyKey},
		{"TestBucket_Put_KeyTooLarge", TestBucket_Put_KeyTooLarge},
		{"TestBucket_Put_ValueTooLarge", TestBucket_Put_ValueTooLarge},
		// { "TestBucket_Put_Single", TestBucket_Put_Single },
		// { "TestBucket_Put_Multiple", TestBucket_Put_Multiple },
		// { "TestBucket_Delete_Quick", TestBucket_Delete_Quick },
		{"TestCursor_Bucket", TestCursor_Bucket},
		{"TestCursor_Seek", TestCursor_Seek},
		{"TestCursor_Delete", TestCursor_Delete},
		{"TestCursor_Seek_Large", TestCursor_Seek_Large},
		{"TestCursor_EmptyBucket", TestCursor_EmptyBucket},
		{"TestCursor_EmptyBucketReverse", TestCursor_EmptyBucketReverse},
		{"TestCursor_Iterate_Leaf", TestCursor_Iterate_Leaf},
		{"TestCursor_LeafRootReverse", TestCursor_LeafRootReverse},
		{"TestCursor_Restart", TestCursor_Restart},
		{"TestCursor_First_EmptyPages", TestCursor_First_EmptyPages},
		// { "TestCursor_QuickCheck", TestCursor_QuickCheck },
		// { "TestCursor_QuickCheck_Reverse", TestCursor_QuickCheck_Reverse },
		{"TestCursor_QuickCheck_BucketsOnly", TestCursor_QuickCheck_BucketsOnly},
		{"TestCursor_QuickCheck_BucketsOnly_Reverse", TestCursor_QuickCheck_BucketsOnly_Reverse},
		{"TestOpen", TestOpen},
		{"TestOpen_ErrPathRequired", TestOpen_ErrPathRequired},
		{"TestOpen_ErrNotExists", TestOpen_ErrNotExists},
		{"TestOpen_ErrInvalid", TestOpen_ErrInvalid},
		{"TestOpen_ErrVersionMismatch", TestOpen_ErrVersionMismatch},
		{"TestOpen_ErrChecksum", TestOpen_ErrChecksum},
		{"TestOpen_Size", TestOpen_Size},
		{"TestOpen_Size_Large", TestOpen_Size_Large},
		{"TestOpen_Check", TestOpen_Check},
		{"TestOpen_MetaInitWriteError", TestOpen_MetaInitWriteError},
		{"TestOpen_FileTooSmall", TestOpen_FileTooSmall},
		{"TestDB_Open_InitialMmapSize", TestDB_Open_InitialMmapSize},
		{"TestDB_Begin_ErrDatabaseNotOpen", TestDB_Begin_ErrDatabaseNotOpen},
		{"TestDB_BeginRW", TestDB_BeginRW},
		{"TestDB_BeginRW_Closed", TestDB_BeginRW_Closed},
		{"TestDB_Close_PendingTx_RW", TestDB_Close_PendingTx_RW},
		{"TestDB_Close_PendingTx_RO", TestDB_Close_PendingTx_RO},
		{"TestDB_Update", TestDB_Update},
		{"TestDB_Update_Closed", TestDB_Update_Closed},
		{"TestDB_Update_ManualCommit", TestDB_Update_ManualCommit},
		{"TestDB_Update_ManualRollback", TestDB_Update_ManualRollback},
		{"TestDB_View_ManualCommit", TestDB_View_ManualCommit},
		{"TestDB_View_ManualRollback", TestDB_View_ManualRollback},
		{"TestDB_Update_Panic", TestDB_Update_Panic},
		{"TestDB_View_Error", TestDB_View_Error},
		{"TestDB_View_Panic", TestDB_View_Panic},
		{"TestDB_Consistency", TestDB_Consistency},
		{"TestDB_Batch", TestDB_Batch},
		{"TestDB_Batch_Panic", TestDB_Batch_Panic},
		{"TestDB_BatchFull", TestDB_BatchFull},
		{"TestDB_BatchTime", TestDB_BatchTime},
		{"TestFreelist_free", TestFreelist_free},
		{"TestFreelist_free_overflow", TestFreelist_free_overflow},
		{"TestFreelist_release", TestFreelist_release},
		{"TestFreelist_allocate", TestFreelist_allocate},
		{"TestFreelist_read", TestFreelist_read},
		{"TestFreelist_write", TestFreelist_write},
		{"TestNode_put", TestNode_put},
		{"TestNode_read_LeafPage", TestNode_read_LeafPage},
		{"TestNode_write_LeafPage", TestNode_write_LeafPage},
		{"TestNode_split", TestNode_split},
		{"TestNode_split_MinKeys", TestNode_split_MinKeys},
		{"TestNode_split_SinglePage", TestNode_split_SinglePage},
		{"TestPage_typ", TestPage_typ},
		{"TestPgids_merge", TestPgids_merge},
		{"TestPgids_merge_quick", TestPgids_merge_quick},
		{"TestSimulate_1op_1p", TestSimulate_1op_1p},
		{"TestSimulate_10op_1p", TestSimulate_10op_1p},
		{"TestSimulate_100op_1p", TestSimulate_100op_1p},
		{"TestSimulate_1000op_1p", TestSimulate_1000op_1p},
		{"TestSimulate_10000op_1p", TestSimulate_10000op_1p},
		{"TestSimulate_10op_10p", TestSimulate_10op_10p},
		{"TestSimulate_100op_10p", TestSimulate_100op_10p},
		{"TestSimulate_1000op_10p", TestSimulate_1000op_10p},
		{"TestSimulate_10000op_10p", TestSimulate_10000op_10p},
		{"TestSimulate_100op_100p", TestSimulate_100op_100p},
		{"TestSimulate_1000op_100p", TestSimulate_1000op_100p},
		{"TestSimulate_10000op_100p", TestSimulate_10000op_100p},
		{"TestSimulate_10000op_1000p", TestSimulate_10000op_1000p},
		{"TestTx_Commit_ErrTxClosed", TestTx_Commit_ErrTxClosed},
		{"TestTx_Rollback_ErrTxClosed", TestTx_Rollback_ErrTxClosed},
		{"TestTx_Commit_ErrTxNotWritable", TestTx_Commit_ErrTxNotWritable},
		{"TestTx_Cursor", TestTx_Cursor},
		{"TestTx_CreateBucket_ErrTxNotWritable", TestTx_CreateBucket_ErrTxNotWritable},
		{"TestTx_CreateBucket_ErrTxClosed", TestTx_CreateBucket_ErrTxClosed},
		{"TestTx_Bucket", TestTx_Bucket},
		{"TestTx_Get_NotFound", TestTx_Get_NotFound},
		{"TestTx_CreateBucket", TestTx_CreateBucket},
		{"TestTx_CreateBucketIfNotExists", TestTx_CreateBucketIfNotExists},
		{"TestTx_CreateBucketIfNotExists_ErrBucketNameRequired", TestTx_CreateBucketIfNotExists_ErrBucketNameRequired},
		{"TestTx_CreateBucket_ErrBucketExists", TestTx_CreateBucket_ErrBucketExists},
		{"TestTx_CreateBucket_ErrBucketNameRequired", TestTx_CreateBucket_ErrBucketNameRequired},
		{"TestTx_DeleteBucket", TestTx_DeleteBucket},
		{"TestTx_DeleteBucket_ErrTxClosed", TestTx_DeleteBucket_ErrTxClosed},
		{"TestTx_DeleteBucket_ReadOnly", TestTx_DeleteBucket_ReadOnly},
		{"TestTx_DeleteBucket_NotFound", TestTx_DeleteBucket_NotFound},
		{"TestTx_ForEach_NoError", TestTx_ForEach_NoError},
		{"TestTx_ForEach_WithError", TestTx_ForEach_WithError},
		{"TestTx_OnCommit", TestTx_OnCommit},
		{"TestTx_OnCommit_Rollback", TestTx_OnCommit_Rollback},
		{"TestTx_CopyFile", TestTx_CopyFile},
		{"TestTx_CopyFile_Error_Meta", TestTx_CopyFile_Error_Meta},
		{"TestTx_CopyFile_Error_Normal", TestTx_CopyFile_Error_Normal},
	}
	deps := testdeps.TestDeps{}
	benchmarks := []testing.InternalBenchmark{}
	fuzzTargets := []testing.InternalFuzzTarget{}
	examples := []testing.InternalExample{}
	m := testing.MainStart(deps, tests, benchmarks, fuzzTargets, examples)
	os.Exit(m.Run())
}