Diffstat (limited to 'tests/dedo.go')
-rw-r--r--  tests/dedo.go  2737
1 file changed, 1894 insertions, 843 deletions
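
The bulk of this patch is a mechanical restyle of error handling in the test
suite: the inline form "if err := f(); err != nil { ... }" is split into a
plain assignment followed by a separate "if err != nil" check, long call
expressions are wrapped across several lines, and blank lines separate the
steps of each test. A minimal, self-contained sketch of the two styles follows;
the update helper is only a stand-in for calls such as db.Update and is not
code from this repository.

	package main

	import "log"

	// update stands in for a call such as db.Update that returns an error.
	func update(fn func() error) error {
		return fn()
	}

	func main() {
		// Old style: the call and the error check share one if statement.
		if err := update(func() error { return nil }); err != nil {
			log.Fatal(err)
		}

		// New style used throughout the patch: assign first, check second.
		err := update(func() error { return nil })
		if err != nil {
			log.Fatal(err)
		}
	}
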
diff --git a/tests/dedo.go b/tests/dedo.go
index a43a3d2..b1eec95 100644
--- a/tests/dedo.go
+++ b/tests/dedo.go
@@ -137,16 +137,20 @@ func TestBucket_Get_NonExistent(t *testing.T) {
defer db.MustClose()
defer os.Remove(db.Path())
- if err := db.Update(func(tx *Tx) error {
+ err := db.Update(func(tx *Tx) error {
b, err := tx.CreateBucket([]byte("widgets"))
if err != nil {
t.Fatal(err)
}
- if v := b.Get([]byte("foo")); v != nil {
+
+ v := b.Get([]byte("foo"))
+ if v != nil {
t.Fatal("expected nil value")
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
}
@@ -157,19 +161,25 @@ func TestBucket_Get_FromNode(t *testing.T) {
defer db.MustClose()
defer os.Remove(db.Path())
- if err := db.Update(func(tx *Tx) error {
+ err := db.Update(func(tx *Tx) error {
b, err := tx.CreateBucket([]byte("widgets"))
if err != nil {
t.Fatal(err)
}
- if err := b.Put([]byte("foo"), []byte("bar")); err != nil {
+
+ err = b.Put([]byte("foo"), []byte("bar"))
+ if err != nil {
t.Fatal(err)
}
- if v := b.Get([]byte("foo")); !bytes.Equal(v, []byte("bar")) {
+
+ v := b.Get([]byte("foo"))
+ if !bytes.Equal(v, []byte("bar")) {
t.Fatalf("unexpected value: %v", v)
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
}
@@ -180,27 +190,33 @@ func TestBucket_Get_IncompatibleValue(t *testing.T) {
defer db.MustClose()
defer os.Remove(db.Path())
- if err := db.Update(func(tx *Tx) error {
+ err := db.Update(func(tx *Tx) error {
_, err := tx.CreateBucket([]byte("widgets"))
if err != nil {
t.Fatal(err)
}
- if _, err := tx.Bucket([]byte("widgets")).CreateBucket([]byte("foo")); err != nil {
+ _, err = tx.Bucket([]byte("widgets")).CreateBucket(
+ []byte("foo"),
+ )
+ if err != nil {
t.Fatal(err)
}
if tx.Bucket([]byte("widgets")).Get([]byte("foo")) != nil {
t.Fatal("expected nil value")
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
}
-// Ensure that a slice returned from a bucket has a capacity equal to its length.
-// This also allows slices to be appended to since it will require a realloc by Go.
+// Ensure that a slice returned from a bucket has a capacity equal to its
+// length. This also allows slices to be appended to since it will require a
+// realloc by Go.
//
// https://github.com/boltdb/bolt/issues/544
func TestBucket_Get_Capacity(t *testing.T) {
@@ -209,18 +225,20 @@ func TestBucket_Get_Capacity(t *testing.T) {
defer os.Remove(db.Path())
// Write key to a bucket.
- if err := db.Update(func(tx *Tx) error {
+ err := db.Update(func(tx *Tx) error {
b, err := tx.CreateBucket([]byte("bucket"))
if err != nil {
return err
}
+
return b.Put([]byte("key"), []byte("val"))
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
// Retrieve value and attempt to append to it.
- if err := db.Update(func(tx *Tx) error {
+ err = db.Update(func(tx *Tx) error {
k, v := tx.Bucket([]byte("bucket")).Cursor().First()
// Verify capacity.
@@ -235,7 +253,8 @@ func TestBucket_Get_Capacity(t *testing.T) {
v = append(v, []byte("123")...)
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
}
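
The capacity test above relies on the ordinary Go property the comment spells
out: when the returned slice's capacity equals its length, append must allocate
a new backing array, so appending never writes into the slice's original
storage. A small stand-alone illustration using plain slices (no Bolt types):

	package main

	import "fmt"

	func main() {
		backing := []byte("val")
		v := backing[0:3:3] // len == cap == 3, like the value checked above

		grown := append(v, "123"...) // capacity exhausted, so append reallocates

		fmt.Println(&grown[0] != &v[0]) // true: grown has a new backing array
		fmt.Println(string(backing))    // "val": original bytes untouched
	}
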
@@ -246,12 +265,14 @@ func TestBucket_Put(t *testing.T) {
defer db.MustClose()
defer os.Remove(db.Path())
- if err := db.Update(func(tx *Tx) error {
+ err := db.Update(func(tx *Tx) error {
b, err := tx.CreateBucket([]byte("widgets"))
if err != nil {
t.Fatal(err)
}
- if err := b.Put([]byte("foo"), []byte("bar")); err != nil {
+
+ err = b.Put([]byte("foo"), []byte("bar"))
+ if err != nil {
t.Fatal(err)
}
@@ -259,8 +280,10 @@ func TestBucket_Put(t *testing.T) {
if !bytes.Equal([]byte("bar"), v) {
t.Fatalf("unexpected value: %v", v)
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
}
@@ -271,15 +294,19 @@ func TestBucket_Put_Repeat(t *testing.T) {
defer db.MustClose()
defer os.Remove(db.Path())
- if err := db.Update(func(tx *Tx) error {
+ err := db.Update(func(tx *Tx) error {
b, err := tx.CreateBucket([]byte("widgets"))
if err != nil {
t.Fatal(err)
}
- if err := b.Put([]byte("foo"), []byte("bar")); err != nil {
+
+ err = b.Put([]byte("foo"), []byte("bar"))
+ if err != nil {
t.Fatal(err)
}
- if err := b.Put([]byte("foo"), []byte("baz")); err != nil {
+
+ err = b.Put([]byte("foo"), []byte("baz"))
+ if err != nil {
t.Fatal(err)
}
@@ -287,8 +314,10 @@ func TestBucket_Put_Repeat(t *testing.T) {
if !bytes.Equal([]byte("baz"), value) {
t.Fatalf("unexpected value: %v", value)
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
}
@@ -300,31 +329,42 @@ func TestBucket_Put_Large(t *testing.T) {
defer os.Remove(db.Path())
count, factor := 100, 200
- if err := db.Update(func(tx *Tx) error {
+ err := db.Update(func(tx *Tx) error {
b, err := tx.CreateBucket([]byte("widgets"))
if err != nil {
t.Fatal(err)
}
+
for i := 1; i < count; i++ {
- if err := b.Put([]byte(strings.Repeat("0", i*factor)), []byte(strings.Repeat("X", (count-i)*factor))); err != nil {
+ key := strings.Repeat("0", i * factor)
+ value := strings.Repeat("X", (count - i) * factor)
+ err := b.Put([]byte(key), []byte(value))
+ if err != nil {
t.Fatal(err)
}
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
- if err := db.View(func(tx *Tx) error {
+ err = db.View(func(tx *Tx) error {
b := tx.Bucket([]byte("widgets"))
for i := 1; i < count; i++ {
value := b.Get([]byte(strings.Repeat("0", i*factor)))
- if !bytes.Equal(value, []byte(strings.Repeat("X", (count-i)*factor))) {
+ expected := []byte(strings.Repeat(
+ "X",
+ (count - i) * factor,
+ ))
+ if !bytes.Equal(value, expected) {
t.Fatalf("unexpected value: %v", value)
}
}
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
}
@@ -343,7 +383,7 @@ func TestDB_Put_VeryLarge(t *testing.T) {
defer os.Remove(db.Path())
for i := 0; i < n; i += batchN {
- if err := db.Update(func(tx *Tx) error {
+ err := db.Update(func(tx *Tx) error {
b, err := tx.CreateBucketIfNotExists([]byte("widgets"))
if err != nil {
t.Fatal(err)
@@ -351,12 +391,14 @@ func TestDB_Put_VeryLarge(t *testing.T) {
for j := 0; j < batchN; j++ {
k, v := make([]byte, ksize), make([]byte, vsize)
binary.BigEndian.PutUint32(k, uint32(i+j))
- if err := b.Put(k, v); err != nil {
+ err := b.Put(k, v)
+ if err != nil {
t.Fatal(err)
}
}
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
}
@@ -368,25 +410,33 @@ func TestBucket_Put_IncompatibleValue(t *testing.T) {
defer db.MustClose()
defer os.Remove(db.Path())
- if err := db.Update(func(tx *Tx) error {
+ err := db.Update(func(tx *Tx) error {
b0, err := tx.CreateBucket([]byte("widgets"))
if err != nil {
t.Fatal(err)
}
- if _, err := tx.Bucket([]byte("widgets")).CreateBucket([]byte("foo")); err != nil {
+ _, err = tx.Bucket([]byte("widgets")).CreateBucket(
+ []byte("foo"),
+ )
+ if err != nil {
t.Fatal(err)
}
- if err := b0.Put([]byte("foo"), []byte("bar")); err != ErrIncompatibleValue {
+
+ err = b0.Put([]byte("foo"), []byte("bar"))
+ if err != ErrIncompatibleValue {
t.Fatalf("unexpected error: %s", err)
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
}
-// Ensure that a setting a value while the transaction is closed returns an error.
+// Ensure that setting a value while the transaction is closed returns an
+// error.
func TestBucket_Put_Closed(t *testing.T) {
db := MustOpenDB()
defer db.MustClose()
@@ -402,11 +452,13 @@ func TestBucket_Put_Closed(t *testing.T) {
t.Fatal(err)
}
- if err := tx.Rollback(); err != nil {
+ err = tx.Rollback()
+ if err != nil {
t.Fatal(err)
}
- if err := b.Put([]byte("foo"), []byte("bar")); err != ErrTxClosed {
+ err = b.Put([]byte("foo"), []byte("bar"))
+ if err != ErrTxClosed {
t.Fatalf("unexpected error: %s", err)
}
}
@@ -417,22 +469,28 @@ func TestBucket_Put_ReadOnly(t *testing.T) {
defer db.MustClose()
defer os.Remove(db.Path())
- if err := db.Update(func(tx *Tx) error {
- if _, err := tx.CreateBucket([]byte("widgets")); err != nil {
+ err := db.Update(func(tx *Tx) error {
+ _, err := tx.CreateBucket([]byte("widgets"))
+ if err != nil {
t.Fatal(err)
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
- if err := db.View(func(tx *Tx) error {
+ err = db.View(func(tx *Tx) error {
b := tx.Bucket([]byte("widgets"))
- if err := b.Put([]byte("foo"), []byte("bar")); err != ErrTxNotWritable {
+ err := b.Put([]byte("foo"), []byte("bar"))
+ if err != ErrTxNotWritable {
t.Fatalf("unexpected error: %s", err)
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
}
@@ -443,22 +501,30 @@ func TestBucket_Delete(t *testing.T) {
defer db.MustClose()
defer os.Remove(db.Path())
- if err := db.Update(func(tx *Tx) error {
+ err := db.Update(func(tx *Tx) error {
b, err := tx.CreateBucket([]byte("widgets"))
if err != nil {
t.Fatal(err)
}
- if err := b.Put([]byte("foo"), []byte("bar")); err != nil {
+
+ err = b.Put([]byte("foo"), []byte("bar"))
+ if err != nil {
t.Fatal(err)
}
- if err := b.Delete([]byte("foo")); err != nil {
+
+ err = b.Delete([]byte("foo"))
+ if err != nil {
t.Fatal(err)
}
- if v := b.Get([]byte("foo")); v != nil {
+
+ v := b.Get([]byte("foo"))
+ if v != nil {
t.Fatalf("unexpected value: %v", v)
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
}
@@ -469,44 +535,54 @@ func TestBucket_Delete_Large(t *testing.T) {
defer db.MustClose()
defer os.Remove(db.Path())
- if err := db.Update(func(tx *Tx) error {
+ err := db.Update(func(tx *Tx) error {
b, err := tx.CreateBucket([]byte("widgets"))
if err != nil {
t.Fatal(err)
}
for i := 0; i < 100; i++ {
- if err := b.Put([]byte(strconv.Itoa(i)), []byte(strings.Repeat("*", 1024))); err != nil {
+ err := b.Put(
+ []byte(strconv.Itoa(i)),
+ []byte(strings.Repeat("*", 1024)),
+ )
+ if err != nil {
t.Fatal(err)
}
}
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
- if err := db.Update(func(tx *Tx) error {
+ err = db.Update(func(tx *Tx) error {
b := tx.Bucket([]byte("widgets"))
for i := 0; i < 100; i++ {
- if err := b.Delete([]byte(strconv.Itoa(i))); err != nil {
+ err := b.Delete([]byte(strconv.Itoa(i)))
+ if err != nil {
t.Fatal(err)
}
}
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
- if err := db.View(func(tx *Tx) error {
+ err = db.View(func(tx *Tx) error {
b := tx.Bucket([]byte("widgets"))
for i := 0; i < 100; i++ {
- if v := b.Get([]byte(strconv.Itoa(i))); v != nil {
+ v := b.Get([]byte(strconv.Itoa(i)))
+ if v != nil {
t.Fatalf("unexpected value: %v, i=%d", v, i)
}
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
}
@@ -524,7 +600,7 @@ func TestBucket_Delete_FreelistOverflow(t *testing.T) {
k := make([]byte, 16)
for i := uint64(0); i < n1; i++ {
- if err := db.Update(func(tx *Tx) error {
+ err := db.Update(func(tx *Tx) error {
b, err := tx.CreateBucketIfNotExists([]byte("0"))
if err != nil {
t.Fatalf("bucket error: %s", err)
@@ -533,28 +609,32 @@ func TestBucket_Delete_FreelistOverflow(t *testing.T) {
for j := uint64(0); j < n2; j++ {
binary.BigEndian.PutUint64(k[:8], i)
binary.BigEndian.PutUint64(k[8:], j)
- if err := b.Put(k, nil); err != nil {
+ err := b.Put(k, nil)
+ if err != nil {
t.Fatalf("put error: %s", err)
}
}
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
}
// Delete all of them in one large transaction
- if err := db.Update(func(tx *Tx) error {
+ err := db.Update(func(tx *Tx) error {
b := tx.Bucket([]byte("0"))
c := b.Cursor()
for k, _ := c.First(); k != nil; k, _ = c.Next() {
- if err := c.Delete(); err != nil {
+ err := c.Delete()
+ if err != nil {
t.Fatal(err)
}
}
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
}
@@ -565,7 +645,7 @@ func TestBucket_Nested(t *testing.T) {
defer db.MustClose()
defer os.Remove(db.Path())
- if err := db.Update(func(tx *Tx) error {
+ err := db.Update(func(tx *Tx) error {
// Create a widgets bucket.
b, err := tx.CreateBucket([]byte("widgets"))
if err != nil {
@@ -579,70 +659,94 @@ func TestBucket_Nested(t *testing.T) {
}
// Create a widgets/bar key.
- if err := b.Put([]byte("bar"), []byte("0000")); err != nil {
+ err = b.Put([]byte("bar"), []byte("0000"))
+ if err != nil {
t.Fatal(err)
}
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
db.MustCheck()
// Update widgets/bar.
- if err := db.Update(func(tx *Tx) error {
+ err = db.Update(func(tx *Tx) error {
b := tx.Bucket([]byte("widgets"))
- if err := b.Put([]byte("bar"), []byte("xxxx")); err != nil {
+ err := b.Put([]byte("bar"), []byte("xxxx"))
+ if err != nil {
t.Fatal(err)
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
db.MustCheck()
// Cause a split.
- if err := db.Update(func(tx *Tx) error {
- var b = tx.Bucket([]byte("widgets"))
+ err = db.Update(func(tx *Tx) error {
+ b := tx.Bucket([]byte("widgets"))
for i := 0; i < 10000; i++ {
- if err := b.Put([]byte(strconv.Itoa(i)), []byte(strconv.Itoa(i))); err != nil {
+ err := b.Put(
+ []byte(strconv.Itoa(i)),
+ []byte(strconv.Itoa(i)),
+ )
+ if err != nil {
t.Fatal(err)
}
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
db.MustCheck()
// Insert into widgets/foo/baz.
- if err := db.Update(func(tx *Tx) error {
- var b = tx.Bucket([]byte("widgets"))
- if err := b.Bucket([]byte("foo")).Put([]byte("baz"), []byte("yyyy")); err != nil {
+ err = db.Update(func(tx *Tx) error {
+ b := tx.Bucket([]byte("widgets"))
+ err := b.Bucket([]byte("foo")).Put(
+ []byte("baz"),
+ []byte("yyyy"),
+ )
+ if err != nil {
t.Fatal(err)
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
db.MustCheck()
// Verify.
- if err := db.View(func(tx *Tx) error {
- var b = tx.Bucket([]byte("widgets"))
- if v := b.Bucket([]byte("foo")).Get([]byte("baz")); !bytes.Equal(v, []byte("yyyy")) {
+ err = db.View(func(tx *Tx) error {
+ b := tx.Bucket([]byte("widgets"))
+ v := b.Bucket([]byte("foo")).Get([]byte("baz"))
+ if !bytes.Equal(v, []byte("yyyy")) {
t.Fatalf("unexpected value: %v", v)
}
- if v := b.Get([]byte("bar")); !bytes.Equal(v, []byte("xxxx")) {
+
+ v = b.Get([]byte("bar"))
+ if !bytes.Equal(v, []byte("xxxx")) {
t.Fatalf("unexpected value: %v", v)
}
+
for i := 0; i < 10000; i++ {
- if v := b.Get([]byte(strconv.Itoa(i))); !bytes.Equal(v, []byte(strconv.Itoa(i))) {
+ v := b.Get([]byte(strconv.Itoa(i)))
+ if !bytes.Equal(v, []byte(strconv.Itoa(i))) {
t.Fatalf("unexpected value: %v", v)
}
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
}
@@ -653,19 +757,25 @@ func TestBucket_Delete_Bucket(t *testing.T) {
defer db.MustClose()
defer os.Remove(db.Path())
- if err := db.Update(func(tx *Tx) error {
+ err := db.Update(func(tx *Tx) error {
b, err := tx.CreateBucket([]byte("widgets"))
if err != nil {
t.Fatal(err)
}
- if _, err := b.CreateBucket([]byte("foo")); err != nil {
+
+ _, err = b.CreateBucket([]byte("foo"))
+ if err != nil {
t.Fatal(err)
}
- if err := b.Delete([]byte("foo")); err != ErrIncompatibleValue {
+
+ err = b.Delete([]byte("foo"))
+ if err != ErrIncompatibleValue {
t.Fatalf("unexpected error: %s", err)
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
}
@@ -676,26 +786,33 @@ func TestBucket_Delete_ReadOnly(t *testing.T) {
defer db.MustClose()
defer os.Remove(db.Path())
- if err := db.Update(func(tx *Tx) error {
- if _, err := tx.CreateBucket([]byte("widgets")); err != nil {
+ err := db.Update(func(tx *Tx) error {
+ _, err := tx.CreateBucket([]byte("widgets"))
+ if err != nil {
t.Fatal(err)
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
- if err := db.View(func(tx *Tx) error {
- if err := tx.Bucket([]byte("widgets")).Delete([]byte("foo")); err != ErrTxNotWritable {
+ err = db.View(func(tx *Tx) error {
+ err := tx.Bucket([]byte("widgets")).Delete([]byte("foo"))
+ if err != ErrTxNotWritable {
t.Fatalf("unexpected error: %s", err)
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
}
-// Ensure that a deleting value while the transaction is closed returns an error.
+// Ensure that deleting a value while the transaction is closed returns an
+// error.
func TestBucket_Delete_Closed(t *testing.T) {
db := MustOpenDB()
defer db.MustClose()
@@ -711,10 +828,13 @@ func TestBucket_Delete_Closed(t *testing.T) {
t.Fatal(err)
}
- if err := tx.Rollback(); err != nil {
+ err = tx.Rollback()
+ if err != nil {
t.Fatal(err)
}
- if err := b.Delete([]byte("foo")); err != ErrTxClosed {
+
+ err = b.Delete([]byte("foo"))
+ if err != ErrTxClosed {
t.Fatalf("unexpected error: %s", err)
}
}
@@ -725,7 +845,7 @@ func TestBucket_DeleteBucket_Nested(t *testing.T) {
defer db.MustClose()
defer os.Remove(db.Path())
- if err := db.Update(func(tx *Tx) error {
+ err := db.Update(func(tx *Tx) error {
widgets, err := tx.CreateBucket([]byte("widgets"))
if err != nil {
t.Fatal(err)
@@ -740,25 +860,32 @@ func TestBucket_DeleteBucket_Nested(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- if err := bar.Put([]byte("baz"), []byte("bat")); err != nil {
+
+ err = bar.Put([]byte("baz"), []byte("bat"))
+ if err != nil {
t.Fatal(err)
}
- if err := tx.Bucket([]byte("widgets")).DeleteBucket([]byte("foo")); err != nil {
+
+ err = tx.Bucket([]byte("widgets")).DeleteBucket([]byte("foo"))
+ if err != nil {
t.Fatal(err)
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
}
-// Ensure that deleting a bucket causes nested buckets to be deleted after they have been committed.
+// Ensure that deleting a bucket causes nested buckets to be deleted after they
+// have been committed.
func TestBucket_DeleteBucket_Nested2(t *testing.T) {
db := MustOpenDB()
defer db.MustClose()
defer os.Remove(db.Path())
- if err := db.Update(func(tx *Tx) error {
+ err := db.Update(func(tx *Tx) error {
widgets, err := tx.CreateBucket([]byte("widgets"))
if err != nil {
t.Fatal(err)
@@ -774,15 +901,17 @@ func TestBucket_DeleteBucket_Nested2(t *testing.T) {
t.Fatal(err)
}
- if err := bar.Put([]byte("baz"), []byte("bat")); err != nil {
+ err = bar.Put([]byte("baz"), []byte("bat"))
+ if err != nil {
t.Fatal(err)
}
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
- if err := db.Update(func(tx *Tx) error {
+ err = db.Update(func(tx *Tx) error {
widgets := tx.Bucket([]byte("widgets"))
if widgets == nil {
t.Fatal("expected widgets bucket")
@@ -798,35 +927,42 @@ func TestBucket_DeleteBucket_Nested2(t *testing.T) {
t.Fatal("expected bar bucket")
}
- if v := bar.Get([]byte("baz")); !bytes.Equal(v, []byte("bat")) {
+ v := bar.Get([]byte("baz"))
+ if !bytes.Equal(v, []byte("bat")) {
t.Fatalf("unexpected value: %v", v)
}
- if err := tx.DeleteBucket([]byte("widgets")); err != nil {
+
+ err := tx.DeleteBucket([]byte("widgets"))
+ if err != nil {
t.Fatal(err)
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
- if err := db.View(func(tx *Tx) error {
+ err = db.View(func(tx *Tx) error {
if tx.Bucket([]byte("widgets")) != nil {
t.Fatal("expected bucket to be deleted")
}
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
}
-// Ensure that deleting a child bucket with multiple pages causes all pages to get collected.
-// NOTE: Consistency check in bolt_test.DB.Close() will panic if pages not freed properly.
+// Ensure that deleting a child bucket with multiple pages causes all pages to
+// get collected. NOTE: Consistency check in bolt_test.DB.Close() will panic if
+// pages not freed properly.
func TestBucket_DeleteBucket_Large(t *testing.T) {
db := MustOpenDB()
defer db.MustClose()
defer os.Remove(db.Path())
- if err := db.Update(func(tx *Tx) error {
+ err := db.Update(func(tx *Tx) error {
widgets, err := tx.CreateBucket([]byte("widgets"))
if err != nil {
t.Fatal(err)
@@ -838,21 +974,29 @@ func TestBucket_DeleteBucket_Large(t *testing.T) {
}
for i := 0; i < 1000; i++ {
- if err := foo.Put([]byte(fmt.Sprintf("%d", i)), []byte(fmt.Sprintf("%0100d", i))); err != nil {
+ err := foo.Put(
+ []byte(fmt.Sprintf("%d", i)),
+ []byte(fmt.Sprintf("%0100d", i)),
+ )
+ if err != nil {
t.Fatal(err)
}
}
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
- if err := db.Update(func(tx *Tx) error {
- if err := tx.DeleteBucket([]byte("widgets")); err != nil {
+ err = db.Update(func(tx *Tx) error {
+ err := tx.DeleteBucket([]byte("widgets"))
+ if err != nil {
t.Fatal(err)
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
}
@@ -863,20 +1007,25 @@ func TestBucket_Bucket_IncompatibleValue(t *testing.T) {
defer db.MustClose()
defer os.Remove(db.Path())
- if err := db.Update(func(tx *Tx) error {
+ err := db.Update(func(tx *Tx) error {
widgets, err := tx.CreateBucket([]byte("widgets"))
if err != nil {
t.Fatal(err)
}
- if err := widgets.Put([]byte("foo"), []byte("bar")); err != nil {
+ err = widgets.Put([]byte("foo"), []byte("bar"))
+ if err != nil {
t.Fatal(err)
}
- if b := tx.Bucket([]byte("widgets")).Bucket([]byte("foo")); b != nil {
+
+ b := tx.Bucket([]byte("widgets")).Bucket([]byte("foo"))
+ if b != nil {
t.Fatal("expected nil bucket")
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
}
@@ -887,20 +1036,25 @@ func TestBucket_CreateBucket_IncompatibleValue(t *testing.T) {
defer db.MustClose()
defer os.Remove(db.Path())
- if err := db.Update(func(tx *Tx) error {
+ err := db.Update(func(tx *Tx) error {
widgets, err := tx.CreateBucket([]byte("widgets"))
if err != nil {
t.Fatal(err)
}
- if err := widgets.Put([]byte("foo"), []byte("bar")); err != nil {
+ err = widgets.Put([]byte("foo"), []byte("bar"))
+ if err != nil {
t.Fatal(err)
}
- if _, err := widgets.CreateBucket([]byte("foo")); err != ErrIncompatibleValue {
+
+ _, err = widgets.CreateBucket([]byte("foo"))
+ if err != ErrIncompatibleValue {
t.Fatalf("unexpected error: %s", err)
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
}
@@ -911,19 +1065,25 @@ func TestBucket_DeleteBucket_IncompatibleValue(t *testing.T) {
defer db.MustClose()
defer os.Remove(db.Path())
- if err := db.Update(func(tx *Tx) error {
+ err := db.Update(func(tx *Tx) error {
widgets, err := tx.CreateBucket([]byte("widgets"))
if err != nil {
t.Fatal(err)
}
- if err := widgets.Put([]byte("foo"), []byte("bar")); err != nil {
+
+ err = widgets.Put([]byte("foo"), []byte("bar"))
+ if err != nil {
t.Fatal(err)
}
- if err := tx.Bucket([]byte("widgets")).DeleteBucket([]byte("foo")); err != ErrIncompatibleValue {
+
+ err = tx.Bucket([]byte("widgets")).DeleteBucket([]byte("foo"))
+ if err != ErrIncompatibleValue {
t.Fatalf("unexpected error: %s", err)
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
}
@@ -934,39 +1094,46 @@ func TestBucket_Sequence(t *testing.T) {
defer db.MustClose()
defer os.Remove(db.Path())
- if err := db.Update(func(tx *Tx) error {
+ err := db.Update(func(tx *Tx) error {
bkt, err := tx.CreateBucket([]byte("0"))
if err != nil {
t.Fatal(err)
}
// Retrieve sequence.
- if v := bkt.Sequence(); v != 0 {
+ v := bkt.Sequence()
+ if v != 0 {
t.Fatalf("unexpected sequence: %d", v)
}
// Update sequence.
- if err := bkt.SetSequence(1000); err != nil {
+ err = bkt.SetSequence(1000)
+ if err != nil {
t.Fatal(err)
}
// Read sequence again.
- if v := bkt.Sequence(); v != 1000 {
+ v = bkt.Sequence()
+ if v != 1000 {
t.Fatalf("unexpected sequence: %d", v)
}
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
// Verify sequence in separate transaction.
- if err := db.View(func(tx *Tx) error {
- if v := tx.Bucket([]byte("0")).Sequence(); v != 1000 {
+ err = db.View(func(tx *Tx) error {
+ v := tx.Bucket([]byte("0")).Sequence()
+ if v != 1000 {
t.Fatalf("unexpected sequence: %d", v)
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
}
@@ -977,38 +1144,43 @@ func TestBucket_NextSequence(t *testing.T) {
defer db.MustClose()
defer os.Remove(db.Path())
- if err := db.Update(func(tx *Tx) error {
+ err := db.Update(func(tx *Tx) error {
widgets, err := tx.CreateBucket([]byte("widgets"))
if err != nil {
t.Fatal(err)
}
+
woojits, err := tx.CreateBucket([]byte("woojits"))
if err != nil {
t.Fatal(err)
}
// Make sure sequence increments.
- if seq, err := widgets.NextSequence(); err != nil {
+ seq, err := widgets.NextSequence()
+ if err != nil {
t.Fatal(err)
} else if seq != 1 {
t.Fatalf("unexpecte sequence: %d", seq)
}
- if seq, err := widgets.NextSequence(); err != nil {
+ seq, err = widgets.NextSequence()
+ if err != nil {
t.Fatal(err)
} else if seq != 2 {
t.Fatalf("unexpected sequence: %d", seq)
}
// Buckets should be separate.
- if seq, err := woojits.NextSequence(); err != nil {
+ seq, err = woojits.NextSequence()
+ if err != nil {
t.Fatal(err)
} else if seq != 1 {
t.Fatalf("unexpected sequence: %d", 1)
}
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
}
@@ -1021,64 +1193,79 @@ func TestBucket_NextSequence_Persist(t *testing.T) {
defer db.MustClose()
defer os.Remove(db.Path())
- if err := db.Update(func(tx *Tx) error {
- if _, err := tx.CreateBucket([]byte("widgets")); err != nil {
+ err := db.Update(func(tx *Tx) error {
+ _, err := tx.CreateBucket([]byte("widgets"))
+ if err != nil {
t.Fatal(err)
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
- if err := db.Update(func(tx *Tx) error {
- if _, err := tx.Bucket([]byte("widgets")).NextSequence(); err != nil {
+ err = db.Update(func(tx *Tx) error {
+ _, err := tx.Bucket([]byte("widgets")).NextSequence()
+ if err != nil {
t.Fatal(err)
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
- if err := db.Update(func(tx *Tx) error {
+ err = db.Update(func(tx *Tx) error {
seq, err := tx.Bucket([]byte("widgets")).NextSequence()
if err != nil {
t.Fatalf("unexpected error: %s", err)
} else if seq != 2 {
t.Fatalf("unexpected sequence: %d", seq)
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
}
-// Ensure that retrieving the next sequence on a read-only bucket returns an error.
+// Ensure that retrieving the next sequence on a read-only bucket returns an
+// error.
func TestBucket_NextSequence_ReadOnly(t *testing.T) {
db := MustOpenDB()
defer db.MustClose()
defer os.Remove(db.Path())
- if err := db.Update(func(tx *Tx) error {
- if _, err := tx.CreateBucket([]byte("widgets")); err != nil {
+ err := db.Update(func(tx *Tx) error {
+ _, err := tx.CreateBucket([]byte("widgets"))
+ if err != nil {
t.Fatal(err)
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
- if err := db.View(func(tx *Tx) error {
+ err = db.View(func(tx *Tx) error {
_, err := tx.Bucket([]byte("widgets")).NextSequence()
if err != ErrTxNotWritable {
t.Fatalf("unexpected error: %s", err)
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
}
-// Ensure that retrieving the next sequence for a bucket on a closed database return an error.
+// Ensure that retrieving the next sequence for a bucket on a closed database
+// returns an error.
func TestBucket_NextSequence_Closed(t *testing.T) {
db := MustOpenDB()
defer db.MustClose()
@@ -1088,14 +1275,19 @@ func TestBucket_NextSequence_Closed(t *testing.T) {
if err != nil {
t.Fatal(err)
}
+
b, err := tx.CreateBucket([]byte("widgets"))
if err != nil {
t.Fatal(err)
}
- if err := tx.Rollback(); err != nil {
+
+ err = tx.Rollback()
+ if err != nil {
t.Fatal(err)
}
- if _, err := b.NextSequence(); err != ErrTxClosed {
+
+ _, err = b.NextSequence()
+ if err != ErrTxClosed {
t.Fatal(err)
}
}
@@ -1106,23 +1298,29 @@ func TestBucket_ForEach(t *testing.T) {
defer db.MustClose()
defer os.Remove(db.Path())
- if err := db.Update(func(tx *Tx) error {
+ err := db.Update(func(tx *Tx) error {
b, err := tx.CreateBucket([]byte("widgets"))
if err != nil {
t.Fatal(err)
}
- if err := b.Put([]byte("foo"), []byte("0000")); err != nil {
+
+ err = b.Put([]byte("foo"), []byte("0000"))
+ if err != nil {
t.Fatal(err)
}
- if err := b.Put([]byte("baz"), []byte("0001")); err != nil {
+
+ err = b.Put([]byte("baz"), []byte("0001"))
+ if err != nil {
t.Fatal(err)
}
- if err := b.Put([]byte("bar"), []byte("0002")); err != nil {
+
+ err = b.Put([]byte("bar"), []byte("0002"))
+ if err != nil {
t.Fatal(err)
}
var index int
- if err := b.ForEach(func(k, v []byte) error {
+ err = b.ForEach(func(k, v []byte) error {
switch index {
case 0:
if !bytes.Equal(k, []byte("bar")) {
@@ -1144,8 +1342,10 @@ func TestBucket_ForEach(t *testing.T) {
}
}
index++
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
@@ -1154,7 +1354,8 @@ func TestBucket_ForEach(t *testing.T) {
}
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
}
@@ -1165,29 +1366,38 @@ func TestBucket_ForEach_ShortCircuit(t *testing.T) {
defer db.MustClose()
defer os.Remove(db.Path())
- if err := db.Update(func(tx *Tx) error {
+ err := db.Update(func(tx *Tx) error {
b, err := tx.CreateBucket([]byte("widgets"))
if err != nil {
t.Fatal(err)
}
- if err := b.Put([]byte("bar"), []byte("0000")); err != nil {
+
+ err = b.Put([]byte("bar"), []byte("0000"))
+ if err != nil {
t.Fatal(err)
}
- if err := b.Put([]byte("baz"), []byte("0000")); err != nil {
+
+ err = b.Put([]byte("baz"), []byte("0000"))
+ if err != nil {
t.Fatal(err)
}
- if err := b.Put([]byte("foo"), []byte("0000")); err != nil {
+
+ err = b.Put([]byte("foo"), []byte("0000"))
+ if err != nil {
t.Fatal(err)
}
var index int
- if err := tx.Bucket([]byte("widgets")).ForEach(func(k, v []byte) error {
+ err = tx.Bucket(
+ []byte("widgets"),
+ ).ForEach(func(k, v []byte) error {
index++
if bytes.Equal(k, []byte("baz")) {
return errors.New("marker")
}
return nil
- }); err == nil || err.Error() != "marker" {
+ })
+ if err == nil || err.Error() != "marker" {
t.Fatalf("unexpected error: %s", err)
}
if index != 2 {
@@ -1195,7 +1405,8 @@ func TestBucket_ForEach_ShortCircuit(t *testing.T) {
}
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
}
@@ -1216,11 +1427,13 @@ func TestBucket_ForEach_Closed(t *testing.T) {
t.Fatal(err)
}
- if err := tx.Rollback(); err != nil {
+ err = tx.Rollback()
+ if err != nil {
t.Fatal(err)
}
- if err := b.ForEach(func(k, v []byte) error { return nil }); err != ErrTxClosed {
+ err = b.ForEach(func(k, v []byte) error { return nil })
+ if err != ErrTxClosed {
t.Fatalf("unexpected error: %s", err)
}
}
@@ -1231,19 +1444,25 @@ func TestBucket_Put_EmptyKey(t *testing.T) {
defer db.MustClose()
defer os.Remove(db.Path())
- if err := db.Update(func(tx *Tx) error {
+ err := db.Update(func(tx *Tx) error {
b, err := tx.CreateBucket([]byte("widgets"))
if err != nil {
t.Fatal(err)
}
- if err := b.Put([]byte(""), []byte("bar")); err != ErrKeyRequired {
+
+ err = b.Put([]byte(""), []byte("bar"))
+ if err != ErrKeyRequired {
t.Fatalf("unexpected error: %s", err)
}
- if err := b.Put(nil, []byte("bar")); err != ErrKeyRequired {
+
+ err = b.Put(nil, []byte("bar"))
+ if err != ErrKeyRequired {
t.Fatalf("unexpected error: %s", err)
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
}
@@ -1254,16 +1473,19 @@ func TestBucket_Put_KeyTooLarge(t *testing.T) {
defer db.MustClose()
defer os.Remove(db.Path())
- if err := db.Update(func(tx *Tx) error {
+ err := db.Update(func(tx *Tx) error {
b, err := tx.CreateBucket([]byte("widgets"))
if err != nil {
t.Fatal(err)
}
- if err := b.Put(make([]byte, 32769), []byte("bar")); err != ErrKeyTooLarge {
+
+ err = b.Put(make([]byte, 32769), []byte("bar"))
+ if err != ErrKeyTooLarge {
t.Fatalf("unexpected error: %s", err)
}
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
}
@@ -1274,75 +1496,104 @@ func TestBucket_Put_ValueTooLarge(t *testing.T) {
defer db.MustClose()
defer os.Remove(db.Path())
- if err := db.Update(func(tx *Tx) error {
+ err := db.Update(func(tx *Tx) error {
b, err := tx.CreateBucket([]byte("widgets"))
if err != nil {
t.Fatal(err)
}
- if err := b.Put([]byte("foo"), make([]byte, MaxValueSize+1)); err != ErrValueTooLarge {
+
+ err = b.Put([]byte("foo"), make([]byte, MaxValueSize+1))
+ if err != ErrValueTooLarge {
t.Fatalf("unexpected error: %s", err)
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
}
-// Ensure that a bucket can write random keys and values across multiple transactions.
+// Ensure that a bucket can write random keys and values across multiple
+// transactions.
func TestBucket_Put_Single(t *testing.T) {
if testing.Short() {
t.Skip("skipping test in short mode.")
}
index := 0
- if err := quick.Check(func(items testdata) bool {
+ err := quick.Check(func(items testdata) bool {
db := MustOpenDB()
defer db.MustClose()
defer os.Remove(db.Path())
m := make(map[string][]byte)
- if err := db.Update(func(tx *Tx) error {
- if _, err := tx.CreateBucket([]byte("widgets")); err != nil {
+ err := db.Update(func(tx *Tx) error {
+ _, err := tx.CreateBucket([]byte("widgets"))
+ if err != nil {
t.Fatal(err)
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
for _, item := range items {
- if err := db.Update(func(tx *Tx) error {
- if err := tx.Bucket([]byte("widgets")).Put(item.Key, item.Value); err != nil {
+ err := db.Update(func(tx *Tx) error {
+ err := tx.Bucket([]byte("widgets")).Put(
+ item.Key,
+ item.Value,
+ )
+ if err != nil {
panic("put error: " + err.Error())
}
+
m[string(item.Key)] = item.Value
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
+ const errmsg = "value mismatch [run %d] (%d of %d):\n" +
+ "key: %x\ngot: %x\nexp: %x"
// Verify all key/values so far.
- if err := db.View(func(tx *Tx) error {
+ err = db.View(func(tx *Tx) error {
i := 0
for k, v := range m {
- value := tx.Bucket([]byte("widgets")).Get([]byte(k))
+ value := tx.Bucket(
+ []byte("widgets"),
+ ).Get([]byte(k))
if !bytes.Equal(value, v) {
- t.Logf("value mismatch [run %d] (%d of %d):\nkey: %x\ngot: %x\nexp: %x", index, i, len(m), []byte(k), value, v)
+ t.Logf(
+ errmsg,
+ index,
+ i,
+ len(m),
+ []byte(k),
+ value,
+ v,
+ )
db.CopyTempFile()
t.FailNow()
}
i++
}
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
}
index++
return true
- }, nil); err != nil {
+ }, nil)
+ if err != nil {
t.Error(err)
}
}
@@ -1353,111 +1604,142 @@ func TestBucket_Put_Multiple(t *testing.T) {
t.Skip("skipping test in short mode.")
}
- if err := quick.Check(func(items testdata) bool {
+ err := quick.Check(func(items testdata) bool {
db := MustOpenDB()
defer db.MustClose()
defer os.Remove(db.Path())
// Bulk insert all values.
- if err := db.Update(func(tx *Tx) error {
- if _, err := tx.CreateBucket([]byte("widgets")); err != nil {
+ err := db.Update(func(tx *Tx) error {
+ _, err := tx.CreateBucket([]byte("widgets"))
+ if err != nil {
t.Fatal(err)
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
- if err := db.Update(func(tx *Tx) error {
+ err = db.Update(func(tx *Tx) error {
b := tx.Bucket([]byte("widgets"))
for _, item := range items {
- if err := b.Put(item.Key, item.Value); err != nil {
+ err := b.Put(item.Key, item.Value)
+ if err != nil {
t.Fatal(err)
}
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
// Verify all items exist.
- if err := db.View(func(tx *Tx) error {
+ err = db.View(func(tx *Tx) error {
b := tx.Bucket([]byte("widgets"))
for _, item := range items {
value := b.Get(item.Key)
if !bytes.Equal(item.Value, value) {
db.CopyTempFile()
- t.Fatalf("exp=%x; got=%x", item.Value, value)
+ t.Fatalf(
+ "exp=%x, got=%x",
+ item.Value,
+ value,
+ )
}
}
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
return true
- }, qconfig()); err != nil {
+ }, qconfig())
+ if err != nil {
t.Error(err)
}
}
-// Ensure that a transaction can delete all key/value pairs and return to a single leaf page.
+// Ensure that a transaction can delete all key/value pairs and return to a
+// single leaf page.
func TestBucket_Delete_Quick(t *testing.T) {
if testing.Short() {
t.Skip("skipping test in short mode.")
}
- if err := quick.Check(func(items testdata) bool {
+ err := quick.Check(func(items testdata) bool {
db := MustOpenDB()
defer db.MustClose()
defer os.Remove(db.Path())
// Bulk insert all values.
- if err := db.Update(func(tx *Tx) error {
- if _, err := tx.CreateBucket([]byte("widgets")); err != nil {
+ err := db.Update(func(tx *Tx) error {
+ _, err := tx.CreateBucket([]byte("widgets"))
+ if err != nil {
t.Fatal(err)
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
- if err := db.Update(func(tx *Tx) error {
+ err = db.Update(func(tx *Tx) error {
b := tx.Bucket([]byte("widgets"))
for _, item := range items {
- if err := b.Put(item.Key, item.Value); err != nil {
+ err := b.Put(item.Key, item.Value)
+ if err != nil {
t.Fatal(err)
}
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
// Remove items one at a time and check consistency.
for _, item := range items {
- if err := db.Update(func(tx *Tx) error {
- return tx.Bucket([]byte("widgets")).Delete(item.Key)
- }); err != nil {
+ err := db.Update(func(tx *Tx) error {
+ return tx.Bucket([]byte("widgets")).Delete(
+ item.Key,
+ )
+ })
+ if err != nil {
t.Fatal(err)
}
}
// Anything before our deletion index should be nil.
- if err := db.View(func(tx *Tx) error {
- if err := tx.Bucket([]byte("widgets")).ForEach(func(k, v []byte) error {
- t.Fatalf("bucket should be empty; found: %06x", trunc(k, 3))
+ err = db.View(func(tx *Tx) error {
+ err := tx.Bucket(
+ []byte("widgets"),
+ ).ForEach(func(k, v []byte) error {
+ t.Fatalf(
+ "bucket should be empty; found: %06x",
+ trunc(k, 3),
+ )
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
return true
- }, qconfig()); err != nil {
+ }, qconfig())
+ if err != nil {
t.Error(err)
}
}
@@ -1471,7 +1753,7 @@ func ExampleBucket_Put() {
defer os.Remove(db.Path())
// Start a write transaction.
- if err := db.Update(func(tx *Tx) error {
+ err = db.Update(func(tx *Tx) error {
// Create a bucket.
b, err := tx.CreateBucket([]byte("widgets"))
if err != nil {
@@ -1479,25 +1761,30 @@ func ExampleBucket_Put() {
}
// Set the value "bar" for the key "foo".
- if err := b.Put([]byte("foo"), []byte("bar")); err != nil {
+ err = b.Put([]byte("foo"), []byte("bar"))
+ if err != nil {
return err
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
log.Fatal(err)
}
// Read value back in a different read-only transaction.
- if err := db.View(func(tx *Tx) error {
+ err = db.View(func(tx *Tx) error {
value := tx.Bucket([]byte("widgets")).Get([]byte("foo"))
fmt.Printf("The value of 'foo' is: %s\n", value)
return nil
- }); err != nil {
+ })
+ if err != nil {
log.Fatal(err)
}
// Close database to release file lock.
- if err := db.Close(); err != nil {
+ err = db.Close()
+ if err != nil {
log.Fatal(err)
}
@@ -1514,7 +1801,7 @@ func ExampleBucket_Delete() {
defer os.Remove(db.Path())
// Start a write transaction.
- if err := db.Update(func(tx *Tx) error {
+ err = db.Update(func(tx *Tx) error {
// Create a bucket.
b, err := tx.CreateBucket([]byte("widgets"))
if err != nil {
@@ -1522,7 +1809,8 @@ func ExampleBucket_Delete() {
}
// Set the value "bar" for the key "foo".
- if err := b.Put([]byte("foo"), []byte("bar")); err != nil {
+ err = b.Put([]byte("foo"), []byte("bar"))
+ if err != nil {
return err
}
@@ -1531,30 +1819,34 @@ func ExampleBucket_Delete() {
fmt.Printf("The value of 'foo' was: %s\n", value)
return nil
- }); err != nil {
+ })
+ if err != nil {
log.Fatal(err)
}
// Delete the key in a different write transaction.
- if err := db.Update(func(tx *Tx) error {
+ err = db.Update(func(tx *Tx) error {
return tx.Bucket([]byte("widgets")).Delete([]byte("foo"))
- }); err != nil {
+ })
+ if err != nil {
log.Fatal(err)
}
// Retrieve the key again.
- if err := db.View(func(tx *Tx) error {
+ err = db.View(func(tx *Tx) error {
value := tx.Bucket([]byte("widgets")).Get([]byte("foo"))
if value == nil {
fmt.Printf("The value of 'foo' is now: nil\n")
}
return nil
- }); err != nil {
+ })
+ if err != nil {
log.Fatal(err)
}
// Close database to release file lock.
- if err := db.Close(); err != nil {
+ err = db.Close()
+ if err != nil {
log.Fatal(err)
}
@@ -1572,37 +1864,45 @@ func ExampleBucket_ForEach() {
defer os.Remove(db.Path())
// Insert data into a bucket.
- if err := db.Update(func(tx *Tx) error {
+ err = db.Update(func(tx *Tx) error {
b, err := tx.CreateBucket([]byte("animals"))
if err != nil {
return err
}
- if err := b.Put([]byte("dog"), []byte("fun")); err != nil {
+ err = b.Put([]byte("dog"), []byte("fun"))
+ if err != nil {
return err
}
- if err := b.Put([]byte("cat"), []byte("lame")); err != nil {
+
+ err = b.Put([]byte("cat"), []byte("lame"))
+ if err != nil {
return err
}
- if err := b.Put([]byte("liger"), []byte("awesome")); err != nil {
+
+ err = b.Put([]byte("liger"), []byte("awesome"))
+ if err != nil {
return err
}
// Iterate over items in sorted key order.
- if err := b.ForEach(func(k, v []byte) error {
+ err = b.ForEach(func(k, v []byte) error {
fmt.Printf("A %s is %s.\n", k, v)
return nil
- }); err != nil {
+ })
+ if err != nil {
return err
}
return nil
- }); err != nil {
+ })
+ if err != nil {
log.Fatal(err)
}
// Close database to release file lock.
- if err := db.Close(); err != nil {
+ err = db.Close()
+ if err != nil {
log.Fatal(err)
}
@@ -1618,16 +1918,20 @@ func TestCursor_Bucket(t *testing.T) {
defer db.MustClose()
defer os.Remove(db.Path())
- if err := db.Update(func(tx *Tx) error {
+ err := db.Update(func(tx *Tx) error {
b, err := tx.CreateBucket([]byte("widgets"))
if err != nil {
t.Fatal(err)
}
- if cb := b.Cursor().Bucket(); !reflect.DeepEqual(cb, b) {
+
+ cb := b.Cursor().Bucket()
+ if !reflect.DeepEqual(cb, b) {
t.Fatal("cursor bucket mismatch")
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
}
@@ -1638,69 +1942,84 @@ func TestCursor_Seek(t *testing.T) {
defer db.MustClose()
defer os.Remove(db.Path())
- if err := db.Update(func(tx *Tx) error {
+ err := db.Update(func(tx *Tx) error {
b, err := tx.CreateBucket([]byte("widgets"))
if err != nil {
t.Fatal(err)
}
- if err := b.Put([]byte("foo"), []byte("0001")); err != nil {
+
+ err = b.Put([]byte("foo"), []byte("0001"))
+ if err != nil {
t.Fatal(err)
}
- if err := b.Put([]byte("bar"), []byte("0002")); err != nil {
+
+ err = b.Put([]byte("bar"), []byte("0002"))
+ if err != nil {
t.Fatal(err)
}
- if err := b.Put([]byte("baz"), []byte("0003")); err != nil {
+
+ err = b.Put([]byte("baz"), []byte("0003"))
+ if err != nil {
t.Fatal(err)
}
- if _, err := b.CreateBucket([]byte("bkt")); err != nil {
+ _, err = b.CreateBucket([]byte("bkt"))
+ if err != nil {
t.Fatal(err)
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
- if err := db.View(func(tx *Tx) error {
+ err = db.View(func(tx *Tx) error {
c := tx.Bucket([]byte("widgets")).Cursor()
// Exact match should go to the key.
- if k, v := c.Seek([]byte("bar")); !bytes.Equal(k, []byte("bar")) {
+ k, v := c.Seek([]byte("bar"))
+ if !bytes.Equal(k, []byte("bar")) {
t.Fatalf("unexpected key: %v", k)
} else if !bytes.Equal(v, []byte("0002")) {
t.Fatalf("unexpected value: %v", v)
}
// Inexact match should go to the next key.
- if k, v := c.Seek([]byte("bas")); !bytes.Equal(k, []byte("baz")) {
+ k, v = c.Seek([]byte("bas"))
+ if !bytes.Equal(k, []byte("baz")) {
t.Fatalf("unexpected key: %v", k)
} else if !bytes.Equal(v, []byte("0003")) {
t.Fatalf("unexpected value: %v", v)
}
// Low key should go to the first key.
- if k, v := c.Seek([]byte("")); !bytes.Equal(k, []byte("bar")) {
+ k, v = c.Seek([]byte(""))
+ if !bytes.Equal(k, []byte("bar")) {
t.Fatalf("unexpected key: %v", k)
} else if !bytes.Equal(v, []byte("0002")) {
t.Fatalf("unexpected value: %v", v)
}
// High key should return no key.
- if k, v := c.Seek([]byte("zzz")); k != nil {
+ k, v = c.Seek([]byte("zzz"))
+ if k != nil {
t.Fatalf("expected nil key: %v", k)
} else if v != nil {
t.Fatalf("expected nil value: %v", v)
}
// Buckets should return their key but no value.
- if k, v := c.Seek([]byte("bkt")); !bytes.Equal(k, []byte("bkt")) {
+ k, v = c.Seek([]byte("bkt"))
+ if !bytes.Equal(k, []byte("bkt")) {
t.Fatalf("unexpected key: %v", k)
} else if v != nil {
t.Fatalf("expected nil value: %v", v)
}
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
}
@@ -1713,43 +2032,55 @@ func TestCursor_Delete(t *testing.T) {
const count = 1000
// Insert every other key between 0 and $count.
- if err := db.Update(func(tx *Tx) error {
+ err := db.Update(func(tx *Tx) error {
b, err := tx.CreateBucket([]byte("widgets"))
if err != nil {
t.Fatal(err)
}
+
for i := 0; i < count; i += 1 {
k := make([]byte, 8)
binary.BigEndian.PutUint64(k, uint64(i))
- if err := b.Put(k, make([]byte, 100)); err != nil {
+ err := b.Put(k, make([]byte, 100))
+ if err != nil {
t.Fatal(err)
}
}
- if _, err := b.CreateBucket([]byte("sub")); err != nil {
+
+ _, err = b.CreateBucket([]byte("sub"))
+ if err != nil {
t.Fatal(err)
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
- if err := db.Update(func(tx *Tx) error {
+ err = db.Update(func(tx *Tx) error {
c := tx.Bucket([]byte("widgets")).Cursor()
bound := make([]byte, 8)
binary.BigEndian.PutUint64(bound, uint64(count/2))
- for key, _ := c.First(); bytes.Compare(key, bound) < 0; key, _ = c.Next() {
- if err := c.Delete(); err != nil {
+ for
+ key, _ := c.First();
+ bytes.Compare(key, bound) < 0;
+ key, _ = c.Next() {
+ err := c.Delete()
+ if err != nil {
t.Fatal(err)
}
}
c.Seek([]byte("sub"))
- if err := c.Delete(); err != ErrIncompatibleValue {
+ err = c.Delete()
+ if err != ErrIncompatibleValue {
t.Fatalf("unexpected error: %s", err)
}
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
}
@@ -1764,10 +2095,10 @@ func TestCursor_Seek_Large(t *testing.T) {
defer db.MustClose()
defer os.Remove(db.Path())
- var count = 10000
+ const count = 10000
// Insert every other key between 0 and $count.
- if err := db.Update(func(tx *Tx) error {
+ err := db.Update(func(tx *Tx) error {
b, err := tx.CreateBucket([]byte("widgets"))
if err != nil {
t.Fatal(err)
@@ -1777,17 +2108,19 @@ func TestCursor_Seek_Large(t *testing.T) {
for j := i; j < i+100; j += 2 {
k := make([]byte, 8)
binary.BigEndian.PutUint64(k, uint64(j))
- if err := b.Put(k, make([]byte, 100)); err != nil {
+ err := b.Put(k, make([]byte, 100))
+ if err != nil {
t.Fatal(err)
}
}
}
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
- if err := db.View(func(tx *Tx) error {
+ err = db.View(func(tx *Tx) error {
c := tx.Bucket([]byte("widgets")).Cursor()
for i := 0; i < count; i++ {
seek := make([]byte, 8)
@@ -1804,7 +2137,8 @@ func TestCursor_Seek_Large(t *testing.T) {
continue
}
- // Otherwise we should seek to the exact key or the next key.
+ // Otherwise we should seek to the exact key or the next
+ // key.
num := binary.BigEndian.Uint64(k)
if i%2 == 0 {
if num != uint64(i) {
@@ -1818,7 +2152,8 @@ func TestCursor_Seek_Large(t *testing.T) {
}
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
}
@@ -1829,14 +2164,15 @@ func TestCursor_EmptyBucket(t *testing.T) {
defer db.MustClose()
defer os.Remove(db.Path())
- if err := db.Update(func(tx *Tx) error {
+ err := db.Update(func(tx *Tx) error {
_, err := tx.CreateBucket([]byte("widgets"))
return err
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
- if err := db.View(func(tx *Tx) error {
+ err = db.View(func(tx *Tx) error {
c := tx.Bucket([]byte("widgets")).Cursor()
k, v := c.First()
if k != nil {
@@ -1845,24 +2181,28 @@ func TestCursor_EmptyBucket(t *testing.T) {
t.Fatalf("unexpected value: %v", v)
}
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
}
-// Ensure that a Tx cursor can reverse iterate over an empty bucket without error.
+// Ensure that a Tx cursor can reverse iterate over an empty bucket without
+// error.
func TestCursor_EmptyBucketReverse(t *testing.T) {
db := MustOpenDB()
defer db.MustClose()
defer os.Remove(db.Path())
- if err := db.Update(func(tx *Tx) error {
+ err := db.Update(func(tx *Tx) error {
_, err := tx.CreateBucket([]byte("widgets"))
return err
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
- if err := db.View(func(tx *Tx) error {
+
+ err = db.View(func(tx *Tx) error {
c := tx.Bucket([]byte("widgets")).Cursor()
k, v := c.Last()
if k != nil {
@@ -1871,35 +2211,46 @@ func TestCursor_EmptyBucketReverse(t *testing.T) {
t.Fatalf("unexpected value: %v", v)
}
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
}
-// Ensure that a Tx cursor can iterate over a single root with a couple elements.
+// Ensure that a Tx cursor can iterate over a single root with a couple
+// elements.
func TestCursor_Iterate_Leaf(t *testing.T) {
db := MustOpenDB()
defer db.MustClose()
defer os.Remove(db.Path())
- if err := db.Update(func(tx *Tx) error {
+ err := db.Update(func(tx *Tx) error {
b, err := tx.CreateBucket([]byte("widgets"))
if err != nil {
t.Fatal(err)
}
- if err := b.Put([]byte("baz"), []byte{}); err != nil {
+
+ err = b.Put([]byte("baz"), []byte{})
+ if err != nil {
t.Fatal(err)
}
- if err := b.Put([]byte("foo"), []byte{0}); err != nil {
+
+ err = b.Put([]byte("foo"), []byte{0})
+ if err != nil {
t.Fatal(err)
}
- if err := b.Put([]byte("bar"), []byte{1}); err != nil {
+
+ err = b.Put([]byte("bar"), []byte{1})
+ if err != nil {
t.Fatal(err)
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
+
tx, err := db.Begin(false)
if err != nil {
t.Fatal(err)
@@ -1943,72 +2294,89 @@ func TestCursor_Iterate_Leaf(t *testing.T) {
t.Fatalf("expected nil value: %v", v)
}
- if err := tx.Rollback(); err != nil {
+ err = tx.Rollback()
+ if err != nil {
t.Fatal(err)
}
}
-// Ensure that a Tx cursor can iterate in reverse over a single root with a couple elements.
+// Ensure that a Tx cursor can iterate in reverse over a single root with a
+// couple elements.
func TestCursor_LeafRootReverse(t *testing.T) {
db := MustOpenDB()
defer db.MustClose()
defer os.Remove(db.Path())
- if err := db.Update(func(tx *Tx) error {
+ err := db.Update(func(tx *Tx) error {
b, err := tx.CreateBucket([]byte("widgets"))
if err != nil {
t.Fatal(err)
}
- if err := b.Put([]byte("baz"), []byte{}); err != nil {
+
+ err = b.Put([]byte("baz"), []byte{})
+ if err != nil {
t.Fatal(err)
}
- if err := b.Put([]byte("foo"), []byte{0}); err != nil {
+
+ err = b.Put([]byte("foo"), []byte{0})
+ if err != nil {
t.Fatal(err)
}
- if err := b.Put([]byte("bar"), []byte{1}); err != nil {
+
+ err = b.Put([]byte("bar"), []byte{1})
+ if err != nil {
t.Fatal(err)
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
+
tx, err := db.Begin(false)
if err != nil {
t.Fatal(err)
}
- c := tx.Bucket([]byte("widgets")).Cursor()
- if k, v := c.Last(); !bytes.Equal(k, []byte("foo")) {
+ c := tx.Bucket([]byte("widgets")).Cursor()
+ k, v := c.Last()
+ if !bytes.Equal(k, []byte("foo")) {
t.Fatalf("unexpected key: %v", k)
} else if !bytes.Equal(v, []byte{0}) {
t.Fatalf("unexpected value: %v", v)
}
- if k, v := c.Prev(); !bytes.Equal(k, []byte("baz")) {
+ k, v = c.Prev()
+ if !bytes.Equal(k, []byte("baz")) {
t.Fatalf("unexpected key: %v", k)
} else if !bytes.Equal(v, []byte{}) {
t.Fatalf("unexpected value: %v", v)
}
- if k, v := c.Prev(); !bytes.Equal(k, []byte("bar")) {
+ k, v = c.Prev()
+ if !bytes.Equal(k, []byte("bar")) {
t.Fatalf("unexpected key: %v", k)
} else if !bytes.Equal(v, []byte{1}) {
t.Fatalf("unexpected value: %v", v)
}
- if k, v := c.Prev(); k != nil {
+ k, v = c.Prev()
+ if k != nil {
t.Fatalf("expected nil key: %v", k)
} else if v != nil {
t.Fatalf("expected nil value: %v", v)
}
- if k, v := c.Prev(); k != nil {
+ k, v = c.Prev()
+ if k != nil {
t.Fatalf("expected nil key: %v", k)
} else if v != nil {
t.Fatalf("expected nil value: %v", v)
}
- if err := tx.Rollback(); err != nil {
+ err = tx.Rollback()
+ if err != nil {
t.Fatal(err)
}
}
@@ -2019,19 +2387,25 @@ func TestCursor_Restart(t *testing.T) {
defer db.MustClose()
defer os.Remove(db.Path())
- if err := db.Update(func(tx *Tx) error {
+ err := db.Update(func(tx *Tx) error {
b, err := tx.CreateBucket([]byte("widgets"))
if err != nil {
t.Fatal(err)
}
- if err := b.Put([]byte("bar"), []byte{}); err != nil {
+
+ err = b.Put([]byte("bar"), []byte{})
+ if err != nil {
t.Fatal(err)
}
- if err := b.Put([]byte("foo"), []byte{}); err != nil {
+
+ err = b.Put([]byte("foo"), []byte{})
+ if err != nil {
t.Fatal(err)
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
@@ -2039,23 +2413,30 @@ func TestCursor_Restart(t *testing.T) {
if err != nil {
t.Fatal(err)
}
+
c := tx.Bucket([]byte("widgets")).Cursor()
- if k, _ := c.First(); !bytes.Equal(k, []byte("bar")) {
+ k, _ := c.First()
+ if !bytes.Equal(k, []byte("bar")) {
t.Fatalf("unexpected key: %v", k)
}
- if k, _ := c.Next(); !bytes.Equal(k, []byte("foo")) {
+
+ k, _ = c.Next()
+ if !bytes.Equal(k, []byte("foo")) {
t.Fatalf("unexpected key: %v", k)
}
- if k, _ := c.First(); !bytes.Equal(k, []byte("bar")) {
+ k, _ = c.First()
+ if !bytes.Equal(k, []byte("bar")) {
t.Fatalf("unexpected key: %v", k)
}
- if k, _ := c.Next(); !bytes.Equal(k, []byte("foo")) {
+ k, _ = c.Next()
+ if !bytes.Equal(k, []byte("foo")) {
t.Fatalf("unexpected key: %v", k)
}
- if err := tx.Rollback(); err != nil {
+ err = tx.Rollback()
+ if err != nil {
t.Fatal(err)
}
}
@@ -2067,28 +2448,31 @@ func TestCursor_First_EmptyPages(t *testing.T) {
defer os.Remove(db.Path())
// Create 1000 keys in the "widgets" bucket.
- if err := db.Update(func(tx *Tx) error {
+ err := db.Update(func(tx *Tx) error {
b, err := tx.CreateBucket([]byte("widgets"))
if err != nil {
t.Fatal(err)
}
for i := 0; i < 1000; i++ {
- if err := b.Put(u64tob(uint64(i)), []byte{}); err != nil {
+ err := b.Put(u64tob(uint64(i)), []byte{})
+ if err != nil {
t.Fatal(err)
}
}
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
// Delete half the keys and then try to iterate.
- if err := db.Update(func(tx *Tx) error {
+ err = db.Update(func(tx *Tx) error {
b := tx.Bucket([]byte("widgets"))
for i := 0; i < 600; i++ {
- if err := b.Delete(u64tob(uint64(i))); err != nil {
+ err := b.Delete(u64tob(uint64(i)))
+ if err != nil {
t.Fatal(err)
}
}
@@ -2103,7 +2487,8 @@ func TestCursor_First_EmptyPages(t *testing.T) {
}
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
}
@@ -2125,11 +2510,14 @@ func TestCursor_QuickCheck(t *testing.T) {
t.Fatal(err)
}
for _, item := range items {
- if err := b.Put(item.Key, item.Value); err != nil {
+ err := b.Put(item.Key, item.Value)
+ if err != nil {
t.Fatal(err)
}
}
- if err := tx.Commit(); err != nil {
+
+ err = tx.Commit()
+ if err != nil {
t.Fatal(err)
}
@@ -2137,14 +2525,17 @@ func TestCursor_QuickCheck(t *testing.T) {
sort.Sort(items)
// Iterate over all items and check consistency.
- var index = 0
+ index := 0
tx, err = db.Begin(false)
if err != nil {
t.Fatal(err)
}
c := tx.Bucket([]byte("widgets")).Cursor()
- for k, v := c.First(); k != nil && index < len(items); k, v = c.Next() {
+ for
+ k, v := c.First();
+ k != nil && index < len(items);
+ k, v = c.Next() {
if !bytes.Equal(k, items[index].Key) {
t.Fatalf("unexpected key: %v", k)
} else if !bytes.Equal(v, items[index].Value) {
@@ -2153,21 +2544,29 @@ func TestCursor_QuickCheck(t *testing.T) {
index++
}
if len(items) != index {
- t.Fatalf("unexpected item count: %v, expected %v", len(items), index)
+ t.Fatalf(
+ "unexpected item count: %v, expected %v",
+ len(items),
+ index,
+ )
}
- if err := tx.Rollback(); err != nil {
+ err = tx.Rollback()
+ if err != nil {
t.Fatal(err)
}
return true
}
- if err := quick.Check(f, qconfig()); err != nil {
+
+ err := quick.Check(f, qconfig())
+ if err != nil {
t.Error(err)
}
}
-// Ensure that a transaction can iterate over all elements in a bucket in reverse.
+// Ensure that a transaction can iterate over all elements in a bucket in
+// reverse.
func TestCursor_QuickCheck_Reverse(t *testing.T) {
f := func(items testdata) bool {
db := MustOpenDB()
@@ -2179,16 +2578,21 @@ func TestCursor_QuickCheck_Reverse(t *testing.T) {
if err != nil {
t.Fatal(err)
}
+
b, err := tx.CreateBucket([]byte("widgets"))
if err != nil {
t.Fatal(err)
}
+
for _, item := range items {
- if err := b.Put(item.Key, item.Value); err != nil {
+ err := b.Put(item.Key, item.Value)
+ if err != nil {
t.Fatal(err)
}
}
- if err := tx.Commit(); err != nil {
+
+ err = tx.Commit()
+ if err != nil {
t.Fatal(err)
}
@@ -2196,13 +2600,17 @@ func TestCursor_QuickCheck_Reverse(t *testing.T) {
sort.Sort(revtestdata(items))
// Iterate over all items and check consistency.
- var index = 0
+ index := 0
tx, err = db.Begin(false)
if err != nil {
t.Fatal(err)
}
+
c := tx.Bucket([]byte("widgets")).Cursor()
- for k, v := c.Last(); k != nil && index < len(items); k, v = c.Prev() {
+ for
+ k, v := c.Last();
+ k != nil && index < len(items);
+ k, v = c.Prev() {
if !bytes.Equal(k, items[index].Key) {
t.Fatalf("unexpected key: %v", k)
} else if !bytes.Equal(v, items[index].Value) {
@@ -2210,17 +2618,25 @@ func TestCursor_QuickCheck_Reverse(t *testing.T) {
}
index++
}
+
if len(items) != index {
- t.Fatalf("unexpected item count: %v, expected %v", len(items), index)
+ t.Fatalf(
+ "unexpected item count: %v, expected %v",
+ len(items),
+ index,
+ )
}
- if err := tx.Rollback(); err != nil {
+ err = tx.Rollback()
+ if err != nil {
t.Fatal(err)
}
return true
}
- if err := quick.Check(f, qconfig()); err != nil {
+
+ err := quick.Check(f, qconfig())
+ if err != nil {
t.Error(err)
}
}
@@ -2231,27 +2647,35 @@ func TestCursor_QuickCheck_BucketsOnly(t *testing.T) {
defer db.MustClose()
defer os.Remove(db.Path())
- if err := db.Update(func(tx *Tx) error {
+ err := db.Update(func(tx *Tx) error {
b, err := tx.CreateBucket([]byte("widgets"))
if err != nil {
t.Fatal(err)
}
- if _, err := b.CreateBucket([]byte("foo")); err != nil {
+
+ _, err = b.CreateBucket([]byte("foo"))
+ if err != nil {
t.Fatal(err)
}
- if _, err := b.CreateBucket([]byte("bar")); err != nil {
+
+ _, err = b.CreateBucket([]byte("bar"))
+ if err != nil {
t.Fatal(err)
}
- if _, err := b.CreateBucket([]byte("baz")); err != nil {
+
+ _, err = b.CreateBucket([]byte("baz"))
+ if err != nil {
t.Fatal(err)
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
- if err := db.View(func(tx *Tx) error {
- var names []string
+ err = db.View(func(tx *Tx) error {
+ names := []string{}
c := tx.Bucket([]byte("widgets")).Cursor()
for k, v := c.First(); k != nil; k, v = c.Next() {
names = append(names, string(k))
@@ -2259,11 +2683,14 @@ func TestCursor_QuickCheck_BucketsOnly(t *testing.T) {
t.Fatalf("unexpected value: %v", v)
}
}
+
if !reflect.DeepEqual(names, []string{"bar", "baz", "foo"}) {
t.Fatalf("unexpected names: %+v", names)
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
}
@@ -2274,27 +2701,35 @@ func TestCursor_QuickCheck_BucketsOnly_Reverse(t *testing.T) {
defer db.MustClose()
defer os.Remove(db.Path())
- if err := db.Update(func(tx *Tx) error {
+ err := db.Update(func(tx *Tx) error {
b, err := tx.CreateBucket([]byte("widgets"))
if err != nil {
t.Fatal(err)
}
- if _, err := b.CreateBucket([]byte("foo")); err != nil {
+
+ _, err = b.CreateBucket([]byte("foo"))
+ if err != nil {
t.Fatal(err)
}
- if _, err := b.CreateBucket([]byte("bar")); err != nil {
+
+ _, err = b.CreateBucket([]byte("bar"))
+ if err != nil {
t.Fatal(err)
}
- if _, err := b.CreateBucket([]byte("baz")); err != nil {
+
+ _, err = b.CreateBucket([]byte("baz"))
+ if err != nil {
t.Fatal(err)
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
- if err := db.View(func(tx *Tx) error {
- var names []string
+ err = db.View(func(tx *Tx) error {
+ names := []string{}
c := tx.Bucket([]byte("widgets")).Cursor()
for k, v := c.Last(); k != nil; k, v = c.Prev() {
names = append(names, string(k))
@@ -2302,11 +2737,14 @@ func TestCursor_QuickCheck_BucketsOnly_Reverse(t *testing.T) {
t.Fatalf("unexpected value: %v", v)
}
}
+
if !reflect.DeepEqual(names, []string{"foo", "baz", "bar"}) {
t.Fatalf("unexpected names: %+v", names)
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
}
@@ -2320,7 +2758,7 @@ func ExampleCursor() {
defer os.Remove(db.Path())
// Start a read-write transaction.
- if err := db.Update(func(tx *Tx) error {
+ err = db.Update(func(tx *Tx) error {
// Create a new bucket.
b, err := tx.CreateBucket([]byte("animals"))
if err != nil {
@@ -2328,13 +2766,18 @@ func ExampleCursor() {
}
// Insert data into a bucket.
- if err := b.Put([]byte("dog"), []byte("fun")); err != nil {
+ err = b.Put([]byte("dog"), []byte("fun"))
+ if err != nil {
log.Fatal(err)
}
- if err := b.Put([]byte("cat"), []byte("lame")); err != nil {
+
+ err = b.Put([]byte("cat"), []byte("lame"))
+ if err != nil {
log.Fatal(err)
}
- if err := b.Put([]byte("liger"), []byte("awesome")); err != nil {
+
+ err = b.Put([]byte("liger"), []byte("awesome"))
+ if err != nil {
log.Fatal(err)
}
@@ -2345,17 +2788,20 @@ func ExampleCursor() {
// first key/value pair and updates the k/v variables to the
// next key/value on each iteration.
//
- // The loop finishes at the end of the cursor when a nil key is returned.
+ // The loop finishes at the end of the cursor when a nil key is
+ // returned.
for k, v := c.First(); k != nil; k, v = c.Next() {
fmt.Printf("A %s is %s.\n", k, v)
}
return nil
- }); err != nil {
+ })
+ if err != nil {
log.Fatal(err)
}
- if err := db.Close(); err != nil {
+ err = db.Close()
+ if err != nil {
log.Fatal(err)
}
@@ -2374,7 +2820,7 @@ func ExampleCursor_reverse() {
defer os.Remove(db.Path())
// Start a read-write transaction.
- if err := db.Update(func(tx *Tx) error {
+ err = db.Update(func(tx *Tx) error {
// Create a new bucket.
b, err := tx.CreateBucket([]byte("animals"))
if err != nil {
@@ -2382,13 +2828,18 @@ func ExampleCursor_reverse() {
}
// Insert data into a bucket.
- if err := b.Put([]byte("dog"), []byte("fun")); err != nil {
+ err = b.Put([]byte("dog"), []byte("fun"))
+ if err != nil {
log.Fatal(err)
}
- if err := b.Put([]byte("cat"), []byte("lame")); err != nil {
+
+ err = b.Put([]byte("cat"), []byte("lame"))
+ if err != nil {
log.Fatal(err)
}
- if err := b.Put([]byte("liger"), []byte("awesome")); err != nil {
+
+ err = b.Put([]byte("liger"), []byte("awesome"))
+ if err != nil {
log.Fatal(err)
}
@@ -2399,19 +2850,21 @@ func ExampleCursor_reverse() {
// from the last key/value pair and updates the k/v variables to
// the previous key/value on each iteration.
//
- // The loop finishes at the beginning of the cursor when a nil key
- // is returned.
+ // The loop finishes at the beginning of the cursor when a nil
+ // key is returned.
for k, v := c.Last(); k != nil; k, v = c.Prev() {
fmt.Printf("A %s is %s.\n", k, v)
}
return nil
- }); err != nil {
+ })
+ if err != nil {
log.Fatal(err)
}
// Close the database to release the file lock.
- if err := db.Close(); err != nil {
+ err = db.Close()
+ if err != nil {
log.Fatal(err)
}
@@ -2435,11 +2888,13 @@ func TestOpen(t *testing.T) {
}
defer os.Remove(db.Path())
- if s := db.Path(); s != path {
+ s := db.Path()
+ if s != path {
t.Fatalf("unexpected path: %s", s)
}
- if err := db.Close(); err != nil {
+ err = db.Close()
+ if err != nil {
t.Fatal(err)
}
}
@@ -2460,7 +2915,8 @@ func TestOpen_ErrNotExists(t *testing.T) {
}
}
-// Ensure that opening a file with two invalid versions returns ErrVersionMismatch.
+// Ensure that opening a file with two invalid versions returns
+// ErrVersionMismatch.
func TestOpen_ErrVersionMismatch(t *testing.T) {
if pageSize != os.Getpagesize() {
t.Skip("page size mismatch")
@@ -2473,7 +2929,8 @@ func TestOpen_ErrVersionMismatch(t *testing.T) {
path := db.Path()
// Close database.
- if err := db.DB.Close(); err != nil {
+ err := db.DB.Close()
+ if err != nil {
t.Fatal(err)
}
@@ -2488,12 +2945,14 @@ func TestOpen_ErrVersionMismatch(t *testing.T) {
meta0.version++
meta1 := (*meta)(unsafe.Pointer(&buf[pageSize+pageHeaderSize]))
meta1.version++
- if err := ioutil.WriteFile(path, buf, 0666); err != nil {
+ err = ioutil.WriteFile(path, buf, 0666)
+ if err != nil {
t.Fatal(err)
}
// Reopen data file.
- if _, err := Open(path); err != ErrVersionMismatch {
+ _, err = Open(path)
+ if err != ErrVersionMismatch {
t.Fatalf("unexpected error: %s", err)
}
}
@@ -2511,7 +2970,8 @@ func TestOpen_ErrChecksum(t *testing.T) {
path := db.Path()
// Close database.
- if err := db.DB.Close(); err != nil {
+ err := db.DB.Close()
+ if err != nil {
t.Fatal(err)
}
@@ -2526,12 +2986,14 @@ func TestOpen_ErrChecksum(t *testing.T) {
meta0.pgid++
meta1 := (*meta)(unsafe.Pointer(&buf[pageSize+pageHeaderSize]))
meta1.pgid++
- if err := ioutil.WriteFile(path, buf, 0666); err != nil {
+ err = ioutil.WriteFile(path, buf, 0666)
+ if err != nil {
t.Fatal(err)
}
// Reopen data file.
- if _, err := Open(path); err != ErrChecksum {
+ _, err = Open(path)
+ if err != ErrChecksum {
t.Fatalf("unexpected error: %s", err)
}
}
@@ -2548,22 +3010,29 @@ func TestOpen_Size(t *testing.T) {
pagesize := db.pageSize
// Insert until we get above the minimum 4MB size.
- if err := db.Update(func(tx *Tx) error {
+ err := db.Update(func(tx *Tx) error {
b, _ := tx.CreateBucketIfNotExists([]byte("data"))
for i := 0; i < 10000; i++ {
- if err := b.Put([]byte(fmt.Sprintf("%04d", i)), make([]byte, 1000)); err != nil {
+			err := b.Put(
+				[]byte(fmt.Sprintf("%04d", i)),
+				make([]byte, 1000),
+			)
+ if err != nil {
t.Fatal(err)
}
}
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
// Close database and grab the size.
- if err := db.DB.Close(); err != nil {
+ err = db.DB.Close()
+ if err != nil {
t.Fatal(err)
}
+
sz := fileSize(path)
if sz == 0 {
t.Fatalf("unexpected new file size: %d", sz)
@@ -2574,30 +3043,38 @@ func TestOpen_Size(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- if err := db0.Update(func(tx *Tx) error {
- if err := tx.Bucket([]byte("data")).Put([]byte{0}, []byte{0}); err != nil {
+
+ err = db0.Update(func(tx *Tx) error {
+ err := tx.Bucket([]byte("data")).Put([]byte{0}, []byte{0})
+ if err != nil {
t.Fatal(err)
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
- if err := db0.Close(); err != nil {
+
+ err = db0.Close()
+ if err != nil {
t.Fatal(err)
}
+
newSz := fileSize(path)
if newSz == 0 {
t.Fatalf("unexpected new file size: %d", newSz)
}
- // Compare the original size with the new size.
- // db size might increase by a few page sizes due to the new small update.
+ // Compare the original size with the new size. Database size might
+ // increase by a few page sizes due to the new small update.
if sz < newSz-5*int64(pagesize) {
t.Fatalf("unexpected file growth: %d => %d", sz, newSz)
}
}
-// Ensure that opening a database beyond the max step size does not increase its size.
+// Ensure that opening a database beyond the max step size does not increase its
+// size.
// https://github.com/boltdb/bolt/issues/303
func TestOpen_Size_Large(t *testing.T) {
return // FIXME: way too slow
@@ -2618,24 +3095,29 @@ func TestOpen_Size_Large(t *testing.T) {
// Insert until we get above the minimum 4MB size.
var index uint64
for i := 0; i < n1; i++ {
- if err := db.Update(func(tx *Tx) error {
+ err := db.Update(func(tx *Tx) error {
b, _ := tx.CreateBucketIfNotExists([]byte("data"))
for j := 0; j < n2; j++ {
- if err := b.Put(u64tob(index), make([]byte, 50)); err != nil {
+ err := b.Put(u64tob(index), make([]byte, 50))
+ if err != nil {
t.Fatal(err)
}
+
index++
}
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
}
// Close database and grab the size.
- if err := db.DB.Close(); err != nil {
+ err := db.DB.Close()
+ if err != nil {
t.Fatal(err)
}
+
sz := fileSize(path)
if sz == 0 {
t.Fatalf("unexpected new file size: %d", sz)
@@ -2648,12 +3130,16 @@ func TestOpen_Size_Large(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- if err := db0.Update(func(tx *Tx) error {
+
+ err = db0.Update(func(tx *Tx) error {
return tx.Bucket([]byte("data")).Put([]byte{0}, []byte{0})
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
- if err := db0.Close(); err != nil {
+
+ err = db0.Close()
+ if err != nil {
t.Fatal(err)
}
@@ -2662,8 +3148,8 @@ func TestOpen_Size_Large(t *testing.T) {
t.Fatalf("unexpected new file size: %d", newSz)
}
- // Compare the original size with the new size.
- // db size might increase by a few page sizes due to the new small update.
+ // Compare the original size with the new size. Database size might
+ // increase by a few page sizes due to the new small update.
if sz < newSz - (5 * int64(pagesize)) {
t.Fatalf("unexpected file growth: %d => %d", sz, newSz)
}
@@ -2679,11 +3165,13 @@ func TestOpen_Check(t *testing.T) {
t.Fatal(err)
}
- if err := db.View(func(tx *Tx) error { return <-tx.Check() }); err != nil {
+ err = db.View(func(tx *Tx) error { return <-tx.Check() })
+ if err != nil {
t.Fatal(err)
}
- if err := db.Close(); err != nil {
+ err = db.Close()
+ if err != nil {
t.Fatal(err)
}
@@ -2692,16 +3180,19 @@ func TestOpen_Check(t *testing.T) {
t.Fatal(err)
}
- if err := db.View(func(tx *Tx) error { return <-tx.Check() }); err != nil {
+ err = db.View(func(tx *Tx) error { return <-tx.Check() })
+ if err != nil {
t.Fatal(err)
}
- if err := db.Close(); err != nil {
+ err = db.Close()
+ if err != nil {
t.Fatal(err)
}
}
-// Ensure that write errors to the meta file handler during initialization are returned.
+// Ensure that write errors to the meta file handler during initialization are
+// returned.
func TestOpen_MetaInitWriteError(t *testing.T) {
// FIXME
// t.Skip("pending")
@@ -2716,12 +3207,15 @@ func TestOpen_FileTooSmall(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- if err := db.Close(); err != nil {
+
+ err = db.Close()
+ if err != nil {
t.Fatal(err)
}
// corrupt the database
- if err := os.Truncate(path, int64(os.Getpagesize())); err != nil {
+ err = os.Truncate(path, int64(os.Getpagesize()))
+ if err != nil {
t.Fatal(err)
}
@@ -2776,9 +3270,11 @@ func TestDB_Open_InitialMmapSize(t *testing.T) {
done := make(chan struct{})
go func() {
- if err := wtx.Commit(); err != nil {
+ err := wtx.Commit()
+ if err != nil {
t.Fatal(err)
}
+
done <- struct{}{}
}()
@@ -2788,7 +3284,8 @@ func TestDB_Open_InitialMmapSize(t *testing.T) {
case <-done:
}
- if err := rtx.Rollback(); err != nil {
+ err = rtx.Rollback()
+ if err != nil {
t.Fatal(err)
}
}
@@ -2796,7 +3293,8 @@ func TestDB_Open_InitialMmapSize(t *testing.T) {
// Ensure that a database cannot open a transaction when it's not open.
func TestDB_Begin_ErrDatabaseNotOpen(t *testing.T) {
var db DB
- if _, err := db.Begin(false); err != ErrDatabaseNotOpen {
+ _, err := db.Begin(false)
+ if err != ErrDatabaseNotOpen {
t.Fatalf("unexpected error: %s", err)
}
}
@@ -2820,7 +3318,8 @@ func TestDB_BeginRW(t *testing.T) {
t.Fatal("expected writable tx")
}
- if err := tx.Commit(); err != nil {
+ err = tx.Commit()
+ if err != nil {
t.Fatal(err)
}
}
@@ -2828,13 +3327,19 @@ func TestDB_BeginRW(t *testing.T) {
// Ensure that opening a transaction while the DB is closed returns an error.
func TestDB_BeginRW_Closed(t *testing.T) {
var db DB
- if _, err := db.Begin(true); err != ErrDatabaseNotOpen {
+ _, err := db.Begin(true)
+ if err != ErrDatabaseNotOpen {
t.Fatalf("unexpected error: %s", err)
}
}
-func TestDB_Close_PendingTx_RW(t *testing.T) { testDB_Close_PendingTx(t, true) }
-func TestDB_Close_PendingTx_RO(t *testing.T) { testDB_Close_PendingTx(t, false) }
+func TestDB_Close_PendingTx_RW(t *testing.T) {
+ testDB_Close_PendingTx(t, true)
+}
+
+func TestDB_Close_PendingTx_RO(t *testing.T) {
+ testDB_Close_PendingTx(t, false)
+}
// Ensure that a database cannot close while transactions are open.
func testDB_Close_PendingTx(t *testing.T, writable bool) {
@@ -2854,9 +3359,11 @@ func testDB_Close_PendingTx(t *testing.T, writable bool) {
// Open update in separate goroutine.
done := make(chan struct{})
go func() {
- if err := db.Close(); err != nil {
+ err := db.Close()
+ if err != nil {
t.Fatal(err)
}
+
close(done)
}()
@@ -2869,16 +3376,17 @@ func testDB_Close_PendingTx(t *testing.T, writable bool) {
}
// Commit transaction.
- if err := tx.Commit(); err != nil {
+ err = tx.Commit()
+ if err != nil {
t.Fatal(err)
}
// Ensure database closed now.
time.Sleep(100 * time.Millisecond)
select {
- case <-done:
- default:
- t.Fatal("database did not close")
+ case <-done:
+ default:
+ t.Fatal("database did not close")
}
}
@@ -2888,34 +3396,48 @@ func TestDB_Update(t *testing.T) {
defer db.MustClose()
defer os.Remove(db.Path())
- if err := db.Update(func(tx *Tx) error {
+ err := db.Update(func(tx *Tx) error {
b, err := tx.CreateBucket([]byte("widgets"))
if err != nil {
t.Fatal(err)
}
- if err := b.Put([]byte("foo"), []byte("bar")); err != nil {
+
+ err = b.Put([]byte("foo"), []byte("bar"))
+ if err != nil {
t.Fatal(err)
}
- if err := b.Put([]byte("baz"), []byte("bat")); err != nil {
+
+ err = b.Put([]byte("baz"), []byte("bat"))
+ if err != nil {
t.Fatal(err)
}
- if err := b.Delete([]byte("foo")); err != nil {
+
+ err = b.Delete([]byte("foo"))
+ if err != nil {
t.Fatal(err)
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
- if err := db.View(func(tx *Tx) error {
+
+ err = db.View(func(tx *Tx) error {
b := tx.Bucket([]byte("widgets"))
- if v := b.Get([]byte("foo")); v != nil {
+ v := b.Get([]byte("foo"))
+ if v != nil {
t.Fatalf("expected nil value, got: %v", v)
}
- if v := b.Get([]byte("baz")); !bytes.Equal(v, []byte("bat")) {
+
+ v = b.Get([]byte("baz"))
+ if !bytes.Equal(v, []byte("bat")) {
t.Fatalf("unexpected value: %v", v)
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
}
@@ -2923,12 +3445,15 @@ func TestDB_Update(t *testing.T) {
// Ensure a closed database returns an error while running a transaction block
func TestDB_Update_Closed(t *testing.T) {
var db DB
- if err := db.Update(func(tx *Tx) error {
- if _, err := tx.CreateBucket([]byte("widgets")); err != nil {
+ err := db.Update(func(tx *Tx) error {
+ _, err := tx.CreateBucket([]byte("widgets"))
+ if err != nil {
t.Fatal(err)
}
+
return nil
- }); err != ErrDatabaseNotOpen {
+ })
+ if err != ErrDatabaseNotOpen {
t.Fatalf("unexpected error: %s", err)
}
}
@@ -2940,20 +3465,23 @@ func TestDB_Update_ManualCommit(t *testing.T) {
defer os.Remove(db.Path())
var panicked bool
- if err := db.Update(func(tx *Tx) error {
+ err := db.Update(func(tx *Tx) error {
func() {
defer func() {
- if r := recover(); r != nil {
+ r := recover()
+ if r != nil {
panicked = true
}
}()
- if err := tx.Commit(); err != nil {
+ err := tx.Commit()
+ if err != nil {
t.Fatal(err)
}
}()
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
} else if !panicked {
t.Fatal("expected panic")
@@ -2967,20 +3495,23 @@ func TestDB_Update_ManualRollback(t *testing.T) {
defer os.Remove(db.Path())
var panicked bool
- if err := db.Update(func(tx *Tx) error {
+ err := db.Update(func(tx *Tx) error {
func() {
defer func() {
- if r := recover(); r != nil {
+ r := recover()
+ if r != nil {
panicked = true
}
}()
- if err := tx.Rollback(); err != nil {
+ err := tx.Rollback()
+ if err != nil {
t.Fatal(err)
}
}()
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
} else if !panicked {
t.Fatal("expected panic")
@@ -2994,20 +3525,24 @@ func TestDB_View_ManualCommit(t *testing.T) {
defer os.Remove(db.Path())
var panicked bool
- if err := db.View(func(tx *Tx) error {
+ err := db.View(func(tx *Tx) error {
func() {
defer func() {
- if r := recover(); r != nil {
+ r := recover()
+ if r != nil {
panicked = true
}
}()
- if err := tx.Commit(); err != nil {
+ err := tx.Commit()
+ if err != nil {
t.Fatal(err)
}
}()
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
} else if !panicked {
t.Fatal("expected panic")
@@ -3021,20 +3556,23 @@ func TestDB_View_ManualRollback(t *testing.T) {
defer os.Remove(db.Path())
var panicked bool
- if err := db.View(func(tx *Tx) error {
+ err := db.View(func(tx *Tx) error {
func() {
defer func() {
- if r := recover(); r != nil {
+ r := recover()
+ if r != nil {
panicked = true
}
}()
- if err := tx.Rollback(); err != nil {
+ err := tx.Rollback()
+ if err != nil {
t.Fatal(err)
}
}()
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
} else if !panicked {
t.Fatal("expected panic")
@@ -3050,51 +3588,61 @@ func TestDB_Update_Panic(t *testing.T) {
// Panic during update but recover.
func() {
defer func() {
- if r := recover(); r != nil {
+ r := recover()
+ if r != nil {
// t.Log("recover: update", r)
}
}()
- if err := db.Update(func(tx *Tx) error {
- if _, err := tx.CreateBucket([]byte("widgets")); err != nil {
+ err := db.Update(func(tx *Tx) error {
+ _, err := tx.CreateBucket([]byte("widgets"))
+ if err != nil {
t.Fatal(err)
}
+
panic("omg")
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
}()
// Verify we can update again.
- if err := db.Update(func(tx *Tx) error {
- if _, err := tx.CreateBucket([]byte("widgets")); err != nil {
+ err := db.Update(func(tx *Tx) error {
+ _, err := tx.CreateBucket([]byte("widgets"))
+ if err != nil {
t.Fatal(err)
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
// Verify that our change persisted.
- if err := db.Update(func(tx *Tx) error {
+ err = db.Update(func(tx *Tx) error {
if tx.Bucket([]byte("widgets")) == nil {
t.Fatal("expected bucket")
}
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
}
-// Ensure a database can return an error through a read-only transactional block.
+// Ensure a database can return an error through a read-only transactional
+// block.
func TestDB_View_Error(t *testing.T) {
db := MustOpenDB()
defer db.MustClose()
defer os.Remove(db.Path())
- if err := db.View(func(tx *Tx) error {
+ err := db.View(func(tx *Tx) error {
return errors.New("xxx")
- }); err == nil || err.Error() != "xxx" {
+ })
+ if err == nil || err.Error() != "xxx" {
t.Fatalf("unexpected error: %s", err)
}
}
@@ -3105,40 +3653,46 @@ func TestDB_View_Panic(t *testing.T) {
defer db.MustClose()
defer os.Remove(db.Path())
- if err := db.Update(func(tx *Tx) error {
- if _, err := tx.CreateBucket([]byte("widgets")); err != nil {
+ err := db.Update(func(tx *Tx) error {
+ _, err := tx.CreateBucket([]byte("widgets"))
+ if err != nil {
t.Fatal(err)
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
// Panic during view transaction but recover.
func() {
defer func() {
- if r := recover(); r != nil {
+ r := recover()
+ if r != nil {
// t.Log("recover: view", r)
}
}()
- if err := db.View(func(tx *Tx) error {
+ err := db.View(func(tx *Tx) error {
if tx.Bucket([]byte("widgets")) == nil {
t.Fatal("expected bucket")
}
panic("omg")
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
}()
// Verify that we can still use read transactions.
- if err := db.View(func(tx *Tx) error {
+ err = db.View(func(tx *Tx) error {
if tx.Bucket([]byte("widgets")) == nil {
t.Fatal("expected bucket")
}
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
}
@@ -3149,66 +3703,82 @@ func TestDB_Consistency(t *testing.T) {
defer db.MustClose()
defer os.Remove(db.Path())
- if err := db.Update(func(tx *Tx) error {
+ err := db.Update(func(tx *Tx) error {
_, err := tx.CreateBucket([]byte("widgets"))
return err
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
for i := 0; i < 10; i++ {
- if err := db.Update(func(tx *Tx) error {
- if err := tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar")); err != nil {
+ err := db.Update(func(tx *Tx) error {
+ err := tx.Bucket([]byte("widgets")).Put(
+ []byte("foo"),
+ []byte("bar"),
+ )
+ if err != nil {
t.Fatal(err)
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
}
- if err := db.Update(func(tx *Tx) error {
- if p, _ := tx.Page(0); p == nil {
+ err = db.Update(func(tx *Tx) error {
+ p, _ := tx.Page(0)
+ if p == nil {
t.Fatal("expected page")
} else if p.Type != "meta" {
t.Fatalf("unexpected page type: %s", p.Type)
}
- if p, _ := tx.Page(1); p == nil {
+ p, _ = tx.Page(1)
+ if p == nil {
t.Fatal("expected page")
} else if p.Type != "meta" {
t.Fatalf("unexpected page type: %s", p.Type)
}
- if p, _ := tx.Page(2); p == nil {
+ p, _ = tx.Page(2)
+ if p == nil {
t.Fatal("expected page")
} else if p.Type != "free" {
t.Fatalf("unexpected page type: %s", p.Type)
}
- if p, _ := tx.Page(3); p == nil {
+ p, _ = tx.Page(3)
+ if p == nil {
t.Fatal("expected page")
} else if p.Type != "free" {
t.Fatalf("unexpected page type: %s", p.Type)
}
- if p, _ := tx.Page(4); p == nil {
+ p, _ = tx.Page(4)
+ if p == nil {
t.Fatal("expected page")
} else if p.Type != "leaf" {
t.Fatalf("unexpected page type: %s", p.Type)
}
- if p, _ := tx.Page(5); p == nil {
+ p, _ = tx.Page(5)
+ if p == nil {
t.Fatal("expected page")
} else if p.Type != "freelist" {
t.Fatalf("unexpected page type: %s", p.Type)
}
- if p, _ := tx.Page(6); p != nil {
+ p, _ = tx.Page(6)
+ if p != nil {
t.Fatal("unexpected page")
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
}
@@ -3219,12 +3789,15 @@ func TestDB_Batch(t *testing.T) {
defer db.MustClose()
defer os.Remove(db.Path())
- if err := db.Update(func(tx *Tx) error {
- if _, err := tx.CreateBucket([]byte("widgets")); err != nil {
+ err := db.Update(func(tx *Tx) error {
+ _, err := tx.CreateBucket([]byte("widgets"))
+ if err != nil {
t.Fatal(err)
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
@@ -3234,28 +3807,34 @@ func TestDB_Batch(t *testing.T) {
for i := 0; i < n; i++ {
go func(i int) {
ch <- db.Batch(func(tx *Tx) error {
- return tx.Bucket([]byte("widgets")).Put(u64tob(uint64(i)), []byte{})
+ return tx.Bucket([]byte("widgets")).Put(
+ u64tob(uint64(i)),
+ []byte{},
+ )
})
}(i)
}
// Check all responses to make sure there's no error.
for i := 0; i < n; i++ {
- if err := <-ch; err != nil {
+ err := <-ch
+ if err != nil {
t.Fatal(err)
}
}
// Ensure data is correct.
- if err := db.View(func(tx *Tx) error {
+ err = db.View(func(tx *Tx) error {
b := tx.Bucket([]byte("widgets"))
for i := 0; i < n; i++ {
- if v := b.Get(u64tob(uint64(i))); v == nil {
+ v := b.Get(u64tob(uint64(i)))
+ if v == nil {
t.Errorf("key not found: %d", i)
}
}
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
}
@@ -3265,15 +3844,18 @@ func TestDB_Batch_Panic(t *testing.T) {
defer db.MustClose()
defer os.Remove(db.Path())
- var sentinel int
- var bork = &sentinel
- var problem interface{}
- var err error
+ var (
+ sentinel int
+ problem interface{}
+ err error
+ )
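+
+	// bork is a distinct pointer value; the recovered panic value is
+	// compared against it below.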
+ bork := &sentinel
// Execute a function inside a batch that panics.
func() {
defer func() {
- if p := recover(); p != nil {
+ p := recover()
+ if p != nil {
problem = p
}
}()
@@ -3283,12 +3865,14 @@ func TestDB_Batch_Panic(t *testing.T) {
}()
// Verify there is no error.
- if g, e := err, error(nil); g != e {
+ g, e := err, error(nil)
+ if g != e {
t.Fatalf("wrong error: %v != %v", g, e)
}
// Verify the panic was captured.
- if g, e := problem, bork; g != e {
- t.Fatalf("wrong error: %v != %v", g, e)
+ p, b := problem, bork
+ if p != b {
+ t.Fatalf("wrong error: %v != %v", p, b)
}
}
@@ -3297,10 +3881,11 @@ func TestDB_BatchFull(t *testing.T) {
defer db.MustClose()
defer os.Remove(db.Path())
- if err := db.Update(func(tx *Tx) error {
+ err := db.Update(func(tx *Tx) error {
_, err := tx.CreateBucket([]byte("widgets"))
return err
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
@@ -3309,7 +3894,10 @@ func TestDB_BatchFull(t *testing.T) {
ch := make(chan error, size)
put := func(i int) {
ch <- db.Batch(func(tx *Tx) error {
- return tx.Bucket([]byte("widgets")).Put(u64tob(uint64(i)), []byte{})
+ return tx.Bucket([]byte("widgets")).Put(
+ u64tob(uint64(i)),
+ []byte{},
+ )
})
}
@@ -3334,21 +3922,25 @@ func TestDB_BatchFull(t *testing.T) {
// Check all responses to make sure there's no error.
for i := 0; i < size; i++ {
- if err := <-ch; err != nil {
+ err := <-ch
+ if err != nil {
t.Fatal(err)
}
}
// Ensure data is correct.
- if err := db.View(func(tx *Tx) error {
+ err = db.View(func(tx *Tx) error {
b := tx.Bucket([]byte("widgets"))
for i := 1; i <= size; i++ {
- if v := b.Get(u64tob(uint64(i))); v == nil {
+ v := b.Get(u64tob(uint64(i)))
+ if v == nil {
t.Errorf("key not found: %d", i)
}
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
}
@@ -3358,10 +3950,11 @@ func TestDB_BatchTime(t *testing.T) {
defer db.MustClose()
defer os.Remove(db.Path())
- if err := db.Update(func(tx *Tx) error {
+ err := db.Update(func(tx *Tx) error {
_, err := tx.CreateBucket([]byte("widgets"))
return err
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
@@ -3370,7 +3963,10 @@ func TestDB_BatchTime(t *testing.T) {
ch := make(chan error, size)
put := func(i int) {
ch <- db.Batch(func(tx *Tx) error {
- return tx.Bucket([]byte("widgets")).Put(u64tob(uint64(i)), []byte{})
+ return tx.Bucket([]byte("widgets")).Put(
+ u64tob(uint64(i)),
+ []byte{},
+ )
})
}
@@ -3389,7 +3985,7 @@ func TestDB_BatchTime(t *testing.T) {
}
// Ensure data is correct.
- if err := db.View(func(tx *Tx) error {
+ err = db.View(func(tx *Tx) error {
b := tx.Bucket([]byte("widgets"))
for i := 1; i <= size; i++ {
if v := b.Get(u64tob(uint64(i))); v == nil {
@@ -3397,7 +3993,8 @@ func TestDB_BatchTime(t *testing.T) {
}
}
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
}
@@ -3411,30 +4008,36 @@ func ExampleDB_Update() {
defer os.Remove(db.Path())
// Execute several commands within a read-write transaction.
- if err := db.Update(func(tx *Tx) error {
+ err = db.Update(func(tx *Tx) error {
b, err := tx.CreateBucket([]byte("widgets"))
if err != nil {
return err
}
- if err := b.Put([]byte("foo"), []byte("bar")); err != nil {
+
+ err = b.Put([]byte("foo"), []byte("bar"))
+ if err != nil {
return err
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
log.Fatal(err)
}
// Read the value back from a separate read-only transaction.
- if err := db.View(func(tx *Tx) error {
+ err = db.View(func(tx *Tx) error {
value := tx.Bucket([]byte("widgets")).Get([]byte("foo"))
fmt.Printf("The value of 'foo' is: %s\n", value)
return nil
- }); err != nil {
+ })
+ if err != nil {
log.Fatal(err)
}
// Close database to release the file lock.
- if err := db.Close(); err != nil {
+ err = db.Close()
+ if err != nil {
log.Fatal(err)
}
@@ -3451,33 +4054,41 @@ func ExampleDB_View() {
defer os.Remove(db.Path())
// Insert data into a bucket.
- if err := db.Update(func(tx *Tx) error {
+ err = db.Update(func(tx *Tx) error {
b, err := tx.CreateBucket([]byte("people"))
if err != nil {
return err
}
- if err := b.Put([]byte("john"), []byte("doe")); err != nil {
+
+ err = b.Put([]byte("john"), []byte("doe"))
+ if err != nil {
return err
}
- if err := b.Put([]byte("susy"), []byte("que")); err != nil {
+
+ err = b.Put([]byte("susy"), []byte("que"))
+ if err != nil {
return err
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
log.Fatal(err)
}
// Access data from within a read-only transactional block.
- if err := db.View(func(tx *Tx) error {
+ err = db.View(func(tx *Tx) error {
v := tx.Bucket([]byte("people")).Get([]byte("john"))
fmt.Printf("John's last name is %s.\n", v)
return nil
- }); err != nil {
+ })
+ if err != nil {
log.Fatal(err)
}
// Close database to release the file lock.
- if err := db.Close(); err != nil {
+ err = db.Close()
+ if err != nil {
log.Fatal(err)
}
@@ -3494,10 +4105,11 @@ func ExampleDB_Begin_ReadOnly() {
defer os.Remove(db.Path())
// Create a bucket using a read-write transaction.
- if err := db.Update(func(tx *Tx) error {
+ err = db.Update(func(tx *Tx) error {
_, err := tx.CreateBucket([]byte("widgets"))
return err
- }); err != nil {
+ })
+ if err != nil {
log.Fatal(err)
}
@@ -3506,17 +4118,25 @@ func ExampleDB_Begin_ReadOnly() {
if err != nil {
log.Fatal(err)
}
+
b := tx.Bucket([]byte("widgets"))
- if err := b.Put([]byte("john"), []byte("blue")); err != nil {
+ err = b.Put([]byte("john"), []byte("blue"))
+ if err != nil {
log.Fatal(err)
}
- if err := b.Put([]byte("abby"), []byte("red")); err != nil {
+
+ err = b.Put([]byte("abby"), []byte("red"))
+ if err != nil {
log.Fatal(err)
}
- if err := b.Put([]byte("zephyr"), []byte("purple")); err != nil {
+
+ err = b.Put([]byte("zephyr"), []byte("purple"))
+ if err != nil {
log.Fatal(err)
}
- if err := tx.Commit(); err != nil {
+
+ err = tx.Commit()
+ if err != nil {
log.Fatal(err)
}
@@ -3530,11 +4150,13 @@ func ExampleDB_Begin_ReadOnly() {
fmt.Printf("%s likes %s\n", k, v)
}
- if err := tx.Rollback(); err != nil {
+ err = tx.Rollback()
+ if err != nil {
log.Fatal(err)
}
- if err := db.Close(); err != nil {
+ err = db.Close()
+ if err != nil {
log.Fatal(err)
}
@@ -3549,10 +4171,11 @@ func BenchmarkDBBatchAutomatic(b *testing.B) {
defer db.MustClose()
defer os.Remove(db.Path())
- if err := db.Update(func(tx *Tx) error {
+ err := db.Update(func(tx *Tx) error {
_, err := tx.CreateBucket([]byte("bench"))
return err
- }); err != nil {
+ })
+ if err != nil {
b.Fatal(err)
}
@@ -3577,7 +4200,9 @@ func BenchmarkDBBatchAutomatic(b *testing.B) {
b := tx.Bucket([]byte("bench"))
return b.Put(k, []byte("filler"))
}
- if err := db.Batch(insert); err != nil {
+
+ err := db.Batch(insert)
+ if err != nil {
b.Error(err)
return
}
@@ -3596,10 +4221,11 @@ func BenchmarkDBBatchSingle(b *testing.B) {
defer db.MustClose()
defer os.Remove(db.Path())
- if err := db.Update(func(tx *Tx) error {
+ err := db.Update(func(tx *Tx) error {
_, err := tx.CreateBucket([]byte("bench"))
return err
- }); err != nil {
+ })
+ if err != nil {
b.Fatal(err)
}
@@ -3623,7 +4249,9 @@ func BenchmarkDBBatchSingle(b *testing.B) {
b := tx.Bucket([]byte("bench"))
return b.Put(k, []byte("filler"))
}
- if err := db.Update(insert); err != nil {
+
+ err := db.Update(insert)
+ if err != nil {
b.Error(err)
return
}
@@ -3642,10 +4270,11 @@ func BenchmarkDBBatchManual10x100(b *testing.B) {
defer db.MustClose()
defer os.Remove(db.Path())
- if err := db.Update(func(tx *Tx) error {
+ err := db.Update(func(tx *Tx) error {
_, err := tx.CreateBucket([]byte("bench"))
return err
- }); err != nil {
+ })
+ if err != nil {
b.Fatal(err)
}
@@ -3663,19 +4292,29 @@ func BenchmarkDBBatchManual10x100(b *testing.B) {
insert100 := func(tx *Tx) error {
h := fnv.New32a()
buf := make([]byte, 4)
- for minor := uint32(0); minor < 100; minor++ {
- binary.LittleEndian.PutUint32(buf, uint32(id*100+minor))
+ for minor := 0; minor < 100; minor++ {
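+		// n is unique per goroutine and iteration; its hash becomes the key.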
+ n := (id * 100) + uint32(minor)
+		binary.LittleEndian.PutUint32(buf, n)
h.Reset()
_, _ = h.Write(buf[:])
k := h.Sum(nil)
b := tx.Bucket([]byte("bench"))
- if err := b.Put(k, []byte("filler")); err != nil {
+			err := b.Put(k, []byte("filler"))
+ if err != nil {
return err
}
}
return nil
}
- if err := db.Update(insert100); err != nil {
+
+ err := db.Update(insert100)
+ if err != nil {
b.Fatal(err)
}
}(uint32(major))
@@ -3689,7 +4328,7 @@ func BenchmarkDBBatchManual10x100(b *testing.B) {
}
func validateBatchBench(b *testing.B, db *WDB) {
- var rollback = errors.New("sentinel error to cause rollback")
+ rollback := errors.New("sentinel error to cause rollback")
validate := func(tx *Tx) error {
bucket := tx.Bucket([]byte("bench"))
h := fnv.New32a()
@@ -3704,10 +4343,20 @@ func validateBatchBench(b *testing.B, db *WDB) {
b.Errorf("not found id=%d key=%x", id, k)
continue
}
- if g, e := v, []byte("filler"); !bytes.Equal(g, e) {
- b.Errorf("bad value for id=%d key=%x: %s != %q", id, k, g, e)
+
+ g, e := v, []byte("filler")
+ if !bytes.Equal(g, e) {
+ b.Errorf(
+ "bad value for id=%d key=%x: %s != %q",
+ id,
+ k,
+ g,
+ e,
+ )
}
- if err := bucket.Delete(k); err != nil {
+
+ err := bucket.Delete(k)
+ if err != nil {
return err
}
}
@@ -3718,7 +4367,9 @@ func validateBatchBench(b *testing.B, db *WDB) {
}
return rollback
}
- if err := db.Update(validate); err != nil && err != rollback {
+
+ err := db.Update(validate)
+ if err != nil && err != rollback {
b.Error(err)
}
}
@@ -3748,19 +4399,23 @@ func (db *WDB) Close() error {
return db.DB.Close()
}
-// MustClose closes the database and deletes the underlying file. Panic on error.
+// MustClose closes the database and deletes the underlying file. Panic on
+// error.
func (db *WDB) MustClose() {
- if err := db.Close(); err != nil {
+ err := db.Close()
+ if err != nil {
panic(err)
}
+
os.Remove(db.Path())
}
-// MustCheck runs a consistency check on the database and panics if any errors are found.
+// MustCheck runs a consistency check on the database and panics if any errors
+// are found.
func (db *WDB) MustCheck() {
- if err := db.Update(func(tx *Tx) error {
+ err := db.Update(func(tx *Tx) error {
// Collect all the errors.
- var errors []error
+ errors := []error{}
for err := range tx.Check() {
errors = append(errors, err)
if len(errors) > 10 {
@@ -3770,15 +4425,19 @@ func (db *WDB) MustCheck() {
// If errors occurred, copy the DB and print the errors.
if len(errors) > 0 {
- var path = tempfile()
+ path := tempfile()
defer os.Remove(path)
- if err := tx.CopyFile(path, 0600); err != nil {
+ err := tx.CopyFile(path, 0600)
+ if err != nil {
panic(err)
}
// Print errors.
fmt.Print("\n\n")
- fmt.Printf("consistency check failed (%d errors)\n", len(errors))
+ fmt.Printf(
+ "consistency check failed (%d errors)\n",
+ len(errors),
+ )
for _, err := range errors {
fmt.Println(err)
}
@@ -3790,7 +4449,8 @@ func (db *WDB) MustCheck() {
}
return nil
- }); err != nil && err != ErrDatabaseNotOpen {
+ })
+ if err != nil && err != ErrDatabaseNotOpen {
panic(err)
}
}
@@ -3798,11 +4458,13 @@ func (db *WDB) MustCheck() {
// CopyTempFile copies a database to a temporary file.
func (db *WDB) CopyTempFile() {
path := tempfile()
- if err := db.View(func(tx *Tx) error {
+ err := db.View(func(tx *Tx) error {
return tx.CopyFile(path, 0600)
- }); err != nil {
+ })
+ if err != nil {
panic(err)
}
+
fmt.Println("db copied to: ", path)
}
@@ -3813,9 +4475,8 @@ func getTempdir() string {
shm = "/dev/shm"
tmp = "/tmp"
)
- var dir string
- dir = os.Getenv("XDG_RUNTIME_DIR")
+ dir := os.Getenv("XDG_RUNTIME_DIR")
if dir != "" {
return dir
}
@@ -3839,46 +4500,61 @@ func tempfile() string {
panic(err)
}
- if err := f.Close(); err != nil {
+ err = f.Close()
+ if err != nil {
panic(err)
}
- if err := os.Remove(f.Name()); err != nil {
+
+ err = os.Remove(f.Name())
+ if err != nil {
panic(err)
}
+
return f.Name()
}
// mustContainKeys checks that a bucket contains a given set of keys.
func mustContainKeys(b *Bucket, m map[string]string) {
found := make(map[string]string)
- if err := b.ForEach(func(k, _ []byte) error {
+ err := b.ForEach(func(k, _ []byte) error {
found[string(k)] = ""
return nil
- }); err != nil {
+ })
+ if err != nil {
panic(err)
}
// Check for keys found in bucket that shouldn't be there.
- var keys []string
+ keys := []string{}
for k, _ := range found {
- if _, ok := m[string(k)]; !ok {
+ _, ok := m[string(k)]
+ if !ok {
keys = append(keys, k)
}
}
if len(keys) > 0 {
sort.Strings(keys)
- panic(fmt.Sprintf("keys found(%d): %s", len(keys), strings.Join(keys, ",")))
+ panic(fmt.Sprintf(
+ "keys found(%d): %s",
+ len(keys),
+ strings.Join(keys, ","),
+ ))
}
// Check for keys not found in bucket that should be there.
for k, _ := range m {
- if _, ok := found[string(k)]; !ok {
+ _, ok := found[string(k)]
+ if !ok {
keys = append(keys, k)
}
}
if len(keys) > 0 {
sort.Strings(keys)
- panic(fmt.Sprintf("keys not found(%d): %s", len(keys), strings.Join(keys, ",")))
+ panic(fmt.Sprintf(
+ "keys not found(%d): %s",
+ len(keys),
+ strings.Join(keys, ","),
+ ))
}
}
@@ -3890,7 +4566,10 @@ func trunc(b []byte, length int) []byte {
}
func truncDuration(d time.Duration) string {
- return regexp.MustCompile(`^(\d+)(\.\d+)`).ReplaceAllString(d.String(), "$1")
+ return regexp.MustCompile(`^(\d+)(\.\d+)`).ReplaceAllString(
+ d.String(),
+ "$1",
+ )
}
func fileSize(path string) int64 {
@@ -3916,7 +4595,7 @@ func TestFreelist_free(t *testing.T) {
f := newFreelist()
f.free(100, &page{id: 12})
if !reflect.DeepEqual([]pgid{12}, f.pending[100]) {
- t.Fatalf("exp=%v; got=%v", []pgid{12}, f.pending[100])
+ t.Fatalf("exp=%v, got=%v", []pgid{12}, f.pending[100])
}
}
@@ -3924,8 +4603,9 @@ func TestFreelist_free(t *testing.T) {
func TestFreelist_free_overflow(t *testing.T) {
f := newFreelist()
f.free(100, &page{id: 12, overflow: 3})
- if exp := []pgid{12, 13, 14, 15}; !reflect.DeepEqual(exp, f.pending[100]) {
- t.Fatalf("exp=%v; got=%v", exp, f.pending[100])
+ exp := []pgid{12, 13, 14, 15}
+ if !reflect.DeepEqual(exp, f.pending[100]) {
+ t.Fatalf("exp=%v, got=%v", exp, f.pending[100])
}
}
@@ -3937,55 +4617,79 @@ func TestFreelist_release(t *testing.T) {
f.free(102, &page{id: 39})
f.release(100)
f.release(101)
- if exp := []pgid{9, 12, 13}; !reflect.DeepEqual(exp, f.ids) {
- t.Fatalf("exp=%v; got=%v", exp, f.ids)
+ exp := []pgid{9, 12, 13}
+ if !reflect.DeepEqual(exp, f.ids) {
+ t.Fatalf("exp=%v, got=%v", exp, f.ids)
}
f.release(102)
- if exp := []pgid{9, 12, 13, 39}; !reflect.DeepEqual(exp, f.ids) {
- t.Fatalf("exp=%v; got=%v", exp, f.ids)
+ exp = []pgid{9, 12, 13, 39}
+ if !reflect.DeepEqual(exp, f.ids) {
+ t.Fatalf("exp=%v, got=%v", exp, f.ids)
}
}
// Ensure that a freelist can find contiguous blocks of pages.
func TestFreelist_allocate(t *testing.T) {
f := &freelist{ids: []pgid{3, 4, 5, 6, 7, 9, 12, 13, 18}}
- if id := int(f.allocate(3)); id != 3 {
- t.Fatalf("exp=3; got=%v", id)
+ id := int(f.allocate(3))
+ if id != 3 {
+ t.Fatalf("exp=3, got=%v", id)
}
- if id := int(f.allocate(1)); id != 6 {
- t.Fatalf("exp=6; got=%v", id)
+
+ id = int(f.allocate(1))
+ if id != 6 {
+ t.Fatalf("exp=6, got=%v", id)
}
- if id := int(f.allocate(3)); id != 0 {
- t.Fatalf("exp=0; got=%v", id)
+
+ id = int(f.allocate(3))
+ if id != 0 {
+ t.Fatalf("exp=0, got=%v", id)
}
- if id := int(f.allocate(2)); id != 12 {
- t.Fatalf("exp=12; got=%v", id)
+
+ id = int(f.allocate(2))
+ if id != 12 {
+ t.Fatalf("exp=12, got=%v", id)
}
- if id := int(f.allocate(1)); id != 7 {
- t.Fatalf("exp=7; got=%v", id)
+
+ id = int(f.allocate(1))
+ if id != 7 {
+ t.Fatalf("exp=7, got=%v", id)
}
- if id := int(f.allocate(0)); id != 0 {
- t.Fatalf("exp=0; got=%v", id)
+
+ id = int(f.allocate(0))
+ if id != 0 {
+ t.Fatalf("exp=0, got=%v", id)
}
- if id := int(f.allocate(0)); id != 0 {
- t.Fatalf("exp=0; got=%v", id)
+
+ id = int(f.allocate(0))
+ if id != 0 {
+ t.Fatalf("exp=0, got=%v", id)
}
- if exp := []pgid{9, 18}; !reflect.DeepEqual(exp, f.ids) {
- t.Fatalf("exp=%v; got=%v", exp, f.ids)
+
+ exp := []pgid{9, 18}
+ if !reflect.DeepEqual(exp, f.ids) {
+ t.Fatalf("exp=%v, got=%v", exp, f.ids)
}
- if id := int(f.allocate(1)); id != 9 {
- t.Fatalf("exp=9; got=%v", id)
+ id = int(f.allocate(1))
+ if id != 9 {
+ t.Fatalf("exp=9, got=%v", id)
}
- if id := int(f.allocate(1)); id != 18 {
- t.Fatalf("exp=18; got=%v", id)
+
+ id = int(f.allocate(1))
+ if id != 18 {
+ t.Fatalf("exp=18, got=%v", id)
}
- if id := int(f.allocate(1)); id != 0 {
- t.Fatalf("exp=0; got=%v", id)
+
+ id = int(f.allocate(1))
+ if id != 0 {
+ t.Fatalf("exp=0, got=%v", id)
}
- if exp := []pgid{}; !reflect.DeepEqual(exp, f.ids) {
- t.Fatalf("exp=%v; got=%v", exp, f.ids)
+
+ exp = []pgid{}
+ if !reflect.DeepEqual(exp, f.ids) {
+ t.Fatalf("exp=%v, got=%v", exp, f.ids)
}
}
@@ -4007,8 +4711,9 @@ func TestFreelist_read(t *testing.T) {
f.read(page)
// Ensure that there are two page ids in the freelist.
- if exp := []pgid{23, 50}; !reflect.DeepEqual(exp, f.ids) {
- t.Fatalf("exp=%v; got=%v", exp, f.ids)
+ exp := []pgid{23, 50}
+ if !reflect.DeepEqual(exp, f.ids) {
+ t.Fatalf("exp=%v, got=%v", exp, f.ids)
}
}
@@ -4020,7 +4725,9 @@ func TestFreelist_write(t *testing.T) {
f.pending[100] = []pgid{28, 11}
f.pending[101] = []pgid{3}
p := (*page)(unsafe.Pointer(&buf[0]))
- if err := f.write(p); err != nil {
+
+ err := f.write(p)
+ if err != nil {
t.Fatal(err)
}
@@ -4030,15 +4737,27 @@ func TestFreelist_write(t *testing.T) {
// Ensure that the freelist is correct.
// All pages should be present and in reverse order.
- if exp := []pgid{3, 11, 12, 28, 39}; !reflect.DeepEqual(exp, f2.ids) {
- t.Fatalf("exp=%v; got=%v", exp, f2.ids)
+ exp := []pgid{3, 11, 12, 28, 39}
+ if !reflect.DeepEqual(exp, f2.ids) {
+ t.Fatalf("exp=%v, got=%v", exp, f2.ids)
}
}
-func Benchmark_FreelistRelease10K(b *testing.B) { benchmark_FreelistRelease(b, 10000) }
-func Benchmark_FreelistRelease100K(b *testing.B) { benchmark_FreelistRelease(b, 100000) }
-func Benchmark_FreelistRelease1000K(b *testing.B) { benchmark_FreelistRelease(b, 1000000) }
-func Benchmark_FreelistRelease10000K(b *testing.B) { benchmark_FreelistRelease(b, 10000000) }
+func Benchmark_FreelistRelease10K(b *testing.B) {
+ benchmark_FreelistRelease(b, 10000)
+}
+
+func Benchmark_FreelistRelease100K(b *testing.B) {
+ benchmark_FreelistRelease(b, 100000)
+}
+
+func Benchmark_FreelistRelease1000K(b *testing.B) {
+ benchmark_FreelistRelease(b, 1000000)
+}
+
+func Benchmark_FreelistRelease10000K(b *testing.B) {
+ benchmark_FreelistRelease(b, 10000000)
+}
func benchmark_FreelistRelease(b *testing.B, size int) {
ids := randomPgids(size)
@@ -4060,26 +4779,53 @@ func randomPgids(n int) []pgid {
return pgids
}
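+// puts converts its string arguments to byte slices before calling node.put,
+// keeping the test setup calls below short.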
+func puts(
+ n *node,
+ oldKey string,
+ newKey string,
+ value string,
+ pgid pgid,
+ flags uint32,
+) {
+ n.put([]byte(oldKey), []byte(newKey), []byte(value), pgid, flags)
+}
+
// Ensure that a node can insert a key/value.
func TestNode_put(t *testing.T) {
- n := &node{inodes: make(inodes, 0), bucket: &Bucket{tx: &Tx{meta: &meta{pgid: 1}}}}
- n.put([]byte("baz"), []byte("baz"), []byte("2"), 0, 0)
- n.put([]byte("foo"), []byte("foo"), []byte("0"), 0, 0)
- n.put([]byte("bar"), []byte("bar"), []byte("1"), 0, 0)
- n.put([]byte("foo"), []byte("foo"), []byte("3"), 0, leafPageFlag)
+ n := &node{
+ inodes: make(inodes, 0),
+ bucket: &Bucket{
+ tx: &Tx{
+ meta: &meta{
+ pgid: 1,
+ },
+ },
+ },
+ }
+ puts(n, "baz", "baz", "2", 0, 0)
+ puts(n, "foo", "foo", "0", 0, 0)
+ puts(n, "bar", "bar", "1", 0, 0)
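+	// The repeated "foo" key should replace the earlier entry, leaving
+	// three inodes.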
+ puts(n, "foo", "foo", "3", 0, leafPageFlag)
if len(n.inodes) != 3 {
- t.Fatalf("exp=3; got=%d", len(n.inodes))
+ t.Fatalf("exp=3, got=%d", len(n.inodes))
}
- if k, v := n.inodes[0].key, n.inodes[0].value; string(k) != "bar" || string(v) != "1" {
- t.Fatalf("exp=<bar,1>; got=<%s,%s>", k, v)
+
+ k, v := n.inodes[0].key, n.inodes[0].value
+ if string(k) != "bar" || string(v) != "1" {
+ t.Fatalf("exp=<bar,1>, got=<%s,%s>", k, v)
}
- if k, v := n.inodes[1].key, n.inodes[1].value; string(k) != "baz" || string(v) != "2" {
- t.Fatalf("exp=<baz,2>; got=<%s,%s>", k, v)
+
+ k, v = n.inodes[1].key, n.inodes[1].value
+ if string(k) != "baz" || string(v) != "2" {
+ t.Fatalf("exp=<baz,2>, got=<%s,%s>", k, v)
}
- if k, v := n.inodes[2].key, n.inodes[2].value; string(k) != "foo" || string(v) != "3" {
- t.Fatalf("exp=<foo,3>; got=<%s,%s>", k, v)
+
+ k, v = n.inodes[2].key, n.inodes[2].value
+ if string(k) != "foo" || string(v) != "3" {
+ t.Fatalf("exp=<foo,3>, got=<%s,%s>", k, v)
}
+
if n.inodes[2].flags != uint32(leafPageFlag) {
t.Fatalf("not a leaf: %d", n.inodes[2].flags)
}
@@ -4095,8 +4841,12 @@ func TestNode_read_LeafPage(t *testing.T) {
// Insert 2 elements at the beginning. sizeof(leafPageElement) == 16
nodes := (*[3]leafPageElement)(unsafe.Pointer(&page.ptr))
- nodes[0] = leafPageElement{flags: 0, pos: 32, ksize: 3, vsize: 4} // pos = sizeof(leafPageElement) * 2
- nodes[1] = leafPageElement{flags: 0, pos: 23, ksize: 10, vsize: 3} // pos = sizeof(leafPageElement) + 3 + 4
+
+ // pos = sizeof(leafPageElement) * 2
+ nodes[0] = leafPageElement{flags: 0, pos: 32, ksize: 3, vsize: 4}
+
+ // pos = sizeof(leafPageElement) + 3 + 4
+ nodes[1] = leafPageElement{flags: 0, pos: 23, ksize: 10, vsize: 3}
// Write data for the nodes at the end.
data := (*[4096]byte)(unsafe.Pointer(&nodes[2]))
@@ -4111,24 +4861,40 @@ func TestNode_read_LeafPage(t *testing.T) {
if !n.isLeaf {
t.Fatal("expected leaf")
}
+
if len(n.inodes) != 2 {
- t.Fatalf("exp=2; got=%d", len(n.inodes))
+ t.Fatalf("exp=2, got=%d", len(n.inodes))
}
- if k, v := n.inodes[0].key, n.inodes[0].value; string(k) != "bar" || string(v) != "fooz" {
- t.Fatalf("exp=<bar,fooz>; got=<%s,%s>", k, v)
+
+ k, v := n.inodes[0].key, n.inodes[0].value
+ if string(k) != "bar" || string(v) != "fooz" {
+ t.Fatalf("exp=<bar,fooz>, got=<%s,%s>", k, v)
}
- if k, v := n.inodes[1].key, n.inodes[1].value; string(k) != "helloworld" || string(v) != "bye" {
- t.Fatalf("exp=<helloworld,bye>; got=<%s,%s>", k, v)
+
+ k, v = n.inodes[1].key, n.inodes[1].value
+ if string(k) != "helloworld" || string(v) != "bye" {
+ t.Fatalf("exp=<helloworld,bye>, got=<%s,%s>", k, v)
}
}
// Ensure that a node can serialize into a leaf page.
func TestNode_write_LeafPage(t *testing.T) {
// Create a node.
- n := &node{isLeaf: true, inodes: make(inodes, 0), bucket: &Bucket{tx: &Tx{db: &DB{}, meta: &meta{pgid: 1}}}}
- n.put([]byte("susy"), []byte("susy"), []byte("que"), 0, 0)
- n.put([]byte("ricki"), []byte("ricki"), []byte("lake"), 0, 0)
- n.put([]byte("john"), []byte("john"), []byte("johnson"), 0, 0)
+ n := &node{
+ isLeaf: true,
+ inodes: make(inodes, 0),
+ bucket: &Bucket{
+ tx: &Tx{
+ db: &DB{},
+ meta: &meta{
+ pgid: 1,
+ },
+ },
+ },
+ }
+ puts(n, "susy", "susy", "que", 0, 0)
+ puts(n, "ricki", "ricki", "lake", 0, 0)
+ puts(n, "john", "john", "johnson", 0, 0)
// Write it to a page.
var buf [4096]byte
@@ -4141,50 +4907,77 @@ func TestNode_write_LeafPage(t *testing.T) {
// Check that the two pages are the same.
if len(n2.inodes) != 3 {
- t.Fatalf("exp=3; got=%d", len(n2.inodes))
+ t.Fatalf("exp=3, got=%d", len(n2.inodes))
}
- if k, v := n2.inodes[0].key, n2.inodes[0].value; string(k) != "john" || string(v) != "johnson" {
- t.Fatalf("exp=<john,johnson>; got=<%s,%s>", k, v)
+
+ k, v := n2.inodes[0].key, n2.inodes[0].value
+ if string(k) != "john" || string(v) != "johnson" {
+ t.Fatalf("exp=<john,johnson>, got=<%s,%s>", k, v)
}
- if k, v := n2.inodes[1].key, n2.inodes[1].value; string(k) != "ricki" || string(v) != "lake" {
- t.Fatalf("exp=<ricki,lake>; got=<%s,%s>", k, v)
+
+ k, v = n2.inodes[1].key, n2.inodes[1].value
+ if string(k) != "ricki" || string(v) != "lake" {
+ t.Fatalf("exp=<ricki,lake>, got=<%s,%s>", k, v)
}
- if k, v := n2.inodes[2].key, n2.inodes[2].value; string(k) != "susy" || string(v) != "que" {
- t.Fatalf("exp=<susy,que>; got=<%s,%s>", k, v)
+
+ k, v = n2.inodes[2].key, n2.inodes[2].value
+ if string(k) != "susy" || string(v) != "que" {
+ t.Fatalf("exp=<susy,que>, got=<%s,%s>", k, v)
}
}
// Ensure that a node can split into appropriate subgroups.
func TestNode_split(t *testing.T) {
// Create a node.
- n := &node{inodes: make(inodes, 0), bucket: &Bucket{tx: &Tx{db: &DB{}, meta: &meta{pgid: 1}}}}
- n.put([]byte("00000001"), []byte("00000001"), []byte("0123456701234567"), 0, 0)
- n.put([]byte("00000002"), []byte("00000002"), []byte("0123456701234567"), 0, 0)
- n.put([]byte("00000003"), []byte("00000003"), []byte("0123456701234567"), 0, 0)
- n.put([]byte("00000004"), []byte("00000004"), []byte("0123456701234567"), 0, 0)
- n.put([]byte("00000005"), []byte("00000005"), []byte("0123456701234567"), 0, 0)
+ n := &node{
+ inodes: make(inodes, 0),
+ bucket: &Bucket{
+ tx: &Tx{
+ db: &DB{},
+ meta: &meta{
+ pgid: 1,
+ },
+ },
+ },
+ }
+ puts(n, "00000001", "00000001", "0123456701234567", 0, 0)
+ puts(n, "00000002", "00000002", "0123456701234567", 0, 0)
+ puts(n, "00000003", "00000003", "0123456701234567", 0, 0)
+ puts(n, "00000004", "00000004", "0123456701234567", 0, 0)
+ puts(n, "00000005", "00000005", "0123456701234567", 0, 0)
// Split between 2 & 3.
n.split(100)
- var parent = n.parent
+ parent := n.parent
if len(parent.children) != 2 {
- t.Fatalf("exp=2; got=%d", len(parent.children))
+ t.Fatalf("exp=2, got=%d", len(parent.children))
}
if len(parent.children[0].inodes) != 2 {
- t.Fatalf("exp=2; got=%d", len(parent.children[0].inodes))
+ t.Fatalf("exp=2, got=%d", len(parent.children[0].inodes))
}
if len(parent.children[1].inodes) != 3 {
- t.Fatalf("exp=3; got=%d", len(parent.children[1].inodes))
+ t.Fatalf("exp=3, got=%d", len(parent.children[1].inodes))
}
}
-// Ensure that a page with the minimum number of inodes just returns a single node.
+// Ensure that a page with the minimum number of inodes just returns a single
+// node.
func TestNode_split_MinKeys(t *testing.T) {
// Create a node.
- n := &node{inodes: make(inodes, 0), bucket: &Bucket{tx: &Tx{db: &DB{}, meta: &meta{pgid: 1}}}}
- n.put([]byte("00000001"), []byte("00000001"), []byte("0123456701234567"), 0, 0)
- n.put([]byte("00000002"), []byte("00000002"), []byte("0123456701234567"), 0, 0)
+ n := &node{
+ inodes: make(inodes, 0),
+ bucket: &Bucket{
+ tx: &Tx{
+ db: &DB{},
+ meta: &meta{
+ pgid: 1,
+ },
+ },
+ },
+ }
+ puts(n, "00000001", "00000001", "0123456701234567", 0, 0)
+ puts(n, "00000002", "00000002", "0123456701234567", 0, 0)
// Split.
n.split(20)
@@ -4193,15 +4986,26 @@ func TestNode_split_MinKeys(t *testing.T) {
}
}
-// Ensure that a node that has keys that all fit on a page just returns one leaf.
+// Ensure that a node that has keys that all fit on a page just returns one
+// leaf.
func TestNode_split_SinglePage(t *testing.T) {
// Create a node.
- n := &node{inodes: make(inodes, 0), bucket: &Bucket{tx: &Tx{db: &DB{}, meta: &meta{pgid: 1}}}}
- n.put([]byte("00000001"), []byte("00000001"), []byte("0123456701234567"), 0, 0)
- n.put([]byte("00000002"), []byte("00000002"), []byte("0123456701234567"), 0, 0)
- n.put([]byte("00000003"), []byte("00000003"), []byte("0123456701234567"), 0, 0)
- n.put([]byte("00000004"), []byte("00000004"), []byte("0123456701234567"), 0, 0)
- n.put([]byte("00000005"), []byte("00000005"), []byte("0123456701234567"), 0, 0)
+ n := &node{
+ inodes: make(inodes, 0),
+ bucket: &Bucket{
+ tx: &Tx{
+ db: &DB{},
+ meta: &meta{
+ pgid: 1,
+ },
+ },
+ },
+ }
+ puts(n, "00000001", "00000001", "0123456701234567", 0, 0)
+ puts(n, "00000002", "00000002", "0123456701234567", 0, 0)
+ puts(n, "00000003", "00000003", "0123456701234567", 0, 0)
+ puts(n, "00000004", "00000004", "0123456701234567", 0, 0)
+ puts(n, "00000005", "00000005", "0123456701234567", 0, 0)
// Split.
n.split(4096)
@@ -4212,20 +5016,29 @@ func TestNode_split_SinglePage(t *testing.T) {
// Ensure that the page type can be returned in human readable format.
func TestPage_typ(t *testing.T) {
- if typ := (&page{flags: branchPageFlag}).typ(); typ != "branch" {
- t.Fatalf("exp=branch; got=%v", typ)
+ typ := (&page{flags: branchPageFlag}).typ()
+ if typ != "branch" {
+ t.Fatalf("exp=branch, got=%v", typ)
}
- if typ := (&page{flags: leafPageFlag}).typ(); typ != "leaf" {
- t.Fatalf("exp=leaf; got=%v", typ)
+
+ typ = (&page{flags: leafPageFlag}).typ()
+ if typ != "leaf" {
+ t.Fatalf("exp=leaf, got=%v", typ)
}
- if typ := (&page{flags: metaPageFlag}).typ(); typ != "meta" {
- t.Fatalf("exp=meta; got=%v", typ)
+
+ typ = (&page{flags: metaPageFlag}).typ()
+ if typ != "meta" {
+ t.Fatalf("exp=meta, got=%v", typ)
}
- if typ := (&page{flags: freelistPageFlag}).typ(); typ != "freelist" {
- t.Fatalf("exp=freelist; got=%v", typ)
+
+ typ = (&page{flags: freelistPageFlag}).typ()
+ if typ != "freelist" {
+ t.Fatalf("exp=freelist, got=%v", typ)
}
- if typ := (&page{flags: 20000}).typ(); typ != "unknown<4e20>" {
- t.Fatalf("exp=unknown<4e20>; got=%v", typ)
+
+ typ = (&page{flags: 20000}).typ()
+ if typ != "unknown<4e20>" {
+ t.Fatalf("exp=unknown<4e20>, got=%v", typ)
}
}
@@ -4233,20 +5046,23 @@ func TestPgids_merge(t *testing.T) {
a := pgids{4, 5, 6, 10, 11, 12, 13, 27}
b := pgids{1, 3, 8, 9, 25, 30}
c := a.merge(b)
- if !reflect.DeepEqual(c, pgids{1, 3, 4, 5, 6, 8, 9, 10, 11, 12, 13, 25, 27, 30}) {
+ expected := pgids{1, 3, 4, 5, 6, 8, 9, 10, 11, 12, 13, 25, 27, 30}
+ if !reflect.DeepEqual(c, expected) {
t.Errorf("mismatch: %v", c)
}
a = pgids{4, 5, 6, 10, 11, 12, 13, 27, 35, 36}
b = pgids{8, 9, 25, 30}
c = a.merge(b)
- if !reflect.DeepEqual(c, pgids{4, 5, 6, 8, 9, 10, 11, 12, 13, 25, 27, 30, 35, 36}) {
+
+ expected = pgids{4, 5, 6, 8, 9, 10, 11, 12, 13, 25, 27, 30, 35, 36}
+ if !reflect.DeepEqual(c, expected) {
t.Errorf("mismatch: %v", c)
}
}
func TestPgids_merge_quick(t *testing.T) {
- if err := quick.Check(func(a, b pgids) bool {
+ err := quick.Check(func(a, b pgids) bool {
// Sort incoming lists.
sort.Sort(a)
sort.Sort(b)
@@ -4254,7 +5070,8 @@ func TestPgids_merge_quick(t *testing.T) {
// Merge the two lists together.
got := a.merge(b)
- // The expected value should be the two lists combined and sorted.
+ // The expected value should be the two lists combined and
+ // sorted.
exp := append(a, b...)
sort.Sort(exp)
@@ -4264,7 +5081,8 @@ func TestPgids_merge_quick(t *testing.T) {
}
return true
- }, nil); err != nil {
+ }, nil)
+ if err != nil {
t.Fatal(err)
}
}
@@ -4279,7 +5097,13 @@ func TestPgids_merge_quick(t *testing.T) {
// -quick.maxvsize The maximum size of a value.
//
-var qcount, qseed, qmaxitems, qmaxksize, qmaxvsize int
+var (
+ qcount int
+ qseed int
+ qmaxitems int
+ qmaxksize int
+ qmaxvsize int
+)
func _init() {
flag.IntVar(&qcount, "quick.count", 5, "")
@@ -4289,7 +5113,14 @@ func _init() {
flag.IntVar(&qmaxvsize, "quick.maxvsize", 1024, "")
flag.Parse()
fmt.Fprintln(os.Stderr, "seed:", qseed)
- fmt.Fprintf(os.Stderr, "quick settings: count=%v, items=%v, ksize=%v, vsize=%v\n", qcount, qmaxitems, qmaxksize, qmaxvsize)
+ fmt.Fprintf(
+ os.Stderr,
+ "quick settings: count=%v, items=%v, ksize=%v, vsize=%v\n",
+ qcount,
+ qmaxitems,
+ qmaxksize,
+ qmaxvsize,
+ )
}
func qconfig() *quick.Config {
@@ -4301,9 +5132,17 @@ func qconfig() *quick.Config {
type testdata []testdataitem
-func (t testdata) Len() int { return len(t) }
-func (t testdata) Swap(i, j int) { t[i], t[j] = t[j], t[i] }
-func (t testdata) Less(i, j int) bool { return bytes.Compare(t[i].Key, t[j].Key) == -1 }
+func (t testdata) Len() int {
+ return len(t)
+}
+
+func (t testdata) Swap(i, j int) {
+ t[i], t[j] = t[j], t[i]
+}
+
+func (t testdata) Less(i, j int) bool {
+ return bytes.Compare(t[i].Key, t[j].Key) == -1
+}
func (t testdata) Generate(rand *rand.Rand, size int) reflect.Value {
n := rand.Intn(qmaxitems-1) + 1
@@ -4311,7 +5150,8 @@ func (t testdata) Generate(rand *rand.Rand, size int) reflect.Value {
used := make(map[string]bool)
for i := 0; i < n; i++ {
item := &items[i]
- // Ensure that keys are unique by looping until we find one that we have not already used.
+ // Ensure that keys are unique by looping until we find one that
+ // we have not already used.
for {
item.Key = randByteSlice(rand, 1, qmaxksize)
if !used[string(item.Key)] {
@@ -4326,9 +5166,17 @@ func (t testdata) Generate(rand *rand.Rand, size int) reflect.Value {
type revtestdata []testdataitem
-func (t revtestdata) Len() int { return len(t) }
-func (t revtestdata) Swap(i, j int) { t[i], t[j] = t[j], t[i] }
-func (t revtestdata) Less(i, j int) bool { return bytes.Compare(t[i].Key, t[j].Key) == 1 }
+func (t revtestdata) Len() int {
+ return len(t)
+}
+
+func (t revtestdata) Swap(i, j int) {
+ t[i], t[j] = t[j], t[i]
+}
+
+func (t revtestdata) Less(i, j int) bool {
+ return bytes.Compare(t[i].Key, t[j].Key) == 1
+}
type testdataitem struct {
Key []byte
@@ -4344,24 +5192,64 @@ func randByteSlice(rand *rand.Rand, minSize, maxSize int) []byte {
return b
}
-func TestSimulate_1op_1p(t *testing.T) { testSimulate(t, 1, 1) }
-func TestSimulate_10op_1p(t *testing.T) { testSimulate(t, 10, 1) }
-func TestSimulate_100op_1p(t *testing.T) { testSimulate(t, 100, 1) }
-func TestSimulate_1000op_1p(t *testing.T) { testSimulate(t, 1000, 1) }
-func TestSimulate_10000op_1p(t *testing.T) { return; /* FIXME: is too slow */ testSimulate(t, 10000, 1) }
+func TestSimulate_1op_1p(t *testing.T) {
+ testSimulate(t, 1, 1)
+}
+
+func TestSimulate_10op_1p(t *testing.T) {
+ testSimulate(t, 10, 1)
+}
+
+func TestSimulate_100op_1p(t *testing.T) {
+ testSimulate(t, 100, 1)
+}
+
+func TestSimulate_1000op_1p(t *testing.T) {
+ testSimulate(t, 1000, 1)
+}
-func TestSimulate_10op_10p(t *testing.T) { testSimulate(t, 10, 10) }
-func TestSimulate_100op_10p(t *testing.T) { testSimulate(t, 100, 10) }
-func TestSimulate_1000op_10p(t *testing.T) { testSimulate(t, 1000, 10) }
-func TestSimulate_10000op_10p(t *testing.T) { return; /* FIXME: too slow */ testSimulate(t, 10000, 10) }
+func TestSimulate_10000op_1p(t *testing.T) {
+ return // FIXME: too slow
+ testSimulate(t, 10000, 1)
+}
-func TestSimulate_100op_100p(t *testing.T) { testSimulate(t, 100, 100) }
-func TestSimulate_1000op_100p(t *testing.T) { testSimulate(t, 1000, 100) }
-func TestSimulate_10000op_100p(t *testing.T) { return; /* FIXME: too slow */ testSimulate(t, 10000, 100) }
+func TestSimulate_10op_10p(t *testing.T) {
+ testSimulate(t, 10, 10)
+}
-func TestSimulate_10000op_1000p(t *testing.T) { return; /* FIXME: too slow */ testSimulate(t, 10000, 1000) }
+func TestSimulate_100op_10p(t *testing.T) {
+ testSimulate(t, 100, 10)
+}
-// Randomly generate operations on a given database with multiple clients to ensure consistency and thread safety.
+func TestSimulate_1000op_10p(t *testing.T) {
+ testSimulate(t, 1000, 10)
+}
+
+func TestSimulate_10000op_10p(t *testing.T) {
+ return // FIXME: too slow
+ testSimulate(t, 10000, 10)
+}
+
+func TestSimulate_100op_100p(t *testing.T) {
+ testSimulate(t, 100, 100)
+}
+
+func TestSimulate_1000op_100p(t *testing.T) {
+ testSimulate(t, 1000, 100)
+}
+
+func TestSimulate_10000op_100p(t *testing.T) {
+ return // FIXME: too slow
+ testSimulate(t, 10000, 100)
+}
+
+func TestSimulate_10000op_1000p(t *testing.T) {
+ return // FIXME: too slow
+ testSimulate(t, 10000, 1000)
+}
+
+// Randomly generate operations on a given database with multiple clients to
+// ensure consistency and thread safety.
func testSimulate(t *testing.T, threadCount, parallelism int) {
if testing.Short() {
t.Skip("skipping test in short mode.")
@@ -4370,10 +5258,15 @@ func testSimulate(t *testing.T, threadCount, parallelism int) {
rand.Seed(int64(qseed))
// A list of operations that readers and writers can perform.
- var readerHandlers = []simulateHandler{simulateGetHandler}
- var writerHandlers = []simulateHandler{simulateGetHandler, simulatePutHandler}
+ readerHandlers := []simulateHandler{
+ simulateGetHandler,
+ }
+ writerHandlers := []simulateHandler{
+ simulateGetHandler,
+ simulatePutHandler,
+ }
- var versions = make(map[int]*QuickDB)
+ versions := make(map[int]*QuickDB)
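+ // versions maps each committed transaction ID to the QuickDB
+ // mirror of the expected database contents at that point.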
versions[1] = NewQuickDB()
db := MustOpenDB()
@@ -4384,7 +5277,7 @@ func testSimulate(t *testing.T, threadCount, parallelism int) {
// Run n threads in parallel, each with their own operation.
var wg sync.WaitGroup
- var threads = make(chan bool, parallelism)
+ threads := make(chan bool, parallelism)
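+ // threads acts as a counting semaphore that caps the number of
+ // concurrent operations at parallelism.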
var i int
for {
threads <- true
@@ -4417,14 +5310,16 @@ func testSimulate(t *testing.T, threadCount, parallelism int) {
}
mutex.Unlock()
- // Make sure we commit/rollback the tx at the end and update the state.
+ // Make sure we commit/rollback the tx at the end and
+ // update the state.
if writable {
defer func() {
mutex.Lock()
versions[tx.ID()] = qdb
mutex.Unlock()
- if err := tx.Commit(); err != nil {
+ err := tx.Commit()
+ if err != nil {
t.Fatal(err)
}
}()
@@ -4467,14 +5362,21 @@ func simulateGetHandler(tx *Tx, qdb *QuickDB) {
// Retrieve root bucket.
b := tx.Bucket(keys[0])
if b == nil {
- panic(fmt.Sprintf("bucket[0] expected: %08x\n", trunc(keys[0], 4)))
+ panic(fmt.Sprintf(
+ "bucket[0] expected: %08x\n",
+ trunc(keys[0], 4),
+ ))
}
// Drill into nested buckets.
for _, key := range keys[1 : len(keys)-1] {
b = b.Bucket(key)
if b == nil {
- panic(fmt.Sprintf("bucket[n] expected: %v -> %v\n", keys, key))
+ panic(fmt.Sprintf(
+ "bucket[n] expected: %v -> %v\n",
+ keys,
+ key,
+ ))
}
}
@@ -4519,7 +5421,8 @@ func simulatePutHandler(tx *Tx, qdb *QuickDB) {
}
// Insert into database.
- if err := b.Put(keys[len(keys)-1], value); err != nil {
+ err = b.Put(keys[len(keys)-1], value)
+ if err != nil {
panic("put: " + err.Error())
}
@@ -4560,9 +5463,11 @@ func (db *QuickDB) Get(keys [][]byte) []byte {
}
// Only return if it's a simple value.
- if value, ok := m[string(keys[len(keys)-1])].([]byte); ok {
+ value, ok := m[string(keys[len(keys)-1])].([]byte)
+ if ok {
return value
}
+
return nil
}
@@ -4574,13 +5479,16 @@ func (db *QuickDB) Put(keys [][]byte, value []byte) {
// Build buckets all the way down the key path.
m := db.m
for _, key := range keys[:len(keys)-1] {
- if _, ok := m[string(key)].([]byte); ok {
- return // Keypath intersects with a simple value. Do nothing.
+ _, ok := m[string(key)].([]byte)
+ if ok {
+ // Keypath intersects with a simple value. Do nothing.
+ return
}
if m[string(key)] == nil {
m[string(key)] = make(map[string]interface{})
}
+
m = m[string(key)].(map[string]interface{})
}
@@ -4605,9 +5513,11 @@ func (db *QuickDB) rand(m map[string]interface{}, keys *[][]byte) {
for k, v := range m {
if i == index {
*keys = append(*keys, []byte(k))
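+ // Descend into nested buckets when the randomly chosen entry is
+ // itself a bucket.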
- if v, ok := v.(map[string]interface{}); ok {
+ v, ok := v.(map[string]interface{})
+ if ok {
db.rand(v, keys)
}
+
return
}
i++
@@ -4636,7 +5546,10 @@ func (db *QuickDB) copy(m map[string]interface{}) map[string]interface{} {
}
func randKey() []byte {
- var min, max = 1, 1024
+ const (
+ min = 1
+ max = 1024
+ )
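+ // Generated keys are between min and max-1 bytes long.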
n := rand.Intn(max-min) + min
b := make([]byte, n)
for i := 0; i < n; i++ {
@@ -4646,8 +5559,8 @@ func randKey() []byte {
}
func randKeys() [][]byte {
- var keys [][]byte
- var count = rand.Intn(2) + 2
+ keys := [][]byte{}
+ count := rand.Intn(2) + 2
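+ // Build a key path consisting of two or three random keys.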
for i := 0; i < count; i++ {
keys = append(keys, randKey())
}
@@ -4674,15 +5587,18 @@ func TestTx_Commit_ErrTxClosed(t *testing.T) {
t.Fatal(err)
}
- if _, err := tx.CreateBucket([]byte("foo")); err != nil {
+ _, err = tx.CreateBucket([]byte("foo"))
+ if err != nil {
t.Fatal(err)
}
- if err := tx.Commit(); err != nil {
+ err = tx.Commit()
+ if err != nil {
t.Fatal(err)
}
- if err := tx.Commit(); err != ErrTxClosed {
+ err = tx.Commit()
+ if err != ErrTxClosed {
t.Fatalf("unexpected error: %s", err)
}
}
@@ -4698,10 +5614,13 @@ func TestTx_Rollback_ErrTxClosed(t *testing.T) {
t.Fatal(err)
}
- if err := tx.Rollback(); err != nil {
+ err = tx.Rollback()
+ if err != nil {
t.Fatal(err)
}
- if err := tx.Rollback(); err != ErrTxClosed {
+
+ err = tx.Rollback()
+ if err != ErrTxClosed {
t.Fatalf("unexpected error: %s", err)
}
}
@@ -4716,7 +5635,9 @@ func TestTx_Commit_ErrTxNotWritable(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- if err := tx.Commit(); err != ErrTxNotWritable {
+
+ err = tx.Commit()
+ if err != ErrTxNotWritable {
t.Fatal(err)
}
}
@@ -4727,36 +5648,42 @@ func TestTx_Cursor(t *testing.T) {
defer db.MustClose()
defer os.Remove(db.Path())
- if err := db.Update(func(tx *Tx) error {
- if _, err := tx.CreateBucket([]byte("widgets")); err != nil {
+ err := db.Update(func(tx *Tx) error {
+ _, err := tx.CreateBucket([]byte("widgets"))
+ if err != nil {
t.Fatal(err)
}
- if _, err := tx.CreateBucket([]byte("woojits")); err != nil {
+ _, err = tx.CreateBucket([]byte("woojits"))
+ if err != nil {
t.Fatal(err)
}
c := tx.Cursor()
- if k, v := c.First(); !bytes.Equal(k, []byte("widgets")) {
+ k, v := c.First()
+ if !bytes.Equal(k, []byte("widgets")) {
t.Fatalf("unexpected key: %v", k)
} else if v != nil {
t.Fatalf("unexpected value: %v", v)
}
- if k, v := c.Next(); !bytes.Equal(k, []byte("woojits")) {
+ k, v = c.Next()
+ if !bytes.Equal(k, []byte("woojits")) {
t.Fatalf("unexpected key: %v", k)
} else if v != nil {
t.Fatalf("unexpected value: %v", v)
}
- if k, v := c.Next(); k != nil {
+ k, v = c.Next()
+ if k != nil {
t.Fatalf("unexpected key: %v", k)
} else if v != nil {
t.Fatalf("unexpected value: %v", k)
}
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
}
@@ -4767,13 +5694,15 @@ func TestTx_CreateBucket_ErrTxNotWritable(t *testing.T) {
defer db.MustClose()
defer os.Remove(db.Path())
- if err := db.View(func(tx *Tx) error {
+ err := db.View(func(tx *Tx) error {
_, err := tx.CreateBucket([]byte("foo"))
if err != ErrTxNotWritable {
t.Fatalf("unexpected error: %s", err)
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
}
@@ -4788,11 +5717,14 @@ func TestTx_CreateBucket_ErrTxClosed(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- if err := tx.Commit(); err != nil {
+
+ err = tx.Commit()
+ if err != nil {
t.Fatal(err)
}
- if _, err := tx.CreateBucket([]byte("foo")); err != ErrTxClosed {
+ _, err = tx.CreateBucket([]byte("foo"))
+ if err != ErrTxClosed {
t.Fatalf("unexpected error: %s", err)
}
}
@@ -4803,15 +5735,19 @@ func TestTx_Bucket(t *testing.T) {
defer db.MustClose()
defer os.Remove(db.Path())
- if err := db.Update(func(tx *Tx) error {
- if _, err := tx.CreateBucket([]byte("widgets")); err != nil {
+ err := db.Update(func(tx *Tx) error {
+ _, err := tx.CreateBucket([]byte("widgets"))
+ if err != nil {
t.Fatal(err)
}
+
if tx.Bucket([]byte("widgets")) == nil {
t.Fatal("expected bucket")
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
}
@@ -4822,20 +5758,24 @@ func TestTx_Get_NotFound(t *testing.T) {
defer db.MustClose()
defer os.Remove(db.Path())
- if err := db.Update(func(tx *Tx) error {
+ err := db.Update(func(tx *Tx) error {
b, err := tx.CreateBucket([]byte("widgets"))
if err != nil {
t.Fatal(err)
}
- if err := b.Put([]byte("foo"), []byte("bar")); err != nil {
+ err = b.Put([]byte("foo"), []byte("bar"))
+ if err != nil {
t.Fatal(err)
}
+
if b.Get([]byte("no_such_key")) != nil {
t.Fatal("expected nil value")
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
}
@@ -4847,25 +5787,29 @@ func TestTx_CreateBucket(t *testing.T) {
defer os.Remove(db.Path())
// Create a bucket.
- if err := db.Update(func(tx *Tx) error {
+ err := db.Update(func(tx *Tx) error {
b, err := tx.CreateBucket([]byte("widgets"))
if err != nil {
t.Fatal(err)
} else if b == nil {
t.Fatal("expected bucket")
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
// Read the bucket through a separate transaction.
- if err := db.View(func(tx *Tx) error {
+ err = db.View(func(tx *Tx) error {
if tx.Bucket([]byte("widgets")) == nil {
t.Fatal("expected bucket")
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
}
@@ -4876,33 +5820,38 @@ func TestTx_CreateBucketIfNotExists(t *testing.T) {
defer db.MustClose()
defer os.Remove(db.Path())
- if err := db.Update(func(tx *Tx) error {
+ err := db.Update(func(tx *Tx) error {
// Create bucket.
- if b, err := tx.CreateBucketIfNotExists([]byte("widgets")); err != nil {
+ b, err := tx.CreateBucketIfNotExists([]byte("widgets"))
+ if err != nil {
t.Fatal(err)
} else if b == nil {
t.Fatal("expected bucket")
}
// Create bucket again.
- if b, err := tx.CreateBucketIfNotExists([]byte("widgets")); err != nil {
+ b, err = tx.CreateBucketIfNotExists([]byte("widgets"))
+ if err != nil {
t.Fatal(err)
} else if b == nil {
t.Fatal("expected bucket")
}
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
// Read the bucket through a separate transaction.
- if err := db.View(func(tx *Tx) error {
+ err = db.View(func(tx *Tx) error {
if tx.Bucket([]byte("widgets")) == nil {
t.Fatal("expected bucket")
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
}
@@ -4913,17 +5862,20 @@ func TestTx_CreateBucketIfNotExists_ErrBucketNameRequired(t *testing.T) {
defer db.MustClose()
defer os.Remove(db.Path())
- if err := db.Update(func(tx *Tx) error {
- if _, err := tx.CreateBucketIfNotExists([]byte{}); err != ErrBucketNameRequired {
+ err := db.Update(func(tx *Tx) error {
+ _, err := tx.CreateBucketIfNotExists([]byte{})
+ if err != ErrBucketNameRequired {
t.Fatalf("unexpected error: %s", err)
}
- if _, err := tx.CreateBucketIfNotExists(nil); err != ErrBucketNameRequired {
+ _, err = tx.CreateBucketIfNotExists(nil)
+ if err != ErrBucketNameRequired {
t.Fatalf("unexpected error: %s", err)
}
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
}
@@ -4935,22 +5887,28 @@ func TestTx_CreateBucket_ErrBucketExists(t *testing.T) {
defer os.Remove(db.Path())
// Create a bucket.
- if err := db.Update(func(tx *Tx) error {
- if _, err := tx.CreateBucket([]byte("widgets")); err != nil {
+ err := db.Update(func(tx *Tx) error {
+ _, err := tx.CreateBucket([]byte("widgets"))
+ if err != nil {
t.Fatal(err)
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
// Create the same bucket again.
- if err := db.Update(func(tx *Tx) error {
- if _, err := tx.CreateBucket([]byte("widgets")); err != ErrBucketExists {
+ err = db.Update(func(tx *Tx) error {
+ _, err := tx.CreateBucket([]byte("widgets"))
+ if err != ErrBucketExists {
t.Fatalf("unexpected error: %s", err)
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
}
@@ -4961,12 +5919,15 @@ func TestTx_CreateBucket_ErrBucketNameRequired(t *testing.T) {
defer db.MustClose()
defer os.Remove(db.Path())
- if err := db.Update(func(tx *Tx) error {
- if _, err := tx.CreateBucket(nil); err != ErrBucketNameRequired {
+ err := db.Update(func(tx *Tx) error {
+ _, err := tx.CreateBucket(nil)
+ if err != ErrBucketNameRequired {
t.Fatalf("unexpected error: %s", err)
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
}
@@ -4978,43 +5939,56 @@ func TestTx_DeleteBucket(t *testing.T) {
defer os.Remove(db.Path())
// Create a bucket and add a value.
- if err := db.Update(func(tx *Tx) error {
+ err := db.Update(func(tx *Tx) error {
b, err := tx.CreateBucket([]byte("widgets"))
if err != nil {
t.Fatal(err)
}
- if err := b.Put([]byte("foo"), []byte("bar")); err != nil {
+
+ err = b.Put([]byte("foo"), []byte("bar"))
+ if err != nil {
t.Fatal(err)
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
// Delete the bucket and make sure we can't get the value.
- if err := db.Update(func(tx *Tx) error {
- if err := tx.DeleteBucket([]byte("widgets")); err != nil {
+ err = db.Update(func(tx *Tx) error {
+ err := tx.DeleteBucket([]byte("widgets"))
+ if err != nil {
t.Fatal(err)
}
+
if tx.Bucket([]byte("widgets")) != nil {
t.Fatal("unexpected bucket")
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
- if err := db.Update(func(tx *Tx) error {
- // Create the bucket again and make sure there's not a phantom value.
+ err = db.Update(func(tx *Tx) error {
+ // Create the bucket again and make sure there's not a phantom
+ // value.
b, err := tx.CreateBucket([]byte("widgets"))
if err != nil {
t.Fatal(err)
}
- if v := b.Get([]byte("foo")); v != nil {
+
+ v := b.Get([]byte("foo"))
+ if v != nil {
t.Fatalf("unexpected phantom value: %v", v)
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
}
@@ -5029,10 +6003,14 @@ func TestTx_DeleteBucket_ErrTxClosed(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- if err := tx.Commit(); err != nil {
+
+ err = tx.Commit()
+ if err != nil {
t.Fatal(err)
}
- if err := tx.DeleteBucket([]byte("foo")); err != ErrTxClosed {
+
+ err = tx.DeleteBucket([]byte("foo"))
+ if err != ErrTxClosed {
t.Fatalf("unexpected error: %s", err)
}
}
@@ -5043,12 +6021,15 @@ func TestTx_DeleteBucket_ReadOnly(t *testing.T) {
defer db.MustClose()
defer os.Remove(db.Path())
- if err := db.View(func(tx *Tx) error {
- if err := tx.DeleteBucket([]byte("foo")); err != ErrTxNotWritable {
+ err := db.View(func(tx *Tx) error {
+ err := tx.DeleteBucket([]byte("foo"))
+ if err != ErrTxNotWritable {
t.Fatalf("unexpected error: %s", err)
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
}
@@ -5059,12 +6040,15 @@ func TestTx_DeleteBucket_NotFound(t *testing.T) {
defer db.MustClose()
defer os.Remove(db.Path())
- if err := db.Update(func(tx *Tx) error {
- if err := tx.DeleteBucket([]byte("widgets")); err != ErrBucketNotFound {
+ err := db.Update(func(tx *Tx) error {
+ err := tx.DeleteBucket([]byte("widgets"))
+ if err != ErrBucketNotFound {
t.Fatalf("unexpected error: %s", err)
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
}
@@ -5076,22 +6060,27 @@ func TestTx_ForEach_NoError(t *testing.T) {
defer db.MustClose()
defer os.Remove(db.Path())
- if err := db.Update(func(tx *Tx) error {
+ err := db.Update(func(tx *Tx) error {
b, err := tx.CreateBucket([]byte("widgets"))
if err != nil {
t.Fatal(err)
}
- if err := b.Put([]byte("foo"), []byte("bar")); err != nil {
+
+ err = b.Put([]byte("foo"), []byte("bar"))
+ if err != nil {
t.Fatal(err)
}
- if err := tx.ForEach(func(name []byte, b *Bucket) error {
+ err = tx.ForEach(func(name []byte, b *Bucket) error {
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
}
@@ -5102,42 +6091,50 @@ func TestTx_ForEach_WithError(t *testing.T) {
defer db.MustClose()
defer os.Remove(db.Path())
- if err := db.Update(func(tx *Tx) error {
+ err := db.Update(func(tx *Tx) error {
b, err := tx.CreateBucket([]byte("widgets"))
if err != nil {
t.Fatal(err)
}
- if err := b.Put([]byte("foo"), []byte("bar")); err != nil {
+
+ err = b.Put([]byte("foo"), []byte("bar"))
+ if err != nil {
t.Fatal(err)
}
marker := errors.New("marker")
- if err := tx.ForEach(func(name []byte, b *Bucket) error {
+ err = tx.ForEach(func(name []byte, b *Bucket) error {
return marker
- }); err != marker {
+ })
+ if err != marker {
t.Fatalf("unexpected error: %s", err)
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
}
-// Ensure that Tx commit handlers are called after a transaction successfully commits.
+// Ensure that Tx commit handlers are called after a transaction successfully
+// commits.
func TestTx_OnCommit(t *testing.T) {
db := MustOpenDB()
defer db.MustClose()
defer os.Remove(db.Path())
var x int
- if err := db.Update(func(tx *Tx) error {
+ err := db.Update(func(tx *Tx) error {
tx.OnCommit(func() { x += 1 })
tx.OnCommit(func() { x += 2 })
- if _, err := tx.CreateBucket([]byte("widgets")); err != nil {
+ _, err := tx.CreateBucket([]byte("widgets"))
+ if err != nil {
t.Fatal(err)
}
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
} else if x != 3 {
t.Fatalf("unexpected x: %d", x)
@@ -5151,14 +6148,17 @@ func TestTx_OnCommit_Rollback(t *testing.T) {
defer os.Remove(db.Path())
var x int
- if err := db.Update(func(tx *Tx) error {
+ err := db.Update(func(tx *Tx) error {
tx.OnCommit(func() { x += 1 })
tx.OnCommit(func() { x += 2 })
- if _, err := tx.CreateBucket([]byte("widgets")); err != nil {
+ _, err := tx.CreateBucket([]byte("widgets"))
+ if err != nil {
t.Fatal(err)
}
+
return errors.New("rollback this commit")
- }); err == nil || err.Error() != "rollback this commit" {
+ })
+ if err == nil || err.Error() != "rollback this commit" {
t.Fatalf("unexpected error: %s", err)
} else if x != 0 {
t.Fatalf("unexpected x: %d", x)
@@ -5173,25 +6173,32 @@ func TestTx_CopyFile(t *testing.T) {
path := tempfile()
defer os.Remove(path)
- if err := db.Update(func(tx *Tx) error {
+ err := db.Update(func(tx *Tx) error {
b, err := tx.CreateBucket([]byte("widgets"))
if err != nil {
t.Fatal(err)
}
- if err := b.Put([]byte("foo"), []byte("bar")); err != nil {
+
+ err = b.Put([]byte("foo"), []byte("bar"))
+ if err != nil {
t.Fatal(err)
}
- if err := b.Put([]byte("baz"), []byte("bat")); err != nil {
+
+ err = b.Put([]byte("baz"), []byte("bat"))
+ if err != nil {
t.Fatal(err)
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
- if err := db.View(func(tx *Tx) error {
+ err = db.View(func(tx *Tx) error {
return tx.CopyFile(path, 0600)
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
@@ -5200,19 +6207,25 @@ func TestTx_CopyFile(t *testing.T) {
t.Fatal(err)
}
- if err := db2.View(func(tx *Tx) error {
- if v := tx.Bucket([]byte("widgets")).Get([]byte("foo")); !bytes.Equal(v, []byte("bar")) {
+ err = db2.View(func(tx *Tx) error {
+ v := tx.Bucket([]byte("widgets")).Get([]byte("foo"))
+ if !bytes.Equal(v, []byte("bar")) {
t.Fatalf("unexpected value: %v", v)
}
- if v := tx.Bucket([]byte("widgets")).Get([]byte("baz")); !bytes.Equal(v, []byte("bat")) {
+
+ v = tx.Bucket([]byte("widgets")).Get([]byte("baz"))
+ if !bytes.Equal(v, []byte("bat")) {
t.Fatalf("unexpected value: %v", v)
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
- if err := db2.Close(); err != nil {
+ err = db2.Close()
+ if err != nil {
t.Fatal(err)
}
}
@@ -5244,26 +6257,34 @@ func TestTx_CopyFile_Error_Meta(t *testing.T) {
defer db.MustClose()
defer os.Remove(db.Path())
- if err := db.Update(func(tx *Tx) error {
+ err := db.Update(func(tx *Tx) error {
b, err := tx.CreateBucket([]byte("widgets"))
if err != nil {
t.Fatal(err)
}
- if err := b.Put([]byte("foo"), []byte("bar")); err != nil {
+
+ err = b.Put([]byte("foo"), []byte("bar"))
+ if err != nil {
t.Fatal(err)
}
- if err := b.Put([]byte("baz"), []byte("bat")); err != nil {
+
+ err = b.Put([]byte("baz"), []byte("bat"))
+ if err != nil {
t.Fatal(err)
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
- if err := db.View(func(tx *Tx) error {
+ const expectedMsg = "meta 0 copy: error injected for tests"
+ err = db.View(func(tx *Tx) error {
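+ // WriteTo is expected to fail while copying meta page 0.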
_, err := tx.WriteTo(&failWriter{})
return err
- }); err == nil || err.Error() != "meta 0 copy: error injected for tests" {
+ })
+ if err == nil || err.Error() != expectedMsg {
t.Fatalf("unexpected error: %v", err)
}
}
@@ -5274,26 +6295,33 @@ func TestTx_CopyFile_Error_Normal(t *testing.T) {
defer db.MustClose()
defer os.Remove(db.Path())
- if err := db.Update(func(tx *Tx) error {
+ err := db.Update(func(tx *Tx) error {
b, err := tx.CreateBucket([]byte("widgets"))
if err != nil {
t.Fatal(err)
}
- if err := b.Put([]byte("foo"), []byte("bar")); err != nil {
+
+ err = b.Put([]byte("foo"), []byte("bar"))
+ if err != nil {
t.Fatal(err)
}
- if err := b.Put([]byte("baz"), []byte("bat")); err != nil {
+
+ err = b.Put([]byte("baz"), []byte("bat"))
+ if err != nil {
t.Fatal(err)
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
- if err := db.View(func(tx *Tx) error {
+ err = db.View(func(tx *Tx) error {
_, err := tx.WriteTo(&failWriter{3 * db.pageSize})
return err
- }); err == nil || err.Error() != "error injected for tests" {
+ })
+ if err == nil || err.Error() != "error injected for tests" {
t.Fatalf("unexpected error: %v", err)
}
}
@@ -5307,17 +6335,22 @@ func ExampleTx_Rollback() {
defer os.Remove(db.Path())
// Create a bucket.
- if err := db.Update(func(tx *Tx) error {
+ err = db.Update(func(tx *Tx) error {
_, err := tx.CreateBucket([]byte("widgets"))
return err
- }); err != nil {
+ })
+ if err != nil {
log.Fatal(err)
}
// Set a value for a key.
- if err := db.Update(func(tx *Tx) error {
- return tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar"))
- }); err != nil {
+ err = db.Update(func(tx *Tx) error {
+ return tx.Bucket([]byte("widgets")).Put(
+ []byte("foo"),
+ []byte("bar"),
+ )
+ })
+ if err != nil {
log.Fatal(err)
}
@@ -5326,25 +6359,31 @@ func ExampleTx_Rollback() {
if err != nil {
log.Fatal(err)
}
+
b := tx.Bucket([]byte("widgets"))
- if err := b.Put([]byte("foo"), []byte("baz")); err != nil {
+ err = b.Put([]byte("foo"), []byte("baz"))
+ if err != nil {
log.Fatal(err)
}
- if err := tx.Rollback(); err != nil {
+
+ err = tx.Rollback()
+ if err != nil {
log.Fatal(err)
}
// Ensure that our original value is still set.
- if err := db.View(func(tx *Tx) error {
+ err = db.View(func(tx *Tx) error {
value := tx.Bucket([]byte("widgets")).Get([]byte("foo"))
fmt.Printf("The value for 'foo' is still: %s\n", value)
return nil
- }); err != nil {
+ })
+ if err != nil {
log.Fatal(err)
}
// Close database to release file lock.
- if err := db.Close(); err != nil {
+ err = db.Close()
+ if err != nil {
log.Fatal(err)
}
@@ -5361,25 +6400,29 @@ func ExampleTx_CopyFile() {
defer os.Remove(db.Path())
// Create a bucket and a key.
- if err := db.Update(func(tx *Tx) error {
+ err = db.Update(func(tx *Tx) error {
b, err := tx.CreateBucket([]byte("widgets"))
if err != nil {
return err
}
- if err := b.Put([]byte("foo"), []byte("bar")); err != nil {
+
+ err = b.Put([]byte("foo"), []byte("bar"))
+ if err != nil {
return err
}
return nil
- }); err != nil {
+ })
+ if err != nil {
log.Fatal(err)
}
// Copy the database to another file.
toFile := tempfile()
defer os.Remove(toFile)
- if err := db.View(func(tx *Tx) error {
+ err = db.View(func(tx *Tx) error {
return tx.CopyFile(toFile, 0666)
- }); err != nil {
+ })
+ if err != nil {
log.Fatal(err)
}
@@ -5390,20 +6433,23 @@ func ExampleTx_CopyFile() {
}
// Ensure that the key exists in the copy.
- if err := db2.View(func(tx *Tx) error {
+ err = db2.View(func(tx *Tx) error {
value := tx.Bucket([]byte("widgets")).Get([]byte("foo"))
fmt.Printf("The value for 'foo' in the clone is: %s\n", value)
return nil
- }); err != nil {
+ })
+ if err != nil {
log.Fatal(err)
}
// Close database to release file lock.
- if err := db.Close(); err != nil {
+ err = db.Close()
+ if err != nil {
log.Fatal(err)
}
- if err := db2.Close(); err != nil {
+ err = db2.Close()
+ if err != nil {
log.Fatal(err)
}
@@ -5445,8 +6491,10 @@ func fillBucket(b *Bucket, prefix []byte) error {
if err != nil {
return err
}
+
k := append(prefix, []byte(fmt.Sprintf("k%d", i))...)
- if err := b.Put(k, v); err != nil {
+ err = b.Put(k, v)
+ if err != nil {
return err
}
}
@@ -5462,7 +6510,9 @@ func fillBucket(b *Bucket, prefix []byte) error {
if err != nil {
return err
}
- if err := fillBucket(sb, append(k, '.')); err != nil {
+
+ err = fillBucket(sb, append(k, '.'))
+ if err != nil {
return err
}
}
@@ -5470,7 +6520,8 @@ func fillBucket(b *Bucket, prefix []byte) error {
}
func walkBucket(parent *Bucket, k []byte, v []byte, w io.Writer) error {
- if _, err := fmt.Fprintf(w, "%d:%x=%x\n", parent.Sequence(), k, v); err != nil {
+ _, err := fmt.Fprintf(w, "%d:%x=%x\n", parent.Sequence(), k, v)
+ if err != nil {
return err
}