author    Ben Johnson <benbjohnson@yahoo.com>  2014-04-11 15:11:55 -0600
committer Ben Johnson <benbjohnson@yahoo.com>  2014-04-11 15:11:55 -0600
commit    2c8020ec8e98e7b6c6c0fd3bd6e91d41caf7f25a
tree      125c24e03c653417ce8bf5965b7fbcbeb2dedb04
parent    Merge pull request #128 from benbjohnson/import-export
parent    Upgrade import/export to use nested buckets.
Merge pull request #127 from benbjohnson/nested-keys
Add nested buckets.
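
Nested buckets change the public API: bucket names are now []byte, and
Bucket()/CreateBucket() hang off both Tx and Bucket. A minimal sketch of the
new usage from outside the package (the import path and database filename are
assumptions for illustration, not part of this patch):

package main

import (
	"log"

	"github.com/boltdb/bolt" // import path assumed
)

func main() {
	db, err := bolt.Open("my.db", 0666) // filename is illustrative
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	err = db.Update(func(tx *bolt.Tx) error {
		// Bucket names are []byte now, and buckets can nest.
		if err := tx.CreateBucket([]byte("widgets")); err != nil {
			return err
		}
		b := tx.Bucket([]byte("widgets"))
		if err := b.CreateBucket([]byte("foo")); err != nil {
			return err
		}
		// Keys and child buckets share the parent bucket's keyspace.
		return b.Bucket([]byte("foo")).Put([]byte("baz"), []byte("bat"))
	})
	if err != nil {
		log.Fatal(err)
	}
}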
-rw-r--r--  Makefile                    4
-rw-r--r--  bolt_test.go              327
-rw-r--r--  bucket.go                 366
-rw-r--r--  bucket_test.go            524
-rw-r--r--  buckets.go                110
-rw-r--r--  buckets_test.go            70
-rw-r--r--  cmd/bolt/buckets.go         8
-rw-r--r--  cmd/bolt/buckets_test.go    6
-rw-r--r--  cmd/bolt/export.go         28
-rw-r--r--  cmd/bolt/export_test.go    17
-rw-r--r--  cmd/bolt/get.go             2
-rw-r--r--  cmd/bolt/get_test.go        6
-rw-r--r--  cmd/bolt/import.go         27
-rw-r--r--  cmd/bolt/import_test.go    11
-rw-r--r--  cmd/bolt/keys.go            2
-rw-r--r--  cmd/bolt/keys_test.go       8
-rw-r--r--  cmd/bolt/set.go             2
-rw-r--r--  cmd/bolt/set_test.go        2
-rw-r--r--  const.go                    5
-rw-r--r--  cursor.go                  80
-rw-r--r--  cursor_test.go            307
-rw-r--r--  db.go                     107
-rw-r--r--  db_test.go                296
-rw-r--r--  example_test.go           251
-rw-r--r--  freelist.go                25
-rw-r--r--  freelist_test.go           12
-rw-r--r--  functional_test.go        144
-rw-r--r--  meta.go                     2
-rw-r--r--  meta_test.go                4
-rw-r--r--  node.go                    50
-rw-r--r--  node_test.go               51
-rw-r--r--  page.go                     7
-rw-r--r--  page_test.go                5
-rw-r--r--  tx.go                     294
-rw-r--r--  tx_test.go                413
35 files changed, 2054 insertions, 1519 deletions
diff --git a/Makefile b/Makefile
index 1302aed..cb44a12 100644
--- a/Makefile
+++ b/Makefile
@@ -13,7 +13,7 @@ cloc:
@cloc --not-match-f='Makefile|_test.go' .
cover: fmt
- go test -coverprofile=$(COVERPROFILE) -test.run=$(TEST) .
+ go test -coverprofile=$(COVERPROFILE) -test.run=$(TEST) $(COVERFLAG) .
go tool cover -html=$(COVERPROFILE)
rm $(COVERPROFILE)
@@ -47,6 +47,6 @@ test: fmt errcheck
@echo ""
@echo ""
@echo "=== RACE DETECTOR ==="
- @go test -v -race -test.run=Parallel
+ @go test -v -race -test.run="TestSimulate_(100op|1000op|10000op)"
.PHONY: bench cloc cover cpuprofile fmt memprofile test
diff --git a/bolt_test.go b/bolt_test.go
new file mode 100644
index 0000000..bbf00f5
--- /dev/null
+++ b/bolt_test.go
@@ -0,0 +1,327 @@
+package bolt
+
+import (
+ "bytes"
+ "fmt"
+ "math/rand"
+ "sync"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestSimulate_1op_1p(t *testing.T) { testSimulate(t, 1, 1) }
+func TestSimulate_10op_1p(t *testing.T) { testSimulate(t, 10, 1) }
+func TestSimulate_100op_1p(t *testing.T) { testSimulate(t, 100, 1) }
+func TestSimulate_1000op_1p(t *testing.T) { testSimulate(t, 1000, 1) }
+func TestSimulate_10000op_1p(t *testing.T) { testSimulate(t, 10000, 1) }
+
+func TestSimulate_10op_10p(t *testing.T) { testSimulate(t, 10, 10) }
+func TestSimulate_100op_10p(t *testing.T) { testSimulate(t, 100, 10) }
+func TestSimulate_1000op_10p(t *testing.T) { testSimulate(t, 1000, 10) }
+func TestSimulate_10000op_10p(t *testing.T) { testSimulate(t, 10000, 10) }
+
+func TestSimulate_100op_100p(t *testing.T) { testSimulate(t, 100, 100) }
+func TestSimulate_1000op_100p(t *testing.T) { testSimulate(t, 1000, 100) }
+func TestSimulate_10000op_100p(t *testing.T) { testSimulate(t, 10000, 100) }
+
+func TestSimulate_10000op_1000p(t *testing.T) { testSimulate(t, 10000, 1000) }
+
+// Randomly generate operations on a given database with multiple clients to ensure consistency and thread safety.
+func testSimulate(t *testing.T, threadCount, parallelism int) {
+ if testing.Short() {
+ t.Skip("skipping test in short mode.")
+ }
+
+ rand.Seed(int64(qseed))
+
+ // A list of operations that readers and writers can perform.
+ var readerHandlers = []simulateHandler{simulateGetHandler}
+ var writerHandlers = []simulateHandler{simulateGetHandler, simulatePutHandler}
+
+ var versions = make(map[txid]*QuickDB)
+ versions[1] = NewQuickDB()
+ withOpenDB(func(db *DB, path string) {
+ var mutex sync.Mutex
+
+ // Run n threads in parallel, each with their own operation.
+ var wg sync.WaitGroup
+ var threads = make(chan bool, parallelism)
+ var i int
+ for {
+ threads <- true
+ wg.Add(1)
+ writable := ((rand.Int() % 100) < 20) // 20% writers
+
+ // Choose an operation to execute.
+ var handler simulateHandler
+ if writable {
+ handler = writerHandlers[rand.Intn(len(writerHandlers))]
+ } else {
+ handler = readerHandlers[rand.Intn(len(readerHandlers))]
+ }
+
+ // Execute a thread for the given operation.
+ go func(writable bool, handler simulateHandler) {
+ defer wg.Done()
+
+ // Start transaction.
+ tx, err := db.Begin(writable)
+ if err != nil {
+ t.Fatal("tx begin: ", err)
+ }
+
+ // Obtain current state of the dataset.
+ mutex.Lock()
+ var qdb = versions[tx.id()]
+ if writable {
+ qdb = versions[tx.id()-1].Copy()
+ }
+ mutex.Unlock()
+
+ // Make sure we commit/rollback the tx at the end and update the state.
+ if writable {
+ defer func() {
+ mutex.Lock()
+ versions[tx.id()] = qdb
+ mutex.Unlock()
+
+ assert.NoError(t, tx.Commit())
+ }()
+ } else {
+ defer tx.Rollback()
+ }
+
+ // Ignore operation if we don't have data yet.
+ if qdb == nil {
+ return
+ }
+
+ // Execute handler.
+ handler(tx, qdb)
+
+ // Release a thread back to the scheduling loop.
+ <-threads
+ }(writable, handler)
+
+ i++
+ if i > threadCount {
+ break
+ }
+ }
+
+ // Wait until all threads are done.
+ wg.Wait()
+ })
+}
+
+type simulateHandler func(tx *Tx, qdb *QuickDB)
+
+// Retrieves a key from the database and verifies that it is what is expected.
+func simulateGetHandler(tx *Tx, qdb *QuickDB) {
+ // Randomly retrieve an existing key path.
+ keys := qdb.Rand()
+ if len(keys) == 0 {
+ return
+ }
+
+ // Retrieve root bucket.
+ b := tx.Bucket(keys[0])
+ if b == nil {
+ panic(fmt.Sprintf("bucket[0] expected: %v\n", keys[0]))
+ }
+
+ // Drill into nested buckets.
+ for _, key := range keys[1 : len(keys)-1] {
+ b = b.Bucket(key)
+ if b == nil {
+ panic(fmt.Sprintf("bucket[n] expected: %v -> %v\n", keys, key))
+ }
+ }
+
+ // Verify key/value on the final bucket.
+ expected := qdb.Get(keys)
+ actual := b.Get(keys[len(keys)-1])
+ if !bytes.Equal(actual, expected) {
+ fmt.Println("=== EXPECTED ===")
+ fmt.Println(expected)
+ fmt.Println("=== ACTUAL ===")
+ fmt.Println(actual)
+ fmt.Println("=== END ===")
+ panic("value mismatch")
+ }
+}
+
+// Inserts a key into the database.
+func simulatePutHandler(tx *Tx, qdb *QuickDB) {
+ keys, value := randKeys(), randValue()
+
+ // Retrieve root bucket.
+ b := tx.Bucket(keys[0])
+ if b == nil {
+ if err := tx.CreateBucket(keys[0]); err != nil {
+ panic("create bucket: " + err.Error())
+ }
+ b = tx.Bucket(keys[0])
+ if b == nil {
+ panic(fmt.Sprintf("bucket[0] nil: %v", keys[0]))
+ }
+ }
+
+ // Create nested buckets, if necessary.
+ for _, key := range keys[1 : len(keys)-1] {
+ child := b.Bucket(key)
+ if child != nil {
+ b = child
+ } else {
+ if err := b.CreateBucket(key); err != nil {
+ panic("create bucket: " + err.Error())
+ }
+ b = b.Bucket(key)
+ }
+ }
+
+ // Insert into database.
+ if err := b.Put(keys[len(keys)-1], value); err != nil {
+ panic("put: " + err.Error())
+ }
+
+ // Insert into in-memory database.
+ qdb.Put(keys, value)
+}
+
+// QuickDB is an in-memory database that replicates the functionality of the
+// Bolt DB type. It is meant as an oracle for testing that the Bolt database
+// stays consistent.
+type QuickDB struct {
+ sync.RWMutex
+ m map[string]interface{}
+}
+
+// NewQuickDB returns an instance of QuickDB.
+func NewQuickDB() *QuickDB {
+ return &QuickDB{m: make(map[string]interface{})}
+}
+
+// Get retrieves the value at a key path.
+func (db *QuickDB) Get(keys [][]byte) []byte {
+ db.RLock()
+ defer db.RUnlock()
+
+ m := db.m
+ for _, key := range keys[:len(keys)-1] {
+ value := m[string(key)]
+ if value == nil {
+ return nil
+ }
+ switch value := value.(type) {
+ case map[string]interface{}:
+ m = value
+ case []byte:
+ return nil
+ }
+ }
+
+ // Only return if it's a simple value.
+ if value, ok := m[string(keys[len(keys)-1])].([]byte); ok {
+ return value
+ }
+ return nil
+}
+
+// Put inserts a value into a key path.
+func (db *QuickDB) Put(keys [][]byte, value []byte) {
+ db.Lock()
+ defer db.Unlock()
+
+ // Build buckets all the way down the key path.
+ m := db.m
+ for _, key := range keys[:len(keys)-1] {
+ if _, ok := m[string(key)].([]byte); ok {
+ return // Keypath intersects with a simple value. Do nothing.
+ }
+
+ if m[string(key)] == nil {
+ m[string(key)] = make(map[string]interface{})
+ }
+ m = m[string(key)].(map[string]interface{})
+ }
+
+ // Insert value into the last key.
+ m[string(keys[len(keys)-1])] = value
+}
+
+// Rand returns a random key path that points to a simple value.
+func (db *QuickDB) Rand() [][]byte {
+ db.RLock()
+ defer db.RUnlock()
+ if len(db.m) == 0 {
+ return nil
+ }
+ var keys [][]byte
+ db.rand(db.m, &keys)
+ return keys
+}
+
+func (db *QuickDB) rand(m map[string]interface{}, keys *[][]byte) {
+ i, index := 0, rand.Intn(len(m))
+ for k, v := range m {
+ if i == index {
+ *keys = append(*keys, []byte(k))
+ if v, ok := v.(map[string]interface{}); ok {
+ db.rand(v, keys)
+ }
+ return
+ }
+ i++
+ }
+ panic("quickdb rand: out-of-range")
+}
+
+// Copy copies the entire database.
+func (db *QuickDB) Copy() *QuickDB {
+ db.RLock()
+ defer db.RUnlock()
+ return &QuickDB{m: db.copy(db.m)}
+}
+
+func (db *QuickDB) copy(m map[string]interface{}) map[string]interface{} {
+ clone := make(map[string]interface{}, len(m))
+ for k, v := range m {
+ switch v := v.(type) {
+ case map[string]interface{}:
+ clone[k] = db.copy(v)
+ default:
+ clone[k] = v
+ }
+ }
+ return clone
+}
+
+func randKey() []byte {
+ var min, max = 1, 1024
+ n := rand.Intn(max-min) + min
+ b := make([]byte, n)
+ for i := 0; i < n; i++ {
+ b[i] = byte(rand.Intn(255))
+ }
+ return b
+}
+
+func randKeys() [][]byte {
+ var keys [][]byte
+ var count = rand.Intn(2) + 2
+ for i := 0; i < count; i++ {
+ keys = append(keys, randKey())
+ }
+ return keys
+}
+
+func randValue() []byte {
+ n := rand.Intn(8192)
+ b := make([]byte, n)
+ for i := 0; i < n; i++ {
+ b[i] = byte(rand.Intn(255))
+ }
+ return b
+}
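
The simulation above checks Bolt against QuickDB as an in-memory oracle: each
committed write transaction snapshots the oracle under its txid, and readers
compare Get() results against the snapshot matching their own txid. A minimal
sketch of the oracle in isolation (a hypothetical test, not part of this
patch):

package bolt

import (
	"bytes"
	"testing"
)

// Hypothetical sketch: a value written down a key path through the QuickDB
// oracle should read back identically.
func TestQuickDB_PutGet_Sketch(t *testing.T) {
	qdb := NewQuickDB()
	path := [][]byte{[]byte("widgets"), []byte("foo")}
	qdb.Put(path, []byte("bar"))
	if got := qdb.Get(path); !bytes.Equal(got, []byte("bar")) {
		t.Fatalf("oracle mismatch: got %q", got)
	}
}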
diff --git a/bucket.go b/bucket.go
index 919d33b..c01d2c5 100644
--- a/bucket.go
+++ b/bucket.go
@@ -3,6 +3,9 @@ package bolt
import (
"bytes"
"errors"
+ "fmt"
+ "sort"
+ "unsafe"
)
var (
@@ -16,14 +19,6 @@ var (
// ErrBucketNameRequired is returned when creating a bucket with a blank name.
ErrBucketNameRequired = errors.New("bucket name required")
- // ErrBucketNameTooLarge is returned when creating a bucket with a name
- // that is longer than MaxBucketNameSize.
- ErrBucketNameTooLarge = errors.New("bucket name too large")
-
- // ErrBucketNotWritable is returned when changing data on a bucket
- // reference that was created from a read-only transaction.
- ErrBucketNotWritable = errors.New("bucket not writable")
-
// ErrKeyRequired is returned when inserting a zero-length key.
ErrKeyRequired = errors.New("key required")
@@ -33,6 +28,11 @@ var (
// ErrValueTooLarge is returned when inserting a value that is larger than MaxValueSize.
ErrValueTooLarge = errors.New("value too large")
+ // ErrIncompatibleValue is returned when trying to create or delete a bucket
+ // on an existing non-bucket key or when trying to create or delete a
+ // non-bucket key on an existing bucket key.
+ ErrIncompatibleValue = errors.New("incompatible value")
+
// ErrSequenceOverflow is returned when the next sequence number will be
// larger than the maximum integer size.
ErrSequenceOverflow = errors.New("sequence overflow")
@@ -41,8 +41,10 @@ var (
// Bucket represents a collection of key/value pairs inside the database.
type Bucket struct {
*bucket
- name string
- tx *Tx
+ tx *Tx
+ buckets map[string]*Bucket
+ nodes map[pgid]*node
+ pending []*node
}
// bucket represents the on-file representation of a bucket.
@@ -51,9 +53,14 @@ type bucket struct {
sequence uint64
}
-// Name returns the name of the bucket.
-func (b *Bucket) Name() string {
- return b.name
+// newBucket returns a new bucket associated with a transaction.
+func newBucket(tx *Tx) Bucket {
+ var b = Bucket{tx: tx}
+ b.buckets = make(map[string]*Bucket)
+ if tx.writable {
+ b.nodes = make(map[pgid]*node)
+ }
+ return b
}
// Writable returns whether the bucket is writable.
@@ -70,17 +77,145 @@ func (b *Bucket) Cursor() *Cursor {
// Allocate and return a cursor.
return &Cursor{
- tx: b.tx,
- root: b.root,
- stack: make([]elemRef, 0),
+ bucket: b,
+ stack: make([]elemRef, 0),
+ }
+}
+
+// Bucket retrieves a nested bucket by name.
+// Returns nil if the bucket does not exist.
+func (b *Bucket) Bucket(name []byte) *Bucket {
+ if child := b.buckets[string(name)]; child != nil {
+ return child
+ }
+
+ // Move cursor to key.
+ c := b.Cursor()
+ k, v, flags := c.seek(name)
+
+ // Return nil if the key doesn't exist or it is not a bucket.
+ if !bytes.Equal(name, k) || (flags&bucketLeafFlag) == 0 {
+ return nil
+ }
+
+ // Otherwise create a bucket and cache it.
+ var child = newBucket(b.tx)
+ child.bucket = &bucket{}
+ *child.bucket = *(*bucket)(unsafe.Pointer(&v[0]))
+ b.buckets[string(name)] = &child
+
+ return &child
+}
+
+// CreateBucket creates a new bucket at the given key.
+// Returns an error if the key already exists, if the bucket name is blank, or if the bucket name is too long.
+func (b *Bucket) CreateBucket(key []byte) error {
+ if b.tx.db == nil {
+ return ErrTxClosed
+ } else if !b.tx.writable {
+ return ErrTxNotWritable
+ } else if len(key) == 0 {
+ return ErrBucketNameRequired
}
+
+ // Move cursor to correct position.
+ c := b.Cursor()
+ k, _, flags := c.seek(key)
+
+ // Return an error if there is an existing key.
+ if bytes.Equal(key, k) {
+ if (flags & bucketLeafFlag) != 0 {
+ return ErrBucketExists
+ } else {
+ return ErrIncompatibleValue
+ }
+ }
+
+ // Create a blank root leaf page.
+ p, err := b.tx.allocate(1)
+ if err != nil {
+ return err
+ }
+ p.flags = leafPageFlag
+
+ // Insert key/value.
+ value := make([]byte, unsafe.Sizeof(bucket{}))
+ bucket := (*bucket)(unsafe.Pointer(&value[0]))
+ bucket.root = p.id
+
+ // Insert into node.
+ c.node().put(key, key, value, 0, bucketLeafFlag)
+
+ return nil
+}
+
+// CreateBucketIfNotExists creates a new bucket if it doesn't already exist.
+// Returns an error if the bucket name is blank, or if the bucket name is too long.
+func (b *Bucket) CreateBucketIfNotExists(key []byte) error {
+ err := b.CreateBucket(key)
+ if err != nil && err != ErrBucketExists {
+ return err
+ }
+ return nil
+}
+
+// DeleteBucket deletes a bucket at the given key.
+// Returns an error if the bucket does not exist, or if the key represents a non-bucket value.
+func (b *Bucket) DeleteBucket(key []byte) error {
+ if b.tx.db == nil {
+ return ErrTxClosed
+ } else if !b.Writable() {
+ return ErrTxNotWritable
+ }
+
+ // Move cursor to correct position.
+ c := b.Cursor()
+ k, _, flags := c.seek(key)
+
+ // Return an error if bucket doesn't exist or is not a bucket.
+ if !bytes.Equal(key, k) {
+ return ErrBucketNotFound
+ } else if (flags & bucketLeafFlag) == 0 {
+ return ErrIncompatibleValue
+ }
+
+ // Recursively delete all child buckets.
+ child := b.Bucket(key)
+ err := child.ForEach(func(k, v []byte) error {
+ if v == nil {
+ if err := child.DeleteBucket(k); err != nil {
+ return fmt.Errorf("delete bucket: %s", err)
+ }
+ }
+ return nil
+ })
+ if err != nil {
+ return err
+ }
+
+ // Remove cached copy.
+ delete(b.buckets, string(key))
+
+ // Release all bucket pages to freelist.
+ b.tx.forEachPage(child.root, 0, func(p *page, _ int) {
+ b.tx.db.freelist.free(b.tx.id(), p)
+ })
+
+ // Delete the node if we have a matching key.
+ c.node().del(key)
+
+ return nil
}
// Get retrieves the value for a key in the bucket.
-// Returns a nil value if the key does not exist.
+// Returns a nil value if the key does not exist or if the key is a nested bucket.
func (b *Bucket) Get(key []byte) []byte {
- c := b.Cursor()
- k, v := c.Seek(key)
+ k, v, flags := b.Cursor().seek(key)
+
+ // Return nil if this is a bucket.
+ if (flags & bucketLeafFlag) != 0 {
+ return nil
+ }
// If our target node isn't the same key as what's passed in then return nil.
if !bytes.Equal(key, k) {
@@ -96,11 +231,8 @@ func (b *Bucket) Put(key []byte, value []byte) error {
if b.tx.db == nil {
return ErrTxClosed
} else if !b.Writable() {
- return ErrBucketNotWritable
- }
-
- // Validate the key and data size.
- if len(key) == 0 {
+ return ErrTxNotWritable
+ } else if len(key) == 0 {
return ErrKeyRequired
} else if len(key) > MaxKeySize {
return ErrKeyTooLarge
@@ -110,10 +242,15 @@ func (b *Bucket) Put(key []byte, value []byte) error {
// Move cursor to correct position.
c := b.Cursor()
- c.Seek(key)
+ k, _, flags := c.seek(key)
+
+ // Return an error if there is an existing key with a bucket value.
+ if bytes.Equal(key, k) && (flags&bucketLeafFlag) != 0 {
+ return ErrIncompatibleValue
+ }
- // Insert the key/value.
- c.node(b.tx).put(key, key, value, 0)
+ // Insert into node.
+ c.node().put(key, key, value, 0, 0)
return nil
}
@@ -125,15 +262,20 @@ func (b *Bucket) Delete(key []byte) error {
if b.tx.db == nil {
return ErrTxClosed
} else if !b.Writable() {
- return ErrBucketNotWritable
+ return ErrTxNotWritable
}
// Move cursor to correct position.
c := b.Cursor()
- c.Seek(key)
+ _, _, flags := c.seek(key)
+
+ // Return an error if there is an existing bucket value.
+ if (flags & bucketLeafFlag) != 0 {
+ return ErrIncompatibleValue
+ }
// Delete the node if we have a matching key.
- c.node(b.tx).del(key)
+ c.node().del(key)
return nil
}
@@ -143,7 +285,7 @@ func (b *Bucket) NextSequence() (int, error) {
if b.tx.db == nil {
return 0, ErrTxClosed
} else if !b.Writable() {
- return 0, ErrBucketNotWritable
+ return 0, ErrTxNotWritable
}
// Make sure next sequence number will not be larger than the maximum
@@ -194,6 +336,162 @@ func (b *Bucket) Stat() *BucketStat {
return s
}
+// spill writes all the nodes for this bucket to dirty pages.
+func (b *Bucket) spill() error {
+ // Spill all child buckets first.
+ for name, child := range b.buckets {
+ if err := child.spill(); err != nil {
+ return err
+ }
+
+ // Update the child bucket header in this bucket.
+ value := make([]byte, unsafe.Sizeof(bucket{}))
+ bucket := (*bucket)(unsafe.Pointer(&value[0]))
+ *bucket = *child.bucket
+
+ // Update parent node.
+ c := b.Cursor()
+ k, _, flags := c.seek([]byte(name))
+ _assert(bytes.Equal([]byte(name), k), "misplaced bucket header: %x -> %x", []byte(name), k)
+ _assert(flags&bucketLeafFlag != 0, "unexpected bucket header flag: %x", flags)
+ c.node().put([]byte(name), []byte(name), value, 0, bucketLeafFlag)
+ }
+
+ // Ignore if there are no nodes to spill.
+ if len(b.nodes) == 0 {
+ return nil
+ }
+
+ // Sort nodes by highest depth first.
+ nodes := make(nodesByDepth, 0, len(b.nodes))
+ for _, n := range b.nodes {
+ nodes = append(nodes, n)
+ }
+ sort.Sort(nodes)
+
+ // Spill nodes by deepest first.
+ for i := 0; i < len(nodes); i++ {
+ n := nodes[i]
+
+ // Split the node into appropriately sized nodes.
+ // The first node in this list will be a reference to n to preserve ancestry.
+ newNodes := n.split(b.tx.db.pageSize)
+ b.pending = newNodes
+
+ // If this is a root node that split then create a parent node.
+ if n.parent == nil && len(newNodes) > 1 {
+ n.parent = &node{bucket: b, isLeaf: false}
+ nodes = append(nodes, n.parent)
+ }
+
+ // Add node's page to the freelist.
+ if n.pgid > 0 {
+ b.tx.db.freelist.free(b.tx.id(), b.tx.page(n.pgid))
+ }
+
+ // Write nodes to dirty pages.
+ for i, newNode := range newNodes {
+ // Allocate contiguous space for the node.
+ p, err := b.tx.allocate((newNode.size() / b.tx.db.pageSize) + 1)
+ if err != nil {
+ return err
+ }
+
+ // Write the node to the page.
+ newNode.write(p)
+ newNode.pgid = p.id
+ newNode.parent = n.parent
+
+ // The first node should use the existing entry, other nodes are inserts.
+ var oldKey []byte
+ if i == 0 {
+ oldKey = n.key
+ } else {
+ oldKey = newNode.inodes[0].key
+ }
+
+ // Update the parent entry.
+ if newNode.parent != nil {
+ newNode.parent.put(oldKey, newNode.inodes[0].key, nil, newNode.pgid, 0)
+ }
+
+ // Update the statistics.
+ b.tx.stats.Spill++
+ }
+
+ b.pending = nil
+ }
+
+ // Clear out nodes now that they are all spilled.
+ b.nodes = make(map[pgid]*node)
+
+ // Update the root node for this bucket.
+ b.root = nodes[len(nodes)-1].pgid
+
+ return nil
+}
+
+// rebalance attempts to balance all nodes.
+func (b *Bucket) rebalance() {
+ for _, n := range b.nodes {
+ n.rebalance()
+ }
+ for _, child := range b.buckets {
+ child.rebalance()
+ }
+}
+
+// node creates a node from a page and associates it with a given parent.
+func (b *Bucket) node(pgid pgid, parent *node) *node {
+ _assert(b.nodes != nil, "nodes map expected")
+ // Retrieve node if it's already been created.
+ if n := b.nodes[pgid]; n != nil {
+ return n
+ }
+
+ // Otherwise create a branch and cache it.
+ n := &node{bucket: b, parent: parent}
+ if n.parent != nil {
+ n.depth = n.parent.depth + 1
+ }
+ n.read(b.tx.page(pgid))
+ b.nodes[pgid] = n
+
+ // Update statistics.
+ b.tx.stats.NodeCount++
+
+ return n
+}
+
+// dereference removes all references to the old mmap.
+func (b *Bucket) dereference() {
+ for _, n := range b.nodes {
+ n.dereference()
+ }
+
+ for _, n := range b.pending {
+ n.dereference()
+ }
+
+ for _, child := range b.buckets {
+ child.dereference()
+ }
+
+ // Update statistics
+ b.tx.stats.NodeDeref += len(b.nodes) + len(b.pending)
+}
+
+// pageNode returns the in-memory node, if it exists.
+// Otherwise returns the underlying page.
+func (b *Bucket) pageNode(id pgid) (*page, *node) {
+ if b.nodes != nil {
+ if n := b.nodes[id]; n != nil {
+ return nil, n
+ }
+ }
+ return b.tx.page(id), nil
+}
+
// BucketStat represents stats on a bucket such as branch pages and leaf pages.
type BucketStat struct {
BranchPageCount int
@@ -202,9 +500,3 @@ type BucketStat struct {
KeyCount int
MaxDepth int
}
-
-type bucketsByName []*Bucket
-
-func (s bucketsByName) Len() int { return len(s) }
-func (s bucketsByName) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
-func (s bucketsByName) Less(i, j int) bool { return s[i].name < s[j].name }
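
Because nested buckets now share a keyspace with plain values, the new
ErrIncompatibleValue guards both directions: value operations on a bucket key
and bucket operations on a value key. A sketch of the contract (a hypothetical
test mirroring the ones below, assuming the withOpenDB helper from this
repository's test suite):

package bolt

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

// Hypothetical sketch of the ErrIncompatibleValue contract.
func TestBucket_IncompatibleValue_Sketch(t *testing.T) {
	withOpenDB(func(db *DB, path string) {
		db.Update(func(tx *Tx) error {
			assert.NoError(t, tx.CreateBucket([]byte("widgets")))
			b := tx.Bucket([]byte("widgets"))

			// A bucket key rejects plain-value operations...
			assert.NoError(t, b.CreateBucket([]byte("child")))
			assert.Equal(t, ErrIncompatibleValue, b.Put([]byte("child"), []byte("x")))
			assert.Equal(t, ErrIncompatibleValue, b.Delete([]byte("child")))

			// ...and a plain-value key rejects bucket operations.
			assert.NoError(t, b.Put([]byte("plain"), []byte("x")))
			assert.Equal(t, ErrIncompatibleValue, b.CreateBucket([]byte("plain")))
			assert.Equal(t, ErrIncompatibleValue, b.DeleteBucket([]byte("plain")))
			return nil
		})
	})
}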
diff --git a/bucket_test.go b/bucket_test.go
index c49a923..75cbbe8 100644
--- a/bucket_test.go
+++ b/bucket_test.go
@@ -14,11 +14,11 @@ import (
)
// Ensure that a bucket that gets a non-existent key returns nil.
-func TestBucketGetNonExistent(t *testing.T) {
+func TestBucket_Get_NonExistent(t *testing.T) {
withOpenDB(func(db *DB, path string) {
db.Update(func(tx *Tx) error {
- tx.CreateBucket("widgets")
- value := tx.Bucket("widgets").Get([]byte("foo"))
+ tx.CreateBucket([]byte("widgets"))
+ value := tx.Bucket([]byte("widgets")).Get([]byte("foo"))
assert.Nil(t, value)
return nil
})
@@ -26,11 +26,11 @@ func TestBucketGetNonExistent(t *testing.T) {
}
// Ensure that a bucket can read a value that is not flushed yet.
-func TestBucketGetFromNode(t *testing.T) {
+func TestBucket_Get_FromNode(t *testing.T) {
withOpenDB(func(db *DB, path string) {
db.Update(func(tx *Tx) error {
- tx.CreateBucket("widgets")
- b := tx.Bucket("widgets")
+ tx.CreateBucket([]byte("widgets"))
+ b := tx.Bucket([]byte("widgets"))
b.Put([]byte("foo"), []byte("bar"))
value := b.Get([]byte("foo"))
assert.Equal(t, value, []byte("bar"))
@@ -39,84 +39,278 @@ func TestBucketGetFromNode(t *testing.T) {
})
}
+// Ensure that retrieving a bucket via Get() returns nil.
+func TestBucket_Get_IncompatibleValue(t *testing.T) {
+ withOpenDB(func(db *DB, path string) {
+ db.Update(func(tx *Tx) error {
+ tx.CreateBucket([]byte("widgets"))
+ assert.NoError(t, tx.Bucket([]byte("widgets")).CreateBucket([]byte("foo")))
+ assert.Nil(t, tx.Bucket([]byte("widgets")).Get([]byte("foo")))
+ return nil
+ })
+ })
+}
+
// Ensure that a bucket can write a key/value.
-func TestBucketPut(t *testing.T) {
+func TestBucket_Put(t *testing.T) {
withOpenDB(func(db *DB, path string) {
db.Update(func(tx *Tx) error {
- tx.CreateBucket("widgets")
- err := tx.Bucket("widgets").Put([]byte("foo"), []byte("bar"))
+ tx.CreateBucket([]byte("widgets"))
+ err := tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar"))
assert.NoError(t, err)
- value := tx.Bucket("widgets").Get([]byte("foo"))
+ value := tx.Bucket([]byte("widgets")).Get([]byte("foo"))
assert.Equal(t, value, []byte("bar"))
return nil
})
})
}
+// Ensure that a bucket can rewrite a key in the same transaction.
+func TestBucket_Put_Repeat(t *testing.T) {
+ withOpenDB(func(db *DB, path string) {
+ db.Update(func(tx *Tx) error {
+ tx.CreateBucket([]byte("widgets"))
+ b := tx.Bucket([]byte("widgets"))
+ assert.NoError(t, b.Put([]byte("foo"), []byte("bar")))
+ assert.NoError(t, b.Put([]byte("foo"), []byte("baz")))
+ value := tx.Bucket([]byte("widgets")).Get([]byte("foo"))
+ assert.Equal(t, value, []byte("baz"))
+ return nil
+ })
+ })
+}
+
+// Ensure that a bucket can write a bunch of large values.
+func TestBucket_Put_Large(t *testing.T) {
+ var count = 100
+ var factor = 200
+ withOpenDB(func(db *DB, path string) {
+ db.Update(func(tx *Tx) error {
+ tx.CreateBucket([]byte("widgets"))
+ b := tx.Bucket([]byte("widgets"))
+ for i := 1; i < count; i++ {
+ assert.NoError(t, b.Put([]byte(strings.Repeat("0", i*factor)), []byte(strings.Repeat("X", (count-i)*factor))))
+ }
+ return nil
+ })
+ db.View(func(tx *Tx) error {
+ b := tx.Bucket([]byte("widgets"))
+ for i := 1; i < count; i++ {
+ value := b.Get([]byte(strings.Repeat("0", i*factor)))
+ assert.Equal(t, []byte(strings.Repeat("X", (count-i)*factor)), value)
+ }
+ return nil
+ })
+ })
+}
+
+// Ensure that setting a value on a key with a bucket value returns an error.
+func TestBucket_Put_IncompatibleValue(t *testing.T) {
+ withOpenDB(func(db *DB, path string) {
+ db.Update(func(tx *Tx) error {
+ tx.CreateBucket([]byte("widgets"))
+ assert.NoError(t, tx.Bucket([]byte("widgets")).CreateBucket([]byte("foo")))
+ assert.Equal(t, ErrIncompatibleValue, tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar")))
+ return nil
+ })
+ })
+}
+
+// Ensure that setting a value while the transaction is closed returns an error.
+func TestBucket_Put_Closed(t *testing.T) {
+ withOpenDB(func(db *DB, path string) {
+ tx, _ := db.Begin(true)
+ tx.CreateBucket([]byte("widgets"))
+ b := tx.Bucket([]byte("widgets"))
+ tx.Rollback()
+ assert.Equal(t, ErrTxClosed, b.Put([]byte("foo"), []byte("bar")))
+ })
+}
+
// Ensure that setting a value on a read-only bucket returns an error.
-func TestBucketPutReadOnly(t *testing.T) {
+func TestBucket_Put_ReadOnly(t *testing.T) {
withOpenDB(func(db *DB, path string) {
db.Update(func(tx *Tx) error {
- tx.CreateBucket("widgets")
+ assert.NoError(t, tx.CreateBucket([]byte("widgets")))
return nil
})
db.View(func(tx *Tx) error {
- b := tx.Bucket("widgets")
+ b := tx.Bucket([]byte("widgets"))
err := b.Put([]byte("foo"), []byte("bar"))
- assert.Equal(t, err, ErrBucketNotWritable)
+ assert.Equal(t, err, ErrTxNotWritable)
return nil
})
})
}
// Ensure that a bucket can delete an existing key.
-func TestBucketDelete(t *testing.T) {
+func TestBucket_Delete(t *testing.T) {
withOpenDB(func(db *DB, path string) {
db.Update(func(tx *Tx) error {
- tx.CreateBucket("widgets")
- tx.Bucket("widgets").Put([]byte("foo"), []byte("bar"))
- err := tx.Bucket("widgets").Delete([]byte("foo"))
+ tx.CreateBucket([]byte("widgets"))
+ tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar"))
+ err := tx.Bucket([]byte("widgets")).Delete([]byte("foo"))
assert.NoError(t, err)
- value := tx.Bucket("widgets").Get([]byte("foo"))
+ value := tx.Bucket([]byte("widgets")).Get([]byte("foo"))
assert.Nil(t, value)
return nil
})
})
}
+// Ensure that deleting a bucket using Delete() returns an error.
+func TestBucket_Delete_Bucket(t *testing.T) {
+ withOpenDB(func(db *DB, path string) {
+ db.Update(func(tx *Tx) error {
+ tx.CreateBucket([]byte("widgets"))
+ b := tx.Bucket([]byte("widgets"))
+ assert.NoError(t, b.CreateBucket([]byte("foo")))
+ assert.Equal(t, ErrIncompatibleValue, b.Delete([]byte("foo")))
+ return nil
+ })
+ })
+}
+
// Ensure that deleting a key on a read-only bucket returns an error.
-func TestBucketDeleteReadOnly(t *testing.T) {
+func TestBucket_Delete_ReadOnly(t *testing.T) {
withOpenDB(func(db *DB, path string) {
db.Update(func(tx *Tx) error {
- tx.CreateBucket("widgets")
+ tx.CreateBucket([]byte("widgets"))
return nil
})
db.View(func(tx *Tx) error {
- b := tx.Bucket("widgets")
+ b := tx.Bucket([]byte("widgets"))
err := b.Delete([]byte("foo"))
- assert.Equal(t, err, ErrBucketNotWritable)
+ assert.Equal(t, err, ErrTxNotWritable)
+ return nil
+ })
+ })
+}
+
+// Ensure that deleting a value while the transaction is closed returns an error.
+func TestBucket_Delete_Closed(t *testing.T) {
+ withOpenDB(func(db *DB, path string) {
+ tx, _ := db.Begin(true)
+ tx.CreateBucket([]byte("widgets"))
+ b := tx.Bucket([]byte("widgets"))
+ tx.Rollback()
+ assert.Equal(t, ErrTxClosed, b.Delete([]byte("foo")))
+ })
+}
+
+// Ensure that deleting a bucket causes nested buckets to be deleted.
+func TestBucket_DeleteBucket_Nested(t *testing.T) {
+ withOpenDB(func(db *DB, path string) {
+ db.Update(func(tx *Tx) error {
+ tx.CreateBucket([]byte("widgets"))
+ assert.NoError(t, tx.Bucket([]byte("widgets")).CreateBucket([]byte("foo")))
+ assert.NoError(t, tx.Bucket([]byte("widgets")).Bucket([]byte("foo")).CreateBucket([]byte("bar")))
+ assert.NoError(t, tx.Bucket([]byte("widgets")).Bucket([]byte("foo")).Bucket([]byte("bar")).Put([]byte("baz"), []byte("bat")))
+ assert.NoError(t, tx.Bucket([]byte("widgets")).DeleteBucket([]byte("foo")))
+ return nil
+ })
+ })
+}
+
+// Ensure that deleting a bucket causes nested buckets to be deleted after they have been committed.
+func TestBucket_DeleteBucket_Nested2(t *testing.T) {
+ withOpenDB(func(db *DB, path string) {
+ db.Update(func(tx *Tx) error {
+ tx.CreateBucket([]byte("widgets"))
+ assert.NoError(t, tx.Bucket([]byte("widgets")).CreateBucket([]byte("foo")))
+ assert.NoError(t, tx.Bucket([]byte("widgets")).Bucket([]byte("foo")).CreateBucket([]byte("bar")))
+ assert.NoError(t, tx.Bucket([]byte("widgets")).Bucket([]byte("foo")).Bucket([]byte("bar")).Put([]byte("baz"), []byte("bat")))
+ return nil
+ })
+ db.Update(func(tx *Tx) error {
+ assert.NotNil(t, tx.Bucket([]byte("widgets")))
+ assert.NotNil(t, tx.Bucket([]byte("widgets")).Bucket([]byte("foo")))
+ assert.NotNil(t, tx.Bucket([]byte("widgets")).Bucket([]byte("foo")).Bucket([]byte("bar")))
+ assert.Equal(t, []byte("bat"), tx.Bucket([]byte("widgets")).Bucket([]byte("foo")).Bucket([]byte("bar")).Get([]byte("baz")))
+ assert.NoError(t, tx.DeleteBucket([]byte("widgets")))
+ return nil
+ })
+ db.View(func(tx *Tx) error {
+ assert.Nil(t, tx.Bucket([]byte("widgets")))
+ return nil
+ })
+ })
+}
+
+// Ensure that deleting a child bucket with multiple pages causes all pages to get collected.
+func TestBucket_DeleteBucket_Large(t *testing.T) {
+ withOpenDB(func(db *DB, path string) {
+ db.Update(func(tx *Tx) error {
+ assert.NoError(t, tx.CreateBucket([]byte("widgets")))
+ assert.NoError(t, tx.Bucket([]byte("widgets")).CreateBucket([]byte("foo")))
+ b := tx.Bucket([]byte("widgets")).Bucket([]byte("foo"))
+ for i := 0; i < 1000; i++ {
+ assert.NoError(t, b.Put([]byte(fmt.Sprintf("%d", i)), []byte(fmt.Sprintf("%0100d", i))))
+ }
+ return nil
+ })
+ db.Update(func(tx *Tx) error {
+ assert.NoError(t, tx.DeleteBucket([]byte("widgets")))
+ return nil
+ })
+
+ // NOTE: Consistency check in withOpenDB() will error if pages not freed properly.
+ })
+}
+
+// Ensure that retrieving a simple value via Bucket() returns nil.
+func TestBucket_Bucket_IncompatibleValue(t *testing.T) {
+ withOpenDB(func(db *DB, path string) {
+ db.Update(func(tx *Tx) error {
+ tx.CreateBucket([]byte("widgets"))
+ assert.NoError(t, tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar")))
+ assert.Nil(t, tx.Bucket([]byte("widgets")).Bucket([]byte("foo")))
+ return nil
+ })
+ })
+}
+
+// Ensure that creating a bucket on an existing non-bucket key returns an error.
+func TestBucket_CreateBucket_IncompatibleValue(t *testing.T) {
+ withOpenDB(func(db *DB, path string) {
+ db.Update(func(tx *Tx) error {
+ assert.NoError(t, tx.CreateBucket([]byte("widgets")))
+ assert.NoError(t, tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar")))
+ assert.Equal(t, ErrIncompatibleValue, tx.Bucket([]byte("widgets")).CreateBucket([]byte("foo")))
+ return nil
+ })
+ })
+}
+
+// Ensure that deleting a bucket on an existing non-bucket key returns an error.
+func TestBucket_DeleteBucket_IncompatibleValue(t *testing.T) {
+ withOpenDB(func(db *DB, path string) {
+ db.Update(func(tx *Tx) error {
+ assert.NoError(t, tx.CreateBucket([]byte("widgets")))
+ assert.NoError(t, tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar")))
+ assert.Equal(t, ErrIncompatibleValue, tx.Bucket([]byte("widgets")).DeleteBucket([]byte("foo")))
return nil
})
})
}
// Ensure that a bucket can return an autoincrementing sequence.
-func TestBucketNextSequence(t *testing.T) {
+func TestBucket_NextSequence(t *testing.T) {
withOpenDB(func(db *DB, path string) {
db.Update(func(tx *Tx) error {
- tx.CreateBucket("widgets")
- tx.CreateBucket("woojits")
+ tx.CreateBucket([]byte("widgets"))
+ tx.CreateBucket([]byte("woojits"))
// Make sure sequence increments.
- seq, err := tx.Bucket("widgets").NextSequence()
+ seq, err := tx.Bucket([]byte("widgets")).NextSequence()
assert.NoError(t, err)
assert.Equal(t, seq, 1)
- seq, err = tx.Bucket("widgets").NextSequence()
+ seq, err = tx.Bucket([]byte("widgets")).NextSequence()
assert.NoError(t, err)
assert.Equal(t, seq, 2)
// Buckets should be separate.
- seq, err = tx.Bucket("woojits").NextSequence()
+ seq, err = tx.Bucket([]byte("woojits")).NextSequence()
assert.NoError(t, err)
assert.Equal(t, seq, 1)
return nil
@@ -125,31 +319,31 @@ func TestBucketNextSequence(t *testing.T) {
}
// Ensure that retrieving the next sequence on a read-only bucket returns an error.
-func TestBucketNextSequenceReadOnly(t *testing.T) {
+func TestBucket_NextSequence_ReadOnly(t *testing.T) {
withOpenDB(func(db *DB, path string) {
db.Update(func(tx *Tx) error {
- tx.CreateBucket("widgets")
+ tx.CreateBucket([]byte("widgets"))
return nil
})
db.View(func(tx *Tx) error {
- b := tx.Bucket("widgets")
+ b := tx.Bucket([]byte("widgets"))
i, err := b.NextSequence()
assert.Equal(t, i, 0)
- assert.Equal(t, err, ErrBucketNotWritable)
+ assert.Equal(t, err, ErrTxNotWritable)
return nil
})
})
}
// Ensure that incrementing past the maximum sequence number will return an error.
-func TestBucketNextSequenceOverflow(t *testing.T) {
+func TestBucket_NextSequence_Overflow(t *testing.T) {
withOpenDB(func(db *DB, path string) {
db.Update(func(tx *Tx) error {
- tx.CreateBucket("widgets")
+ tx.CreateBucket([]byte("widgets"))
return nil
})
db.Update(func(tx *Tx) error {
- b := tx.Bucket("widgets")
+ b := tx.Bucket([]byte("widgets"))
b.bucket.sequence = uint64(maxInt)
seq, err := b.NextSequence()
assert.Equal(t, err, ErrSequenceOverflow)
@@ -159,17 +353,29 @@ func TestBucketNextSequenceOverflow(t *testing.T) {
})
}
-// Ensure a database can loop over all key/value pairs in a bucket.
-func TestBucketForEach(t *testing.T) {
+// Ensure that retrieving the next sequence for a bucket on a closed database returns an error.
+func TestBucket_NextSequence_Closed(t *testing.T) {
+ withOpenDB(func(db *DB, path string) {
+ tx, _ := db.Begin(true)
+ tx.CreateBucket([]byte("widgets"))
+ b := tx.Bucket([]byte("widgets"))
+ tx.Rollback()
+ _, err := b.NextSequence()
+ assert.Equal(t, ErrTxClosed, err)
+ })
+}
+
+// Ensure a user can loop over all key/value pairs in a bucket.
+func TestBucket_ForEach(t *testing.T) {
withOpenDB(func(db *DB, path string) {
db.Update(func(tx *Tx) error {
- tx.CreateBucket("widgets")
- tx.Bucket("widgets").Put([]byte("foo"), []byte("0000"))
- tx.Bucket("widgets").Put([]byte("baz"), []byte("0001"))
- tx.Bucket("widgets").Put([]byte("bar"), []byte("0002"))
+ tx.CreateBucket([]byte("widgets"))
+ tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("0000"))
+ tx.Bucket([]byte("widgets")).Put([]byte("baz"), []byte("0001"))
+ tx.Bucket([]byte("widgets")).Put([]byte("bar"), []byte("0002"))
var index int
- err := tx.Bucket("widgets").ForEach(func(k, v []byte) error {
+ err := tx.Bucket([]byte("widgets")).ForEach(func(k, v []byte) error {
switch index {
case 0:
assert.Equal(t, k, []byte("bar"))
@@ -192,16 +398,16 @@ func TestBucketForEach(t *testing.T) {
}
// Ensure a database can stop iteration early.
-func TestBucketForEachShortCircuit(t *testing.T) {
+func TestBucket_ForEach_ShortCircuit(t *testing.T) {
withOpenDB(func(db *DB, path string) {
db.Update(func(tx *Tx) error {
- tx.CreateBucket("widgets")
- tx.Bucket("widgets").Put([]byte("bar"), []byte("0000"))
- tx.Bucket("widgets").Put([]byte("baz"), []byte("0000"))
- tx.Bucket("widgets").Put([]byte("foo"), []byte("0000"))
+ tx.CreateBucket([]byte("widgets"))
+ tx.Bucket([]byte("widgets")).Put([]byte("bar"), []byte("0000"))
+ tx.Bucket([]byte("widgets")).Put([]byte("baz"), []byte("0000"))
+ tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("0000"))
var index int
- err := tx.Bucket("widgets").ForEach(func(k, v []byte) error {
+ err := tx.Bucket([]byte("widgets")).ForEach(func(k, v []byte) error {
index++
if bytes.Equal(k, []byte("baz")) {
return errors.New("marker")
@@ -215,14 +421,26 @@ func TestBucketForEachShortCircuit(t *testing.T) {
})
}
+// Ensure that looping over a bucket on a closed database returns an error.
+func TestBucket_ForEach_Closed(t *testing.T) {
+ withOpenDB(func(db *DB, path string) {
+ tx, _ := db.Begin(true)
+ tx.CreateBucket([]byte("widgets"))
+ b := tx.Bucket([]byte("widgets"))
+ tx.Rollback()
+ err := b.ForEach(func(k, v []byte) error { return nil })
+ assert.Equal(t, ErrTxClosed, err)
+ })
+}
+
// Ensure that an error is returned when inserting with an empty key.
-func TestBucketPutEmptyKey(t *testing.T) {
+func TestBucket_Put_EmptyKey(t *testing.T) {
withOpenDB(func(db *DB, path string) {
db.Update(func(tx *Tx) error {
- tx.CreateBucket("widgets")
- err := tx.Bucket("widgets").Put([]byte(""), []byte("bar"))
+ tx.CreateBucket([]byte("widgets"))
+ err := tx.Bucket([]byte("widgets")).Put([]byte(""), []byte("bar"))
assert.Equal(t, err, ErrKeyRequired)
- err = tx.Bucket("widgets").Put(nil, []byte("bar"))
+ err = tx.Bucket([]byte("widgets")).Put(nil, []byte("bar"))
assert.Equal(t, err, ErrKeyRequired)
return nil
})
@@ -230,11 +448,11 @@ func TestBucketPutEmptyKey(t *testing.T) {
}
// Ensure that an error is returned when inserting with a key that's too large.
-func TestBucketPutKeyTooLarge(t *testing.T) {
+func TestBucket_Put_KeyTooLarge(t *testing.T) {
withOpenDB(func(db *DB, path string) {
db.Update(func(tx *Tx) error {
- tx.CreateBucket("widgets")
- err := tx.Bucket("widgets").Put(make([]byte, 32769), []byte("bar"))
+ tx.CreateBucket([]byte("widgets"))
+ err := tx.Bucket([]byte("widgets")).Put(make([]byte, 32769), []byte("bar"))
assert.Equal(t, err, ErrKeyTooLarge)
return nil
})
@@ -242,54 +460,35 @@ func TestBucketPutKeyTooLarge(t *testing.T) {
}
// Ensure a bucket can calculate stats.
-func TestBucketStat(t *testing.T) {
- if testing.Short() {
- t.Skip("skipping test in short mode.")
- }
-
+func TestBucket_Stat(t *testing.T) {
withOpenDB(func(db *DB, path string) {
db.Update(func(tx *Tx) error {
- // Add bucket with lots of keys.
- tx.CreateBucket("widgets")
- b := tx.Bucket("widgets")
- for i := 0; i < 100000; i++ {
- b.Put([]byte(strconv.Itoa(i)), []byte(strconv.Itoa(i)))
- }
-
// Add bucket with fewer keys but one big value.
- tx.CreateBucket("woojits")
- b = tx.Bucket("woojits")
+ assert.NoError(t, tx.CreateBucket([]byte("woojits")))
+ b := tx.Bucket([]byte("woojits"))
for i := 0; i < 500; i++ {
b.Put([]byte(strconv.Itoa(i)), []byte(strconv.Itoa(i)))
}
b.Put([]byte("really-big-value"), []byte(strings.Repeat("*", 10000)))
// Add a bucket that fits on a single root leaf.
- tx.CreateBucket("whozawhats")
- b = tx.Bucket("whozawhats")
+ assert.NoError(t, tx.CreateBucket([]byte("whozawhats")))
+ b = tx.Bucket([]byte("whozawhats"))
b.Put([]byte("foo"), []byte("bar"))
return nil
})
mustCheck(db)
db.View(func(tx *Tx) error {
- b := tx.Bucket("widgets")
+ b := tx.Bucket([]byte("woojits"))
stat := b.Stat()
- assert.Equal(t, stat.BranchPageCount, 15)
- assert.Equal(t, stat.LeafPageCount, 1281)
- assert.Equal(t, stat.OverflowPageCount, 0)
- assert.Equal(t, stat.KeyCount, 100000)
- assert.Equal(t, stat.MaxDepth, 3)
-
- b = tx.Bucket("woojits")
- stat = b.Stat()
assert.Equal(t, stat.BranchPageCount, 1)
assert.Equal(t, stat.LeafPageCount, 6)
assert.Equal(t, stat.OverflowPageCount, 2)
assert.Equal(t, stat.KeyCount, 501)
assert.Equal(t, stat.MaxDepth, 2)
- b = tx.Bucket("whozawhats")
+ b = tx.Bucket([]byte("whozawhats"))
stat = b.Stat()
assert.Equal(t, stat.BranchPageCount, 0)
assert.Equal(t, stat.LeafPageCount, 1)
@@ -302,8 +501,38 @@ func TestBucketStat(t *testing.T) {
})
}
+// Ensure a large bucket can calculate stats.
+func TestBucket_Stat_Large(t *testing.T) {
+ if testing.Short() {
+ t.Skip("skipping test in short mode.")
+ }
+
+ withOpenDB(func(db *DB, path string) {
+ db.Update(func(tx *Tx) error {
+ // Add bucket with lots of keys.
+ tx.CreateBucket([]byte("widgets"))
+ b := tx.Bucket([]byte("widgets"))
+ for i := 0; i < 100000; i++ {
+ b.Put([]byte(strconv.Itoa(i)), []byte(strconv.Itoa(i)))
+ }
+ return nil
+ })
+ mustCheck(db)
+ db.View(func(tx *Tx) error {
+ b := tx.Bucket([]byte("widgets"))
+ stat := b.Stat()
+ assert.Equal(t, stat.BranchPageCount, 15)
+ assert.Equal(t, stat.LeafPageCount, 1281)
+ assert.Equal(t, stat.OverflowPageCount, 0)
+ assert.Equal(t, stat.KeyCount, 100000)
+ assert.Equal(t, stat.MaxDepth, 3)
+ return nil
+ })
+ })
+}
+
// Ensure that a bucket can write random keys and values across multiple transactions.
-func TestBucketPutSingle(t *testing.T) {
+func TestBucket_Put_Single(t *testing.T) {
if testing.Short() {
t.Skip("skipping test in short mode.")
}
@@ -314,11 +543,11 @@ func TestBucketPutSingle(t *testing.T) {
m := make(map[string][]byte)
db.Update(func(tx *Tx) error {
- return tx.CreateBucket("widgets")
+ return tx.CreateBucket([]byte("widgets"))
})
for _, item := range items {
db.Update(func(tx *Tx) error {
- if err := tx.Bucket("widgets").Put(item.Key, item.Value); err != nil {
+ if err := tx.Bucket([]byte("widgets")).Put(item.Key, item.Value); err != nil {
panic("put error: " + err.Error())
}
m[string(item.Key)] = item.Value
@@ -329,10 +558,10 @@ func TestBucketPutSingle(t *testing.T) {
db.View(func(tx *Tx) error {
i := 0
for k, v := range m {
- value := tx.Bucket("widgets").Get([]byte(k))
+ value := tx.Bucket([]byte("widgets")).Get([]byte(k))
if !bytes.Equal(value, v) {
- db.CopyFile("/tmp/bolt.put.single.db", 0666)
- t.Fatalf("value mismatch [run %d] (%d of %d):\nkey: %x\ngot: %x\nexp: %x", index, i, len(m), []byte(k), value, v)
+ t.Logf("value mismatch [run %d] (%d of %d):\nkey: %x\ngot: %x\nexp: %x", index, i, len(m), []byte(k), value, v)
+ copyAndFailNow(t, db)
}
i++
}
@@ -347,11 +576,10 @@ func TestBucketPutSingle(t *testing.T) {
if err := quick.Check(f, qconfig()); err != nil {
t.Error(err)
}
- fmt.Fprint(os.Stderr, "\n")
}
// Ensure that a transaction can insert multiple key/value pairs at once.
-func TestBucketPutMultiple(t *testing.T) {
+func TestBucket_Put_Multiple(t *testing.T) {
if testing.Short() {
t.Skip("skipping test in short mode.")
}
@@ -360,10 +588,10 @@ func TestBucketPutMultiple(t *testing.T) {
withOpenDB(func(db *DB, path string) {
// Bulk insert all values.
db.Update(func(tx *Tx) error {
- return tx.CreateBucket("widgets")
+ return tx.CreateBucket([]byte("widgets"))
})
err := db.Update(func(tx *Tx) error {
- b := tx.Bucket("widgets")
+ b := tx.Bucket([]byte("widgets"))
for _, item := range items {
assert.NoError(t, b.Put(item.Key, item.Value))
}
@@ -373,12 +601,11 @@ func TestBucketPutMultiple(t *testing.T) {
// Verify all items exist.
db.View(func(tx *Tx) error {
- b := tx.Bucket("widgets")
+ b := tx.Bucket([]byte("widgets"))
for _, item := range items {
value := b.Get(item.Key)
if !assert.Equal(t, item.Value, value) {
- db.CopyFile("/tmp/bolt.put.multiple.db", 0666)
- t.FailNow()
+ copyAndFailNow(t, db)
}
}
return nil
@@ -389,11 +616,10 @@ func TestBucketPutMultiple(t *testing.T) {
if err := quick.Check(f, qconfig()); err != nil {
t.Error(err)
}
- fmt.Fprint(os.Stderr, "\n")
}
// Ensure that a transaction can delete all key/value pairs and return to a single leaf page.
-func TestBucketDeleteQuick(t *testing.T) {
+func TestBucket_Delete_Quick(t *testing.T) {
if testing.Short() {
t.Skip("skipping test in short mode.")
}
@@ -402,10 +628,10 @@ func TestBucketDeleteQuick(t *testing.T) {
withOpenDB(func(db *DB, path string) {
// Bulk insert all values.
db.Update(func(tx *Tx) error {
- return tx.CreateBucket("widgets")
+ return tx.CreateBucket([]byte("widgets"))
})
err := db.Update(func(tx *Tx) error {
- b := tx.Bucket("widgets")
+ b := tx.Bucket([]byte("widgets"))
for _, item := range items {
assert.NoError(t, b.Put(item.Key, item.Value))
}
@@ -416,13 +642,13 @@ func TestBucketDeleteQuick(t *testing.T) {
// Remove items one at a time and check consistency.
for i, item := range items {
err := db.Update(func(tx *Tx) error {
- return tx.Bucket("widgets").Delete(item.Key)
+ return tx.Bucket([]byte("widgets")).Delete(item.Key)
})
assert.NoError(t, err)
// Anything before our deletion index should be nil.
db.View(func(tx *Tx) error {
- b := tx.Bucket("widgets")
+ b := tx.Bucket([]byte("widgets"))
for j, exp := range items {
if j > i {
value := b.Get(exp.Key)
@@ -445,5 +671,99 @@ func TestBucketDeleteQuick(t *testing.T) {
if err := quick.Check(f, qconfig()); err != nil {
t.Error(err)
}
- fmt.Fprint(os.Stderr, "\n")
+}
+
+func ExampleBucket_Put() {
+ // Open the database.
+ db, _ := Open(tempfile(), 0666)
+ defer os.Remove(db.Path())
+ defer db.Close()
+
+ // Start a write transaction.
+ db.Update(func(tx *Tx) error {
+ // Create a bucket.
+ tx.CreateBucket([]byte("widgets"))
+
+ // Set the value "bar" for the key "foo".
+ tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar"))
+ return nil
+ })
+
+ // Read value back in a different read-only transaction.
+ db.View(func(tx *Tx) error {
+ value := tx.Bucket([]byte("widgets")).Get([]byte("foo"))
+ fmt.Printf("The value of 'foo' is: %s\n", string(value))
+ return nil
+ })
+
+ // Output:
+ // The value of 'foo' is: bar
+}
+
+func ExampleBucket_Delete() {
+ // Open the database.
+ db, _ := Open(tempfile(), 0666)
+ defer os.Remove(db.Path())
+ defer db.Close()
+
+ // Start a write transaction.
+ db.Update(func(tx *Tx) error {
+ // Create a bucket.
+ tx.CreateBucket([]byte("widgets"))
+ b := tx.Bucket([]byte("widgets"))
+
+ // Set the value "bar" for the key "foo".
+ b.Put([]byte("foo"), []byte("bar"))
+
+ // Retrieve the key back from the database and verify it.
+ value := b.Get([]byte("foo"))
+ fmt.Printf("The value of 'foo' was: %s\n", string(value))
+ return nil
+ })
+
+ // Delete the key in a different write transaction.
+ db.Update(func(tx *Tx) error {
+ return tx.Bucket([]byte("widgets")).Delete([]byte("foo"))
+ })
+
+ // Retrieve the key again.
+ db.View(func(tx *Tx) error {
+ value := tx.Bucket([]byte("widgets")).Get([]byte("foo"))
+ if value == nil {
+ fmt.Printf("The value of 'foo' is now: nil\n")
+ }
+ return nil
+ })
+
+ // Output:
+ // The value of 'foo' was: bar
+ // The value of 'foo' is now: nil
+}
+
+func ExampleBucket_ForEach() {
+ // Open the database.
+ db, _ := Open(tempfile(), 0666)
+ defer os.Remove(db.Path())
+ defer db.Close()
+
+ // Insert data into a bucket.
+ db.Update(func(tx *Tx) error {
+ tx.CreateBucket([]byte("animals"))
+ b := tx.Bucket([]byte("animals"))
+ b.Put([]byte("dog"), []byte("fun"))
+ b.Put([]byte("cat"), []byte("lame"))
+ b.Put([]byte("liger"), []byte("awesome"))
+
+ // Iterate over items in sorted key order.
+ b.ForEach(func(k, v []byte) error {
+ fmt.Printf("A %s is %s.\n", string(k), string(v))
+ return nil
+ })
+ return nil
+ })
+
+ // Output:
+ // A cat is lame.
+ // A dog is fun.
+ // A liger is awesome.
}
diff --git a/buckets.go b/buckets.go
deleted file mode 100644
index 3226873..0000000
--- a/buckets.go
+++ /dev/null
@@ -1,110 +0,0 @@
-package bolt
-
-import (
- "sort"
- "unsafe"
-)
-
-// buckets represents a in-memory buckets page.
-type buckets struct {
- pgid pgid
- items map[string]*bucket
-}
-
-// size returns the size of the page after serialization.
-func (b *buckets) size() int {
- var size = pageHeaderSize
- for key := range b.items {
- size += int(unsafe.Sizeof(bucket{})) + len(key)
- }
- return size
-}
-
-// get retrieves a bucket by name.
-func (b *buckets) get(key string) *bucket {
- return b.items[key]
-}
-
-// put sets a new value for a bucket.
-func (b *buckets) put(key string, item *bucket) {
- b.items[key] = item
-}
-
-// del deletes a bucket by name.
-func (b *buckets) del(key string) {
- if item := b.items[key]; item != nil {
- delete(b.items, key)
- }
-}
-
-// read initializes the data from an on-disk page.
-func (b *buckets) read(p *page) {
- b.pgid = p.id
- b.items = make(map[string]*bucket)
-
- var items []*bucket
- var keys []string
-
- // Read items.
- nodes := (*[maxNodesPerPage]bucket)(unsafe.Pointer(&p.ptr))
- for i := 0; i < int(p.count); i++ {
- node := &nodes[i]
- items = append(items, node)
- }
-
- // Read keys.
- buf := (*[maxAllocSize]byte)(unsafe.Pointer(&nodes[p.count]))[:]
- for i := 0; i < int(p.count); i++ {
- size := int(buf[0])
- buf = buf[1:]
- keys = append(keys, string(buf[:size]))
- buf = buf[size:]
- }
-
- // Associate keys and items.
- for index, key := range keys {
- b.items[key] = &bucket{
- root: items[index].root,
- sequence: items[index].sequence,
- }
- }
-}
-
-// write writes the items onto a page.
-func (b *buckets) write(p *page) {
- // Initialize page.
- p.flags |= bucketsPageFlag
- p.count = uint16(len(b.items))
-
- // Sort keys.
- var keys []string
- for key := range b.items {
- keys = append(keys, key)
- }
- sort.StringSlice(keys).Sort()
-
- // Write each bucket to the page.
- items := (*[maxNodesPerPage]bucket)(unsafe.Pointer(&p.ptr))
- for index, key := range keys {
- items[index] = *b.items[key]
- }
-
- // Write each key to the page.
- buf := (*[maxAllocSize]byte)(unsafe.Pointer(&items[p.count]))[:]
- for _, key := range keys {
- buf[0] = byte(len(key))
- buf = buf[1:]
- copy(buf, []byte(key))
- buf = buf[len(key):]
- }
-}
-
-// updateRoot finds a bucket by root id and then updates it to point to a new root.
-func (b *buckets) updateRoot(oldid, newid pgid) {
- for _, b := range b.items {
- if b.root == oldid {
- b.root = newid
- return
- }
- }
-}
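
With buckets.go gone, a bucket is no longer tracked on a dedicated page: it is
an ordinary leaf entry flagged with bucketLeafFlag whose value is the
serialized bucket header, exactly as CreateBucket and spill write it above. A
sketch of that inline encoding (hypothetical helpers using the same unsafe
layout as the patch):

package bolt

import "unsafe"

// Hypothetical helpers: encode/decode the inline bucket header that replaces
// the removed buckets page (same layout CreateBucket uses).
func encodeBucketHeader(root pgid, seq uint64) []byte {
	value := make([]byte, unsafe.Sizeof(bucket{}))
	h := (*bucket)(unsafe.Pointer(&value[0]))
	h.root, h.sequence = root, seq
	return value
}

func decodeBucketHeader(value []byte) bucket {
	return *(*bucket)(unsafe.Pointer(&value[0]))
}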
diff --git a/buckets_test.go b/buckets_test.go
deleted file mode 100644
index 0fc6288..0000000
--- a/buckets_test.go
+++ /dev/null
@@ -1,70 +0,0 @@
-package bolt
-
-import (
- "testing"
- "unsafe"
-
- "github.com/stretchr/testify/assert"
-)
-
-// Ensure that a buckets page can set a bucket.
-func TestBucketsPut(t *testing.T) {
- b := &buckets{items: make(map[string]*bucket)}
- b.put("foo", &bucket{root: 2})
- b.put("bar", &bucket{root: 3})
- b.put("foo", &bucket{root: 4})
- assert.Equal(t, len(b.items), 2)
- assert.Equal(t, b.get("foo").root, pgid(4))
- assert.Equal(t, b.get("bar").root, pgid(3))
- assert.Nil(t, b.get("no_such_bucket"))
-}
-
-// Ensure that a buckets page can deserialize from a page.
-func TestBucketsRead(t *testing.T) {
- // Create a page.
- var buf [4096]byte
- page := (*page)(unsafe.Pointer(&buf[0]))
- page.count = 2
-
- // Insert 2 items at the beginning.
- s := (*[3]bucket)(unsafe.Pointer(&page.ptr))
- s[0] = bucket{root: 3}
- s[1] = bucket{root: 4}
-
- // Write data for the nodes at the end.
- data := (*[4096]byte)(unsafe.Pointer(&s[2]))
- data[0] = 3
- copy(data[1:], []byte("bar"))
- data[4] = 10
- copy(data[5:], []byte("helloworld"))
-
- // Deserialize page into a buckets page.
- b := &buckets{items: make(map[string]*bucket)}
- b.read(page)
-
- // Check that there are two items with correct data.
- assert.Equal(t, len(b.items), 2)
- assert.Equal(t, b.get("bar").root, pgid(3))
- assert.Equal(t, b.get("helloworld").root, pgid(4))
-}
-
-// Ensure that a buckets page can serialize itself.
-func TestBucketsWrite(t *testing.T) {
- b := &buckets{items: make(map[string]*bucket)}
- b.put("foo", &bucket{root: 2})
- b.put("bar", &bucket{root: 3})
-
- // Write it to a page.
- var buf [4096]byte
- p := (*page)(unsafe.Pointer(&buf[0]))
- b.write(p)
-
- // Read the page back in.
- b2 := &buckets{items: make(map[string]*bucket)}
- b2.read(p)
-
- // Check that the two pages are the same.
- assert.Equal(t, len(b.items), 2)
- assert.Equal(t, b.get("foo").root, pgid(2))
- assert.Equal(t, b.get("bar").root, pgid(3))
-}
diff --git a/cmd/bolt/buckets.go b/cmd/bolt/buckets.go
index 10766a6..48395d8 100644
--- a/cmd/bolt/buckets.go
+++ b/cmd/bolt/buckets.go
@@ -21,10 +21,10 @@ func Buckets(path string) {
defer db.Close()
err = db.View(func(tx *bolt.Tx) error {
- for _, b := range tx.Buckets() {
- println(b.Name())
- }
- return nil
+ return tx.ForEach(func(name []byte, _ *bolt.Bucket) error {
+ println(string(name))
+ return nil
+ })
})
if err != nil {
fatal(err)
diff --git a/cmd/bolt/buckets_test.go b/cmd/bolt/buckets_test.go
index 5f72bb2..27ee619 100644
--- a/cmd/bolt/buckets_test.go
+++ b/cmd/bolt/buckets_test.go
@@ -13,9 +13,9 @@ func TestBuckets(t *testing.T) {
SetTestMode(true)
open(func(db *bolt.DB, path string) {
db.Update(func(tx *bolt.Tx) error {
- tx.CreateBucket("woojits")
- tx.CreateBucket("widgets")
- tx.CreateBucket("whatchits")
+ tx.CreateBucket([]byte("woojits"))
+ tx.CreateBucket([]byte("widgets"))
+ tx.CreateBucket([]byte("whatchits"))
return nil
})
db.Close()
diff --git a/cmd/bolt/export.go b/cmd/bolt/export.go
index f3cafc1..2dcbc1f 100644
--- a/cmd/bolt/export.go
+++ b/cmd/bolt/export.go
@@ -23,25 +23,33 @@ func Export(path string) {
}
defer db.Close()
- db.View(func(tx *bolt.Tx) error {
+ err = db.View(func(tx *bolt.Tx) error {
// Loop over every bucket and export it as a raw message.
var root []*rawMessage
- for _, b := range tx.Buckets() {
+ err := tx.ForEach(func(name []byte, b *bolt.Bucket) error {
message, err := exportBucket(b)
if err != nil {
fatal(err)
}
+ message.Key = name
root = append(root, message)
+ return nil
+ })
+ if err != nil {
+ return err
}
// Encode all buckets into JSON.
output, err := json.Marshal(root)
if err != nil {
- fatal("encode: ", err)
+ return fmt.Errorf("encode: %s", err)
}
print(string(output))
return nil
})
+ if err != nil {
+ fatal(err)
+ }
}
func exportBucket(b *bolt.Bucket) (*rawMessage, error) {
@@ -50,11 +58,22 @@ func exportBucket(b *bolt.Bucket) (*rawMessage, error) {
err := b.ForEach(func(k, v []byte) error {
var err error
+ // If there is no value then it is a bucket.
+ if v == nil {
+ child, err := exportBucket(b.Bucket(k))
+ if err != nil {
+ return fmt.Errorf("bucket: %s", err)
+ }
+ child.Key = k
+ children = append(children, child)
+ return nil
+ }
+
+ // Otherwise it's a regular key.
var child = &rawMessage{Key: k}
if child.Value, err = json.Marshal(v); err != nil {
return fmt.Errorf("value: %s", err)
}
-
children = append(children, child)
return nil
})
@@ -64,7 +83,6 @@ func exportBucket(b *bolt.Bucket) (*rawMessage, error) {
// Encode bucket into a raw message.
var root = rawMessage{Type: "bucket"}
- root.Key = []byte(b.Name())
if root.Value, err = json.Marshal(children); err != nil {
return nil, fmt.Errorf("children: %s", err)
}
diff --git a/cmd/bolt/export_test.go b/cmd/bolt/export_test.go
index 3d6c21a..13f57d1 100644
--- a/cmd/bolt/export_test.go
+++ b/cmd/bolt/export_test.go
@@ -13,19 +13,26 @@ func TestExport(t *testing.T) {
SetTestMode(true)
open(func(db *bolt.DB, path string) {
db.Update(func(tx *bolt.Tx) error {
- tx.CreateBucket("widgets")
- b := tx.Bucket("widgets")
+ tx.CreateBucket([]byte("widgets"))
+ b := tx.Bucket([]byte("widgets"))
b.Put([]byte("foo"), []byte("0000"))
b.Put([]byte("bar"), []byte(""))
- tx.CreateBucket("woojits")
- b = tx.Bucket("woojits")
+ tx.CreateBucket([]byte("woojits"))
+ b = tx.Bucket([]byte("woojits"))
b.Put([]byte("baz"), []byte("XXXX"))
+
+ b.CreateBucket([]byte("woojits/subbucket"))
+ b = b.Bucket([]byte("woojits/subbucket"))
+ b.Put([]byte("bat"), []byte("A"))
+
+ tx.CreateBucket([]byte("empty"))
+
return nil
})
db.Close()
output := run("export", path)
- assert.Equal(t, `[{"type":"bucket","key":"d2lkZ2V0cw==","value":[{"key":"YmFy","value":""},{"key":"Zm9v","value":"MDAwMA=="}]},{"type":"bucket","key":"d29vaml0cw==","value":[{"key":"YmF6","value":"WFhYWA=="}]}]`, output)
+ assert.Equal(t, `[{"type":"bucket","key":"ZW1wdHk=","value":[]},{"type":"bucket","key":"d2lkZ2V0cw==","value":[{"key":"YmFy","value":""},{"key":"Zm9v","value":"MDAwMA=="}]},{"type":"bucket","key":"d29vaml0cw==","value":[{"key":"YmF6","value":"WFhYWA=="},{"type":"bucket","key":"d29vaml0cy9zdWJidWNrZXQ=","value":[{"key":"YmF0","value":"QQ=="}]}]}]`, output)
})
}
diff --git a/cmd/bolt/get.go b/cmd/bolt/get.go
index 10216e3..6ea7f04 100644
--- a/cmd/bolt/get.go
+++ b/cmd/bolt/get.go
@@ -22,7 +22,7 @@ func Get(path, name, key string) {
err = db.View(func(tx *bolt.Tx) error {
// Find bucket.
- b := tx.Bucket(name)
+ b := tx.Bucket([]byte(name))
if b == nil {
fatalf("bucket not found: %s", name)
return nil
diff --git a/cmd/bolt/get_test.go b/cmd/bolt/get_test.go
index 09883d4..d491971 100644
--- a/cmd/bolt/get_test.go
+++ b/cmd/bolt/get_test.go
@@ -13,8 +13,8 @@ func TestGet(t *testing.T) {
SetTestMode(true)
open(func(db *bolt.DB, path string) {
db.Update(func(tx *bolt.Tx) error {
- tx.CreateBucket("widgets")
- tx.Bucket("widgets").Put([]byte("foo"), []byte("bar"))
+ tx.CreateBucket([]byte("widgets"))
+ tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar"))
return nil
})
db.Close()
@@ -45,7 +45,7 @@ func TestGetKeyNotFound(t *testing.T) {
SetTestMode(true)
open(func(db *bolt.DB, path string) {
db.Update(func(tx *bolt.Tx) error {
- return tx.CreateBucket("widgets")
+ return tx.CreateBucket([]byte("widgets"))
})
db.Close()
output := run("get", path, "widgets", "foo")
diff --git a/cmd/bolt/import.go b/cmd/bolt/import.go
index ec8cee1..7554ae7 100644
--- a/cmd/bolt/import.go
+++ b/cmd/bolt/import.go
@@ -41,7 +41,7 @@ func Import(path string, input string) {
}
// Create the bucket if it doesn't exist.
- if err := tx.CreateBucketIfNotExists(string(message.Key)); err != nil {
+ if err := tx.CreateBucketIfNotExists(message.Key); err != nil {
return fmt.Errorf("create bucket: %s", err)
}
@@ -52,7 +52,7 @@ func Import(path string, input string) {
}
// Import all the values into the bucket.
- b := tx.Bucket(string(message.Key))
+ b := tx.Bucket(message.Key)
if err := importBucket(b, children); err != nil {
return fmt.Errorf("import bucket: %s", err)
}
@@ -67,7 +67,28 @@ func Import(path string, input string) {
func importBucket(b *bolt.Bucket, children []*rawMessage) error {
// Decode each message into a key/value pair.
for _, child := range children {
- // Decode the base64 value.
+ // Bucket messages are handled recursively.
+ if child.Type == "bucket" {
+ // Create the bucket if it doesn't exist.
+ if err := b.CreateBucketIfNotExists(child.Key); err != nil {
+ return fmt.Errorf("create bucket: %s", err)
+ }
+
+ // Decode child messages.
+ var subchildren []*rawMessage
+ if err := json.Unmarshal(child.Value, &subchildren); err != nil {
+ return fmt.Errorf("decode children: %s", err)
+ }
+
+ // Import subbucket.
+ subbucket := b.Bucket(child.Key)
+ if err := importBucket(subbucket, subchildren); err != nil {
+ return fmt.Errorf("import bucket: %s", err)
+ }
+ continue
+ }
+
+ // Non-bucket values are decoded from base64.
var value []byte
if err := json.Unmarshal(child.Value, &value); err != nil {
return fmt.Errorf("decode value: %s", err)
diff --git a/cmd/bolt/import_test.go b/cmd/bolt/import_test.go
index be41f5c..263f561 100644
--- a/cmd/bolt/import_test.go
+++ b/cmd/bolt/import_test.go
@@ -15,7 +15,7 @@ func TestImport(t *testing.T) {
// Write input file.
input := tempfile()
- assert.NoError(t, ioutil.WriteFile(input, []byte(`[{"type":"bucket","key":"d2lkZ2V0cw==","value":[{"key":"YmFy","value":""},{"key":"Zm9v","value":"MDAwMA=="}]},{"type":"bucket","key":"d29vaml0cw==","value":[{"key":"YmF6","value":"WFhYWA=="}]}]`), 0600))
+ assert.NoError(t, ioutil.WriteFile(input, []byte(`[{"type":"bucket","key":"ZW1wdHk=","value":[]},{"type":"bucket","key":"d2lkZ2V0cw==","value":[{"key":"YmFy","value":""},{"key":"Zm9v","value":"MDAwMA=="}]},{"type":"bucket","key":"d29vaml0cw==","value":[{"key":"YmF6","value":"WFhYWA=="},{"type":"bucket","key":"d29vaml0cy9zdWJidWNrZXQ=","value":[{"key":"YmF0","value":"QQ=="}]}]}]`), 0600))
// Import database.
path := tempfile()
@@ -26,15 +26,20 @@ func TestImport(t *testing.T) {
db, err := bolt.Open(path, 0600)
assert.NoError(t, err)
db.View(func(tx *bolt.Tx) error {
- b := tx.Bucket("widgets")
+ assert.NotNil(t, tx.Bucket([]byte("empty")))
+
+ b := tx.Bucket([]byte("widgets"))
if assert.NotNil(t, b) {
assert.Equal(t, []byte("0000"), b.Get([]byte("foo")))
assert.Equal(t, []byte(""), b.Get([]byte("bar")))
}
- b = tx.Bucket("woojits")
+ b = tx.Bucket([]byte("woojits"))
if assert.NotNil(t, b) {
assert.Equal(t, []byte("XXXX"), b.Get([]byte("baz")))
+
+ b = b.Bucket([]byte("woojits/subbucket"))
+ assert.Equal(t, []byte("A"), b.Get([]byte("bat")))
}
return nil
diff --git a/cmd/bolt/keys.go b/cmd/bolt/keys.go
index 56245b8..6affefe 100644
--- a/cmd/bolt/keys.go
+++ b/cmd/bolt/keys.go
@@ -22,7 +22,7 @@ func Keys(path, name string) {
err = db.View(func(tx *bolt.Tx) error {
// Find bucket.
- b := tx.Bucket(name)
+ b := tx.Bucket([]byte(name))
if b == nil {
fatalf("bucket not found: %s", name)
return nil
diff --git a/cmd/bolt/keys_test.go b/cmd/bolt/keys_test.go
index ea530f6..2b5a9a0 100644
--- a/cmd/bolt/keys_test.go
+++ b/cmd/bolt/keys_test.go
@@ -13,10 +13,10 @@ func TestKeys(t *testing.T) {
SetTestMode(true)
open(func(db *bolt.DB, path string) {
db.Update(func(tx *bolt.Tx) error {
- tx.CreateBucket("widgets")
- tx.Bucket("widgets").Put([]byte("0002"), []byte(""))
- tx.Bucket("widgets").Put([]byte("0001"), []byte(""))
- tx.Bucket("widgets").Put([]byte("0003"), []byte(""))
+ tx.CreateBucket([]byte("widgets"))
+ tx.Bucket([]byte("widgets")).Put([]byte("0002"), []byte(""))
+ tx.Bucket([]byte("widgets")).Put([]byte("0001"), []byte(""))
+ tx.Bucket([]byte("widgets")).Put([]byte("0003"), []byte(""))
return nil
})
db.Close()
diff --git a/cmd/bolt/set.go b/cmd/bolt/set.go
index ff12024..9761f44 100644
--- a/cmd/bolt/set.go
+++ b/cmd/bolt/set.go
@@ -22,7 +22,7 @@ func Set(path, name, key, value string) {
err = db.Update(func(tx *bolt.Tx) error {
// Find bucket.
- b := tx.Bucket(name)
+ b := tx.Bucket([]byte(name))
if b == nil {
fatalf("bucket not found: %s", name)
return nil
diff --git a/cmd/bolt/set_test.go b/cmd/bolt/set_test.go
index 519d888..be07148 100644
--- a/cmd/bolt/set_test.go
+++ b/cmd/bolt/set_test.go
@@ -13,7 +13,7 @@ func TestSet(t *testing.T) {
SetTestMode(true)
open(func(db *bolt.DB, path string) {
db.Update(func(tx *bolt.Tx) error {
- tx.CreateBucket("widgets")
+ tx.CreateBucket([]byte("widgets"))
return nil
})
db.Close()
diff --git a/const.go b/const.go
index 00228be..4669347 100644
--- a/const.go
+++ b/const.go
@@ -1,6 +1,6 @@
package bolt
-const version = 1
+const version = 2
const (
maxUint = ^uint(0)
@@ -10,9 +10,6 @@ const (
)
const (
- // MaxBucketNameSize is the maximum length of a bucket name, in bytes.
- MaxBucketNameSize = 255
-
// MaxKeySize is the maximum length of a key, in bytes.
MaxKeySize = 32768
diff --git a/cursor.go b/cursor.go
index 2887791..f6dc79c 100644
--- a/cursor.go
+++ b/cursor.go
@@ -8,39 +8,47 @@ import (
// Cursor represents an iterator that can traverse over all key/value pairs in a bucket in sorted order.
// Cursors can be obtained from a transaction and are valid as long as the transaction is open.
type Cursor struct {
- tx *Tx
- root pgid
- stack []elemRef
+ bucket *Bucket
+ stack []elemRef
}
// First moves the cursor to the first item in the bucket and returns its key and value.
// If the bucket is empty then a nil key and value are returned.
func (c *Cursor) First() (key []byte, value []byte) {
- _assert(c.tx.db != nil, "tx closed")
+ _assert(c.bucket.tx.db != nil, "tx closed")
c.stack = c.stack[:0]
- p, n := c.tx.pageNode(c.root)
+ p, n := c.bucket.pageNode(c.bucket.root)
c.stack = append(c.stack, elemRef{page: p, node: n, index: 0})
c.first()
- return c.keyValue()
+ k, v, flags := c.keyValue()
+ if (flags & uint32(bucketLeafFlag)) != 0 {
+ return k, nil
+ }
+ return k, v
}
// Last moves the cursor to the last item in the bucket and returns its key and value.
// If the bucket is empty then a nil key and value are returned.
func (c *Cursor) Last() (key []byte, value []byte) {
- _assert(c.tx.db != nil, "tx closed")
+ _assert(c.bucket.tx.db != nil, "tx closed")
c.stack = c.stack[:0]
- p, n := c.tx.pageNode(c.root)
+ p, n := c.bucket.pageNode(c.bucket.root)
ref := elemRef{page: p, node: n}
ref.index = ref.count() - 1
c.stack = append(c.stack, ref)
c.last()
- return c.keyValue()
+ k, v, flags := c.keyValue()
+ if (flags & uint32(bucketLeafFlag)) != 0 {
+ return k, nil
+ }
+ return k, v
}
// Next moves the cursor to the next item in the bucket and returns its key and value.
// If the cursor is at the end of the bucket then a nil key and value are returned.
func (c *Cursor) Next() (key []byte, value []byte) {
- _assert(c.tx.db != nil, "tx closed")
+ _assert(c.bucket.tx.db != nil, "tx closed")
// Attempt to move over one element until we're successful.
// Move up the stack as we hit the end of each page in our stack.
@@ -60,13 +68,17 @@ func (c *Cursor) Next() (key []byte, value []byte) {
// Move down the stack to find the first element of the first leaf under this branch.
c.first()
- return c.keyValue()
+ k, v, flags := c.keyValue()
+ if (flags & uint32(bucketLeafFlag)) != 0 {
+ return k, nil
+ }
+ return k, v
}
// Prev moves the cursor to the previous item in the bucket and returns its key and value.
// If the cursor is at the beginning of the bucket then a nil key and value are returned.
func (c *Cursor) Prev() (key []byte, value []byte) {
- _assert(c.tx.db != nil, "tx closed")
+ _assert(c.bucket.tx.db != nil, "tx closed")
// Attempt to move back one element until we're successful.
// Move up the stack as we hit the beginning of each page in our stack.
@@ -86,25 +98,43 @@ func (c *Cursor) Prev() (key []byte, value []byte) {
// Move down the stack to find the last element of the last leaf under this branch.
c.last()
- return c.keyValue()
+ k, v, flags := c.keyValue()
+ if (flags & uint32(bucketLeafFlag)) != 0 {
+ return k, nil
+ }
+ return k, v
}
// Seek moves the cursor to a given key and returns it.
// If the key does not exist then the next key is used. If no keys
// follow, a nil value is returned.
func (c *Cursor) Seek(seek []byte) (key []byte, value []byte) {
- _assert(c.tx.db != nil, "tx closed")
+ k, v, flags := c.seek(seek)
+ if k == nil {
+ return nil, nil
+ } else if (flags & uint32(bucketLeafFlag)) != 0 {
+ return k, nil
+ }
+ return k, v
+}
+
+// seek moves the cursor to a given key and returns it.
+// If the key does not exist then the next key is used. If no keys
+// follow, a nil value is returned.
+func (c *Cursor) seek(seek []byte) (key []byte, value []byte, flags uint32) {
+ _assert(c.bucket.tx.db != nil, "tx closed")
// Start from root page/node and traverse to correct page.
c.stack = c.stack[:0]
- c.search(seek, c.root)
+ c.search(seek, c.bucket.root)
ref := &c.stack[len(c.stack)-1]
// If the cursor is pointing to the end of page/node then return nil.
if ref.index >= ref.count() {
- return nil, nil
+ return nil, nil, 0
}
+ // Return the key/value pair with its flags so callers can mask bucket values.
return c.keyValue()
}
@@ -124,7 +154,7 @@ func (c *Cursor) first() {
} else {
pgid = ref.page.branchPageElement(uint16(ref.index)).pgid
}
- p, n := c.tx.pageNode(pgid)
+ p, n := c.bucket.pageNode(pgid)
c.stack = append(c.stack, elemRef{page: p, node: n, index: 0})
}
}
@@ -145,7 +175,7 @@ func (c *Cursor) last() {
} else {
pgid = ref.page.branchPageElement(uint16(ref.index)).pgid
}
- p, n := c.tx.pageNode(pgid)
+ p, n := c.bucket.pageNode(pgid)
var nextRef = elemRef{page: p, node: n}
nextRef.index = nextRef.count() - 1
@@ -155,7 +185,7 @@ func (c *Cursor) last() {
// search recursively performs a binary search against a given page/node until it finds a given key.
func (c *Cursor) search(key []byte, pgid pgid) {
- p, n := c.tx.pageNode(pgid)
+ p, n := c.bucket.pageNode(pgid)
if p != nil {
_assert((p.flags&(branchPageFlag|leafPageFlag)) != 0, "invalid page type: "+p.typ())
}
@@ -241,25 +271,25 @@ func (c *Cursor) nsearch(key []byte) {
}
// keyValue returns the key and value of the current leaf element.
-func (c *Cursor) keyValue() ([]byte, []byte) {
+func (c *Cursor) keyValue() ([]byte, []byte, uint32) {
ref := &c.stack[len(c.stack)-1]
if ref.count() == 0 || ref.index >= ref.count() {
- return nil, nil
+ return nil, nil, 0
}
// Retrieve value from node.
if ref.node != nil {
inode := &ref.node.inodes[ref.index]
- return inode.key, inode.value
+ return inode.key, inode.value, inode.flags
}
// Or retrieve value from page.
elem := ref.page.leafPageElement(uint16(ref.index))
- return elem.key(), elem.value()
+ return elem.key(), elem.value(), elem.flags
}
// node returns the node that the cursor is currently positioned on.
-func (c *Cursor) node(tx *Tx) *node {
+func (c *Cursor) node() *node {
_assert(len(c.stack) > 0, "accessing a node with a zero-length cursor stack")
// If the top of the stack is a leaf node then just return it.
@@ -270,7 +300,7 @@ func (c *Cursor) node(tx *Tx) *node {
// Start from root and traverse down the hierarchy.
var n = c.stack[0].node
if n == nil {
- n = tx.node(c.stack[0].page.id, nil)
+ n = c.bucket.node(c.stack[0].page.id, nil)
}
for _, ref := range c.stack[:len(c.stack)-1] {
_assert(!n.isLeaf, "expected branch node")
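The flag masking above means a cursor walk never exposes a sub-bucket's internal value: bucket keys come back with a nil value, the same signal exportBucket keys off of. Here is a minimal usage sketch against the API as it appears in this diff (CreateBucket returning only an error; the github.com/boltdb/bolt import path is assumed).

package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"os"

	"github.com/boltdb/bolt"
)

func main() {
	f, _ := ioutil.TempFile("", "bolt-")
	f.Close()
	defer os.Remove(f.Name())

	db, err := bolt.Open(f.Name(), 0600)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	db.Update(func(tx *bolt.Tx) error {
		if err := tx.CreateBucket([]byte("widgets")); err != nil {
			return err
		}
		b := tx.Bucket([]byte("widgets"))
		b.Put([]byte("foo"), []byte("bar"))
		return b.CreateBucket([]byte("sub")) // stored as a bucketLeafFlag key
	})

	db.View(func(tx *bolt.Tx) error {
		c := tx.Bucket([]byte("widgets")).Cursor()
		for k, v := c.First(); k != nil; k, v = c.Next() {
			if v == nil {
				fmt.Printf("%s -> nested bucket\n", k) // flags masked to nil
			} else {
				fmt.Printf("%s -> %s\n", k, v)
			}
		}
		return nil
	})
}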
diff --git a/cursor_test.go b/cursor_test.go
new file mode 100644
index 0000000..d9d258c
--- /dev/null
+++ b/cursor_test.go
@@ -0,0 +1,307 @@
+package bolt
+
+import (
+ "sort"
+ "testing"
+ "testing/quick"
+
+ "github.com/stretchr/testify/assert"
+)
+
+// Ensure that a Tx cursor can seek to the appropriate keys.
+func TestCursor_Seek(t *testing.T) {
+ withOpenDB(func(db *DB, path string) {
+ db.Update(func(tx *Tx) error {
+ assert.NoError(t, tx.CreateBucket([]byte("widgets")))
+ b := tx.Bucket([]byte("widgets"))
+ assert.NoError(t, b.Put([]byte("foo"), []byte("0001")))
+ assert.NoError(t, b.Put([]byte("bar"), []byte("0002")))
+ assert.NoError(t, b.Put([]byte("baz"), []byte("0003")))
+ assert.NoError(t, b.CreateBucket([]byte("bkt")))
+ return nil
+ })
+ db.View(func(tx *Tx) error {
+ c := tx.Bucket([]byte("widgets")).Cursor()
+
+ // Exact match should go to the key.
+ k, v := c.Seek([]byte("bar"))
+ assert.Equal(t, []byte("bar"), k)
+ assert.Equal(t, []byte("0002"), v)
+
+ // Inexact match should go to the next key.
+ k, v = c.Seek([]byte("bas"))
+ assert.Equal(t, []byte("baz"), k)
+ assert.Equal(t, []byte("0003"), v)
+
+ // Low key should go to the first key.
+ k, v = c.Seek([]byte(""))
+ assert.Equal(t, []byte("bar"), k)
+ assert.Equal(t, []byte("0002"), v)
+
+ // High key should return no key.
+ k, v = c.Seek([]byte("zzz"))
+ assert.Nil(t, k)
+ assert.Nil(t, v)
+
+ // Buckets should return their key but no value.
+ k, v = c.Seek([]byte("bkt"))
+ assert.Equal(t, []byte("bkt"), k)
+ assert.Nil(t, v)
+
+ return nil
+ })
+ })
+}
+
+// Ensure that a cursor can iterate over an empty bucket without error.
+func TestCursor_EmptyBucket(t *testing.T) {
+ withOpenDB(func(db *DB, path string) {
+ db.Update(func(tx *Tx) error {
+ return tx.CreateBucket([]byte("widgets"))
+ })
+ db.View(func(tx *Tx) error {
+ c := tx.Bucket([]byte("widgets")).Cursor()
+ k, v := c.First()
+ assert.Nil(t, k)
+ assert.Nil(t, v)
+ return nil
+ })
+ })
+}
+
+// Ensure that a Tx cursor can reverse iterate over an empty bucket without error.
+func TestCursor_EmptyBucketReverse(t *testing.T) {
+ withOpenDB(func(db *DB, path string) {
+ db.Update(func(tx *Tx) error {
+ return tx.CreateBucket([]byte("widgets"))
+ })
+ db.View(func(tx *Tx) error {
+ c := tx.Bucket([]byte("widgets")).Cursor()
+ k, v := c.Last()
+ assert.Nil(t, k)
+ assert.Nil(t, v)
+ return nil
+ })
+ })
+}
+
+// Ensure that a Tx cursor can iterate over a single root with a couple of elements.
+func TestCursor_LeafRoot(t *testing.T) {
+ withOpenDB(func(db *DB, path string) {
+ db.Update(func(tx *Tx) error {
+ tx.CreateBucket([]byte("widgets"))
+ tx.Bucket([]byte("widgets")).Put([]byte("baz"), []byte{})
+ tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte{0})
+ tx.Bucket([]byte("widgets")).Put([]byte("bar"), []byte{1})
+ return nil
+ })
+ tx, _ := db.Begin(false)
+ c := tx.Bucket([]byte("widgets")).Cursor()
+
+ k, v := c.First()
+ assert.Equal(t, string(k), "bar")
+ assert.Equal(t, v, []byte{1})
+
+ k, v = c.Next()
+ assert.Equal(t, string(k), "baz")
+ assert.Equal(t, v, []byte{})
+
+ k, v = c.Next()
+ assert.Equal(t, string(k), "foo")
+ assert.Equal(t, v, []byte{0})
+
+ k, v = c.Next()
+ assert.Nil(t, k)
+ assert.Nil(t, v)
+
+ k, v = c.Next()
+ assert.Nil(t, k)
+ assert.Nil(t, v)
+
+ tx.Rollback()
+ })
+}
+
+// Ensure that a Tx cursor can iterate in reverse over a single root with a couple of elements.
+func TestCursor_LeafRootReverse(t *testing.T) {
+ withOpenDB(func(db *DB, path string) {
+ db.Update(func(tx *Tx) error {
+ tx.CreateBucket([]byte("widgets"))
+ tx.Bucket([]byte("widgets")).Put([]byte("baz"), []byte{})
+ tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte{0})
+ tx.Bucket([]byte("widgets")).Put([]byte("bar"), []byte{1})
+ return nil
+ })
+ tx, _ := db.Begin(false)
+ c := tx.Bucket([]byte("widgets")).Cursor()
+
+ k, v := c.Last()
+ assert.Equal(t, string(k), "foo")
+ assert.Equal(t, v, []byte{0})
+
+ k, v = c.Prev()
+ assert.Equal(t, string(k), "baz")
+ assert.Equal(t, v, []byte{})
+
+ k, v = c.Prev()
+ assert.Equal(t, string(k), "bar")
+ assert.Equal(t, v, []byte{1})
+
+ k, v = c.Prev()
+ assert.Nil(t, k)
+ assert.Nil(t, v)
+
+ k, v = c.Prev()
+ assert.Nil(t, k)
+ assert.Nil(t, v)
+
+ tx.Rollback()
+ })
+}
+
+// Ensure that a Tx cursor can restart from the beginning.
+func TestCursor_Restart(t *testing.T) {
+ withOpenDB(func(db *DB, path string) {
+ db.Update(func(tx *Tx) error {
+ tx.CreateBucket([]byte("widgets"))
+ tx.Bucket([]byte("widgets")).Put([]byte("bar"), []byte{})
+ tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte{})
+ return nil
+ })
+
+ tx, _ := db.Begin(false)
+ c := tx.Bucket([]byte("widgets")).Cursor()
+
+ k, _ := c.First()
+ assert.Equal(t, string(k), "bar")
+
+ k, _ = c.Next()
+ assert.Equal(t, string(k), "foo")
+
+ k, _ = c.First()
+ assert.Equal(t, string(k), "bar")
+
+ k, _ = c.Next()
+ assert.Equal(t, string(k), "foo")
+
+ tx.Rollback()
+ })
+}
+
+// Ensure that a Tx can iterate over all elements in a bucket.
+func TestCursor_Iterate(t *testing.T) {
+ f := func(items testdata) bool {
+ withOpenDB(func(db *DB, path string) {
+ // Bulk insert all values.
+ tx, _ := db.Begin(true)
+ tx.CreateBucket([]byte("widgets"))
+ b := tx.Bucket([]byte("widgets"))
+ for _, item := range items {
+ assert.NoError(t, b.Put(item.Key, item.Value))
+ }
+ assert.NoError(t, tx.Commit())
+
+ // Sort test data.
+ sort.Sort(items)
+
+ // Iterate over all items and check consistency.
+ var index = 0
+ tx, _ = db.Begin(false)
+ c := tx.Bucket([]byte("widgets")).Cursor()
+ for k, v := c.First(); k != nil && index < len(items); k, v = c.Next() {
+ assert.Equal(t, k, items[index].Key)
+ assert.Equal(t, v, items[index].Value)
+ index++
+ }
+ assert.Equal(t, len(items), index)
+ tx.Rollback()
+ })
+ return true
+ }
+ if err := quick.Check(f, qconfig()); err != nil {
+ t.Error(err)
+ }
+}
+
+// Ensure that a transaction can iterate over all elements in a bucket in reverse.
+func TestCursor_Iterate_Reverse(t *testing.T) {
+ f := func(items testdata) bool {
+ withOpenDB(func(db *DB, path string) {
+ // Bulk insert all values.
+ tx, _ := db.Begin(true)
+ tx.CreateBucket([]byte("widgets"))
+ b := tx.Bucket([]byte("widgets"))
+ for _, item := range items {
+ assert.NoError(t, b.Put(item.Key, item.Value))
+ }
+ assert.NoError(t, tx.Commit())
+
+ // Sort test data.
+ sort.Sort(revtestdata(items))
+
+ // Iterate over all items and check consistency.
+ var index = 0
+ tx, _ = db.Begin(false)
+ c := tx.Bucket([]byte("widgets")).Cursor()
+ for k, v := c.Last(); k != nil && index < len(items); k, v = c.Prev() {
+ assert.Equal(t, k, items[index].Key)
+ assert.Equal(t, v, items[index].Value)
+ index++
+ }
+ assert.Equal(t, len(items), index)
+ tx.Rollback()
+ })
+ return true
+ }
+ if err := quick.Check(f, qconfig()); err != nil {
+ t.Error(err)
+ }
+}
+
+// Ensure that a Tx cursor can iterate over subbuckets.
+func TestCursor_Iterate_BucketsOnly(t *testing.T) {
+ withOpenDB(func(db *DB, path string) {
+ db.Update(func(tx *Tx) error {
+ assert.NoError(t, tx.CreateBucket([]byte("widgets")))
+ b := tx.Bucket([]byte("widgets"))
+ assert.NoError(t, b.CreateBucket([]byte("foo")))
+ assert.NoError(t, b.CreateBucket([]byte("bar")))
+ assert.NoError(t, b.CreateBucket([]byte("baz")))
+ return nil
+ })
+ db.View(func(tx *Tx) error {
+ var names []string
+ c := tx.Bucket([]byte("widgets")).Cursor()
+ for k, v := c.First(); k != nil; k, v = c.Next() {
+ names = append(names, string(k))
+ assert.Nil(t, v)
+ }
+ assert.Equal(t, names, []string{"bar", "baz", "foo"})
+ return nil
+ })
+ })
+}
+
+// Ensure that a Tx cursor can reverse iterate over subbuckets.
+func TestCursor_Iterate_BucketsOnly_Reverse(t *testing.T) {
+ withOpenDB(func(db *DB, path string) {
+ db.Update(func(tx *Tx) error {
+ assert.NoError(t, tx.CreateBucket([]byte("widgets")))
+ b := tx.Bucket([]byte("widgets"))
+ assert.NoError(t, b.CreateBucket([]byte("foo")))
+ assert.NoError(t, b.CreateBucket([]byte("bar")))
+ assert.NoError(t, b.CreateBucket([]byte("baz")))
+ return nil
+ })
+ db.View(func(tx *Tx) error {
+ var names []string
+ c := tx.Bucket([]byte("widgets")).Cursor()
+ for k, v := c.Last(); k != nil; k, v = c.Prev() {
+ names = append(names, string(k))
+ assert.Nil(t, v)
+ }
+ assert.Equal(t, names, []string{"foo", "baz", "bar"})
+ return nil
+ })
+ })
+}
diff --git a/db.go b/db.go
index a76639d..c9611f9 100644
--- a/db.go
+++ b/db.go
@@ -45,6 +45,7 @@ type DB struct {
rwlock sync.Mutex // Allows only one writer at a time.
metalock sync.Mutex // Protects meta page access.
mmaplock sync.RWMutex // Protects mmap access during remapping.
+ statlock sync.RWMutex // Protects stats access.
ops struct {
writeAt func(b []byte, off int64) (n int, err error)
@@ -133,7 +134,7 @@ func (db *DB) mmap(minsz int) error {
// Dereference all mmap references before unmapping.
if db.rwtx != nil {
- db.rwtx.dereference()
+ db.rwtx.root.dereference()
}
// Unmap existing data before continuing.
@@ -224,7 +225,7 @@ func (db *DB) init() error {
m.pageSize = uint32(db.pageSize)
m.version = version
m.freelist = 2
- m.buckets = 3
+ m.root = bucket{root: 3}
m.pgid = 4
m.txid = txid(i)
}
@@ -238,7 +239,7 @@ func (db *DB) init() error {
// Write an empty leaf page at page 4.
p = db.pageInBuffer(buf[:], pgid(3))
p.id = pgid(3)
- p.flags = bucketsPageFlag
+ p.flags = leafPageFlag
p.count = 0
// Write the buffer to our data file.
@@ -305,16 +306,18 @@ func (db *DB) Begin(writable bool) (*Tx, error) {
}
func (db *DB) beginTx() (*Tx, error) {
- db.metalock.Lock()
- defer db.metalock.Unlock()
-
// Obtain a read-only lock on the mmap. When the mmap is remapped it will
// obtain a write lock so all transactions must finish before it can be
// remapped.
db.mmaplock.RLock()
+ // Lock the meta pages while we initialize the transaction.
+ db.metalock.Lock()
+ defer db.metalock.Unlock()
+
// Exit if the database is not open yet.
if !db.opened {
+ db.mmaplock.RUnlock()
return nil, ErrDatabaseNotOpen
}
@@ -329,12 +332,15 @@ func (db *DB) beginTx() (*Tx, error) {
}
func (db *DB) beginRWTx() (*Tx, error) {
- db.metalock.Lock()
- defer db.metalock.Unlock()
-
// Obtain writer lock. This is released by the transaction when it closes.
+ // This enforces only one writer transaction at a time.
db.rwlock.Lock()
+ // Once we have the writer lock then we can lock the meta pages so that
+ // we can set up the transaction.
+ db.metalock.Lock()
+ defer db.metalock.Unlock()
+
// Exit if the database is not open yet.
if !db.opened {
db.rwlock.Unlock()
@@ -363,7 +369,6 @@ func (db *DB) beginRWTx() (*Tx, error) {
// removeTx removes a transaction from the database.
func (db *DB) removeTx(tx *Tx) {
db.metalock.Lock()
- defer db.metalock.Unlock()
// Release the read lock on the mmap.
db.mmaplock.RUnlock()
@@ -376,8 +381,13 @@ func (db *DB) removeTx(tx *Tx) {
}
}
+ // Unlock the meta pages.
+ db.metalock.Unlock()
+
// Merge statistics.
+ db.statlock.Lock()
db.stats.TxStats.add(&tx.stats)
+ db.statlock.Unlock()
}
// Update executes a function within the context of a read-write managed transaction.
@@ -497,8 +507,8 @@ func (db *DB) CopyFile(path string, mode os.FileMode) error {
// Stats retrieves ongoing performance stats for the database.
// This is only updated when a transaction closes.
func (db *DB) Stats() Stats {
- db.metalock.Lock()
- defer db.metalock.Unlock()
+ db.statlock.RLock()
+ defer db.statlock.RUnlock()
return db.stats
}
@@ -510,40 +520,14 @@ func (db *DB) Check() error {
// Track every reachable page.
reachable := make(map[pgid]*page)
- reachable[0] = tx.page(0) // meta0
- reachable[1] = tx.page(1) // meta1
- for i := uint32(0); i <= tx.page(tx.meta.buckets).overflow; i++ {
- reachable[tx.meta.buckets+pgid(i)] = tx.page(tx.meta.buckets)
- }
- for i := uint32(0); i <= tx.page(tx.meta.freelist).overflow; i++ {
- reachable[tx.meta.freelist+pgid(i)] = tx.page(tx.meta.freelist)
+ reachable[0] = db.page(0) // meta0
+ reachable[1] = db.page(1) // meta1
+ for i := uint32(0); i <= db.page(tx.meta.freelist).overflow; i++ {
+ reachable[tx.meta.freelist+pgid(i)] = db.page(tx.meta.freelist)
}
- // Check each reachable page within each bucket.
- for _, bucket := range tx.Buckets() {
- // warnf("[bucket] %s", bucket.name)
- tx.forEachPage(bucket.root, 0, func(p *page, _ int) {
- // Ensure each page is only referenced once.
- for i := pgid(0); i <= pgid(p.overflow); i++ {
- var id = p.id + i
- if _, ok := reachable[id]; ok {
- errors = append(errors, fmt.Errorf("page %d: multiple references", int(id)))
- }
- reachable[id] = p
- }
-
- // Retrieve page info.
- info, err := tx.Page(int(p.id))
- // warnf("[page] %d + %d (%s)", p.id, p.overflow, info.Type)
- if err != nil {
- errors = append(errors, err)
- } else if info == nil {
- errors = append(errors, fmt.Errorf("page %d: out of bounds: %d", int(p.id), int(tx.meta.pgid)))
- } else if info.Type != "branch" && info.Type != "leaf" {
- errors = append(errors, fmt.Errorf("page %d: invalid type: %s", int(p.id), info.Type))
- }
- })
- }
+ // Recursively check buckets.
+ db.checkBucket(&tx.root, reachable, &errors)
// Ensure all pages below high water mark are either reachable or freed.
for i := pgid(0); i < tx.meta.pgid; i++ {
@@ -553,8 +537,6 @@ func (db *DB) Check() error {
}
}
- // TODO(benbjohnson): Ensure that only one buckets page exists.
-
if len(errors) > 0 {
return errors
}
@@ -563,6 +545,39 @@ func (db *DB) Check() error {
})
}
+func (db *DB) checkBucket(b *Bucket, reachable map[pgid]*page, errors *ErrorList) {
+ // Check every page used by this bucket.
+ b.tx.forEachPage(b.root, 0, func(p *page, _ int) {
+ // Ensure each page is only referenced once.
+ for i := pgid(0); i <= pgid(p.overflow); i++ {
+ var id = p.id + i
+ if _, ok := reachable[id]; ok {
+ *errors = append(*errors, fmt.Errorf("page %d: multiple references", int(id)))
+ }
+ reachable[id] = p
+ }
+
+ // Retrieve page info.
+ info, err := b.tx.Page(int(p.id))
+ // warnf("[page] %d + %d (%s)", p.id, p.overflow, info.Type)
+ if err != nil {
+ *errors = append(*errors, err)
+ } else if info == nil {
+ *errors = append(*errors, fmt.Errorf("page %d: out of bounds: %d", int(p.id), int(b.tx.meta.pgid)))
+ } else if info.Type != "branch" && info.Type != "leaf" {
+ *errors = append(*errors, fmt.Errorf("page %d: invalid type: %s", int(p.id), info.Type))
+ }
+ })
+
+ // Check each bucket within this bucket.
+ _ = b.ForEach(func(k, v []byte) error {
+ if child := b.Bucket(k); child != nil {
+ db.checkBucket(child, reachable, errors)
+ }
+ return nil
+ })
+}
+
// page retrieves a page reference from the mmap based on the current page size.
func (db *DB) page(id pgid) *page {
pos := id * pgid(db.pageSize)
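A practical upshot of the new statlock: Stats now takes only a read lock on the stats, so polling it no longer serializes against transaction setup under metalock. Below is a small sketch of the snapshot-and-diff pattern the tests use, assuming the Stats/Sub API shown in this diff and the github.com/boltdb/bolt import path.

package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"os"

	"github.com/boltdb/bolt"
)

func main() {
	f, _ := ioutil.TempFile("", "bolt-")
	f.Close()
	defer os.Remove(f.Name())

	db, err := bolt.Open(f.Name(), 0600)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	prev := db.Stats() // snapshot before the write

	db.Update(func(tx *bolt.Tx) error {
		return tx.CreateBucket([]byte("widgets"))
	})

	// Stats is guarded by its own RWMutex now, so this read never
	// blocks behind metalock; Sub diffs two snapshots.
	cur := db.Stats()
	diff := cur.Sub(&prev)
	fmt.Println("pages allocated by the write:", diff.TxStats.PageCount)
}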
diff --git a/db_test.go b/db_test.go
index 10f3220..aa72461 100644
--- a/db_test.go
+++ b/db_test.go
@@ -5,8 +5,11 @@ import (
"flag"
"fmt"
"io/ioutil"
+ "math/rand"
"os"
"regexp"
+ "strconv"
+ "strings"
"testing"
"time"
"unsafe"
@@ -31,14 +34,14 @@ func TestOpen(t *testing.T) {
}
// Ensure that opening a database with a bad path returns an error.
-func TestOpenBadPath(t *testing.T) {
+func TestOpen_BadPath(t *testing.T) {
db, err := Open("/../bad-path", 0666)
assert.Error(t, err)
assert.Nil(t, db)
}
// Ensure that a database can be opened without error.
-func TestDBOpen(t *testing.T) {
+func TestDB_Open(t *testing.T) {
withTempPath(func(path string) {
db, err := Open(path, 0666)
assert.NotNil(t, db)
@@ -49,7 +52,7 @@ func TestDBOpen(t *testing.T) {
}
// Ensure that a re-opened database is consistent.
-func TestOpenCheck(t *testing.T) {
+func TestOpen_Check(t *testing.T) {
withTempPath(func(path string) {
db, err := Open(path, 0666)
assert.NoError(t, err)
@@ -64,7 +67,7 @@ func TestOpenCheck(t *testing.T) {
}
// Ensure that the database returns an error if the file handle cannot be open.
-func TestDBOpenFileError(t *testing.T) {
+func TestDB_Open_FileError(t *testing.T) {
withTempPath(func(path string) {
_, err := Open(path+"/youre-not-my-real-parent", 0666)
if err, _ := err.(*os.PathError); assert.Error(t, err) {
@@ -75,12 +78,12 @@ func TestDBOpenFileError(t *testing.T) {
}
// Ensure that write errors to the meta file handler during initialization are returned.
-func TestDBMetaInitWriteError(t *testing.T) {
+func TestDB_Open_MetaInitWriteError(t *testing.T) {
t.Skip("pending")
}
// Ensure that a database that is too small returns an error.
-func TestDBFileTooSmall(t *testing.T) {
+func TestDB_Open_FileTooSmall(t *testing.T) {
withTempPath(func(path string) {
db, err := Open(path, 0666)
assert.NoError(t, err)
@@ -95,7 +98,7 @@ func TestDBFileTooSmall(t *testing.T) {
}
// Ensure that corrupt meta0 page errors get returned.
-func TestDBCorruptMeta0(t *testing.T) {
+func TestDB_Open_CorruptMeta0(t *testing.T) {
withTempPath(func(path string) {
var m meta
m.magic = magic
@@ -119,16 +122,16 @@ func TestDBCorruptMeta0(t *testing.T) {
}
// Ensure that a corrupt meta page checksum causes the open to fail.
-func TestDBMetaChecksumError(t *testing.T) {
+func TestDB_Open_MetaChecksumError(t *testing.T) {
for i := 0; i < 2; i++ {
withTempPath(func(path string) {
db, err := Open(path, 0600)
pageSize := db.pageSize
db.Update(func(tx *Tx) error {
- return tx.CreateBucket("widgets")
+ return tx.CreateBucket([]byte("widgets"))
})
db.Update(func(tx *Tx) error {
- return tx.CreateBucket("woojits")
+ return tx.CreateBucket([]byte("woojits"))
})
db.Close()
@@ -152,7 +155,7 @@ func TestDBMetaChecksumError(t *testing.T) {
}
// Ensure that a database cannot open a transaction when it's not open.
-func TestDBTxErrDatabaseNotOpen(t *testing.T) {
+func TestDB_Begin_DatabaseNotOpen(t *testing.T) {
var db DB
tx, err := db.Begin(false)
assert.Nil(t, tx)
@@ -160,7 +163,7 @@ func TestDBTxErrDatabaseNotOpen(t *testing.T) {
}
// Ensure that a read-write transaction can be retrieved.
-func TestDBBeginRW(t *testing.T) {
+func TestDB_BeginRW(t *testing.T) {
withOpenDB(func(db *DB, path string) {
tx, err := db.Begin(true)
assert.NotNil(t, tx)
@@ -172,7 +175,7 @@ func TestDBBeginRW(t *testing.T) {
}
// Ensure that opening a transaction while the DB is closed returns an error.
-func TestDBRWTxOpenWithClosedDB(t *testing.T) {
+func TestDB_BeginRW_Closed(t *testing.T) {
var db DB
tx, err := db.Begin(true)
assert.Equal(t, err, ErrDatabaseNotOpen)
@@ -180,11 +183,11 @@ func TestDBRWTxOpenWithClosedDB(t *testing.T) {
}
// Ensure a database can provide a transactional block.
-func TestDBTxBlock(t *testing.T) {
+func TestDB_Update(t *testing.T) {
withOpenDB(func(db *DB, path string) {
err := db.Update(func(tx *Tx) error {
- tx.CreateBucket("widgets")
- b := tx.Bucket("widgets")
+ tx.CreateBucket([]byte("widgets"))
+ b := tx.Bucket([]byte("widgets"))
b.Put([]byte("foo"), []byte("bar"))
b.Put([]byte("baz"), []byte("bat"))
b.Delete([]byte("foo"))
@@ -192,8 +195,8 @@ func TestDBTxBlock(t *testing.T) {
})
assert.NoError(t, err)
err = db.View(func(tx *Tx) error {
- assert.Nil(t, tx.Bucket("widgets").Get([]byte("foo")))
- assert.Equal(t, []byte("bat"), tx.Bucket("widgets").Get([]byte("baz")))
+ assert.Nil(t, tx.Bucket([]byte("widgets")).Get([]byte("foo")))
+ assert.Equal(t, []byte("bat"), tx.Bucket([]byte("widgets")).Get([]byte("baz")))
return nil
})
assert.NoError(t, err)
@@ -201,20 +204,20 @@ func TestDBTxBlock(t *testing.T) {
}
// Ensure a closed database returns an error while running a transaction block
-func TestDBTxBlockWhileClosed(t *testing.T) {
+func TestDB_Update_Closed(t *testing.T) {
var db DB
err := db.Update(func(tx *Tx) error {
- tx.CreateBucket("widgets")
+ tx.CreateBucket([]byte("widgets"))
return nil
})
assert.Equal(t, err, ErrDatabaseNotOpen)
}
// Ensure a panic occurs while trying to commit a managed transaction.
-func TestDBTxBlockWithManualCommitAndRollback(t *testing.T) {
+func TestDB_Update_ManualCommitAndRollback(t *testing.T) {
var db DB
db.Update(func(tx *Tx) error {
- tx.CreateBucket("widgets")
+ tx.CreateBucket([]byte("widgets"))
assert.Panics(t, func() { tx.Commit() })
assert.Panics(t, func() { tx.Rollback() })
return nil
@@ -226,37 +229,58 @@ func TestDBTxBlockWithManualCommitAndRollback(t *testing.T) {
})
}
+// Ensure a database can return an error through a read-only transactional block.
+func TestDB_View_Error(t *testing.T) {
+ withOpenDB(func(db *DB, path string) {
+ err := db.View(func(tx *Tx) error {
+ return errors.New("xxx")
+ })
+ assert.Equal(t, errors.New("xxx"), err)
+ })
+}
+
// Ensure that the database can be copied to a file path.
-func TestDBCopyFile(t *testing.T) {
+func TestDB_CopyFile(t *testing.T) {
withOpenDB(func(db *DB, path string) {
+ var dest = tempfile()
db.Update(func(tx *Tx) error {
- tx.CreateBucket("widgets")
- tx.Bucket("widgets").Put([]byte("foo"), []byte("bar"))
- tx.Bucket("widgets").Put([]byte("baz"), []byte("bat"))
+ tx.CreateBucket([]byte("widgets"))
+ tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar"))
+ tx.Bucket([]byte("widgets")).Put([]byte("baz"), []byte("bat"))
return nil
})
- assert.NoError(t, os.RemoveAll("/tmp/bolt.copyfile.db"))
- assert.NoError(t, db.CopyFile("/tmp/bolt.copyfile.db", 0666))
+ assert.NoError(t, db.CopyFile(dest, 0600))
- db2, err := Open("/tmp/bolt.copyfile.db", 0666)
+ db2, err := Open(dest, 0600)
assert.NoError(t, err)
defer db2.Close()
db2.View(func(tx *Tx) error {
- assert.Equal(t, []byte("bar"), tx.Bucket("widgets").Get([]byte("foo")))
- assert.Equal(t, []byte("bat"), tx.Bucket("widgets").Get([]byte("baz")))
+ assert.Equal(t, []byte("bar"), tx.Bucket([]byte("widgets")).Get([]byte("foo")))
+ assert.Equal(t, []byte("bat"), tx.Bucket([]byte("widgets")).Get([]byte("baz")))
return nil
})
})
}
// Ensure that an error is returned when a database write fails.
-func TestDBWriteFail(t *testing.T) {
+func TestDB_Commit_WriteFail(t *testing.T) {
t.Skip("pending") // TODO(benbjohnson)
}
+// Ensure that DB stats can be returned.
+func TestDB_Stats(t *testing.T) {
+ withOpenDB(func(db *DB, path string) {
+ db.Update(func(tx *Tx) error {
+ return tx.CreateBucket([]byte("widgets"))
+ })
+ stats := db.Stats()
+ assert.Equal(t, 3, stats.TxStats.PageCount)
+ })
+}
+
// Ensure that the mmap grows appropriately.
-func TestDBMmapSize(t *testing.T) {
+func TestDB_mmapSize(t *testing.T) {
db := &DB{pageSize: 4096}
assert.Equal(t, db.mmapSize(0), minMmapSize)
assert.Equal(t, db.mmapSize(16384), minMmapSize)
@@ -268,15 +292,15 @@ func TestDBMmapSize(t *testing.T) {
}
// Ensure that database pages are in expected order and type.
-func TestDBConsistency(t *testing.T) {
+func TestDB_Consistency(t *testing.T) {
withOpenDB(func(db *DB, path string) {
db.Update(func(tx *Tx) error {
- return tx.CreateBucket("widgets")
+ return tx.CreateBucket([]byte("widgets"))
})
for i := 0; i < 10; i++ {
db.Update(func(tx *Tx) error {
- assert.NoError(t, tx.Bucket("widgets").Put([]byte("foo"), []byte("bar")))
+ assert.NoError(t, tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar")))
return nil
})
}
@@ -297,7 +321,7 @@ func TestDBConsistency(t *testing.T) {
assert.Equal(t, "freelist", p.Type)
}
if p, _ := tx.Page(5); assert.NotNil(t, p) {
- assert.Equal(t, "buckets", p.Type)
+ assert.Equal(t, "leaf", p.Type) // root leaf
}
if p, _ := tx.Page(6); assert.NotNil(t, p) {
assert.Equal(t, "leaf", p.Type)
@@ -313,20 +337,187 @@ func TestDBConsistency(t *testing.T) {
}
// Ensure that a database can return a string representation of itself.
-func TestDBString(t *testing.T) {
- db := &DB{path: "/tmp/foo"}
- assert.Equal(t, db.String(), `DB<"/tmp/foo">`)
- assert.Equal(t, db.GoString(), `bolt.DB{path:"/tmp/foo"}`)
+func TestDB_String(t *testing.T) {
+ db := &DB{path: "/foo/bar"}
+ assert.Equal(t, db.String(), `DB<"/foo/bar">`)
+ assert.Equal(t, db.GoString(), `bolt.DB{path:"/foo/bar"}`)
}
-// withTempPath executes a function with a database reference.
-func withTempPath(fn func(string)) {
+// Ensure that DB stats can be subtracted from one another.
+func TestDBStats_Sub(t *testing.T) {
+ var a, b Stats
+ a.TxStats.PageCount = 3
+ b.TxStats.PageCount = 10
+ diff := b.Sub(&a)
+ assert.Equal(t, 7, diff.TxStats.PageCount)
+}
+
+// Benchmark the performance of single put transactions in sequential order.
+func BenchmarkDB_Put_Sequential(b *testing.B) {
+ value := []byte(strings.Repeat("0", 64))
+ withOpenDB(func(db *DB, path string) {
+ db.Update(func(tx *Tx) error {
+ return tx.CreateBucket([]byte("widgets"))
+ })
+ for i := 0; i < b.N; i++ {
+ db.Update(func(tx *Tx) error {
+ return tx.Bucket([]byte("widgets")).Put([]byte(strconv.Itoa(i)), value)
+ })
+ }
+ })
+}
+
+// Benchmark the performance of single put transactions in random order.
+func BenchmarkDB_Put_Random(b *testing.B) {
+ indexes := rand.Perm(b.N)
+ value := []byte(strings.Repeat("0", 64))
+ withOpenDB(func(db *DB, path string) {
+ db.Update(func(tx *Tx) error {
+ return tx.CreateBucket([]byte("widgets"))
+ })
+ for i := 0; i < b.N; i++ {
+ db.Update(func(tx *Tx) error {
+ return tx.Bucket([]byte("widgets")).Put([]byte(strconv.Itoa(indexes[i])), value)
+ })
+ }
+ })
+}
+
+func ExampleDB_Update() {
+ // Open the database.
+ db, _ := Open(tempfile(), 0666)
+ defer os.Remove(db.Path())
+ defer db.Close()
+
+ // Execute several commands within a write transaction.
+ err := db.Update(func(tx *Tx) error {
+ if err := tx.CreateBucket([]byte("widgets")); err != nil {
+ return err
+ }
+ b := tx.Bucket([]byte("widgets"))
+ if err := b.Put([]byte("foo"), []byte("bar")); err != nil {
+ return err
+ }
+ return nil
+ })
+
+ // If our transactional block didn't return an error then our data is saved.
+ if err == nil {
+ db.View(func(tx *Tx) error {
+ value := tx.Bucket([]byte("widgets")).Get([]byte("foo"))
+ fmt.Printf("The value of 'foo' is: %s\n", string(value))
+ return nil
+ })
+ }
+
+ // Output:
+ // The value of 'foo' is: bar
+}
+
+func ExampleDB_View() {
+ // Open the database.
+ db, _ := Open(tempfile(), 0666)
+ defer os.Remove(db.Path())
+ defer db.Close()
+
+ // Insert data into a bucket.
+ db.Update(func(tx *Tx) error {
+ tx.CreateBucket([]byte("people"))
+ b := tx.Bucket([]byte("people"))
+ b.Put([]byte("john"), []byte("doe"))
+ b.Put([]byte("susy"), []byte("que"))
+ return nil
+ })
+
+ // Access data from within a read-only transactional block.
+ db.View(func(tx *Tx) error {
+ v := tx.Bucket([]byte("people")).Get([]byte("john"))
+ fmt.Printf("John's last name is %s.\n", string(v))
+ return nil
+ })
+
+ // Output:
+ // John's last name is doe.
+}
+
+func ExampleDB_Begin_ReadOnly() {
+ // Open the database.
+ db, _ := Open(tempfile(), 0666)
+ defer os.Remove(db.Path())
+ defer db.Close()
+
+ // Create a bucket.
+ db.Update(func(tx *Tx) error {
+ return tx.CreateBucket([]byte("widgets"))
+ })
+
+ // Create several keys in a transaction.
+ tx, _ := db.Begin(true)
+ b := tx.Bucket([]byte("widgets"))
+ b.Put([]byte("john"), []byte("blue"))
+ b.Put([]byte("abby"), []byte("red"))
+ b.Put([]byte("zephyr"), []byte("purple"))
+ tx.Commit()
+
+ // Iterate over the values in sorted key order.
+ tx, _ = db.Begin(false)
+ c := tx.Bucket([]byte("widgets")).Cursor()
+ for k, v := c.First(); k != nil; k, v = c.Next() {
+ fmt.Printf("%s likes %s\n", string(k), string(v))
+ }
+ tx.Rollback()
+
+ // Output:
+ // abby likes red
+ // john likes blue
+ // zephyr likes purple
+}
+
+func ExampleDB_CopyFile() {
+ // Open the database.
+ db, _ := Open(tempfile(), 0666)
+ defer os.Remove(db.Path())
+ defer db.Close()
+
+ // Create a bucket and a key.
+ db.Update(func(tx *Tx) error {
+ tx.CreateBucket([]byte("widgets"))
+ tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar"))
+ return nil
+ })
+
+ // Copy the database to another file.
+ toFile := tempfile()
+ db.CopyFile(toFile, 0666)
+ defer os.Remove(toFile)
+
+ // Open the cloned database.
+ db2, _ := Open(toFile, 0666)
+ defer db2.Close()
+
+ // Ensure that the key exists in the copy.
+ db2.View(func(tx *Tx) error {
+ value := tx.Bucket([]byte("widgets")).Get([]byte("foo"))
+ fmt.Printf("The value for 'foo' in the clone is: %s\n", string(value))
+ return nil
+ })
+
+ // Output:
+ // The value for 'foo' in the clone is: bar
+}
+
+// tempfile returns a temporary file path.
+func tempfile() string {
f, _ := ioutil.TempFile("", "bolt-")
- path := f.Name()
f.Close()
- os.Remove(path)
- defer os.RemoveAll(path)
+ os.Remove(f.Name())
+ return f.Name()
+}
+// withTempPath executes a function with a database reference.
+func withTempPath(fn func(string)) {
+ path := tempfile()
+ defer os.RemoveAll(path)
fn(path)
}
@@ -354,7 +545,8 @@ func withOpenDB(fn func(*DB, string)) {
func mustCheck(db *DB) {
if err := db.Check(); err != nil {
// Copy db off first.
- db.CopyFile("/tmp/check.db", 0600)
+ var path = tempfile()
+ db.CopyFile(path, 0600)
if errors, ok := err.(ErrorList); ok {
for _, err := range errors {
@@ -362,7 +554,7 @@ func mustCheck(db *DB) {
}
}
warn(err)
- panic("check failure: see /tmp/check.db")
+ panic("check failure: " + path)
}
}
@@ -391,3 +583,11 @@ func logStats(db *DB) {
func truncDuration(d time.Duration) string {
return regexp.MustCompile(`^(\d+)(\.\d+)`).ReplaceAllString(d.String(), "$1")
}
+
+// copyAndFailNow copies a database to a new location and then fails the test.
+func copyAndFailNow(t *testing.T, db *DB) {
+ path := tempfile()
+ db.CopyFile(path, 0600)
+ fmt.Println("db copied to: ", path)
+ t.FailNow()
+}
diff --git a/example_test.go b/example_test.go
deleted file mode 100644
index 3e3758e..0000000
--- a/example_test.go
+++ /dev/null
@@ -1,251 +0,0 @@
-package bolt
-
-import (
- "fmt"
- "os"
-)
-
-func init() {
- os.RemoveAll("/tmp/bolt")
- os.MkdirAll("/tmp/bolt", 0777)
-}
-
-func ExampleDB_Update() {
- // Open the database.
- db, _ := Open("/tmp/bolt/db_do.db", 0666)
- defer db.Close()
-
- // Execute several commands within a write transaction.
- err := db.Update(func(tx *Tx) error {
- if err := tx.CreateBucket("widgets"); err != nil {
- return err
- }
- b := tx.Bucket("widgets")
- if err := b.Put([]byte("foo"), []byte("bar")); err != nil {
- return err
- }
- return nil
- })
-
- // If our transactional block didn't return an error then our data is saved.
- if err == nil {
- db.View(func(tx *Tx) error {
- value := tx.Bucket("widgets").Get([]byte("foo"))
- fmt.Printf("The value of 'foo' is: %s\n", string(value))
- return nil
- })
- }
-
- // Output:
- // The value of 'foo' is: bar
-}
-
-func ExampleDB_View() {
- // Open the database.
- db, _ := Open("/tmp/bolt/db_with.db", 0666)
- defer db.Close()
-
- // Insert data into a bucket.
- db.Update(func(tx *Tx) error {
- tx.CreateBucket("people")
- tx.Bucket("people").Put([]byte("john"), []byte("doe"))
- tx.Bucket("people").Put([]byte("susy"), []byte("que"))
- return nil
- })
-
- // Access data from within a read-only transactional block.
- db.View(func(tx *Tx) error {
- v := tx.Bucket("people").Get([]byte("john"))
- fmt.Printf("John's last name is %s.\n", string(v))
- return nil
- })
-
- // Output:
- // John's last name is doe.
-}
-
-func ExampleTx_Put() {
- // Open the database.
- db, _ := Open("/tmp/bolt/db_put.db", 0666)
- defer db.Close()
-
- // Start a write transaction.
- db.Update(func(tx *Tx) error {
- // Create a bucket.
- tx.CreateBucket("widgets")
-
- // Set the value "bar" for the key "foo".
- tx.Bucket("widgets").Put([]byte("foo"), []byte("bar"))
- return nil
- })
-
- // Read value back in a different read-only transaction.
- db.Update(func(tx *Tx) error {
- value := tx.Bucket("widgets").Get([]byte("foo"))
- fmt.Printf("The value of 'foo' is: %s\n", string(value))
- return nil
- })
-
- // Output:
- // The value of 'foo' is: bar
-}
-
-func ExampleTx_Delete() {
- // Open the database.
- db, _ := Open("/tmp/bolt/db_delete.db", 0666)
- defer db.Close()
-
- // Start a write transaction.
- db.Update(func(tx *Tx) error {
- // Create a bucket.
- tx.CreateBucket("widgets")
- b := tx.Bucket("widgets")
-
- // Set the value "bar" for the key "foo".
- b.Put([]byte("foo"), []byte("bar"))
-
- // Retrieve the key back from the database and verify it.
- value := b.Get([]byte("foo"))
- fmt.Printf("The value of 'foo' was: %s\n", string(value))
- return nil
- })
-
- // Delete the key in a different write transaction.
- db.Update(func(tx *Tx) error {
- return tx.Bucket("widgets").Delete([]byte("foo"))
- })
-
- // Retrieve the key again.
- db.View(func(tx *Tx) error {
- value := tx.Bucket("widgets").Get([]byte("foo"))
- if value == nil {
- fmt.Printf("The value of 'foo' is now: nil\n")
- }
- return nil
- })
-
- // Output:
- // The value of 'foo' was: bar
- // The value of 'foo' is now: nil
-}
-
-func ExampleTx_ForEach() {
- // Open the database.
- db, _ := Open("/tmp/bolt/tx_foreach.db", 0666)
- defer db.Close()
-
- // Insert data into a bucket.
- db.Update(func(tx *Tx) error {
- tx.CreateBucket("animals")
- b := tx.Bucket("animals")
- b.Put([]byte("dog"), []byte("fun"))
- b.Put([]byte("cat"), []byte("lame"))
- b.Put([]byte("liger"), []byte("awesome"))
-
- // Iterate over items in sorted key order.
- b.ForEach(func(k, v []byte) error {
- fmt.Printf("A %s is %s.\n", string(k), string(v))
- return nil
- })
- return nil
- })
-
- // Output:
- // A cat is lame.
- // A dog is fun.
- // A liger is awesome.
-}
-
-func ExampleBegin_ReadOnly() {
- // Open the database.
- db, _ := Open("/tmp/bolt/tx.db", 0666)
- defer db.Close()
-
- // Create a bucket.
- db.Update(func(tx *Tx) error {
- return tx.CreateBucket("widgets")
- })
-
- // Create several keys in a transaction.
- tx, _ := db.Begin(true)
- b := tx.Bucket("widgets")
- b.Put([]byte("john"), []byte("blue"))
- b.Put([]byte("abby"), []byte("red"))
- b.Put([]byte("zephyr"), []byte("purple"))
- tx.Commit()
-
- // Iterate over the values in sorted key order.
- tx, _ = db.Begin(false)
- c := tx.Bucket("widgets").Cursor()
- for k, v := c.First(); k != nil; k, v = c.Next() {
- fmt.Printf("%s likes %s\n", string(k), string(v))
- }
- tx.Rollback()
-
- // Output:
- // abby likes red
- // john likes blue
- // zephyr likes purple
-}
-
-func ExampleTx_rollback() {
- // Open the database.
- db, _ := Open("/tmp/bolt/tx_rollback.db", 0666)
- defer db.Close()
-
- // Create a bucket.
- db.Update(func(tx *Tx) error {
- return tx.CreateBucket("widgets")
- })
-
- // Set a value for a key.
- db.Update(func(tx *Tx) error {
- return tx.Bucket("widgets").Put([]byte("foo"), []byte("bar"))
- })
-
- // Update the key but rollback the transaction so it never saves.
- tx, _ := db.Begin(true)
- b := tx.Bucket("widgets")
- b.Put([]byte("foo"), []byte("baz"))
- tx.Rollback()
-
- // Ensure that our original value is still set.
- db.View(func(tx *Tx) error {
- value := tx.Bucket("widgets").Get([]byte("foo"))
- fmt.Printf("The value for 'foo' is still: %s\n", string(value))
- return nil
- })
-
- // Output:
- // The value for 'foo' is still: bar
-}
-
-func ExampleDB_CopyFile() {
- // Open the database.
- db, _ := Open("/tmp/bolt/db_copy.db", 0666)
- defer db.Close()
-
- // Create a bucket and a key.
- db.Update(func(tx *Tx) error {
- tx.CreateBucket("widgets")
- tx.Bucket("widgets").Put([]byte("foo"), []byte("bar"))
- return nil
- })
-
- // Copy the database to another file.
- db.CopyFile("/tmp/bolt/db_copy_2.db", 0666)
-
- // Open the cloned database.
- db2, _ := Open("/tmp/bolt/db_copy_2.db", 0666)
- defer db2.Close()
-
- // Ensure that the key exists in the copy.
- db2.View(func(tx *Tx) error {
- value := tx.Bucket("widgets").Get([]byte("foo"))
- fmt.Printf("The value for 'foo' in the clone is: %s\n", string(value))
- return nil
- })
-
- // Output:
- // The value for 'foo' in the clone is: bar
-}
diff --git a/freelist.go b/freelist.go
index cb58a54..ebe2810 100644
--- a/freelist.go
+++ b/freelist.go
@@ -62,6 +62,8 @@ func (f *freelist) free(txid txid, p *page) {
ids = append(ids, p.id+pgid(i))
}
f.pending[txid] = ids
+
+ // DEBUG ONLY: f.check()
}
// release moves all page ids for a transaction id (or older) to the freelist.
@@ -109,6 +111,29 @@ func (f *freelist) write(p *page) {
copy(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[:], ids)
}
+// check verifies there are no double-freed pages.
+// This is slow so it should only be used while debugging.
+// If errors are found then a panic is invoked.
+/*
+func (f *freelist) check() {
+ var lookup = make(map[pgid]txid)
+ for _, id := range f.ids {
+ if _, ok := lookup[id]; ok {
+ panic(fmt.Sprintf("page %d already freed", id))
+ }
+ lookup[id] = 0
+ }
+ for txid, m := range f.pending {
+ for _, id := range m {
+ if _, ok := lookup[id]; ok {
+ panic(fmt.Sprintf("tx %d: page %d already freed in tx %d", txid, id, lookup[id]))
+ }
+ lookup[id] = txid
+ }
+ }
+}
+*/
+
type reverseSortedPgids []pgid
func (s reverseSortedPgids) Len() int { return len(s) }
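The commented-out check above is a brute-force double-free detector: every id on the free list and every pending id must be unique. It is O(n) per call, which is why it ships disabled. A standalone rendering of the same logic, with types reduced to the minimum needed to run it:

package main

import "fmt"

type pgid uint64
type txid uint64

type freelist struct {
	ids     []pgid          // pages freed and available for reallocation
	pending map[txid][]pgid // pages freed but still pending release per tx
}

// check panics if any page id appears twice across the free and
// pending lists, i.e. if a page was freed more than once.
func (f *freelist) check() {
	var lookup = make(map[pgid]txid)
	for _, id := range f.ids {
		if _, ok := lookup[id]; ok {
			panic(fmt.Sprintf("page %d already freed", id))
		}
		lookup[id] = 0
	}
	for txid, m := range f.pending {
		for _, id := range m {
			if _, ok := lookup[id]; ok {
				panic(fmt.Sprintf("tx %d: page %d already freed in tx %d", txid, id, lookup[id]))
			}
			lookup[id] = txid
		}
	}
}

func main() {
	defer func() {
		fmt.Println("caught:", recover())
	}()
	f := &freelist{ids: []pgid{3, 4}, pending: map[txid][]pgid{7: {4}}}
	f.check() // page 4 is on both lists: a double free
}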
diff --git a/freelist_test.go b/freelist_test.go
index 8421392..2b321a4 100644
--- a/freelist_test.go
+++ b/freelist_test.go
@@ -8,21 +8,21 @@ import (
)
// Ensure that a page is added to a transaction's freelist.
-func TestFreelistFree(t *testing.T) {
+func TestFreelist_free(t *testing.T) {
f := &freelist{pending: make(map[txid][]pgid)}
f.free(100, &page{id: 12})
assert.Equal(t, f.pending[100], []pgid{12})
}
// Ensure that a page and its overflow is added to a transaction's freelist.
-func TestFreelistFreeOverflow(t *testing.T) {
+func TestFreelist_free_overflow(t *testing.T) {
f := &freelist{pending: make(map[txid][]pgid)}
f.free(100, &page{id: 12, overflow: 3})
assert.Equal(t, f.pending[100], []pgid{12, 13, 14, 15})
}
// Ensure that a transaction's free pages can be released.
-func TestFreelistRelease(t *testing.T) {
+func TestFreelist_release(t *testing.T) {
f := &freelist{pending: make(map[txid][]pgid)}
f.free(100, &page{id: 12, overflow: 1})
f.free(100, &page{id: 9})
@@ -35,7 +35,7 @@ func TestFreelistRelease(t *testing.T) {
}
// Ensure that a freelist can find contiguous blocks of pages.
-func TestFreelistAllocate(t *testing.T) {
+func TestFreelist_allocate(t *testing.T) {
f := &freelist{ids: []pgid{18, 13, 12, 9, 7, 6, 5, 4, 3}}
assert.Equal(t, f.allocate(2), pgid(12))
assert.Equal(t, f.allocate(1), pgid(18))
@@ -48,7 +48,7 @@ func TestFreelistAllocate(t *testing.T) {
}
// Ensure that a freelist can deserialize from a freelist page.
-func TestFreelistRead(t *testing.T) {
+func TestFreelist_read(t *testing.T) {
// Create a page.
var buf [4096]byte
page := (*page)(unsafe.Pointer(&buf[0]))
@@ -71,7 +71,7 @@ func TestFreelistRead(t *testing.T) {
}
// Ensure that a freelist can serialize into a freelist page.
-func TestFreelistWrite(t *testing.T) {
+func TestFreelist_write(t *testing.T) {
// Create a freelist and write it to a page.
var buf [4096]byte
f := &freelist{ids: []pgid{12, 39}, pending: make(map[txid][]pgid)}
diff --git a/functional_test.go b/functional_test.go
deleted file mode 100644
index fdaecbb..0000000
--- a/functional_test.go
+++ /dev/null
@@ -1,144 +0,0 @@
-package bolt
-
-import (
- "fmt"
- "os"
- "sync"
- "testing"
- "testing/quick"
- "time"
-
- "github.com/stretchr/testify/assert"
-)
-
-// Ensure that multiple threads can use the DB without race detector errors.
-func TestParallelTxs(t *testing.T) {
- if testing.Short() {
- t.Skip("skipping test in short mode.")
- }
-
- var mutex sync.RWMutex
-
- err := quick.Check(func(numReaders, batchSize uint, items testdata) bool {
- // Limit the readers & writers to something reasonable.
- numReaders = (numReaders % 10) + 1
- batchSize = (batchSize % 50) + 1
-
- // warn("")
- // warn("================================================================")
- // warn("numReaders:", numReaders, "batchSize", batchSize)
-
- // Maintain the current dataset.
- var current testdata
-
- withOpenDB(func(db *DB, path string) {
- db.Update(func(tx *Tx) error {
- return tx.CreateBucket("widgets")
- })
-
- // Maintain a set of concurrent readers.
- var wg sync.WaitGroup
- var c = make(chan bool, 0)
- go func() {
- var readers = make(chan int, numReaders)
- for {
- wg.Add(1)
-
- // Attempt to start a new reader unless we're stopped.
- select {
- case readers <- 0:
- case <-c:
- wg.Done()
- return
- }
-
- go func() {
- mutex.RLock()
- local := make(map[string][]byte)
- for _, item := range current {
- local[string(item.Key)] = item.Value
- }
- tx, err := db.Begin(false)
- mutex.RUnlock()
- if err == ErrDatabaseNotOpen {
- wg.Done()
- return
- } else if !assert.NoError(t, err) {
- t.FailNow()
- }
-
- // Verify all data is in for local data list.
- for k, v := range local {
- value := tx.Bucket("widgets").Get([]byte(k))
- if !assert.NoError(t, err) || !assert.Equal(t, value, v, fmt.Sprintf("reader (%p)", tx)) {
- tx.Rollback()
- wg.Done()
- t.FailNow()
- }
- }
-
- tx.Rollback()
- wg.Done()
- <-readers
- }()
- }
- }()
-
- // Batch insert items.
- pending := items
- for {
- // Determine next batch.
- currentBatchSize := int(batchSize)
- if currentBatchSize > len(pending) {
- currentBatchSize = len(pending)
- }
- batchItems := pending[0:currentBatchSize]
- pending = pending[currentBatchSize:]
-
- // Start write transaction.
- tx, err := db.Begin(true)
- if !assert.NoError(t, err) {
- t.FailNow()
- }
-
- // warnf("[writer] BEGIN (%d)", currentBatchSize)
-
- // Insert whole batch.
- b := tx.Bucket("widgets")
- for _, item := range batchItems {
- // warnf("[writer] PUT %x: %x", trunc(item.Key, 3), trunc(item.Value, 3))
- err := b.Put(item.Key, item.Value)
- if !assert.NoError(t, err) {
- t.FailNow()
- }
- }
-
- // Commit and update the current list.
- mutex.Lock()
- // warnf("[writer] COMMIT\n\n")
- err = tx.Commit()
- current = append(current, batchItems...)
- mutex.Unlock()
- if !assert.NoError(t, err) {
- t.FailNow()
- }
-
- // If there are no more left then exit.
- if len(pending) == 0 {
- break
- }
-
- time.Sleep(1 * time.Millisecond)
- }
-
- // Notify readers to stop.
- close(c)
-
- // Wait for readers to finish.
- wg.Wait()
- })
- return true
- }, qconfig())
- assert.NoError(t, err)
- fmt.Fprint(os.Stderr, "\n")
-}
diff --git a/meta.go b/meta.go
index abb2a93..4212252 100644
--- a/meta.go
+++ b/meta.go
@@ -25,7 +25,7 @@ type meta struct {
version uint32
pageSize uint32
flags uint32
- buckets pgid
+ root bucket
freelist pgid
pgid pgid
txid txid
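This meta change is the heart of the version bump: instead of pointing at a dedicated buckets page, meta embeds the root bucket's header inline, and all user buckets hang off that root as flagged keys. The field layout below is inferred from m.root = bucket{root: 3} in db.init and is an assumption; the real struct may hold more than the root pgid.

package bolt

type pgid uint64

// bucket is the on-file header for a bucket: just enough state to find
// its root page. meta embeds one of these for the root bucket, which is
// why db.init can write m.root = bucket{root: 3} to point it at the
// empty leaf page created at page 3. (Sketch; layout assumed.)
type bucket struct {
	root pgid
}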
diff --git a/meta_test.go b/meta_test.go
index b229078..1a2d6b5 100644
--- a/meta_test.go
+++ b/meta_test.go
@@ -6,13 +6,13 @@ import (
)
// Ensure that meta with bad magic is invalid.
-func TestMetaValidateMagic(t *testing.T) {
+func TestMeta_validate_magic(t *testing.T) {
m := &meta{magic: 0x01234567}
assert.Equal(t, m.validate(), ErrInvalid)
}
// Ensure that meta with a bad version is invalid.
-func TestMetaValidateVersion(t *testing.T) {
+func TestMeta_validate_version(t *testing.T) {
m := &meta{magic: magic, version: 200}
assert.Equal(t, m.validate(), ErrVersionMismatch)
}
diff --git a/node.go b/node.go
index 709c0ca..49de144 100644
--- a/node.go
+++ b/node.go
@@ -8,7 +8,7 @@ import (
// node represents an in-memory, deserialized page.
type node struct {
- tx *Tx
+ bucket *Bucket
isLeaf bool
unbalanced bool
key []byte
@@ -45,18 +45,10 @@ func (n *node) pageElementSize() int {
return branchPageElementSize
}
-// root returns the root node in the tree.
-func (n *node) root() *node {
- if n.parent == nil {
- return n
- }
- return n.parent.root()
-}
-
// childAt returns the child node at a given index.
func (n *node) childAt(index int) *node {
_assert(!n.isLeaf, "invalid childAt(%d) on a leaf node", index)
- return n.tx.node(n.inodes[index].pgid, n)
+ return n.bucket.node(n.inodes[index].pgid, n)
}
// childIndex returns the index of a given child node.
@@ -95,7 +87,7 @@ func (n *node) prevSibling() *node {
}
// put inserts a key/value.
-func (n *node) put(oldKey, newKey, value []byte, pgid pgid) {
+func (n *node) put(oldKey, newKey, value []byte, pgid pgid, flags uint32) {
// Find insertion index.
index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, oldKey) != -1 })
@@ -107,6 +99,7 @@ func (n *node) put(oldKey, newKey, value []byte, pgid pgid) {
}
inode := &n.inodes[index]
+ inode.flags = flags
inode.key = newKey
inode.value = value
inode.pgid = pgid
@@ -139,6 +132,7 @@ func (n *node) read(p *page) {
inode := &n.inodes[i]
if n.isLeaf {
elem := p.leafPageElement(uint16(i))
+ inode.flags = elem.flags
inode.key = elem.key()
inode.value = elem.value()
} else {
@@ -173,6 +167,7 @@ func (n *node) write(p *page) {
if n.isLeaf {
elem := p.leafPageElement(uint16(i))
elem.pos = uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem)))
+ elem.flags = item.flags
elem.ksize = uint32(len(item.key))
elem.vsize = uint32(len(item.value))
} else {
@@ -214,7 +209,7 @@ func (n *node) split(pageSize int) []*node {
if len(current.inodes) >= minKeysPerPage && i < len(inodes)-minKeysPerPage && size+elemSize > threshold {
size = pageHeaderSize
nodes = append(nodes, current)
- current = &node{tx: n.tx, isLeaf: n.isLeaf}
+ current = &node{bucket: n.bucket, isLeaf: n.isLeaf}
}
size += elemSize
@@ -234,10 +229,10 @@ func (n *node) rebalance() {
n.unbalanced = false
// Update statistics.
- n.tx.stats.Rebalance++
+ n.bucket.tx.stats.Rebalance++
// Ignore if node is above threshold (25%) and has enough keys.
- var threshold = n.tx.db.pageSize / 4
+ var threshold = n.bucket.tx.db.pageSize / 4
if n.size() > threshold && len(n.inodes) > n.minKeys() {
return
}
@@ -247,20 +242,20 @@ func (n *node) rebalance() {
// If the root node is a branch and only has one child then collapse it.
if !n.isLeaf && len(n.inodes) == 1 {
// Move child's children up.
- child := n.tx.nodes[n.inodes[0].pgid]
+ child := n.bucket.nodes[n.inodes[0].pgid]
n.isLeaf = child.isLeaf
n.inodes = child.inodes[:]
// Reparent all child nodes being moved.
for _, inode := range n.inodes {
- if child, ok := n.tx.nodes[inode.pgid]; ok {
+ if child, ok := n.bucket.nodes[inode.pgid]; ok {
child.parent = n
}
}
// Remove old child.
child.parent = nil
- delete(n.tx.nodes, child.pgid)
+ delete(n.bucket.nodes, child.pgid)
child.free()
}
@@ -282,18 +277,18 @@ func (n *node) rebalance() {
if target.numChildren() > target.minKeys() {
if useNextSibling {
// Reparent and move node.
- if child, ok := n.tx.nodes[target.inodes[0].pgid]; ok {
+ if child, ok := n.bucket.nodes[target.inodes[0].pgid]; ok {
child.parent = n
}
n.inodes = append(n.inodes, target.inodes[0])
target.inodes = target.inodes[1:]
// Update target key on parent.
- target.parent.put(target.key, target.inodes[0].key, nil, target.pgid)
+ target.parent.put(target.key, target.inodes[0].key, nil, target.pgid, 0)
target.key = target.inodes[0].key
} else {
// Reparent and move node.
- if child, ok := n.tx.nodes[target.inodes[len(target.inodes)-1].pgid]; ok {
+ if child, ok := n.bucket.nodes[target.inodes[len(target.inodes)-1].pgid]; ok {
child.parent = n
}
n.inodes = append(n.inodes, inode{})
@@ -303,7 +298,7 @@ func (n *node) rebalance() {
}
// Update parent key for node.
- n.parent.put(n.key, n.inodes[0].key, nil, n.pgid)
+ n.parent.put(n.key, n.inodes[0].key, nil, n.pgid, 0)
n.key = n.inodes[0].key
return
@@ -313,7 +308,7 @@ func (n *node) rebalance() {
if useNextSibling {
// Reparent all child nodes being moved.
for _, inode := range target.inodes {
- if child, ok := n.tx.nodes[inode.pgid]; ok {
+ if child, ok := n.bucket.nodes[inode.pgid]; ok {
child.parent = n
}
}
@@ -321,12 +316,12 @@ func (n *node) rebalance() {
// Copy over inodes from target and remove target.
n.inodes = append(n.inodes, target.inodes...)
n.parent.del(target.key)
- delete(n.tx.nodes, target.pgid)
+ delete(n.bucket.nodes, target.pgid)
target.free()
} else {
// Reparent all child nodes being moved.
for _, inode := range n.inodes {
- if child, ok := n.tx.nodes[inode.pgid]; ok {
+ if child, ok := n.bucket.nodes[inode.pgid]; ok {
child.parent = target
}
}
@@ -334,8 +329,8 @@ func (n *node) rebalance() {
// Copy over inodes to target and remove node.
target.inodes = append(target.inodes, n.inodes...)
n.parent.del(n.key)
- n.parent.put(target.key, target.inodes[0].key, nil, target.pgid)
- delete(n.tx.nodes, n.pgid)
+ n.parent.put(target.key, target.inodes[0].key, nil, target.pgid, 0)
+ delete(n.bucket.nodes, n.pgid)
n.free()
}
@@ -366,7 +361,7 @@ func (n *node) dereference() {
// free adds the node's underlying page to the freelist.
func (n *node) free() {
if n.pgid != 0 {
- n.tx.db.freelist.free(n.tx.id(), n.tx.page(n.pgid))
+ n.bucket.tx.db.freelist.free(n.bucket.tx.id(), n.bucket.tx.page(n.pgid))
}
}
@@ -381,6 +376,7 @@ func (s nodesByDepth) Less(i, j int) bool { return s[i].depth > s[j].depth }
// It can be used to point to elements in a page or point
// to an element which hasn't been added to a page yet.
type inode struct {
+ flags uint32
pgid pgid
key []byte
value []byte
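With flags threaded through put, read, and write, a leaf inode can now record whether its value is a plain value or a nested bucket. A minimal sketch using the package-internal types above; bucketValue is a hypothetical stand-in for a serialized nested-bucket header:

// putExamples shows the two kinds of leaf entries the new flags support.
func putExamples(n *node, bucketValue []byte) {
	// A plain key/value pair carries no flags.
	n.put([]byte("foo"), []byte("foo"), []byte("bar"), 0, 0)

	// A nested bucket is stored as an ordinary leaf value whose inode flags
	// carry bucketLeafFlag (defined in page.go below), so readers can tell a
	// sub-bucket apart from a plain value.
	n.put([]byte("sub"), []byte("sub"), bucketValue, 0, bucketLeafFlag)
}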
diff --git a/node_test.go b/node_test.go
index 8555223..e58a544 100644
--- a/node_test.go
+++ b/node_test.go
@@ -8,12 +8,12 @@ import (
)
// Ensure that a node can insert a key/value.
-func TestNodePut(t *testing.T) {
+func TestNode_put(t *testing.T) {
n := &node{inodes: make(inodes, 0)}
- n.put([]byte("baz"), []byte("baz"), []byte("2"), 0)
- n.put([]byte("foo"), []byte("foo"), []byte("0"), 0)
- n.put([]byte("bar"), []byte("bar"), []byte("1"), 0)
- n.put([]byte("foo"), []byte("foo"), []byte("3"), 0)
+ n.put([]byte("baz"), []byte("baz"), []byte("2"), 0, 0)
+ n.put([]byte("foo"), []byte("foo"), []byte("0"), 0, 0)
+ n.put([]byte("bar"), []byte("bar"), []byte("1"), 0, 0)
+ n.put([]byte("foo"), []byte("foo"), []byte("3"), 0, leafPageFlag)
assert.Equal(t, len(n.inodes), 3)
assert.Equal(t, n.inodes[0].key, []byte("bar"))
assert.Equal(t, n.inodes[0].value, []byte("1"))
@@ -21,10 +21,11 @@ func TestNodePut(t *testing.T) {
assert.Equal(t, n.inodes[1].value, []byte("2"))
assert.Equal(t, n.inodes[2].key, []byte("foo"))
assert.Equal(t, n.inodes[2].value, []byte("3"))
+ assert.Equal(t, n.inodes[2].flags, uint32(leafPageFlag))
}
// Ensure that a node can deserialize from a leaf page.
-func TestNodeReadLeafPage(t *testing.T) {
+func TestNode_read_LeafPage(t *testing.T) {
// Create a page.
var buf [4096]byte
page := (*page)(unsafe.Pointer(&buf[0]))
@@ -55,12 +56,12 @@ func TestNodeReadLeafPage(t *testing.T) {
}
// Ensure that a node can serialize into a leaf page.
-func TestNodeWriteLeafPage(t *testing.T) {
+func TestNode_write_LeafPage(t *testing.T) {
// Create a node.
n := &node{isLeaf: true, inodes: make(inodes, 0)}
- n.put([]byte("susy"), []byte("susy"), []byte("que"), 0)
- n.put([]byte("ricki"), []byte("ricki"), []byte("lake"), 0)
- n.put([]byte("john"), []byte("john"), []byte("johnson"), 0)
+ n.put([]byte("susy"), []byte("susy"), []byte("que"), 0, 0)
+ n.put([]byte("ricki"), []byte("ricki"), []byte("lake"), 0, 0)
+ n.put([]byte("john"), []byte("john"), []byte("johnson"), 0, 0)
// Write it to a page.
var buf [4096]byte
@@ -82,14 +83,14 @@ func TestNodeWriteLeafPage(t *testing.T) {
}
// Ensure that a node can split into appropriate subgroups.
-func TestNodeSplit(t *testing.T) {
+func TestNode_split(t *testing.T) {
// Create a node.
n := &node{inodes: make(inodes, 0)}
- n.put([]byte("00000001"), []byte("00000001"), []byte("0123456701234567"), 0)
- n.put([]byte("00000002"), []byte("00000002"), []byte("0123456701234567"), 0)
- n.put([]byte("00000003"), []byte("00000003"), []byte("0123456701234567"), 0)
- n.put([]byte("00000004"), []byte("00000004"), []byte("0123456701234567"), 0)
- n.put([]byte("00000005"), []byte("00000005"), []byte("0123456701234567"), 0)
+ n.put([]byte("00000001"), []byte("00000001"), []byte("0123456701234567"), 0, 0)
+ n.put([]byte("00000002"), []byte("00000002"), []byte("0123456701234567"), 0, 0)
+ n.put([]byte("00000003"), []byte("00000003"), []byte("0123456701234567"), 0, 0)
+ n.put([]byte("00000004"), []byte("00000004"), []byte("0123456701234567"), 0, 0)
+ n.put([]byte("00000005"), []byte("00000005"), []byte("0123456701234567"), 0, 0)
// Split between 2 & 3.
nodes := n.split(100)
@@ -100,11 +101,11 @@ func TestNodeSplit(t *testing.T) {
}
// Ensure that a page with the minimum number of inodes just returns a single node.
-func TestNodeSplitWithMinKeys(t *testing.T) {
+func TestNode_split_MinKeys(t *testing.T) {
// Create a node.
n := &node{inodes: make(inodes, 0)}
- n.put([]byte("00000001"), []byte("00000001"), []byte("0123456701234567"), 0)
- n.put([]byte("00000002"), []byte("00000002"), []byte("0123456701234567"), 0)
+ n.put([]byte("00000001"), []byte("00000001"), []byte("0123456701234567"), 0, 0)
+ n.put([]byte("00000002"), []byte("00000002"), []byte("0123456701234567"), 0, 0)
// Split.
nodes := n.split(20)
@@ -113,14 +114,14 @@ func TestNodeSplitWithMinKeys(t *testing.T) {
}
// Ensure that a node whose keys all fit on a page just returns one leaf.
-func TestNodeSplitFitsInPage(t *testing.T) {
+func TestNode_split_SinglePage(t *testing.T) {
// Create a node.
n := &node{inodes: make(inodes, 0)}
- n.put([]byte("00000001"), []byte("00000001"), []byte("0123456701234567"), 0)
- n.put([]byte("00000002"), []byte("00000002"), []byte("0123456701234567"), 0)
- n.put([]byte("00000003"), []byte("00000003"), []byte("0123456701234567"), 0)
- n.put([]byte("00000004"), []byte("00000004"), []byte("0123456701234567"), 0)
- n.put([]byte("00000005"), []byte("00000005"), []byte("0123456701234567"), 0)
+ n.put([]byte("00000001"), []byte("00000001"), []byte("0123456701234567"), 0, 0)
+ n.put([]byte("00000002"), []byte("00000002"), []byte("0123456701234567"), 0, 0)
+ n.put([]byte("00000003"), []byte("00000003"), []byte("0123456701234567"), 0, 0)
+ n.put([]byte("00000004"), []byte("00000004"), []byte("0123456701234567"), 0, 0)
+ n.put([]byte("00000005"), []byte("00000005"), []byte("0123456701234567"), 0, 0)
// Split.
nodes := n.split(4096)
diff --git a/page.go b/page.go
index 0d46f09..56cf064 100644
--- a/page.go
+++ b/page.go
@@ -19,10 +19,13 @@ const (
branchPageFlag = 0x01
leafPageFlag = 0x02
metaPageFlag = 0x04
- bucketsPageFlag = 0x08
freelistPageFlag = 0x10
)
+const (
+ bucketLeafFlag = 0x01
+)
+
type pgid uint64
type page struct {
@@ -41,8 +44,6 @@ func (p *page) typ() string {
return "leaf"
} else if (p.flags & metaPageFlag) != 0 {
return "meta"
- } else if (p.flags & bucketsPageFlag) != 0 {
- return "buckets"
} else if (p.flags & freelistPageFlag) != 0 {
return "freelist"
}
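bucketsPageFlag disappears because buckets no longer occupy their own page type; instead the new bucketLeafFlag marks individual leaf elements. A short sketch of how a page scan might tell the two kinds of leaf entries apart, using the package-internal types from this diff:

// isBucketElement reports whether the i-th element of a leaf page holds a
// nested bucket rather than a plain value.
func isBucketElement(p *page, i uint16) bool {
	elem := p.leafPageElement(i)
	return elem.flags&bucketLeafFlag != 0
}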
diff --git a/page_test.go b/page_test.go
index 976af0d..be90096 100644
--- a/page_test.go
+++ b/page_test.go
@@ -6,16 +6,15 @@ import (
)
// Ensure that the page type can be returned in human-readable format.
-func TestPageTyp(t *testing.T) {
+func TestPage_typ(t *testing.T) {
assert.Equal(t, (&page{flags: branchPageFlag}).typ(), "branch")
assert.Equal(t, (&page{flags: leafPageFlag}).typ(), "leaf")
assert.Equal(t, (&page{flags: metaPageFlag}).typ(), "meta")
- assert.Equal(t, (&page{flags: bucketsPageFlag}).typ(), "buckets")
assert.Equal(t, (&page{flags: freelistPageFlag}).typ(), "freelist")
assert.Equal(t, (&page{flags: 20000}).typ(), "unknown<4e20>")
}
// Ensure that the hexdump debugging function doesn't blow up.
-func TestPageDump(t *testing.T) {
+func TestPage_dump(t *testing.T) {
(&page{id: 256}).hexdump(16)
}
diff --git a/tx.go b/tx.go
index bfdcce1..9f86d5e 100644
--- a/tx.go
+++ b/tx.go
@@ -33,10 +33,8 @@ type Tx struct {
managed bool
db *DB
meta *meta
- buckets *buckets
- nodes map[pgid]*node
+ root Bucket
pages map[pgid]*page
- pending []*node
stats TxStats
commitHandlers []func()
}
@@ -50,15 +48,14 @@ func (tx *Tx) init(db *DB) {
tx.meta = &meta{}
db.meta().copy(tx.meta)
- // Read in the buckets page.
- tx.buckets = &buckets{}
- tx.buckets.read(tx.page(tx.meta.buckets))
+ // Copy over the root bucket.
+ tx.root = newBucket(tx)
+ tx.root.bucket = &bucket{}
+ *tx.root.bucket = tx.meta.root
+ // Increment the transaction id and add a page cache for writable transactions.
if tx.writable {
tx.pages = make(map[pgid]*page)
- tx.nodes = make(map[pgid]*node)
-
- // Increment the transaction id.
tx.meta.txid += txid(1)
}
}
@@ -85,95 +82,38 @@ func (tx *Tx) Stats() TxStats {
// Bucket retrieves a bucket by name.
// Returns nil if the bucket does not exist.
-func (tx *Tx) Bucket(name string) *Bucket {
- b := tx.buckets.get(name)
- if b == nil {
- return nil
- }
-
- return &Bucket{
- bucket: b,
- name: name,
- tx: tx,
- }
-}
-
-// Buckets retrieves a list of all buckets.
-func (tx *Tx) Buckets() []*Bucket {
- buckets := make([]*Bucket, 0, len(tx.buckets.items))
- for name, b := range tx.buckets.items {
- bucket := &Bucket{
- bucket: b,
- name: name,
- tx: tx,
- }
- buckets = append(buckets, bucket)
- }
- sort.Sort(bucketsByName(buckets))
- return buckets
+func (tx *Tx) Bucket(name []byte) *Bucket {
+ return tx.root.Bucket(name)
}
// CreateBucket creates a new bucket.
// Returns an error if the bucket already exists, if the bucket name is blank, or if the bucket name is too long.
-func (tx *Tx) CreateBucket(name string) error {
- if tx.db == nil {
- return ErrTxClosed
- } else if !tx.writable {
- return ErrTxNotWritable
- } else if b := tx.Bucket(name); b != nil {
- return ErrBucketExists
- } else if len(name) == 0 {
- return ErrBucketNameRequired
- } else if len(name) > MaxBucketNameSize {
- return ErrBucketNameTooLarge
- }
-
- // Create a blank root leaf page.
- p, err := tx.allocate(1)
- if err != nil {
- return err
- }
- p.flags = leafPageFlag
-
- // Add bucket to buckets page.
- tx.buckets.put(name, &bucket{root: p.id})
-
- return nil
+func (tx *Tx) CreateBucket(name []byte) error {
+ return tx.root.CreateBucket(name)
}
// CreateBucketIfNotExists creates a new bucket if it doesn't already exist.
// Returns an error if the bucket name is blank, or if the bucket name is too long.
-func (tx *Tx) CreateBucketIfNotExists(name string) error {
- err := tx.CreateBucket(name)
- if err != nil && err != ErrBucketExists {
- return err
- }
- return nil
+func (tx *Tx) CreateBucketIfNotExists(name []byte) error {
+ return tx.root.CreateBucketIfNotExists(name)
}
// DeleteBucket deletes a bucket.
-// Returns an error if the bucket cannot be found.
-func (tx *Tx) DeleteBucket(name string) error {
- if tx.db == nil {
- return ErrTxClosed
- } else if !tx.writable {
- return ErrTxNotWritable
- }
-
- b := tx.Bucket(name)
- if b == nil {
- return ErrBucketNotFound
- }
-
- // Remove from buckets page.
- tx.buckets.del(name)
+// Returns an error if the bucket cannot be found or if the key represents a non-bucket value.
+func (tx *Tx) DeleteBucket(name []byte) error {
+ return tx.root.DeleteBucket(name)
+}
- // Free all pages.
- tx.forEachPage(b.root, 0, func(p *page, depth int) {
- tx.db.freelist.free(tx.id(), p)
+// ForEach executes a function for each bucket in the root.
+// If the provided function returns an error then the iteration is stopped and
+// the error is returned to the caller.
+func (tx *Tx) ForEach(fn func(name []byte, b *Bucket) error) error {
+ return tx.root.ForEach(func(k, v []byte) error {
+ if err := fn(k, tx.root.Bucket(k)); err != nil {
+ return err
+ }
+ return nil
})
-
- return nil
}
// OnCommit adds a handler function to be executed after the transaction successfully commits.
@@ -184,9 +124,8 @@ func (tx *Tx) OnCommit(fn func()) {
// Commit writes all changes to disk and updates the meta page.
// Returns an error if a disk write error occurs.
func (tx *Tx) Commit() error {
- if tx.managed {
- panic("managed tx commit not allowed")
- } else if tx.db == nil {
+ _assert(!tx.managed, "managed tx commit not allowed")
+ if tx.db == nil {
return ErrTxClosed
} else if !tx.writable {
return ErrTxNotWritable
@@ -196,33 +135,24 @@ func (tx *Tx) Commit() error {
// Rebalance nodes which have had deletions.
var startTime = time.Now()
- tx.rebalance()
+ tx.root.rebalance()
tx.stats.RebalanceTime += time.Since(startTime)
// Spill data onto dirty pages.
startTime = time.Now()
- if err := tx.spill(); err != nil {
+ if err := tx.root.spill(); err != nil {
tx.close()
return err
}
tx.stats.SpillTime += time.Since(startTime)
- // Spill buckets page.
- p, err := tx.allocate((tx.buckets.size() / tx.db.pageSize) + 1)
- if err != nil {
- tx.close()
- return err
- }
- tx.buckets.write(p)
-
- // Free previous bucket page and update meta.
- tx.db.freelist.free(tx.id(), tx.page(tx.meta.buckets))
- tx.meta.buckets = p.id
+ // Update the meta's root bucket with the root spilled above.
+ tx.meta.root.root = tx.root.root
// Free the freelist and allocate new pages for it. This will overestimate
// the size of the freelist but not underestimate the size (which would be bad).
- tx.db.freelist.free(tx.id(), tx.page(tx.meta.freelist))
- p, err = tx.allocate((tx.db.freelist.size() / tx.db.pageSize) + 1)
+ tx.db.freelist.free(tx.id(), tx.db.page(tx.meta.freelist))
+ p, err := tx.allocate((tx.db.freelist.size() / tx.db.pageSize) + 1)
if err != nil {
tx.close()
return err
@@ -257,9 +187,8 @@ func (tx *Tx) Commit() error {
// Rollback closes the transaction and ignores all previous updates.
func (tx *Tx) Rollback() error {
- if tx.managed {
- panic("managed tx rollback not allowed")
- } else if tx.db == nil {
+ _assert(!tx.managed, "managed tx rollback not allowed")
+ if tx.db == nil {
return ErrTxClosed
}
tx.close()
@@ -268,13 +197,13 @@ func (tx *Tx) Rollback() error {
func (tx *Tx) close() {
if tx.writable {
- // Merge statistics.
- tx.db.metalock.Lock()
- tx.db.stats.TxStats.add(&tx.stats)
- tx.db.metalock.Unlock()
-
// Remove writer lock.
tx.db.rwlock.Unlock()
+
+ // Merge statistics.
+ tx.db.statlock.Lock()
+ tx.db.stats.TxStats.add(&tx.stats)
+ tx.db.statlock.Unlock()
} else {
tx.db.removeTx(tx)
}
@@ -298,99 +227,6 @@ func (tx *Tx) allocate(count int) (*page, error) {
return p, nil
}
-// rebalance attempts to balance all nodes.
-func (tx *Tx) rebalance() {
- for _, n := range tx.nodes {
- n.rebalance()
- }
-}
-
-// spill writes all the nodes to dirty pages.
-func (tx *Tx) spill() error {
- // Keep track of the current root nodes.
- // We will update this at the end once all nodes are created.
- type root struct {
- node *node
- pgid pgid
- }
- var roots []root
-
- // Sort nodes by highest depth first.
- nodes := make(nodesByDepth, 0, len(tx.nodes))
- for _, n := range tx.nodes {
- nodes = append(nodes, n)
- }
- sort.Sort(nodes)
-
- // Spill nodes by deepest first.
- for i := 0; i < len(nodes); i++ {
- n := nodes[i]
-
- // Save existing root buckets for later.
- if n.parent == nil && n.pgid != 0 {
- roots = append(roots, root{n, n.pgid})
- }
-
- // Split nodes into appropriate sized nodes.
- // The first node in this list will be a reference to n to preserve ancestry.
- newNodes := n.split(tx.db.pageSize)
- tx.pending = newNodes
-
- // If this is a root node that split then create a parent node.
- if n.parent == nil && len(newNodes) > 1 {
- n.parent = &node{tx: tx, isLeaf: false}
- nodes = append(nodes, n.parent)
- }
-
- // Add node's page to the freelist.
- if n.pgid > 0 {
- tx.db.freelist.free(tx.id(), tx.page(n.pgid))
- }
-
- // Write nodes to dirty pages.
- for i, newNode := range newNodes {
- // Allocate contiguous space for the node.
- p, err := tx.allocate((newNode.size() / tx.db.pageSize) + 1)
- if err != nil {
- return err
- }
-
- // Write the node to the page.
- newNode.write(p)
- newNode.pgid = p.id
- newNode.parent = n.parent
-
- // The first node should use the existing entry, other nodes are inserts.
- var oldKey []byte
- if i == 0 {
- oldKey = n.key
- } else {
- oldKey = newNode.inodes[0].key
- }
-
- // Update the parent entry.
- if newNode.parent != nil {
- newNode.parent.put(oldKey, newNode.inodes[0].key, nil, newNode.pgid)
- }
-
- // Update the statistics.
- tx.stats.Spill++
- }
-
- tx.pending = nil
- }
-
- // Update roots with new roots.
- for _, root := range roots {
- tx.buckets.updateRoot(root.pgid, root.node.root().pgid)
- }
-
- // Clear out nodes now that they are all spilled.
- tx.nodes = make(map[pgid]*node)
-
- return nil
-}
-
// write writes any dirty pages to disk.
func (tx *Tx) write() error {
// Sort pages by id.
@@ -443,43 +279,6 @@ func (tx *Tx) writeMeta() error {
return nil
}
-// node creates a node from a page and associates it with a given parent.
-func (tx *Tx) node(pgid pgid, parent *node) *node {
- // Retrieve node if it's already been created.
- if tx.nodes == nil {
- return nil
- } else if n := tx.nodes[pgid]; n != nil {
- return n
- }
-
- // Otherwise create a branch and cache it.
- n := &node{tx: tx, parent: parent}
- if n.parent != nil {
- n.depth = n.parent.depth + 1
- }
- n.read(tx.page(pgid))
- tx.nodes[pgid] = n
-
- // Update statistics.
- tx.stats.NodeCount++
-
- return n
-}
-
-// dereference removes all references to the old mmap.
-func (tx *Tx) dereference() {
- for _, n := range tx.nodes {
- n.dereference()
- }
-
- for _, n := range tx.pending {
- n.dereference()
- }
-
- // Update statistics
- tx.stats.NodeDeref += len(tx.nodes) + len(tx.pending)
-}
-
// page returns a reference to the page with a given id.
// If the page has been written to then a temporary buffered page is returned.
func (tx *Tx) page(id pgid) *page {
@@ -494,17 +293,6 @@ func (tx *Tx) page(id pgid) *page {
return tx.db.page(id)
}
-// pageNode returns the in-memory node, if it exists.
-// Otherwise returns the underlying page.
-func (tx *Tx) pageNode(id pgid) (*page, *node) {
- if tx.nodes != nil {
- if n := tx.nodes[id]; n != nil {
- return nil, n
- }
- }
- return tx.page(id), nil
-}
-
// forEachPage iterates over every page in the subtree rooted at a given page and executes a function.
func (tx *Tx) forEachPage(pgid pgid, depth int, fn func(*page, int)) {
p := tx.page(pgid)
@@ -533,7 +321,7 @@ func (tx *Tx) Page(id int) (*PageInfo, error) {
}
// Build the page info.
- p := tx.page(pgid(id))
+ p := tx.db.page(pgid(id))
info := &PageInfo{
ID: id,
Count: int(p.count),
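On the Tx side, bucket CRUD collapses into one-line delegations to the embedded root bucket, and names become []byte so buckets live in the same key space as values. A hedged usage sketch of the resulting API; the helper names are illustrative and fmt is assumed imported:

// demo creates a bucket and writes a key through the new []byte-named API.
func demo(db *DB) error {
	return db.Update(func(tx *Tx) error {
		if err := tx.CreateBucketIfNotExists([]byte("widgets")); err != nil {
			return err
		}
		return tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar"))
	})
}

// listBuckets enumerates buckets via the new ForEach, which replaces the
// removed Buckets() slice accessor.
func listBuckets(tx *Tx) error {
	return tx.ForEach(func(name []byte, b *Bucket) error {
		fmt.Printf("%s\n", name)
		return nil
	})
}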
diff --git a/tx_test.go b/tx_test.go
index 299cccc..036c0c5 100644
--- a/tx_test.go
+++ b/tx_test.go
@@ -5,27 +5,25 @@ import (
"fmt"
"math/rand"
"os"
- "sort"
"strconv"
"strings"
"testing"
- "testing/quick"
"github.com/stretchr/testify/assert"
)
// Ensure that committing a closed transaction returns an error.
-func TestTxCommitClosed(t *testing.T) {
+func TestTx_Commit_Closed(t *testing.T) {
withOpenDB(func(db *DB, path string) {
tx, _ := db.Begin(true)
- tx.CreateBucket("foo")
+ tx.CreateBucket([]byte("foo"))
assert.NoError(t, tx.Commit())
assert.Equal(t, tx.Commit(), ErrTxClosed)
})
}
// Ensure that rolling back a closed transaction returns an error.
-func TestTxRollbackClosed(t *testing.T) {
+func TestTx_Rollback_Closed(t *testing.T) {
withOpenDB(func(db *DB, path string) {
tx, _ := db.Begin(true)
assert.NoError(t, tx.Rollback())
@@ -34,114 +32,69 @@ func TestTxRollbackClosed(t *testing.T) {
}
// Ensure that committing a read-only transaction returns an error.
-func TestTxCommitReadOnly(t *testing.T) {
+func TestTx_Commit_ReadOnly(t *testing.T) {
withOpenDB(func(db *DB, path string) {
tx, _ := db.Begin(false)
assert.Equal(t, tx.Commit(), ErrTxNotWritable)
})
}
-// Ensure that the database can retrieve a list of buckets.
-func TestTxBuckets(t *testing.T) {
- withOpenDB(func(db *DB, path string) {
- db.Update(func(tx *Tx) error {
- tx.CreateBucket("foo")
- tx.CreateBucket("bar")
- tx.CreateBucket("baz")
- buckets := tx.Buckets()
- if assert.Equal(t, len(buckets), 3) {
- assert.Equal(t, buckets[0].Name(), "bar")
- assert.Equal(t, buckets[1].Name(), "baz")
- assert.Equal(t, buckets[2].Name(), "foo")
- }
- return nil
- })
- })
-}
-
// Ensure that creating a bucket with a read-only transaction returns an error.
-func TestTxCreateBucketReadOnly(t *testing.T) {
+func TestTx_CreateBucket_ReadOnly(t *testing.T) {
withOpenDB(func(db *DB, path string) {
db.View(func(tx *Tx) error {
- assert.Equal(t, tx.CreateBucket("foo"), ErrTxNotWritable)
+ assert.Equal(t, tx.CreateBucket([]byte("foo")), ErrTxNotWritable)
return nil
})
})
}
// Ensure that creating a bucket on a closed transaction returns an error.
-func TestTxCreateBucketClosed(t *testing.T) {
+func TestTx_CreateBucket_Closed(t *testing.T) {
withOpenDB(func(db *DB, path string) {
tx, _ := db.Begin(true)
tx.Commit()
- assert.Equal(t, tx.CreateBucket("foo"), ErrTxClosed)
+ assert.Equal(t, tx.CreateBucket([]byte("foo")), ErrTxClosed)
})
}
// Ensure that a Tx can retrieve a bucket.
-func TestTxBucket(t *testing.T) {
+func TestTx_Bucket(t *testing.T) {
withOpenDB(func(db *DB, path string) {
db.Update(func(tx *Tx) error {
- tx.CreateBucket("widgets")
- b := tx.Bucket("widgets")
- if assert.NotNil(t, b) {
- assert.Equal(t, "widgets", b.Name())
- }
+ tx.CreateBucket([]byte("widgets"))
+ b := tx.Bucket([]byte("widgets"))
+ assert.NotNil(t, b)
return nil
})
})
}
// Ensure that a Tx retrieving a non-existent key returns nil.
-func TestTxGetMissing(t *testing.T) {
+func TestTx_Get_Missing(t *testing.T) {
withOpenDB(func(db *DB, path string) {
db.Update(func(tx *Tx) error {
- tx.CreateBucket("widgets")
- tx.Bucket("widgets").Put([]byte("foo"), []byte("bar"))
- value := tx.Bucket("widgets").Get([]byte("no_such_key"))
+ tx.CreateBucket([]byte("widgets"))
+ tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar"))
+ value := tx.Bucket([]byte("widgets")).Get([]byte("no_such_key"))
assert.Nil(t, value)
return nil
})
})
}
-// Ensure that retrieving all buckets returns writable buckets.
-func TestTxWritableBuckets(t *testing.T) {
- withOpenDB(func(db *DB, path string) {
- db.Update(func(tx *Tx) error {
- tx.CreateBucket("widgets")
- tx.CreateBucket("woojits")
- return nil
- })
- db.Update(func(tx *Tx) error {
- buckets := tx.Buckets()
- assert.Equal(t, len(buckets), 2)
- assert.Equal(t, buckets[0].Name(), "widgets")
- assert.Equal(t, buckets[1].Name(), "woojits")
- buckets[0].Put([]byte("foo"), []byte("0000"))
- buckets[1].Put([]byte("bar"), []byte("0001"))
- return nil
- })
- db.View(func(tx *Tx) error {
- assert.Equal(t, []byte("0000"), tx.Bucket("widgets").Get([]byte("foo")))
- assert.Equal(t, []byte("0001"), tx.Bucket("woojits").Get([]byte("bar")))
- return nil
- })
- })
-}
-
// Ensure that a bucket can be created and retrieved.
-func TestTxCreateBucket(t *testing.T) {
+func TestTx_CreateBucket(t *testing.T) {
withOpenDB(func(db *DB, path string) {
// Create a bucket.
db.Update(func(tx *Tx) error {
- assert.NoError(t, tx.CreateBucket("widgets"))
+ assert.NoError(t, tx.CreateBucket([]byte("widgets")))
return nil
})
// Read the bucket through a separate transaction.
db.View(func(tx *Tx) error {
- b := tx.Bucket("widgets")
+ b := tx.Bucket([]byte("widgets"))
assert.NotNil(t, b)
return nil
})
@@ -149,18 +102,19 @@ func TestTxCreateBucket(t *testing.T) {
}
// Ensure that a bucket can be created if it doesn't already exist.
-func TestTxCreateBucketIfNotExists(t *testing.T) {
+func TestTx_CreateBucketIfNotExists(t *testing.T) {
withOpenDB(func(db *DB, path string) {
db.Update(func(tx *Tx) error {
- assert.NoError(t, tx.CreateBucketIfNotExists("widgets"))
- assert.NoError(t, tx.CreateBucketIfNotExists("widgets"))
- assert.Equal(t, tx.CreateBucketIfNotExists(""), ErrBucketNameRequired)
+ assert.NoError(t, tx.CreateBucketIfNotExists([]byte("widgets")))
+ assert.NoError(t, tx.CreateBucketIfNotExists([]byte("widgets")))
+ assert.Equal(t, ErrBucketNameRequired, tx.CreateBucketIfNotExists([]byte{}))
+ assert.Equal(t, ErrBucketNameRequired, tx.CreateBucketIfNotExists(nil))
return nil
})
// Read the bucket through a separate transaction.
db.View(func(tx *Tx) error {
- b := tx.Bucket("widgets")
+ b := tx.Bucket([]byte("widgets"))
assert.NotNil(t, b)
return nil
})
@@ -168,64 +122,53 @@ func TestTxCreateBucketIfNotExists(t *testing.T) {
}
// Ensure that a bucket cannot be created twice.
-func TestTxRecreateBucket(t *testing.T) {
+func TestTx_CreateBucket_Exists(t *testing.T) {
withOpenDB(func(db *DB, path string) {
// Create a bucket.
db.Update(func(tx *Tx) error {
- assert.NoError(t, tx.CreateBucket("widgets"))
+ assert.NoError(t, tx.CreateBucket([]byte("widgets")))
return nil
})
// Create the same bucket again.
db.Update(func(tx *Tx) error {
- assert.Equal(t, ErrBucketExists, tx.CreateBucket("widgets"))
+ assert.Equal(t, ErrBucketExists, tx.CreateBucket([]byte("widgets")))
return nil
})
})
}
// Ensure that a bucket is created with a non-blank name.
-func TestTxCreateBucketWithoutName(t *testing.T) {
+func TestTx_CreateBucket_NameRequired(t *testing.T) {
withOpenDB(func(db *DB, path string) {
db.Update(func(tx *Tx) error {
- assert.Equal(t, ErrBucketNameRequired, tx.CreateBucket(""))
- return nil
- })
- })
-}
-
-// Ensure that a bucket name is not too long.
-func TestTxCreateBucketWithLongName(t *testing.T) {
- withOpenDB(func(db *DB, path string) {
- db.Update(func(tx *Tx) error {
- assert.NoError(t, tx.CreateBucket(strings.Repeat("X", 255)))
- assert.Equal(t, ErrBucketNameTooLarge, tx.CreateBucket(strings.Repeat("X", 256)))
+ assert.Equal(t, ErrBucketNameRequired, tx.CreateBucket(nil))
return nil
})
})
}
// Ensure that a bucket can be deleted.
-func TestTxDeleteBucket(t *testing.T) {
+func TestTx_DeleteBucket(t *testing.T) {
withOpenDB(func(db *DB, path string) {
// Create a bucket and add a value.
db.Update(func(tx *Tx) error {
- tx.CreateBucket("widgets")
- tx.Bucket("widgets").Put([]byte("foo"), []byte("bar"))
+ tx.CreateBucket([]byte("widgets"))
+ tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar"))
return nil
})
// Save root page id.
var root pgid
db.View(func(tx *Tx) error {
- root = tx.Bucket("widgets").root
+ root = tx.Bucket([]byte("widgets")).root
return nil
})
// Delete the bucket and make sure we can't get the value.
db.Update(func(tx *Tx) error {
- assert.NoError(t, tx.DeleteBucket("widgets"))
- assert.Nil(t, tx.Bucket("widgets"))
+ assert.NoError(t, tx.DeleteBucket([]byte("widgets")))
+ assert.Nil(t, tx.Bucket([]byte("widgets")))
return nil
})
@@ -234,257 +177,42 @@ func TestTxDeleteBucket(t *testing.T) {
assert.Equal(t, []pgid{7, 6, root, 2}, db.freelist.all())
// Create the bucket again and make sure there's not a phantom value.
- assert.NoError(t, tx.CreateBucket("widgets"))
- assert.Nil(t, tx.Bucket("widgets").Get([]byte("foo")))
+ assert.NoError(t, tx.CreateBucket([]byte("widgets")))
+ assert.Nil(t, tx.Bucket([]byte("widgets")).Get([]byte("foo")))
return nil
})
})
}
// Ensure that deleting a bucket on a closed transaction returns an error.
-func TestTxDeleteBucketClosed(t *testing.T) {
+func TestTx_DeleteBucket_Closed(t *testing.T) {
withOpenDB(func(db *DB, path string) {
tx, _ := db.Begin(true)
tx.Commit()
- assert.Equal(t, tx.DeleteBucket("foo"), ErrTxClosed)
+ assert.Equal(t, tx.DeleteBucket([]byte("foo")), ErrTxClosed)
})
}
// Ensure that deleting a bucket with a read-only transaction returns an error.
-func TestTxDeleteBucketReadOnly(t *testing.T) {
+func TestTx_DeleteBucket_ReadOnly(t *testing.T) {
withOpenDB(func(db *DB, path string) {
db.View(func(tx *Tx) error {
- assert.Equal(t, tx.DeleteBucket("foo"), ErrTxNotWritable)
- return nil
- })
- })
-}
-
-// Ensure that an error is returned when deleting from a bucket that doesn't exist.
-func TestTxDeleteBucketNotFound(t *testing.T) {
- withOpenDB(func(db *DB, path string) {
- db.Update(func(tx *Tx) error {
- assert.Equal(t, ErrBucketNotFound, tx.DeleteBucket("widgets"))
+ assert.Equal(t, tx.DeleteBucket([]byte("foo")), ErrTxNotWritable)
return nil
})
})
}
-// Ensure that a Tx cursor can iterate over an empty bucket without error.
-func TestTxCursorEmptyBucket(t *testing.T) {
+// Ensure that deleting a bucket that doesn't exist returns ErrBucketNotFound.
+func TestTx_DeleteBucket_NotFound(t *testing.T) {
withOpenDB(func(db *DB, path string) {
db.Update(func(tx *Tx) error {
- return tx.CreateBucket("widgets")
- })
- db.View(func(tx *Tx) error {
- c := tx.Bucket("widgets").Cursor()
- k, v := c.First()
- assert.Nil(t, k)
- assert.Nil(t, v)
+ assert.Equal(t, ErrBucketNotFound, tx.DeleteBucket([]byte("widgets")))
return nil
})
})
}
-// Ensure that a Tx cursor can reverse iterate over an empty bucket without error.
-func TestCursorEmptyBucketReverse(t *testing.T) {
- withOpenDB(func(db *DB, path string) {
- db.Update(func(tx *Tx) error {
- return tx.CreateBucket("widgets")
- })
- db.View(func(tx *Tx) error {
- c := tx.Bucket("widgets").Cursor()
- k, v := c.Last()
- assert.Nil(t, k)
- assert.Nil(t, v)
- return nil
- })
- })
-}
-
-// Ensure that a Tx cursor can iterate over a single root with a couple elements.
-func TestTxCursorLeafRoot(t *testing.T) {
- withOpenDB(func(db *DB, path string) {
- db.Update(func(tx *Tx) error {
- tx.CreateBucket("widgets")
- tx.Bucket("widgets").Put([]byte("baz"), []byte{})
- tx.Bucket("widgets").Put([]byte("foo"), []byte{0})
- tx.Bucket("widgets").Put([]byte("bar"), []byte{1})
- return nil
- })
- tx, _ := db.Begin(false)
- c := tx.Bucket("widgets").Cursor()
-
- k, v := c.First()
- assert.Equal(t, string(k), "bar")
- assert.Equal(t, v, []byte{1})
-
- k, v = c.Next()
- assert.Equal(t, string(k), "baz")
- assert.Equal(t, v, []byte{})
-
- k, v = c.Next()
- assert.Equal(t, string(k), "foo")
- assert.Equal(t, v, []byte{0})
-
- k, v = c.Next()
- assert.Nil(t, k)
- assert.Nil(t, v)
-
- k, v = c.Next()
- assert.Nil(t, k)
- assert.Nil(t, v)
-
- tx.Rollback()
- })
-}
-
-// Ensure that a Tx cursor can iterate in reverse over a single root with a couple elements.
-func TestTxCursorLeafRootReverse(t *testing.T) {
- withOpenDB(func(db *DB, path string) {
- db.Update(func(tx *Tx) error {
- tx.CreateBucket("widgets")
- tx.Bucket("widgets").Put([]byte("baz"), []byte{})
- tx.Bucket("widgets").Put([]byte("foo"), []byte{0})
- tx.Bucket("widgets").Put([]byte("bar"), []byte{1})
- return nil
- })
- tx, _ := db.Begin(false)
- c := tx.Bucket("widgets").Cursor()
-
- k, v := c.Last()
- assert.Equal(t, string(k), "foo")
- assert.Equal(t, v, []byte{0})
-
- k, v = c.Prev()
- assert.Equal(t, string(k), "baz")
- assert.Equal(t, v, []byte{})
-
- k, v = c.Prev()
- assert.Equal(t, string(k), "bar")
- assert.Equal(t, v, []byte{1})
-
- k, v = c.Prev()
- assert.Nil(t, k)
- assert.Nil(t, v)
-
- k, v = c.Prev()
- assert.Nil(t, k)
- assert.Nil(t, v)
-
- tx.Rollback()
- })
-}
-
-// Ensure that a Tx cursor can restart from the beginning.
-func TestTxCursorRestart(t *testing.T) {
- withOpenDB(func(db *DB, path string) {
- db.Update(func(tx *Tx) error {
- tx.CreateBucket("widgets")
- tx.Bucket("widgets").Put([]byte("bar"), []byte{})
- tx.Bucket("widgets").Put([]byte("foo"), []byte{})
- return nil
- })
-
- tx, _ := db.Begin(false)
- c := tx.Bucket("widgets").Cursor()
-
- k, _ := c.First()
- assert.Equal(t, string(k), "bar")
-
- k, _ = c.Next()
- assert.Equal(t, string(k), "foo")
-
- k, _ = c.First()
- assert.Equal(t, string(k), "bar")
-
- k, _ = c.Next()
- assert.Equal(t, string(k), "foo")
-
- tx.Rollback()
- })
-}
-
-// Ensure that a Tx can iterate over all elements in a bucket.
-func TestTxCursorIterate(t *testing.T) {
- if testing.Short() {
- t.Skip("skipping test in short mode.")
- }
-
- f := func(items testdata) bool {
- withOpenDB(func(db *DB, path string) {
- // Bulk insert all values.
- tx, _ := db.Begin(true)
- tx.CreateBucket("widgets")
- b := tx.Bucket("widgets")
- for _, item := range items {
- assert.NoError(t, b.Put(item.Key, item.Value))
- }
- assert.NoError(t, tx.Commit())
-
- // Sort test data.
- sort.Sort(items)
-
- // Iterate over all items and check consistency.
- var index = 0
- tx, _ = db.Begin(false)
- c := tx.Bucket("widgets").Cursor()
- for k, v := c.First(); k != nil && index < len(items); k, v = c.Next() {
- assert.Equal(t, k, items[index].Key)
- assert.Equal(t, v, items[index].Value)
- index++
- }
- assert.Equal(t, len(items), index)
- tx.Rollback()
- })
- return true
- }
- if err := quick.Check(f, qconfig()); err != nil {
- t.Error(err)
- }
- fmt.Fprint(os.Stderr, "\n")
-}
-
-// Ensure that a transaction can iterate over all elements in a bucket in reverse.
-func TestTxCursorIterateReverse(t *testing.T) {
- if testing.Short() {
- t.Skip("skipping test in short mode.")
- }
-
- f := func(items testdata) bool {
- withOpenDB(func(db *DB, path string) {
- // Bulk insert all values.
- tx, _ := db.Begin(true)
- tx.CreateBucket("widgets")
- b := tx.Bucket("widgets")
- for _, item := range items {
- assert.NoError(t, b.Put(item.Key, item.Value))
- }
- assert.NoError(t, tx.Commit())
-
- // Sort test data.
- sort.Sort(revtestdata(items))
-
- // Iterate over all items and check consistency.
- var index = 0
- tx, _ = db.Begin(false)
- c := tx.Bucket("widgets").Cursor()
- for k, v := c.Last(); k != nil && index < len(items); k, v = c.Prev() {
- assert.Equal(t, k, items[index].Key)
- assert.Equal(t, v, items[index].Value)
- index++
- }
- assert.Equal(t, len(items), index)
- tx.Rollback()
- })
- return true
- }
- if err := quick.Check(f, qconfig()); err != nil {
- t.Error(err)
- }
- fmt.Fprint(os.Stderr, "\n")
-}
-
// Ensure that Tx commit handlers are called after a transaction successfully commits.
func TestTx_OnCommit(t *testing.T) {
var x int
@@ -492,7 +220,7 @@ func TestTx_OnCommit(t *testing.T) {
db.Update(func(tx *Tx) error {
tx.OnCommit(func() { x += 1 })
tx.OnCommit(func() { x += 2 })
- return tx.CreateBucket("widgets")
+ return tx.CreateBucket([]byte("widgets"))
})
})
assert.Equal(t, 3, x)
@@ -505,7 +233,7 @@ func TestTx_OnCommit_Rollback(t *testing.T) {
db.Update(func(tx *Tx) error {
tx.OnCommit(func() { x += 1 })
tx.OnCommit(func() { x += 2 })
- tx.CreateBucket("widgets")
+ tx.CreateBucket([]byte("widgets"))
return errors.New("rollback this commit")
})
})
@@ -526,8 +254,8 @@ func benchmarkTxCursor(b *testing.B, total int) {
withOpenDB(func(db *DB, path string) {
// Write data to bucket.
db.Update(func(tx *Tx) error {
- tx.CreateBucket("widgets")
- bucket := tx.Bucket("widgets")
+ tx.CreateBucket([]byte("widgets"))
+ bucket := tx.Bucket([]byte("widgets"))
for i := 0; i < total; i++ {
bucket.Put([]byte(fmt.Sprintf("%016d", indexes[i])), value)
}
@@ -539,7 +267,7 @@ func benchmarkTxCursor(b *testing.B, total int) {
for i := 0; i < b.N; i++ {
db.View(func(tx *Tx) error {
count := 0
- c := tx.Bucket("widgets").Cursor()
+ c := tx.Bucket([]byte("widgets")).Cursor()
for k, _ := c.First(); k != nil; k, _ = c.Next() {
count++
}
@@ -564,7 +292,7 @@ func benchmarkTxPutRandom(b *testing.B, total int) {
value := []byte(strings.Repeat("0", 64))
withOpenDB(func(db *DB, path string) {
db.Update(func(tx *Tx) error {
- return tx.CreateBucket("widgets")
+ return tx.CreateBucket([]byte("widgets"))
})
var tx *Tx
var bucket *Bucket
@@ -575,7 +303,7 @@ func benchmarkTxPutRandom(b *testing.B, total int) {
tx.Commit()
}
tx, _ = db.Begin(true)
- bucket = tx.Bucket("widgets")
+ bucket = tx.Bucket([]byte("widgets"))
}
bucket.Put([]byte(strconv.Itoa(indexes[i])), value)
}
@@ -595,10 +323,10 @@ func benchmarkTxPutSequential(b *testing.B, total int) {
value := []byte(strings.Repeat("0", 64))
withOpenDB(func(db *DB, path string) {
db.Update(func(tx *Tx) error {
- return tx.CreateBucket("widgets")
+ return tx.CreateBucket([]byte("widgets"))
})
db.Update(func(tx *Tx) error {
- bucket := tx.Bucket("widgets")
+ bucket := tx.Bucket([]byte("widgets"))
for j := 0; j < b.N; j++ {
for i := 0; i < total; i++ {
bucket.Put([]byte(strconv.Itoa(i)), value)
@@ -608,3 +336,36 @@ func benchmarkTxPutSequential(b *testing.B, total int) {
})
})
}
+
+func ExampleTx_Rollback() {
+ // Open the database.
+ db, _ := Open(tempfile(), 0666)
+ defer os.Remove(db.Path())
+ defer db.Close()
+
+ // Create a bucket.
+ db.Update(func(tx *Tx) error {
+ return tx.CreateBucket([]byte("widgets"))
+ })
+
+ // Set a value for a key.
+ db.Update(func(tx *Tx) error {
+ return tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar"))
+ })
+
+ // Update the key but roll back the transaction so it never saves.
+ tx, _ := db.Begin(true)
+ b := tx.Bucket([]byte("widgets"))
+ b.Put([]byte("foo"), []byte("baz"))
+ tx.Rollback()
+
+ // Ensure that our original value is still set.
+ db.View(func(tx *Tx) error {
+ value := tx.Bucket([]byte("widgets")).Get([]byte("foo"))
+ fmt.Printf("The value for 'foo' is still: %s\n", string(value))
+ return nil
+ })
+
+ // Output:
+ // The value for 'foo' is still: bar
+}
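A companion sketch in the same testable-example style, exercising the Tx.ForEach added in tx.go above; this example is not part of the commit and the name ExampleTx_ForEach is hypothetical:

func ExampleTx_ForEach() {
	// Open the database.
	db, _ := Open(tempfile(), 0666)
	defer os.Remove(db.Path())
	defer db.Close()

	// Create a couple of buckets.
	db.Update(func(tx *Tx) error {
		tx.CreateBucket([]byte("widgets"))
		tx.CreateBucket([]byte("woojits"))
		return nil
	})

	// Iterate over the buckets in the root; names come back in byte order.
	db.View(func(tx *Tx) error {
		return tx.ForEach(func(name []byte, b *Bucket) error {
			fmt.Printf("bucket: %s\n", name)
			return nil
		})
	})

	// Output:
	// bucket: widgets
	// bucket: woojits
}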