author    EuAndreh <eu@euandre.org>  2025-01-25 13:14:28 -0300
committer EuAndreh <eu@euandre.org>  2025-01-25 13:40:50 -0300
commit    66f788960c5b46a04d31977156913eafeb2b6103 (patch)
tree      4914509e3d682bdf7a734c57f51cd856a33bbb80
parent    src/dedo.go: Remove unused DB.lockfile field (diff)
download  dedo-66f788960c5b46a04d31977156913eafeb2b6103.tar.gz
          dedo-66f788960c5b46a04d31977156913eafeb2b6103.tar.xz
Stylistic: stick to 80 columns and rewrite if err := ... code
-rw-r--r--  src/dedo.go    1723
-rw-r--r--  tests/dedo.go  2737
2 files changed, 2901 insertions, 1559 deletions
diff --git a/src/dedo.go b/src/dedo.go
index 3850348..2dde5eb 100644
--- a/src/dedo.go
+++ b/src/dedo.go
@@ -1,3 +1,41 @@
+/// Package dedo implements a low-level key/value store in pure Go. It supports
+/// fully serializable transactions, ACID semantics, and lock-free MVCC with
+/// multiple readers and a single writer. Dedo can be used for projects that
+/// want a simple data store without the need to add large dependencies such as
+/// Postgres or MySQL.
+///
+/// Dedo is a single-level, zero-copy, B+tree data store. This means that Dedo
+/// is optimized for fast read access and does not require recovery in the event
+/// of a system crash. Transactions which have not finished committing will
+/// simply be rolled back in the event of a crash.
+///
+/// The design of Dedo is based on Howard Chu's LMDB database project.
+///
+///
+/// == Basics
+///
+/// There are only a few types in Dedo: DB, Bucket, Tx, and Cursor. The DB is
+/// a collection of buckets and is represented by a single file on disk. A
+/// bucket is a collection of unique keys that are associated with values.
+///
+/// Transactions provide either read-only or read-write access to the database.
+/// Read-only transactions can retrieve key/value pairs and can use Cursors to
+/// iterate over the dataset sequentially. Read-write transactions can create
+/// and delete buckets and can insert and remove keys. Only one read-write
+/// transaction is allowed at a time.
+///
+///
+/// == Caveats
+///
+/// The database uses a read-only, memory-mapped data file to ensure that
+/// applications cannot corrupt the database; however, this means that keys and
+/// values returned from Dedo cannot be changed. Writing to a read-only byte
+/// slice will cause Go to panic.
+///
+/// Keys and values retrieved from the database are only valid for the life of
+/// the transaction. When used outside the transaction, these byte slices can
+/// point to different data or can point to invalid memory which will cause a
+/// panic.
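A minimal usage sketch of the API described in this package comment, assuming dedo keeps the Bolt-style Update/View helpers and Tx bucket accessors (the import path is hypothetical):

package main

import (
	"fmt"
	"log"

	"euandre.org/dedo" // hypothetical import path
)

func main() {
	db, err := dedo.Open("app.db")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Only one read-write transaction runs at a time.
	err = db.Update(func(tx *dedo.Tx) error {
		b, err := tx.CreateBucketIfNotExists([]byte("widgets"))
		if err != nil {
			return err
		}
		return b.Put([]byte("answer"), []byte("42"))
	})
	if err != nil {
		log.Fatal(err)
	}

	// Read-only transactions can run concurrently with each other.
	err = db.View(func(tx *dedo.Tx) error {
		v := tx.Bucket([]byte("widgets")).Get([]byte("answer"))
		fmt.Printf("answer = %s\n", v)
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}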
package dedo
import (
@@ -24,84 +62,87 @@ import (
type pgid uint64
-// bucket represents the on-file representation of a bucket.
-// This is stored as the "value" of a bucket key. If the bucket is small enough,
-// then its root page can be stored inline in the "value", after the bucket
-// header. In the case of inline buckets, the "root" will be 0.
+/// bucket represents the on-file representation of a bucket. This is stored as
+/// the "value" of a bucket key. If the bucket is small enough, then its root
+/// page can be stored inline in the "value", after the bucket header. In the
+/// case of inline buckets, the "root" will be 0.
type bucket struct {
- root pgid // page id of the bucket's root-level page
- sequence uint64 // monotonically incrementing, used by NextSequence()
+ root pgid /// page id of the bucket's root-level page
+ sequence uint64 /// monotonically incrementing, used by NextSequence()
}
-// Bucket represents a distinct collection of key/value pairs inside the
-// database. Keys aren't unique across different buckets.
+/// Bucket represents a distinct collection of key/value pairs inside the
+/// database. Keys aren't unique across different buckets.
type Bucket struct {
ref *bucket
- tx *Tx // the associated transaction
- buckets map[string]*Bucket // subbucket cache
- page *page // inline page reference
- rootNode *node // materialized node for the root page.
- nodes map[pgid]*node // node cache
-}
-
-// Cursor represents an iterator that can traverse over all key/value pairs in a bucket in sorted order.
-// Cursors see nested buckets with value == nil.
-// Cursors can be obtained from a transaction and are valid as long as the transaction is open.
-//
-// Keys and values returned from the cursor are only valid for the life of the transaction.
-//
-// Changing data while traversing with a cursor may cause it to be invalidated
-// and return unexpected keys and/or values. You must reposition your cursor
-// after mutating data.
+ tx *Tx /// the associated transaction
+ buckets map[string]*Bucket /// subbucket cache
+ page *page /// inline page reference
+ rootNode *node /// materialized node for the root page
+ nodes map[pgid]*node /// node cache
+}
+
+/// Cursor represents an iterator that can traverse over all key/value pairs in
+/// a bucket in sorted order. Cursors see nested buckets with value == nil.
+/// Cursors can be obtained from a transaction and are valid as long as the
+/// transaction is open.
+///
+/// Keys and values returned from the cursor are only valid for the life of the
+/// transaction.
+///
+/// Changing data while traversing with a cursor may cause it to be invalidated
+/// and return unexpected keys and/or values. You must reposition your cursor
+/// after mutating data.
type Cursor struct {
bucket *Bucket
stack []elemRef
}
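A sketch of the traversal contract described above: an in-order dump of a bucket, run inside an open transaction (nested buckets surface with a nil value):

// dumpBucket prints every key/value pair in b in sorted key order.
// The cursor and the returned slices are only valid while the
// bucket's transaction is open.
func dumpBucket(b *dedo.Bucket) {
	c := b.Cursor()
	for k, v := c.First(); k != nil; k, v = c.Next() {
		if v == nil {
			fmt.Printf("%s (nested bucket)\n", k)
		} else {
			fmt.Printf("%s = %s\n", k, v)
		}
	}
}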
-// elemRef represents a reference to an element on a given page/node.
+/// elemRef represents a reference to an element on a given page/node.
type elemRef struct {
page *page
node *node
index int
}
-// DB represents a collection of buckets persisted to a file on disk.
-// All data access is performed through transactions which can be obtained through the DB.
-// All the functions on DB will return a ErrDatabaseNotOpen if accessed before Open() is called.
+/// DB represents a collection of buckets persisted to a file on disk. All data
+/// access is performed through transactions which can be obtained through the
+/// DB. All the functions on DB will return an ErrDatabaseNotOpen if accessed
+/// before Open() is called.
type DB struct {
- // When enabled, the database will perform a Check() after every commit.
- // A panic is issued if the database is in an inconsistent state. This
- // flag has a large performance impact so it should only be used for
- // debugging purposes.
+ /// When enabled, the database will perform a Check() after every
+ /// commit. A panic is issued if the database is in an inconsistent
+ /// state. This flag has a large performance impact so it should only
+ /// be used for debugging purposes.
StrictMode bool
- // MaxBatchSize is the maximum size of a batch. Default value is
- // copied from DefaultMaxBatchSize in Open.
- //
- // If <=0, disables batching.
- //
- // Do not change concurrently with calls to Batch.
+ /// MaxBatchSize is the maximum size of a batch. Default value is
+ /// copied from DefaultMaxBatchSize in Open.
+ ///
+ /// If <=0, disables batching.
+ ///
+ /// Do not change concurrently with calls to Batch.
MaxBatchSize int
- // MaxBatchDelay is the maximum delay before a batch starts.
- // Default value is copied from DefaultMaxBatchDelay in Open.
- //
- // If <=0, effectively disables batching.
- //
- // Do not change concurrently with calls to Batch.
+ /// MaxBatchDelay is the maximum delay before a batch starts. Default
+ /// value is copied from DefaultMaxBatchDelay in Open.
+ ///
+ /// If <=0, effectively disables batching.
+ ///
+ /// Do not change concurrently with calls to Batch.
MaxBatchDelay time.Duration
- // AllocSize is the amount of space allocated when the database
- // needs to create new pages. This is done to amortize the cost
- // of truncate() and fsync() when growing the data file.
+ /// AllocSize is the amount of space allocated when the database needs
+ /// to create new pages. This is done to amortize the cost of
+ /// truncate() and fsync() when growing the data file.
AllocSize int
path string
file *os.File
- dataref []byte // mmap'ed read-only via PROT_READ, write throws SEGV
+ dataref []byte /// mmap'ed read-only via PROT_READ, write throws SEGV
data *[maxMapSize]byte
datasz int
- filesz int // current on disk file size
+ filesz int /// current on disk file size
meta0 *meta
meta1 *meta
pageSize int
@@ -115,9 +156,9 @@ type DB struct {
batchMu sync.Mutex
batch *batch
- rwlock sync.Mutex // Allows only one writer at a time.
- metalock sync.Mutex // Protects meta page access.
- mmaplock sync.RWMutex // Protects mmap access during remapping.
+ rwlock sync.Mutex /// Allows only one writer at a time.
+ metalock sync.Mutex /// Protects meta page access.
+ mmaplock sync.RWMutex /// Protects mmap access during remapping.
}
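Given the field comments above, tuning the batch knobs is a plain assignment made once, before any goroutine starts calling Batch(); a sketch with arbitrary values:

db, err := dedo.Open("app.db")
if err != nil {
	log.Fatal(err)
}
db.MaxBatchSize = 500                   // flush after 500 calls...
db.MaxBatchDelay = 5 * time.Millisecond // ...or after 5ms, whichever is first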
type call struct {
@@ -148,15 +189,16 @@ type meta struct {
checksum uint64
}
-// freelist represents a list of all pages that are available for allocation.
-// It also tracks pages that have been freed but are still in use by open transactions.
+/// freelist represents a list of all pages that are available for allocation.
+/// It also tracks pages that have been freed but are still in use by open
+/// transactions.
type freelist struct {
- ids []pgid // all free and available free page ids.
- pending map[txid][]pgid // mapping of soon-to-be free page ids by tx.
- cache map[pgid]bool // fast lookup of all free and pending page ids.
+ ids []pgid /// all free and available free page ids
+ pending map[txid][]pgid /// mapping of soon-to-be free page ids by tx
+ cache map[pgid]bool /// fast lookup of all free and pending page ids
}
-// node represents an in-memory, deserialized page.
+/// node represents an in-memory, deserialized page.
type node struct {
bucket *Bucket
isLeaf bool
@@ -173,9 +215,9 @@ type nodes []*node
type pages []*page
-// inode represents an internal node inside of a node.
-// It can be used to point to elements in a page or point
-// to an element which hasn't been added to a page yet.
+/// inode represents an internal node inside of a node.
+/// It can be used to point to elements in a page or point
+/// to an element which hasn't been added to a page yet.
type inode struct {
flags uint32
pgid pgid
@@ -193,14 +235,14 @@ type page struct {
ptr uintptr
}
-// branchPageElement represents a node on a branch page.
+/// branchPageElement represents a node on a branch page.
type branchPageElement struct {
pos uint32
ksize uint32
pgid pgid
}
-// leafPageElement represents a node on a leaf page.
+/// leafPageElement represents a node on a leaf page.
type leafPageElement struct {
flags uint32
pos uint32
@@ -208,7 +250,7 @@ type leafPageElement struct {
vsize uint32
}
-// PageInfo represents human readable information about a page.
+/// PageInfo represents human readable information about a page.
type PageInfo struct {
ID int
Type string
@@ -218,17 +260,18 @@ type PageInfo struct {
type pgids []pgid
-// txid represents the internal transaction identifier.
+/// txid represents the internal transaction identifier.
type txid uint64
-// Tx represents a read-only or read/write transaction on the database.
-// Read-only transactions can be used for retrieving values for keys and creating cursors.
-// Read/write transactions can create and remove buckets and create and remove keys.
-//
-// IMPORTANT: You must commit or rollback transactions when you are done with
-// them. Pages can not be reclaimed by the writer until no more transactions
-// are using them. A long running read transaction can cause the database to
-// quickly grow.
+/// Tx represents a read-only or read/write transaction on the database.
+/// Read-only transactions can be used for retrieving values for keys and
+/// creating cursors. Read/write transactions can create and remove buckets
+/// and create and remove keys.
+///
+/// IMPORTANT: You must commit or rollback transactions when you are done with
+/// them. Pages can not be reclaimed by the writer until no more transactions
+/// are using them. A long running read transaction can cause the database to
+/// quickly grow.
type Tx struct {
writable bool
managed bool
@@ -239,9 +282,9 @@ type Tx struct {
commitHandlers []func()
}
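The commit-or-rollback rule above yields the usual pattern when driving a transaction by hand, assuming dedo keeps a Bolt-style DB.Begin(writable bool):

func putAnswer(db *dedo.DB) error {
	tx, err := db.Begin(true)
	if err != nil {
		return err
	}
	// The deferred Rollback is a safety net; after a successful
	// Commit it returns ErrTxClosed, which is ignored here.
	defer tx.Rollback()

	b, err := tx.CreateBucketIfNotExists([]byte("widgets"))
	if err != nil {
		return err
	}
	err = b.Put([]byte("answer"), []byte("42"))
	if err != nil {
		return err
	}
	return tx.Commit()
}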
-// walkFunc is the type of the function called for keys (buckets and "normal"
-// values) discovered by Walk. keys is the list of keys to descend to the bucket
-// owning the discovered key/value pair k/v.
+/// walkFunc is the type of the function called for keys (buckets and "normal"
+/// values) discovered by Walk. The keys argument is the list of keys needed
+/// to descend to the bucket owning the discovered key/value pair k/v.
type walkFunc func(keys [][]byte, k, v []byte, seq uint64) error
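For illustration, a function matching that signature which prints each discovered pair under its bucket path might look like this (a sketch; walkFunc is internal to the package):

func printEntry(keys [][]byte, k, v []byte, seq uint64) error {
	path := ""
	for _, bucketName := range keys {
		path += string(bucketName) + "/"
	}
	fmt.Printf("%s%s = %s (seq=%d)\n", path, k, v, seq)
	return nil
}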
type argsT struct{
@@ -263,32 +306,32 @@ type commandT struct{
const (
- // maxMapSize represents the largest mmap size supported by Bolt.
- maxMapSize = 0xFFFFFFFFFFFF // 256TB
+ /// maxMapSize represents the largest mmap size supported by Dedo.
+ maxMapSize = 0xFFFFFFFFFFFF /// 256TB
- // maxAllocSize is the size used when creating array pointers.
+ /// maxAllocSize is the size used when creating array pointers.
maxAllocSize = 0x7FFFFFFF
// FIXME: why?
- // MaxKeySize is the maximum length of a key, in bytes.
+ /// MaxKeySize is the maximum length of a key, in bytes.
MaxKeySize = 32768
// FIXME: why?
- // MaxValueSize is the maximum length of a value, in bytes.
+ /// MaxValueSize is the maximum length of a value, in bytes.
MaxValueSize = (1 << 31) - 2
bucketHeaderSize = int(unsafe.Sizeof(bucket{}))
- // The largest step that can be taken when remapping the mmap.
- maxMmapStep = 1 << 30 // 1GB
+ /// The largest step that can be taken when remapping the mmap.
+ maxMmapStep = 1 << 30 /// 1GB
- // The data file format version.
+ /// The data file format version.
version = 2
- // Represents a marker value to indicate that a file is a Bolt DB.
+ /// Represents a marker value to indicate that a file is a Dedo DB.
magic uint32 = 0xFACADAAB
- // Default values if not set in a DB instance.
+ /// Default values if not set in a DB instance.
DefaultMaxBatchSize int = 1000
DefaultMaxBatchDelay = 10 * time.Millisecond
DefaultAllocSize = 16 * 1024 * 1024
@@ -298,7 +341,7 @@ const (
minKeysPerPage = 2
branchPageElementSize = int(unsafe.Sizeof(branchPageElement{}))
- leafPageElementSize = int(unsafe.Sizeof(leafPageElement{}))
+ leafPageElementSize = int(unsafe.Sizeof(leafPageElement{}))
branchPageFlag = 0x01
leafPageFlag = 0x02
@@ -307,116 +350,125 @@ const (
bucketLeafFlag = 0x01
- // PageHeaderSize represents the size of the bolt.page header.
+ /// PageHeaderSize represents the size of the dedo.page header.
PageHeaderSize = 16
)
var (
- // trySolo is a special sentinel error value used for signaling that a
- // transaction function should be re-run. It should never be seen by
- // callers.
- trySolo = errors.New("batch function returned an error and should be re-run solo")
+ /// trySolo is a special sentinel error value used for signaling that a
+ /// transaction function should be re-run. It should never be seen by
+ /// callers.
+ trySolo = errors.New(
+ "batch function returned an error and should be re-run solo",
+ )
- //
- // These errors can be returned when opening or calling methods on a DB.
- //
+ ///
+ /// These errors can be returned when opening or calling methods on a
+ /// DB.
+ ///
- // ErrDatabaseNotOpen is returned when a DB instance is accessed before it
- // is opened or after it is closed.
+ /// ErrDatabaseNotOpen is returned when a DB instance is accessed before
+ /// it is opened or after it is closed.
ErrDatabaseNotOpen = errors.New("database not open")
- // ErrInvalid is returned when both meta pages on a database are invalid.
- // This typically occurs when a file is not a bolt database.
+ /// ErrInvalid is returned when both meta pages on a database are
+ /// invalid. This typically occurs when a file is not a dedo database.
ErrInvalid = errors.New("invalid database")
- // ErrVersionMismatch is returned when the data file was created with a
- // different version of Bolt.
+ /// ErrVersionMismatch is returned when the data file was created with a
+ /// different version of Dedo.
ErrVersionMismatch = errors.New("version mismatch")
- // ErrChecksum is returned when either meta page checksum does not match.
+ /// ErrChecksum is returned when either meta page checksum does not
+ /// match.
ErrChecksum = errors.New("checksum error")
- //
- // These errors can occur when beginning or committing a Tx.
- //
+ ///
+ /// These errors can occur when beginning or committing a Tx.
+ ///
- // ErrTxNotWritable is returned when performing a write operation on a
- // read-only transaction.
+ /// ErrTxNotWritable is returned when performing a write operation on a
+ /// read-only transaction.
ErrTxNotWritable = errors.New("tx not writable")
- // ErrTxClosed is returned when committing or rolling back a transaction
- // that has already been committed or rolled back.
+ /// ErrTxClosed is returned when committing or rolling back a
+ /// transaction that has already been committed or rolled back.
ErrTxClosed = errors.New("tx closed")
- //
- // These errors can occur when putting or deleting a value or a bucket.
- //
+ ///
+ /// These errors can occur when putting or deleting a value or a bucket.
+ ///
- // ErrBucketNotFound is returned when trying to access a bucket that has
- // not been created yet.
+ /// ErrBucketNotFound is returned when trying to access a bucket that
+ /// has not been created yet.
ErrBucketNotFound = errors.New("bucket not found")
- // ErrBucketExists is returned when creating a bucket that already exists.
+ /// ErrBucketExists is returned when creating a bucket that already
+ /// exists.
ErrBucketExists = errors.New("bucket already exists")
- // ErrBucketNameRequired is returned when creating a bucket with a blank name.
+ /// ErrBucketNameRequired is returned when creating a bucket with a
+ /// blank name.
ErrBucketNameRequired = errors.New("bucket name required")
- // ErrKeyRequired is returned when inserting a zero-length key.
+ /// ErrKeyRequired is returned when inserting a zero-length key.
ErrKeyRequired = errors.New("key required")
- // ErrKeyTooLarge is returned when inserting a key that is larger than MaxKeySize.
+ /// ErrKeyTooLarge is returned when inserting a key that is larger than
+ /// MaxKeySize.
ErrKeyTooLarge = errors.New("key too large")
- // ErrValueTooLarge is returned when inserting a value that is larger than MaxValueSize.
+ /// ErrValueTooLarge is returned when inserting a value that is larger
+ /// than MaxValueSize.
ErrValueTooLarge = errors.New("value too large")
- // ErrIncompatibleValue is returned when trying create or delete a bucket
- // on an existing non-bucket key or when trying to create or delete a
- // non-bucket key on an existing bucket key.
+ /// ErrIncompatibleValue is returned when trying to create or delete a
+ /// bucket on an existing non-bucket key or when trying to create or
+ /// delete a non-bucket key on an existing bucket key.
ErrIncompatibleValue = errors.New("incompatible value")
-
- //
- //
- //
-
+ /// ErrMissingKey is returned when the CLI tries to get a key that it
+ /// can't find in the specified bucket.
ErrMissingKey = errors.New("missing key")
- // ErrUsage is returned when a usage message was printed and the process
- // should simply exit with an error.
+ /// ErrUsage is returned when a usage message was printed and the
+ /// process should simply exit with an error.
ErrUsage = errors.New("usage")
- // ErrUnknownCommand is returned when a CLI command is not specified.
+ /// ErrUnknownCommand is returned when a CLI command is not specified.
ErrUnknownCommand = errors.New("unknown command")
- // ErrPathRequired is returned when the path to a Bolt database is not specified.
+ /// ErrPathRequired is returned when the path to a Dedo database is not
+ /// specified.
ErrPathRequired = errors.New("path required")
- // ErrFileNotFound is returned when a Bolt database does not exist.
+ /// ErrFileNotFound is returned when a Dedo database does not exist.
ErrFileNotFound = errors.New("file not found")
- // ErrCorrupt is returned when a checking a data file finds errors.
+ /// ErrCorrupt is returned when checking a data file finds errors.
ErrCorrupt = errors.New("invalid value")
- // ErrNonDivisibleBatchSize is returned when the batch size can't be evenly
- // divided by the iteration count.
- ErrNonDivisibleBatchSize = errors.New("number of iterations must be divisible by the batch size")
+ /// ErrNonDivisibleBatchSize is returned when the batch size can't be
+ /// evenly divided by the iteration count.
+ ErrNonDivisibleBatchSize = errors.New(
+ "number of iterations must be divisible by the batch size",
+ )
- // ErrPageIDRequired is returned when a required page id is not specified.
+ /// ErrPageIDRequired is returned when a required page id is not
+ /// specified.
ErrPageIDRequired = errors.New("page id required")
)
-// fdatasync flushes written data to a file descriptor.
+/// fdatasync() flushes written data to a file descriptor.
func fdatasync(db *DB) error {
return db.file.Sync()
}
-// flock acquires an advisory lock on a file descriptor.
+/// flock() acquires an advisory lock on a file descriptor.
func flock(db *DB) error {
const lockFlags = syscall.LOCK_EX | syscall.LOCK_NB
@@ -433,22 +485,21 @@ func flock(db *DB) error {
}
}
-// funlock releases an advisory lock on a file descriptor.
+/// funlock() releases an advisory lock on a file descriptor.
func funlock(db *DB) error {
return syscall.Flock(int(db.file.Fd()), syscall.LOCK_UN)
}
-// mmap memory maps a DB's data file.
+/// mmap() memory-maps a DB's data file.
func mmap(db *DB, sz int) error {
- // Map the data file to memory.
fd := int(db.file.Fd())
b, err := syscall.Mmap(fd, 0, sz, syscall.PROT_READ, syscall.MAP_SHARED)
if err != nil {
return err
}
- // Advise the kernel that the mmap is accessed randomly.
- if err := madvise(b, syscall.MADV_RANDOM); err != nil {
+ err = madvise(b, syscall.MADV_RANDOM)
+ if err != nil {
return fmt.Errorf("madvise: %s", err)
}
@@ -459,14 +510,12 @@ func mmap(db *DB, sz int) error {
return nil
}
-// munmap unmaps a DB's data file from memory.
+/// munmap() unmaps a DB's data file from memory.
func munmap(db *DB) error {
- // Ignore the unmap if we have no mapped data.
if db.dataref == nil {
return nil
}
- // Unmap using the original byte slice.
err := syscall.Munmap(db.dataref)
db.dataref = nil
db.data = nil
@@ -474,18 +523,22 @@ func munmap(db *DB) error {
return err
}
-// NOTE: This function is copied from stdlib because it is not available on darwin.
func madvise(b []byte, advice int) (err error) {
- _, _, e1 := syscall.Syscall(syscall.SYS_MADVISE, uintptr(unsafe.Pointer(&b[0])), uintptr(len(b)), uintptr(advice))
+ _, _, e1 := syscall.Syscall(
+ syscall.SYS_MADVISE,
+ uintptr(unsafe.Pointer(&b[0])),
+ uintptr(len(b)),
+ uintptr(advice),
+ )
if e1 != 0 {
err = e1
}
return
}
-// newBucket returns a new bucket associated with a transaction.
+/// newBucket() returns a new bucket associated with a transaction.
func newBucket(tx *Tx) Bucket {
- var b = Bucket{tx: tx}
+ b := Bucket{tx: tx}
if tx.writable {
b.buckets = make(map[string]*Bucket)
b.nodes = make(map[pgid]*node)
@@ -493,28 +546,28 @@ func newBucket(tx *Tx) Bucket {
return b
}
-// Writable returns whether the bucket is writable.
+/// Bucket.Writable() returns whether the bucket is writable.
func (b *Bucket) Writable() bool {
return b.tx.writable
}
-// Cursor creates a cursor associated with the bucket.
-// The cursor is only valid as long as the transaction is open.
-// Do not use a cursor after the transaction is closed.
+/// Bucket.Cursor() creates a cursor associated with the bucket. The cursor is
+/// only valid as long as the transaction is open. Do not use a cursor after
+/// the transaction is closed.
func (b *Bucket) Cursor() *Cursor {
- // Allocate and return a cursor.
return &Cursor{
bucket: b,
stack: make([]elemRef, 0),
}
}
-// Bucket retrieves a nested bucket by name.
-// Returns nil if the bucket does not exist.
-// The bucket instance is only valid for the lifetime of the transaction.
+/// Bucket.Bucket() retrieves a nested bucket by name. Returns nil if the
+/// bucket does not exist. The bucket instance is only valid for the lifetime
+/// of the transaction.
func (b *Bucket) Bucket(name []byte) *Bucket {
if b.buckets != nil {
- if child := b.buckets[string(name)]; child != nil {
+ child := b.buckets[string(name)]
+ if child != nil {
return child
}
}
@@ -529,7 +582,7 @@ func (b *Bucket) Bucket(name []byte) *Bucket {
}
// Otherwise create a bucket and cache it.
- var child = b.openBucket(v)
+ child := b.openBucket(v)
if b.buckets != nil {
b.buckets[string(name)] = child
}
@@ -537,13 +590,13 @@ func (b *Bucket) Bucket(name []byte) *Bucket {
return child
}
-// Helper method that re-interprets a sub-bucket value
-// from a parent into a Bucket
+/// Bucket.openBucket() is a helper method that re-interprets a sub-bucket
+/// value from a parent into a Bucket.
func (b *Bucket) openBucket(value []byte) *Bucket {
- var child = newBucket(b.tx)
+ child := newBucket(b.tx)
- // If this is a writable transaction then we need to copy the bucket entry.
- // Read-only transactions can point directly at the mmap entry.
+ // If this is a writable transaction then we need to copy the bucket
+ // entry. Read-only transactions can point directly at the mmap entry.
if b.tx.writable {
child.ref = &bucket{}
*child.ref = *(*bucket)(unsafe.Pointer(&value[0]))
@@ -559,9 +612,10 @@ func (b *Bucket) openBucket(value []byte) *Bucket {
return &child
}
-// CreateBucket creates a new bucket at the given key and returns the new bucket.
-// Returns an error if the key already exists, if the bucket name is blank, or if the bucket name is too long.
-// The bucket instance is only valid for the lifetime of the transaction.
+/// Bucket.CreateBucket() creates a new bucket at the given key and returns the
+/// new bucket. Returns an error if the key already exists, if the bucket name
+/// is blank, or if the bucket name is too long. The bucket instance is only
+/// valid for the lifetime of the transaction.
func (b *Bucket) CreateBucket(key []byte) (*Bucket, error) {
if b.tx.db == nil {
return nil, ErrTxClosed
@@ -584,27 +638,29 @@ func (b *Bucket) CreateBucket(key []byte) (*Bucket, error) {
}
// Create empty, inline bucket.
- var bucket = Bucket{
+ bucket := Bucket{
ref: &bucket{},
rootNode: &node{isLeaf: true},
}
- var value = bucket.write()
+ value := bucket.write()
// Insert into node.
key = cloneBytes(key)
c.node().put(key, key, value, 0, bucketLeafFlag)
// Since subbuckets are not allowed on inline buckets, we need to
- // dereference the inline page, if it exists. This will cause the bucket
- // to be treated as a regular, non-inline bucket for the rest of the tx.
+ // dereference the inline page, if it exists. This will cause the
+ // bucket to be treated as a regular, non-inline bucket for the rest of
+ // the tx.
b.page = nil
return b.Bucket(key), nil
}
-// CreateBucketIfNotExists creates a new bucket if it doesn't already exist and returns a reference to it.
-// Returns an error if the bucket name is blank, or if the bucket name is too long.
-// The bucket instance is only valid for the lifetime of the transaction.
+/// Bucket.CreateBucketIfNotExists() creates a new bucket if it doesn't already
+/// exist and returns a reference to it. Returns an error if the bucket name is
+/// blank, or if the bucket name is too long. The bucket instance is only valid
+/// for the lifetime of the transaction.
func (b *Bucket) CreateBucketIfNotExists(key []byte) (*Bucket, error) {
child, err := b.CreateBucket(key)
if err == ErrBucketExists {
@@ -615,8 +671,8 @@ func (b *Bucket) CreateBucketIfNotExists(key []byte) (*Bucket, error) {
return child, nil
}
-// DeleteBucket deletes a bucket at the given key.
-// Returns an error if the bucket does not exists, or if the key represents a non-bucket value.
+/// Bucket.DeleteBucket() deletes a bucket at the given key. Returns an error
+/// if the bucket does not exist, or if the key represents a non-bucket value.
func (b *Bucket) DeleteBucket(key []byte) error {
if b.tx.db == nil {
return ErrTxClosed
@@ -639,7 +695,8 @@ func (b *Bucket) DeleteBucket(key []byte) error {
child := b.Bucket(key)
err := child.ForEach(func(k, v []byte) error {
if v == nil {
- if err := child.DeleteBucket(k); err != nil {
+ err := child.DeleteBucket(k)
+ if err != nil {
return fmt.Errorf("delete bucket: %s", err)
}
}
@@ -663,9 +720,9 @@ func (b *Bucket) DeleteBucket(key []byte) error {
return nil
}
-// Get retrieves the value for a key in the bucket.
-// Returns a nil value if the key does not exist or if the key is a nested bucket.
-// The returned value is only valid for the life of the transaction.
+/// Bucket.Get() retrieves the value for a key in the bucket. Returns a nil
+/// value if the key does not exist or if the key is a nested bucket. The
+/// returned value is only valid for the life of the transaction.
func (b *Bucket) Get(key []byte) []byte {
k, v, flags := b.Cursor().seek(key)
@@ -674,17 +731,19 @@ func (b *Bucket) Get(key []byte) []byte {
return nil
}
- // If our target node isn't the same key as what's passed in then return nil.
+ // If our target node isn't the same key as what's passed in then return
+ // nil.
if !bytes.Equal(key, k) {
return nil
}
return v
}
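Since the returned slice points into the read-only mmap, a caller that needs the value after the transaction ends must copy it out first; a sketch assuming a Bolt-style DB.View:

var answer []byte
err := db.View(func(tx *dedo.Tx) error {
	v := tx.Bucket([]byte("widgets")).Get([]byte("answer"))
	if v != nil {
		answer = append([]byte(nil), v...) // copy out of the mmap
	}
	return nil
})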
-// Put sets the value for a key in the bucket.
-// If the key exist then its previous value will be overwritten.
-// Supplied value must remain valid for the life of the transaction.
-// Returns an error if the bucket was created from a read-only transaction, if the key is blank, if the key is too large, or if the value is too large.
+/// Bucket.Put() sets the value for a key in the bucket. If the key exists then
+/// its previous value will be overwritten. Supplied value must remain valid
+/// for the life of the transaction. Returns an error if the bucket was created
+/// from a read-only transaction, if the key is blank, if the key is too large,
+/// or if the value is too large.
func (b *Bucket) Put(key []byte, value []byte) error {
if b.tx.db == nil {
return ErrTxClosed
@@ -714,9 +773,9 @@ func (b *Bucket) Put(key []byte, value []byte) error {
return nil
}
-// Delete removes a key from the bucket.
-// If the key does not exist then nothing is done and a nil error is returned.
-// Returns an error if the bucket was created from a read-only transaction.
+/// Bucket.Delete() removes a key from the bucket. If the key does not exist
+/// then nothing is done and a nil error is returned. Returns an error if the
+/// bucket was created from a read-only transaction.
func (b *Bucket) Delete(key []byte) error {
if b.tx.db == nil {
return ErrTxClosed
@@ -739,12 +798,13 @@ func (b *Bucket) Delete(key []byte) error {
return nil
}
-// Sequence returns the current integer for the bucket without incrementing it.
+/// Bucket.Sequence() returns the current integer for the bucket without
+/// incrementing it.
func (b *Bucket) Sequence() uint64 {
return b.ref.sequence
}
-// SetSequence updates the sequence number for the bucket.
+/// Bucket.SetSequence() updates the sequence number for the bucket.
func (b *Bucket) SetSequence(v uint64) error {
if b.tx.db == nil {
return ErrTxClosed
@@ -763,7 +823,7 @@ func (b *Bucket) SetSequence(v uint64) error {
return nil
}
-// NextSequence returns an autoincrementing integer for the bucket.
+/// Bucket.NextSequence() returns an autoincrementing integer for the bucket.
func (b *Bucket) NextSequence() (uint64, error) {
if b.tx.db == nil {
return 0, ErrTxClosed
@@ -782,24 +842,26 @@ func (b *Bucket) NextSequence() (uint64, error) {
return b.ref.sequence, nil
}
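A common use of NextSequence() is generating compact, insertion-ordered keys inside a read-write transaction; a sketch using the standard library's encoding/binary, with b and value in scope:

seq, err := b.NextSequence()
if err != nil {
	return err
}
key := make([]byte, 8)
binary.BigEndian.PutUint64(key, seq) // big-endian keeps keys sorted
return b.Put(key, value)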
-// ForEach executes a function for each key/value pair in a bucket.
-// If the provided function returns an error then the iteration is stopped and
-// the error is returned to the caller. The provided function must not modify
-// the bucket; this will result in undefined behavior.
+/// Bucket.ForEach() executes a function for each key/value pair in a bucket.
+/// If the provided function returns an error then the iteration is stopped and
+/// the error is returned to the caller. The provided function must not modify
+/// the bucket; this will result in undefined behavior.
func (b *Bucket) ForEach(fn func(k, v []byte) error) error {
if b.tx.db == nil {
return ErrTxClosed
}
c := b.Cursor()
for k, v := c.First(); k != nil; k, v = c.Next() {
- if err := fn(k, v); err != nil {
+ err := fn(k, v)
+ if err != nil {
return err
}
}
return nil
}
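ForEach is the closure-based counterpart of the cursor loop; returning a non-nil error from the callback stops the iteration and propagates the error. A sketch:

err := b.ForEach(func(k, v []byte) error {
	if v == nil {
		return nil // skip nested buckets
	}
	fmt.Printf("%s = %s\n", k, v)
	return nil
})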
-// forEachPage iterates over every page in a bucket, including inline pages.
+/// Bucket.forEachPage() iterates over every page in a bucket, including inline
+/// pages.
func (b *Bucket) forEachPage(fn func(*page, int)) {
// If we have an inline page then just use that.
if b.page != nil {
@@ -811,8 +873,8 @@ func (b *Bucket) forEachPage(fn func(*page, int)) {
b.tx.forEachPage(b.ref.root, 0, fn)
}
-// forEachPageNode iterates over every page (or node) in a bucket.
-// This also includes inline pages.
+/// Bucket.forEachPageNode() iterates over every page (or node) in a bucket.
+/// This also includes inline pages.
func (b *Bucket) forEachPageNode(fn func(*page, *node, int)) {
// If we have an inline page or root node then just use that.
if b.page != nil {
@@ -822,8 +884,12 @@ func (b *Bucket) forEachPageNode(fn func(*page, *node, int)) {
b._forEachPageNode(b.ref.root, 0, fn)
}
-func (b *Bucket) _forEachPageNode(pgid pgid, depth int, fn func(*page, *node, int)) {
- var p, n = b.pageNode(pgid)
+func (b *Bucket) _forEachPageNode(
+ pgid pgid,
+ depth int,
+ fn func(*page, *node, int),
+) {
+ p, n := b.pageNode(pgid)
// Execute function.
fn(p, n, depth)
@@ -845,25 +911,27 @@ func (b *Bucket) _forEachPageNode(pgid pgid, depth int, fn func(*page, *node, in
}
}
-// spill writes all the nodes for this bucket to dirty pages.
+/// Bucket.spill() writes all the nodes for this bucket to dirty pages.
func (b *Bucket) spill() error {
// Spill all child buckets first.
for name, child := range b.buckets {
- // If the child bucket is small enough and it has no child buckets then
- // write it inline into the parent bucket's page. Otherwise spill it
- // like a normal bucket and make the parent value a pointer to the page.
+ // If the child bucket is small enough and it has no child
+ // buckets then write it inline into the parent bucket's page.
+ // Otherwise spill it like a normal bucket and make the parent
+ // value a pointer to the page.
var value []byte
if child.inlineable() {
child.free()
value = child.write()
} else {
- if err := child.spill(); err != nil {
+ err := child.spill()
+ if err != nil {
return err
}
// Update the child bucket header in this bucket.
value = make([]byte, unsafe.Sizeof(bucket{}))
- var bucket = (*bucket)(unsafe.Pointer(&value[0]))
+ bucket := (*bucket)(unsafe.Pointer(&value[0]))
*bucket = *child.ref
}
@@ -873,15 +941,28 @@ func (b *Bucket) spill() error {
}
// Update parent node.
- var c = b.Cursor()
+ c := b.Cursor()
k, _, flags := c.seek([]byte(name))
if !bytes.Equal([]byte(name), k) {
- panic(fmt.Sprintf("misplaced bucket header: %x -> %x", []byte(name), k))
+ panic(fmt.Sprintf(
+ "misplaced bucket header: %x -> %x",
+ []byte(name),
+ k,
+ ))
}
if flags&bucketLeafFlag == 0 {
- panic(fmt.Sprintf("unexpected bucket header flag: %x", flags))
+ panic(fmt.Sprintf(
+ "unexpected bucket header flag: %x",
+ flags,
+ ))
}
- c.node().put([]byte(name), []byte(name), value, 0, bucketLeafFlag)
+ c.node().put(
+ []byte(name),
+ []byte(name),
+ value,
+ 0,
+ bucketLeafFlag,
+ )
}
// Ignore if there's not a materialized root node.
@@ -890,33 +971,38 @@ func (b *Bucket) spill() error {
}
// Spill nodes.
- if err := b.rootNode.spill(); err != nil {
+ err := b.rootNode.spill()
+ if err != nil {
return err
}
b.rootNode = b.rootNode.root()
// Update the root node for this bucket.
if b.rootNode.pgid >= b.tx.meta.pgid {
- panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", b.rootNode.pgid, b.tx.meta.pgid))
+ panic(fmt.Sprintf(
+ "pgid (%d) above high water mark (%d)",
+ b.rootNode.pgid,
+ b.tx.meta.pgid,
+ ))
}
b.ref.root = b.rootNode.pgid
return nil
}
-// inlineable returns true if a bucket is small enough to be written inline
-// and if it contains no subbuckets. Otherwise returns false.
+/// Bucket.inlineable() returns true if a bucket is small enough to be written
+/// inline and if it contains no subbuckets. Otherwise returns false.
func (b *Bucket) inlineable() bool {
- var n = b.rootNode
+ n := b.rootNode
// Bucket must only contain a single leaf node.
if n == nil || !n.isLeaf {
return false
}
- // Bucket is not inlineable if it contains subbuckets or if it goes beyond
- // our threshold for inline bucket size.
- var size = pageHeaderSize
+ // Bucket is not inlineable if it contains subbuckets or if it goes
+ // beyond our threshold for inline bucket size.
+ size := pageHeaderSize
for _, inode := range n.inodes {
size += leafPageElementSize + len(inode.key) + len(inode.value)
@@ -930,29 +1016,30 @@ func (b *Bucket) inlineable() bool {
return true
}
-// Returns the maximum total size of a bucket to make it a candidate for inlining.
+/// Bucket.maxInlineBucketSize() returns the maximum total size of a bucket to
+/// make it a candidate for inlining.
func (b *Bucket) maxInlineBucketSize() int {
return b.tx.db.pageSize / 4
}
-// write allocates and writes a bucket to a byte slice.
+/// Bucket.write() allocates and writes a bucket to a byte slice.
func (b *Bucket) write() []byte {
// Allocate the appropriate size.
- var n = b.rootNode
- var value = make([]byte, bucketHeaderSize+n.size())
+ n := b.rootNode
+ value := make([]byte, bucketHeaderSize+n.size())
// Write a bucket header.
- var bucket = (*bucket)(unsafe.Pointer(&value[0]))
+ bucket := (*bucket)(unsafe.Pointer(&value[0]))
*bucket = *b.ref
// Convert byte slice to a fake page and write the root node.
- var p = (*page)(unsafe.Pointer(&value[bucketHeaderSize]))
+ p := (*page)(unsafe.Pointer(&value[bucketHeaderSize]))
n.write(p)
return value
}
-// rebalance attempts to balance all nodes.
+/// Bucket.rebalance() attempts to balance all nodes.
func (b *Bucket) rebalance() {
for _, n := range b.nodes {
n.rebalance()
@@ -962,17 +1049,19 @@ func (b *Bucket) rebalance() {
}
}
-// node creates a node from a page and associates it with a given parent.
+/// Bucket.node() creates a node from a page and associates it with a given
+/// parent.
func (b *Bucket) node(pgid pgid, parent *node) *node {
- _assert(b.nodes != nil, "nodes map expected")
+ g.Assert(b.nodes != nil, "nodes map expected")
// Retrieve node if it's already been created.
- if n := b.nodes[pgid]; n != nil {
+ n := b.nodes[pgid]
+ if n != nil {
return n
}
// Otherwise create a node and cache it.
- n := &node{bucket: b, parent: parent}
+ n = &node{bucket: b, parent: parent}
if parent == nil {
b.rootNode = n
} else {
@@ -980,7 +1069,7 @@ func (b *Bucket) node(pgid pgid, parent *node) *node {
}
// Use the inline page if this is an inline bucket.
- var p = b.page
+ p := b.page
if p == nil {
p = b.tx.page(pgid)
}
@@ -992,13 +1081,13 @@ func (b *Bucket) node(pgid pgid, parent *node) *node {
return n
}
-// free recursively frees all pages in the bucket.
+/// Bucket.free() recursively frees all pages in the bucket.
func (b *Bucket) free() {
if b.ref.root == 0 {
return
}
- var tx = b.tx
+ tx := b.tx
b.forEachPageNode(func(p *page, n *node, _ int) {
if p != nil {
tx.db.freelist.free(tx.meta.txid, p)
@@ -1009,7 +1098,7 @@ func (b *Bucket) free() {
b.ref.root = 0
}
-// dereference removes all references to the old mmap.
+/// Bucket.dereference() removes all references to the old mmap.
func (b *Bucket) dereference() {
if b.rootNode != nil {
b.rootNode.root().dereference()
@@ -1020,14 +1109,19 @@ func (b *Bucket) dereference() {
}
}
-// pageNode returns the in-memory node, if it exists.
-// Otherwise returns the underlying page.
+/// Bucket.pageNode() returns the in-memory node, if it exists. Otherwise
+/// returns the underlying page.
func (b *Bucket) pageNode(id pgid) (*page, *node) {
// Inline buckets have a fake page embedded in their value so treat them
- // differently. We'll return the rootNode (if available) or the fake page.
+ // differently. We'll return the rootNode (if available) or the fake
+ // page.
if b.ref.root == 0 {
if id != 0 {
- panic(fmt.Sprintf("inline bucket non-zero page access(2): %d != 0", id))
+ panic(fmt.Sprintf(
+ "inline bucket non-zero page access(2): " +
+ "%d != 0",
+ id,
+ ))
}
if b.rootNode != nil {
return nil, b.rootNode
@@ -1037,32 +1131,35 @@ func (b *Bucket) pageNode(id pgid) (*page, *node) {
// Check the node cache for non-inline buckets.
if b.nodes != nil {
- if n := b.nodes[id]; n != nil {
+ n := b.nodes[id]
+ if n != nil {
return nil, n
}
}
- // Finally lookup the page from the transaction if no node is materialized.
+ // Finally lookup the page from the transaction if no node is
+ // materialized.
return b.tx.page(id), nil
}
-// cloneBytes returns a copy of a given slice.
+/// cloneBytes() returns a copy of a given slice.
func cloneBytes(v []byte) []byte {
- var clone = make([]byte, len(v))
+ clone := make([]byte, len(v))
copy(clone, v)
return clone
}
-// Bucket returns the bucket that this cursor was created from.
+/// Cursor.Bucket() returns the bucket that this cursor was created from.
func (c *Cursor) Bucket() *Bucket {
return c.bucket
}
-// First moves the cursor to the first item in the bucket and returns its key and value.
-// If the bucket is empty then a nil key and value are returned.
-// The returned key and value are only valid for the life of the transaction.
+/// Cursor.First() moves the cursor to the first item in the bucket and returns
+/// its key and value. If the bucket is empty then a nil key and value are
+/// returned. The returned key and value are only valid for the life of the
+/// transaction.
func (c *Cursor) First() (key []byte, value []byte) {
- _assert(c.bucket.tx.db != nil, "tx closed")
+ g.Assert(c.bucket.tx.db != nil, "tx closed")
c.stack = c.stack[:0]
p, n := c.bucket.pageNode(c.bucket.ref.root)
c.stack = append(c.stack, elemRef{page: p, node: n, index: 0})
@@ -1082,11 +1179,12 @@ func (c *Cursor) First() (key []byte, value []byte) {
}
-// Last moves the cursor to the last item in the bucket and returns its key and value.
-// If the bucket is empty then a nil key and value are returned.
-// The returned key and value are only valid for the life of the transaction.
+/// Cursor.Last() moves the cursor to the last item in the bucket and returns
+/// its key and value. If the bucket is empty then a nil key and value are
+/// returned. The returned key and value are only valid for the life of the
+/// transaction.
func (c *Cursor) Last() (key []byte, value []byte) {
- _assert(c.bucket.tx.db != nil, "tx closed")
+ g.Assert(c.bucket.tx.db != nil, "tx closed")
c.stack = c.stack[:0]
p, n := c.bucket.pageNode(c.bucket.ref.root)
ref := elemRef{page: p, node: n}
@@ -1100,11 +1198,12 @@ func (c *Cursor) Last() (key []byte, value []byte) {
return k, v
}
-// Next moves the cursor to the next item in the bucket and returns its key and value.
-// If the cursor is at the end of the bucket then a nil key and value are returned.
-// The returned key and value are only valid for the life of the transaction.
+/// Cursor.Next() moves the cursor to the next item in the bucket and returns
+/// its key and value. If the cursor is at the end of the bucket then a nil key
+/// and value are returned. The returned key and value are only valid for the
+/// life of the transaction.
func (c *Cursor) Next() (key []byte, value []byte) {
- _assert(c.bucket.tx.db != nil, "tx closed")
+ g.Assert(c.bucket.tx.db != nil, "tx closed")
k, v, flags := c.next()
if (flags & uint32(bucketLeafFlag)) != 0 {
return k, nil
@@ -1112,11 +1211,12 @@ func (c *Cursor) Next() (key []byte, value []byte) {
return k, v
}
-// Prev moves the cursor to the previous item in the bucket and returns its key and value.
-// If the cursor is at the beginning of the bucket then a nil key and value are returned.
-// The returned key and value are only valid for the life of the transaction.
+/// Cursor.Prev() moves the cursor to the previous item in the bucket and
+/// returns its key and value. If the cursor is at the beginning of the bucket
+/// then a nil key and value are returned. The returned key and value are only
+/// valid for the life of the transaction.
func (c *Cursor) Prev() (key []byte, value []byte) {
- _assert(c.bucket.tx.db != nil, "tx closed")
+ g.Assert(c.bucket.tx.db != nil, "tx closed")
// Attempt to move back one element until we're successful.
// Move up the stack as we hit the beginning of each page in our stack.
@@ -1134,7 +1234,8 @@ func (c *Cursor) Prev() (key []byte, value []byte) {
return nil, nil
}
- // Move down the stack to find the last element of the last leaf under this branch.
+ // Move down the stack to find the last element of the last leaf under
+ // this branch.
c.last()
k, v, flags := c.keyValue()
if (flags & uint32(bucketLeafFlag)) != 0 {
@@ -1143,15 +1244,17 @@ func (c *Cursor) Prev() (key []byte, value []byte) {
return k, v
}
-// Seek moves the cursor to a given key and returns it.
-// If the key does not exist then the next key is used. If no keys
-// follow, a nil key is returned.
-// The returned key and value are only valid for the life of the transaction.
+/// Cursor.Seek() moves the cursor to a given key and returns it. If the key
+/// does not exist then the next key is used. If no keys follow, a nil key is
+/// returned. The returned key and value are only valid for the life of the
+/// transaction.
func (c *Cursor) Seek(seek []byte) (key []byte, value []byte) {
k, v, flags := c.seek(seek)
- // If we ended up after the last element of a page then move to the next one.
- if ref := &c.stack[len(c.stack)-1]; ref.index >= ref.count() {
+ // If we ended up after the last element of a page then move to the next
+ // one.
+ ref := &c.stack[len(c.stack)-1]
+ if ref.index >= ref.count() {
k, v, flags = c.next()
}
@@ -1159,12 +1262,14 @@ func (c *Cursor) Seek(seek []byte) (key []byte, value []byte) {
return nil, nil
} else if (flags & uint32(bucketLeafFlag)) != 0 {
return k, nil
+ } else {
+ return k, v
}
- return k, v
}
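Seek's land-on-or-after behavior is what makes prefix scans possible; the standard pattern, run inside an open transaction:

c := b.Cursor()
prefix := []byte("user:")
k, v := c.Seek(prefix)
for ; k != nil && bytes.HasPrefix(k, prefix); k, v = c.Next() {
	fmt.Printf("%s = %s\n", k, v)
}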
-// Delete removes the current key/value under the cursor from the bucket.
-// Delete fails if current key/value is a bucket or if the transaction is not writable.
+/// Cursor.Delete() removes the current key/value under the cursor from the
+/// bucket. It fails if the current key/value is a bucket or if the
+/// transaction is not writable.
func (c *Cursor) Delete() error {
if c.bucket.tx.db == nil {
return ErrTxClosed
@@ -1182,10 +1287,10 @@ func (c *Cursor) Delete() error {
return nil
}
-// seek moves the cursor to a given key and returns it.
-// If the key does not exist then the next key is used.
+/// Cursor.seek() moves the cursor to a given key and returns it. If the key
+/// does not exist then the next key is used.
func (c *Cursor) seek(seek []byte) (key []byte, value []byte, flags uint32) {
- _assert(c.bucket.tx.db != nil, "tx closed")
+ g.Assert(c.bucket.tx.db != nil, "tx closed")
// Start from root page/node and traverse to correct page.
c.stack = c.stack[:0]
@@ -1201,11 +1306,12 @@ func (c *Cursor) seek(seek []byte) (key []byte, value []byte, flags uint32) {
return c.keyValue()
}
-// first moves the cursor to the first leaf element under the last page in the stack.
+/// Cursor.first() moves the cursor to the first leaf element under the last
+/// page in the stack.
func (c *Cursor) first() {
for {
// Exit when we hit a leaf page.
- var ref = &c.stack[len(c.stack)-1]
+ ref := &c.stack[len(c.stack)-1]
if ref.isLeaf() {
break
}
@@ -1215,14 +1321,17 @@ func (c *Cursor) first() {
if ref.node != nil {
pgid = ref.node.inodes[ref.index].pgid
} else {
- pgid = ref.page.branchPageElement(uint16(ref.index)).pgid
+ pgid = ref.page.branchPageElement(
+ uint16(ref.index),
+ ).pgid
}
p, n := c.bucket.pageNode(pgid)
c.stack = append(c.stack, elemRef{page: p, node: n, index: 0})
}
}
-// last moves the cursor to the last leaf element under the last page in the stack.
+/// Cursor.last() moves the cursor to the last leaf element under the last page
+/// in the stack.
func (c *Cursor) last() {
for {
// Exit when we hit a leaf page.
@@ -1236,22 +1345,26 @@ func (c *Cursor) last() {
if ref.node != nil {
pgid = ref.node.inodes[ref.index].pgid
} else {
- pgid = ref.page.branchPageElement(uint16(ref.index)).pgid
+ pgid = ref.page.branchPageElement(
+ uint16(ref.index),
+ ).pgid
}
p, n := c.bucket.pageNode(pgid)
- var nextRef = elemRef{page: p, node: n}
+ nextRef := elemRef{page: p, node: n}
nextRef.index = nextRef.count() - 1
c.stack = append(c.stack, nextRef)
}
}
-// next moves to the next leaf element and returns the key and value.
-// If the cursor is at the last leaf element then it stays there and returns nil.
+/// Cursor.next() moves to the next leaf element and returns the key and value.
+/// If the cursor is at the last leaf element then it stays there and returns
+/// nil.
func (c *Cursor) next() (key []byte, value []byte, flags uint32) {
for {
// Attempt to move over one element until we're successful.
- // Move up the stack as we hit the end of each page in our stack.
+ // Move up the stack as we hit the end of each page in our
+ // stack.
var i int
for i = len(c.stack) - 1; i >= 0; i-- {
elem := &c.stack[i]
@@ -1261,18 +1374,19 @@ func (c *Cursor) next() (key []byte, value []byte, flags uint32) {
}
}
- // If we've hit the root page then stop and return. This will leave the
- // cursor on the last element of the last page.
+ // If we've hit the root page then stop and return. This will
+ // leave the cursor on the last element of the last page.
if i == -1 {
return nil, nil, 0
}
- // Otherwise start from where we left off in the stack and find the
- // first element of the first leaf page.
+ // Otherwise start from where we left off in the stack and find
+ // the first element of the first leaf page.
c.stack = c.stack[:i+1]
c.first()
- // If this is an empty page then restart and move back up the stack.
+ // If this is an empty page then restart and move back up the
+ // stack.
// https://github.com/boltdb/bolt/issues/450
if c.stack[len(c.stack)-1].count() == 0 {
continue
@@ -1282,7 +1396,8 @@ func (c *Cursor) next() (key []byte, value []byte, flags uint32) {
}
}
-// search recursively performs a binary search against a given page/node until it finds a given key.
+/// Cursor.search() recursively performs a binary search against a given
+/// page/node until it finds a given key.
func (c *Cursor) search(key []byte, pgid pgid) {
p, n := c.bucket.pageNode(pgid)
if p != nil && (p.flags&(branchPageFlag|leafPageFlag)) == 0 {
@@ -1307,8 +1422,9 @@ func (c *Cursor) search(key []byte, pgid pgid) {
func (c *Cursor) searchNode(key []byte, n *node) {
var exact bool
index := sort.Search(len(n.inodes), func(i int) bool {
- // TODO(benbjohnson): Optimize this range search. It's a bit hacky right now.
- // sort.Search() finds the lowest index where f() != -1 but we need the highest index.
+ // TODO(benbjohnson): Optimize this range search. It's a bit
+ // hacky right now. sort.Search() finds the lowest index where
+ // f() != -1 but we need the highest index.
ret := bytes.Compare(n.inodes[i].key, key)
if ret == 0 {
exact = true
@@ -1330,8 +1446,9 @@ func (c *Cursor) searchPage(key []byte, p *page) {
var exact bool
index := sort.Search(int(p.count), func(i int) bool {
- // TODO(benbjohnson): Optimize this range search. It's a bit hacky right now.
- // sort.Search() finds the lowest index where f() != -1 but we need the highest index.
+ // TODO(benbjohnson): Optimize this range search. It's a bit
+ // hacky right now. sort.Search() finds the lowest index where
+ // f() != -1 but we need the highest index.
ret := bytes.Compare(inodes[i].key(), key)
if ret == 0 {
exact = true
@@ -1347,7 +1464,7 @@ func (c *Cursor) searchPage(key []byte, p *page) {
c.search(key, inodes[index].pgid)
}
-// nsearch searches the leaf node on the top of the stack for a key.
+/// Cursor.nsearch() searches the leaf node on the top of the stack for a key.
func (c *Cursor) nsearch(key []byte) {
e := &c.stack[len(c.stack)-1]
p, n := e.page, e.node
@@ -1369,7 +1486,7 @@ func (c *Cursor) nsearch(key []byte) {
e.index = index
}
-// keyValue returns the key and value of the current leaf element.
+/// Cursor.keyValue() returns the key and value of the current leaf element.
func (c *Cursor) keyValue() ([]byte, []byte, uint32) {
ref := &c.stack[len(c.stack)-1]
if ref.count() == 0 || ref.index >= ref.count() {
@@ -1387,29 +1504,33 @@ func (c *Cursor) keyValue() ([]byte, []byte, uint32) {
return elem.key(), elem.value(), elem.flags
}
-// node returns the node that the cursor is currently positioned on.
+/// Cursor.node() returns the node that the cursor is currently positioned on.
func (c *Cursor) node() *node {
- _assert(len(c.stack) > 0, "accessing a node with a zero-length cursor stack")
+ g.Assert(
+ len(c.stack) > 0,
+ "accessing a node with a zero-length cursor stack",
+ )
// If the top of the stack is a leaf node then just return it.
- if ref := &c.stack[len(c.stack)-1]; ref.node != nil && ref.isLeaf() {
+ ref := &c.stack[len(c.stack)-1]
+ if ref.node != nil && ref.isLeaf() {
return ref.node
}
// Start from root and traverse down the hierarchy.
- var n = c.stack[0].node
+ n := c.stack[0].node
if n == nil {
n = c.bucket.node(c.stack[0].page.id, nil)
}
for _, ref := range c.stack[:len(c.stack)-1] {
- _assert(!n.isLeaf, "expected branch node")
+ g.Assert(!n.isLeaf, "expected branch node")
n = n.childAt(int(ref.index))
}
- _assert(n.isLeaf, "expected leaf node")
+ g.Assert(n.isLeaf, "expected leaf node")
return n
}
-// isLeaf returns whether the ref is pointing at a leaf page/node.
+/// elemRef.isLeaf() returns whether the ref is pointing at a leaf page/node.
func (r *elemRef) isLeaf() bool {
if r.node != nil {
return r.node.isLeaf
@@ -1417,7 +1538,7 @@ func (r *elemRef) isLeaf() bool {
return (r.page.flags & leafPageFlag) != 0
}
-// count returns the number of inodes or page elements.
+/// elemRef.count() returns the number of inodes or page elements.
func (r *elemRef) count() int {
if r.node != nil {
return len(r.node.inodes)
@@ -1425,7 +1546,7 @@ func (r *elemRef) count() int {
return int(r.page.count)
}
-// Path returns the path to currently open database file.
+/// DB.Path() returns the path to the currently open database file.
func (db *DB) Path() string {
return db.path
}
@@ -1478,7 +1599,7 @@ func readPageSize(db *DB) (int, error) {
return int(m.pageSize), nil
}
-// init creates a new database file and initializes its meta pages.
+/// initDB() creates a new database file and initializes its meta pages.
func initDB(db *DB, size int64) error {
if size != 0 {
pageSize, err := readPageSize(db)
@@ -1524,18 +1645,21 @@ func initDB(db *DB, size int64) error {
p.count = 0
// Write the buffer to our data file.
- if _, err := db.file.WriteAt(buf, 0); err != nil {
+ _, err := db.file.WriteAt(buf, 0)
+ if err != nil {
return err
}
- if err := fdatasync(db); err != nil {
+
+ err = fdatasync(db)
+ if err != nil {
return err
}
return nil
}
-// Open creates and opens a database at the given path.
-// If the file does not exist then it will be created automatically.
+/// Open creates and opens a database at the given path. If the file does not
+/// exist then it will be created automatically.
func Open(path string) (*DB, error) {
file, err := openFile(path)
if err != nil {
@@ -1583,8 +1707,8 @@ func Open(path string) (*DB, error) {
return db, nil
}
-// mmap opens the underlying memory-mapped file and initializes the meta references.
-// minsz is the minimum size that the new mmap can be.
+/// DB.mmap() opens the underlying memory-mapped file and initializes the meta
+/// references. minsz is the minimum size that the new mmap can be.
func (db *DB) mmap(minsz int) error {
db.mmaplock.Lock()
defer db.mmaplock.Unlock()
@@ -1597,7 +1721,7 @@ func (db *DB) mmap(minsz int) error {
}
// Ensure the size is at least the minimum size.
- var size = int(info.Size())
+ size := int(info.Size())
if size < minsz {
size = minsz
}
@@ -1612,12 +1736,14 @@ func (db *DB) mmap(minsz int) error {
}
// Unmap existing data before continuing.
- if err := db.munmap(); err != nil {
+ err = db.munmap()
+ if err != nil {
return err
}
// Memory-map the data file as a byte slice.
- if err := mmap(db, size); err != nil {
+ err = mmap(db, size)
+ if err != nil {
return err
}
@@ -1625,9 +1751,9 @@ func (db *DB) mmap(minsz int) error {
db.meta0 = db.page(0).meta()
db.meta1 = db.page(1).meta()
- // Validate the meta pages. We only return an error if both meta pages fail
- // validation, since meta0 failing validation means that it wasn't saved
- // properly -- but we can recover using meta1. And vice-versa.
+ // Validate the meta pages. We only return an error if both meta pages
+ // fail validation, since meta0 failing validation means that it wasn't
+ // saved properly -- but we can recover using meta1. And vice-versa.
err0 := db.meta0.validate()
err1 := db.meta1.validate()
if err0 != nil && err1 != nil {
@@ -1637,17 +1763,19 @@ func (db *DB) mmap(minsz int) error {
return nil
}
-// munmap unmaps the data file from memory.
+/// DB.munmap() unmaps the data file from memory.
func (db *DB) munmap() error {
- if err := munmap(db); err != nil {
+ err := munmap(db)
+ if err != nil {
return fmt.Errorf("unmap error: " + err.Error())
}
return nil
}
-// mmapSize determines the appropriate size for the mmap given the current size
-// of the database. The minimum size is 32KB and doubles until it reaches 1GB.
-// Returns an error if the new mmap size is greater than the max allowed.
+/// DB.mmapSize() determines the appropriate size for the mmap given the
+/// current size of the database. The minimum size is 32KB and doubles until it
+/// reaches 1GB. Returns an error if the new mmap size is greater than the max
+/// allowed.
func (db *DB) mmapSize(size int) (int, error) {
// Double the size from 32KB until 1GB.
for i := uint(15); i <= 30; i++ {
@@ -1663,7 +1791,8 @@ func (db *DB) mmapSize(size int) (int, error) {
// If larger than 1GB then grow by 1GB at a time.
sz := int64(size)
- if remainder := sz % int64(maxMmapStep); remainder > 0 {
+ remainder := sz % int64(maxMmapStep)
+ if remainder > 0 {
sz += int64(maxMmapStep) - remainder
}
@@ -1682,8 +1811,8 @@ func (db *DB) mmapSize(size int) (int, error) {
return int(sz), nil
}
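
The sizing rule documented above can be sketched on its own. This simplified
version assumes a 1GB maxMmapStep and skips the page-size alignment the real
method also performs:

	// nextMmapSize doubles from 32KB (1<<15) up to 1GB (1<<30), then
	// rounds up to the next 1GB boundary.
	func nextMmapSize(size int) int {
		for i := uint(15); i <= 30; i++ {
			if size <= 1<<i {
				return 1 << i
			}
		}

		const step = 1 << 30
		remainder := size % step
		if remainder > 0 {
			size += step - remainder
		}
		return size
	}

E.g. a 50KB database maps to 64KB, and a 1.2GB one rounds up to 2GB.
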
-// Close releases all database resources.
-// All transactions must be closed before closing the database.
+/// DB.Close() releases all database resources. All transactions must be closed
+/// before closing the database.
func (db *DB) Close() error {
db.rwlock.Lock()
defer db.rwlock.Unlock()
@@ -1707,7 +1836,8 @@ func (db *DB) close() error {
db.freelist = nil
// Close the mmap.
- if err := db.munmap(); err != nil {
+ err := db.munmap()
+ if err != nil {
return err
}
@@ -1715,12 +1845,14 @@ func (db *DB) close() error {
if db.file != nil {
// No need to unlock read-only file.
// Unlock the file.
- if err := funlock(db); err != nil {
- log.Printf("bolt.Close(): funlock error: %s", err)
+ err := funlock(db)
+ if err != nil {
+ log.Printf("dedo.Close(): funlock error: %s", err)
}
// Close the file descriptor.
- if err := db.file.Close(); err != nil {
+ err = db.file.Close()
+ if err != nil {
return fmt.Errorf("db file close: %s", err)
}
db.file = nil
@@ -1730,23 +1862,22 @@ func (db *DB) close() error {
return nil
}
-// Begin starts a new transaction.
-// Multiple read-only transactions can be used concurrently but only one
-// write transaction can be used at a time. Starting multiple write transactions
-// will cause the calls to block and be serialized until the current write
-// transaction finishes.
-//
-// Transactions should not be dependent on one another. Opening a read
-// transaction and a write transaction in the same goroutine can cause the
-// writer to deadlock because the database periodically needs to re-mmap itself
-// as it grows and it cannot do that while a read transaction is open.
-//
-// If a long running read transaction (for example, a snapshot transaction) is
-// needed, you might want to set DB.InitialMmapSize to a large enough value
-// to avoid potential blocking of write transaction.
-//
-// IMPORTANT: You must close read-only transactions after you are finished or
-// else the database will not reclaim old pages.
+/// DB.Begin() starts a new transaction. Multiple read-only transactions can be
+/// used concurrently but only one write transaction can be used at a time.
+/// Starting multiple write transactions will cause the calls to block and be
+/// serialized until the current write transaction finishes.
+///
+/// Transactions should not be dependent on one another. Opening a read
+/// transaction and a write transaction in the same goroutine can cause the
+/// writer to deadlock because the database periodically needs to re-mmap itself
+/// as it grows and it cannot do that while a read transaction is open.
+///
+/// If a long running read transaction (for example, a snapshot transaction) is
+/// needed, you might want to set DB.InitialMmapSize to a large enough value to
+/// avoid potential blocking of the write transaction.
+///
+/// IMPORTANT: You must close read-only transactions after you are finished or
+/// else the database will not reclaim old pages.
func (db *DB) Begin(writable bool) (*Tx, error) {
if writable {
return db.beginRWTx()
@@ -1756,14 +1887,14 @@ func (db *DB) Begin(writable bool) (*Tx, error) {
}
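
A hand-managed read-write transaction might look like the following sketch;
Bucket.Put() is assumed here to keep the API Dedo inherits from Bolt:

	func putOne(db *dedo.DB, bucket, k, v []byte) error {
		tx, err := db.Begin(true)
		if err != nil {
			return err
		}
		defer tx.Rollback() // harmless after a successful Commit

		b, err := tx.CreateBucketIfNotExists(bucket)
		if err != nil {
			return err
		}

		err = b.Put(k, v)
		if err != nil {
			return err
		}
		return tx.Commit()
	}
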
func (db *DB) beginTx() (*Tx, error) {
- // Lock the meta pages while we initialize the transaction. We obtain
+ // Lock the meta pages while we initialize the transaction. We obtain
// the meta lock before the mmap lock because that's the order that the
// write transaction will obtain them.
db.metalock.Lock()
- // Obtain a read-only lock on the mmap. When the mmap is remapped it will
- // obtain a write lock so all transactions must finish before it can be
- // remapped.
+ // Obtain a read-only lock on the mmap. When the mmap is remapped it
+ // will obtain a write lock so all transactions must finish before it
+ // can be remapped.
db.mmaplock.RLock()
// Exit if the database is not open yet.
@@ -1787,8 +1918,8 @@ func (db *DB) beginTx() (*Tx, error) {
}
func (db *DB) beginRWTx() (*Tx, error) {
- // Obtain writer lock. This is released by the transaction when it closes.
- // This enforces only one writer transaction at a time.
+ // Obtain writer lock. This is released by the transaction when it
+ // closes. This enforces only one writer transaction at a time.
db.rwlock.Lock()
// Once we have the writer lock then we can lock the meta pages so that
@@ -1808,7 +1939,7 @@ func (db *DB) beginRWTx() (*Tx, error) {
db.rwtx = t
// Free any pages associated with closed read-only transactions.
- var minid txid = 0xFFFFFFFFFFFFFFFF
+ minid := txid(0xFFFFFFFFFFFFFFFF)
for _, t := range db.txs {
if t.meta.txid < minid {
minid = t.meta.txid
@@ -1821,7 +1952,7 @@ func (db *DB) beginRWTx() (*Tx, error) {
return t, nil
}
-// removeTx removes a transaction from the database.
+/// DB.removeTx() removes a transaction from the database.
func (db *DB) removeTx(tx *Tx) {
// Release the read lock on the mmap.
db.mmaplock.RUnlock()
@@ -1844,13 +1975,14 @@ func (db *DB) removeTx(tx *Tx) {
db.metalock.Unlock()
}
-// Update executes a function within the context of a read-write managed transaction.
-// If no error is returned from the function then the transaction is committed.
-// If an error is returned then the entire transaction is rolled back.
-// Any error that is returned from the function or returned from the commit is
-// returned from the Update() method.
-//
-// Attempting to manually commit or rollback within the function will cause a panic.
+/// DB.Update() executes a function within the context of a read-write managed
+/// transaction. If no error is returned from the function then the transaction
+/// is committed. If an error is returned then the entire transaction is rolled
+/// back. Any error that is returned from the function or returned from the
+/// commit is returned from the DB.Update() method.
+///
+/// Attempting to manually commit or rollback within the function will cause a
+/// panic.
func (db *DB) Update(fn func(*Tx) error) error {
t, err := db.Begin(true)
if err != nil {
@@ -1864,10 +1996,12 @@ func (db *DB) Update(fn func(*Tx) error) error {
}
}()
- // Mark as a managed tx so that the inner function cannot manually commit.
+ // Mark as a managed tx so that the inner function cannot manually
+ // commit.
t.managed = true
- // If an error is returned from the function then rollback and return error.
+ // If an error is returned from the function then rollback and return
+ // error.
err = fn(t)
t.managed = false
if err != nil {
@@ -1878,10 +2012,11 @@ func (db *DB) Update(fn func(*Tx) error) error {
return t.Commit()
}
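
As a sketch, a typical DB.Update() call (again assuming Bucket.Put() from the
inherited Bolt API):

	func setGreeting(db *dedo.DB) error {
		return db.Update(func(tx *dedo.Tx) error {
			b, err := tx.CreateBucketIfNotExists([]byte("config"))
			if err != nil {
				return err
			}
			return b.Put([]byte("greeting"), []byte("hello"))
		})
	}
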
-// View executes a function within the context of a managed read-only transaction.
-// Any error that is returned from the function is returned from the View() method.
-//
-// Attempting to manually rollback within the function will cause a panic.
+/// DB.View() executes a function within the context of a managed read-only
+/// transaction. Any error that is returned from the function is returned from
+/// the DB.View() method.
+///
+/// Attempting to manually rollback within the function will cause a panic.
func (db *DB) View(fn func(*Tx) error) error {
t, err := db.Begin(false)
if err != nil {
@@ -1895,7 +2030,8 @@ func (db *DB) View(fn func(*Tx) error) error {
}
}()
- // Mark as a managed tx so that the inner function cannot manually rollback.
+ // Mark as a managed tx so that the inner function cannot manually
+ // rollback.
t.managed = true
// If an error is returned from the function then pass it through.
@@ -1906,40 +2042,48 @@ func (db *DB) View(fn func(*Tx) error) error {
return err
}
- if err := t.Rollback(); err != nil {
+ err = t.Rollback()
+ if err != nil {
return err
}
return nil
}
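
A DB.View() sketch; the copy matters because, per the caveats, values are only
valid for the life of the transaction (Bucket.Get() assumed from the inherited
Bolt API):

	func readGreeting(db *dedo.DB) ([]byte, error) {
		out := []byte(nil)
		err := db.View(func(tx *dedo.Tx) error {
			b := tx.Bucket([]byte("config"))
			if b == nil {
				return nil // nothing stored yet
			}
			v := b.Get([]byte("greeting"))
			out = append([]byte(nil), v...) // copy out of the mmap
			return nil
		})
		return out, err
	}
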
-// Batch calls fn as part of a batch. It behaves similar to Update,
-// except:
-//
-// 1. concurrent Batch calls can be combined into a single Bolt
-// transaction.
-//
-// 2. the function passed to Batch may be called multiple times,
-// regardless of whether it returns error or not.
-//
-// This means that Batch function side effects must be idempotent and
-// take permanent effect only after a successful return is seen in
-// caller.
-//
-// The maximum batch size and delay can be adjusted with DB.MaxBatchSize
-// and DB.MaxBatchDelay, respectively.
-//
-// Batch is only useful when there are multiple goroutines calling it.
+func needsNewBatch(batch *batch, max int) bool {
+	return batch == nil || len(batch.calls) >= max
+}
+
+/// DB.Batch() calls fn as part of a batch. It behaves similar to Update,
+/// except:
+///
+/// 1. concurrent DB.Batch() calls can be combined into a single Dedo
+/// transaction.
+///
+/// 2. the function passed to DB.Batch() may be called multiple times,
+/// regardless of whether it returns error or not.
+///
+/// This means that DB.Batch() function side effects must be idempotent and take
+/// permanent effect only after a successful return is seen in caller.
+///
+/// The maximum batch size and delay can be adjusted with DB.MaxBatchSize and
+/// DB.MaxBatchDelay, respectively.
+///
+/// DB.Batch() is only useful when there are multiple goroutines calling it.
func (db *DB) Batch(fn func(*Tx) error) error {
errCh := make(chan error, 1)
db.batchMu.Lock()
- if (db.batch == nil) || (db.batch != nil && len(db.batch.calls) >= db.MaxBatchSize) {
- // There is no existing batch, or the existing batch is full; start a new one.
+ if needsNewBatch(db.batch, db.MaxBatchSize) {
+ // There is no existing batch, or the existing batch is full;
+ // start a new one.
db.batch = &batch{
db: db,
}
- db.batch.timer = time.AfterFunc(db.MaxBatchDelay, db.batch.trigger)
+ db.batch.timer = time.AfterFunc(
+ db.MaxBatchDelay,
+ db.batch.trigger,
+ )
}
db.batch.calls = append(db.batch.calls, call{fn: fn, err: errCh})
if len(db.batch.calls) >= db.MaxBatchSize {
@@ -1955,13 +2099,13 @@ func (db *DB) Batch(fn func(*Tx) error) error {
return err
}
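
Because the function handed to DB.Batch() may run more than once, it should
only perform idempotent writes. A sketch (Bucket.Put() assumed):

	func markSeen(db *dedo.DB, userID []byte) error {
		return db.Batch(func(tx *dedo.Tx) error {
			b, err := tx.CreateBucketIfNotExists([]byte("seen"))
			if err != nil {
				return err
			}
			// Safe to re-run: same key, same value.
			return b.Put(userID, []byte{1})
		})
	}
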
-// trigger runs the batch if it hasn't already been run.
+/// batch.trigger() runs the batch if it hasn't already been run.
func (b *batch) trigger() {
b.start.Do(b.run)
}
-// run performs the transactions in the batch and communicates results
-// back to DB.Batch.
+/// batch.run() performs the transactions in the batch and communicates results
+/// back to DB.Batch.
func (b *batch) run() {
b.db.batchMu.Lock()
b.timer.Stop()
@@ -1974,10 +2118,11 @@ func (b *batch) run() {
retry:
for len(b.calls) > 0 {
- var failIdx = -1
+ failIdx := -1
err := b.db.Update(func(tx *Tx) error {
for i, c := range b.calls {
- if err := safelyCall(c.fn, tx); err != nil {
+ err := safelyCall(c.fn, tx)
+ if err != nil {
failIdx = i
return err
}
@@ -1986,17 +2131,20 @@ retry:
})
if failIdx >= 0 {
- // take the failing transaction out of the batch. it's
- // safe to shorten b.calls here because db.batch no longer
- // points to us, and we hold the mutex anyway.
+ // take the failing transaction out of the batch. it's
+ // safe to shorten b.calls here because db.batch no
+ // longer points to us, and we hold the mutex anyway.
c := b.calls[failIdx]
- b.calls[failIdx], b.calls = b.calls[len(b.calls)-1], b.calls[:len(b.calls)-1]
- // tell the submitter re-run it solo, continue with the rest of the batch
+ b.calls[failIdx], b.calls =
+ b.calls[ len(b.calls) - 1],
+ b.calls[:len(b.calls) - 1]
+			// tell the submitter to re-run it solo, continue
+			// with the
+ // rest of the batch
c.err <- trySolo
continue retry
}
- // pass success, or bolt internal errors, to all callers
+ // pass success, or dedo internal errors, to all callers
for _, c := range b.calls {
c.err <- err
}
@@ -2005,7 +2153,8 @@ retry:
}
func (p panicked) Error() string {
- if err, ok := p.reason.(error); ok {
+ err, ok := p.reason.(error)
+ if ok {
return err.Error()
}
return fmt.Sprintf("panic: %v", p.reason)
@@ -2013,29 +2162,32 @@ func (p panicked) Error() string {
func safelyCall(fn func(*Tx) error, tx *Tx) (err error) {
defer func() {
- if p := recover(); p != nil {
+ p := recover()
+ if p != nil {
err = panicked{p}
}
}()
return fn(tx)
}
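
The recover-to-error pattern used by safelyCall() also works as a standalone
helper; a generic sketch:

	import "fmt"

	func callSafely(fn func() error) (err error) {
		defer func() {
			p := recover()
			if p != nil {
				err = fmt.Errorf("panic: %v", p)
			}
		}()
		return fn()
	}
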
-// page retrieves a page reference from the mmap based on the current page size.
+/// db.page() retrieves a page reference from the mmap based on the current page
+/// size.
func (db *DB) page(id pgid) *page {
pos := id * pgid(db.pageSize)
return (*page)(unsafe.Pointer(&db.data[pos]))
}
-// pageInBuffer retrieves a page reference from a given byte array based on the current page size.
+/// pageInBuffer() retrieves a page reference from a given byte array based on
+/// the current page size.
func pageInBuffer(db *DB, b []byte, id pgid) *page {
return (*page)(unsafe.Pointer(&b[id*pgid(db.pageSize)]))
}
-// meta retrieves the current meta page reference.
+/// db.meta() retrieves the current meta page reference.
func (db *DB) meta() *meta {
// We have to return the meta with the highest txid which doesn't fail
- // validation. Otherwise, we can cause errors when in fact the database is
- // in a consistent state. metaA is the one with the higher txid.
+ // validation. Otherwise, we can cause errors when in fact the database
+ // is in a consistent state. metaA is the one with the higher txid.
metaA := db.meta0
metaB := db.meta1
if db.meta1.txid > db.meta0.txid {
@@ -2043,19 +2195,24 @@ func (db *DB) meta() *meta {
metaB = db.meta0
}
- // Use higher meta page if valid. Otherwise fallback to previous, if valid.
- if err := metaA.validate(); err == nil {
+ // Use higher meta page if valid. Otherwise fallback to previous, if
+ // valid.
+ err := metaA.validate()
+ if err == nil {
return metaA
- } else if err := metaB.validate(); err == nil {
+ }
+
+ err = metaB.validate()
+ if err == nil {
return metaB
}
- // This should never be reached, because both meta1 and meta0 were validated
- // on mmap() and we do fsync() on every write.
- panic("bolt.DB.meta(): invalid meta pages")
+ // This should never be reached, because both meta1 and meta0 were
+ // validated on mmap() and we do fsync() on every write.
+ panic("dedo.DB.meta(): invalid meta pages")
}
-// allocate returns a contiguous block of memory starting at a given page.
+/// db.allocate() returns a contiguous block of memory starting at a given page.
func (db *DB) allocate(count int) (*page, error) {
// Allocate a temporary buffer for the page.
var buf []byte
@@ -2068,15 +2225,17 @@ func (db *DB) allocate(count int) (*page, error) {
p.overflow = uint32(count - 1)
// Use pages from the freelist if they are available.
- if p.id = db.freelist.allocate(count); p.id != 0 {
+ p.id = db.freelist.allocate(count)
+ if p.id != 0 {
return p, nil
}
// Resize mmap() if we're at the end.
p.id = db.rwtx.meta.pgid
- var minsz = int((p.id+pgid(count))+1) * db.pageSize
+ minsz := int((p.id+pgid(count))+1) * db.pageSize
if minsz >= db.datasz {
- if err := db.mmap(minsz); err != nil {
+ err := db.mmap(minsz)
+ if err != nil {
return nil, fmt.Errorf("mmap allocate error: %s", err)
}
}
@@ -2087,15 +2246,16 @@ func (db *DB) allocate(count int) (*page, error) {
return p, nil
}
-// grow grows the size of the database to the given sz.
+/// db.grow() grows the size of the database to the given sz.
func (db *DB) grow(sz int) error {
// Ignore if the new size is less than available file size.
if sz <= db.filesz {
return nil
}
- // If the data is smaller than the alloc size then only allocate what's needed.
- // Once it goes over the allocation size then allocate in chunks.
+ // If the data is smaller than the alloc size then only allocate what's
+ // needed. Once it goes over the allocation size then allocate in
+ // chunks.
if db.datasz < db.AllocSize {
sz = db.datasz
} else {
@@ -2104,10 +2264,13 @@ func (db *DB) grow(sz int) error {
// Truncate and fsync to ensure file size metadata is flushed.
// https://github.com/boltdb/bolt/issues/284
- if err := db.file.Truncate(int64(sz)); err != nil {
+ err := db.file.Truncate(int64(sz))
+ if err != nil {
return fmt.Errorf("file resize error: %s", err)
}
- if err := db.file.Sync(); err != nil {
+
+ err = db.file.Sync()
+ if err != nil {
return fmt.Errorf("file sync error: %s", err)
}
@@ -2115,7 +2278,8 @@ func (db *DB) grow(sz int) error {
return nil
}
-// validate checks the marker bytes and version of the meta page to ensure it matches this binary.
+/// meta.validate() checks the marker bytes and version of the meta page to
+/// ensure it matches this binary.
func (m *meta) validate() error {
if m.magic != magic {
return ErrInvalid
@@ -2127,20 +2291,29 @@ func (m *meta) validate() error {
return nil
}
-// copy copies one meta object to another.
+/// meta.copy() copies one meta object to another.
func (m *meta) copy(dest *meta) {
*dest = *m
}
-// write writes the meta onto a page.
+/// meta.write() writes the meta onto a page.
func (m *meta) write(p *page) {
if m.root.root >= m.pgid {
- panic(fmt.Sprintf("root bucket pgid (%d) above high water mark (%d)", m.root.root, m.pgid))
+ panic(fmt.Sprintf(
+ "root bucket pgid (%d) above high water mark (%d)",
+ m.root.root,
+ m.pgid,
+ ))
} else if m.freelist >= m.pgid {
- panic(fmt.Sprintf("freelist pgid (%d) above high water mark (%d)", m.freelist, m.pgid))
+ panic(fmt.Sprintf(
+ "freelist pgid (%d) above high water mark (%d)",
+ m.freelist,
+ m.pgid,
+ ))
}
- // Page id is either going to be 0 or 1 which we can determine by the transaction ID.
+ // Page id is either going to be 0 or 1 which we can determine by the
+ // transaction ID.
p.id = pgid(m.txid % 2)
p.flags |= metaPageFlag
@@ -2150,66 +2323,16 @@ func (m *meta) write(p *page) {
m.copy(p.meta())
}
-// generates the checksum for the meta.
+/// meta.sum64() generates the checksum for the meta.
func (m *meta) sum64() uint64 {
- var h = fnv.New64a()
- _, _ = h.Write((*[unsafe.Offsetof(meta{}.checksum)]byte)(unsafe.Pointer(m))[:])
+ h := fnv.New64a()
+ _, _ = h.Write(
+ (*[unsafe.Offsetof(meta{}.checksum)]byte)(unsafe.Pointer(m))[:],
+ )
return h.Sum64()
}
-// _assert will panic with a given formatted message if the given condition is false.
-func _assert(condition bool, msg string, v ...interface{}) {
- if !condition {
- panic(fmt.Sprintf("assertion failed: "+msg, v...))
- }
-}
-
-
-/*
-Package bolt implements a low-level key/value store in pure Go. It supports
-fully serializable transactions, ACID semantics, and lock-free MVCC with
-multiple readers and a single writer. Bolt can be used for projects that
-want a simple data store without the need to add large dependencies such as
-Postgres or MySQL.
-
-Bolt is a single-level, zero-copy, B+tree data store. This means that Bolt is
-optimized for fast read access and does not require recovery in the event of a
-system crash. Transactions which have not finished committing will simply be
-rolled back in the event of a crash.
-
-The design of Bolt is based on Howard Chu's LMDB database project.
-
-Bolt currently works on Windows, Mac OS X, and Linux.
-
-
-Basics
-
-There are only a few types in Bolt: DB, Bucket, Tx, and Cursor. The DB is
-a collection of buckets and is represented by a single file on disk. A bucket is
-a collection of unique keys that are associated with values.
-
-Transactions provide either read-only or read-write access to the database.
-Read-only transactions can retrieve key/value pairs and can use Cursors to
-iterate over the dataset sequentially. Read-write transactions can create and
-delete buckets and can insert and remove keys. Only one read-write transaction
-is allowed at a time.
-
-
-Caveats
-
-The database uses a read-only, memory-mapped data file to ensure that
-applications cannot corrupt the database, however, this means that keys and
-values returned from Bolt cannot be changed. Writing to a read-only byte slice
-will cause Go to panic.
-
-Keys and values retrieved from the database are only valid for the life of
-the transaction. When used outside the transaction, these byte slices can
-point to different data or can point to invalid memory which will cause a panic.
-
-
-*/
-
-// newFreelist returns an empty, initialized freelist.
+/// newFreelist() returns an empty, initialized freelist.
func newFreelist() *freelist {
return &freelist{
pending: make(map[txid][]pgid),
@@ -2217,37 +2340,39 @@ func newFreelist() *freelist {
}
}
-// size returns the size of the page after serialization.
+/// freelist.size() returns the size of the page after serialization.
func (f *freelist) size() int {
n := f.count()
if n >= 0xFFFF {
- // The first element will be used to store the count. See freelist.write.
+ // The first element will be used to store the count. See
+ // freelist.write.
n++
}
return pageHeaderSize + (int(unsafe.Sizeof(pgid(0))) * n)
}
-// count returns count of pages on the freelist
+/// freelist.count() returns the count of pages on the freelist.
func (f *freelist) count() int {
return f.free_count() + f.pending_count()
}
-// free_count returns count of free pages
+/// freelist.free_count() returns the count of free pages.
func (f *freelist) free_count() int {
return len(f.ids)
}
-// pending_count returns count of pending pages
+/// freelist.pending_count() returns the count of pending pages.
func (f *freelist) pending_count() int {
- var count int
+ count := 0
for _, list := range f.pending {
count += len(list)
}
return count
}
-// copyall copies into dst a list of all free ids and all pending ids in one sorted list.
-// f.count returns the minimum length required for dst.
+/// freelist.copyall() copies into dst a list of all free ids and all pending
+/// ids in one sorted list.
+/// f.count returns the minimum length required for dst.
func (f *freelist) copyall(dst []pgid) {
m := make(pgids, 0, f.pending_count())
for _, list := range f.pending {
@@ -2257,14 +2382,16 @@ func (f *freelist) copyall(dst []pgid) {
mergepgids(dst, f.ids, m)
}
-// allocate returns the starting page id of a contiguous list of pages of a given size.
-// If a contiguous block cannot be found then 0 is returned.
+/// freelist.allocate() returns the starting page id of a contiguous list of
+/// pages of a given size. If a contiguous block cannot be found then 0 is
+/// returned.
func (f *freelist) allocate(n int) pgid {
if len(f.ids) == 0 {
return 0
}
- var initial, previd pgid
+ initial := pgid(0)
+ previd := pgid(0)
for i, id := range f.ids {
if id <= 1 {
panic(fmt.Sprintf("invalid page allocation: %d", id))
@@ -2277,10 +2404,10 @@ func (f *freelist) allocate(n int) pgid {
// If we found a contiguous block then remove it and return it.
if (id-initial)+1 == pgid(n) {
- // If we're allocating off the beginning then take the fast path
- // and just adjust the existing slice. This will use extra memory
- // temporarily but the append() in free() will realloc the slice
- // as is necessary.
+ // If we're allocating off the beginning then take the
+ // fast path and just adjust the existing slice. This
+ // will use extra memory temporarily but the append() in
+ // free() will realloc the slice as is necessary.
if (i + 1) == n {
f.ids = f.ids[i+1:]
} else {
@@ -2301,15 +2428,15 @@ func (f *freelist) allocate(n int) pgid {
return 0
}
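
The contiguous-run scan above can be illustrated standalone over a sorted id
list (uint64 stands in for pgid; this is a sketch, not the dedo
implementation):

	// findRun returns the first id that starts a run of n consecutive
	// ids, or 0 if no such run exists.
	func findRun(ids []uint64, n int) uint64 {
		initial := uint64(0)
		previd := uint64(0)
		for _, id := range ids {
			if previd == 0 || id-previd != 1 {
				initial = id // run broken; restart here
			}
			if (id-initial)+1 == uint64(n) {
				return initial
			}
			previd = id
		}
		return 0
	}

E.g. findRun([]uint64{3, 5, 6, 7}, 3) returns 5.
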
-// free releases a page and its overflow for a given transaction id.
-// If the page is already free then a panic will occur.
+/// freelist.free() releases a page and its overflow for a given transaction id.
+/// If the page is already free then a panic will occur.
func (f *freelist) free(txid txid, p *page) {
if p.id <= 1 {
panic(fmt.Sprintf("cannot free page 0 or 1: %d", p.id))
}
// Free page and all its overflow pages.
- var ids = f.pending[txid]
+ ids := f.pending[txid]
for id := p.id; id <= p.id+pgid(p.overflow); id++ {
// Verify that page is not already free.
if f.cache[id] {
@@ -2323,13 +2450,14 @@ func (f *freelist) free(txid txid, p *page) {
f.pending[txid] = ids
}
-// release moves all page ids for a transaction id (or older) to the freelist.
+/// freelist.release() moves all page ids for a transaction id (or older) to
+/// the freelist.
func (f *freelist) release(txid txid) {
m := make(pgids, 0)
for tid, ids := range f.pending {
if tid <= txid {
- // Move transaction's pending pages to the available freelist.
- // Don't remove from the cache since the page is still free.
+ // Move transaction's pending pages to the available
+ // freelist. Don't remove from the cache since the page
+ // is still free.
m = append(m, ids...)
delete(f.pending, tid)
}
@@ -2338,7 +2466,7 @@ func (f *freelist) release(txid txid) {
f.ids = pgids(f.ids).merge(m)
}
-// rollback removes the pages from a given pending tx.
+/// freelist.rollback() removes the pages from a given pending tx.
func (f *freelist) rollback(txid txid) {
// Remove page ids from cache.
for _, id := range f.pending[txid] {
@@ -2349,15 +2477,16 @@ func (f *freelist) rollback(txid txid) {
delete(f.pending, txid)
}
-// freed returns whether a given page is in the free list.
+/// freelist.freed() returns whether a given page is in the free list.
func (f *freelist) freed(pgid pgid) bool {
return f.cache[pgid]
}
-// read initializes the freelist from a freelist page.
+/// freelist.read() initializes the freelist from a freelist page.
func (f *freelist) read(p *page) {
- // If the page.count is at the max uint16 value (64k) then it's considered
- // an overflow and the size of the freelist is stored as the first element.
+ // If the page.count is at the max uint16 value (64k) then it's
+ // considered an overflow and the size of the freelist is stored as the
+ // first element.
idx, count := 0, int(p.count)
if count == 0xFFFF {
idx = 1
@@ -2368,7 +2497,7 @@ func (f *freelist) read(p *page) {
if count == 0 {
f.ids = nil
} else {
- ids := ((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[idx:count]
+ ids := (*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr))[idx:count]
f.ids = make([]pgid, len(ids))
copy(f.ids, ids)
@@ -2380,17 +2509,18 @@ func (f *freelist) read(p *page) {
f.reindex()
}
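
The count-overflow convention handled here (and mirrored by freelist.write()
below) can be summed up in a small sketch:

	// decodeCount mirrors the on-page convention: a page count of
	// 0xFFFF means the real count is stored as the first element.
	func decodeCount(count uint16, first uint64) (idx, n int) {
		if count == 0xFFFF {
			return 1, int(first)
		}
		return 0, int(count)
	}
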
-// write writes the page ids onto a freelist page. All free and pending ids are
-// saved to disk since in the event of a program crash, all pending ids will
-// become free.
+/// freelist.write() writes the page ids onto a freelist page. All free and
+/// pending ids are saved to disk since in the event of a program crash, all
+/// pending ids will become free.
func (f *freelist) write(p *page) error {
// Combine the old free pgids and pgids waiting on an open transaction.
// Update the header flag.
p.flags |= freelistPageFlag
- // The page.count can only hold up to 64k elements so if we overflow that
- // number then we handle it by putting the size in the first element.
+ // The page.count can only hold up to 64k elements so if we overflow
+ // that number then we handle it by putting the size in the first
+ // element.
lenids := f.count()
if lenids == 0 {
p.count = uint16(lenids)
@@ -2399,14 +2529,15 @@ func (f *freelist) write(p *page) error {
f.copyall(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[:])
} else {
p.count = 0xFFFF
- ((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[0] = pgid(lenids)
+ pageID := pgid(lenids)
+ ((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[0] = pageID
f.copyall(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[1:])
}
return nil
}
-// reload reads the freelist from a page and filters out pending items.
+/// freelist.reload() reads the freelist from a page and filters out pending
+/// items.
func (f *freelist) reload(p *page) {
f.read(p)
@@ -2420,7 +2551,7 @@ func (f *freelist) reload(p *page) {
// Check each page in the freelist and build a new available freelist
// with any pages not in the pending lists.
- var a []pgid
+ a := []pgid{}
for _, id := range f.ids {
if !pcache[id] {
a = append(a, id)
@@ -2428,12 +2559,13 @@ func (f *freelist) reload(p *page) {
}
f.ids = a
- // Once the available list is rebuilt then rebuild the free cache so that
- // it includes the available and pending free pages.
+ // Once the available list is rebuilt then rebuild the free cache so
+ // that it includes the available and pending free pages.
f.reindex()
}
-// reindex rebuilds the free cache based on available and pending free lists.
+/// freelist.reindex() rebuilds the free cache based on available and pending
+/// free lists.
func (f *freelist) reindex() {
f.cache = make(map[pgid]bool, len(f.ids))
for _, id := range f.ids {
@@ -2446,7 +2578,7 @@ func (f *freelist) reindex() {
}
}
-// root returns the top-level node this node is attached to.
+/// node.root() returns the top-level node this node is attached to.
func (n *node) root() *node {
if n.parent == nil {
return n
@@ -2454,7 +2586,7 @@ func (n *node) root() *node {
return n.parent.root()
}
-// minKeys returns the minimum number of inodes this node should have.
+/// node.minKeys() returns the minimum number of inodes this node should have.
func (n *node) minKeys() int {
if n.isLeaf {
return 1
@@ -2462,7 +2594,7 @@ func (n *node) minKeys() int {
return 2
}
-// size returns the size of the node after serialization.
+/// node.size() returns the size of the node after serialization.
func (n *node) size() int {
sz, elsz := pageHeaderSize, n.pageElementSize()
for i := 0; i < len(n.inodes); i++ {
@@ -2472,9 +2604,9 @@ func (n *node) size() int {
return sz
}
-// sizeLessThan returns true if the node is less than a given size.
-// This is an optimization to avoid calculating a large node when we only need
-// to know if it fits inside a certain page size.
+/// node.sizeLessThan() returns true if the node is less than a given size.
+/// This is an optimization to avoid calculating a large node when we only need
+/// to know if it fits inside a certain page size.
func (n *node) sizeLessThan(v int) bool {
sz, elsz := pageHeaderSize, n.pageElementSize()
for i := 0; i < len(n.inodes); i++ {
@@ -2487,15 +2619,17 @@ func (n *node) sizeLessThan(v int) bool {
return true
}
-// pageElementSize returns the size of each page element based on the type of node.
+/// node.pageElementSize() returns the size of each page element based on the
+/// type of node.
func (n *node) pageElementSize() int {
if n.isLeaf {
return leafPageElementSize
+ } else {
+ return branchPageElementSize
}
- return branchPageElementSize
}
-// childAt returns the child node at a given index.
+/// node.childAt() returns the child node at a given index.
func (n *node) childAt(index int) *node {
if n.isLeaf {
panic(fmt.Sprintf("invalid childAt(%d) on a leaf node", index))
@@ -2503,18 +2637,20 @@ func (n *node) childAt(index int) *node {
return n.bucket.node(n.inodes[index].pgid, n)
}
-// childIndex returns the index of a given child node.
+/// node.childIndex() returns the index of a given child node.
func (n *node) childIndex(child *node) int {
- index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, child.key) != -1 })
+ index := sort.Search(len(n.inodes), func(i int) bool {
+ return bytes.Compare(n.inodes[i].key, child.key) != -1
+ })
return index
}
-// numChildren returns the number of children.
+/// node.numChildren() returns the number of children.
func (n *node) numChildren() int {
return len(n.inodes)
}
-// nextSibling returns the next node with the same parent.
+/// node.nextSibling() returns the next node with the same parent.
func (n *node) nextSibling() *node {
if n.parent == nil {
return nil
@@ -2526,7 +2662,7 @@ func (n *node) nextSibling() *node {
return n.parent.childAt(index + 1)
}
-// prevSibling returns the previous node with the same parent.
+/// node.prevSibling() returns the previous node with the same parent.
func (n *node) prevSibling() *node {
if n.parent == nil {
return nil
@@ -2538,10 +2674,14 @@ func (n *node) prevSibling() *node {
return n.parent.childAt(index - 1)
}
-// put inserts a key/value.
+/// node.put() inserts a key/value.
func (n *node) put(oldKey, newKey, value []byte, pgid pgid, flags uint32) {
if pgid >= n.bucket.tx.meta.pgid {
- panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", pgid, n.bucket.tx.meta.pgid))
+ panic(fmt.Sprintf(
+ "pgid (%d) above high water mark (%d)",
+ pgid,
+ n.bucket.tx.meta.pgid,
+ ))
} else if len(oldKey) <= 0 {
panic("put: zero-length old key")
} else if len(newKey) <= 0 {
@@ -2549,10 +2689,16 @@ func (n *node) put(oldKey, newKey, value []byte, pgid pgid, flags uint32) {
}
// Find insertion index.
- index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, oldKey) != -1 })
+ index := sort.Search(len(n.inodes), func(i int) bool {
+ return bytes.Compare(n.inodes[i].key, oldKey) != -1
+ })
- // Add capacity and shift nodes if we don't have an exact match and need to insert.
- exact := (len(n.inodes) > 0 && index < len(n.inodes) && bytes.Equal(n.inodes[index].key, oldKey))
+ // Add capacity and shift nodes if we don't have an exact match and need
+ // to insert.
+ exact :=
+ len(n.inodes) > 0 &&
+ index < len(n.inodes) &&
+ bytes.Equal(n.inodes[index].key, oldKey)
if !exact {
n.inodes = append(n.inodes, inode{})
copy(n.inodes[index+1:], n.inodes[index:])
@@ -2563,13 +2709,15 @@ func (n *node) put(oldKey, newKey, value []byte, pgid pgid, flags uint32) {
inode.key = newKey
inode.value = value
inode.pgid = pgid
- _assert(len(inode.key) > 0, "put: zero-length inode key")
+ g.Assert(len(inode.key) > 0, "put: zero-length inode key")
}
-// del removes a key from the node.
+/// node.del() removes a key from the node.
func (n *node) del(key []byte) {
// Find index of key.
- index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, key) != -1 })
+ index := sort.Search(len(n.inodes), func(i int) bool {
+ return bytes.Compare(n.inodes[i].key, key) != -1
+ })
// Exit if the key isn't found.
if index >= len(n.inodes) || !bytes.Equal(n.inodes[index].key, key) {
@@ -2583,7 +2731,7 @@ func (n *node) del(key []byte) {
n.unbalanced = true
}
-// read initializes the node from a page.
+/// node.read() initializes the node from a page.
func (n *node) read(p *page) {
n.pgid = p.id
n.isLeaf = ((p.flags & leafPageFlag) != 0)
@@ -2601,19 +2749,23 @@ func (n *node) read(p *page) {
inode.pgid = elem.pgid
inode.key = elem.key()
}
- _assert(len(inode.key) > 0, "read: zero-length inode key")
+ g.Assert(len(inode.key) > 0, "read: zero-length inode key")
}
// Save first key so we can find the node in the parent when we spill.
if len(n.inodes) > 0 {
n.key = n.inodes[0].key
- _assert(len(n.key) > 0, "read: zero-length node key")
+ g.Assert(len(n.key) > 0, "read: zero-length node key")
} else {
n.key = nil
}
}
-// write writes the items onto one or more pages.
+func u32position(a unsafe.Pointer, b unsafe.Pointer) uint32 {
+ return uint32(uintptr(a) - uintptr(b))
+}
+
+/// node.write() writes the items onto one or more pages.
func (n *node) write(p *page) {
// Initialize page.
if n.isLeaf {
@@ -2623,7 +2775,11 @@ func (n *node) write(p *page) {
}
if len(n.inodes) >= 0xFFFF {
- panic(fmt.Sprintf("inode overflow: %d (pgid=%d)", len(n.inodes), p.id))
+ panic(fmt.Sprintf(
+ "inode overflow: %d (pgid=%d)",
+ len(n.inodes),
+ p.id,
+ ))
}
p.count = uint16(len(n.inodes))
@@ -2633,27 +2789,37 @@ func (n *node) write(p *page) {
}
// Loop over each item and write it to the page.
- b := (*[maxAllocSize]byte)(unsafe.Pointer(&p.ptr))[n.pageElementSize()*len(n.inodes):]
+ nth := n.pageElementSize() * len(n.inodes)
+ b := (*[maxAllocSize]byte)(unsafe.Pointer(&p.ptr))[nth:]
for i, item := range n.inodes {
- _assert(len(item.key) > 0, "write: zero-length inode key")
+ g.Assert(len(item.key) > 0, "write: zero-length inode key")
// Write the page element.
if n.isLeaf {
elem := p.leafPageElement(uint16(i))
- elem.pos = uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem)))
+ elem.pos = u32position(
+ unsafe.Pointer(&b[0]),
+ unsafe.Pointer(elem),
+ )
elem.flags = item.flags
elem.ksize = uint32(len(item.key))
elem.vsize = uint32(len(item.value))
} else {
elem := p.branchPageElement(uint16(i))
- elem.pos = uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem)))
+ elem.pos = u32position(
+ unsafe.Pointer(&b[0]),
+ unsafe.Pointer(elem),
+ )
elem.ksize = uint32(len(item.key))
elem.pgid = item.pgid
- _assert(elem.pgid != p.id, "write: circular dependency occurred")
+ g.Assert(
+ elem.pgid != p.id,
+ "write: circular dependency occurred",
+ )
}
- // If the length of key+value is larger than the max allocation size
- // then we need to reallocate the byte array pointer.
+ // If the length of key+value is larger than the max allocation
+ // size then we need to reallocate the byte array pointer.
//
// See: https://github.com/boltdb/bolt/pull/335
klen, vlen := len(item.key), len(item.value)
@@ -2667,15 +2833,12 @@ func (n *node) write(p *page) {
copy(b[0:], item.value)
b = b[vlen:]
}
-
- // DEBUG ONLY: n.dump()
}
-// split breaks up a node into multiple smaller nodes, if appropriate.
-// This should only be called from the spill() function.
+/// node.split() breaks up a node into multiple smaller nodes, if appropriate.
+/// This should only be called from the spill() function.
func (n *node) split(pageSize int) []*node {
- var nodes []*node
-
+ nodes := []*node{}
node := n
for {
// Split node into two.
@@ -2694,8 +2857,8 @@ func (n *node) split(pageSize int) []*node {
return nodes
}
-// splitTwo breaks up a node into two smaller nodes, if appropriate.
-// This should only be called from the split() function.
+/// node.splitTwo() breaks up a node into two smaller nodes, if appropriate.
+/// This should only be called from the split() function.
func (n *node) splitTwo(pageSize int) (*node, *node) {
const fillPercent = 0.5
@@ -2727,20 +2890,25 @@ func (n *node) splitTwo(pageSize int) (*node, *node) {
return n, next
}
-// splitIndex finds the position where a page will fill a given threshold.
-// It returns the index as well as the size of the first page.
-// This is only be called from split().
+/// node.splitIndex() finds the position where a page will fill a given
+/// threshold. It returns the index as well as the size of the first page.
+/// This should only be called from split().
func (n *node) splitIndex(threshold int) (index, sz int) {
sz = pageHeaderSize
- // Loop until we only have the minimum number of keys required for the second page.
+ // Loop until we only have the minimum number of keys required for the
+ // second page.
for i := 0; i < len(n.inodes)-minKeysPerPage; i++ {
index = i
inode := n.inodes[i]
- elsize := n.pageElementSize() + len(inode.key) + len(inode.value)
-
- // If we have at least the minimum number of keys and adding another
- // node would put us over the threshold then exit and return.
+ elsize :=
+ n.pageElementSize() +
+ len(inode.key) +
+ len(inode.value)
+
+ // If we have at least the minimum number of keys and adding
+ // another node would put us over the threshold then exit and
+ // return.
if i >= minKeysPerPage && sz+elsize > threshold {
break
}
@@ -2752,29 +2920,31 @@ func (n *node) splitIndex(threshold int) (index, sz int) {
return
}
-// spill writes the nodes to dirty pages and splits nodes as it goes.
-// Returns an error if dirty pages cannot be allocated.
+/// node.spill() writes the nodes to dirty pages and splits nodes as it goes.
+/// Returns an error if dirty pages cannot be allocated.
func (n *node) spill() error {
- var tx = n.bucket.tx
+ tx := n.bucket.tx
if n.spilled {
return nil
}
- // Spill child nodes first. Child nodes can materialize sibling nodes in
- // the case of split-merge so we cannot use a range loop. We have to check
- // the children size on every loop iteration.
+ // Spill child nodes first. Child nodes can materialize sibling nodes
+ // in the case of split-merge so we cannot use a range loop. We have to
+ // check the children size on every loop iteration.
sort.Sort(n.children)
for i := 0; i < len(n.children); i++ {
- if err := n.children[i].spill(); err != nil {
+ err := n.children[i].spill()
+ if err != nil {
return err
}
}
- // We no longer need the child list because it's only used for spill tracking.
+ // We no longer need the child list because it's only used for spill
+ // tracking.
n.children = nil
- // Split nodes into appropriate sizes. The first node will always be n.
- var nodes = n.split(tx.db.pageSize)
+ // Split nodes into appropriate sizes. The first node will always be n.
+ nodes := n.split(tx.db.pageSize)
for _, node := range nodes {
// Add node's page to the freelist if it's not new.
if node.pgid > 0 {
@@ -2790,7 +2960,11 @@ func (n *node) spill() error {
// Write the node.
if p.id >= tx.meta.pgid {
- panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", p.id, tx.meta.pgid))
+ panic(fmt.Sprintf(
+ "pgid (%d) above high water mark (%d)",
+ p.id,
+ tx.meta.pgid,
+ ))
}
node.pgid = p.id
node.write(p)
@@ -2798,19 +2972,29 @@ func (n *node) spill() error {
// Insert into parent inodes.
if node.parent != nil {
- var key = node.key
+ key := node.key
if key == nil {
key = node.inodes[0].key
}
- node.parent.put(key, node.inodes[0].key, nil, node.pgid, 0)
+ node.parent.put(
+ key,
+ node.inodes[0].key,
+ nil,
+ node.pgid,
+ 0,
+ )
node.key = node.inodes[0].key
- _assert(len(node.key) > 0, "spill: zero-length node key")
+ g.Assert(
+ len(node.key) > 0,
+ "spill: zero-length node key",
+ )
}
}
- // If the root node split and created a new root then we need to spill that
- // as well. We'll clear out the children to make sure it doesn't try to respill.
+ // If the root node split and created a new root then we need to spill
+ // that as well. We'll clear out the children to make sure it doesn't
+ // try to respill.
if n.parent != nil && n.parent.pgid == 0 {
n.children = nil
return n.parent.spill()
@@ -2819,8 +3003,8 @@ func (n *node) spill() error {
return nil
}
-// rebalance attempts to combine the node with sibling nodes if the node fill
-// size is below a threshold or if there are not enough keys.
+/// node.rebalance() attempts to combine the node with sibling nodes if the node
+/// fill size is below a threshold or if there are not enough keys.
func (n *node) rebalance() {
if !n.unbalanced {
return
@@ -2828,14 +3012,15 @@ func (n *node) rebalance() {
n.unbalanced = false
// Ignore if node is above threshold (25%) and has enough keys.
- var threshold = n.bucket.tx.db.pageSize / 4
+ threshold := n.bucket.tx.db.pageSize / 4
if n.size() > threshold && len(n.inodes) > n.minKeys() {
return
}
// Root node has special handling.
if n.parent == nil {
- // If root node is a branch and only has one node then collapse it.
+ // If root node is a branch and only has one node then collapse
+ // it.
if !n.isLeaf && len(n.inodes) == 1 {
// Move root's child up.
child := n.bucket.node(n.inodes[0].pgid, n)
@@ -2845,7 +3030,8 @@ func (n *node) rebalance() {
// Reparent all child nodes being moved.
for _, inode := range n.inodes {
- if child, ok := n.bucket.nodes[inode.pgid]; ok {
+ child, ok := n.bucket.nodes[inode.pgid]
+ if ok {
child.parent = n
}
}
@@ -2869,11 +3055,15 @@ func (n *node) rebalance() {
return
}
- _assert(n.parent.numChildren() > 1, "parent must have at least 2 children")
+ g.Assert(
+ n.parent.numChildren() > 1,
+ "parent must have at least 2 children",
+ )
- // Destination node is right sibling if idx == 0, otherwise left sibling.
+ // Destination node is right sibling if idx == 0, otherwise left
+ // sibling.
var target *node
- var useNextSibling = (n.parent.childIndex(n) == 0)
+ useNextSibling := n.parent.childIndex(n) == 0
if useNextSibling {
target = n.nextSibling()
} else {
@@ -2884,10 +3074,14 @@ func (n *node) rebalance() {
if useNextSibling {
// Reparent all child nodes being moved.
for _, inode := range target.inodes {
- if child, ok := n.bucket.nodes[inode.pgid]; ok {
+ child, ok := n.bucket.nodes[inode.pgid]
+ if ok {
child.parent.removeChild(child)
child.parent = n
- child.parent.children = append(child.parent.children, child)
+ child.parent.children = append(
+ child.parent.children,
+ child,
+ )
}
}
@@ -2900,10 +3094,14 @@ func (n *node) rebalance() {
} else {
// Reparent all child nodes being moved.
for _, inode := range n.inodes {
- if child, ok := n.bucket.nodes[inode.pgid]; ok {
+ child, ok := n.bucket.nodes[inode.pgid]
+ if ok {
child.parent.removeChild(child)
child.parent = target
- child.parent.children = append(child.parent.children, child)
+ child.parent.children = append(
+ child.parent.children,
+ child,
+ )
}
}
@@ -2915,12 +3113,13 @@ func (n *node) rebalance() {
n.free()
}
- // Either this node or the target node was deleted from the parent so rebalance it.
+ // Either this node or the target node was deleted from the parent so
+ // rebalance it.
n.parent.rebalance()
}
-// removes a node from the list of in-memory children.
-// This does not affect the inodes.
+/// node.removeChild() removes a node from the list of in-memory children. This
+/// does not affect the inodes.
func (n *node) removeChild(target *node) {
for i, child := range n.children {
if child == target {
@@ -2930,14 +3129,18 @@ func (n *node) removeChild(target *node) {
}
}
-// dereference causes the node to copy all its inode key/value references to heap memory.
-// This is required when the mmap is reallocated so inodes are not pointing to stale data.
+/// node.dereference() causes the node to copy all its inode key/value
+/// references to heap memory. This is required when the mmap is reallocated so
+/// inodes are not pointing to stale data.
func (n *node) dereference() {
if n.key != nil {
key := make([]byte, len(n.key))
copy(key, n.key)
n.key = key
- _assert(n.pgid == 0 || len(n.key) > 0, "dereference: zero-length node key on existing node")
+ g.Assert(
+ n.pgid == 0 || len(n.key) > 0,
+ "dereference: zero-length node key on existing node",
+ )
}
for i := range n.inodes {
@@ -2946,7 +3149,10 @@ func (n *node) dereference() {
key := make([]byte, len(inode.key))
copy(key, inode.key)
inode.key = key
- _assert(len(inode.key) > 0, "dereference: zero-length inode key")
+ g.Assert(
+ len(inode.key) > 0,
+ "dereference: zero-length inode key",
+ )
value := make([]byte, len(inode.value))
copy(value, inode.value)
@@ -2959,19 +3165,30 @@ func (n *node) dereference() {
}
}
-// free adds the node's underlying page to the freelist.
+/// node.free() adds the node's underlying page to the freelist.
func (n *node) free() {
if n.pgid != 0 {
- n.bucket.tx.db.freelist.free(n.bucket.tx.meta.txid, n.bucket.tx.page(n.pgid))
+ n.bucket.tx.db.freelist.free(
+ n.bucket.tx.meta.txid,
+ n.bucket.tx.page(n.pgid),
+ )
n.pgid = 0
}
}
-func (s nodes) Len() int { return len(s) }
-func (s nodes) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
-func (s nodes) Less(i, j int) bool { return bytes.Compare(s[i].inodes[0].key, s[j].inodes[0].key) == -1 }
+func (s nodes) Len() int {
+ return len(s)
+}
+
+func (s nodes) Swap(i, j int) {
+ s[i], s[j] = s[j], s[i]
+}
+
+func (s nodes) Less(i, j int) bool {
+ return bytes.Compare(s[i].inodes[0].key, s[j].inodes[0].key) == -1
+}
-// typ returns a human readable page type string used for debugging.
+/// page.typ() returns a human-readable page type string used for debugging.
func (p *page) typ() string {
if (p.flags & branchPageFlag) != 0 {
return "branch"
@@ -2985,18 +3202,18 @@ func (p *page) typ() string {
return fmt.Sprintf("unknown<%02x>", p.flags)
}
-// meta returns a pointer to the metadata section of the page.
+/// page.meta() returns a pointer to the metadata section of the page.
func (p *page) meta() *meta {
return (*meta)(unsafe.Pointer(&p.ptr))
}
-// leafPageElement retrieves the leaf node by index
+/// page.leafPageElement() retrieves the leaf node by index
func (p *page) leafPageElement(index uint16) *leafPageElement {
n := &((*[0x7FFFFFF]leafPageElement)(unsafe.Pointer(&p.ptr)))[index]
return n
}
-// leafPageElements retrieves a list of leaf nodes.
+/// page.leafPageElements() retrieves a list of leaf nodes.
func (p *page) leafPageElements() []leafPageElement {
if p.count == 0 {
return nil
@@ -3004,12 +3221,12 @@ func (p *page) leafPageElements() []leafPageElement {
return ((*[0x7FFFFFF]leafPageElement)(unsafe.Pointer(&p.ptr)))[:]
}
-// branchPageElement retrieves the branch node by index
+/// page.branchPageElement() retrieves the branch node by index
func (p *page) branchPageElement(index uint16) *branchPageElement {
return &((*[0x7FFFFFF]branchPageElement)(unsafe.Pointer(&p.ptr)))[index]
}
-// branchPageElements retrieves a list of branch nodes.
+/// page.branchPageElements() retrieves a list of branch nodes.
func (p *page) branchPageElements() []branchPageElement {
if p.count == 0 {
return nil
@@ -3017,33 +3234,53 @@ func (p *page) branchPageElements() []branchPageElement {
return ((*[0x7FFFFFF]branchPageElement)(unsafe.Pointer(&p.ptr)))[:]
}
-func (s pages) Len() int { return len(s) }
-func (s pages) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
-func (s pages) Less(i, j int) bool { return s[i].id < s[j].id }
+func (s pages) Len() int {
+ return len(s)
+}
+
+func (s pages) Swap(i, j int) {
+ s[i], s[j] = s[j], s[i]
+}
+
+func (s pages) Less(i, j int) bool {
+ return s[i].id < s[j].id
+}
-// key returns a byte slice of the node key.
+/// branchPageElement.key() returns a byte slice of the node key.
func (n *branchPageElement) key() []byte {
buf := (*[maxAllocSize]byte)(unsafe.Pointer(n))
return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos]))[:n.ksize]
}
-// key returns a byte slice of the node key.
+/// leafPageElement.key() returns a byte slice of the node key.
func (n *leafPageElement) key() []byte {
buf := (*[maxAllocSize]byte)(unsafe.Pointer(n))
- return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos]))[:n.ksize:n.ksize]
+ return (*[maxAllocSize]byte)(
+ unsafe.Pointer(&buf[n.pos]),
+ )[:n.ksize:n.ksize]
}
-// value returns a byte slice of the node value.
+/// leafPageElement.value() returns a byte slice of the node value.
func (n *leafPageElement) value() []byte {
buf := (*[maxAllocSize]byte)(unsafe.Pointer(n))
- return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos+n.ksize]))[:n.vsize:n.vsize]
+ return (*[maxAllocSize]byte)(
+ unsafe.Pointer(&buf[n.pos+n.ksize]),
+ )[:n.vsize:n.vsize]
}
-func (s pgids) Len() int { return len(s) }
-func (s pgids) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
-func (s pgids) Less(i, j int) bool { return s[i] < s[j] }
+func (s pgids) Len() int {
+ return len(s)
+}
-// merge returns the sorted union of a and b.
+func (s pgids) Swap(i, j int) {
+ s[i], s[j] = s[j], s[i]
+}
+
+func (s pgids) Less(i, j int) bool {
+ return s[i] < s[j]
+}
+
+/// pgids.merge() returns the sorted union of a and b.
func (a pgids) merge(b pgids) pgids {
// Return the opposite slice if one is nil.
if len(a) == 0 {
@@ -3057,11 +3294,16 @@ func (a pgids) merge(b pgids) pgids {
return merged
}
-// mergepgids copies the sorted union of a and b into dst.
-// If dst is too small, it panics.
+/// mergepgids() copies the sorted union of a and b into dst. If dst is too
+/// small, it panics.
func mergepgids(dst, a, b pgids) {
if len(dst) < len(a)+len(b) {
- panic(fmt.Errorf("mergepgids bad len %d < %d + %d", len(dst), len(a), len(b)))
+ panic(fmt.Errorf(
+ "mergepgids bad len %d < %d + %d",
+ len(dst),
+ len(a),
+ len(b),
+ ))
}
// Copy in the opposite slice if one is nil.
if len(a) == 0 {
@@ -3076,7 +3318,8 @@ func mergepgids(dst, a, b pgids) {
// Merged will hold all elements from both lists.
merged := dst[:0]
- // Assign lead to the slice with a lower starting value, follow to the higher value.
+ // Assign lead to the slice with a lower starting value, follow to the
+ // higher value.
lead, follow := a, b
if b[0] < a[0] {
lead, follow = b, a
@@ -3085,7 +3328,9 @@ func mergepgids(dst, a, b pgids) {
// Continue while there are elements in the lead.
for len(lead) > 0 {
// Merge largest prefix of lead that is ahead of follow[0].
- n := sort.Search(len(lead), func(i int) bool { return lead[i] > follow[0] })
+ n := sort.Search(len(lead), func(i int) bool {
+ return lead[i] > follow[0]
+ })
merged = append(merged, lead[:n]...)
if n >= len(lead) {
break
@@ -3099,7 +3344,7 @@ func mergepgids(dst, a, b pgids) {
_ = append(merged, follow...)
}
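
A worked example of the merge:

	a := pgids{1, 3, 9}
	b := pgids{2, 4}
	merged := a.merge(b) // pgids{1, 2, 3, 4, 9}
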
-// init initializes the transaction.
+/// Tx.init() initializes the transaction.
func (tx *Tx) init(db *DB) {
tx.db = db
tx.pages = nil
@@ -3113,90 +3358,96 @@ func (tx *Tx) init(db *DB) {
tx.root.ref = &bucket{}
*tx.root.ref = tx.meta.root
- // Increment the transaction id and add a page cache for writable transactions.
+ // Increment the transaction id and add a page cache for writable
+ // transactions.
if tx.writable {
tx.pages = make(map[pgid]*page)
tx.meta.txid += txid(1)
}
}
-// ID returns the transaction id.
+/// Tx.ID() returns the transaction id.
func (tx *Tx) ID() int {
return int(tx.meta.txid)
}
-// DB returns a reference to the database that created the transaction.
+/// Tx.DB() returns a reference to the database that created the transaction.
func (tx *Tx) DB() *DB {
return tx.db
}
-// Size returns current database size in bytes as seen by this transaction.
+/// Tx.Size() returns the current database size in bytes as seen by this
+/// transaction.
func (tx *Tx) Size() int64 {
return int64(tx.meta.pgid) * int64(tx.db.pageSize)
}
-// Writable returns whether the transaction can perform write operations.
+/// Tx.Writable() returns whether the transaction can perform write operations.
func (tx *Tx) Writable() bool {
return tx.writable
}
-// Cursor creates a cursor associated with the root bucket.
-// All items in the cursor will return a nil value because all root bucket keys point to buckets.
-// The cursor is only valid as long as the transaction is open.
-// Do not use a cursor after the transaction is closed.
+/// Tx.Cursor() creates a cursor associated with the root bucket. All items in
+/// the cursor will return a nil value because all root bucket keys point to
+/// buckets. The cursor is only valid as long as the transaction is open. Do
+/// not use a cursor after the transaction is closed.
func (tx *Tx) Cursor() *Cursor {
return tx.root.Cursor()
}
-// Bucket retrieves a bucket by name.
-// Returns nil if the bucket does not exist.
-// The bucket instance is only valid for the lifetime of the transaction.
+/// Tx.Bucket() retrieves a bucket by name. Returns nil if the bucket does not
+/// exist. The bucket instance is only valid for the lifetime of the
+/// transaction.
func (tx *Tx) Bucket(name []byte) *Bucket {
return tx.root.Bucket(name)
}
-// CreateBucket creates a new bucket.
-// Returns an error if the bucket already exists, if the bucket name is blank, or if the bucket name is too long.
-// The bucket instance is only valid for the lifetime of the transaction.
+/// Tx.CreateBucket() creates a new bucket. Returns an error if the bucket
+/// already exists, if the bucket name is blank, or if the bucket name is too
+/// long. The bucket instance is only valid for the lifetime of the
+/// transaction.
func (tx *Tx) CreateBucket(name []byte) (*Bucket, error) {
return tx.root.CreateBucket(name)
}
-// CreateBucketIfNotExists creates a new bucket if it doesn't already exist.
-// Returns an error if the bucket name is blank, or if the bucket name is too long.
-// The bucket instance is only valid for the lifetime of the transaction.
+/// Tx.CreateBucketIfNotExists() creates a new bucket if it doesn't already
+/// exist. Returns an error if the bucket name is blank, or if the bucket name
+/// is too long. The bucket instance is only valid for the lifetime of the
+/// transaction.
func (tx *Tx) CreateBucketIfNotExists(name []byte) (*Bucket, error) {
return tx.root.CreateBucketIfNotExists(name)
}
-// DeleteBucket deletes a bucket.
-// Returns an error if the bucket cannot be found or if the key represents a non-bucket value.
+/// Tx.DeleteBucket() deletes a bucket. Returns an error if the bucket cannot
+/// be found or if the key represents a non-bucket value.
func (tx *Tx) DeleteBucket(name []byte) error {
return tx.root.DeleteBucket(name)
}
-// ForEach executes a function for each bucket in the root.
-// If the provided function returns an error then the iteration is stopped and
-// the error is returned to the caller.
+/// Tx.ForEach() executes a function for each bucket in the root. If the
+/// provided function returns an error then the iteration is stopped and the
+/// error is returned to the caller.
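+///
+/// A usage sketch (illustrative only):
+///
+///	err := db.View(func(tx *Tx) error {
+///		return tx.ForEach(func(name []byte, b *Bucket) error {
+///			fmt.Printf("bucket: %s\n", name)
+///			return nil
+///		})
+///	})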
func (tx *Tx) ForEach(fn func(name []byte, b *Bucket) error) error {
return tx.root.ForEach(func(k, v []byte) error {
- if err := fn(k, tx.root.Bucket(k)); err != nil {
+ err := fn(k, tx.root.Bucket(k))
+ if err != nil {
return err
}
return nil
})
}
-// OnCommit adds a handler function to be executed after the transaction successfully commits.
+/// Tx.OnCommit() adds a handler function to be executed after the transaction
+/// successfully commits.
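+///
+/// A registration sketch (illustrative only):
+///
+///	err := db.Update(func(tx *Tx) error {
+///		tx.OnCommit(func() {
+///			log.Println("transaction committed")
+///		})
+///		return nil
+///	})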
func (tx *Tx) OnCommit(fn func()) {
tx.commitHandlers = append(tx.commitHandlers, fn)
}
-// Commit writes all changes to disk and updates the meta page.
-// Returns an error if a disk write error occurs, or if Commit is
-// called on a read-only transaction.
+/// Tx.Commit() writes all changes to disk and updates the meta page. Returns
+/// an error if a disk write error occurs, or if Tx.Commit() is called on a
+/// read-only transaction.
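+///
+/// A manual-transaction sketch (illustrative; DB.Begin(writable bool) is
+/// assumed from the Bolt API this code derives from; managed transactions
+/// via DB.Update() call Commit() and Rollback() automatically):
+///
+///	tx, err := db.Begin(true)
+///	if err != nil {
+///		return err
+///	}
+///	_, err = tx.CreateBucketIfNotExists([]byte("widgets"))
+///	if err != nil {
+///		tx.Rollback()
+///		return err
+///	}
+///	return tx.Commit()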
func (tx *Tx) Commit() error {
- _assert(!tx.managed, "managed tx commit not allowed")
+ g.Assert(!tx.managed, "managed tx commit not allowed")
if tx.db == nil {
return ErrTxClosed
} else if !tx.writable {
@@ -3209,7 +3460,8 @@ func (tx *Tx) Commit() error {
tx.root.rebalance()
// spill data onto dirty pages.
- if err := tx.root.spill(); err != nil {
+ err := tx.root.spill()
+ if err != nil {
tx.rollback()
return err
}
@@ -3219,30 +3471,36 @@ func (tx *Tx) Commit() error {
opgid := tx.meta.pgid
- // Free the freelist and allocate new pages for it. This will overestimate
- // the size of the freelist but not underestimate the size (which would be bad).
+ // Free the freelist and allocate new pages for it. This will
+ // overestimate the size of the freelist but not underestimate the size
+ // (which would be bad).
tx.db.freelist.free(tx.meta.txid, tx.db.page(tx.meta.freelist))
p, err := tx.allocate((tx.db.freelist.size() / tx.db.pageSize) + 1)
if err != nil {
tx.rollback()
return err
}
- if err := tx.db.freelist.write(p); err != nil {
+
+ err = tx.db.freelist.write(p)
+ if err != nil {
tx.rollback()
return err
}
tx.meta.freelist = p.id
- // If the high water mark has moved up then attempt to grow the database.
+ // If the high water mark has moved up then attempt to grow the
+ // database.
if tx.meta.pgid > opgid {
- if err := tx.db.grow(int(tx.meta.pgid+1) * tx.db.pageSize); err != nil {
+ err := tx.db.grow(int(tx.meta.pgid + 1) * tx.db.pageSize)
+ if err != nil {
tx.rollback()
return err
}
}
// Write dirty pages to disk.
- if err := tx.write(); err != nil {
+ err = tx.write()
+ if err != nil {
tx.rollback()
return err
}
@@ -3251,7 +3509,7 @@ func (tx *Tx) Commit() error {
// Only the first consistency error is reported in the panic.
if tx.db.StrictMode {
ch := tx.Check()
- var errs []string
+ errs := []string{}
for {
err, ok := <-ch
if !ok {
@@ -3265,7 +3523,8 @@ func (tx *Tx) Commit() error {
}
// Write meta to disk.
- if err := tx.writeMeta(); err != nil {
+ err = tx.writeMeta()
+ if err != nil {
tx.rollback()
return err
}
@@ -3281,10 +3540,10 @@ func (tx *Tx) Commit() error {
return nil
}
-// Rollback closes the transaction and ignores all previous updates. Read-only
-// transactions must be rolled back and not committed.
+/// Tx.Rollback() closes the transaction and ignores all previous updates.
+/// Read-only transactions must be rolled back and not committed.
func (tx *Tx) Rollback() error {
- _assert(!tx.managed, "managed tx rollback not allowed")
+ g.Assert(!tx.managed, "managed tx rollback not allowed")
if tx.db == nil {
return ErrTxClosed
}
@@ -3322,8 +3581,8 @@ func (tx *Tx) close() {
tx.pages = nil
}
-// WriteTo writes the entire database to a writer.
-// If err == nil then exactly tx.Size() bytes will be written into the writer.
+/// Tx.WriteTo() writes the entire database to a writer. If err == nil then
+/// exactly tx.Size() bytes will be written into the writer.
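+///
+/// A backup sketch (illustrative only; the file name is hypothetical):
+///
+///	err := db.View(func(tx *Tx) error {
+///		f, err := os.Create("backup.db")
+///		if err != nil {
+///			return err
+///		}
+///		defer f.Close()
+///		_, err = tx.WriteTo(f)
+///		return err
+///	})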
func (tx *Tx) WriteTo(w io.Writer) (n int64, err error) {
f, err := os.OpenFile(tx.db.path, os.O_RDONLY, 0)
if err != nil {
@@ -3331,7 +3590,7 @@ func (tx *Tx) WriteTo(w io.Writer) (n int64, err error) {
}
defer f.Close()
- // Generate a meta page. We use the same page data for both meta pages.
+ // Generate a meta page. We use the same page data for both meta pages.
buf := make([]byte, tx.db.pageSize)
page := (*page)(unsafe.Pointer(&buf[0]))
page.flags = metaPageFlag
@@ -3357,7 +3616,8 @@ func (tx *Tx) WriteTo(w io.Writer) (n int64, err error) {
}
// Move past the meta pages in the file.
- if _, err := f.Seek(int64(tx.db.pageSize*2), os.SEEK_SET); err != nil {
+ _, err = f.Seek(int64(tx.db.pageSize*2), os.SEEK_SET)
+ if err != nil {
return n, fmt.Errorf("seek: %s", err)
}
@@ -3371,9 +3631,9 @@ func (tx *Tx) WriteTo(w io.Writer) (n int64, err error) {
return n, f.Close()
}
-// CopyFile copies the entire database to file at the given path.
-// A reader transaction is maintained during the copy so it is safe to continue
-// using the database while a copy is in progress.
+/// Tx.CopyFile() copies the entire database to file at the given path. A
+/// reader transaction is maintained during the copy so it is safe to continue
+/// using the database while a copy is in progress.
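+///
+/// A sketch (illustrative only; path and mode are hypothetical):
+///
+///	err := db.View(func(tx *Tx) error {
+///		return tx.CopyFile("backup.db", 0600)
+///	})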
func (tx *Tx) CopyFile(path string, mode os.FileMode) error {
f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_TRUNC, mode)
if err != nil {
@@ -3388,14 +3648,14 @@ func (tx *Tx) CopyFile(path string, mode os.FileMode) error {
return f.Close()
}
-// Check performs several consistency checks on the database for this transaction.
-// An error is returned if any inconsistency is found.
-//
-// It can be safely run concurrently on a writable transaction. However, this
-// incurs a high cost for large databases and databases with a lot of subbuckets
-// because of caching. This overhead can be removed if running on a read-only
-// transaction, however, it is not safe to execute other writer transactions at
-// the same time.
+/// Tx.Check() performs several consistency checks on the database for this
+/// transaction. An error is returned if any inconsistency is found.
+///
+/// It can be safely run concurrently on a writable transaction. However, this
+/// incurs a high cost for large databases and databases with a lot of
+/// subbuckets because of caching. This overhead can be removed if running on a
+/// read-only transaction, however, it is not safe to execute other writer
+/// transactions at the same time.
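+///
+/// Draining the returned channel, which is closed when the check finishes
+/// (illustrative only):
+///
+///	err := db.View(func(tx *Tx) error {
+///		for err := range tx.Check() {
+///			log.Println(err)
+///		}
+///		return nil
+///	})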
func (tx *Tx) Check() <-chan error {
ch := make(chan error)
go tx.check(ch)
@@ -3437,7 +3697,20 @@ func (tx *Tx) check(ch chan error) {
close(ch)
}
-func (tx *Tx) checkBucket(b *Bucket, reachable map[pgid]*page, freed map[pgid]bool, ch chan error) {
+func isBranchPage(p *page) bool {
+	return (p.flags & branchPageFlag) != 0
+}
+
+func isLeafPage(p *page) bool {
+	return (p.flags & leafPageFlag) != 0
+}
+
+func (tx *Tx) checkBucket(
+ b *Bucket,
+ reachable map[pgid]*page,
+ freed map[pgid]bool,
+ ch chan error,
+) {
// Ignore inline buckets.
if b.ref.root == 0 {
return
@@ -3446,14 +3719,22 @@ func (tx *Tx) checkBucket(b *Bucket, reachable map[pgid]*page, freed map[pgid]bo
// Check every page used by this bucket.
b.tx.forEachPage(b.ref.root, 0, func(p *page, _ int) {
if p.id > tx.meta.pgid {
- ch <- fmt.Errorf("page %d: out of bounds: %d", int(p.id), int(b.tx.meta.pgid))
+ ch <- fmt.Errorf(
+ "page %d: out of bounds: %d",
+ int(p.id),
+ int(b.tx.meta.pgid),
+ )
}
// Ensure each page is only referenced once.
for i := pgid(0); i <= pgid(p.overflow); i++ {
- var id = p.id + i
- if _, ok := reachable[id]; ok {
- ch <- fmt.Errorf("page %d: multiple references", int(id))
+ id := p.id + i
+ _, ok := reachable[id]
+ if ok {
+ ch <- fmt.Errorf(
+ "page %d: multiple references",
+ int(id),
+ )
}
reachable[id] = p
}
@@ -3461,21 +3742,26 @@ func (tx *Tx) checkBucket(b *Bucket, reachable map[pgid]*page, freed map[pgid]bo
// We should only encounter un-freed leaf and branch pages.
if freed[p.id] {
ch <- fmt.Errorf("page %d: reachable freed", int(p.id))
- } else if (p.flags&branchPageFlag) == 0 && (p.flags&leafPageFlag) == 0 {
- ch <- fmt.Errorf("page %d: invalid type: %s", int(p.id), p.typ())
+	} else if !isBranchPage(p) && !isLeafPage(p) {
+ ch <- fmt.Errorf(
+ "page %d: invalid type: %s",
+ int(p.id),
+ p.typ(),
+ )
}
})
// Check each bucket within this bucket.
_ = b.ForEach(func(k, v []byte) error {
- if child := b.Bucket(k); child != nil {
+ child := b.Bucket(k)
+ if child != nil {
tx.checkBucket(child, reachable, freed, ch)
}
return nil
})
}
-// allocate returns a contiguous block of memory starting at a given page.
+/// Tx.allocate() returns a contiguous block of memory starting at a given page.
func (tx *Tx) allocate(count int) (*page, error) {
p, err := tx.db.allocate(count)
if err != nil {
@@ -3488,7 +3774,7 @@ func (tx *Tx) allocate(count int) (*page, error) {
return p, nil
}
-// write writes any dirty pages to disk.
+/// Tx.write() writes any dirty pages to disk.
func (tx *Tx) write() error {
// Sort pages by id.
pages := make(pages, 0, len(tx.pages))
@@ -3515,7 +3801,8 @@ func (tx *Tx) write() error {
// Write chunk to disk.
buf := ptr[:sz]
- if _, err := tx.db.file.WriteAt(buf, offset); err != nil {
+ _, err := tx.db.file.WriteAt(buf, offset)
+ if err != nil {
return err
}
@@ -3525,13 +3812,15 @@ func (tx *Tx) write() error {
break
}
- // Otherwise move offset forward and move pointer to next chunk.
+ // Otherwise move offset forward and move pointer to
+ // next chunk.
offset += int64(sz)
ptr = (*[maxAllocSize]byte)(unsafe.Pointer(&ptr[sz]))
}
}
- if err := fdatasync(tx.db); err != nil {
+ err := fdatasync(tx.db)
+ if err != nil {
return err
}
@@ -3544,8 +3833,6 @@ func (tx *Tx) write() error {
}
buf := (*[maxAllocSize]byte)(unsafe.Pointer(p))[:tx.db.pageSize]
-
- // See https://go.googlesource.com/go/+/f03c9202c43e0abb130669852082117ca50aa9b1
for i := range buf {
buf[i] = 0
}
@@ -3555,7 +3842,7 @@ func (tx *Tx) write() error {
return nil
}
-// writeMeta writes the meta to the disk.
+/// Tx.writeMeta() writes the meta to the disk.
func (tx *Tx) writeMeta() error {
// Create a temporary buffer for the meta page.
buf := make([]byte, tx.db.pageSize)
@@ -3563,23 +3850,26 @@ func (tx *Tx) writeMeta() error {
tx.meta.write(p)
// Write the meta page to file.
- if _, err := tx.db.file.WriteAt(buf, int64(p.id)*int64(tx.db.pageSize)); err != nil {
+ _, err := tx.db.file.WriteAt(buf, int64(p.id)*int64(tx.db.pageSize))
+ if err != nil {
return err
}
- if err := fdatasync(tx.db); err != nil {
+ err = fdatasync(tx.db)
+ if err != nil {
return err
}
return nil
}
-// page returns a reference to the page with a given id.
-// If page has been written to then a temporary buffered page is returned.
+/// Tx.page() returns a reference to the page with a given id. If the page
+/// has been written to then a temporary buffered page is returned.
func (tx *Tx) page(id pgid) *page {
// Check the dirty pages first.
if tx.pages != nil {
- if p, ok := tx.pages[id]; ok {
+ p, ok := tx.pages[id]
+ if ok {
return p
}
}
@@ -3588,7 +3878,8 @@ func (tx *Tx) page(id pgid) *page {
return tx.db.page(id)
}
-// forEachPage iterates over every page within a given page and executes a function.
+/// Tx.forEachPage() iterates over every page within a given page and executes a
+/// function.
func (tx *Tx) forEachPage(pgid pgid, depth int, fn func(*page, int)) {
p := tx.page(pgid)
@@ -3604,8 +3895,8 @@ func (tx *Tx) forEachPage(pgid pgid, depth int, fn func(*page, int)) {
}
}
-// Page returns page information for a given page number.
-// This is only safe for concurrent use when used by a writable transaction.
+/// Tx.Page() returns page information for a given page number. This is only
+/// safe for concurrent use when used by a writable transaction.
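+///
+/// A sketch (illustrative; assumes PageInfo keeps Bolt's Type field):
+///
+///	err := db.Update(func(tx *Tx) error {
+///		info, err := tx.Page(2)
+///		if err != nil {
+///			return err
+///		}
+///		if info != nil {
+///			fmt.Printf("page 2 type: %s\n", info.Type)
+///		}
+///		return nil
+///	})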
func (tx *Tx) Page(id int) (*PageInfo, error) {
if tx.db == nil {
return nil, ErrTxClosed
diff --git a/tests/dedo.go b/tests/dedo.go
index a43a3d2..b1eec95 100644
--- a/tests/dedo.go
+++ b/tests/dedo.go
@@ -137,16 +137,20 @@ func TestBucket_Get_NonExistent(t *testing.T) {
defer db.MustClose()
defer os.Remove(db.Path())
- if err := db.Update(func(tx *Tx) error {
+ err := db.Update(func(tx *Tx) error {
b, err := tx.CreateBucket([]byte("widgets"))
if err != nil {
t.Fatal(err)
}
- if v := b.Get([]byte("foo")); v != nil {
+
+ v := b.Get([]byte("foo"))
+ if v != nil {
t.Fatal("expected nil value")
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
}
@@ -157,19 +161,25 @@ func TestBucket_Get_FromNode(t *testing.T) {
defer db.MustClose()
defer os.Remove(db.Path())
- if err := db.Update(func(tx *Tx) error {
+ err := db.Update(func(tx *Tx) error {
b, err := tx.CreateBucket([]byte("widgets"))
if err != nil {
t.Fatal(err)
}
- if err := b.Put([]byte("foo"), []byte("bar")); err != nil {
+
+ err = b.Put([]byte("foo"), []byte("bar"))
+ if err != nil {
t.Fatal(err)
}
- if v := b.Get([]byte("foo")); !bytes.Equal(v, []byte("bar")) {
+
+ v := b.Get([]byte("foo"))
+ if !bytes.Equal(v, []byte("bar")) {
t.Fatalf("unexpected value: %v", v)
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
}
@@ -180,27 +190,33 @@ func TestBucket_Get_IncompatibleValue(t *testing.T) {
defer db.MustClose()
defer os.Remove(db.Path())
- if err := db.Update(func(tx *Tx) error {
+ err := db.Update(func(tx *Tx) error {
_, err := tx.CreateBucket([]byte("widgets"))
if err != nil {
t.Fatal(err)
}
- if _, err := tx.Bucket([]byte("widgets")).CreateBucket([]byte("foo")); err != nil {
+ _, err = tx.Bucket([]byte("widgets")).CreateBucket(
+ []byte("foo"),
+ )
+ if err != nil {
t.Fatal(err)
}
if tx.Bucket([]byte("widgets")).Get([]byte("foo")) != nil {
t.Fatal("expected nil value")
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
}
-// Ensure that a slice returned from a bucket has a capacity equal to its length.
-// This also allows slices to be appended to since it will require a realloc by Go.
+// Ensure that a slice returned from a bucket has a capacity equal to its
+// length. This also allows slices to be appended to since it will require a
+// realloc by Go.
//
// https://github.com/boltdb/bolt/issues/544
func TestBucket_Get_Capacity(t *testing.T) {
@@ -209,18 +225,20 @@ func TestBucket_Get_Capacity(t *testing.T) {
defer os.Remove(db.Path())
// Write key to a bucket.
- if err := db.Update(func(tx *Tx) error {
+ err := db.Update(func(tx *Tx) error {
b, err := tx.CreateBucket([]byte("bucket"))
if err != nil {
return err
}
+
return b.Put([]byte("key"), []byte("val"))
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
// Retrieve value and attempt to append to it.
- if err := db.Update(func(tx *Tx) error {
+ err = db.Update(func(tx *Tx) error {
k, v := tx.Bucket([]byte("bucket")).Cursor().First()
// Verify capacity.
@@ -235,7 +253,8 @@ func TestBucket_Get_Capacity(t *testing.T) {
v = append(v, []byte("123")...)
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
}
@@ -246,12 +265,14 @@ func TestBucket_Put(t *testing.T) {
defer db.MustClose()
defer os.Remove(db.Path())
- if err := db.Update(func(tx *Tx) error {
+ err := db.Update(func(tx *Tx) error {
b, err := tx.CreateBucket([]byte("widgets"))
if err != nil {
t.Fatal(err)
}
- if err := b.Put([]byte("foo"), []byte("bar")); err != nil {
+
+ err = b.Put([]byte("foo"), []byte("bar"))
+ if err != nil {
t.Fatal(err)
}
@@ -259,8 +280,10 @@ func TestBucket_Put(t *testing.T) {
if !bytes.Equal([]byte("bar"), v) {
t.Fatalf("unexpected value: %v", v)
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
}
@@ -271,15 +294,19 @@ func TestBucket_Put_Repeat(t *testing.T) {
defer db.MustClose()
defer os.Remove(db.Path())
- if err := db.Update(func(tx *Tx) error {
+ err := db.Update(func(tx *Tx) error {
b, err := tx.CreateBucket([]byte("widgets"))
if err != nil {
t.Fatal(err)
}
- if err := b.Put([]byte("foo"), []byte("bar")); err != nil {
+
+ err = b.Put([]byte("foo"), []byte("bar"))
+ if err != nil {
t.Fatal(err)
}
- if err := b.Put([]byte("foo"), []byte("baz")); err != nil {
+
+ err = b.Put([]byte("foo"), []byte("baz"))
+ if err != nil {
t.Fatal(err)
}
@@ -287,8 +314,10 @@ func TestBucket_Put_Repeat(t *testing.T) {
if !bytes.Equal([]byte("baz"), value) {
t.Fatalf("unexpected value: %v", value)
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
}
@@ -300,31 +329,42 @@ func TestBucket_Put_Large(t *testing.T) {
defer os.Remove(db.Path())
count, factor := 100, 200
- if err := db.Update(func(tx *Tx) error {
+ err := db.Update(func(tx *Tx) error {
b, err := tx.CreateBucket([]byte("widgets"))
if err != nil {
t.Fatal(err)
}
+
for i := 1; i < count; i++ {
- if err := b.Put([]byte(strings.Repeat("0", i*factor)), []byte(strings.Repeat("X", (count-i)*factor))); err != nil {
+ key := strings.Repeat("0", i * factor)
+ value := strings.Repeat("X", (count - i) * factor)
+ err := b.Put([]byte(key), []byte(value))
+ if err != nil {
t.Fatal(err)
}
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
- if err := db.View(func(tx *Tx) error {
+ err = db.View(func(tx *Tx) error {
b := tx.Bucket([]byte("widgets"))
for i := 1; i < count; i++ {
value := b.Get([]byte(strings.Repeat("0", i*factor)))
- if !bytes.Equal(value, []byte(strings.Repeat("X", (count-i)*factor))) {
+ expected := []byte(strings.Repeat(
+ "X",
+ (count - i) * factor,
+ ))
+ if !bytes.Equal(value, expected) {
t.Fatalf("unexpected value: %v", value)
}
}
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
}
@@ -343,7 +383,7 @@ func TestDB_Put_VeryLarge(t *testing.T) {
defer os.Remove(db.Path())
for i := 0; i < n; i += batchN {
- if err := db.Update(func(tx *Tx) error {
+ err := db.Update(func(tx *Tx) error {
b, err := tx.CreateBucketIfNotExists([]byte("widgets"))
if err != nil {
t.Fatal(err)
@@ -351,12 +391,14 @@ func TestDB_Put_VeryLarge(t *testing.T) {
for j := 0; j < batchN; j++ {
k, v := make([]byte, ksize), make([]byte, vsize)
binary.BigEndian.PutUint32(k, uint32(i+j))
- if err := b.Put(k, v); err != nil {
+ err := b.Put(k, v)
+ if err != nil {
t.Fatal(err)
}
}
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
}
@@ -368,25 +410,33 @@ func TestBucket_Put_IncompatibleValue(t *testing.T) {
defer db.MustClose()
defer os.Remove(db.Path())
- if err := db.Update(func(tx *Tx) error {
+ err := db.Update(func(tx *Tx) error {
b0, err := tx.CreateBucket([]byte("widgets"))
if err != nil {
t.Fatal(err)
}
- if _, err := tx.Bucket([]byte("widgets")).CreateBucket([]byte("foo")); err != nil {
+ _, err = tx.Bucket([]byte("widgets")).CreateBucket(
+ []byte("foo"),
+ )
+ if err != nil {
t.Fatal(err)
}
- if err := b0.Put([]byte("foo"), []byte("bar")); err != ErrIncompatibleValue {
+
+ err = b0.Put([]byte("foo"), []byte("bar"))
+ if err != ErrIncompatibleValue {
t.Fatalf("unexpected error: %s", err)
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
}
-// Ensure that a setting a value while the transaction is closed returns an error.
+// Ensure that setting a value while the transaction is closed returns an
+// error.
func TestBucket_Put_Closed(t *testing.T) {
db := MustOpenDB()
defer db.MustClose()
@@ -402,11 +452,13 @@ func TestBucket_Put_Closed(t *testing.T) {
t.Fatal(err)
}
- if err := tx.Rollback(); err != nil {
+ err = tx.Rollback()
+ if err != nil {
t.Fatal(err)
}
- if err := b.Put([]byte("foo"), []byte("bar")); err != ErrTxClosed {
+ err = b.Put([]byte("foo"), []byte("bar"))
+ if err != ErrTxClosed {
t.Fatalf("unexpected error: %s", err)
}
}
@@ -417,22 +469,28 @@ func TestBucket_Put_ReadOnly(t *testing.T) {
defer db.MustClose()
defer os.Remove(db.Path())
- if err := db.Update(func(tx *Tx) error {
- if _, err := tx.CreateBucket([]byte("widgets")); err != nil {
+ err := db.Update(func(tx *Tx) error {
+ _, err := tx.CreateBucket([]byte("widgets"))
+ if err != nil {
t.Fatal(err)
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
- if err := db.View(func(tx *Tx) error {
+ err = db.View(func(tx *Tx) error {
b := tx.Bucket([]byte("widgets"))
- if err := b.Put([]byte("foo"), []byte("bar")); err != ErrTxNotWritable {
+ err := b.Put([]byte("foo"), []byte("bar"))
+ if err != ErrTxNotWritable {
t.Fatalf("unexpected error: %s", err)
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
}
@@ -443,22 +501,30 @@ func TestBucket_Delete(t *testing.T) {
defer db.MustClose()
defer os.Remove(db.Path())
- if err := db.Update(func(tx *Tx) error {
+ err := db.Update(func(tx *Tx) error {
b, err := tx.CreateBucket([]byte("widgets"))
if err != nil {
t.Fatal(err)
}
- if err := b.Put([]byte("foo"), []byte("bar")); err != nil {
+
+ err = b.Put([]byte("foo"), []byte("bar"))
+ if err != nil {
t.Fatal(err)
}
- if err := b.Delete([]byte("foo")); err != nil {
+
+ err = b.Delete([]byte("foo"))
+ if err != nil {
t.Fatal(err)
}
- if v := b.Get([]byte("foo")); v != nil {
+
+ v := b.Get([]byte("foo"))
+ if v != nil {
t.Fatalf("unexpected value: %v", v)
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
}
@@ -469,44 +535,54 @@ func TestBucket_Delete_Large(t *testing.T) {
defer db.MustClose()
defer os.Remove(db.Path())
- if err := db.Update(func(tx *Tx) error {
+ err := db.Update(func(tx *Tx) error {
b, err := tx.CreateBucket([]byte("widgets"))
if err != nil {
t.Fatal(err)
}
for i := 0; i < 100; i++ {
- if err := b.Put([]byte(strconv.Itoa(i)), []byte(strings.Repeat("*", 1024))); err != nil {
+ err := b.Put(
+ []byte(strconv.Itoa(i)),
+ []byte(strings.Repeat("*", 1024)),
+ )
+ if err != nil {
t.Fatal(err)
}
}
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
- if err := db.Update(func(tx *Tx) error {
+ err = db.Update(func(tx *Tx) error {
b := tx.Bucket([]byte("widgets"))
for i := 0; i < 100; i++ {
- if err := b.Delete([]byte(strconv.Itoa(i))); err != nil {
+ err := b.Delete([]byte(strconv.Itoa(i)))
+ if err != nil {
t.Fatal(err)
}
}
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
- if err := db.View(func(tx *Tx) error {
+ err = db.View(func(tx *Tx) error {
b := tx.Bucket([]byte("widgets"))
for i := 0; i < 100; i++ {
- if v := b.Get([]byte(strconv.Itoa(i))); v != nil {
+ v := b.Get([]byte(strconv.Itoa(i)))
+ if v != nil {
t.Fatalf("unexpected value: %v, i=%d", v, i)
}
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
}
@@ -524,7 +600,7 @@ func TestBucket_Delete_FreelistOverflow(t *testing.T) {
k := make([]byte, 16)
for i := uint64(0); i < n1; i++ {
- if err := db.Update(func(tx *Tx) error {
+ err := db.Update(func(tx *Tx) error {
b, err := tx.CreateBucketIfNotExists([]byte("0"))
if err != nil {
t.Fatalf("bucket error: %s", err)
@@ -533,28 +609,32 @@ func TestBucket_Delete_FreelistOverflow(t *testing.T) {
for j := uint64(0); j < n2; j++ {
binary.BigEndian.PutUint64(k[:8], i)
binary.BigEndian.PutUint64(k[8:], j)
- if err := b.Put(k, nil); err != nil {
+ err := b.Put(k, nil)
+ if err != nil {
t.Fatalf("put error: %s", err)
}
}
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
}
// Delete all of them in one large transaction
- if err := db.Update(func(tx *Tx) error {
+ err := db.Update(func(tx *Tx) error {
b := tx.Bucket([]byte("0"))
c := b.Cursor()
for k, _ := c.First(); k != nil; k, _ = c.Next() {
- if err := c.Delete(); err != nil {
+ err := c.Delete()
+ if err != nil {
t.Fatal(err)
}
}
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
}
@@ -565,7 +645,7 @@ func TestBucket_Nested(t *testing.T) {
defer db.MustClose()
defer os.Remove(db.Path())
- if err := db.Update(func(tx *Tx) error {
+ err := db.Update(func(tx *Tx) error {
// Create a widgets bucket.
b, err := tx.CreateBucket([]byte("widgets"))
if err != nil {
@@ -579,70 +659,94 @@ func TestBucket_Nested(t *testing.T) {
}
// Create a widgets/bar key.
- if err := b.Put([]byte("bar"), []byte("0000")); err != nil {
+ err = b.Put([]byte("bar"), []byte("0000"))
+ if err != nil {
t.Fatal(err)
}
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
db.MustCheck()
// Update widgets/bar.
- if err := db.Update(func(tx *Tx) error {
+ err = db.Update(func(tx *Tx) error {
b := tx.Bucket([]byte("widgets"))
- if err := b.Put([]byte("bar"), []byte("xxxx")); err != nil {
+ err := b.Put([]byte("bar"), []byte("xxxx"))
+ if err != nil {
t.Fatal(err)
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
db.MustCheck()
// Cause a split.
- if err := db.Update(func(tx *Tx) error {
- var b = tx.Bucket([]byte("widgets"))
+ err = db.Update(func(tx *Tx) error {
+ b := tx.Bucket([]byte("widgets"))
for i := 0; i < 10000; i++ {
- if err := b.Put([]byte(strconv.Itoa(i)), []byte(strconv.Itoa(i))); err != nil {
+ err := b.Put(
+ []byte(strconv.Itoa(i)),
+ []byte(strconv.Itoa(i)),
+ )
+ if err != nil {
t.Fatal(err)
}
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
db.MustCheck()
// Insert into widgets/foo/baz.
- if err := db.Update(func(tx *Tx) error {
- var b = tx.Bucket([]byte("widgets"))
- if err := b.Bucket([]byte("foo")).Put([]byte("baz"), []byte("yyyy")); err != nil {
+ err = db.Update(func(tx *Tx) error {
+ b := tx.Bucket([]byte("widgets"))
+ err := b.Bucket([]byte("foo")).Put(
+ []byte("baz"),
+ []byte("yyyy"),
+ )
+ if err != nil {
t.Fatal(err)
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
db.MustCheck()
// Verify.
- if err := db.View(func(tx *Tx) error {
- var b = tx.Bucket([]byte("widgets"))
- if v := b.Bucket([]byte("foo")).Get([]byte("baz")); !bytes.Equal(v, []byte("yyyy")) {
+ err = db.View(func(tx *Tx) error {
+ b := tx.Bucket([]byte("widgets"))
+ v := b.Bucket([]byte("foo")).Get([]byte("baz"))
+ if !bytes.Equal(v, []byte("yyyy")) {
t.Fatalf("unexpected value: %v", v)
}
- if v := b.Get([]byte("bar")); !bytes.Equal(v, []byte("xxxx")) {
+
+ v = b.Get([]byte("bar"))
+ if !bytes.Equal(v, []byte("xxxx")) {
t.Fatalf("unexpected value: %v", v)
}
+
for i := 0; i < 10000; i++ {
- if v := b.Get([]byte(strconv.Itoa(i))); !bytes.Equal(v, []byte(strconv.Itoa(i))) {
+ v := b.Get([]byte(strconv.Itoa(i)))
+ if !bytes.Equal(v, []byte(strconv.Itoa(i))) {
t.Fatalf("unexpected value: %v", v)
}
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
}
@@ -653,19 +757,25 @@ func TestBucket_Delete_Bucket(t *testing.T) {
defer db.MustClose()
defer os.Remove(db.Path())
- if err := db.Update(func(tx *Tx) error {
+ err := db.Update(func(tx *Tx) error {
b, err := tx.CreateBucket([]byte("widgets"))
if err != nil {
t.Fatal(err)
}
- if _, err := b.CreateBucket([]byte("foo")); err != nil {
+
+ _, err = b.CreateBucket([]byte("foo"))
+ if err != nil {
t.Fatal(err)
}
- if err := b.Delete([]byte("foo")); err != ErrIncompatibleValue {
+
+ err = b.Delete([]byte("foo"))
+ if err != ErrIncompatibleValue {
t.Fatalf("unexpected error: %s", err)
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
}
@@ -676,26 +786,33 @@ func TestBucket_Delete_ReadOnly(t *testing.T) {
defer db.MustClose()
defer os.Remove(db.Path())
- if err := db.Update(func(tx *Tx) error {
- if _, err := tx.CreateBucket([]byte("widgets")); err != nil {
+ err := db.Update(func(tx *Tx) error {
+ _, err := tx.CreateBucket([]byte("widgets"))
+ if err != nil {
t.Fatal(err)
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
- if err := db.View(func(tx *Tx) error {
- if err := tx.Bucket([]byte("widgets")).Delete([]byte("foo")); err != ErrTxNotWritable {
+ err = db.View(func(tx *Tx) error {
+ err := tx.Bucket([]byte("widgets")).Delete([]byte("foo"))
+ if err != ErrTxNotWritable {
t.Fatalf("unexpected error: %s", err)
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
}
-// Ensure that a deleting value while the transaction is closed returns an error.
+// Ensure that deleting a value while the transaction is closed returns an
+// error.
func TestBucket_Delete_Closed(t *testing.T) {
db := MustOpenDB()
defer db.MustClose()
@@ -711,10 +828,13 @@ func TestBucket_Delete_Closed(t *testing.T) {
t.Fatal(err)
}
- if err := tx.Rollback(); err != nil {
+ err = tx.Rollback()
+ if err != nil {
t.Fatal(err)
}
- if err := b.Delete([]byte("foo")); err != ErrTxClosed {
+
+ err = b.Delete([]byte("foo"))
+ if err != ErrTxClosed {
t.Fatalf("unexpected error: %s", err)
}
}
@@ -725,7 +845,7 @@ func TestBucket_DeleteBucket_Nested(t *testing.T) {
defer db.MustClose()
defer os.Remove(db.Path())
- if err := db.Update(func(tx *Tx) error {
+ err := db.Update(func(tx *Tx) error {
widgets, err := tx.CreateBucket([]byte("widgets"))
if err != nil {
t.Fatal(err)
@@ -740,25 +860,32 @@ func TestBucket_DeleteBucket_Nested(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- if err := bar.Put([]byte("baz"), []byte("bat")); err != nil {
+
+ err = bar.Put([]byte("baz"), []byte("bat"))
+ if err != nil {
t.Fatal(err)
}
- if err := tx.Bucket([]byte("widgets")).DeleteBucket([]byte("foo")); err != nil {
+
+ err = tx.Bucket([]byte("widgets")).DeleteBucket([]byte("foo"))
+ if err != nil {
t.Fatal(err)
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
}
-// Ensure that deleting a bucket causes nested buckets to be deleted after they have been committed.
+// Ensure that deleting a bucket causes nested buckets to be deleted after they
+// have been committed.
func TestBucket_DeleteBucket_Nested2(t *testing.T) {
db := MustOpenDB()
defer db.MustClose()
defer os.Remove(db.Path())
- if err := db.Update(func(tx *Tx) error {
+ err := db.Update(func(tx *Tx) error {
widgets, err := tx.CreateBucket([]byte("widgets"))
if err != nil {
t.Fatal(err)
@@ -774,15 +901,17 @@ func TestBucket_DeleteBucket_Nested2(t *testing.T) {
t.Fatal(err)
}
- if err := bar.Put([]byte("baz"), []byte("bat")); err != nil {
+ err = bar.Put([]byte("baz"), []byte("bat"))
+ if err != nil {
t.Fatal(err)
}
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
- if err := db.Update(func(tx *Tx) error {
+ err = db.Update(func(tx *Tx) error {
widgets := tx.Bucket([]byte("widgets"))
if widgets == nil {
t.Fatal("expected widgets bucket")
@@ -798,35 +927,42 @@ func TestBucket_DeleteBucket_Nested2(t *testing.T) {
t.Fatal("expected bar bucket")
}
- if v := bar.Get([]byte("baz")); !bytes.Equal(v, []byte("bat")) {
+ v := bar.Get([]byte("baz"))
+ if !bytes.Equal(v, []byte("bat")) {
t.Fatalf("unexpected value: %v", v)
}
- if err := tx.DeleteBucket([]byte("widgets")); err != nil {
+
+ err := tx.DeleteBucket([]byte("widgets"))
+ if err != nil {
t.Fatal(err)
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
- if err := db.View(func(tx *Tx) error {
+ err = db.View(func(tx *Tx) error {
if tx.Bucket([]byte("widgets")) != nil {
t.Fatal("expected bucket to be deleted")
}
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
}
-// Ensure that deleting a child bucket with multiple pages causes all pages to get collected.
-// NOTE: Consistency check in bolt_test.DB.Close() will panic if pages not freed properly.
+// Ensure that deleting a child bucket with multiple pages causes all pages to
+// get collected. NOTE: Consistency check in bolt_test.DB.Close() will panic if
+// pages not freed properly.
func TestBucket_DeleteBucket_Large(t *testing.T) {
db := MustOpenDB()
defer db.MustClose()
defer os.Remove(db.Path())
- if err := db.Update(func(tx *Tx) error {
+ err := db.Update(func(tx *Tx) error {
widgets, err := tx.CreateBucket([]byte("widgets"))
if err != nil {
t.Fatal(err)
@@ -838,21 +974,29 @@ func TestBucket_DeleteBucket_Large(t *testing.T) {
}
for i := 0; i < 1000; i++ {
- if err := foo.Put([]byte(fmt.Sprintf("%d", i)), []byte(fmt.Sprintf("%0100d", i))); err != nil {
+ err := foo.Put(
+ []byte(fmt.Sprintf("%d", i)),
+ []byte(fmt.Sprintf("%0100d", i)),
+ )
+ if err != nil {
t.Fatal(err)
}
}
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
- if err := db.Update(func(tx *Tx) error {
- if err := tx.DeleteBucket([]byte("widgets")); err != nil {
+ err = db.Update(func(tx *Tx) error {
+ err := tx.DeleteBucket([]byte("widgets"))
+ if err != nil {
t.Fatal(err)
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
}
@@ -863,20 +1007,25 @@ func TestBucket_Bucket_IncompatibleValue(t *testing.T) {
defer db.MustClose()
defer os.Remove(db.Path())
- if err := db.Update(func(tx *Tx) error {
+ err := db.Update(func(tx *Tx) error {
widgets, err := tx.CreateBucket([]byte("widgets"))
if err != nil {
t.Fatal(err)
}
- if err := widgets.Put([]byte("foo"), []byte("bar")); err != nil {
+ err = widgets.Put([]byte("foo"), []byte("bar"))
+ if err != nil {
t.Fatal(err)
}
- if b := tx.Bucket([]byte("widgets")).Bucket([]byte("foo")); b != nil {
+
+ b := tx.Bucket([]byte("widgets")).Bucket([]byte("foo"))
+ if b != nil {
t.Fatal("expected nil bucket")
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
}
@@ -887,20 +1036,25 @@ func TestBucket_CreateBucket_IncompatibleValue(t *testing.T) {
defer db.MustClose()
defer os.Remove(db.Path())
- if err := db.Update(func(tx *Tx) error {
+ err := db.Update(func(tx *Tx) error {
widgets, err := tx.CreateBucket([]byte("widgets"))
if err != nil {
t.Fatal(err)
}
- if err := widgets.Put([]byte("foo"), []byte("bar")); err != nil {
+ err = widgets.Put([]byte("foo"), []byte("bar"))
+ if err != nil {
t.Fatal(err)
}
- if _, err := widgets.CreateBucket([]byte("foo")); err != ErrIncompatibleValue {
+
+ _, err = widgets.CreateBucket([]byte("foo"))
+ if err != ErrIncompatibleValue {
t.Fatalf("unexpected error: %s", err)
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
}
@@ -911,19 +1065,25 @@ func TestBucket_DeleteBucket_IncompatibleValue(t *testing.T) {
defer db.MustClose()
defer os.Remove(db.Path())
- if err := db.Update(func(tx *Tx) error {
+ err := db.Update(func(tx *Tx) error {
widgets, err := tx.CreateBucket([]byte("widgets"))
if err != nil {
t.Fatal(err)
}
- if err := widgets.Put([]byte("foo"), []byte("bar")); err != nil {
+
+ err = widgets.Put([]byte("foo"), []byte("bar"))
+ if err != nil {
t.Fatal(err)
}
- if err := tx.Bucket([]byte("widgets")).DeleteBucket([]byte("foo")); err != ErrIncompatibleValue {
+
+ err = tx.Bucket([]byte("widgets")).DeleteBucket([]byte("foo"))
+ if err != ErrIncompatibleValue {
t.Fatalf("unexpected error: %s", err)
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
}
@@ -934,39 +1094,46 @@ func TestBucket_Sequence(t *testing.T) {
defer db.MustClose()
defer os.Remove(db.Path())
- if err := db.Update(func(tx *Tx) error {
+ err := db.Update(func(tx *Tx) error {
bkt, err := tx.CreateBucket([]byte("0"))
if err != nil {
t.Fatal(err)
}
// Retrieve sequence.
- if v := bkt.Sequence(); v != 0 {
+ v := bkt.Sequence()
+ if v != 0 {
t.Fatalf("unexpected sequence: %d", v)
}
// Update sequence.
- if err := bkt.SetSequence(1000); err != nil {
+ err = bkt.SetSequence(1000)
+ if err != nil {
t.Fatal(err)
}
// Read sequence again.
- if v := bkt.Sequence(); v != 1000 {
+ v = bkt.Sequence()
+ if v != 1000 {
t.Fatalf("unexpected sequence: %d", v)
}
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
// Verify sequence in separate transaction.
- if err := db.View(func(tx *Tx) error {
- if v := tx.Bucket([]byte("0")).Sequence(); v != 1000 {
+ err = db.View(func(tx *Tx) error {
+ v := tx.Bucket([]byte("0")).Sequence()
+ if v != 1000 {
t.Fatalf("unexpected sequence: %d", v)
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
}
@@ -977,38 +1144,43 @@ func TestBucket_NextSequence(t *testing.T) {
defer db.MustClose()
defer os.Remove(db.Path())
- if err := db.Update(func(tx *Tx) error {
+ err := db.Update(func(tx *Tx) error {
widgets, err := tx.CreateBucket([]byte("widgets"))
if err != nil {
t.Fatal(err)
}
+
woojits, err := tx.CreateBucket([]byte("woojits"))
if err != nil {
t.Fatal(err)
}
// Make sure sequence increments.
- if seq, err := widgets.NextSequence(); err != nil {
+ seq, err := widgets.NextSequence()
+ if err != nil {
t.Fatal(err)
} else if seq != 1 {
t.Fatalf("unexpecte sequence: %d", seq)
}
- if seq, err := widgets.NextSequence(); err != nil {
+ seq, err = widgets.NextSequence()
+ if err != nil {
t.Fatal(err)
} else if seq != 2 {
t.Fatalf("unexpected sequence: %d", seq)
}
// Buckets should be separate.
- if seq, err := woojits.NextSequence(); err != nil {
+ seq, err = woojits.NextSequence()
+ if err != nil {
t.Fatal(err)
} else if seq != 1 {
t.Fatalf("unexpected sequence: %d", 1)
}
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
}
@@ -1021,64 +1193,79 @@ func TestBucket_NextSequence_Persist(t *testing.T) {
defer db.MustClose()
defer os.Remove(db.Path())
- if err := db.Update(func(tx *Tx) error {
- if _, err := tx.CreateBucket([]byte("widgets")); err != nil {
+ err := db.Update(func(tx *Tx) error {
+ _, err := tx.CreateBucket([]byte("widgets"))
+ if err != nil {
t.Fatal(err)
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
- if err := db.Update(func(tx *Tx) error {
- if _, err := tx.Bucket([]byte("widgets")).NextSequence(); err != nil {
+ err = db.Update(func(tx *Tx) error {
+ _, err := tx.Bucket([]byte("widgets")).NextSequence()
+ if err != nil {
t.Fatal(err)
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
- if err := db.Update(func(tx *Tx) error {
+ err = db.Update(func(tx *Tx) error {
seq, err := tx.Bucket([]byte("widgets")).NextSequence()
if err != nil {
t.Fatalf("unexpected error: %s", err)
} else if seq != 2 {
t.Fatalf("unexpected sequence: %d", seq)
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
}
-// Ensure that retrieving the next sequence on a read-only bucket returns an error.
+// Ensure that retrieving the next sequence on a read-only bucket returns an
+// error.
func TestBucket_NextSequence_ReadOnly(t *testing.T) {
db := MustOpenDB()
defer db.MustClose()
defer os.Remove(db.Path())
- if err := db.Update(func(tx *Tx) error {
- if _, err := tx.CreateBucket([]byte("widgets")); err != nil {
+ err := db.Update(func(tx *Tx) error {
+ _, err := tx.CreateBucket([]byte("widgets"))
+ if err != nil {
t.Fatal(err)
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
- if err := db.View(func(tx *Tx) error {
+ err = db.View(func(tx *Tx) error {
_, err := tx.Bucket([]byte("widgets")).NextSequence()
if err != ErrTxNotWritable {
t.Fatalf("unexpected error: %s", err)
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
}
-// Ensure that retrieving the next sequence for a bucket on a closed database return an error.
+// Ensure that retrieving the next sequence for a bucket on a closed database
+// returns an error.
func TestBucket_NextSequence_Closed(t *testing.T) {
db := MustOpenDB()
defer db.MustClose()
@@ -1088,14 +1275,19 @@ func TestBucket_NextSequence_Closed(t *testing.T) {
if err != nil {
t.Fatal(err)
}
+
b, err := tx.CreateBucket([]byte("widgets"))
if err != nil {
t.Fatal(err)
}
- if err := tx.Rollback(); err != nil {
+
+ err = tx.Rollback()
+ if err != nil {
t.Fatal(err)
}
- if _, err := b.NextSequence(); err != ErrTxClosed {
+
+ _, err = b.NextSequence()
+ if err != ErrTxClosed {
t.Fatal(err)
}
}
@@ -1106,23 +1298,29 @@ func TestBucket_ForEach(t *testing.T) {
defer db.MustClose()
defer os.Remove(db.Path())
- if err := db.Update(func(tx *Tx) error {
+ err := db.Update(func(tx *Tx) error {
b, err := tx.CreateBucket([]byte("widgets"))
if err != nil {
t.Fatal(err)
}
- if err := b.Put([]byte("foo"), []byte("0000")); err != nil {
+
+ err = b.Put([]byte("foo"), []byte("0000"))
+ if err != nil {
t.Fatal(err)
}
- if err := b.Put([]byte("baz"), []byte("0001")); err != nil {
+
+ err = b.Put([]byte("baz"), []byte("0001"))
+ if err != nil {
t.Fatal(err)
}
- if err := b.Put([]byte("bar"), []byte("0002")); err != nil {
+
+ err = b.Put([]byte("bar"), []byte("0002"))
+ if err != nil {
t.Fatal(err)
}
var index int
- if err := b.ForEach(func(k, v []byte) error {
+ err = b.ForEach(func(k, v []byte) error {
switch index {
case 0:
if !bytes.Equal(k, []byte("bar")) {
@@ -1144,8 +1342,10 @@ func TestBucket_ForEach(t *testing.T) {
}
}
index++
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
@@ -1154,7 +1354,8 @@ func TestBucket_ForEach(t *testing.T) {
}
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
}
@@ -1165,29 +1366,38 @@ func TestBucket_ForEach_ShortCircuit(t *testing.T) {
defer db.MustClose()
defer os.Remove(db.Path())
- if err := db.Update(func(tx *Tx) error {
+ err := db.Update(func(tx *Tx) error {
b, err := tx.CreateBucket([]byte("widgets"))
if err != nil {
t.Fatal(err)
}
- if err := b.Put([]byte("bar"), []byte("0000")); err != nil {
+
+ err = b.Put([]byte("bar"), []byte("0000"))
+ if err != nil {
t.Fatal(err)
}
- if err := b.Put([]byte("baz"), []byte("0000")); err != nil {
+
+ err = b.Put([]byte("baz"), []byte("0000"))
+ if err != nil {
t.Fatal(err)
}
- if err := b.Put([]byte("foo"), []byte("0000")); err != nil {
+
+ err = b.Put([]byte("foo"), []byte("0000"))
+ if err != nil {
t.Fatal(err)
}
var index int
- if err := tx.Bucket([]byte("widgets")).ForEach(func(k, v []byte) error {
+ err = tx.Bucket(
+ []byte("widgets"),
+ ).ForEach(func(k, v []byte) error {
index++
if bytes.Equal(k, []byte("baz")) {
return errors.New("marker")
}
return nil
- }); err == nil || err.Error() != "marker" {
+ })
+ if err == nil || err.Error() != "marker" {
t.Fatalf("unexpected error: %s", err)
}
if index != 2 {
@@ -1195,7 +1405,8 @@ func TestBucket_ForEach_ShortCircuit(t *testing.T) {
}
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
}
@@ -1216,11 +1427,13 @@ func TestBucket_ForEach_Closed(t *testing.T) {
t.Fatal(err)
}
- if err := tx.Rollback(); err != nil {
+ err = tx.Rollback()
+ if err != nil {
t.Fatal(err)
}
- if err := b.ForEach(func(k, v []byte) error { return nil }); err != ErrTxClosed {
+ err = b.ForEach(func(k, v []byte) error { return nil })
+ if err != ErrTxClosed {
t.Fatalf("unexpected error: %s", err)
}
}
@@ -1231,19 +1444,25 @@ func TestBucket_Put_EmptyKey(t *testing.T) {
defer db.MustClose()
defer os.Remove(db.Path())
- if err := db.Update(func(tx *Tx) error {
+ err := db.Update(func(tx *Tx) error {
b, err := tx.CreateBucket([]byte("widgets"))
if err != nil {
t.Fatal(err)
}
- if err := b.Put([]byte(""), []byte("bar")); err != ErrKeyRequired {
+
+ err = b.Put([]byte(""), []byte("bar"))
+ if err != ErrKeyRequired {
t.Fatalf("unexpected error: %s", err)
}
- if err := b.Put(nil, []byte("bar")); err != ErrKeyRequired {
+
+ err = b.Put(nil, []byte("bar"))
+ if err != ErrKeyRequired {
t.Fatalf("unexpected error: %s", err)
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
}
@@ -1254,16 +1473,19 @@ func TestBucket_Put_KeyTooLarge(t *testing.T) {
defer db.MustClose()
defer os.Remove(db.Path())
- if err := db.Update(func(tx *Tx) error {
+ err := db.Update(func(tx *Tx) error {
b, err := tx.CreateBucket([]byte("widgets"))
if err != nil {
t.Fatal(err)
}
- if err := b.Put(make([]byte, 32769), []byte("bar")); err != ErrKeyTooLarge {
+
+ err = b.Put(make([]byte, 32769), []byte("bar"))
+ if err != ErrKeyTooLarge {
t.Fatalf("unexpected error: %s", err)
}
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
}
@@ -1274,75 +1496,104 @@ func TestBucket_Put_ValueTooLarge(t *testing.T) {
defer db.MustClose()
defer os.Remove(db.Path())
- if err := db.Update(func(tx *Tx) error {
+ err := db.Update(func(tx *Tx) error {
b, err := tx.CreateBucket([]byte("widgets"))
if err != nil {
t.Fatal(err)
}
- if err := b.Put([]byte("foo"), make([]byte, MaxValueSize+1)); err != ErrValueTooLarge {
+
+ err = b.Put([]byte("foo"), make([]byte, MaxValueSize+1))
+ if err != ErrValueTooLarge {
t.Fatalf("unexpected error: %s", err)
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
}
-// Ensure that a bucket can write random keys and values across multiple transactions.
+// Ensure that a bucket can write random keys and values across multiple
+// transactions.
func TestBucket_Put_Single(t *testing.T) {
if testing.Short() {
t.Skip("skipping test in short mode.")
}
index := 0
- if err := quick.Check(func(items testdata) bool {
+ err := quick.Check(func(items testdata) bool {
db := MustOpenDB()
defer db.MustClose()
defer os.Remove(db.Path())
m := make(map[string][]byte)
- if err := db.Update(func(tx *Tx) error {
- if _, err := tx.CreateBucket([]byte("widgets")); err != nil {
+ err := db.Update(func(tx *Tx) error {
+ _, err := tx.CreateBucket([]byte("widgets"))
+ if err != nil {
t.Fatal(err)
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
for _, item := range items {
- if err := db.Update(func(tx *Tx) error {
- if err := tx.Bucket([]byte("widgets")).Put(item.Key, item.Value); err != nil {
+ err := db.Update(func(tx *Tx) error {
+ err := tx.Bucket([]byte("widgets")).Put(
+ item.Key,
+ item.Value,
+ )
+ if err != nil {
panic("put error: " + err.Error())
}
+
m[string(item.Key)] = item.Value
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
+ const errmsg = "value mismatch [run %d] (%d of %d):\n" +
+ "key: %x\ngot: %x\nexp: %x"
// Verify all key/values so far.
- if err := db.View(func(tx *Tx) error {
+ err = db.View(func(tx *Tx) error {
i := 0
for k, v := range m {
- value := tx.Bucket([]byte("widgets")).Get([]byte(k))
+ value := tx.Bucket(
+ []byte("widgets"),
+ ).Get([]byte(k))
if !bytes.Equal(value, v) {
- t.Logf("value mismatch [run %d] (%d of %d):\nkey: %x\ngot: %x\nexp: %x", index, i, len(m), []byte(k), value, v)
+ t.Logf(
+ errmsg,
+ index,
+ i,
+ len(m),
+ []byte(k),
+ value,
+ v,
+ )
db.CopyTempFile()
t.FailNow()
}
i++
}
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
}
index++
return true
- }, nil); err != nil {
+ }, nil)
+ if err != nil {
t.Error(err)
}
}
@@ -1353,111 +1604,142 @@ func TestBucket_Put_Multiple(t *testing.T) {
t.Skip("skipping test in short mode.")
}
- if err := quick.Check(func(items testdata) bool {
+ err := quick.Check(func(items testdata) bool {
db := MustOpenDB()
defer db.MustClose()
defer os.Remove(db.Path())
// Bulk insert all values.
- if err := db.Update(func(tx *Tx) error {
- if _, err := tx.CreateBucket([]byte("widgets")); err != nil {
+ err := db.Update(func(tx *Tx) error {
+ _, err := tx.CreateBucket([]byte("widgets"))
+ if err != nil {
t.Fatal(err)
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
- if err := db.Update(func(tx *Tx) error {
+ err = db.Update(func(tx *Tx) error {
b := tx.Bucket([]byte("widgets"))
for _, item := range items {
- if err := b.Put(item.Key, item.Value); err != nil {
+ err := b.Put(item.Key, item.Value)
+ if err != nil {
t.Fatal(err)
}
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
// Verify all items exist.
- if err := db.View(func(tx *Tx) error {
+ err = db.View(func(tx *Tx) error {
b := tx.Bucket([]byte("widgets"))
for _, item := range items {
value := b.Get(item.Key)
if !bytes.Equal(item.Value, value) {
db.CopyTempFile()
- t.Fatalf("exp=%x; got=%x", item.Value, value)
+ t.Fatalf(
+ "exp=%x, got=%x",
+ item.Value,
+ value,
+ )
}
}
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
return true
- }, qconfig()); err != nil {
+ }, qconfig())
+ if err != nil {
t.Error(err)
}
}
-// Ensure that a transaction can delete all key/value pairs and return to a single leaf page.
+// Ensure that a transaction can delete all key/value pairs and return to a
+// single leaf page.
func TestBucket_Delete_Quick(t *testing.T) {
if testing.Short() {
t.Skip("skipping test in short mode.")
}
- if err := quick.Check(func(items testdata) bool {
+ err := quick.Check(func(items testdata) bool {
db := MustOpenDB()
defer db.MustClose()
defer os.Remove(db.Path())
// Bulk insert all values.
- if err := db.Update(func(tx *Tx) error {
- if _, err := tx.CreateBucket([]byte("widgets")); err != nil {
+ err := db.Update(func(tx *Tx) error {
+ _, err := tx.CreateBucket([]byte("widgets"))
+ if err != nil {
t.Fatal(err)
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
- if err := db.Update(func(tx *Tx) error {
+ err = db.Update(func(tx *Tx) error {
b := tx.Bucket([]byte("widgets"))
for _, item := range items {
- if err := b.Put(item.Key, item.Value); err != nil {
+ err := b.Put(item.Key, item.Value)
+ if err != nil {
t.Fatal(err)
}
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
// Remove items one at a time and check consistency.
for _, item := range items {
- if err := db.Update(func(tx *Tx) error {
- return tx.Bucket([]byte("widgets")).Delete(item.Key)
- }); err != nil {
+ err := db.Update(func(tx *Tx) error {
+ return tx.Bucket([]byte("widgets")).Delete(
+ item.Key,
+ )
+ })
+ if err != nil {
t.Fatal(err)
}
}
// Anything before our deletion index should be nil.
- if err := db.View(func(tx *Tx) error {
- if err := tx.Bucket([]byte("widgets")).ForEach(func(k, v []byte) error {
- t.Fatalf("bucket should be empty; found: %06x", trunc(k, 3))
+ err = db.View(func(tx *Tx) error {
+ err := tx.Bucket(
+ []byte("widgets"),
+ ).ForEach(func(k, v []byte) error {
+ t.Fatalf(
+ "bucket should be empty; found: %06x",
+ trunc(k, 3),
+ )
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
return true
- }, qconfig()); err != nil {
+ }, qconfig())
+ if err != nil {
t.Error(err)
}
}
@@ -1471,7 +1753,7 @@ func ExampleBucket_Put() {
defer os.Remove(db.Path())
// Start a write transaction.
- if err := db.Update(func(tx *Tx) error {
+ err = db.Update(func(tx *Tx) error {
// Create a bucket.
b, err := tx.CreateBucket([]byte("widgets"))
if err != nil {
@@ -1479,25 +1761,30 @@ func ExampleBucket_Put() {
}
// Set the value "bar" for the key "foo".
- if err := b.Put([]byte("foo"), []byte("bar")); err != nil {
+ err = b.Put([]byte("foo"), []byte("bar"))
+ if err != nil {
return err
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
log.Fatal(err)
}
// Read value back in a different read-only transaction.
- if err := db.View(func(tx *Tx) error {
+ err = db.View(func(tx *Tx) error {
value := tx.Bucket([]byte("widgets")).Get([]byte("foo"))
fmt.Printf("The value of 'foo' is: %s\n", value)
return nil
- }); err != nil {
+ })
+ if err != nil {
log.Fatal(err)
}
// Close database to release file lock.
- if err := db.Close(); err != nil {
+ err = db.Close()
+ if err != nil {
log.Fatal(err)
}
@@ -1514,7 +1801,7 @@ func ExampleBucket_Delete() {
defer os.Remove(db.Path())
// Start a write transaction.
- if err := db.Update(func(tx *Tx) error {
+ err = db.Update(func(tx *Tx) error {
// Create a bucket.
b, err := tx.CreateBucket([]byte("widgets"))
if err != nil {
@@ -1522,7 +1809,8 @@ func ExampleBucket_Delete() {
}
// Set the value "bar" for the key "foo".
- if err := b.Put([]byte("foo"), []byte("bar")); err != nil {
+ err = b.Put([]byte("foo"), []byte("bar"))
+ if err != nil {
return err
}
@@ -1531,30 +1819,34 @@ func ExampleBucket_Delete() {
fmt.Printf("The value of 'foo' was: %s\n", value)
return nil
- }); err != nil {
+ })
+ if err != nil {
log.Fatal(err)
}
// Delete the key in a different write transaction.
- if err := db.Update(func(tx *Tx) error {
+ err = db.Update(func(tx *Tx) error {
return tx.Bucket([]byte("widgets")).Delete([]byte("foo"))
- }); err != nil {
+ })
+ if err != nil {
log.Fatal(err)
}
// Retrieve the key again.
- if err := db.View(func(tx *Tx) error {
+ err = db.View(func(tx *Tx) error {
value := tx.Bucket([]byte("widgets")).Get([]byte("foo"))
if value == nil {
fmt.Printf("The value of 'foo' is now: nil\n")
}
return nil
- }); err != nil {
+ })
+ if err != nil {
log.Fatal(err)
}
// Close database to release file lock.
- if err := db.Close(); err != nil {
+ err = db.Close()
+ if err != nil {
log.Fatal(err)
}
@@ -1572,37 +1864,45 @@ func ExampleBucket_ForEach() {
defer os.Remove(db.Path())
// Insert data into a bucket.
- if err := db.Update(func(tx *Tx) error {
+ err = db.Update(func(tx *Tx) error {
b, err := tx.CreateBucket([]byte("animals"))
if err != nil {
return err
}
- if err := b.Put([]byte("dog"), []byte("fun")); err != nil {
+ err = b.Put([]byte("dog"), []byte("fun"))
+ if err != nil {
return err
}
- if err := b.Put([]byte("cat"), []byte("lame")); err != nil {
+
+ err = b.Put([]byte("cat"), []byte("lame"))
+ if err != nil {
return err
}
- if err := b.Put([]byte("liger"), []byte("awesome")); err != nil {
+
+ err = b.Put([]byte("liger"), []byte("awesome"))
+ if err != nil {
return err
}
// Iterate over items in sorted key order.
- if err := b.ForEach(func(k, v []byte) error {
+ err = b.ForEach(func(k, v []byte) error {
fmt.Printf("A %s is %s.\n", k, v)
return nil
- }); err != nil {
+ })
+ if err != nil {
return err
}
return nil
- }); err != nil {
+ })
+ if err != nil {
log.Fatal(err)
}
// Close database to release file lock.
- if err := db.Close(); err != nil {
+ err = db.Close()
+ if err != nil {
log.Fatal(err)
}
@@ -1618,16 +1918,20 @@ func TestCursor_Bucket(t *testing.T) {
defer db.MustClose()
defer os.Remove(db.Path())
- if err := db.Update(func(tx *Tx) error {
+ err := db.Update(func(tx *Tx) error {
b, err := tx.CreateBucket([]byte("widgets"))
if err != nil {
t.Fatal(err)
}
- if cb := b.Cursor().Bucket(); !reflect.DeepEqual(cb, b) {
+
+ cb := b.Cursor().Bucket()
+ if !reflect.DeepEqual(cb, b) {
t.Fatal("cursor bucket mismatch")
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
}
@@ -1638,69 +1942,84 @@ func TestCursor_Seek(t *testing.T) {
defer db.MustClose()
defer os.Remove(db.Path())
- if err := db.Update(func(tx *Tx) error {
+ err := db.Update(func(tx *Tx) error {
b, err := tx.CreateBucket([]byte("widgets"))
if err != nil {
t.Fatal(err)
}
- if err := b.Put([]byte("foo"), []byte("0001")); err != nil {
+
+ err = b.Put([]byte("foo"), []byte("0001"))
+ if err != nil {
t.Fatal(err)
}
- if err := b.Put([]byte("bar"), []byte("0002")); err != nil {
+
+ err = b.Put([]byte("bar"), []byte("0002"))
+ if err != nil {
t.Fatal(err)
}
- if err := b.Put([]byte("baz"), []byte("0003")); err != nil {
+
+ err = b.Put([]byte("baz"), []byte("0003"))
+ if err != nil {
t.Fatal(err)
}
- if _, err := b.CreateBucket([]byte("bkt")); err != nil {
+ _, err = b.CreateBucket([]byte("bkt"))
+ if err != nil {
t.Fatal(err)
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
- if err := db.View(func(tx *Tx) error {
+ err = db.View(func(tx *Tx) error {
c := tx.Bucket([]byte("widgets")).Cursor()
// Exact match should go to the key.
- if k, v := c.Seek([]byte("bar")); !bytes.Equal(k, []byte("bar")) {
+ k, v := c.Seek([]byte("bar"))
+ if !bytes.Equal(k, []byte("bar")) {
t.Fatalf("unexpected key: %v", k)
} else if !bytes.Equal(v, []byte("0002")) {
t.Fatalf("unexpected value: %v", v)
}
// Inexact match should go to the next key.
- if k, v := c.Seek([]byte("bas")); !bytes.Equal(k, []byte("baz")) {
+ k, v = c.Seek([]byte("bas"))
+ if !bytes.Equal(k, []byte("baz")) {
t.Fatalf("unexpected key: %v", k)
} else if !bytes.Equal(v, []byte("0003")) {
t.Fatalf("unexpected value: %v", v)
}
// Low key should go to the first key.
- if k, v := c.Seek([]byte("")); !bytes.Equal(k, []byte("bar")) {
+ k, v = c.Seek([]byte(""))
+ if !bytes.Equal(k, []byte("bar")) {
t.Fatalf("unexpected key: %v", k)
} else if !bytes.Equal(v, []byte("0002")) {
t.Fatalf("unexpected value: %v", v)
}
// High key should return no key.
- if k, v := c.Seek([]byte("zzz")); k != nil {
+ k, v = c.Seek([]byte("zzz"))
+ if k != nil {
t.Fatalf("expected nil key: %v", k)
} else if v != nil {
t.Fatalf("expected nil value: %v", v)
}
// Buckets should return their key but no value.
- if k, v := c.Seek([]byte("bkt")); !bytes.Equal(k, []byte("bkt")) {
+ k, v = c.Seek([]byte("bkt"))
+ if !bytes.Equal(k, []byte("bkt")) {
t.Fatalf("unexpected key: %v", k)
} else if v != nil {
t.Fatalf("expected nil value: %v", v)
}
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
}
@@ -1713,43 +2032,55 @@ func TestCursor_Delete(t *testing.T) {
const count = 1000
// Insert every other key between 0 and $count.
- if err := db.Update(func(tx *Tx) error {
+ err := db.Update(func(tx *Tx) error {
b, err := tx.CreateBucket([]byte("widgets"))
if err != nil {
t.Fatal(err)
}
+
for i := 0; i < count; i += 1 {
k := make([]byte, 8)
binary.BigEndian.PutUint64(k, uint64(i))
- if err := b.Put(k, make([]byte, 100)); err != nil {
+ err := b.Put(k, make([]byte, 100))
+ if err != nil {
t.Fatal(err)
}
}
- if _, err := b.CreateBucket([]byte("sub")); err != nil {
+
+ _, err = b.CreateBucket([]byte("sub"))
+ if err != nil {
t.Fatal(err)
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
- if err := db.Update(func(tx *Tx) error {
+ err = db.Update(func(tx *Tx) error {
c := tx.Bucket([]byte("widgets")).Cursor()
bound := make([]byte, 8)
binary.BigEndian.PutUint64(bound, uint64(count/2))
- for key, _ := c.First(); bytes.Compare(key, bound) < 0; key, _ = c.Next() {
- if err := c.Delete(); err != nil {
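+ // Delete every key below the halfway bound; each for clause
+ // sits on its own line to stay within 80 columns.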
+ for
+ key, _ := c.First();
+ bytes.Compare(key, bound) < 0;
+ key, _ = c.Next() {
+ err := c.Delete()
+ if err != nil {
t.Fatal(err)
}
}
c.Seek([]byte("sub"))
- if err := c.Delete(); err != ErrIncompatibleValue {
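+ // Deleting a sub-bucket through a cursor is expected to fail.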
+ err = c.Delete()
+ if err != ErrIncompatibleValue {
t.Fatalf("unexpected error: %s", err)
}
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
}
@@ -1764,10 +2095,10 @@ func TestCursor_Seek_Large(t *testing.T) {
defer db.MustClose()
defer os.Remove(db.Path())
- var count = 10000
+ const count = 10000
// Insert every other key between 0 and $count.
- if err := db.Update(func(tx *Tx) error {
+ err := db.Update(func(tx *Tx) error {
b, err := tx.CreateBucket([]byte("widgets"))
if err != nil {
t.Fatal(err)
@@ -1777,17 +2108,19 @@ func TestCursor_Seek_Large(t *testing.T) {
for j := i; j < i+100; j += 2 {
k := make([]byte, 8)
binary.BigEndian.PutUint64(k, uint64(j))
- if err := b.Put(k, make([]byte, 100)); err != nil {
+ err := b.Put(k, make([]byte, 100))
+ if err != nil {
t.Fatal(err)
}
}
}
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
- if err := db.View(func(tx *Tx) error {
+ err = db.View(func(tx *Tx) error {
c := tx.Bucket([]byte("widgets")).Cursor()
for i := 0; i < count; i++ {
seek := make([]byte, 8)
@@ -1804,7 +2137,8 @@ func TestCursor_Seek_Large(t *testing.T) {
continue
}
- // Otherwise we should seek to the exact key or the next key.
+ // Otherwise we should seek to the exact key or the next
+ // key.
num := binary.BigEndian.Uint64(k)
if i%2 == 0 {
if num != uint64(i) {
@@ -1818,7 +2152,8 @@ func TestCursor_Seek_Large(t *testing.T) {
}
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
}
@@ -1829,14 +2164,15 @@ func TestCursor_EmptyBucket(t *testing.T) {
defer db.MustClose()
defer os.Remove(db.Path())
- if err := db.Update(func(tx *Tx) error {
+ err := db.Update(func(tx *Tx) error {
_, err := tx.CreateBucket([]byte("widgets"))
return err
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
- if err := db.View(func(tx *Tx) error {
+ err = db.View(func(tx *Tx) error {
c := tx.Bucket([]byte("widgets")).Cursor()
k, v := c.First()
if k != nil {
@@ -1845,24 +2181,28 @@ func TestCursor_EmptyBucket(t *testing.T) {
t.Fatalf("unexpected value: %v", v)
}
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
}
-// Ensure that a Tx cursor can reverse iterate over an empty bucket without error.
+// Ensure that a Tx cursor can reverse iterate over an empty bucket without
+// error.
func TestCursor_EmptyBucketReverse(t *testing.T) {
db := MustOpenDB()
defer db.MustClose()
defer os.Remove(db.Path())
- if err := db.Update(func(tx *Tx) error {
+ err := db.Update(func(tx *Tx) error {
_, err := tx.CreateBucket([]byte("widgets"))
return err
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
- if err := db.View(func(tx *Tx) error {
+
+ err = db.View(func(tx *Tx) error {
c := tx.Bucket([]byte("widgets")).Cursor()
k, v := c.Last()
if k != nil {
@@ -1871,35 +2211,46 @@ func TestCursor_EmptyBucketReverse(t *testing.T) {
t.Fatalf("unexpected value: %v", v)
}
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
}
-// Ensure that a Tx cursor can iterate over a single root with a couple elements.
+// Ensure that a Tx cursor can iterate over a single root with a couple
+// elements.
func TestCursor_Iterate_Leaf(t *testing.T) {
db := MustOpenDB()
defer db.MustClose()
defer os.Remove(db.Path())
- if err := db.Update(func(tx *Tx) error {
+ err := db.Update(func(tx *Tx) error {
b, err := tx.CreateBucket([]byte("widgets"))
if err != nil {
t.Fatal(err)
}
- if err := b.Put([]byte("baz"), []byte{}); err != nil {
+
+ err = b.Put([]byte("baz"), []byte{})
+ if err != nil {
t.Fatal(err)
}
- if err := b.Put([]byte("foo"), []byte{0}); err != nil {
+
+ err = b.Put([]byte("foo"), []byte{0})
+ if err != nil {
t.Fatal(err)
}
- if err := b.Put([]byte("bar"), []byte{1}); err != nil {
+
+ err = b.Put([]byte("bar"), []byte{1})
+ if err != nil {
t.Fatal(err)
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
+
tx, err := db.Begin(false)
if err != nil {
t.Fatal(err)
@@ -1943,72 +2294,89 @@ func TestCursor_Iterate_Leaf(t *testing.T) {
t.Fatalf("expected nil value: %v", v)
}
- if err := tx.Rollback(); err != nil {
+ err = tx.Rollback()
+ if err != nil {
t.Fatal(err)
}
}
-// Ensure that a Tx cursor can iterate in reverse over a single root with a couple elements.
+// Ensure that a Tx cursor can iterate in reverse over a single root with a
+// couple elements.
func TestCursor_LeafRootReverse(t *testing.T) {
db := MustOpenDB()
defer db.MustClose()
defer os.Remove(db.Path())
- if err := db.Update(func(tx *Tx) error {
+ err := db.Update(func(tx *Tx) error {
b, err := tx.CreateBucket([]byte("widgets"))
if err != nil {
t.Fatal(err)
}
- if err := b.Put([]byte("baz"), []byte{}); err != nil {
+
+ err = b.Put([]byte("baz"), []byte{})
+ if err != nil {
t.Fatal(err)
}
- if err := b.Put([]byte("foo"), []byte{0}); err != nil {
+
+ err = b.Put([]byte("foo"), []byte{0})
+ if err != nil {
t.Fatal(err)
}
- if err := b.Put([]byte("bar"), []byte{1}); err != nil {
+
+ err = b.Put([]byte("bar"), []byte{1})
+ if err != nil {
t.Fatal(err)
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
+
tx, err := db.Begin(false)
if err != nil {
t.Fatal(err)
}
- c := tx.Bucket([]byte("widgets")).Cursor()
- if k, v := c.Last(); !bytes.Equal(k, []byte("foo")) {
+ c := tx.Bucket([]byte("widgets")).Cursor()
+ k, v := c.Last()
+ if !bytes.Equal(k, []byte("foo")) {
t.Fatalf("unexpected key: %v", k)
} else if !bytes.Equal(v, []byte{0}) {
t.Fatalf("unexpected value: %v", v)
}
- if k, v := c.Prev(); !bytes.Equal(k, []byte("baz")) {
+ k, v = c.Prev()
+ if !bytes.Equal(k, []byte("baz")) {
t.Fatalf("unexpected key: %v", k)
} else if !bytes.Equal(v, []byte{}) {
t.Fatalf("unexpected value: %v", v)
}
- if k, v := c.Prev(); !bytes.Equal(k, []byte("bar")) {
+ k, v = c.Prev()
+ if !bytes.Equal(k, []byte("bar")) {
t.Fatalf("unexpected key: %v", k)
} else if !bytes.Equal(v, []byte{1}) {
t.Fatalf("unexpected value: %v", v)
}
- if k, v := c.Prev(); k != nil {
+ k, v = c.Prev()
+ if k != nil {
t.Fatalf("expected nil key: %v", k)
} else if v != nil {
t.Fatalf("expected nil value: %v", v)
}
- if k, v := c.Prev(); k != nil {
+ k, v = c.Prev()
+ if k != nil {
t.Fatalf("expected nil key: %v", k)
} else if v != nil {
t.Fatalf("expected nil value: %v", v)
}
- if err := tx.Rollback(); err != nil {
+ err = tx.Rollback()
+ if err != nil {
t.Fatal(err)
}
}
@@ -2019,19 +2387,25 @@ func TestCursor_Restart(t *testing.T) {
defer db.MustClose()
defer os.Remove(db.Path())
- if err := db.Update(func(tx *Tx) error {
+ err := db.Update(func(tx *Tx) error {
b, err := tx.CreateBucket([]byte("widgets"))
if err != nil {
t.Fatal(err)
}
- if err := b.Put([]byte("bar"), []byte{}); err != nil {
+
+ err = b.Put([]byte("bar"), []byte{})
+ if err != nil {
t.Fatal(err)
}
- if err := b.Put([]byte("foo"), []byte{}); err != nil {
+
+ err = b.Put([]byte("foo"), []byte{})
+ if err != nil {
t.Fatal(err)
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
@@ -2039,23 +2413,30 @@ func TestCursor_Restart(t *testing.T) {
if err != nil {
t.Fatal(err)
}
+
c := tx.Bucket([]byte("widgets")).Cursor()
- if k, _ := c.First(); !bytes.Equal(k, []byte("bar")) {
+ k, _ := c.First()
+ if !bytes.Equal(k, []byte("bar")) {
t.Fatalf("unexpected key: %v", k)
}
- if k, _ := c.Next(); !bytes.Equal(k, []byte("foo")) {
+
+ k, _ = c.Next()
+ if !bytes.Equal(k, []byte("foo")) {
t.Fatalf("unexpected key: %v", k)
}
- if k, _ := c.First(); !bytes.Equal(k, []byte("bar")) {
+ k, _ = c.First()
+ if !bytes.Equal(k, []byte("bar")) {
t.Fatalf("unexpected key: %v", k)
}
- if k, _ := c.Next(); !bytes.Equal(k, []byte("foo")) {
+ k, _ = c.Next()
+ if !bytes.Equal(k, []byte("foo")) {
t.Fatalf("unexpected key: %v", k)
}
- if err := tx.Rollback(); err != nil {
+ err = tx.Rollback()
+ if err != nil {
t.Fatal(err)
}
}
@@ -2067,28 +2448,31 @@ func TestCursor_First_EmptyPages(t *testing.T) {
defer os.Remove(db.Path())
// Create 1000 keys in the "widgets" bucket.
- if err := db.Update(func(tx *Tx) error {
+ err := db.Update(func(tx *Tx) error {
b, err := tx.CreateBucket([]byte("widgets"))
if err != nil {
t.Fatal(err)
}
for i := 0; i < 1000; i++ {
- if err := b.Put(u64tob(uint64(i)), []byte{}); err != nil {
+ err := b.Put(u64tob(uint64(i)), []byte{})
+ if err != nil {
t.Fatal(err)
}
}
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
// Delete half the keys and then try to iterate.
- if err := db.Update(func(tx *Tx) error {
+ err = db.Update(func(tx *Tx) error {
b := tx.Bucket([]byte("widgets"))
for i := 0; i < 600; i++ {
- if err := b.Delete(u64tob(uint64(i))); err != nil {
+ err := b.Delete(u64tob(uint64(i)))
+ if err != nil {
t.Fatal(err)
}
}
@@ -2103,7 +2487,8 @@ func TestCursor_First_EmptyPages(t *testing.T) {
}
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
}
@@ -2125,11 +2510,14 @@ func TestCursor_QuickCheck(t *testing.T) {
t.Fatal(err)
}
for _, item := range items {
- if err := b.Put(item.Key, item.Value); err != nil {
+ err := b.Put(item.Key, item.Value)
+ if err != nil {
t.Fatal(err)
}
}
- if err := tx.Commit(); err != nil {
+
+ err = tx.Commit()
+ if err != nil {
t.Fatal(err)
}
@@ -2137,14 +2525,17 @@ func TestCursor_QuickCheck(t *testing.T) {
sort.Sort(items)
// Iterate over all items and check consistency.
- var index = 0
+ index := 0
tx, err = db.Begin(false)
if err != nil {
t.Fatal(err)
}
c := tx.Bucket([]byte("widgets")).Cursor()
- for k, v := c.First(); k != nil && index < len(items); k, v = c.Next() {
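+ // Walk forward and compare each pair against the sorted items.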
+ for
+ k, v := c.First();
+ k != nil && index < len(items);
+ k, v = c.Next() {
if !bytes.Equal(k, items[index].Key) {
t.Fatalf("unexpected key: %v", k)
} else if !bytes.Equal(v, items[index].Value) {
@@ -2153,21 +2544,29 @@ func TestCursor_QuickCheck(t *testing.T) {
index++
}
if len(items) != index {
- t.Fatalf("unexpected item count: %v, expected %v", len(items), index)
+ t.Fatalf(
+ "unexpected item count: %v, expected %v",
+ index,
+ len(items),
+ )
}
- if err := tx.Rollback(); err != nil {
+ err = tx.Rollback()
+ if err != nil {
t.Fatal(err)
}
return true
}
- if err := quick.Check(f, qconfig()); err != nil {
+
+ err := quick.Check(f, qconfig())
+ if err != nil {
t.Error(err)
}
}
-// Ensure that a transaction can iterate over all elements in a bucket in reverse.
+// Ensure that a transaction can iterate over all elements in a bucket in
+// reverse.
func TestCursor_QuickCheck_Reverse(t *testing.T) {
f := func(items testdata) bool {
db := MustOpenDB()
@@ -2179,16 +2578,21 @@ func TestCursor_QuickCheck_Reverse(t *testing.T) {
if err != nil {
t.Fatal(err)
}
+
b, err := tx.CreateBucket([]byte("widgets"))
if err != nil {
t.Fatal(err)
}
+
for _, item := range items {
- if err := b.Put(item.Key, item.Value); err != nil {
+ err := b.Put(item.Key, item.Value)
+ if err != nil {
t.Fatal(err)
}
}
- if err := tx.Commit(); err != nil {
+
+ err = tx.Commit()
+ if err != nil {
t.Fatal(err)
}
@@ -2196,13 +2600,17 @@ func TestCursor_QuickCheck_Reverse(t *testing.T) {
sort.Sort(revtestdata(items))
// Iterate over all items and check consistency.
- var index = 0
+ index := 0
tx, err = db.Begin(false)
if err != nil {
t.Fatal(err)
}
+
c := tx.Bucket([]byte("widgets")).Cursor()
- for k, v := c.Last(); k != nil && index < len(items); k, v = c.Prev() {
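+ // Walk backwards and compare each pair against the
+ // reverse-sorted items.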
+ for
+ k, v := c.Last();
+ k != nil && index < len(items);
+ k, v = c.Prev() {
if !bytes.Equal(k, items[index].Key) {
t.Fatalf("unexpected key: %v", k)
} else if !bytes.Equal(v, items[index].Value) {
@@ -2210,17 +2618,25 @@ func TestCursor_QuickCheck_Reverse(t *testing.T) {
}
index++
}
+
if len(items) != index {
- t.Fatalf("unexpected item count: %v, expected %v", len(items), index)
+ t.Fatalf(
+ "unexpected item count: %v, expected %v",
+ index,
+ len(items),
+ )
}
- if err := tx.Rollback(); err != nil {
+ err = tx.Rollback()
+ if err != nil {
t.Fatal(err)
}
return true
}
- if err := quick.Check(f, qconfig()); err != nil {
+
+ err := quick.Check(f, qconfig())
+ if err != nil {
t.Error(err)
}
}
@@ -2231,27 +2647,35 @@ func TestCursor_QuickCheck_BucketsOnly(t *testing.T) {
defer db.MustClose()
defer os.Remove(db.Path())
- if err := db.Update(func(tx *Tx) error {
+ err := db.Update(func(tx *Tx) error {
b, err := tx.CreateBucket([]byte("widgets"))
if err != nil {
t.Fatal(err)
}
- if _, err := b.CreateBucket([]byte("foo")); err != nil {
+
+ _, err = b.CreateBucket([]byte("foo"))
+ if err != nil {
t.Fatal(err)
}
- if _, err := b.CreateBucket([]byte("bar")); err != nil {
+
+ _, err = b.CreateBucket([]byte("bar"))
+ if err != nil {
t.Fatal(err)
}
- if _, err := b.CreateBucket([]byte("baz")); err != nil {
+
+ _, err = b.CreateBucket([]byte("baz"))
+ if err != nil {
t.Fatal(err)
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
- if err := db.View(func(tx *Tx) error {
- var names []string
+ err = db.View(func(tx *Tx) error {
+ names := []string{}
c := tx.Bucket([]byte("widgets")).Cursor()
for k, v := c.First(); k != nil; k, v = c.Next() {
names = append(names, string(k))
@@ -2259,11 +2683,14 @@ func TestCursor_QuickCheck_BucketsOnly(t *testing.T) {
t.Fatalf("unexpected value: %v", v)
}
}
+
if !reflect.DeepEqual(names, []string{"bar", "baz", "foo"}) {
t.Fatalf("unexpected names: %+v", names)
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
}
@@ -2274,27 +2701,35 @@ func TestCursor_QuickCheck_BucketsOnly_Reverse(t *testing.T) {
defer db.MustClose()
defer os.Remove(db.Path())
- if err := db.Update(func(tx *Tx) error {
+ err := db.Update(func(tx *Tx) error {
b, err := tx.CreateBucket([]byte("widgets"))
if err != nil {
t.Fatal(err)
}
- if _, err := b.CreateBucket([]byte("foo")); err != nil {
+
+ _, err = b.CreateBucket([]byte("foo"))
+ if err != nil {
t.Fatal(err)
}
- if _, err := b.CreateBucket([]byte("bar")); err != nil {
+
+ _, err = b.CreateBucket([]byte("bar"))
+ if err != nil {
t.Fatal(err)
}
- if _, err := b.CreateBucket([]byte("baz")); err != nil {
+
+ _, err = b.CreateBucket([]byte("baz"))
+ if err != nil {
t.Fatal(err)
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
- if err := db.View(func(tx *Tx) error {
- var names []string
+ err = db.View(func(tx *Tx) error {
+ names := []string{}
c := tx.Bucket([]byte("widgets")).Cursor()
for k, v := c.Last(); k != nil; k, v = c.Prev() {
names = append(names, string(k))
@@ -2302,11 +2737,14 @@ func TestCursor_QuickCheck_BucketsOnly_Reverse(t *testing.T) {
t.Fatalf("unexpected value: %v", v)
}
}
+
if !reflect.DeepEqual(names, []string{"foo", "baz", "bar"}) {
t.Fatalf("unexpected names: %+v", names)
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
}
@@ -2320,7 +2758,7 @@ func ExampleCursor() {
defer os.Remove(db.Path())
// Start a read-write transaction.
- if err := db.Update(func(tx *Tx) error {
+ err = db.Update(func(tx *Tx) error {
// Create a new bucket.
b, err := tx.CreateBucket([]byte("animals"))
if err != nil {
@@ -2328,13 +2766,18 @@ func ExampleCursor() {
}
// Insert data into a bucket.
- if err := b.Put([]byte("dog"), []byte("fun")); err != nil {
+ err = b.Put([]byte("dog"), []byte("fun"))
+ if err != nil {
log.Fatal(err)
}
- if err := b.Put([]byte("cat"), []byte("lame")); err != nil {
+
+ err = b.Put([]byte("cat"), []byte("lame"))
+ if err != nil {
log.Fatal(err)
}
- if err := b.Put([]byte("liger"), []byte("awesome")); err != nil {
+
+ err = b.Put([]byte("liger"), []byte("awesome"))
+ if err != nil {
log.Fatal(err)
}
@@ -2345,17 +2788,20 @@ func ExampleCursor() {
// first key/value pair and updates the k/v variables to the
// next key/value on each iteration.
//
- // The loop finishes at the end of the cursor when a nil key is returned.
+ // The loop finishes at the end of the cursor when a nil key is
+ // returned.
for k, v := c.First(); k != nil; k, v = c.Next() {
fmt.Printf("A %s is %s.\n", k, v)
}
return nil
- }); err != nil {
+ })
+ if err != nil {
log.Fatal(err)
}
- if err := db.Close(); err != nil {
+ err = db.Close()
+ if err != nil {
log.Fatal(err)
}
@@ -2374,7 +2820,7 @@ func ExampleCursor_reverse() {
defer os.Remove(db.Path())
// Start a read-write transaction.
- if err := db.Update(func(tx *Tx) error {
+ err = db.Update(func(tx *Tx) error {
// Create a new bucket.
b, err := tx.CreateBucket([]byte("animals"))
if err != nil {
@@ -2382,13 +2828,18 @@ func ExampleCursor_reverse() {
}
// Insert data into a bucket.
- if err := b.Put([]byte("dog"), []byte("fun")); err != nil {
+ err = b.Put([]byte("dog"), []byte("fun"))
+ if err != nil {
log.Fatal(err)
}
- if err := b.Put([]byte("cat"), []byte("lame")); err != nil {
+
+ err = b.Put([]byte("cat"), []byte("lame"))
+ if err != nil {
log.Fatal(err)
}
- if err := b.Put([]byte("liger"), []byte("awesome")); err != nil {
+
+ err = b.Put([]byte("liger"), []byte("awesome"))
+ if err != nil {
log.Fatal(err)
}
@@ -2399,19 +2850,21 @@ func ExampleCursor_reverse() {
// from the last key/value pair and updates the k/v variables to
// the previous key/value on each iteration.
//
- // The loop finishes at the beginning of the cursor when a nil key
- // is returned.
+ // The loop finishes at the beginning of the cursor when a nil
+ // key is returned.
for k, v := c.Last(); k != nil; k, v = c.Prev() {
fmt.Printf("A %s is %s.\n", k, v)
}
return nil
- }); err != nil {
+ })
+ if err != nil {
log.Fatal(err)
}
// Close the database to release the file lock.
- if err := db.Close(); err != nil {
+ err = db.Close()
+ if err != nil {
log.Fatal(err)
}
@@ -2435,11 +2888,13 @@ func TestOpen(t *testing.T) {
}
defer os.Remove(db.Path())
- if s := db.Path(); s != path {
+ s := db.Path()
+ if s != path {
t.Fatalf("unexpected path: %s", s)
}
- if err := db.Close(); err != nil {
+ err = db.Close()
+ if err != nil {
t.Fatal(err)
}
}
@@ -2460,7 +2915,8 @@ func TestOpen_ErrNotExists(t *testing.T) {
}
}
-// Ensure that opening a file with two invalid versions returns ErrVersionMismatch.
+// Ensure that opening a file with two invalid versions returns
+// ErrVersionMismatch.
func TestOpen_ErrVersionMismatch(t *testing.T) {
if pageSize != os.Getpagesize() {
t.Skip("page size mismatch")
@@ -2473,7 +2929,8 @@ func TestOpen_ErrVersionMismatch(t *testing.T) {
path := db.Path()
// Close database.
- if err := db.DB.Close(); err != nil {
+ err := db.DB.Close()
+ if err != nil {
t.Fatal(err)
}
@@ -2488,12 +2945,14 @@ func TestOpen_ErrVersionMismatch(t *testing.T) {
meta0.version++
meta1 := (*meta)(unsafe.Pointer(&buf[pageSize+pageHeaderSize]))
meta1.version++
- if err := ioutil.WriteFile(path, buf, 0666); err != nil {
+ err = ioutil.WriteFile(path, buf, 0666)
+ if err != nil {
t.Fatal(err)
}
// Reopen data file.
- if _, err := Open(path); err != ErrVersionMismatch {
+ _, err = Open(path)
+ if err != ErrVersionMismatch {
t.Fatalf("unexpected error: %s", err)
}
}
@@ -2511,7 +2970,8 @@ func TestOpen_ErrChecksum(t *testing.T) {
path := db.Path()
// Close database.
- if err := db.DB.Close(); err != nil {
+ err := db.DB.Close()
+ if err != nil {
t.Fatal(err)
}
@@ -2526,12 +2986,14 @@ func TestOpen_ErrChecksum(t *testing.T) {
meta0.pgid++
meta1 := (*meta)(unsafe.Pointer(&buf[pageSize+pageHeaderSize]))
meta1.pgid++
- if err := ioutil.WriteFile(path, buf, 0666); err != nil {
+ err = ioutil.WriteFile(path, buf, 0666)
+ if err != nil {
t.Fatal(err)
}
// Reopen data file.
- if _, err := Open(path); err != ErrChecksum {
+ _, err = Open(path)
+ if err != ErrChecksum {
t.Fatalf("unexpected error: %s", err)
}
}
@@ -2548,22 +3010,29 @@ func TestOpen_Size(t *testing.T) {
pagesize := db.pageSize
// Insert until we get above the minimum 4MB size.
- if err := db.Update(func(tx *Tx) error {
+ err := db.Update(func(tx *Tx) error {
b, _ := tx.CreateBucketIfNotExists([]byte("data"))
for i := 0; i < 10000; i++ {
- if err := b.Put([]byte(fmt.Sprintf("%04d", i)), make([]byte, 1000)); err != nil {
+ err := b.Put(
+ []byte(fmt.Sprintf("%04d", i)),
+ make([]byte, 1000),
+ )
+ if err != nil {
t.Fatal(err)
}
}
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
// Close database and grab the size.
- if err := db.DB.Close(); err != nil {
+ err = db.DB.Close()
+ if err != nil {
t.Fatal(err)
}
+
sz := fileSize(path)
if sz == 0 {
t.Fatalf("unexpected new file size: %d", sz)
@@ -2574,30 +3043,38 @@ func TestOpen_Size(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- if err := db0.Update(func(tx *Tx) error {
- if err := tx.Bucket([]byte("data")).Put([]byte{0}, []byte{0}); err != nil {
+
+ err = db0.Update(func(tx *Tx) error {
+ err := tx.Bucket([]byte("data")).Put([]byte{0}, []byte{0})
+ if err != nil {
t.Fatal(err)
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
- if err := db0.Close(); err != nil {
+
+ err = db0.Close()
+ if err != nil {
t.Fatal(err)
}
+
newSz := fileSize(path)
if newSz == 0 {
t.Fatalf("unexpected new file size: %d", newSz)
}
- // Compare the original size with the new size.
- // db size might increase by a few page sizes due to the new small update.
+ // Compare the original size with the new size. Database size might
+ // increase by a few page sizes due to the new small update.
if sz < newSz-5*int64(pagesize) {
t.Fatalf("unexpected file growth: %d => %d", sz, newSz)
}
}
-// Ensure that opening a database beyond the max step size does not increase its size.
+// Ensure that opening a database beyond the max step size does not increase its
+// size.
// https://github.com/boltdb/bolt/issues/303
func TestOpen_Size_Large(t *testing.T) {
return // FIXME: way too slow
@@ -2618,24 +3095,29 @@ func TestOpen_Size_Large(t *testing.T) {
// Insert until we get above the minimum 4MB size.
var index uint64
for i := 0; i < n1; i++ {
- if err := db.Update(func(tx *Tx) error {
+ err := db.Update(func(tx *Tx) error {
b, _ := tx.CreateBucketIfNotExists([]byte("data"))
for j := 0; j < n2; j++ {
- if err := b.Put(u64tob(index), make([]byte, 50)); err != nil {
+ err := b.Put(u64tob(index), make([]byte, 50))
+ if err != nil {
t.Fatal(err)
}
+
index++
}
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
}
// Close database and grab the size.
- if err := db.DB.Close(); err != nil {
+ err := db.DB.Close()
+ if err != nil {
t.Fatal(err)
}
+
sz := fileSize(path)
if sz == 0 {
t.Fatalf("unexpected new file size: %d", sz)
@@ -2648,12 +3130,16 @@ func TestOpen_Size_Large(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- if err := db0.Update(func(tx *Tx) error {
+
+ err = db0.Update(func(tx *Tx) error {
return tx.Bucket([]byte("data")).Put([]byte{0}, []byte{0})
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
- if err := db0.Close(); err != nil {
+
+ err = db0.Close()
+ if err != nil {
t.Fatal(err)
}
@@ -2662,8 +3148,8 @@ func TestOpen_Size_Large(t *testing.T) {
t.Fatalf("unexpected new file size: %d", newSz)
}
- // Compare the original size with the new size.
- // db size might increase by a few page sizes due to the new small update.
+ // Compare the original size with the new size. Database size might
+ // increase by a few page sizes due to the new small update.
if sz < newSz - (5 * int64(pagesize)) {
t.Fatalf("unexpected file growth: %d => %d", sz, newSz)
}
@@ -2679,11 +3165,13 @@ func TestOpen_Check(t *testing.T) {
t.Fatal(err)
}
- if err := db.View(func(tx *Tx) error { return <-tx.Check() }); err != nil {
+ err = db.View(func(tx *Tx) error { return <-tx.Check() })
+ if err != nil {
t.Fatal(err)
}
- if err := db.Close(); err != nil {
+ err = db.Close()
+ if err != nil {
t.Fatal(err)
}
@@ -2692,16 +3180,19 @@ func TestOpen_Check(t *testing.T) {
t.Fatal(err)
}
- if err := db.View(func(tx *Tx) error { return <-tx.Check() }); err != nil {
+ err = db.View(func(tx *Tx) error { return <-tx.Check() })
+ if err != nil {
t.Fatal(err)
}
- if err := db.Close(); err != nil {
+ err = db.Close()
+ if err != nil {
t.Fatal(err)
}
}
-// Ensure that write errors to the meta file handler during initialization are returned.
+// Ensure that write errors to the meta file handle during initialization are
+// returned.
func TestOpen_MetaInitWriteError(t *testing.T) {
// FIXME
// t.Skip("pending")
@@ -2716,12 +3207,15 @@ func TestOpen_FileTooSmall(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- if err := db.Close(); err != nil {
+
+ err = db.Close()
+ if err != nil {
t.Fatal(err)
}
// corrupt the database
- if err := os.Truncate(path, int64(os.Getpagesize())); err != nil {
+ err = os.Truncate(path, int64(os.Getpagesize()))
+ if err != nil {
t.Fatal(err)
}
@@ -2776,9 +3270,11 @@ func TestDB_Open_InitialMmapSize(t *testing.T) {
done := make(chan struct{})
go func() {
- if err := wtx.Commit(); err != nil {
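+ // The commit must not block on the open read transaction.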
+ err := wtx.Commit()
+ if err != nil {
t.Fatal(err)
}
+
done <- struct{}{}
}()
@@ -2788,7 +3284,8 @@ func TestDB_Open_InitialMmapSize(t *testing.T) {
case <-done:
}
- if err := rtx.Rollback(); err != nil {
+ err = rtx.Rollback()
+ if err != nil {
t.Fatal(err)
}
}
@@ -2796,7 +3293,8 @@ func TestDB_Open_InitialMmapSize(t *testing.T) {
// Ensure that a database cannot open a transaction when it's not open.
func TestDB_Begin_ErrDatabaseNotOpen(t *testing.T) {
var db DB
- if _, err := db.Begin(false); err != ErrDatabaseNotOpen {
+ _, err := db.Begin(false)
+ if err != ErrDatabaseNotOpen {
t.Fatalf("unexpected error: %s", err)
}
}
@@ -2820,7 +3318,8 @@ func TestDB_BeginRW(t *testing.T) {
t.Fatal("expected writable tx")
}
- if err := tx.Commit(); err != nil {
+ err = tx.Commit()
+ if err != nil {
t.Fatal(err)
}
}
@@ -2828,13 +3327,19 @@ func TestDB_BeginRW(t *testing.T) {
// Ensure that opening a transaction while the DB is closed returns an error.
func TestDB_BeginRW_Closed(t *testing.T) {
var db DB
- if _, err := db.Begin(true); err != ErrDatabaseNotOpen {
+ _, err := db.Begin(true)
+ if err != ErrDatabaseNotOpen {
t.Fatalf("unexpected error: %s", err)
}
}
-func TestDB_Close_PendingTx_RW(t *testing.T) { testDB_Close_PendingTx(t, true) }
-func TestDB_Close_PendingTx_RO(t *testing.T) { testDB_Close_PendingTx(t, false) }
+func TestDB_Close_PendingTx_RW(t *testing.T) {
+ testDB_Close_PendingTx(t, true)
+}
+
+func TestDB_Close_PendingTx_RO(t *testing.T) {
+ testDB_Close_PendingTx(t, false)
+}
// Ensure that a database cannot close while transactions are open.
func testDB_Close_PendingTx(t *testing.T, writable bool) {
@@ -2854,9 +3359,11 @@ func testDB_Close_PendingTx(t *testing.T, writable bool) {
// Open update in separate goroutine.
done := make(chan struct{})
go func() {
- if err := db.Close(); err != nil {
+ err := db.Close()
+ if err != nil {
t.Fatal(err)
}
+
close(done)
}()
@@ -2869,16 +3376,17 @@ func testDB_Close_PendingTx(t *testing.T, writable bool) {
}
// Commit transaction.
- if err := tx.Commit(); err != nil {
+ err = tx.Commit()
+ if err != nil {
t.Fatal(err)
}
// Ensure database closed now.
time.Sleep(100 * time.Millisecond)
select {
- case <-done:
- default:
- t.Fatal("database did not close")
+ case <-done:
+ default:
+ t.Fatal("database did not close")
}
}
@@ -2888,34 +3396,48 @@ func TestDB_Update(t *testing.T) {
defer db.MustClose()
defer os.Remove(db.Path())
- if err := db.Update(func(tx *Tx) error {
+ err := db.Update(func(tx *Tx) error {
b, err := tx.CreateBucket([]byte("widgets"))
if err != nil {
t.Fatal(err)
}
- if err := b.Put([]byte("foo"), []byte("bar")); err != nil {
+
+ err = b.Put([]byte("foo"), []byte("bar"))
+ if err != nil {
t.Fatal(err)
}
- if err := b.Put([]byte("baz"), []byte("bat")); err != nil {
+
+ err = b.Put([]byte("baz"), []byte("bat"))
+ if err != nil {
t.Fatal(err)
}
- if err := b.Delete([]byte("foo")); err != nil {
+
+ err = b.Delete([]byte("foo"))
+ if err != nil {
t.Fatal(err)
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
- if err := db.View(func(tx *Tx) error {
+
+ err = db.View(func(tx *Tx) error {
b := tx.Bucket([]byte("widgets"))
- if v := b.Get([]byte("foo")); v != nil {
+ v := b.Get([]byte("foo"))
+ if v != nil {
t.Fatalf("expected nil value, got: %v", v)
}
- if v := b.Get([]byte("baz")); !bytes.Equal(v, []byte("bat")) {
+
+ v = b.Get([]byte("baz"))
+ if !bytes.Equal(v, []byte("bat")) {
t.Fatalf("unexpected value: %v", v)
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
}
@@ -2923,12 +3445,15 @@ func TestDB_Update(t *testing.T) {
// Ensure a closed database returns an error while running a transaction block
func TestDB_Update_Closed(t *testing.T) {
var db DB
- if err := db.Update(func(tx *Tx) error {
- if _, err := tx.CreateBucket([]byte("widgets")); err != nil {
+ err := db.Update(func(tx *Tx) error {
+ _, err := tx.CreateBucket([]byte("widgets"))
+ if err != nil {
t.Fatal(err)
}
+
return nil
- }); err != ErrDatabaseNotOpen {
+ })
+ if err != ErrDatabaseNotOpen {
t.Fatalf("unexpected error: %s", err)
}
}
@@ -2940,20 +3465,23 @@ func TestDB_Update_ManualCommit(t *testing.T) {
defer os.Remove(db.Path())
var panicked bool
- if err := db.Update(func(tx *Tx) error {
+ err := db.Update(func(tx *Tx) error {
func() {
defer func() {
- if r := recover(); r != nil {
+ r := recover()
+ if r != nil {
panicked = true
}
}()
- if err := tx.Commit(); err != nil {
+ err := tx.Commit()
+ if err != nil {
t.Fatal(err)
}
}()
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
} else if !panicked {
t.Fatal("expected panic")
@@ -2967,20 +3495,23 @@ func TestDB_Update_ManualRollback(t *testing.T) {
defer os.Remove(db.Path())
var panicked bool
- if err := db.Update(func(tx *Tx) error {
+ err := db.Update(func(tx *Tx) error {
func() {
defer func() {
- if r := recover(); r != nil {
+ r := recover()
+ if r != nil {
panicked = true
}
}()
- if err := tx.Rollback(); err != nil {
+ err := tx.Rollback()
+ if err != nil {
t.Fatal(err)
}
}()
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
} else if !panicked {
t.Fatal("expected panic")
@@ -2994,20 +3525,24 @@ func TestDB_View_ManualCommit(t *testing.T) {
defer os.Remove(db.Path())
var panicked bool
- if err := db.View(func(tx *Tx) error {
+ err := db.View(func(tx *Tx) error {
func() {
defer func() {
- if r := recover(); r != nil {
+ r := recover()
+ if r != nil {
panicked = true
}
}()
- if err := tx.Commit(); err != nil {
+ err := tx.Commit()
+ if err != nil {
t.Fatal(err)
}
}()
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
} else if !panicked {
t.Fatal("expected panic")
@@ -3021,20 +3556,23 @@ func TestDB_View_ManualRollback(t *testing.T) {
defer os.Remove(db.Path())
var panicked bool
- if err := db.View(func(tx *Tx) error {
+ err := db.View(func(tx *Tx) error {
func() {
defer func() {
- if r := recover(); r != nil {
+ r := recover()
+ if r != nil {
panicked = true
}
}()
- if err := tx.Rollback(); err != nil {
+ err := tx.Rollback()
+ if err != nil {
t.Fatal(err)
}
}()
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
} else if !panicked {
t.Fatal("expected panic")
@@ -3050,51 +3588,61 @@ func TestDB_Update_Panic(t *testing.T) {
// Panic during update but recover.
func() {
defer func() {
- if r := recover(); r != nil {
+ r := recover()
+ if r != nil {
// t.Log("recover: update", r)
}
}()
- if err := db.Update(func(tx *Tx) error {
- if _, err := tx.CreateBucket([]byte("widgets")); err != nil {
+ err := db.Update(func(tx *Tx) error {
+ _, err := tx.CreateBucket([]byte("widgets"))
+ if err != nil {
t.Fatal(err)
}
+
panic("omg")
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
}()
// Verify we can update again.
- if err := db.Update(func(tx *Tx) error {
- if _, err := tx.CreateBucket([]byte("widgets")); err != nil {
+ err := db.Update(func(tx *Tx) error {
+ _, err := tx.CreateBucket([]byte("widgets"))
+ if err != nil {
t.Fatal(err)
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
// Verify that our change persisted.
- if err := db.Update(func(tx *Tx) error {
+ err = db.Update(func(tx *Tx) error {
if tx.Bucket([]byte("widgets")) == nil {
t.Fatal("expected bucket")
}
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
}
-// Ensure a database can return an error through a read-only transactional block.
+// Ensure a database can return an error through a read-only transactional
+// block.
func TestDB_View_Error(t *testing.T) {
db := MustOpenDB()
defer db.MustClose()
defer os.Remove(db.Path())
- if err := db.View(func(tx *Tx) error {
+ err := db.View(func(tx *Tx) error {
return errors.New("xxx")
- }); err == nil || err.Error() != "xxx" {
+ })
+ if err == nil || err.Error() != "xxx" {
t.Fatalf("unexpected error: %s", err)
}
}
@@ -3105,40 +3653,46 @@ func TestDB_View_Panic(t *testing.T) {
defer db.MustClose()
defer os.Remove(db.Path())
- if err := db.Update(func(tx *Tx) error {
- if _, err := tx.CreateBucket([]byte("widgets")); err != nil {
+ err := db.Update(func(tx *Tx) error {
+ _, err := tx.CreateBucket([]byte("widgets"))
+ if err != nil {
t.Fatal(err)
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
// Panic during view transaction but recover.
func() {
defer func() {
- if r := recover(); r != nil {
+ r := recover()
+ if r != nil {
// t.Log("recover: view", r)
}
}()
- if err := db.View(func(tx *Tx) error {
+ err := db.View(func(tx *Tx) error {
if tx.Bucket([]byte("widgets")) == nil {
t.Fatal("expected bucket")
}
panic("omg")
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
}()
// Verify that we can still use read transactions.
- if err := db.View(func(tx *Tx) error {
+ err = db.View(func(tx *Tx) error {
if tx.Bucket([]byte("widgets")) == nil {
t.Fatal("expected bucket")
}
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
}
@@ -3149,66 +3703,82 @@ func TestDB_Consistency(t *testing.T) {
defer db.MustClose()
defer os.Remove(db.Path())
- if err := db.Update(func(tx *Tx) error {
+ err := db.Update(func(tx *Tx) error {
_, err := tx.CreateBucket([]byte("widgets"))
return err
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
for i := 0; i < 10; i++ {
- if err := db.Update(func(tx *Tx) error {
- if err := tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar")); err != nil {
+ err := db.Update(func(tx *Tx) error {
+ err := tx.Bucket([]byte("widgets")).Put(
+ []byte("foo"),
+ []byte("bar"),
+ )
+ if err != nil {
t.Fatal(err)
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
}
- if err := db.Update(func(tx *Tx) error {
- if p, _ := tx.Page(0); p == nil {
+ err = db.Update(func(tx *Tx) error {
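+ // The file should now contain two meta pages, two free pages,
+ // a leaf page, and the freelist, in that order.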
+ p, _ := tx.Page(0)
+ if p == nil {
t.Fatal("expected page")
} else if p.Type != "meta" {
t.Fatalf("unexpected page type: %s", p.Type)
}
- if p, _ := tx.Page(1); p == nil {
+ p, _ = tx.Page(1)
+ if p == nil {
t.Fatal("expected page")
} else if p.Type != "meta" {
t.Fatalf("unexpected page type: %s", p.Type)
}
- if p, _ := tx.Page(2); p == nil {
+ p, _ = tx.Page(2)
+ if p == nil {
t.Fatal("expected page")
} else if p.Type != "free" {
t.Fatalf("unexpected page type: %s", p.Type)
}
- if p, _ := tx.Page(3); p == nil {
+ p, _ = tx.Page(3)
+ if p == nil {
t.Fatal("expected page")
} else if p.Type != "free" {
t.Fatalf("unexpected page type: %s", p.Type)
}
- if p, _ := tx.Page(4); p == nil {
+ p, _ = tx.Page(4)
+ if p == nil {
t.Fatal("expected page")
} else if p.Type != "leaf" {
t.Fatalf("unexpected page type: %s", p.Type)
}
- if p, _ := tx.Page(5); p == nil {
+ p, _ = tx.Page(5)
+ if p == nil {
t.Fatal("expected page")
} else if p.Type != "freelist" {
t.Fatalf("unexpected page type: %s", p.Type)
}
- if p, _ := tx.Page(6); p != nil {
+ p, _ = tx.Page(6)
+ if p != nil {
t.Fatal("unexpected page")
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
}
@@ -3219,12 +3789,15 @@ func TestDB_Batch(t *testing.T) {
defer db.MustClose()
defer os.Remove(db.Path())
- if err := db.Update(func(tx *Tx) error {
- if _, err := tx.CreateBucket([]byte("widgets")); err != nil {
+ err := db.Update(func(tx *Tx) error {
+ _, err := tx.CreateBucket([]byte("widgets"))
+ if err != nil {
t.Fatal(err)
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
@@ -3234,28 +3807,34 @@ func TestDB_Batch(t *testing.T) {
for i := 0; i < n; i++ {
go func(i int) {
ch <- db.Batch(func(tx *Tx) error {
- return tx.Bucket([]byte("widgets")).Put(u64tob(uint64(i)), []byte{})
+ return tx.Bucket([]byte("widgets")).Put(
+ u64tob(uint64(i)),
+ []byte{},
+ )
})
}(i)
}
// Check all responses to make sure there's no error.
for i := 0; i < n; i++ {
- if err := <-ch; err != nil {
+ err := <-ch
+ if err != nil {
t.Fatal(err)
}
}
// Ensure data is correct.
- if err := db.View(func(tx *Tx) error {
+ err = db.View(func(tx *Tx) error {
b := tx.Bucket([]byte("widgets"))
for i := 0; i < n; i++ {
- if v := b.Get(u64tob(uint64(i))); v == nil {
+ v := b.Get(u64tob(uint64(i)))
+ if v == nil {
t.Errorf("key not found: %d", i)
}
}
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
}
@@ -3265,15 +3844,18 @@ func TestDB_Batch_Panic(t *testing.T) {
defer db.MustClose()
defer os.Remove(db.Path())
- var sentinel int
- var bork = &sentinel
- var problem interface{}
- var err error
+ var (
+ sentinel int
+ problem interface{}
+ err error
+ )
+ bork := &sentinel
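+ // bork serves as a sentinel panic payload, compared by identity
+ // against the recovered value below.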
// Execute a function inside a batch that panics.
func() {
defer func() {
- if p := recover(); p != nil {
+ p := recover()
+ if p != nil {
problem = p
}
}()
@@ -3283,12 +3865,14 @@ func TestDB_Batch_Panic(t *testing.T) {
}()
// Verify there is no error.
- if g, e := err, error(nil); g != e {
+ g, e := err, error(nil)
+ if g != e {
t.Fatalf("wrong error: %v != %v", g, e)
}
// Verify the panic was captured.
- if g, e := problem, bork; g != e {
- t.Fatalf("wrong error: %v != %v", g, e)
+ p, b := problem, bork
+ if p != b {
+ t.Fatalf("wrong error: %v != %v", p, b)
}
}
@@ -3297,10 +3881,11 @@ func TestDB_BatchFull(t *testing.T) {
defer db.MustClose()
defer os.Remove(db.Path())
- if err := db.Update(func(tx *Tx) error {
+ err := db.Update(func(tx *Tx) error {
_, err := tx.CreateBucket([]byte("widgets"))
return err
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
@@ -3309,7 +3894,10 @@ func TestDB_BatchFull(t *testing.T) {
ch := make(chan error, size)
put := func(i int) {
ch <- db.Batch(func(tx *Tx) error {
- return tx.Bucket([]byte("widgets")).Put(u64tob(uint64(i)), []byte{})
+ return tx.Bucket([]byte("widgets")).Put(
+ u64tob(uint64(i)),
+ []byte{},
+ )
})
}
@@ -3334,21 +3922,25 @@ func TestDB_BatchFull(t *testing.T) {
// Check all responses to make sure there's no error.
for i := 0; i < size; i++ {
- if err := <-ch; err != nil {
+ err := <-ch
+ if err != nil {
t.Fatal(err)
}
}
// Ensure data is correct.
- if err := db.View(func(tx *Tx) error {
+ err = db.View(func(tx *Tx) error {
b := tx.Bucket([]byte("widgets"))
for i := 1; i <= size; i++ {
- if v := b.Get(u64tob(uint64(i))); v == nil {
+ v := b.Get(u64tob(uint64(i)))
+ if v == nil {
t.Errorf("key not found: %d", i)
}
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
}
@@ -3358,10 +3950,11 @@ func TestDB_BatchTime(t *testing.T) {
defer db.MustClose()
defer os.Remove(db.Path())
- if err := db.Update(func(tx *Tx) error {
+ err := db.Update(func(tx *Tx) error {
_, err := tx.CreateBucket([]byte("widgets"))
return err
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
@@ -3370,7 +3963,10 @@ func TestDB_BatchTime(t *testing.T) {
ch := make(chan error, size)
put := func(i int) {
ch <- db.Batch(func(tx *Tx) error {
- return tx.Bucket([]byte("widgets")).Put(u64tob(uint64(i)), []byte{})
+ return tx.Bucket([]byte("widgets")).Put(
+ u64tob(uint64(i)),
+ []byte{},
+ )
})
}
@@ -3389,7 +3985,7 @@ func TestDB_BatchTime(t *testing.T) {
}
// Ensure data is correct.
- if err := db.View(func(tx *Tx) error {
+ err = db.View(func(tx *Tx) error {
b := tx.Bucket([]byte("widgets"))
for i := 1; i <= size; i++ {
if v := b.Get(u64tob(uint64(i))); v == nil {
@@ -3397,7 +3993,8 @@ func TestDB_BatchTime(t *testing.T) {
}
}
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
}
@@ -3411,30 +4008,36 @@ func ExampleDB_Update() {
defer os.Remove(db.Path())
// Execute several commands within a read-write transaction.
- if err := db.Update(func(tx *Tx) error {
+ err = db.Update(func(tx *Tx) error {
b, err := tx.CreateBucket([]byte("widgets"))
if err != nil {
return err
}
- if err := b.Put([]byte("foo"), []byte("bar")); err != nil {
+
+ err = b.Put([]byte("foo"), []byte("bar"))
+ if err != nil {
return err
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
log.Fatal(err)
}
// Read the value back from a separate read-only transaction.
- if err := db.View(func(tx *Tx) error {
+ err = db.View(func(tx *Tx) error {
value := tx.Bucket([]byte("widgets")).Get([]byte("foo"))
fmt.Printf("The value of 'foo' is: %s\n", value)
return nil
- }); err != nil {
+ })
+ if err != nil {
log.Fatal(err)
}
// Close database to release the file lock.
- if err := db.Close(); err != nil {
+ err = db.Close()
+ if err != nil {
log.Fatal(err)
}
@@ -3451,33 +4054,41 @@ func ExampleDB_View() {
defer os.Remove(db.Path())
// Insert data into a bucket.
- if err := db.Update(func(tx *Tx) error {
+ err = db.Update(func(tx *Tx) error {
b, err := tx.CreateBucket([]byte("people"))
if err != nil {
return err
}
- if err := b.Put([]byte("john"), []byte("doe")); err != nil {
+
+ err = b.Put([]byte("john"), []byte("doe"))
+ if err != nil {
return err
}
- if err := b.Put([]byte("susy"), []byte("que")); err != nil {
+
+ err = b.Put([]byte("susy"), []byte("que"))
+ if err != nil {
return err
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
log.Fatal(err)
}
// Access data from within a read-only transactional block.
- if err := db.View(func(tx *Tx) error {
+ err = db.View(func(tx *Tx) error {
v := tx.Bucket([]byte("people")).Get([]byte("john"))
fmt.Printf("John's last name is %s.\n", v)
return nil
- }); err != nil {
+ })
+ if err != nil {
log.Fatal(err)
}
// Close database to release the file lock.
- if err := db.Close(); err != nil {
+ err = db.Close()
+ if err != nil {
log.Fatal(err)
}
@@ -3494,10 +4105,11 @@ func ExampleDB_Begin_ReadOnly() {
defer os.Remove(db.Path())
// Create a bucket using a read-write transaction.
- if err := db.Update(func(tx *Tx) error {
+ err = db.Update(func(tx *Tx) error {
_, err := tx.CreateBucket([]byte("widgets"))
return err
- }); err != nil {
+ })
+ if err != nil {
log.Fatal(err)
}
@@ -3506,17 +4118,25 @@ func ExampleDB_Begin_ReadOnly() {
if err != nil {
log.Fatal(err)
}
+
b := tx.Bucket([]byte("widgets"))
- if err := b.Put([]byte("john"), []byte("blue")); err != nil {
+ err = b.Put([]byte("john"), []byte("blue"))
+ if err != nil {
log.Fatal(err)
}
- if err := b.Put([]byte("abby"), []byte("red")); err != nil {
+
+ err = b.Put([]byte("abby"), []byte("red"))
+ if err != nil {
log.Fatal(err)
}
- if err := b.Put([]byte("zephyr"), []byte("purple")); err != nil {
+
+ err = b.Put([]byte("zephyr"), []byte("purple"))
+ if err != nil {
log.Fatal(err)
}
- if err := tx.Commit(); err != nil {
+
+ err = tx.Commit()
+ if err != nil {
log.Fatal(err)
}
@@ -3530,11 +4150,13 @@ func ExampleDB_Begin_ReadOnly() {
fmt.Printf("%s likes %s\n", k, v)
}
- if err := tx.Rollback(); err != nil {
+ err = tx.Rollback()
+ if err != nil {
log.Fatal(err)
}
- if err := db.Close(); err != nil {
+ err = db.Close()
+ if err != nil {
log.Fatal(err)
}
@@ -3549,10 +4171,11 @@ func BenchmarkDBBatchAutomatic(b *testing.B) {
defer db.MustClose()
defer os.Remove(db.Path())
- if err := db.Update(func(tx *Tx) error {
+ err := db.Update(func(tx *Tx) error {
_, err := tx.CreateBucket([]byte("bench"))
return err
- }); err != nil {
+ })
+ if err != nil {
b.Fatal(err)
}
@@ -3577,7 +4200,9 @@ func BenchmarkDBBatchAutomatic(b *testing.B) {
b := tx.Bucket([]byte("bench"))
return b.Put(k, []byte("filler"))
}
- if err := db.Batch(insert); err != nil {
+
+ err := db.Batch(insert)
+ if err != nil {
b.Error(err)
return
}
@@ -3596,10 +4221,11 @@ func BenchmarkDBBatchSingle(b *testing.B) {
defer db.MustClose()
defer os.Remove(db.Path())
- if err := db.Update(func(tx *Tx) error {
+ err := db.Update(func(tx *Tx) error {
_, err := tx.CreateBucket([]byte("bench"))
return err
- }); err != nil {
+ })
+ if err != nil {
b.Fatal(err)
}
@@ -3623,7 +4249,9 @@ func BenchmarkDBBatchSingle(b *testing.B) {
b := tx.Bucket([]byte("bench"))
return b.Put(k, []byte("filler"))
}
- if err := db.Update(insert); err != nil {
+
+ err := db.Update(insert)
+ if err != nil {
b.Error(err)
return
}
@@ -3642,10 +4270,11 @@ func BenchmarkDBBatchManual10x100(b *testing.B) {
defer db.MustClose()
defer os.Remove(db.Path())
- if err := db.Update(func(tx *Tx) error {
+ err := db.Update(func(tx *Tx) error {
_, err := tx.CreateBucket([]byte("bench"))
return err
- }); err != nil {
+ })
+ if err != nil {
b.Fatal(err)
}
@@ -3663,19 +4292,29 @@ func BenchmarkDBBatchManual10x100(b *testing.B) {
insert100 := func(tx *Tx) error {
h := fnv.New32a()
buf := make([]byte, 4)
- for minor := uint32(0); minor < 100; minor++ {
- binary.LittleEndian.PutUint32(buf, uint32(id*100+minor))
+ for minor := 0; minor < 100; minor++ {
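+ // Derive a key id that is unique across all batches.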
+ n := (id * 100) + uint32(minor)
+ binary.LittleEndian.PutUint32(buf, n)
h.Reset()
_, _ = h.Write(buf[:])
k := h.Sum(nil)
b := tx.Bucket([]byte("bench"))
- if err := b.Put(k, []byte("filler")); err != nil {
+ err := b.Put(k, []byte("filler"))
+ if err != nil {
return err
}
}
return nil
}
- if err := db.Update(insert100); err != nil {
+
+ err := db.Update(insert100)
+ if err != nil {
b.Fatal(err)
}
}(uint32(major))
@@ -3689,7 +4328,7 @@ func BenchmarkDBBatchManual10x100(b *testing.B) {
}
func validateBatchBench(b *testing.B, db *WDB) {
- var rollback = errors.New("sentinel error to cause rollback")
+ rollback := errors.New("sentinel error to cause rollback")
validate := func(tx *Tx) error {
bucket := tx.Bucket([]byte("bench"))
h := fnv.New32a()
@@ -3704,10 +4343,20 @@ func validateBatchBench(b *testing.B, db *WDB) {
b.Errorf("not found id=%d key=%x", id, k)
continue
}
- if g, e := v, []byte("filler"); !bytes.Equal(g, e) {
- b.Errorf("bad value for id=%d key=%x: %s != %q", id, k, g, e)
+
+ g, e := v, []byte("filler")
+ if !bytes.Equal(g, e) {
+ b.Errorf(
+ "bad value for id=%d key=%x: %s != %q",
+ id,
+ k,
+ g,
+ e,
+ )
}
- if err := bucket.Delete(k); err != nil {
+
+ err := bucket.Delete(k)
+ if err != nil {
return err
}
}
@@ -3718,7 +4367,9 @@ func validateBatchBench(b *testing.B, db *WDB) {
}
return rollback
}
- if err := db.Update(validate); err != nil && err != rollback {
+
+ err := db.Update(validate)
+ if err != nil && err != rollback {
b.Error(err)
}
}
@@ -3748,19 +4399,23 @@ func (db *WDB) Close() error {
return db.DB.Close()
}
-// MustClose closes the database and deletes the underlying file. Panic on error.
+// MustClose closes the database and deletes the underlying file. Panics on
+// error.
func (db *WDB) MustClose() {
- if err := db.Close(); err != nil {
+ err := db.Close()
+ if err != nil {
panic(err)
}
+
os.Remove(db.Path())
}
-// MustCheck runs a consistency check on the database and panics if any errors are found.
+// MustCheck runs a consistency check on the database and panics if any errors
+// are found.
func (db *WDB) MustCheck() {
- if err := db.Update(func(tx *Tx) error {
+ err := db.Update(func(tx *Tx) error {
// Collect all the errors.
- var errors []error
+ errors := []error{}
for err := range tx.Check() {
errors = append(errors, err)
if len(errors) > 10 {
@@ -3770,15 +4425,19 @@ func (db *WDB) MustCheck() {
// If errors occurred, copy the DB and print the errors.
if len(errors) > 0 {
- var path = tempfile()
+ path := tempfile()
defer os.Remove(path)
- if err := tx.CopyFile(path, 0600); err != nil {
+ err := tx.CopyFile(path, 0600)
+ if err != nil {
panic(err)
}
// Print errors.
fmt.Print("\n\n")
- fmt.Printf("consistency check failed (%d errors)\n", len(errors))
+ fmt.Printf(
+ "consistency check failed (%d errors)\n",
+ len(errors),
+ )
for _, err := range errors {
fmt.Println(err)
}
@@ -3790,7 +4449,8 @@ func (db *WDB) MustCheck() {
}
return nil
- }); err != nil && err != ErrDatabaseNotOpen {
+ })
+ if err != nil && err != ErrDatabaseNotOpen {
panic(err)
}
}
@@ -3798,11 +4458,13 @@ func (db *WDB) MustCheck() {
// CopyTempFile copies a database to a temporary file.
func (db *WDB) CopyTempFile() {
path := tempfile()
- if err := db.View(func(tx *Tx) error {
+ err := db.View(func(tx *Tx) error {
return tx.CopyFile(path, 0600)
- }); err != nil {
+ })
+ if err != nil {
panic(err)
}
+
fmt.Println("db copied to: ", path)
}
@@ -3813,9 +4475,8 @@ func getTempdir() string {
shm = "/dev/shm"
tmp = "/tmp"
)
- var dir string
- dir = os.Getenv("XDG_RUNTIME_DIR")
+ dir := os.Getenv("XDG_RUNTIME_DIR")
if dir != "" {
return dir
}
@@ -3839,46 +4500,61 @@ func tempfile() string {
panic(err)
}
- if err := f.Close(); err != nil {
+ err = f.Close()
+ if err != nil {
panic(err)
}
- if err := os.Remove(f.Name()); err != nil {
+
+ err = os.Remove(f.Name())
+ if err != nil {
panic(err)
}
+
return f.Name()
}
// mustContainKeys checks that a bucket contains a given set of keys.
func mustContainKeys(b *Bucket, m map[string]string) {
found := make(map[string]string)
- if err := b.ForEach(func(k, _ []byte) error {
+ err := b.ForEach(func(k, _ []byte) error {
found[string(k)] = ""
return nil
- }); err != nil {
+ })
+ if err != nil {
panic(err)
}
// Check for keys found in bucket that shouldn't be there.
- var keys []string
+ keys := []string{}
for k, _ := range found {
- if _, ok := m[string(k)]; !ok {
+ _, ok := m[string(k)]
+ if !ok {
keys = append(keys, k)
}
}
if len(keys) > 0 {
sort.Strings(keys)
- panic(fmt.Sprintf("keys found(%d): %s", len(keys), strings.Join(keys, ",")))
+ panic(fmt.Sprintf(
+ "keys found(%d): %s",
+ len(keys),
+ strings.Join(keys, ","),
+ ))
}
// Check for keys not found in bucket that should be there.
for k, _ := range m {
- if _, ok := found[string(k)]; !ok {
+ _, ok := found[string(k)]
+ if !ok {
keys = append(keys, k)
}
}
if len(keys) > 0 {
sort.Strings(keys)
- panic(fmt.Sprintf("keys not found(%d): %s", len(keys), strings.Join(keys, ",")))
+ panic(fmt.Sprintf(
+ "keys not found(%d): %s",
+ len(keys),
+ strings.Join(keys, ","),
+ ))
}
}
@@ -3890,7 +4566,10 @@ func trunc(b []byte, length int) []byte {
}
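+// truncDuration drops the fractional part of a duration's string form.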
func truncDuration(d time.Duration) string {
- return regexp.MustCompile(`^(\d+)(\.\d+)`).ReplaceAllString(d.String(), "$1")
+ return regexp.MustCompile(`^(\d+)(\.\d+)`).ReplaceAllString(
+ d.String(),
+ "$1",
+ )
}
func fileSize(path string) int64 {
@@ -3916,7 +4595,7 @@ func TestFreelist_free(t *testing.T) {
f := newFreelist()
f.free(100, &page{id: 12})
if !reflect.DeepEqual([]pgid{12}, f.pending[100]) {
- t.Fatalf("exp=%v; got=%v", []pgid{12}, f.pending[100])
+ t.Fatalf("exp=%v, got=%v", []pgid{12}, f.pending[100])
}
}
@@ -3924,8 +4603,9 @@ func TestFreelist_free(t *testing.T) {
func TestFreelist_free_overflow(t *testing.T) {
f := newFreelist()
f.free(100, &page{id: 12, overflow: 3})
- if exp := []pgid{12, 13, 14, 15}; !reflect.DeepEqual(exp, f.pending[100]) {
- t.Fatalf("exp=%v; got=%v", exp, f.pending[100])
+ exp := []pgid{12, 13, 14, 15}
+ if !reflect.DeepEqual(exp, f.pending[100]) {
+ t.Fatalf("exp=%v, got=%v", exp, f.pending[100])
}
}
@@ -3937,55 +4617,79 @@ func TestFreelist_release(t *testing.T) {
f.free(102, &page{id: 39})
f.release(100)
f.release(101)
- if exp := []pgid{9, 12, 13}; !reflect.DeepEqual(exp, f.ids) {
- t.Fatalf("exp=%v; got=%v", exp, f.ids)
+ exp := []pgid{9, 12, 13}
+ if !reflect.DeepEqual(exp, f.ids) {
+ t.Fatalf("exp=%v, got=%v", exp, f.ids)
}
f.release(102)
- if exp := []pgid{9, 12, 13, 39}; !reflect.DeepEqual(exp, f.ids) {
- t.Fatalf("exp=%v; got=%v", exp, f.ids)
+ exp = []pgid{9, 12, 13, 39}
+ if !reflect.DeepEqual(exp, f.ids) {
+ t.Fatalf("exp=%v, got=%v", exp, f.ids)
}
}
// Ensure that a freelist can find contiguous blocks of pages.
func TestFreelist_allocate(t *testing.T) {
f := &freelist{ids: []pgid{3, 4, 5, 6, 7, 9, 12, 13, 18}}
- if id := int(f.allocate(3)); id != 3 {
- t.Fatalf("exp=3; got=%v", id)
+ id := int(f.allocate(3))
+ if id != 3 {
+ t.Fatalf("exp=3, got=%v", id)
}
- if id := int(f.allocate(1)); id != 6 {
- t.Fatalf("exp=6; got=%v", id)
+
+ id = int(f.allocate(1))
+ if id != 6 {
+ t.Fatalf("exp=6, got=%v", id)
}
- if id := int(f.allocate(3)); id != 0 {
- t.Fatalf("exp=0; got=%v", id)
+
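+ // No run of 3 contiguous page ids remains, so allocation fails.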
+ id = int(f.allocate(3))
+ if id != 0 {
+ t.Fatalf("exp=0, got=%v", id)
}
- if id := int(f.allocate(2)); id != 12 {
- t.Fatalf("exp=12; got=%v", id)
+
+ id = int(f.allocate(2))
+ if id != 12 {
+ t.Fatalf("exp=12, got=%v", id)
}
- if id := int(f.allocate(1)); id != 7 {
- t.Fatalf("exp=7; got=%v", id)
+
+ id = int(f.allocate(1))
+ if id != 7 {
+ t.Fatalf("exp=7, got=%v", id)
}
- if id := int(f.allocate(0)); id != 0 {
- t.Fatalf("exp=0; got=%v", id)
+
+ id = int(f.allocate(0))
+ if id != 0 {
+ t.Fatalf("exp=0, got=%v", id)
}
- if id := int(f.allocate(0)); id != 0 {
- t.Fatalf("exp=0; got=%v", id)
+
+ id = int(f.allocate(0))
+ if id != 0 {
+ t.Fatalf("exp=0, got=%v", id)
}
- if exp := []pgid{9, 18}; !reflect.DeepEqual(exp, f.ids) {
- t.Fatalf("exp=%v; got=%v", exp, f.ids)
+
+ exp := []pgid{9, 18}
+ if !reflect.DeepEqual(exp, f.ids) {
+ t.Fatalf("exp=%v, got=%v", exp, f.ids)
}
- if id := int(f.allocate(1)); id != 9 {
- t.Fatalf("exp=9; got=%v", id)
+ id = int(f.allocate(1))
+ if id != 9 {
+ t.Fatalf("exp=9, got=%v", id)
}
- if id := int(f.allocate(1)); id != 18 {
- t.Fatalf("exp=18; got=%v", id)
+
+ id = int(f.allocate(1))
+ if id != 18 {
+ t.Fatalf("exp=18, got=%v", id)
}
- if id := int(f.allocate(1)); id != 0 {
- t.Fatalf("exp=0; got=%v", id)
+
+ id = int(f.allocate(1))
+ if id != 0 {
+ t.Fatalf("exp=0, got=%v", id)
}
- if exp := []pgid{}; !reflect.DeepEqual(exp, f.ids) {
- t.Fatalf("exp=%v; got=%v", exp, f.ids)
+
+ exp = []pgid{}
+ if !reflect.DeepEqual(exp, f.ids) {
+ t.Fatalf("exp=%v, got=%v", exp, f.ids)
}
}
@@ -4007,8 +4711,9 @@ func TestFreelist_read(t *testing.T) {
f.read(page)
// Ensure that there are two page ids in the freelist.
- if exp := []pgid{23, 50}; !reflect.DeepEqual(exp, f.ids) {
- t.Fatalf("exp=%v; got=%v", exp, f.ids)
+ exp := []pgid{23, 50}
+ if !reflect.DeepEqual(exp, f.ids) {
+ t.Fatalf("exp=%v, got=%v", exp, f.ids)
}
}
@@ -4020,7 +4725,9 @@ func TestFreelist_write(t *testing.T) {
f.pending[100] = []pgid{28, 11}
f.pending[101] = []pgid{3}
p := (*page)(unsafe.Pointer(&buf[0]))
- if err := f.write(p); err != nil {
+
+ err := f.write(p)
+ if err != nil {
t.Fatal(err)
}
@@ -4030,15 +4737,27 @@ func TestFreelist_write(t *testing.T) {
// Ensure that the freelist is correct.
// All pages should be present and in reverse order.
- if exp := []pgid{3, 11, 12, 28, 39}; !reflect.DeepEqual(exp, f2.ids) {
- t.Fatalf("exp=%v; got=%v", exp, f2.ids)
+ exp := []pgid{3, 11, 12, 28, 39}
+ if !reflect.DeepEqual(exp, f2.ids) {
+ t.Fatalf("exp=%v, got=%v", exp, f2.ids)
}
}
-func Benchmark_FreelistRelease10K(b *testing.B) { benchmark_FreelistRelease(b, 10000) }
-func Benchmark_FreelistRelease100K(b *testing.B) { benchmark_FreelistRelease(b, 100000) }
-func Benchmark_FreelistRelease1000K(b *testing.B) { benchmark_FreelistRelease(b, 1000000) }
-func Benchmark_FreelistRelease10000K(b *testing.B) { benchmark_FreelistRelease(b, 10000000) }
+func Benchmark_FreelistRelease10K(b *testing.B) {
+ benchmark_FreelistRelease(b, 10000)
+}
+
+func Benchmark_FreelistRelease100K(b *testing.B) {
+ benchmark_FreelistRelease(b, 100000)
+}
+
+func Benchmark_FreelistRelease1000K(b *testing.B) {
+ benchmark_FreelistRelease(b, 1000000)
+}
+
+func Benchmark_FreelistRelease10000K(b *testing.B) {
+ benchmark_FreelistRelease(b, 10000000)
+}
func benchmark_FreelistRelease(b *testing.B, size int) {
ids := randomPgids(size)
@@ -4060,26 +4779,53 @@ func randomPgids(n int) []pgid {
return pgids
}
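+// puts is a test helper wrapping node.put; it converts string arguments to
+// byte slices so that call sites stay within 80 columns.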
+func puts(
+ n *node,
+ oldKey string,
+ newKey string,
+ value string,
+ pgid pgid,
+ flags uint32,
+) {
+ n.put([]byte(oldKey), []byte(newKey), []byte(value), pgid, flags)
+}
+
// Ensure that a node can insert a key/value.
func TestNode_put(t *testing.T) {
- n := &node{inodes: make(inodes, 0), bucket: &Bucket{tx: &Tx{meta: &meta{pgid: 1}}}}
- n.put([]byte("baz"), []byte("baz"), []byte("2"), 0, 0)
- n.put([]byte("foo"), []byte("foo"), []byte("0"), 0, 0)
- n.put([]byte("bar"), []byte("bar"), []byte("1"), 0, 0)
- n.put([]byte("foo"), []byte("foo"), []byte("3"), 0, leafPageFlag)
+ n := &node{
+ inodes: make(inodes, 0),
+ bucket: &Bucket{
+ tx: &Tx{
+ meta: &meta{
+ pgid: 1,
+ },
+ },
+ },
+ }
+ puts(n, "baz", "baz", "2", 0, 0)
+ puts(n, "foo", "foo", "0", 0, 0)
+ puts(n, "bar", "bar", "1", 0, 0)
+ puts(n, "foo", "foo", "3", 0, leafPageFlag)
if len(n.inodes) != 3 {
- t.Fatalf("exp=3; got=%d", len(n.inodes))
+ t.Fatalf("exp=3, got=%d", len(n.inodes))
}
- if k, v := n.inodes[0].key, n.inodes[0].value; string(k) != "bar" || string(v) != "1" {
- t.Fatalf("exp=<bar,1>; got=<%s,%s>", k, v)
+
+ k, v := n.inodes[0].key, n.inodes[0].value
+ if string(k) != "bar" || string(v) != "1" {
+ t.Fatalf("exp=<bar,1>, got=<%s,%s>", k, v)
}
- if k, v := n.inodes[1].key, n.inodes[1].value; string(k) != "baz" || string(v) != "2" {
- t.Fatalf("exp=<baz,2>; got=<%s,%s>", k, v)
+
+ k, v = n.inodes[1].key, n.inodes[1].value
+ if string(k) != "baz" || string(v) != "2" {
+ t.Fatalf("exp=<baz,2>, got=<%s,%s>", k, v)
}
- if k, v := n.inodes[2].key, n.inodes[2].value; string(k) != "foo" || string(v) != "3" {
- t.Fatalf("exp=<foo,3>; got=<%s,%s>", k, v)
+
+ k, v = n.inodes[2].key, n.inodes[2].value
+ if string(k) != "foo" || string(v) != "3" {
+ t.Fatalf("exp=<foo,3>, got=<%s,%s>", k, v)
}
+
if n.inodes[2].flags != uint32(leafPageFlag) {
t.Fatalf("not a leaf: %d", n.inodes[2].flags)
}
@@ -4095,8 +4841,12 @@ func TestNode_read_LeafPage(t *testing.T) {
// Insert 2 elements at the beginning. sizeof(leafPageElement) == 16
nodes := (*[3]leafPageElement)(unsafe.Pointer(&page.ptr))
- nodes[0] = leafPageElement{flags: 0, pos: 32, ksize: 3, vsize: 4} // pos = sizeof(leafPageElement) * 2
- nodes[1] = leafPageElement{flags: 0, pos: 23, ksize: 10, vsize: 3} // pos = sizeof(leafPageElement) + 3 + 4
+
+ // pos = sizeof(leafPageElement) * 2
+ nodes[0] = leafPageElement{flags: 0, pos: 32, ksize: 3, vsize: 4}
+
+ // pos = sizeof(leafPageElement) + 3 + 4
+ nodes[1] = leafPageElement{flags: 0, pos: 23, ksize: 10, vsize: 3}
// Write data for the nodes at the end.
data := (*[4096]byte)(unsafe.Pointer(&nodes[2]))
@@ -4111,24 +4861,40 @@ func TestNode_read_LeafPage(t *testing.T) {
if !n.isLeaf {
t.Fatal("expected leaf")
}
+
if len(n.inodes) != 2 {
- t.Fatalf("exp=2; got=%d", len(n.inodes))
+ t.Fatalf("exp=2, got=%d", len(n.inodes))
}
- if k, v := n.inodes[0].key, n.inodes[0].value; string(k) != "bar" || string(v) != "fooz" {
- t.Fatalf("exp=<bar,fooz>; got=<%s,%s>", k, v)
+
+ k, v := n.inodes[0].key, n.inodes[0].value
+ if string(k) != "bar" || string(v) != "fooz" {
+ t.Fatalf("exp=<bar,fooz>, got=<%s,%s>", k, v)
}
- if k, v := n.inodes[1].key, n.inodes[1].value; string(k) != "helloworld" || string(v) != "bye" {
- t.Fatalf("exp=<helloworld,bye>; got=<%s,%s>", k, v)
+
+ k, v = n.inodes[1].key, n.inodes[1].value
+ if string(k) != "helloworld" || string(v) != "bye" {
+ t.Fatalf("exp=<helloworld,bye>, got=<%s,%s>", k, v)
}
}
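
The pos values above are offsets from each element header to its key/value
data, which a quick calculation confirms (a sketch of the arithmetic, not code
from the package):

package main

import "fmt"

func main() {
	const elemSize = 16 // sizeof(leafPageElement)

	// nodes[0]: its data begins after both element headers.
	fmt.Println(2 * elemSize) // 32

	// nodes[1]: from its own header, skip the one remaining
	// header plus nodes[0]'s key "bar" (3) and value "fooz" (4).
	fmt.Println(elemSize + 3 + 4) // 23
}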
// Ensure that a node can serialize into a leaf page.
func TestNode_write_LeafPage(t *testing.T) {
// Create a node.
- n := &node{isLeaf: true, inodes: make(inodes, 0), bucket: &Bucket{tx: &Tx{db: &DB{}, meta: &meta{pgid: 1}}}}
- n.put([]byte("susy"), []byte("susy"), []byte("que"), 0, 0)
- n.put([]byte("ricki"), []byte("ricki"), []byte("lake"), 0, 0)
- n.put([]byte("john"), []byte("john"), []byte("johnson"), 0, 0)
+ n := &node{
+ isLeaf: true,
+ inodes: make(inodes, 0),
+ bucket: &Bucket{
+ tx: &Tx{
+ db: &DB{},
+ meta: &meta{
+ pgid: 1,
+ },
+ },
+ },
+ }
+ puts(n, "susy", "susy", "que", 0, 0)
+ puts(n, "ricki", "ricki", "lake", 0, 0)
+ puts(n, "john", "john", "johnson", 0, 0)
// Write it to a page.
var buf [4096]byte
@@ -4141,50 +4907,77 @@ func TestNode_write_LeafPage(t *testing.T) {
// Check that the two pages are the same.
if len(n2.inodes) != 3 {
- t.Fatalf("exp=3; got=%d", len(n2.inodes))
+ t.Fatalf("exp=3, got=%d", len(n2.inodes))
}
- if k, v := n2.inodes[0].key, n2.inodes[0].value; string(k) != "john" || string(v) != "johnson" {
- t.Fatalf("exp=<john,johnson>; got=<%s,%s>", k, v)
+
+ k, v := n2.inodes[0].key, n2.inodes[0].value
+ if string(k) != "john" || string(v) != "johnson" {
+ t.Fatalf("exp=<john,johnson>, got=<%s,%s>", k, v)
}
- if k, v := n2.inodes[1].key, n2.inodes[1].value; string(k) != "ricki" || string(v) != "lake" {
- t.Fatalf("exp=<ricki,lake>; got=<%s,%s>", k, v)
+
+ k, v = n2.inodes[1].key, n2.inodes[1].value
+ if string(k) != "ricki" || string(v) != "lake" {
+ t.Fatalf("exp=<ricki,lake>, got=<%s,%s>", k, v)
}
- if k, v := n2.inodes[2].key, n2.inodes[2].value; string(k) != "susy" || string(v) != "que" {
- t.Fatalf("exp=<susy,que>; got=<%s,%s>", k, v)
+
+ k, v = n2.inodes[2].key, n2.inodes[2].value
+ if string(k) != "susy" || string(v) != "que" {
+ t.Fatalf("exp=<susy,que>, got=<%s,%s>", k, v)
}
}
// Ensure that a node can split into appropriate subgroups.
func TestNode_split(t *testing.T) {
// Create a node.
- n := &node{inodes: make(inodes, 0), bucket: &Bucket{tx: &Tx{db: &DB{}, meta: &meta{pgid: 1}}}}
- n.put([]byte("00000001"), []byte("00000001"), []byte("0123456701234567"), 0, 0)
- n.put([]byte("00000002"), []byte("00000002"), []byte("0123456701234567"), 0, 0)
- n.put([]byte("00000003"), []byte("00000003"), []byte("0123456701234567"), 0, 0)
- n.put([]byte("00000004"), []byte("00000004"), []byte("0123456701234567"), 0, 0)
- n.put([]byte("00000005"), []byte("00000005"), []byte("0123456701234567"), 0, 0)
+ n := &node{
+ inodes: make(inodes, 0),
+ bucket: &Bucket{
+ tx: &Tx{
+ db: &DB{},
+ meta: &meta{
+ pgid: 1,
+ },
+ },
+ },
+ }
+ puts(n, "00000001", "00000001", "0123456701234567", 0, 0)
+ puts(n, "00000002", "00000002", "0123456701234567", 0, 0)
+ puts(n, "00000003", "00000003", "0123456701234567", 0, 0)
+ puts(n, "00000004", "00000004", "0123456701234567", 0, 0)
+ puts(n, "00000005", "00000005", "0123456701234567", 0, 0)
// Split between 2 & 3.
n.split(100)
- var parent = n.parent
+ parent := n.parent
if len(parent.children) != 2 {
- t.Fatalf("exp=2; got=%d", len(parent.children))
+ t.Fatalf("exp=2, got=%d", len(parent.children))
}
if len(parent.children[0].inodes) != 2 {
- t.Fatalf("exp=2; got=%d", len(parent.children[0].inodes))
+ t.Fatalf("exp=2, got=%d", len(parent.children[0].inodes))
}
if len(parent.children[1].inodes) != 3 {
- t.Fatalf("exp=3; got=%d", len(parent.children[1].inodes))
+ t.Fatalf("exp=3, got=%d", len(parent.children[1].inodes))
}
}
-// Ensure that a page with the minimum number of inodes just returns a single node.
+// Ensure that a page with the minimum number of inodes just returns a single
+// node.
func TestNode_split_MinKeys(t *testing.T) {
// Create a node.
- n := &node{inodes: make(inodes, 0), bucket: &Bucket{tx: &Tx{db: &DB{}, meta: &meta{pgid: 1}}}}
- n.put([]byte("00000001"), []byte("00000001"), []byte("0123456701234567"), 0, 0)
- n.put([]byte("00000002"), []byte("00000002"), []byte("0123456701234567"), 0, 0)
+ n := &node{
+ inodes: make(inodes, 0),
+ bucket: &Bucket{
+ tx: &Tx{
+ db: &DB{},
+ meta: &meta{
+ pgid: 1,
+ },
+ },
+ },
+ }
+ puts(n, "00000001", "00000001", "0123456701234567", 0, 0)
+ puts(n, "00000002", "00000002", "0123456701234567", 0, 0)
// Split.
n.split(20)
@@ -4193,15 +4986,26 @@ func TestNode_split_MinKeys(t *testing.T) {
}
}
-// Ensure that a node that has keys that all fit on a page just returns one leaf.
+// Ensure that a node that has keys that all fit on a page just returns one
+// leaf.
func TestNode_split_SinglePage(t *testing.T) {
// Create a node.
- n := &node{inodes: make(inodes, 0), bucket: &Bucket{tx: &Tx{db: &DB{}, meta: &meta{pgid: 1}}}}
- n.put([]byte("00000001"), []byte("00000001"), []byte("0123456701234567"), 0, 0)
- n.put([]byte("00000002"), []byte("00000002"), []byte("0123456701234567"), 0, 0)
- n.put([]byte("00000003"), []byte("00000003"), []byte("0123456701234567"), 0, 0)
- n.put([]byte("00000004"), []byte("00000004"), []byte("0123456701234567"), 0, 0)
- n.put([]byte("00000005"), []byte("00000005"), []byte("0123456701234567"), 0, 0)
+ n := &node{
+ inodes: make(inodes, 0),
+ bucket: &Bucket{
+ tx: &Tx{
+ db: &DB{},
+ meta: &meta{
+ pgid: 1,
+ },
+ },
+ },
+ }
+ puts(n, "00000001", "00000001", "0123456701234567", 0, 0)
+ puts(n, "00000002", "00000002", "0123456701234567", 0, 0)
+ puts(n, "00000003", "00000003", "0123456701234567", 0, 0)
+ puts(n, "00000004", "00000004", "0123456701234567", 0, 0)
+ puts(n, "00000005", "00000005", "0123456701234567", 0, 0)
// Split.
n.split(4096)
@@ -4212,20 +5016,29 @@ func TestNode_split_SinglePage(t *testing.T) {
// Ensure that the page type can be returned in human readable format.
func TestPage_typ(t *testing.T) {
- if typ := (&page{flags: branchPageFlag}).typ(); typ != "branch" {
- t.Fatalf("exp=branch; got=%v", typ)
+ typ := (&page{flags: branchPageFlag}).typ()
+ if typ != "branch" {
+ t.Fatalf("exp=branch, got=%v", typ)
}
- if typ := (&page{flags: leafPageFlag}).typ(); typ != "leaf" {
- t.Fatalf("exp=leaf; got=%v", typ)
+
+ typ = (&page{flags: leafPageFlag}).typ()
+ if typ != "leaf" {
+ t.Fatalf("exp=leaf, got=%v", typ)
}
- if typ := (&page{flags: metaPageFlag}).typ(); typ != "meta" {
- t.Fatalf("exp=meta; got=%v", typ)
+
+ typ = (&page{flags: metaPageFlag}).typ()
+ if typ != "meta" {
+ t.Fatalf("exp=meta, got=%v", typ)
}
- if typ := (&page{flags: freelistPageFlag}).typ(); typ != "freelist" {
- t.Fatalf("exp=freelist; got=%v", typ)
+
+ typ = (&page{flags: freelistPageFlag}).typ()
+ if typ != "freelist" {
+ t.Fatalf("exp=freelist, got=%v", typ)
}
- if typ := (&page{flags: 20000}).typ(); typ != "unknown<4e20>" {
- t.Fatalf("exp=unknown<4e20>; got=%v", typ)
+
+ typ = (&page{flags: 20000}).typ()
+ if typ != "unknown<4e20>" {
+ t.Fatalf("exp=unknown<4e20>, got=%v", typ)
}
}
@@ -4233,20 +5046,23 @@ func TestPgids_merge(t *testing.T) {
a := pgids{4, 5, 6, 10, 11, 12, 13, 27}
b := pgids{1, 3, 8, 9, 25, 30}
c := a.merge(b)
- if !reflect.DeepEqual(c, pgids{1, 3, 4, 5, 6, 8, 9, 10, 11, 12, 13, 25, 27, 30}) {
+ expected := pgids{1, 3, 4, 5, 6, 8, 9, 10, 11, 12, 13, 25, 27, 30}
+ if !reflect.DeepEqual(c, expected) {
t.Errorf("mismatch: %v", c)
}
a = pgids{4, 5, 6, 10, 11, 12, 13, 27, 35, 36}
b = pgids{8, 9, 25, 30}
c = a.merge(b)
- if !reflect.DeepEqual(c, pgids{4, 5, 6, 8, 9, 10, 11, 12, 13, 25, 27, 30, 35, 36}) {
+
+ expected = pgids{4, 5, 6, 8, 9, 10, 11, 12, 13, 25, 27, 30, 35, 36}
+ if !reflect.DeepEqual(c, expected) {
t.Errorf("mismatch: %v", c)
}
}
func TestPgids_merge_quick(t *testing.T) {
- if err := quick.Check(func(a, b pgids) bool {
+ err := quick.Check(func(a, b pgids) bool {
// Sort incoming lists.
sort.Sort(a)
sort.Sort(b)
@@ -4254,7 +5070,8 @@ func TestPgids_merge_quick(t *testing.T) {
// Merge the two lists together.
got := a.merge(b)
- // The expected value should be the two lists combined and sorted.
+ // The expected value should be the two lists combined and
+ // sorted.
exp := append(a, b...)
sort.Sort(exp)
@@ -4264,7 +5081,8 @@ func TestPgids_merge_quick(t *testing.T) {
}
return true
- }, nil); err != nil {
+ }, nil)
+ if err != nil {
t.Fatal(err)
}
}
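
testing/quick.Check drives the property function above with generated pgids
inputs and reports the first counterexample as an error. The same pattern in a
minimal, self-contained sketch:

package main

import (
	"fmt"
	"testing/quick"
)

func main() {
	// Property: concatenation preserves total length.
	prop := func(a, b []int) bool {
		return len(append(a, b...)) == len(a)+len(b)
	}

	err := quick.Check(prop, nil)
	if err != nil {
		fmt.Println("property failed:", err)
	}
}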
@@ -4279,7 +5097,13 @@ func TestPgids_merge_quick(t *testing.T) {
// -quick.maxvsize The maximum size of a value.
//
-var qcount, qseed, qmaxitems, qmaxksize, qmaxvsize int
+var (
+ qcount int
+ qseed int
+ qmaxitems int
+ qmaxksize int
+ qmaxvsize int
+)
func _init() {
flag.IntVar(&qcount, "quick.count", 5, "")
@@ -4289,7 +5113,14 @@ func _init() {
flag.IntVar(&qmaxvsize, "quick.maxvsize", 1024, "")
flag.Parse()
fmt.Fprintln(os.Stderr, "seed:", qseed)
- fmt.Fprintf(os.Stderr, "quick settings: count=%v, items=%v, ksize=%v, vsize=%v\n", qcount, qmaxitems, qmaxksize, qmaxvsize)
+ fmt.Fprintf(
+ os.Stderr,
+ "quick settings: count=%v, items=%v, ksize=%v, vsize=%v\n",
+ qcount,
+ qmaxitems,
+ qmaxksize,
+ qmaxvsize,
+ )
}
func qconfig() *quick.Config {
@@ -4301,9 +5132,17 @@ func qconfig() *quick.Config {
type testdata []testdataitem
-func (t testdata) Len() int { return len(t) }
-func (t testdata) Swap(i, j int) { t[i], t[j] = t[j], t[i] }
-func (t testdata) Less(i, j int) bool { return bytes.Compare(t[i].Key, t[j].Key) == -1 }
+func (t testdata) Len() int {
+ return len(t)
+}
+
+func (t testdata) Swap(i, j int) {
+ t[i], t[j] = t[j], t[i]
+}
+
+func (t testdata) Less(i, j int) bool {
+ return bytes.Compare(t[i].Key, t[j].Key) == -1
+}
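
testdata now spells out the three sort.Interface methods one per line. The
contract is the standard one; a self-contained sketch of the same shape on a
trivial type:

package main

import (
	"bytes"
	"fmt"
	"sort"
)

type byteSlices [][]byte

func (s byteSlices) Len() int {
	return len(s)
}

func (s byteSlices) Swap(i, j int) {
	s[i], s[j] = s[j], s[i]
}

func (s byteSlices) Less(i, j int) bool {
	return bytes.Compare(s[i], s[j]) == -1
}

func main() {
	s := byteSlices{[]byte("foo"), []byte("bar"), []byte("baz")}
	sort.Sort(s)
	fmt.Printf("%s\n", s) // prints: [bar baz foo]
}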
func (t testdata) Generate(rand *rand.Rand, size int) reflect.Value {
n := rand.Intn(qmaxitems-1) + 1
@@ -4311,7 +5150,8 @@ func (t testdata) Generate(rand *rand.Rand, size int) reflect.Value {
used := make(map[string]bool)
for i := 0; i < n; i++ {
item := &items[i]
- // Ensure that keys are unique by looping until we find one that we have not already used.
+ // Ensure that keys are unique by looping until we find one that
+ // we have not already used.
for {
item.Key = randByteSlice(rand, 1, qmaxksize)
if !used[string(item.Key)] {
@@ -4326,9 +5166,17 @@ func (t testdata) Generate(rand *rand.Rand, size int) reflect.Value {
type revtestdata []testdataitem
-func (t revtestdata) Len() int { return len(t) }
-func (t revtestdata) Swap(i, j int) { t[i], t[j] = t[j], t[i] }
-func (t revtestdata) Less(i, j int) bool { return bytes.Compare(t[i].Key, t[j].Key) == 1 }
+func (t revtestdata) Len() int {
+ return len(t)
+}
+
+func (t revtestdata) Swap(i, j int) {
+ t[i], t[j] = t[j], t[i]
+}
+
+func (t revtestdata) Less(i, j int) bool {
+ return bytes.Compare(t[i].Key, t[j].Key) == 1
+}
type testdataitem struct {
Key []byte
@@ -4344,24 +5192,64 @@ func randByteSlice(rand *rand.Rand, minSize, maxSize int) []byte {
return b
}
-func TestSimulate_1op_1p(t *testing.T) { testSimulate(t, 1, 1) }
-func TestSimulate_10op_1p(t *testing.T) { testSimulate(t, 10, 1) }
-func TestSimulate_100op_1p(t *testing.T) { testSimulate(t, 100, 1) }
-func TestSimulate_1000op_1p(t *testing.T) { testSimulate(t, 1000, 1) }
-func TestSimulate_10000op_1p(t *testing.T) { return; /* FIXME: is too slow */ testSimulate(t, 10000, 1) }
+func TestSimulate_1op_1p(t *testing.T) {
+ testSimulate(t, 1, 1)
+}
+
+func TestSimulate_10op_1p(t *testing.T) {
+ testSimulate(t, 10, 1)
+}
+
+func TestSimulate_100op_1p(t *testing.T) {
+ testSimulate(t, 100, 1)
+}
+
+func TestSimulate_1000op_1p(t *testing.T) {
+ testSimulate(t, 1000, 1)
+}
-func TestSimulate_10op_10p(t *testing.T) { testSimulate(t, 10, 10) }
-func TestSimulate_100op_10p(t *testing.T) { testSimulate(t, 100, 10) }
-func TestSimulate_1000op_10p(t *testing.T) { testSimulate(t, 1000, 10) }
-func TestSimulate_10000op_10p(t *testing.T) { return; /* FIXME: too slow */ testSimulate(t, 10000, 10) }
+func TestSimulate_10000op_1p(t *testing.T) {
+	t.Skip("FIXME: too slow")
+ testSimulate(t, 10000, 1)
+}
-func TestSimulate_100op_100p(t *testing.T) { testSimulate(t, 100, 100) }
-func TestSimulate_1000op_100p(t *testing.T) { testSimulate(t, 1000, 100) }
-func TestSimulate_10000op_100p(t *testing.T) { return; /* FIXME: too slow */ testSimulate(t, 10000, 100) }
+func TestSimulate_10op_10p(t *testing.T) {
+ testSimulate(t, 10, 10)
+}
-func TestSimulate_10000op_1000p(t *testing.T) { return; /* FIXME: too slow */ testSimulate(t, 10000, 1000) }
+func TestSimulate_100op_10p(t *testing.T) {
+ testSimulate(t, 100, 10)
+}
-// Randomly generate operations on a given database with multiple clients to ensure consistency and thread safety.
+func TestSimulate_1000op_10p(t *testing.T) {
+ testSimulate(t, 1000, 10)
+}
+
+func TestSimulate_10000op_10p(t *testing.T) {
+	t.Skip("FIXME: too slow")
+ testSimulate(t, 10000, 10)
+}
+
+func TestSimulate_100op_100p(t *testing.T) {
+ testSimulate(t, 100, 100)
+}
+
+func TestSimulate_1000op_100p(t *testing.T) {
+ testSimulate(t, 1000, 100)
+}
+
+func TestSimulate_10000op_100p(t *testing.T) {
+	t.Skip("FIXME: too slow")
+ testSimulate(t, 10000, 100)
+}
+
+func TestSimulate_10000op_1000p(t *testing.T) {
+	t.Skip("FIXME: too slow")
+ testSimulate(t, 10000, 1000)
+}
+
+// Randomly generate operations on a given database with multiple clients to
+// ensure consistency and thread safety.
func testSimulate(t *testing.T, threadCount, parallelism int) {
if testing.Short() {
t.Skip("skipping test in short mode.")
@@ -4370,10 +5258,15 @@ func testSimulate(t *testing.T, threadCount, parallelism int) {
rand.Seed(int64(qseed))
// A list of operations that readers and writers can perform.
- var readerHandlers = []simulateHandler{simulateGetHandler}
- var writerHandlers = []simulateHandler{simulateGetHandler, simulatePutHandler}
+ readerHandlers := []simulateHandler{
+ simulateGetHandler,
+ }
+ writerHandlers := []simulateHandler{
+ simulateGetHandler,
+ simulatePutHandler,
+ }
- var versions = make(map[int]*QuickDB)
+ versions := make(map[int]*QuickDB)
versions[1] = NewQuickDB()
db := MustOpenDB()
@@ -4384,7 +5277,7 @@ func testSimulate(t *testing.T, threadCount, parallelism int) {
// Run n threads in parallel, each with their own operation.
var wg sync.WaitGroup
- var threads = make(chan bool, parallelism)
+ threads := make(chan bool, parallelism)
var i int
for {
threads <- true
@@ -4417,14 +5310,16 @@ func testSimulate(t *testing.T, threadCount, parallelism int) {
}
mutex.Unlock()
- // Make sure we commit/rollback the tx at the end and update the state.
+ // Make sure we commit/rollback the tx at the end and
+ // update the state.
if writable {
defer func() {
mutex.Lock()
versions[tx.ID()] = qdb
mutex.Unlock()
- if err := tx.Commit(); err != nil {
+ err := tx.Commit()
+ if err != nil {
t.Fatal(err)
}
}()
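
The deferred block above publishes the new QuickDB version and then commits,
so the transaction closes however the handler exits. Reduced to its shape
(commitOnExit is a hypothetical helper; the mutex bookkeeping is elided):

func commitOnExit(t *testing.T, tx *Tx, publish func()) {
	defer func() {
		publish()

		err := tx.Commit()
		if err != nil {
			t.Fatal(err)
		}
	}()
	// ... writes against tx happen here ...
}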
@@ -4467,14 +5362,21 @@ func simulateGetHandler(tx *Tx, qdb *QuickDB) {
// Retrieve root bucket.
b := tx.Bucket(keys[0])
if b == nil {
- panic(fmt.Sprintf("bucket[0] expected: %08x\n", trunc(keys[0], 4)))
+ panic(fmt.Sprintf(
+ "bucket[0] expected: %08x\n",
+ trunc(keys[0], 4),
+ ))
}
// Drill into nested buckets.
for _, key := range keys[1 : len(keys)-1] {
b = b.Bucket(key)
if b == nil {
- panic(fmt.Sprintf("bucket[n] expected: %v -> %v\n", keys, key))
+ panic(fmt.Sprintf(
+ "bucket[n] expected: %v -> %v\n",
+ keys,
+ key,
+ ))
}
}
@@ -4519,7 +5421,8 @@ func simulatePutHandler(tx *Tx, qdb *QuickDB) {
}
// Insert into database.
- if err := b.Put(keys[len(keys)-1], value); err != nil {
+ err = b.Put(keys[len(keys)-1], value)
+ if err != nil {
panic("put: " + err.Error())
}
@@ -4560,9 +5463,11 @@ func (db *QuickDB) Get(keys [][]byte) []byte {
}
// Only return if it's a simple value.
- if value, ok := m[string(keys[len(keys)-1])].([]byte); ok {
+ value, ok := m[string(keys[len(keys)-1])].([]byte)
+ if ok {
return value
}
+
return nil
}
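
QuickDB mirrors the real store in nested maps so the simulation can
cross-check every read against expected state. A hedged usage sketch
(mirrorCheck is hypothetical):

func mirrorCheck() {
	qdb := NewQuickDB()
	keys := [][]byte{[]byte("a"), []byte("b")}

	qdb.Put(keys, []byte("v"))
	if !bytes.Equal(qdb.Get(keys), []byte("v")) {
		panic("mirror out of sync")
	}
}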
@@ -4574,13 +5479,16 @@ func (db *QuickDB) Put(keys [][]byte, value []byte) {
// Build buckets all the way down the key path.
m := db.m
for _, key := range keys[:len(keys)-1] {
- if _, ok := m[string(key)].([]byte); ok {
- return // Keypath intersects with a simple value. Do nothing.
+ _, ok := m[string(key)].([]byte)
+ if ok {
+ // Keypath intersects with a simple value. Do nothing.
+ return
}
if m[string(key)] == nil {
m[string(key)] = make(map[string]interface{})
}
+
m = m[string(key)].(map[string]interface{})
}
@@ -4605,9 +5513,11 @@ func (db *QuickDB) rand(m map[string]interface{}, keys *[][]byte) {
for k, v := range m {
if i == index {
*keys = append(*keys, []byte(k))
- if v, ok := v.(map[string]interface{}); ok {
+ v, ok := v.(map[string]interface{})
+ if ok {
db.rand(v, keys)
}
+
return
}
i++
@@ -4636,7 +5546,10 @@ func (db *QuickDB) copy(m map[string]interface{}) map[string]interface{} {
}
func randKey() []byte {
- var min, max = 1, 1024
+ const (
+ min = 1
+ max = 1024
+ )
n := rand.Intn(max-min) + min
b := make([]byte, n)
for i := 0; i < n; i++ {
@@ -4646,8 +5559,8 @@ func randKey() []byte {
}
func randKeys() [][]byte {
- var keys [][]byte
- var count = rand.Intn(2) + 2
+ keys := [][]byte{}
+ count := rand.Intn(2) + 2
for i := 0; i < count; i++ {
keys = append(keys, randKey())
}
@@ -4674,15 +5587,18 @@ func TestTx_Commit_ErrTxClosed(t *testing.T) {
t.Fatal(err)
}
- if _, err := tx.CreateBucket([]byte("foo")); err != nil {
+ _, err = tx.CreateBucket([]byte("foo"))
+ if err != nil {
t.Fatal(err)
}
- if err := tx.Commit(); err != nil {
+ err = tx.Commit()
+ if err != nil {
t.Fatal(err)
}
- if err := tx.Commit(); err != ErrTxClosed {
+ err = tx.Commit()
+ if err != ErrTxClosed {
t.Fatalf("unexpected error: %s", err)
}
}
@@ -4698,10 +5614,13 @@ func TestTx_Rollback_ErrTxClosed(t *testing.T) {
t.Fatal(err)
}
- if err := tx.Rollback(); err != nil {
+ err = tx.Rollback()
+ if err != nil {
t.Fatal(err)
}
- if err := tx.Rollback(); err != ErrTxClosed {
+
+ err = tx.Rollback()
+ if err != ErrTxClosed {
t.Fatalf("unexpected error: %s", err)
}
}
@@ -4716,7 +5635,9 @@ func TestTx_Commit_ErrTxNotWritable(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- if err := tx.Commit(); err != ErrTxNotWritable {
+
+ err = tx.Commit()
+ if err != ErrTxNotWritable {
t.Fatal(err)
}
}
@@ -4727,36 +5648,42 @@ func TestTx_Cursor(t *testing.T) {
defer db.MustClose()
defer os.Remove(db.Path())
- if err := db.Update(func(tx *Tx) error {
- if _, err := tx.CreateBucket([]byte("widgets")); err != nil {
+ err := db.Update(func(tx *Tx) error {
+ _, err := tx.CreateBucket([]byte("widgets"))
+ if err != nil {
t.Fatal(err)
}
- if _, err := tx.CreateBucket([]byte("woojits")); err != nil {
+ _, err = tx.CreateBucket([]byte("woojits"))
+ if err != nil {
t.Fatal(err)
}
c := tx.Cursor()
- if k, v := c.First(); !bytes.Equal(k, []byte("widgets")) {
+ k, v := c.First()
+ if !bytes.Equal(k, []byte("widgets")) {
t.Fatalf("unexpected key: %v", k)
} else if v != nil {
t.Fatalf("unexpected value: %v", v)
}
- if k, v := c.Next(); !bytes.Equal(k, []byte("woojits")) {
+ k, v = c.Next()
+ if !bytes.Equal(k, []byte("woojits")) {
t.Fatalf("unexpected key: %v", k)
} else if v != nil {
t.Fatalf("unexpected value: %v", v)
}
- if k, v := c.Next(); k != nil {
+ k, v = c.Next()
+ if k != nil {
t.Fatalf("unexpected key: %v", k)
} else if v != nil {
t.Fatalf("unexpected value: %v", k)
}
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
}
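
The First/Next pairs above visit the two bucket names in key order. The same
cursor supports the usual full-scan loop; a sketch (dumpKeys is a hypothetical
helper, and v is nil for the bucket entries seen at the top level):

func dumpKeys(tx *Tx) {
	c := tx.Cursor()
	for k, v := c.First(); k != nil; k, v = c.Next() {
		fmt.Printf("key=%s value=%s\n", k, v)
	}
}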
@@ -4767,13 +5694,15 @@ func TestTx_CreateBucket_ErrTxNotWritable(t *testing.T) {
defer db.MustClose()
defer os.Remove(db.Path())
- if err := db.View(func(tx *Tx) error {
+ err := db.View(func(tx *Tx) error {
_, err := tx.CreateBucket([]byte("foo"))
if err != ErrTxNotWritable {
t.Fatalf("unexpected error: %s", err)
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
}
@@ -4788,11 +5717,14 @@ func TestTx_CreateBucket_ErrTxClosed(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- if err := tx.Commit(); err != nil {
+
+ err = tx.Commit()
+ if err != nil {
t.Fatal(err)
}
- if _, err := tx.CreateBucket([]byte("foo")); err != ErrTxClosed {
+ _, err = tx.CreateBucket([]byte("foo"))
+ if err != ErrTxClosed {
t.Fatalf("unexpected error: %s", err)
}
}
@@ -4803,15 +5735,19 @@ func TestTx_Bucket(t *testing.T) {
defer db.MustClose()
defer os.Remove(db.Path())
- if err := db.Update(func(tx *Tx) error {
- if _, err := tx.CreateBucket([]byte("widgets")); err != nil {
+ err := db.Update(func(tx *Tx) error {
+ _, err := tx.CreateBucket([]byte("widgets"))
+ if err != nil {
t.Fatal(err)
}
+
if tx.Bucket([]byte("widgets")) == nil {
t.Fatal("expected bucket")
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
}
@@ -4822,20 +5758,24 @@ func TestTx_Get_NotFound(t *testing.T) {
defer db.MustClose()
defer os.Remove(db.Path())
- if err := db.Update(func(tx *Tx) error {
+ err := db.Update(func(tx *Tx) error {
b, err := tx.CreateBucket([]byte("widgets"))
if err != nil {
t.Fatal(err)
}
- if err := b.Put([]byte("foo"), []byte("bar")); err != nil {
+ err = b.Put([]byte("foo"), []byte("bar"))
+ if err != nil {
t.Fatal(err)
}
+
if b.Get([]byte("no_such_key")) != nil {
t.Fatal("expected nil value")
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
}
@@ -4847,25 +5787,29 @@ func TestTx_CreateBucket(t *testing.T) {
defer os.Remove(db.Path())
// Create a bucket.
- if err := db.Update(func(tx *Tx) error {
+ err := db.Update(func(tx *Tx) error {
b, err := tx.CreateBucket([]byte("widgets"))
if err != nil {
t.Fatal(err)
} else if b == nil {
t.Fatal("expected bucket")
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
// Read the bucket through a separate transaction.
- if err := db.View(func(tx *Tx) error {
+ err = db.View(func(tx *Tx) error {
if tx.Bucket([]byte("widgets")) == nil {
t.Fatal("expected bucket")
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
}
@@ -4876,33 +5820,38 @@ func TestTx_CreateBucketIfNotExists(t *testing.T) {
defer db.MustClose()
defer os.Remove(db.Path())
- if err := db.Update(func(tx *Tx) error {
+ err := db.Update(func(tx *Tx) error {
// Create bucket.
- if b, err := tx.CreateBucketIfNotExists([]byte("widgets")); err != nil {
+ b, err := tx.CreateBucketIfNotExists([]byte("widgets"))
+ if err != nil {
t.Fatal(err)
} else if b == nil {
t.Fatal("expected bucket")
}
// Create bucket again.
- if b, err := tx.CreateBucketIfNotExists([]byte("widgets")); err != nil {
+ b, err = tx.CreateBucketIfNotExists([]byte("widgets"))
+ if err != nil {
t.Fatal(err)
} else if b == nil {
t.Fatal("expected bucket")
}
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
// Read the bucket through a separate transaction.
- if err := db.View(func(tx *Tx) error {
+ err = db.View(func(tx *Tx) error {
if tx.Bucket([]byte("widgets")) == nil {
t.Fatal("expected bucket")
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
}
@@ -4913,17 +5862,20 @@ func TestTx_CreateBucketIfNotExists_ErrBucketNameRequired(t *testing.T) {
defer db.MustClose()
defer os.Remove(db.Path())
- if err := db.Update(func(tx *Tx) error {
- if _, err := tx.CreateBucketIfNotExists([]byte{}); err != ErrBucketNameRequired {
+ err := db.Update(func(tx *Tx) error {
+ _, err := tx.CreateBucketIfNotExists([]byte{})
+ if err != ErrBucketNameRequired {
t.Fatalf("unexpected error: %s", err)
}
- if _, err := tx.CreateBucketIfNotExists(nil); err != ErrBucketNameRequired {
+ _, err = tx.CreateBucketIfNotExists(nil)
+ if err != ErrBucketNameRequired {
t.Fatalf("unexpected error: %s", err)
}
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
}
@@ -4935,22 +5887,28 @@ func TestTx_CreateBucket_ErrBucketExists(t *testing.T) {
defer os.Remove(db.Path())
// Create a bucket.
- if err := db.Update(func(tx *Tx) error {
- if _, err := tx.CreateBucket([]byte("widgets")); err != nil {
+ err := db.Update(func(tx *Tx) error {
+ _, err := tx.CreateBucket([]byte("widgets"))
+ if err != nil {
t.Fatal(err)
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
// Create the same bucket again.
- if err := db.Update(func(tx *Tx) error {
- if _, err := tx.CreateBucket([]byte("widgets")); err != ErrBucketExists {
+ err = db.Update(func(tx *Tx) error {
+ _, err := tx.CreateBucket([]byte("widgets"))
+ if err != ErrBucketExists {
t.Fatalf("unexpected error: %s", err)
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
}
@@ -4961,12 +5919,15 @@ func TestTx_CreateBucket_ErrBucketNameRequired(t *testing.T) {
defer db.MustClose()
defer os.Remove(db.Path())
- if err := db.Update(func(tx *Tx) error {
- if _, err := tx.CreateBucket(nil); err != ErrBucketNameRequired {
+ err := db.Update(func(tx *Tx) error {
+ _, err := tx.CreateBucket(nil)
+ if err != ErrBucketNameRequired {
t.Fatalf("unexpected error: %s", err)
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
}
@@ -4978,43 +5939,56 @@ func TestTx_DeleteBucket(t *testing.T) {
defer os.Remove(db.Path())
// Create a bucket and add a value.
- if err := db.Update(func(tx *Tx) error {
+ err := db.Update(func(tx *Tx) error {
b, err := tx.CreateBucket([]byte("widgets"))
if err != nil {
t.Fatal(err)
}
- if err := b.Put([]byte("foo"), []byte("bar")); err != nil {
+
+ err = b.Put([]byte("foo"), []byte("bar"))
+ if err != nil {
t.Fatal(err)
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
// Delete the bucket and make sure we can't get the value.
- if err := db.Update(func(tx *Tx) error {
- if err := tx.DeleteBucket([]byte("widgets")); err != nil {
+ err = db.Update(func(tx *Tx) error {
+ err := tx.DeleteBucket([]byte("widgets"))
+ if err != nil {
t.Fatal(err)
}
+
if tx.Bucket([]byte("widgets")) != nil {
t.Fatal("unexpected bucket")
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
- if err := db.Update(func(tx *Tx) error {
- // Create the bucket again and make sure there's not a phantom value.
+ err = db.Update(func(tx *Tx) error {
+ // Create the bucket again and make sure there's not a phantom
+ // value.
b, err := tx.CreateBucket([]byte("widgets"))
if err != nil {
t.Fatal(err)
}
- if v := b.Get([]byte("foo")); v != nil {
+
+ v := b.Get([]byte("foo"))
+ if v != nil {
t.Fatalf("unexpected phantom value: %v", v)
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
}
@@ -5029,10 +6003,14 @@ func TestTx_DeleteBucket_ErrTxClosed(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- if err := tx.Commit(); err != nil {
+
+ err = tx.Commit()
+ if err != nil {
t.Fatal(err)
}
- if err := tx.DeleteBucket([]byte("foo")); err != ErrTxClosed {
+
+ err = tx.DeleteBucket([]byte("foo"))
+ if err != ErrTxClosed {
t.Fatalf("unexpected error: %s", err)
}
}
@@ -5043,12 +6021,15 @@ func TestTx_DeleteBucket_ReadOnly(t *testing.T) {
defer db.MustClose()
defer os.Remove(db.Path())
- if err := db.View(func(tx *Tx) error {
- if err := tx.DeleteBucket([]byte("foo")); err != ErrTxNotWritable {
+ err := db.View(func(tx *Tx) error {
+ err := tx.DeleteBucket([]byte("foo"))
+ if err != ErrTxNotWritable {
t.Fatalf("unexpected error: %s", err)
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
}
@@ -5059,12 +6040,15 @@ func TestTx_DeleteBucket_NotFound(t *testing.T) {
defer db.MustClose()
defer os.Remove(db.Path())
- if err := db.Update(func(tx *Tx) error {
- if err := tx.DeleteBucket([]byte("widgets")); err != ErrBucketNotFound {
+ err := db.Update(func(tx *Tx) error {
+ err := tx.DeleteBucket([]byte("widgets"))
+ if err != ErrBucketNotFound {
t.Fatalf("unexpected error: %s", err)
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
}
@@ -5076,22 +6060,27 @@ func TestTx_ForEach_NoError(t *testing.T) {
defer db.MustClose()
defer os.Remove(db.Path())
- if err := db.Update(func(tx *Tx) error {
+ err := db.Update(func(tx *Tx) error {
b, err := tx.CreateBucket([]byte("widgets"))
if err != nil {
t.Fatal(err)
}
- if err := b.Put([]byte("foo"), []byte("bar")); err != nil {
+
+ err = b.Put([]byte("foo"), []byte("bar"))
+ if err != nil {
t.Fatal(err)
}
- if err := tx.ForEach(func(name []byte, b *Bucket) error {
+ err = tx.ForEach(func(name []byte, b *Bucket) error {
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
}
@@ -5102,42 +6091,50 @@ func TestTx_ForEach_WithError(t *testing.T) {
defer db.MustClose()
defer os.Remove(db.Path())
- if err := db.Update(func(tx *Tx) error {
+ err := db.Update(func(tx *Tx) error {
b, err := tx.CreateBucket([]byte("widgets"))
if err != nil {
t.Fatal(err)
}
- if err := b.Put([]byte("foo"), []byte("bar")); err != nil {
+
+ err = b.Put([]byte("foo"), []byte("bar"))
+ if err != nil {
t.Fatal(err)
}
marker := errors.New("marker")
- if err := tx.ForEach(func(name []byte, b *Bucket) error {
+ err = tx.ForEach(func(name []byte, b *Bucket) error {
return marker
- }); err != marker {
+ })
+ if err != marker {
t.Fatalf("unexpected error: %s", err)
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
}
-// Ensure that Tx commit handlers are called after a transaction successfully commits.
+// Ensure that Tx commit handlers are called after a transaction successfully
+// commits.
func TestTx_OnCommit(t *testing.T) {
db := MustOpenDB()
defer db.MustClose()
defer os.Remove(db.Path())
var x int
- if err := db.Update(func(tx *Tx) error {
+ err := db.Update(func(tx *Tx) error {
tx.OnCommit(func() { x += 1 })
tx.OnCommit(func() { x += 2 })
- if _, err := tx.CreateBucket([]byte("widgets")); err != nil {
+ _, err := tx.CreateBucket([]byte("widgets"))
+ if err != nil {
t.Fatal(err)
}
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
} else if x != 3 {
t.Fatalf("unexpected x: %d", x)
@@ -5151,14 +6148,17 @@ func TestTx_OnCommit_Rollback(t *testing.T) {
defer os.Remove(db.Path())
var x int
- if err := db.Update(func(tx *Tx) error {
+ err := db.Update(func(tx *Tx) error {
tx.OnCommit(func() { x += 1 })
tx.OnCommit(func() { x += 2 })
- if _, err := tx.CreateBucket([]byte("widgets")); err != nil {
+ _, err := tx.CreateBucket([]byte("widgets"))
+ if err != nil {
t.Fatal(err)
}
+
return errors.New("rollback this commit")
- }); err == nil || err.Error() != "rollback this commit" {
+ })
+ if err == nil || err.Error() != "rollback this commit" {
t.Fatalf("unexpected error: %s", err)
} else if x != 0 {
t.Fatalf("unexpected x: %d", x)
@@ -5173,25 +6173,32 @@ func TestTx_CopyFile(t *testing.T) {
path := tempfile()
defer os.Remove(path)
- if err := db.Update(func(tx *Tx) error {
+ err := db.Update(func(tx *Tx) error {
b, err := tx.CreateBucket([]byte("widgets"))
if err != nil {
t.Fatal(err)
}
- if err := b.Put([]byte("foo"), []byte("bar")); err != nil {
+
+ err = b.Put([]byte("foo"), []byte("bar"))
+ if err != nil {
t.Fatal(err)
}
- if err := b.Put([]byte("baz"), []byte("bat")); err != nil {
+
+ err = b.Put([]byte("baz"), []byte("bat"))
+ if err != nil {
t.Fatal(err)
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
- if err := db.View(func(tx *Tx) error {
+ err = db.View(func(tx *Tx) error {
return tx.CopyFile(path, 0600)
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
@@ -5200,19 +6207,25 @@ func TestTx_CopyFile(t *testing.T) {
t.Fatal(err)
}
- if err := db2.View(func(tx *Tx) error {
- if v := tx.Bucket([]byte("widgets")).Get([]byte("foo")); !bytes.Equal(v, []byte("bar")) {
+ err = db2.View(func(tx *Tx) error {
+ v := tx.Bucket([]byte("widgets")).Get([]byte("foo"))
+ if !bytes.Equal(v, []byte("bar")) {
t.Fatalf("unexpected value: %v", v)
}
- if v := tx.Bucket([]byte("widgets")).Get([]byte("baz")); !bytes.Equal(v, []byte("bat")) {
+
+ v = tx.Bucket([]byte("widgets")).Get([]byte("baz"))
+ if !bytes.Equal(v, []byte("bat")) {
t.Fatalf("unexpected value: %v", v)
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
- if err := db2.Close(); err != nil {
+ err = db2.Close()
+ if err != nil {
t.Fatal(err)
}
}
@@ -5244,26 +6257,34 @@ func TestTx_CopyFile_Error_Meta(t *testing.T) {
defer db.MustClose()
defer os.Remove(db.Path())
- if err := db.Update(func(tx *Tx) error {
+ err := db.Update(func(tx *Tx) error {
b, err := tx.CreateBucket([]byte("widgets"))
if err != nil {
t.Fatal(err)
}
- if err := b.Put([]byte("foo"), []byte("bar")); err != nil {
+
+ err = b.Put([]byte("foo"), []byte("bar"))
+ if err != nil {
t.Fatal(err)
}
- if err := b.Put([]byte("baz"), []byte("bat")); err != nil {
+
+ err = b.Put([]byte("baz"), []byte("bat"))
+ if err != nil {
t.Fatal(err)
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
- if err := db.View(func(tx *Tx) error {
+ const expectedMsg = "meta 0 copy: error injected for tests"
+ err = db.View(func(tx *Tx) error {
_, err := tx.WriteTo(&failWriter{})
return err
- }); err == nil || err.Error() != "meta 0 copy: error injected for tests" {
+ })
+ if err == nil || err.Error() != expectedMsg {
t.Fatalf("unexpected error: %v", err)
}
}
@@ -5274,26 +6295,33 @@ func TestTx_CopyFile_Error_Normal(t *testing.T) {
defer db.MustClose()
defer os.Remove(db.Path())
- if err := db.Update(func(tx *Tx) error {
+ err := db.Update(func(tx *Tx) error {
b, err := tx.CreateBucket([]byte("widgets"))
if err != nil {
t.Fatal(err)
}
- if err := b.Put([]byte("foo"), []byte("bar")); err != nil {
+
+ err = b.Put([]byte("foo"), []byte("bar"))
+ if err != nil {
t.Fatal(err)
}
- if err := b.Put([]byte("baz"), []byte("bat")); err != nil {
+
+ err = b.Put([]byte("baz"), []byte("bat"))
+ if err != nil {
t.Fatal(err)
}
+
return nil
- }); err != nil {
+ })
+ if err != nil {
t.Fatal(err)
}
- if err := db.View(func(tx *Tx) error {
+ err = db.View(func(tx *Tx) error {
_, err := tx.WriteTo(&failWriter{3 * db.pageSize})
return err
- }); err == nil || err.Error() != "error injected for tests" {
+ })
+ if err == nil || err.Error() != "error injected for tests" {
t.Fatalf("unexpected error: %v", err)
}
}
@@ -5307,17 +6335,22 @@ func ExampleTx_Rollback() {
defer os.Remove(db.Path())
// Create a bucket.
- if err := db.Update(func(tx *Tx) error {
+ err = db.Update(func(tx *Tx) error {
_, err := tx.CreateBucket([]byte("widgets"))
return err
- }); err != nil {
+ })
+ if err != nil {
log.Fatal(err)
}
// Set a value for a key.
- if err := db.Update(func(tx *Tx) error {
- return tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar"))
- }); err != nil {
+ err = db.Update(func(tx *Tx) error {
+ return tx.Bucket([]byte("widgets")).Put(
+ []byte("foo"),
+ []byte("bar"),
+ )
+ })
+ if err != nil {
log.Fatal(err)
}
@@ -5326,25 +6359,31 @@ func ExampleTx_Rollback() {
if err != nil {
log.Fatal(err)
}
+
b := tx.Bucket([]byte("widgets"))
- if err := b.Put([]byte("foo"), []byte("baz")); err != nil {
+ err = b.Put([]byte("foo"), []byte("baz"))
+ if err != nil {
log.Fatal(err)
}
- if err := tx.Rollback(); err != nil {
+
+ err = tx.Rollback()
+ if err != nil {
log.Fatal(err)
}
// Ensure that our original value is still set.
- if err := db.View(func(tx *Tx) error {
+ err = db.View(func(tx *Tx) error {
value := tx.Bucket([]byte("widgets")).Get([]byte("foo"))
fmt.Printf("The value for 'foo' is still: %s\n", value)
return nil
- }); err != nil {
+ })
+ if err != nil {
log.Fatal(err)
}
// Close database to release file lock.
- if err := db.Close(); err != nil {
+ err = db.Close()
+ if err != nil {
log.Fatal(err)
}
@@ -5361,25 +6400,29 @@ func ExampleTx_CopyFile() {
defer os.Remove(db.Path())
// Create a bucket and a key.
- if err := db.Update(func(tx *Tx) error {
+ err = db.Update(func(tx *Tx) error {
b, err := tx.CreateBucket([]byte("widgets"))
if err != nil {
return err
}
- if err := b.Put([]byte("foo"), []byte("bar")); err != nil {
+
+ err = b.Put([]byte("foo"), []byte("bar"))
+ if err != nil {
return err
}
return nil
- }); err != nil {
+ })
+ if err != nil {
log.Fatal(err)
}
// Copy the database to another file.
toFile := tempfile()
defer os.Remove(toFile)
- if err := db.View(func(tx *Tx) error {
+ err = db.View(func(tx *Tx) error {
return tx.CopyFile(toFile, 0666)
- }); err != nil {
+ })
+ if err != nil {
log.Fatal(err)
}
@@ -5390,20 +6433,23 @@ func ExampleTx_CopyFile() {
}
// Ensure that the key exists in the copy.
- if err := db2.View(func(tx *Tx) error {
+ err = db2.View(func(tx *Tx) error {
value := tx.Bucket([]byte("widgets")).Get([]byte("foo"))
fmt.Printf("The value for 'foo' in the clone is: %s\n", value)
return nil
- }); err != nil {
+ })
+ if err != nil {
log.Fatal(err)
}
// Close database to release file lock.
- if err := db.Close(); err != nil {
+ err = db.Close()
+ if err != nil {
log.Fatal(err)
}
- if err := db2.Close(); err != nil {
+ err = db2.Close()
+ if err != nil {
log.Fatal(err)
}
@@ -5445,8 +6491,10 @@ func fillBucket(b *Bucket, prefix []byte) error {
if err != nil {
return err
}
+
k := append(prefix, []byte(fmt.Sprintf("k%d", i))...)
- if err := b.Put(k, v); err != nil {
+ err = b.Put(k, v)
+ if err != nil {
return err
}
}
@@ -5462,7 +6510,9 @@ func fillBucket(b *Bucket, prefix []byte) error {
if err != nil {
return err
}
- if err := fillBucket(sb, append(k, '.')); err != nil {
+
+ err = fillBucket(sb, append(k, '.'))
+ if err != nil {
return err
}
}
@@ -5470,7 +6520,8 @@ func fillBucket(b *Bucket, prefix []byte) error {
}
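
fillBucket recursively populates a bucket, and nested buckets under it, with
prefixed keys. A hypothetical invocation inside a write transaction might look
like:

func fillRoot(db *DB) error {
	return db.Update(func(tx *Tx) error {
		b, err := tx.CreateBucket([]byte("root"))
		if err != nil {
			return err
		}

		return fillBucket(b, []byte("root."))
	})
}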
func walkBucket(parent *Bucket, k []byte, v []byte, w io.Writer) error {
- if _, err := fmt.Fprintf(w, "%d:%x=%x\n", parent.Sequence(), k, v); err != nil {
+ _, err := fmt.Fprintf(w, "%d:%x=%x\n", parent.Sequence(), k, v)
+ if err != nil {
return err
}