 bolt_unix.go         |  11
 bolt_unix_solaris.go |  11
 bucket_test.go       | 112
 db.go                |  42
 db_test.go           |  16
 simulation_test.go   |   2
 tx.go                |  12
 tx_test.go           |   4
 8 files changed, 121 insertions(+), 89 deletions(-)
diff --git a/bolt_unix.go b/bolt_unix.go
index 251680b..4b0723a 100644
--- a/bolt_unix.go
+++ b/bolt_unix.go
@@ -46,17 +46,6 @@ func funlock(f *os.File) error {
// mmap memory maps a DB's data file.
func mmap(db *DB, sz int) error {
- // Truncate and fsync to ensure file size metadata is flushed.
- // https://github.com/boltdb/bolt/issues/284
- if !db.NoGrowSync && !db.readOnly {
- if err := db.file.Truncate(int64(sz)); err != nil {
- return fmt.Errorf("file resize error: %s", err)
- }
- if err := db.file.Sync(); err != nil {
- return fmt.Errorf("file sync error: %s", err)
- }
- }
-
// Map the data file to memory.
b, err := syscall.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED|db.MmapFlags)
if err != nil {
diff --git a/bolt_unix_solaris.go b/bolt_unix_solaris.go
index 214009b..1c4e48d 100644
--- a/bolt_unix_solaris.go
+++ b/bolt_unix_solaris.go
@@ -56,17 +56,6 @@ func funlock(f *os.File) error {
// mmap memory maps a DB's data file.
func mmap(db *DB, sz int) error {
- // Truncate and fsync to ensure file size metadata is flushed.
- // https://github.com/boltdb/bolt/issues/284
- if !db.NoGrowSync && !db.readOnly {
- if err := db.file.Truncate(int64(sz)); err != nil {
- return fmt.Errorf("file resize error: %s", err)
- }
- if err := db.file.Sync(); err != nil {
- return fmt.Errorf("file sync error: %s", err)
- }
- }
-
// Map the data file to memory.
b, err := unix.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED|db.MmapFlags)
if err != nil {
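
With the truncate and fsync removed from both bolt_unix.go and bolt_unix_solaris.go, mmap() is left with only the read-only mapping itself; growing the file is handled separately by the new DB.grow() further down in this change. A minimal standalone sketch of what the remaining mapping step amounts to (file name illustrative, error handling condensed):

package main

import (
	"fmt"
	"os"
	"syscall"
)

func main() {
	// Open the data file the way Bolt would, then map it read-only.
	f, err := os.OpenFile("my.db", os.O_RDWR, 0600)
	if err != nil {
		panic(err)
	}
	defer f.Close()

	fi, err := f.Stat()
	if err != nil {
		panic(err)
	}

	// Read-only, shared mapping: all writes go through the file handle,
	// never through the mapped bytes.
	b, err := syscall.Mmap(int(f.Fd()), 0, int(fi.Size()), syscall.PROT_READ, syscall.MAP_SHARED)
	if err != nil {
		panic(err)
	}
	defer syscall.Munmap(b)

	fmt.Printf("mapped %d bytes read-only\n", len(b))
}
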
diff --git a/bucket_test.go b/bucket_test.go
index 480ed08..a02c367 100644
--- a/bucket_test.go
+++ b/bucket_test.go
@@ -456,10 +456,10 @@ func TestBucket_Nested(t *testing.T) {
if err := db.View(func(tx *bolt.Tx) error {
var b = tx.Bucket([]byte("widgets"))
if v := b.Bucket([]byte("foo")).Get([]byte("baz")); !bytes.Equal(v, []byte("yyyy")) {
- t.Fatalf("unexpected value: %v")
+ t.Fatalf("unexpected value: %v", v)
}
- if !bytes.Equal(b.Get([]byte("bar")), []byte("xxxx")) {
- t.Fatalf("unexpected value: %v")
+ if v := b.Get([]byte("bar")); !bytes.Equal(v, []byte("xxxx")) {
+ t.Fatalf("unexpected value: %v", v)
}
for i := 0; i < 10000; i++ {
if v := b.Get([]byte(strconv.Itoa(i))); !bytes.Equal(v, []byte(strconv.Itoa(i))) {
@@ -1118,18 +1118,18 @@ func TestBucket_Stats(t *testing.T) {
// Only check allocations for 4KB pages.
if os.Getpagesize() == 4096 {
if stats.BranchAlloc != 4096 {
- t.Fatalf("unexpected BranchAlloc:", stats.BranchAlloc)
+ t.Fatalf("unexpected BranchAlloc: %d", stats.BranchAlloc)
} else if stats.LeafAlloc != 36864 {
- t.Fatalf("unexpected LeafAlloc:", stats.LeafAlloc)
+ t.Fatalf("unexpected LeafAlloc: %d", stats.LeafAlloc)
}
}
if stats.BucketN != 1 {
- t.Fatalf("unexpected BucketN:", stats.BucketN)
+ t.Fatalf("unexpected BucketN: %d", stats.BucketN)
} else if stats.InlineBucketN != 0 {
- t.Fatalf("unexpected InlineBucketN:", stats.InlineBucketN)
+ t.Fatalf("unexpected InlineBucketN: %d", stats.InlineBucketN)
} else if stats.InlineBucketInuse != 0 {
- t.Fatalf("unexpected InlineBucketInuse:", stats.InlineBucketInuse)
+ t.Fatalf("unexpected InlineBucketInuse: %d", stats.InlineBucketInuse)
}
return nil
@@ -1178,27 +1178,27 @@ func TestBucket_Stats_RandomFill(t *testing.T) {
if err := db.View(func(tx *bolt.Tx) error {
stats := tx.Bucket([]byte("woojits")).Stats()
if stats.KeyN != 100000 {
- t.Fatalf("unexpected KeyN", stats.KeyN)
+ t.Fatalf("unexpected KeyN: %d", stats.KeyN)
}
if stats.BranchPageN != 98 {
- t.Fatalf("unexpected BranchPageN", stats.BranchPageN)
+ t.Fatalf("unexpected BranchPageN: %d", stats.BranchPageN)
} else if stats.BranchOverflowN != 0 {
- t.Fatalf("unexpected BranchOverflowN", stats.BranchOverflowN)
+ t.Fatalf("unexpected BranchOverflowN: %d", stats.BranchOverflowN)
} else if stats.BranchInuse != 130984 {
- t.Fatalf("unexpected BranchInuse", stats.BranchInuse)
+ t.Fatalf("unexpected BranchInuse: %d", stats.BranchInuse)
} else if stats.BranchAlloc != 401408 {
- t.Fatalf("unexpected BranchAlloc", stats.BranchAlloc)
+ t.Fatalf("unexpected BranchAlloc: %d", stats.BranchAlloc)
}
if stats.LeafPageN != 3412 {
- t.Fatalf("unexpected LeafPageN", stats.LeafPageN)
+ t.Fatalf("unexpected LeafPageN: %d", stats.LeafPageN)
} else if stats.LeafOverflowN != 0 {
- t.Fatalf("unexpected LeafOverflowN", stats.LeafOverflowN)
+ t.Fatalf("unexpected LeafOverflowN: %d", stats.LeafOverflowN)
} else if stats.LeafInuse != 4742482 {
- t.Fatalf("unexpected LeafInuse", stats.LeafInuse)
+ t.Fatalf("unexpected LeafInuse: %d", stats.LeafInuse)
} else if stats.LeafAlloc != 13975552 {
- t.Fatalf("unexpected LeafAlloc", stats.LeafAlloc)
+ t.Fatalf("unexpected LeafAlloc: %d", stats.LeafAlloc)
}
return nil
}); err != nil {
@@ -1232,37 +1232,37 @@ func TestBucket_Stats_Small(t *testing.T) {
b := tx.Bucket([]byte("whozawhats"))
stats := b.Stats()
if stats.BranchPageN != 0 {
- t.Fatalf("unexpected BranchPageN: ", stats.BranchPageN)
+ t.Fatalf("unexpected BranchPageN: %d", stats.BranchPageN)
} else if stats.BranchOverflowN != 0 {
- t.Fatalf("unexpected BranchOverflowN: ", stats.BranchOverflowN)
+ t.Fatalf("unexpected BranchOverflowN: %d", stats.BranchOverflowN)
} else if stats.LeafPageN != 0 {
- t.Fatalf("unexpected LeafPageN: ", stats.LeafPageN)
+ t.Fatalf("unexpected LeafPageN: %d", stats.LeafPageN)
} else if stats.LeafOverflowN != 0 {
- t.Fatalf("unexpected LeafOverflowN: ", stats.LeafOverflowN)
+ t.Fatalf("unexpected LeafOverflowN: %d", stats.LeafOverflowN)
} else if stats.KeyN != 1 {
- t.Fatalf("unexpected KeyN: ", stats.KeyN)
+ t.Fatalf("unexpected KeyN: %d", stats.KeyN)
} else if stats.Depth != 1 {
- t.Fatalf("unexpected Depth: ", stats.Depth)
+ t.Fatalf("unexpected Depth: %d", stats.Depth)
} else if stats.BranchInuse != 0 {
- t.Fatalf("unexpected BranchInuse: ", stats.BranchInuse)
+ t.Fatalf("unexpected BranchInuse: %d", stats.BranchInuse)
} else if stats.LeafInuse != 0 {
- t.Fatalf("unexpected LeafInuse: ", stats.LeafInuse)
+ t.Fatalf("unexpected LeafInuse: %d", stats.LeafInuse)
}
if os.Getpagesize() == 4096 {
if stats.BranchAlloc != 0 {
- t.Fatalf("unexpected BranchAlloc: ", stats.BranchAlloc)
+ t.Fatalf("unexpected BranchAlloc: %d", stats.BranchAlloc)
} else if stats.LeafAlloc != 0 {
- t.Fatalf("unexpected LeafAlloc: ", stats.LeafAlloc)
+ t.Fatalf("unexpected LeafAlloc: %d", stats.LeafAlloc)
}
}
if stats.BucketN != 1 {
- t.Fatalf("unexpected BucketN: ", stats.BucketN)
+ t.Fatalf("unexpected BucketN: %d", stats.BucketN)
} else if stats.InlineBucketN != 1 {
- t.Fatalf("unexpected InlineBucketN: ", stats.InlineBucketN)
+ t.Fatalf("unexpected InlineBucketN: %d", stats.InlineBucketN)
} else if stats.InlineBucketInuse != 16+16+6 {
- t.Fatalf("unexpected InlineBucketInuse: ", stats.InlineBucketInuse)
+ t.Fatalf("unexpected InlineBucketInuse: %d", stats.InlineBucketInuse)
}
return nil
@@ -1291,37 +1291,37 @@ func TestBucket_Stats_EmptyBucket(t *testing.T) {
b := tx.Bucket([]byte("whozawhats"))
stats := b.Stats()
if stats.BranchPageN != 0 {
- t.Fatalf("unexpected BranchPageN: ", stats.BranchPageN)
+ t.Fatalf("unexpected BranchPageN: %d", stats.BranchPageN)
} else if stats.BranchOverflowN != 0 {
- t.Fatalf("unexpected BranchOverflowN: ", stats.BranchOverflowN)
+ t.Fatalf("unexpected BranchOverflowN: %d", stats.BranchOverflowN)
} else if stats.LeafPageN != 0 {
- t.Fatalf("unexpected LeafPageN: ", stats.LeafPageN)
+ t.Fatalf("unexpected LeafPageN: %d", stats.LeafPageN)
} else if stats.LeafOverflowN != 0 {
- t.Fatalf("unexpected LeafOverflowN: ", stats.LeafOverflowN)
+ t.Fatalf("unexpected LeafOverflowN: %d", stats.LeafOverflowN)
} else if stats.KeyN != 0 {
- t.Fatalf("unexpected KeyN: ", stats.KeyN)
+ t.Fatalf("unexpected KeyN: %d", stats.KeyN)
} else if stats.Depth != 1 {
- t.Fatalf("unexpected Depth: ", stats.Depth)
+ t.Fatalf("unexpected Depth: %d", stats.Depth)
} else if stats.BranchInuse != 0 {
- t.Fatalf("unexpected BranchInuse: ", stats.BranchInuse)
+ t.Fatalf("unexpected BranchInuse: %d", stats.BranchInuse)
} else if stats.LeafInuse != 0 {
- t.Fatalf("unexpected LeafInuse: ", stats.LeafInuse)
+ t.Fatalf("unexpected LeafInuse: %d", stats.LeafInuse)
}
if os.Getpagesize() == 4096 {
if stats.BranchAlloc != 0 {
- t.Fatalf("unexpected BranchAlloc: ", stats.BranchAlloc)
+ t.Fatalf("unexpected BranchAlloc: %d", stats.BranchAlloc)
} else if stats.LeafAlloc != 0 {
- t.Fatalf("unexpected LeafAlloc: ", stats.LeafAlloc)
+ t.Fatalf("unexpected LeafAlloc: %d", stats.LeafAlloc)
}
}
if stats.BucketN != 1 {
- t.Fatalf("unexpected BucketN: ", stats.BucketN)
+ t.Fatalf("unexpected BucketN: %d", stats.BucketN)
} else if stats.InlineBucketN != 1 {
- t.Fatalf("unexpected InlineBucketN: ", stats.InlineBucketN)
+ t.Fatalf("unexpected InlineBucketN: %d", stats.InlineBucketN)
} else if stats.InlineBucketInuse != 16 {
- t.Fatalf("unexpected InlineBucketInuse: ", stats.InlineBucketInuse)
+ t.Fatalf("unexpected InlineBucketInuse: %d", stats.InlineBucketInuse)
}
return nil
@@ -1377,19 +1377,19 @@ func TestBucket_Stats_Nested(t *testing.T) {
b := tx.Bucket([]byte("foo"))
stats := b.Stats()
if stats.BranchPageN != 0 {
- t.Fatalf("unexpected BranchPageN: ", stats.BranchPageN)
+ t.Fatalf("unexpected BranchPageN: %d", stats.BranchPageN)
} else if stats.BranchOverflowN != 0 {
- t.Fatalf("unexpected BranchOverflowN: ", stats.BranchOverflowN)
+ t.Fatalf("unexpected BranchOverflowN: %d", stats.BranchOverflowN)
} else if stats.LeafPageN != 2 {
- t.Fatalf("unexpected LeafPageN: ", stats.LeafPageN)
+ t.Fatalf("unexpected LeafPageN: %d", stats.LeafPageN)
} else if stats.LeafOverflowN != 0 {
- t.Fatalf("unexpected LeafOverflowN: ", stats.LeafOverflowN)
+ t.Fatalf("unexpected LeafOverflowN: %d", stats.LeafOverflowN)
} else if stats.KeyN != 122 {
- t.Fatalf("unexpected KeyN: ", stats.KeyN)
+ t.Fatalf("unexpected KeyN: %d", stats.KeyN)
} else if stats.Depth != 3 {
- t.Fatalf("unexpected Depth: ", stats.Depth)
+ t.Fatalf("unexpected Depth: %d", stats.Depth)
} else if stats.BranchInuse != 0 {
- t.Fatalf("unexpected BranchInuse: ", stats.BranchInuse)
+ t.Fatalf("unexpected BranchInuse: %d", stats.BranchInuse)
}
foo := 16 // foo (pghdr)
@@ -1407,23 +1407,23 @@ func TestBucket_Stats_Nested(t *testing.T) {
baz += 10 + 10 // baz leaf key/values
if stats.LeafInuse != foo+bar+baz {
- t.Fatalf("unexpected LeafInuse: ", stats.LeafInuse)
+ t.Fatalf("unexpected LeafInuse: %d", stats.LeafInuse)
}
if os.Getpagesize() == 4096 {
if stats.BranchAlloc != 0 {
- t.Fatalf("unexpected BranchAlloc: ", stats.BranchAlloc)
+ t.Fatalf("unexpected BranchAlloc: %d", stats.BranchAlloc)
} else if stats.LeafAlloc != 8192 {
- t.Fatalf("unexpected LeafAlloc: ", stats.LeafAlloc)
+ t.Fatalf("unexpected LeafAlloc: %d", stats.LeafAlloc)
}
}
if stats.BucketN != 3 {
- t.Fatalf("unexpected BucketN: ", stats.BucketN)
+ t.Fatalf("unexpected BucketN: %d", stats.BucketN)
} else if stats.InlineBucketN != 1 {
- t.Fatalf("unexpected InlineBucketN: ", stats.InlineBucketN)
+ t.Fatalf("unexpected InlineBucketN: %d", stats.InlineBucketN)
} else if stats.InlineBucketInuse != baz {
- t.Fatalf("unexpected InlineBucketInuse: ", stats.InlineBucketInuse)
+ t.Fatalf("unexpected InlineBucketInuse: %d", stats.InlineBucketInuse)
}
return nil
diff --git a/db.go b/db.go
index f559ee5..0f1e1bc 100644
--- a/db.go
+++ b/db.go
@@ -26,13 +26,14 @@ const magic uint32 = 0xED0CDAED
// IgnoreNoSync specifies whether the NoSync field of a DB is ignored when
// syncing changes to a file. This is required as some operating systems,
// such as OpenBSD, do not have a unified buffer cache (UBC) and writes
-// must be synchronzied using the msync(2) syscall.
+// must be synchronized using the msync(2) syscall.
const IgnoreNoSync = runtime.GOOS == "openbsd"
// Default values if not set in a DB instance.
const (
DefaultMaxBatchSize int = 1000
DefaultMaxBatchDelay = 10 * time.Millisecond
+ DefaultAllocSize = 16 * 1024 * 1024
)
// DB represents a collection of buckets persisted to a file on disk.
@@ -85,11 +86,17 @@ type DB struct {
// Do not change concurrently with calls to Batch.
MaxBatchDelay time.Duration
+ // AllocSize is the amount of space allocated when the database
+ // needs to create new pages. This is done to amortize the cost
+ // of truncate() and fsync() when growing the data file.
+ AllocSize int
+
path string
file *os.File
dataref []byte // mmap'ed readonly, write throws SEGV
data *[maxMapSize]byte
datasz int
+ filesz int // current on disk file size
meta0 *meta
meta1 *meta
pageSize int
@@ -147,6 +154,7 @@ func Open(path string, mode os.FileMode, options *Options) (*DB, error) {
// Set default values for later DB operations.
db.MaxBatchSize = DefaultMaxBatchSize
db.MaxBatchDelay = DefaultMaxBatchDelay
+ db.AllocSize = DefaultAllocSize
flag := os.O_RDWR
if options.ReadOnly {
@@ -410,7 +418,7 @@ func (db *DB) close() error {
// will cause the calls to block and be serialized until the current write
// transaction finishes.
//
-// Transactions should not be depedent on one another. Opening a read
+// Transactions should not be dependent on one another. Opening a read
// transaction and a write transaction in the same goroutine can cause the
// writer to deadlock because the database periodically needs to re-mmap itself
// as it grows and it cannot do that while a read transaction is open.
@@ -798,6 +806,36 @@ func (db *DB) allocate(count int) (*page, error) {
return p, nil
}
+// grow grows the size of the database to the given sz.
+func (db *DB) grow(sz int) error {
+ // Ignore if the new size is less than available file size.
+ if sz <= db.filesz {
+ return nil
+ }
+
+ // If the data is smaller than the alloc size then only allocate what's needed.
+ // Once it goes over the allocation size then allocate in chunks.
+ if db.datasz < db.AllocSize {
+ sz = db.datasz
+ } else {
+ sz += db.AllocSize
+ }
+
+ // Truncate and fsync to ensure file size metadata is flushed.
+ // https://github.com/boltdb/bolt/issues/284
+ if !db.NoGrowSync && !db.readOnly {
+ if err := db.file.Truncate(int64(sz)); err != nil {
+ return fmt.Errorf("file resize error: %s", err)
+ }
+ if err := db.file.Sync(); err != nil {
+ return fmt.Errorf("file sync error: %s", err)
+ }
+ }
+
+ db.filesz = sz
+ return nil
+}
+
func (db *DB) IsReadOnly() bool {
return db.readOnly
}
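
The sizing rule inside grow() is the heart of this change: while the database is under the 16MB threshold the file is truncated to exactly the mmap size, and past it growth is padded by a full AllocSize chunk so truncate() and fsync() run far less often. A minimal sketch of that rule pulled out as a pure function (function and variable names here are illustrative, not part of the package):

package main

import "fmt"

const defaultAllocSize = 16 * 1024 * 1024 // mirrors DefaultAllocSize in db.go

// nextFileSize applies the same decision grow() makes above: keep the current
// file size if it already covers the request, truncate small databases to the
// mmap size, and pad large ones by a full alloc-size chunk.
func nextFileSize(requested, filesz, datasz, allocSize int) int {
	if requested <= filesz {
		return filesz
	}
	if datasz < allocSize {
		return datasz
	}
	return requested + allocSize
}

func main() {
	page := 4096
	// Small database: the file tracks the mmap size page for page.
	fmt.Println(nextFileSize(300*page, 256*page, 300*page, defaultAllocSize))
	// Large database: one truncate covers the next 16MB of growth.
	fmt.Println(nextFileSize(20<<20, 18<<20, 20<<20, defaultAllocSize))
}

The padded branch overshoots the requested size on purpose, so a run of sequential commits that each allocate a few pages shares a single truncate and fsync until the next chunk boundary is crossed.
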
diff --git a/db_test.go b/db_test.go
index c1346b8..e31b865 100644
--- a/db_test.go
+++ b/db_test.go
@@ -229,6 +229,8 @@ func TestOpen_Size(t *testing.T) {
path := db.Path()
defer db.MustClose()
+ pagesize := db.Info().PageSize
+
// Insert until we get above the minimum 4MB size.
if err := db.Update(func(tx *bolt.Tx) error {
b, _ := tx.CreateBucketIfNotExists([]byte("data"))
@@ -273,7 +275,8 @@ func TestOpen_Size(t *testing.T) {
}
// Compare the original size with the new size.
- if sz != newSz {
+ // db size might increase by a few page sizes due to the new small update.
+ if sz < newSz-5*int64(pagesize) {
t.Fatalf("unexpected file growth: %d => %d", sz, newSz)
}
}
@@ -290,6 +293,8 @@ func TestOpen_Size_Large(t *testing.T) {
path := db.Path()
defer db.MustClose()
+ pagesize := db.Info().PageSize
+
// Insert until we get above the minimum 4MB size.
var index uint64
for i := 0; i < 10000; i++ {
@@ -338,7 +343,8 @@ func TestOpen_Size_Large(t *testing.T) {
}
// Compare the original size with the new size.
- if sz != newSz {
+ // db size might increase by a few page sizes due to the new small update.
+ if sz < newSz-5*int64(pagesize) {
t.Fatalf("unexpected file growth: %d => %d", sz, newSz)
}
}
@@ -904,11 +910,11 @@ func TestDB_Stats(t *testing.T) {
stats := db.Stats()
if stats.TxStats.PageCount != 2 {
- t.Fatalf("unexpected TxStats.PageCount", stats.TxStats.PageCount)
+ t.Fatalf("unexpected TxStats.PageCount: %d", stats.TxStats.PageCount)
} else if stats.FreePageN != 0 {
- t.Fatalf("unexpected FreePageN != 0", stats.FreePageN)
+ t.Fatalf("unexpected FreePageN != 0: %d", stats.FreePageN)
} else if stats.PendingPageN != 2 {
- t.Fatalf("unexpected PendingPageN != 2", stats.PendingPageN)
+ t.Fatalf("unexpected PendingPageN != 2: %d", stats.PendingPageN)
}
}
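
The relaxed comparison in TestOpen_Size and TestOpen_Size_Large above accepts the small growth caused by the final one-key update and only fails once the reopened file has grown by more than five pages. A sketch of that check in isolation (helper name hypothetical):

package main

import "fmt"

// grewTooMuch mirrors the relaxed test assertion: the reopened file may be up
// to five pages larger than before without the test failing.
func grewTooMuch(oldSz, newSz int64, pageSize int) bool {
	return oldSz < newSz-5*int64(pageSize)
}

func main() {
	fmt.Println(grewTooMuch(4<<20, 4<<20+2*4096, 4096)) // two extra pages: allowed (false)
	fmt.Println(grewTooMuch(4<<20, 4<<20+8*4096, 4096)) // eight extra pages: flagged (true)
}
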
diff --git a/simulation_test.go b/simulation_test.go
index c691527..3831016 100644
--- a/simulation_test.go
+++ b/simulation_test.go
@@ -10,7 +10,7 @@ import (
"github.com/boltdb/bolt"
)
-func TestSimulate_1op_1p(t *testing.T) { testSimulate(t, 100, 1) }
+func TestSimulate_1op_1p(t *testing.T) { testSimulate(t, 1, 1) }
func TestSimulate_10op_1p(t *testing.T) { testSimulate(t, 10, 1) }
func TestSimulate_100op_1p(t *testing.T) { testSimulate(t, 100, 1) }
func TestSimulate_1000op_1p(t *testing.T) { testSimulate(t, 1000, 1) }
diff --git a/tx.go b/tx.go
index 5f4c9f0..e74d2ca 100644
--- a/tx.go
+++ b/tx.go
@@ -168,6 +168,8 @@ func (tx *Tx) Commit() error {
// Free the old root bucket.
tx.meta.root.root = tx.root.root
+ opgid := tx.meta.pgid
+
// Free the freelist and allocate new pages for it. This will overestimate
// the size of the freelist but not underestimate the size (which would be bad).
tx.db.freelist.free(tx.meta.txid, tx.db.page(tx.meta.freelist))
@@ -182,6 +184,14 @@ func (tx *Tx) Commit() error {
}
tx.meta.freelist = p.id
+ // If the high water mark has moved up then attempt to grow the database.
+ if tx.meta.pgid > opgid {
+ if err := tx.db.grow(int(tx.meta.pgid+1) * tx.db.pageSize); err != nil {
+ tx.rollback()
+ return err
+ }
+ }
+
// Write dirty pages to disk.
startTime = time.Now()
if err := tx.write(); err != nil {
@@ -505,7 +515,7 @@ func (tx *Tx) writeMeta() error {
}
// page returns a reference to the page with a given id.
-// If page has been written to then a temporary bufferred page is returned.
+// If page has been written to then a temporary buffered page is returned.
func (tx *Tx) page(id pgid) *page {
// Check the dirty pages first.
if tx.pages != nil {
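
Commit() is now the only code path that drives file growth, and it does so through the exported AllocSize field initialized in Open(). An application with very bursty writes could, in principle, trade extra file padding for fewer truncate and fsync calls by raising that field after opening the database; a hedged usage sketch, with the path and value purely illustrative:

package main

import (
	"log"

	"github.com/boltdb/bolt"
)

func main() {
	db, err := bolt.Open("my.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Open() sets AllocSize to DefaultAllocSize (16MB); a larger chunk means
	// grow() truncates and fsyncs the data file less often as it expands.
	db.AllocSize = 64 * 1024 * 1024

	if err := db.Update(func(tx *bolt.Tx) error {
		_, err := tx.CreateBucketIfNotExists([]byte("widgets"))
		return err
	}); err != nil {
		log.Fatal(err)
	}
}
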
diff --git a/tx_test.go b/tx_test.go
index 7a274e4..18ff166 100644
--- a/tx_test.go
+++ b/tx_test.go
@@ -571,7 +571,7 @@ func TestTx_CopyFile_Error_Meta(t *testing.T) {
if err := db.View(func(tx *bolt.Tx) error {
return tx.Copy(&failWriter{})
}); err == nil || err.Error() != "meta copy: error injected for tests" {
- t.Fatal("unexpected error: %s", err)
+ t.Fatalf("unexpected error: %v", err)
}
}
@@ -598,7 +598,7 @@ func TestTx_CopyFile_Error_Normal(t *testing.T) {
if err := db.View(func(tx *bolt.Tx) error {
return tx.Copy(&failWriter{3 * db.Info().PageSize})
}); err == nil || err.Error() != "error injected for tests" {
- t.Fatal("unexpected error: %s", err)
+ t.Fatalf("unexpected error: %v", err)
}
}