 src/dedo.go   | 299 ----------
 tests/dedo.go | 542 ----------
 2 files changed, 0 insertions(+), 841 deletions(-)
diff --git a/src/dedo.go b/src/dedo.go
index d5a39b7..948f579 100644
--- a/src/dedo.go
+++ b/src/dedo.go
@@ -44,30 +44,6 @@ type Bucket struct {
nodes map[pgid]*node // node cache
}
-// BucketStats records statistics about resources used by a bucket.
-type BucketStats struct {
- // Page count statistics.
- BranchPageN int // number of logical branch pages
- BranchOverflowN int // number of physical branch overflow pages
- LeafPageN int // number of logical leaf pages
- LeafOverflowN int // number of physical leaf overflow pages
-
- // Tree statistics.
- KeyN int // number of key/value pairs
- Depth int // number of levels in B+tree
-
- // Page size utilization.
- BranchAlloc int // bytes allocated for physical branch pages
- BranchInuse int // bytes actually used for branch data
- LeafAlloc int // bytes allocated for physical leaf pages
- LeafInuse int // bytes actually used for leaf data
-
- // Bucket statistics.
- BucketN int // total number of buckets including the top bucket
- InlineBucketN int // total number of inlined buckets
- InlineBucketInuse int // bytes used for inlined buckets (also accounted for in LeafInuse)
-}
-
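A minimal sketch of how these per-bucket counters are read through Bucket.Stats (whose removal appears further down); it assumes an open *DB named db holding a bucket called "widgets", both hypothetical:

    // Sketch: reading BucketStats inside a read transaction.
    err := db.View(func(tx *Tx) error {
        s := tx.Bucket([]byte("widgets")).Stats()
        fmt.Printf("keys=%d depth=%d leaf bytes=%d of %d\n",
            s.KeyN, s.Depth, s.LeafInuse, s.LeafAlloc)
        return nil
    })
    if err != nil {
        log.Fatal(err)
    }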
// Cursor represents an iterator that can traverse over all key/value pairs in a bucket in sorted order.
// Cursors see nested buckets with value == nil.
// Cursors can be obtained from a transaction and are valid as long as the transaction is open.
@@ -138,7 +114,6 @@ type DB struct {
rwtx *Tx
txs []*Tx
freelist *freelist
- stats Stats
pagePool sync.Pool
@@ -148,7 +123,6 @@ type DB struct {
rwlock sync.Mutex // Allows only one writer at a time.
metalock sync.Mutex // Protects meta page access.
mmaplock sync.RWMutex // Protects mmap access during remapping.
- statlock sync.RWMutex // Protects stats access.
ops dbops
}
@@ -169,21 +143,6 @@ type panicked struct {
reason interface{}
}
-// Stats represents statistics about the database.
-type Stats struct {
- // Freelist stats
- FreePageN int // total number of free pages on the freelist
- PendingPageN int // total number of pending pages on the freelist
- FreeAlloc int // total bytes allocated in free pages
- FreelistInuse int // total bytes used by the freelist
-
- // Transaction stats
- TxN int // total number of started read transactions
- OpenTxN int // number of currently open read transactions
-
- TxStats TxStats // global, ongoing stats.
-}
-
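For context, a sketch of how this snapshot struct was consumed through DB.Stats (also deleted below); db is an assumed open *DB:

    // Sketch: one-off read of the DB-level counters.
    s := db.Stats()
    fmt.Printf("free=%d pending=%d open read-tx=%d\n",
        s.FreePageN, s.PendingPageN, s.OpenTxN)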
type meta struct {
magic uint32
version uint32
@@ -284,37 +243,9 @@ type Tx struct {
meta *meta
root Bucket
pages map[pgid]*page
- stats TxStats
commitHandlers []func()
}
-// TxStats represents statistics about the actions performed by the transaction.
-type TxStats struct {
- // Page statistics.
- PageCount int // number of page allocations
- PageAlloc int // total bytes allocated
-
- // Cursor statistics.
- CursorCount int // number of cursors created
-
- // Node statistics.
- NodeCount int // number of node allocations
- NodeDeref int // number of node dereferences
-
- // Rebalance statistics.
- Rebalance int // number of node rebalances
- RebalanceTime time.Duration // total time spent rebalancing
-
- // Split/Spill statistics.
- Split int // number of nodes split
- Spill int // number of nodes spilled
- SpillTime time.Duration // total time spent spilling
-
- // Write statistics.
- Write int // number of writes performed
- WriteTime time.Duration // total time spent writing to disk
-}
-
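These counters accumulated while a transaction ran and were merged into the DB-wide Stats when it closed. A hedged sketch of sampling them mid-transaction through the Tx.Stats accessor removed below (bucket name hypothetical):

    // Sketch: sampling TxStats during an update.
    err := db.Update(func(tx *Tx) error {
        b, err := tx.CreateBucketIfNotExists([]byte("widgets"))
        if err != nil {
            return err
        }
        if err := b.Put([]byte("k"), []byte("v")); err != nil {
            return err
        }
        ts := tx.Stats()
        fmt.Printf("pages=%d nodes=%d\n", ts.PageCount, ts.NodeCount)
        return nil
    })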
// walkFunc is the type of the function called for keys (buckets and "normal"
// values) discovered by Walk. keys is the sequence of keys to descend through
// to reach the bucket owning the discovered key/value pair k/v.
@@ -586,9 +517,6 @@ func (b *Bucket) Writable() bool {
// The cursor is only valid as long as the transaction is open.
// Do not use a cursor after the transaction is closed.
func (b *Bucket) Cursor() *Cursor {
- // Update transaction statistics.
- b.tx.stats.CursorCount++
-
// Allocate and return a cursor.
return &Cursor{
bucket: b,
@@ -894,88 +822,6 @@ func (b *Bucket) ForEach(fn func(k, v []byte) error) error {
return nil
}
-// Stats returns stats on a bucket.
-func (b *Bucket) Stats() BucketStats {
- var s, subStats BucketStats
- pageSize := b.tx.db.pageSize
- s.BucketN += 1
- if b.ref.root == 0 {
- s.InlineBucketN += 1
- }
- b.forEachPage(func(p *page, depth int) {
- if (p.flags & leafPageFlag) != 0 {
- s.KeyN += int(p.count)
-
- // used totals the used bytes for the page
- used := pageHeaderSize
-
- if p.count != 0 {
- // If page has any elements, add all element headers.
- used += leafPageElementSize * int(p.count-1)
-
- // Add all element key, value sizes.
- // The computation takes advantage of the fact that the position
- // of the last element's key/value equals the total of the sizes
- // of all previous elements' keys and values.
- // It also includes the last element's header.
- lastElement := p.leafPageElement(p.count - 1)
- used += int(lastElement.pos + lastElement.ksize + lastElement.vsize)
- }
-
- if b.ref.root == 0 {
- // For an inlined bucket, just update the inline stats.
- s.InlineBucketInuse += used
- } else {
- // For a non-inlined bucket, update all the leaf stats.
- s.LeafPageN++
- s.LeafInuse += used
- s.LeafOverflowN += int(p.overflow)
-
- // Collect stats from sub-buckets.
- // Do that by iterating over all element headers
- // looking for the ones with the bucketLeafFlag.
- for i := uint16(0); i < p.count; i++ {
- e := p.leafPageElement(i)
- if (e.flags & bucketLeafFlag) != 0 {
- // For any bucket element, open the element value
- // and recursively call Stats on the contained bucket.
- subStats.Add(b.openBucket(e.value()).Stats())
- }
- }
- }
- } else if (p.flags & branchPageFlag) != 0 {
- s.BranchPageN++
- lastElement := p.branchPageElement(p.count - 1)
-
- // used totals the used bytes for the page
- // Add header and all element headers.
- used := pageHeaderSize + (branchPageElementSize * int(p.count-1))
-
- // Add size of all keys and values.
- // Again, use the fact that the last element's position equals
- // the total of the key and value sizes of all previous elements.
- used += int(lastElement.pos + lastElement.ksize)
- s.BranchInuse += used
- s.BranchOverflowN += int(p.overflow)
- }
-
- // Keep track of maximum page depth.
- if depth+1 > s.Depth {
- s.Depth = (depth + 1)
- }
- })
-
- // Alloc stats can be computed from page counts and pageSize.
- s.BranchAlloc = (s.BranchPageN + s.BranchOverflowN) * pageSize
- s.LeafAlloc = (s.LeafPageN + s.LeafOverflowN) * pageSize
-
- // Add the max depth of sub-buckets to get total nested depth.
- s.Depth += subStats.Depth
- // Add the stats for all sub-buckets.
- s.Add(subStats)
- return s
-}
-
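The accounting above relies on a layout invariant: element headers sit back-to-back after the page header, key/value data follows in element order, and each element's pos is measured from its own header. The last element's pos therefore already covers its own header plus every earlier key and value. A worked check with hypothetical numbers (3 leaf elements, 16-byte page and element headers, 3-byte keys, 5-byte values):

    lastElement.pos = 16 (own header) + 2*(3+5)    = 32
    used            = 16 + 16*(3-1) + (32 + 3 + 5) = 88
    direct count    = 16 + 3*16 + 3*(3+5)          = 88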
// forEachPage iterates over every page in a bucket, including inline pages.
func (b *Bucket) forEachPage(fn func(*page, int)) {
// If we have an inline page then just use that.
@@ -1166,9 +1012,6 @@ func (b *Bucket) node(pgid pgid, parent *node) *node {
n.read(p)
b.nodes[pgid] = n
- // Update statistics.
- b.tx.stats.NodeCount++
-
return n
}
@@ -1226,25 +1069,6 @@ func (b *Bucket) pageNode(id pgid) (*page, *node) {
return b.tx.page(id), nil
}
-func (s *BucketStats) Add(other BucketStats) {
- s.BranchPageN += other.BranchPageN
- s.BranchOverflowN += other.BranchOverflowN
- s.LeafPageN += other.LeafPageN
- s.LeafOverflowN += other.LeafOverflowN
- s.KeyN += other.KeyN
- if s.Depth < other.Depth {
- s.Depth = other.Depth
- }
- s.BranchAlloc += other.BranchAlloc
- s.BranchInuse += other.BranchInuse
- s.LeafAlloc += other.LeafAlloc
- s.LeafInuse += other.LeafInuse
-
- s.BucketN += other.BucketN
- s.InlineBucketN += other.InlineBucketN
- s.InlineBucketInuse += other.InlineBucketInuse
-}
-
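Note the one asymmetry in this merge: every counter is summed, but Depth takes the maximum, since sibling sub-buckets at the same level must not stack their depths (Stats itself then adds the sub-tree maximum onto the parent's own depth). A tiny sketch:

    // Sketch: merging stats of two sibling sub-buckets.
    var total, a, b BucketStats
    a.KeyN, a.Depth = 10, 2
    b.KeyN, b.Depth = 5, 3
    total.Add(a)
    total.Add(b)
    // total.KeyN == 15, total.Depth == 3 (max, not 5)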
// cloneBytes returns a copy of a given slice.
func cloneBytes(v []byte) []byte {
var clone = make([]byte, len(v))
@@ -1982,17 +1806,10 @@ func (db *DB) beginTx() (*Tx, error) {
// Keep track of transaction until it closes.
db.txs = append(db.txs, t)
- n := len(db.txs)
// Unlock the meta pages.
db.metalock.Unlock()
- // Update the transaction stats.
- db.statlock.Lock()
- db.stats.TxN++
- db.stats.OpenTxN = n
- db.statlock.Unlock()
-
return t, nil
}
@@ -2049,16 +1866,9 @@ func (db *DB) removeTx(tx *Tx) {
break
}
}
- n := len(db.txs)
// Unlock the meta pages.
db.metalock.Unlock()
-
- // Merge statistics.
- db.statlock.Lock()
- db.stats.OpenTxN = n
- db.stats.TxStats.add(&tx.stats)
- db.statlock.Unlock()
}
// Update executes a function within the context of a read-write managed transaction.
@@ -2237,14 +2047,6 @@ func safelyCall(fn func(*Tx) error, tx *Tx) (err error) {
return fn(tx)
}
-// Stats retrieves ongoing performance stats for the database.
-// This is only updated when a transaction closes.
-func (db *DB) Stats() Stats {
- db.statlock.RLock()
- defer db.statlock.RUnlock()
- return db.stats
-}
-
// page retrieves a page reference from the mmap based on the current page size.
func (db *DB) page(id pgid) *page {
pos := id * pgid(db.pageSize)
@@ -2340,23 +2142,6 @@ func (db *DB) grow(sz int) error {
return nil
}
-// Sub calculates and returns the difference between two sets of database stats.
-// This is useful when obtaining stats at two different points in time and
-// you need the performance counters that occurred within that time span.
-func (s *Stats) Sub(other *Stats) Stats {
- if other == nil {
- return *s
- }
- var diff Stats
- diff.FreePageN = s.FreePageN
- diff.PendingPageN = s.PendingPageN
- diff.FreeAlloc = s.FreeAlloc
- diff.FreelistInuse = s.FreelistInuse
- diff.TxN = s.TxN - other.TxN
- diff.TxStats = s.TxStats.Sub(&other.TxStats)
- return diff
-}
-
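A sketch of the interval-measurement pattern this supported, with two snapshots bracketing a window (db is an assumed open *DB). Note that the freelist fields are copied from the receiver rather than subtracted, since they are point-in-time gauges, not counters:

    // Sketch: counters accrued during a 10-second window.
    prev := db.Stats()
    time.Sleep(10 * time.Second)
    cur := db.Stats()
    diff := cur.Sub(&prev)
    fmt.Printf("read tx started in window: %d\n", diff.TxN)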
// validate checks the marker bytes and version of the meta page to ensure it matches this binary.
func (m *meta) validate() error {
if m.magic != magic {
@@ -2966,9 +2751,6 @@ func (n *node) splitTwo(pageSize int) (*node, *node) {
next.inodes = n.inodes[splitIndex:]
n.inodes = n.inodes[:splitIndex]
- // Update the statistics.
- n.bucket.tx.stats.Split++
-
return n, next
}
@@ -3052,9 +2834,6 @@ func (n *node) spill() error {
node.key = node.inodes[0].key
_assert(len(node.key) > 0, "spill: zero-length node key")
}
-
- // Update the statistics.
- tx.stats.Spill++
}
// If the root node split and created a new root then we need to spill that
@@ -3075,9 +2854,6 @@ func (n *node) rebalance() {
}
n.unbalanced = false
- // Update statistics.
- n.bucket.tx.stats.Rebalance++
-
// Ignore if node is above threshold (25%) and has enough keys.
var threshold = n.bucket.tx.db.pageSize / 4
if n.size() > threshold && len(n.inodes) > n.minKeys() {
@@ -3208,9 +2984,6 @@ func (n *node) dereference() {
for _, child := range n.children {
child.dereference()
}
-
- // Update statistics.
- n.bucket.tx.stats.NodeDeref++
}
// free adds the node's underlying page to the freelist.
@@ -3402,11 +3175,6 @@ func (tx *Tx) Cursor() *Cursor {
return tx.root.Cursor()
}
-// Stats retrieves a copy of the current transaction statistics.
-func (tx *Tx) Stats() TxStats {
- return tx.stats
-}
-
// Bucket retrieves a bucket by name.
// Returns nil if the bucket does not exist.
// The bucket instance is only valid for the lifetime of the transaction.
@@ -3465,19 +3233,13 @@ func (tx *Tx) Commit() error {
// TODO(benbjohnson): Use vectorized I/O to write out dirty pages.
// Rebalance nodes which have had deletions.
- var startTime = time.Now()
tx.root.rebalance()
- if tx.stats.Rebalance > 0 {
- tx.stats.RebalanceTime += time.Since(startTime)
- }
// spill data onto dirty pages.
- startTime = time.Now()
if err := tx.root.spill(); err != nil {
tx.rollback()
return err
}
- tx.stats.SpillTime += time.Since(startTime)
// Free the old root bucket.
tx.meta.root.root = tx.root.ref.root
@@ -3507,7 +3269,6 @@ func (tx *Tx) Commit() error {
}
// Write dirty pages to disk.
- startTime = time.Now()
if err := tx.write(); err != nil {
tx.rollback()
return err
@@ -3535,7 +3296,6 @@ func (tx *Tx) Commit() error {
tx.rollback()
return err
}
- tx.stats.WriteTime += time.Since(startTime)
// Finalize the transaction.
tx.close()
@@ -3575,23 +3335,9 @@ func (tx *Tx) close() {
return
}
if tx.writable {
- // Grab freelist stats.
- var freelistFreeN = tx.db.freelist.free_count()
- var freelistPendingN = tx.db.freelist.pending_count()
- var freelistAlloc = tx.db.freelist.size()
-
// Remove transaction ref & writer lock.
tx.db.rwtx = nil
tx.db.rwlock.Unlock()
-
- // Merge statistics.
- tx.db.statlock.Lock()
- tx.db.stats.FreePageN = freelistFreeN
- tx.db.stats.PendingPageN = freelistPendingN
- tx.db.stats.FreeAlloc = (freelistFreeN + freelistPendingN) * tx.db.pageSize
- tx.db.stats.FreelistInuse = freelistAlloc
- tx.db.stats.TxStats.add(&tx.stats)
- tx.db.statlock.Unlock()
} else {
tx.db.removeTx(tx)
}
@@ -3766,10 +3512,6 @@ func (tx *Tx) allocate(count int) (*page, error) {
// Save to our page cache.
tx.pages[p.id] = p
- // Update statistics.
- tx.stats.PageCount++
- tx.stats.PageAlloc += count * tx.db.pageSize
-
return p, nil
}
@@ -3804,9 +3546,6 @@ func (tx *Tx) write() error {
return err
}
- // Update statistics.
- tx.stats.Write++
-
// Exit inner for loop if we've written all the chunks.
size -= sz
if size == 0 {
@@ -3859,9 +3598,6 @@ func (tx *Tx) writeMeta() error {
return err
}
- // Update statistics.
- tx.stats.Write++
-
return nil
}
@@ -3922,41 +3658,6 @@ func (tx *Tx) Page(id int) (*PageInfo, error) {
return info, nil
}
-func (s *TxStats) add(other *TxStats) {
- s.PageCount += other.PageCount
- s.PageAlloc += other.PageAlloc
- s.CursorCount += other.CursorCount
- s.NodeCount += other.NodeCount
- s.NodeDeref += other.NodeDeref
- s.Rebalance += other.Rebalance
- s.RebalanceTime += other.RebalanceTime
- s.Split += other.Split
- s.Spill += other.Spill
- s.SpillTime += other.SpillTime
- s.Write += other.Write
- s.WriteTime += other.WriteTime
-}
-
-// Sub calculates and returns the difference between two sets of transaction stats.
-// This is useful when obtaining stats at two different points in time and
-// you need the performance counters that occurred within that time span.
-func (s *TxStats) Sub(other *TxStats) TxStats {
- var diff TxStats
- diff.PageCount = s.PageCount - other.PageCount
- diff.PageAlloc = s.PageAlloc - other.PageAlloc
- diff.CursorCount = s.CursorCount - other.CursorCount
- diff.NodeCount = s.NodeCount - other.NodeCount
- diff.NodeDeref = s.NodeDeref - other.NodeDeref
- diff.Rebalance = s.Rebalance - other.Rebalance
- diff.RebalanceTime = s.RebalanceTime - other.RebalanceTime
- diff.Split = s.Split - other.Split
- diff.Spill = s.Spill - other.Spill
- diff.SpillTime = s.SpillTime - other.SpillTime
- diff.Write = s.Write - other.Write
- diff.WriteTime = s.WriteTime - other.WriteTime
- return diff
-}
-
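The same pattern one level down; a hedged sketch deriving write activity over a window from the global TxStats aggregate (db is an assumed open *DB):

    // Sketch: write counters accrued between two snapshots.
    before := db.Stats().TxStats
    time.Sleep(time.Minute)
    after := db.Stats().TxStats
    d := after.Sub(&before)
    fmt.Printf("%d writes, %v spent writing\n", d.Write, d.WriteTime)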
func noopGetopt(args argsT, _w io.Writer) (argsT, bool) {
return args, true
}
diff --git a/tests/dedo.go b/tests/dedo.go
index 3634f00..3767d1d 100644
--- a/tests/dedo.go
+++ b/tests/dedo.go
@@ -1290,468 +1290,6 @@ func TestBucket_Put_ValueTooLarge(t *testing.T) {
}
}
-// Ensure a bucket can calculate stats.
-func TestBucket_Stats(t *testing.T) {
- return // FIXME
- db := MustOpenDB()
- defer db.MustClose()
- defer os.Remove(db.Path())
-
- // Add bucket with fewer keys but one big value.
- bigKey := []byte("really-big-value")
- const n = 1000 // FIXME: was too slow
- for i := 0; i < n; i++ {
- if err := db.Update(func(tx *Tx) error {
- b, err := tx.CreateBucketIfNotExists([]byte("woojits"))
- if err != nil {
- t.Fatal(err)
- }
-
- if err := b.Put([]byte(fmt.Sprintf("%03d", i)), []byte(strconv.Itoa(i))); err != nil {
- t.Fatal(err)
- }
- return nil
- }); err != nil {
- t.Fatal(err)
- }
- }
- if err := db.Update(func(tx *Tx) error {
- const size = 10000 // FIXME: was too slow
- if err := tx.Bucket([]byte("woojits")).Put(bigKey, []byte(strings.Repeat("*", size))); err != nil {
- t.Fatal(err)
- }
- return nil
- }); err != nil {
- t.Fatal(err)
- }
-
- db.MustCheck()
-
- if err := db.View(func(tx *Tx) error {
- stats := tx.Bucket([]byte("woojits")).Stats()
- if stats.BranchPageN != 1 {
- t.Fatalf("unexpected BranchPageN: %d", stats.BranchPageN)
- } else if stats.BranchOverflowN != 0 {
- t.Fatalf("unexpected BranchOverflowN: %d", stats.BranchOverflowN)
- } else if stats.LeafPageN != 7 {
- t.Fatalf("unexpected LeafPageN: %d", stats.LeafPageN)
- } else if stats.LeafOverflowN != 2 {
- t.Fatalf("unexpected LeafOverflowN: %d", stats.LeafOverflowN)
- } else if stats.KeyN != 501 {
- t.Fatalf("unexpected KeyN: %d", stats.KeyN)
- } else if stats.Depth != 2 {
- t.Fatalf("unexpected Depth: %d", stats.Depth)
- }
-
- branchInuse := 16 // branch page header
- branchInuse += 7 * 16 // branch elements
- branchInuse += 7 * 3 // branch keys (7 3-byte keys)
- if stats.BranchInuse != branchInuse {
- t.Fatalf("unexpected BranchInuse: %d", stats.BranchInuse)
- }
-
- leafInuse := 7 * 16 // leaf page headers
- leafInuse += 501 * 16 // leaf elements
- leafInuse += 500*3 + len(bigKey) // leaf keys
- leafInuse += 1*10 + 2*90 + 3*400 + 10000 // leaf values
- if stats.LeafInuse != leafInuse {
- t.Fatalf("unexpected LeafInuse: %d", stats.LeafInuse)
- }
-
- // Only check allocations for 4KB pages.
- if os.Getpagesize() == 4096 {
- if stats.BranchAlloc != 4096 {
- t.Fatalf("unexpected BranchAlloc: %d", stats.BranchAlloc)
- } else if stats.LeafAlloc != 36864 {
- t.Fatalf("unexpected LeafAlloc: %d", stats.LeafAlloc)
- }
- }
-
- if stats.BucketN != 1 {
- t.Fatalf("unexpected BucketN: %d", stats.BucketN)
- } else if stats.InlineBucketN != 0 {
- t.Fatalf("unexpected InlineBucketN: %d", stats.InlineBucketN)
- } else if stats.InlineBucketInuse != 0 {
- t.Fatalf("unexpected InlineBucketInuse: %d", stats.InlineBucketInuse)
- }
-
- return nil
- }); err != nil {
- t.Fatal(err)
- }
-}
-
-// Ensure a bucket with random insertion utilizes fill percentage correctly.
-func TestBucket_Stats_RandomFill(t *testing.T) {
- return // FIXME
- if os.Getpagesize() != 4096 {
- t.Skip("invalid page size for test")
- }
-
- db := MustOpenDB()
- defer db.MustClose()
- defer os.Remove(db.Path())
-
- // Add a set of values in random order. It will be the same random
- // order so we can maintain consistency between test runs.
- var count int
- rand := rand.New(rand.NewSource(42))
- for _, i := range rand.Perm(1000) {
- if err := db.Update(func(tx *Tx) error {
- b, err := tx.CreateBucketIfNotExists([]byte("woojits"))
- if err != nil {
- t.Fatal(err)
- }
-
- for _, j := range rand.Perm(100) {
- index := (j * 10000) + i
- if err := b.Put([]byte(fmt.Sprintf("%d000000000000000", index)), []byte("0000000000")); err != nil {
- t.Fatal(err)
- }
- count++
- }
- return nil
- }); err != nil {
- t.Fatal(err)
- }
- }
-
- db.MustCheck()
-
- if err := db.View(func(tx *Tx) error {
- stats := tx.Bucket([]byte("woojits")).Stats()
- if stats.KeyN != 100000 {
- t.Fatalf("unexpected KeyN: %d", stats.KeyN)
- }
-
- if stats.BranchPageN != 98 {
- t.Fatalf("unexpected BranchPageN: %d", stats.BranchPageN)
- } else if stats.BranchOverflowN != 0 {
- t.Fatalf("unexpected BranchOverflowN: %d", stats.BranchOverflowN)
- } else if stats.BranchInuse != 130984 {
- t.Fatalf("unexpected BranchInuse: %d", stats.BranchInuse)
- } else if stats.BranchAlloc != 401408 {
- t.Fatalf("unexpected BranchAlloc: %d", stats.BranchAlloc)
- }
-
- if stats.LeafPageN != 3412 {
- t.Fatalf("unexpected LeafPageN: %d", stats.LeafPageN)
- } else if stats.LeafOverflowN != 0 {
- t.Fatalf("unexpected LeafOverflowN: %d", stats.LeafOverflowN)
- } else if stats.LeafInuse != 4742482 {
- t.Fatalf("unexpected LeafInuse: %d", stats.LeafInuse)
- } else if stats.LeafAlloc != 13975552 {
- t.Fatalf("unexpected LeafAlloc: %d", stats.LeafAlloc)
- }
- return nil
- }); err != nil {
- t.Fatal(err)
- }
-}
-
-// Ensure a bucket can calculate stats.
-func TestBucket_Stats_Small(t *testing.T) {
- db := MustOpenDB()
- defer db.MustClose()
- defer os.Remove(db.Path())
-
- if err := db.Update(func(tx *Tx) error {
- // Add a bucket that fits on a single root leaf.
- b, err := tx.CreateBucket([]byte("whozawhats"))
- if err != nil {
- t.Fatal(err)
- }
- if err := b.Put([]byte("foo"), []byte("bar")); err != nil {
- t.Fatal(err)
- }
-
- return nil
- }); err != nil {
- t.Fatal(err)
- }
-
- db.MustCheck()
-
- if err := db.View(func(tx *Tx) error {
- b := tx.Bucket([]byte("whozawhats"))
- stats := b.Stats()
- if stats.BranchPageN != 0 {
- t.Fatalf("unexpected BranchPageN: %d", stats.BranchPageN)
- } else if stats.BranchOverflowN != 0 {
- t.Fatalf("unexpected BranchOverflowN: %d", stats.BranchOverflowN)
- } else if stats.LeafPageN != 0 {
- t.Fatalf("unexpected LeafPageN: %d", stats.LeafPageN)
- } else if stats.LeafOverflowN != 0 {
- t.Fatalf("unexpected LeafOverflowN: %d", stats.LeafOverflowN)
- } else if stats.KeyN != 1 {
- t.Fatalf("unexpected KeyN: %d", stats.KeyN)
- } else if stats.Depth != 1 {
- t.Fatalf("unexpected Depth: %d", stats.Depth)
- } else if stats.BranchInuse != 0 {
- t.Fatalf("unexpected BranchInuse: %d", stats.BranchInuse)
- } else if stats.LeafInuse != 0 {
- t.Fatalf("unexpected LeafInuse: %d", stats.LeafInuse)
- }
-
- if os.Getpagesize() == 4096 {
- if stats.BranchAlloc != 0 {
- t.Fatalf("unexpected BranchAlloc: %d", stats.BranchAlloc)
- } else if stats.LeafAlloc != 0 {
- t.Fatalf("unexpected LeafAlloc: %d", stats.LeafAlloc)
- }
- }
-
- if stats.BucketN != 1 {
- t.Fatalf("unexpected BucketN: %d", stats.BucketN)
- } else if stats.InlineBucketN != 1 {
- t.Fatalf("unexpected InlineBucketN: %d", stats.InlineBucketN)
- } else if stats.InlineBucketInuse != 16+16+6 {
- t.Fatalf("unexpected InlineBucketInuse: %d", stats.InlineBucketInuse)
- }
-
- return nil
- }); err != nil {
- t.Fatal(err)
- }
-}
-
-func TestBucket_Stats_EmptyBucket(t *testing.T) {
- db := MustOpenDB()
- defer db.MustClose()
- defer os.Remove(db.Path())
-
- if err := db.Update(func(tx *Tx) error {
- // Add a bucket that fits on a single root leaf.
- if _, err := tx.CreateBucket([]byte("whozawhats")); err != nil {
- t.Fatal(err)
- }
- return nil
- }); err != nil {
- t.Fatal(err)
- }
-
- db.MustCheck()
-
- if err := db.View(func(tx *Tx) error {
- b := tx.Bucket([]byte("whozawhats"))
- stats := b.Stats()
- if stats.BranchPageN != 0 {
- t.Fatalf("unexpected BranchPageN: %d", stats.BranchPageN)
- } else if stats.BranchOverflowN != 0 {
- t.Fatalf("unexpected BranchOverflowN: %d", stats.BranchOverflowN)
- } else if stats.LeafPageN != 0 {
- t.Fatalf("unexpected LeafPageN: %d", stats.LeafPageN)
- } else if stats.LeafOverflowN != 0 {
- t.Fatalf("unexpected LeafOverflowN: %d", stats.LeafOverflowN)
- } else if stats.KeyN != 0 {
- t.Fatalf("unexpected KeyN: %d", stats.KeyN)
- } else if stats.Depth != 1 {
- t.Fatalf("unexpected Depth: %d", stats.Depth)
- } else if stats.BranchInuse != 0 {
- t.Fatalf("unexpected BranchInuse: %d", stats.BranchInuse)
- } else if stats.LeafInuse != 0 {
- t.Fatalf("unexpected LeafInuse: %d", stats.LeafInuse)
- }
-
- if os.Getpagesize() == 4096 {
- if stats.BranchAlloc != 0 {
- t.Fatalf("unexpected BranchAlloc: %d", stats.BranchAlloc)
- } else if stats.LeafAlloc != 0 {
- t.Fatalf("unexpected LeafAlloc: %d", stats.LeafAlloc)
- }
- }
-
- if stats.BucketN != 1 {
- t.Fatalf("unexpected BucketN: %d", stats.BucketN)
- } else if stats.InlineBucketN != 1 {
- t.Fatalf("unexpected InlineBucketN: %d", stats.InlineBucketN)
- } else if stats.InlineBucketInuse != 16 {
- t.Fatalf("unexpected InlineBucketInuse: %d", stats.InlineBucketInuse)
- }
-
- return nil
- }); err != nil {
- t.Fatal(err)
- }
-}
-
-// Ensure a bucket can calculate stats.
-func TestBucket_Stats_Nested(t *testing.T) {
- db := MustOpenDB()
- defer db.MustClose()
- defer os.Remove(db.Path())
-
- if err := db.Update(func(tx *Tx) error {
- b, err := tx.CreateBucket([]byte("foo"))
- if err != nil {
- t.Fatal(err)
- }
- for i := 0; i < 100; i++ {
- if err := b.Put([]byte(fmt.Sprintf("%02d", i)), []byte(fmt.Sprintf("%02d", i))); err != nil {
- t.Fatal(err)
- }
- }
-
- bar, err := b.CreateBucket([]byte("bar"))
- if err != nil {
- t.Fatal(err)
- }
- for i := 0; i < 10; i++ {
- if err := bar.Put([]byte(strconv.Itoa(i)), []byte(strconv.Itoa(i))); err != nil {
- t.Fatal(err)
- }
- }
-
- baz, err := bar.CreateBucket([]byte("baz"))
- if err != nil {
- t.Fatal(err)
- }
- for i := 0; i < 10; i++ {
- if err := baz.Put([]byte(strconv.Itoa(i)), []byte(strconv.Itoa(i))); err != nil {
- t.Fatal(err)
- }
- }
-
- return nil
- }); err != nil {
- t.Fatal(err)
- }
-
- db.MustCheck()
-
- if err := db.View(func(tx *Tx) error {
- b := tx.Bucket([]byte("foo"))
- stats := b.Stats()
- if stats.BranchPageN != 0 {
- t.Fatalf("unexpected BranchPageN: %d", stats.BranchPageN)
- } else if stats.BranchOverflowN != 0 {
- t.Fatalf("unexpected BranchOverflowN: %d", stats.BranchOverflowN)
- } else if stats.LeafPageN != 2 {
- t.Fatalf("unexpected LeafPageN: %d", stats.LeafPageN)
- } else if stats.LeafOverflowN != 0 {
- t.Fatalf("unexpected LeafOverflowN: %d", stats.LeafOverflowN)
- } else if stats.KeyN != 122 {
- t.Fatalf("unexpected KeyN: %d", stats.KeyN)
- } else if stats.Depth != 3 {
- t.Fatalf("unexpected Depth: %d", stats.Depth)
- } else if stats.BranchInuse != 0 {
- t.Fatalf("unexpected BranchInuse: %d", stats.BranchInuse)
- }
-
- foo := 16 // foo (pghdr)
- foo += 101 * 16 // foo leaf elements
- foo += 100*2 + 100*2 // foo leaf key/values
- foo += 3 + 16 // foo -> bar key/value
-
- bar := 16 // bar (pghdr)
- bar += 11 * 16 // bar leaf elements
- bar += 10 + 10 // bar leaf key/values
- bar += 3 + 16 // bar -> baz key/value
-
- baz := 16 // baz (inline) (pghdr)
- baz += 10 * 16 // baz leaf elements
- baz += 10 + 10 // baz leaf key/values
-
- if stats.LeafInuse != foo+bar+baz {
- t.Fatalf("unexpected LeafInuse: %d", stats.LeafInuse)
- }
-
- if os.Getpagesize() == 4096 {
- if stats.BranchAlloc != 0 {
- t.Fatalf("unexpected BranchAlloc: %d", stats.BranchAlloc)
- } else if stats.LeafAlloc != 8192 {
- t.Fatalf("unexpected LeafAlloc: %d", stats.LeafAlloc)
- }
- }
-
- if stats.BucketN != 3 {
- t.Fatalf("unexpected BucketN: %d", stats.BucketN)
- } else if stats.InlineBucketN != 1 {
- t.Fatalf("unexpected InlineBucketN: %d", stats.InlineBucketN)
- } else if stats.InlineBucketInuse != baz {
- t.Fatalf("unexpected InlineBucketInuse: %d", stats.InlineBucketInuse)
- }
-
- return nil
- }); err != nil {
- t.Fatal(err)
- }
-}
-
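For anyone auditing the deleted expectations, the three size computations above work out to foo = 16 + 101*16 + (100*2 + 100*2) + (3 + 16) = 2051, bar = 16 + 11*16 + (10 + 10) + (3 + 16) = 231, and baz = 16 + 10*16 + (10 + 10) = 196, so the asserted LeafInuse is 2051 + 231 + 196 = 2478 bytes and baz alone (196) is the expected InlineBucketInuse.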
-// Ensure a large bucket can calculate stats.
-func TestBucket_Stats_Large(t *testing.T) {
- if testing.Short() {
- t.Skip("skipping test in short mode.")
- }
-
- db := MustOpenDB()
- defer db.MustClose()
- defer os.Remove(db.Path())
-
- var index int
- for i := 0; i < 100; i++ {
- // Add bucket with lots of keys.
- if err := db.Update(func(tx *Tx) error {
- b, err := tx.CreateBucketIfNotExists([]byte("widgets"))
- if err != nil {
- t.Fatal(err)
- }
- for i := 0; i < 1000; i++ {
- if err := b.Put([]byte(strconv.Itoa(index)), []byte(strconv.Itoa(index))); err != nil {
- t.Fatal(err)
- }
- index++
- }
- return nil
- }); err != nil {
- t.Fatal(err)
- }
- }
-
- db.MustCheck()
-
- if err := db.View(func(tx *Tx) error {
- stats := tx.Bucket([]byte("widgets")).Stats()
- if stats.BranchPageN != 13 {
- t.Fatalf("unexpected BranchPageN: %d", stats.BranchPageN)
- } else if stats.BranchOverflowN != 0 {
- t.Fatalf("unexpected BranchOverflowN: %d", stats.BranchOverflowN)
- } else if stats.LeafPageN != 1196 {
- t.Fatalf("unexpected LeafPageN: %d", stats.LeafPageN)
- } else if stats.LeafOverflowN != 0 {
- t.Fatalf("unexpected LeafOverflowN: %d", stats.LeafOverflowN)
- } else if stats.KeyN != 100000 {
- t.Fatalf("unexpected KeyN: %d", stats.KeyN)
- } else if stats.Depth != 3 {
- t.Fatalf("unexpected Depth: %d", stats.Depth)
- } else if stats.BranchInuse != 25257 {
- t.Fatalf("unexpected BranchInuse: %d", stats.BranchInuse)
- } else if stats.LeafInuse != 2596916 {
- t.Fatalf("unexpected LeafInuse: %d", stats.LeafInuse)
- }
-
- if os.Getpagesize() == 4096 {
- if stats.BranchAlloc != 53248 {
- t.Fatalf("unexpected BranchAlloc: %d", stats.BranchAlloc)
- } else if stats.LeafAlloc != 4898816 {
- t.Fatalf("unexpected LeafAlloc: %d", stats.LeafAlloc)
- }
- }
-
- if stats.BucketN != 1 {
- t.Fatalf("unexpected BucketN: %d", stats.BucketN)
- } else if stats.InlineBucketN != 0 {
- t.Fatalf("unexpected InlineBucketN: %d", stats.InlineBucketN)
- } else if stats.InlineBucketInuse != 0 {
- t.Fatalf("unexpected InlineBucketInuse: %d", stats.InlineBucketInuse)
- }
-
- return nil
- }); err != nil {
- t.Fatal(err)
- }
-}
-
// Ensure that a bucket can write random keys and values across multiple transactions.
func TestBucket_Put_Single(t *testing.T) {
if testing.Short() {
@@ -2216,16 +1754,6 @@ func TestCursor_Delete(t *testing.T) {
}); err != nil {
t.Fatal(err)
}
-
- if err := db.View(func(tx *Tx) error {
- stats := tx.Bucket([]byte("widgets")).Stats()
- if stats.KeyN != count/2+1 {
- t.Fatalf("unexpected KeyN: %d", stats.KeyN)
- }
- return nil
- }); err != nil {
- t.Fatal(err)
- }
}
// Ensure that a Tx cursor can seek to the appropriate keys when there are a
@@ -2895,8 +2423,6 @@ func ExampleCursor_reverse() {
// A cat is lame.
}
-var statsFlag = flag.Bool("stats", false, "show performance stats")
-
// pageSize is the size of one page in the data file.
const pageSize = 4096
@@ -3619,29 +3145,6 @@ func TestDB_View_Panic(t *testing.T) {
}
}
-// Ensure that DB stats can be returned.
-func TestDB_Stats(t *testing.T) {
- db := MustOpenDB()
- defer db.MustClose()
- defer os.Remove(db.Path())
-
- if err := db.Update(func(tx *Tx) error {
- _, err := tx.CreateBucket([]byte("widgets"))
- return err
- }); err != nil {
- t.Fatal(err)
- }
-
- stats := db.Stats()
- if stats.TxStats.PageCount != 2 {
- t.Fatalf("unexpected TxStats.PageCount: %d", stats.TxStats.PageCount)
- } else if stats.FreePageN != 0 {
- t.Fatalf("unexpected FreePageN != 0: %d", stats.FreePageN)
- } else if stats.PendingPageN != 2 {
- t.Fatalf("unexpected PendingPageN != 2: %d", stats.PendingPageN)
- }
-}
-
// Ensure that database pages are in expected order and type.
func TestDB_Consistency(t *testing.T) {
db := MustOpenDB()
@@ -3712,24 +3215,6 @@ func TestDB_Consistency(t *testing.T) {
}
}
-// Ensure that DB stats can be subtracted from one another.
-func TestDBStats_Sub(t *testing.T) {
- var a, b Stats
- a.TxStats.PageCount = 3
- a.FreePageN = 4
- b.TxStats.PageCount = 10
- b.FreePageN = 14
- diff := b.Sub(&a)
- if diff.TxStats.PageCount != 7 {
- t.Fatalf("unexpected TxStats.PageCount: %d", diff.TxStats.PageCount)
- }
-
- // free page stats are copied from the receiver and not subtracted
- if diff.FreePageN != 14 {
- t.Fatalf("unexpected FreePageN: %d", diff.FreePageN)
- }
-}
-
// Ensure two functions can perform updates in a single batch.
func TestDB_Batch(t *testing.T) {
db := MustOpenDB()
@@ -4257,10 +3742,6 @@ func MustOpenDB() *WDB {
// Close closes the database and deletes the underlying file.
func (db *WDB) Close() error {
defer os.Remove(db.Path())
- // Log statistics.
- if *statsFlag {
- db.PrintStats()
- }
// Check database consistency after every test.
db.MustCheck()
@@ -4277,21 +3758,6 @@ func (db *WDB) MustClose() {
os.Remove(db.Path())
}
-// PrintStats prints the database stats.
-func (db *WDB) PrintStats() {
- var stats = db.Stats()
- fmt.Printf("[db] %-20s %-20s %-20s\n",
- fmt.Sprintf("pg(%d/%d)", stats.TxStats.PageCount, stats.TxStats.PageAlloc),
- fmt.Sprintf("cur(%d)", stats.TxStats.CursorCount),
- fmt.Sprintf("node(%d/%d)", stats.TxStats.NodeCount, stats.TxStats.NodeDeref),
- )
- fmt.Printf(" %-20s %-20s %-20s\n",
- fmt.Sprintf("rebal(%d/%v)", stats.TxStats.Rebalance, truncDuration(stats.TxStats.RebalanceTime)),
- fmt.Sprintf("spill(%d/%v)", stats.TxStats.Spill, truncDuration(stats.TxStats.SpillTime)),
- fmt.Sprintf("w(%d/%v)", stats.TxStats.Write, truncDuration(stats.TxStats.WriteTime)),
- )
-}
-
// MustCheck runs a consistency check on the database and panics if any errors are found.
func (db *WDB) MustCheck() {
if err := db.Update(func(tx *Tx) error {
@@ -6068,12 +5534,6 @@ func MainTest() {
{ "TestBucket_Put_EmptyKey", TestBucket_Put_EmptyKey },
{ "TestBucket_Put_KeyTooLarge", TestBucket_Put_KeyTooLarge },
{ "TestBucket_Put_ValueTooLarge", TestBucket_Put_ValueTooLarge },
- { "TestBucket_Stats", TestBucket_Stats },
- { "TestBucket_Stats_RandomFill", TestBucket_Stats_RandomFill },
- { "TestBucket_Stats_Small", TestBucket_Stats_Small },
- { "TestBucket_Stats_EmptyBucket", TestBucket_Stats_EmptyBucket },
- { "TestBucket_Stats_Nested", TestBucket_Stats_Nested },
- { "TestBucket_Stats_Large", TestBucket_Stats_Large },
// { "TestBucket_Put_Single", TestBucket_Put_Single },
// { "TestBucket_Put_Multiple", TestBucket_Put_Multiple },
// { "TestBucket_Delete_Quick", TestBucket_Delete_Quick },
@@ -6117,9 +5577,7 @@ func MainTest() {
{ "TestDB_Update_Panic", TestDB_Update_Panic },
{ "TestDB_View_Error", TestDB_View_Error },
{ "TestDB_View_Panic", TestDB_View_Panic },
- { "TestDB_Stats", TestDB_Stats },
{ "TestDB_Consistency", TestDB_Consistency },
- { "TestDBStats_Sub", TestDBStats_Sub },
{ "TestDB_Batch", TestDB_Batch },
{ "TestDB_Batch_Panic", TestDB_Batch_Panic },
{ "TestDB_BatchFull", TestDB_BatchFull },