-rw-r--r--  README.md                           |  34
-rw-r--r--  branch_node.go                      |  79
-rw-r--r--  bucket.go                           |   1
-rw-r--r--  cursor.go                           |  48
-rw-r--r--  db.go                               |  10
-rw-r--r--  node.go (renamed from leaf_node.go) |  16
-rw-r--r--  page.go                             |  13
-rw-r--r--  rwcursor.go                         | 831
-rw-r--r--  rwtransaction.go                    |   2
9 files changed, 490 insertions, 544 deletions
diff --git a/README.md b/README.md
index e26dc46..0309695 100644
--- a/README.md
+++ b/README.md
@@ -1,4 +1,38 @@
bolt
====
+## Overview
+
A low-level key/value database for Go.
+
+
+## API
+
+### DB
+
+### Creating a database
+
+```
+var db DB
+err := db.Open("/path/to/db", 0666)
+...
+err = db.Close()
+```
+
+### Creating a bucket
+
+
+* Cursor
+
+
+```
+DB
+Bucket
+Transaction / RWTransaction
+Cursor / RWCursor
+
+page
+meta
+branchNode
+leafNode
+```
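
As a quick aside on the API sketched above: the snippet opens a database at a path with a file mode and closes it when done. A minimal caller might look like the following; the exact Open/Close signatures and the zero-value usability of DB are assumptions read off this diff, not a settled API.

```go
// Hypothetical caller of the API sketched in the README above. The DB type
// and the Open(path, mode) / Close() signatures are assumptions taken from
// this diff, not a confirmed interface.
func example() error {
	var db DB
	if err := db.Open("/path/to/db", 0666); err != nil {
		return err
	}
	// ... work with buckets, transactions, and cursors ...
	return db.Close()
}
```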
diff --git a/branch_node.go b/branch_node.go
index 58ab5a5..a5bd071 100644
--- a/branch_node.go
+++ b/branch_node.go
@@ -10,86 +10,7 @@ const (
dupNode = 0x04
)
-// branchNode represents a node on a branch page.
-type branchNode struct {
- pgno uint32
- flags uint16
- keySize uint16
- data uintptr // Pointer to the beginning of the data.
-}
-
// key returns a byte slice of the key data.
func (n *branchNode) key() []byte {
return (*[MaxKeySize]byte)(unsafe.Pointer(&n.data))[:n.keySize]
}
-
-func (n *branchNode) size() int {
- return 0 // TODO: offsetof(MDB_node, mn_data)
-}
-
-// TODO: #define INDXSIZE(k) (NODESIZE + ((k) == NULL ? 0 : (k)->mv_size))
-// TODO: #define LEAFSIZE(k, d) (NODESIZE + (k)->mv_size + (d)->mv_size)
-// TODO: #define NODEPTR(p, i) ((MDB_node *)((char *)(p) + (p)->mp_ptrs[i]))
-// TODO: #define NODEKEY(node) (void *)((node)->mn_data)
-// TODO: #define NODEDATA(node) (void *)((char *)(node)->mn_data + (node)->mn_ksize)
-// TODO: #define NODEPGNO(node) ((node)->mn_lo | ((pgno_t) (node)->mn_hi << 16) | (PGNO_TOPWORD ? ((pgno_t) (node)->mn_flags << PGNO_TOPWORD) : 0))
-// TODO: #define SETPGNO(node,pgno) do { (node)->mn_lo = (pgno) & 0xffff; (node)->mn_hi = (pgno) >> 16; if (PGNO_TOPWORD) (node)->mn_flags = (pgno) >> PGNO_TOPWORD; } while(0)
-// TODO: #define NODEDSZ(node) ((node)->mn_lo | ((unsigned)(node)->mn_hi << 16))
-// TODO: #define SETDSZ(node,size) do { (node)->mn_lo = (size) & 0xffff; (node)->mn_hi = (size) >> 16;} while(0)
-// TODO: #define NODEKSZ(node) ((node)->mn_ksize)
-
-// TODO: #define LEAF2KEY(p, i, ks) ((char *)(p) + PAGEHDRSZ + ((i)*(ks)))
-
-// TODO: #define MDB_GET_KEY(node, keyptr) { if ((keyptr) != NULL) { (keyptr)->mv_size = NODEKSZ(node); (keyptr)->mv_data = NODEKEY(node); } }
-// TODO: #define MDB_GET_KEY2(node, key) { key.mv_size = NODEKSZ(node); key.mv_data = NODEKEY(node); }
-
-// Compact the main page after deleting a node on a subpage.
-// @param[in] mp The main page to operate on.
-// @param[in] indx The index of the subpage on the main page.
-func (n *node) shrink(index int) {
- /*
- MDB_node *node;
- MDB_page *sp, *xp;
- char *base;
- int nsize, delta;
- indx_t i, numkeys, ptr;
-
- node = NODEPTR(mp, indx);
- sp = (MDB_page *)NODEDATA(node);
- delta = SIZELEFT(sp);
- xp = (MDB_page *)((char *)sp + delta);
-
- // shift subpage upward
- if (IS_LEAF2(sp)) {
- nsize = NUMKEYS(sp) * sp->mp_pad;
- if (nsize & 1)
- return; // do not make the node uneven-sized
- memmove(METADATA(xp), METADATA(sp), nsize);
- } else {
- int i;
- numkeys = NUMKEYS(sp);
- for (i=numkeys-1; i>=0; i--)
- xp->mp_ptrs[i] = sp->mp_ptrs[i] - delta;
- }
- xp->mp_upper = sp->mp_lower;
- xp->mp_lower = sp->mp_lower;
- xp->mp_flags = sp->mp_flags;
- xp->mp_pad = sp->mp_pad;
- COPY_PGNO(xp->mp_pgno, mp->mp_pgno);
-
- nsize = NODEDSZ(node) - delta;
- SETDSZ(node, nsize);
-
- // shift lower nodes upward
- ptr = mp->mp_ptrs[indx];
- numkeys = NUMKEYS(mp);
- for (i = 0; i < numkeys; i++) {
- if (mp->mp_ptrs[i] <= ptr)
- mp->mp_ptrs[i] += delta;
- }
-
- base = (char *)mp + mp->mp_upper;
- memmove(base + delta, base, ptr - mp->mp_upper + NODESIZE + NODEKSZ(node));
- mp->mp_upper += delta;
- */
-}
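
The surviving key() accessor reads keySize bytes starting at the node's data field by casting through unsafe.Pointer to a maximum-size array and reslicing. A self-contained sketch of that slicing pattern, using toy names rather than the real page layout:

```go
package main

import (
	"fmt"
	"unsafe"
)

// rawNode is a stand-in for a node header followed by key bytes; the real
// layout lives inside a page, so the names and sizes here are illustrative.
type rawNode struct {
	keySize uint16
	data    [16]byte
}

// key mirrors the (*[MaxKeySize]byte)(unsafe.Pointer(&n.data))[:n.keySize]
// pattern above: reinterpret the data as a byte array and slice keySize bytes.
func (n *rawNode) key() []byte {
	return (*[16]byte)(unsafe.Pointer(&n.data))[:n.keySize]
}

func main() {
	n := rawNode{keySize: 3}
	copy(n.data[:], "foobar")
	fmt.Printf("%s\n", n.key()) // foo
}
```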
diff --git a/bucket.go b/bucket.go
index f42db47..ece92b5 100644
--- a/bucket.go
+++ b/bucket.go
@@ -29,4 +29,3 @@ type bucket struct {
entries uint64
root pgno
}
-
diff --git a/cursor.go b/cursor.go
index d63cf94..13a56cf 100644
--- a/cursor.go
+++ b/cursor.go
@@ -5,31 +5,19 @@ package bolt
const (
c_initialized = 0x01 /**< cursor has been initialized and is valid */
c_eof = 0x02 /**< No more data */
- c_sub = 0x04 /**< Cursor is a sub-cursor */
c_del = 0x08 /**< last op was a cursor_del */
c_splitting = 0x20 /**< Cursor is in page_split */
c_untrack = 0x40 /**< Un-track cursor when closing */
)
-// TODO: #define MDB_NOSPILL 0x8000 /** Do not spill pages to disk if txn is getting full, may fail instead */
-
/*
type Cursor interface {
First() error
- FirstDup() error
- Get() ([]byte, []byte, error)
- GetRange() ([]byte, []byte, error)
- Current() ([]byte, []byte, error)
- Last()
- LastDup()
+ Last() error
Next() ([]byte, []byte, error)
- NextDup() ([]byte, []byte, error)
- NextNoDup() ([]byte, []byte, error)
Prev() ([]byte, []byte, error)
- PrevDup() ([]byte, []byte, error)
- PrevNoDup() ([]byte, []byte, error)
- Set() ([]byte, []byte, error)
- SetRange() ([]byte, []byte, error)
+ Current() ([]byte, []byte, error)
+ Get([]byte) ([]byte, error)
}
*/
@@ -37,13 +25,11 @@ type Cursor struct {
flags int
next *Cursor
backup *Cursor
- subcursor *Cursor
transaction *Transaction
bucket *Bucket
- subbucket *Bucket
top int
pages []*page
- indices []int /* the index of the node for the page at the same level */
+ indices []int /* the index of the node for the page at the same level */
}
// , data []byte, op int
@@ -53,12 +39,6 @@ func (c *Cursor) Get(key []byte) ([]byte, error) {
int exact = 0;
int (*mfunc)(MDB_cursor *mc, MDB_val *key, MDB_val *data);
- if (mc == NULL)
- return EINVAL;
-
- if (mc->mc_txn->mt_flags & MDB_TXN_ERROR)
- return MDB_BAD_TXN;
-
switch (op) {
case MDB_GET_CURRENT:
if (!(mc->mc_flags & C_INITIALIZED)) {
@@ -222,14 +202,14 @@ func (c *Cursor) page(key []byte, flags int) (*page, error) {
if (flags & ps_first) != 0 {
index = 0
} else if (flags & ps_last) != 0 {
- index = indx(p.numkeys()) - 1;
+ index = indx(p.numkeys()) - 1
} else {
- node, i, exact := p.find(key, c.transaction.db.pageSize);
+ node, i, exact := p.find(key, c.transaction.db.pageSize)
if exact {
c.indices[c.top] = i
}
if node == nil {
- index = indx(p.numkeys()) - 1;
+ index = indx(p.numkeys()) - 1
} else {
index = indx(c.indices[c.top])
if !exact {
@@ -255,7 +235,7 @@ func (c *Cursor) page(key []byte, flags int) (*page, error) {
}
// If we ended up with a non-leaf page by the end then something is wrong.
- if p.flags & p_leaf == 0 {
+ if p.flags&p_leaf == 0 {
return nil, CorruptedError
}
@@ -267,7 +247,7 @@ func (c *Cursor) page(key []byte, flags int) (*page, error) {
// pop moves the last page off the cursor's page stack.
func (c *Cursor) pop() {
- top := len(c.pages)-1
+ top := len(c.pages) - 1
c.pages = c.pages[0:top]
c.indices = c.indices[0:top]
}
@@ -279,8 +259,8 @@ func (c *Cursor) push(p *page) {
c.top = len(c.pages) - 1
}
-// page retrieves the last page on the page stack.
-func (c *Cursor) page() *page {
+// currentPage retrieves the last page on the page stack.
+func (c *Cursor) currentPage() *page {
top := len(c.pages)
if top > 0 {
return c.pages[top-1]
@@ -306,7 +286,6 @@ func (c *Cursor) currentLeafNode() *node {
return nil
}
-
// //
// //
// //
@@ -642,9 +621,6 @@ func (c *Cursor) page_touch() int {
return 0
}
-
-
-
// Search for the lowest key under the current branch page.
// This just bypasses a NUMKEYS check in the current page
// before calling mdb_page_search_root(), because the callers
@@ -1262,7 +1238,7 @@ func (c *Cursor) touch() error {
}
return rc;
}
-*/
+ */
return nil
}
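
The cursor keeps its position as parallel pages/indices stacks, with push appending to both and recording top, and pop truncating them. A toy, self-contained version of that bookkeeping; the page type here is a placeholder, and the truncation below assumes top tracks len(pages)-1 as push maintains:

```go
package main

import "fmt"

// page is a placeholder for bolt's page type.
type page struct{ id int }

// stack mirrors the cursor's parallel pages/indices bookkeeping shown above.
type stack struct {
	pages   []*page
	indices []int
	top     int
}

// push adds a page and an index slot, then records the new top.
func (s *stack) push(p *page) {
	s.pages = append(s.pages, p)
	s.indices = append(s.indices, 0)
	s.top = len(s.pages) - 1
}

// pop drops the last page and index, keeping the two slices in lockstep.
func (s *stack) pop() {
	s.pages = s.pages[:len(s.pages)-1]
	s.indices = s.indices[:len(s.indices)-1]
	s.top = len(s.pages) - 1
}

func main() {
	var s stack
	s.push(&page{id: 1})
	s.push(&page{id: 2})
	s.pop()
	fmt.Println(len(s.pages), s.top) // 1 0
}
```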
diff --git a/db.go b/db.go
index 480d059..d7510c2 100644
--- a/db.go
+++ b/db.go
@@ -46,11 +46,11 @@ type DB struct {
size int /**< current file size */
pbuf []byte
transaction *RWTransaction /**< current write transaction */
- maxPageNumber int /**< me_mapsize / me_psize */
- dpages []*page /**< list of malloc'd blocks for re-use */
- freePages []int /** IDL of pages that became unused in a write txn */
- dirtyPages []int /** ID2L of pages written during a write txn. Length MDB_IDL_UM_SIZE. */
- maxFreeOnePage int /** Max number of freelist items that can fit in a single overflow page */
+ maxPageNumber int /**< me_mapsize / me_psize */
+ dpages []*page /**< list of malloc'd blocks for re-use */
+ freePages []int /** IDL of pages that became unused in a write txn */
+ dirtyPages []int /** ID2L of pages written during a write txn. Length MDB_IDL_UM_SIZE. */
+ maxFreeOnePage int /** Max number of freelist items that can fit in a single overflow page */
maxPageDataSize int
maxNodeSize int /** Max size of a node on a page */
maxKeySize int /**< max size of a key */
diff --git a/leaf_node.go b/node.go
index 80b5cf7..7a64459 100644
--- a/leaf_node.go
+++ b/node.go
@@ -4,14 +4,26 @@ import (
"unsafe"
)
+// node represents a node on a page.
+type node struct {
+ flags uint16
+ keySize uint16
+}
+
// leafNode represents a node on a leaf page.
type leafNode struct {
- flags uint16
- keySize uint16
+ node
dataSize uint32
data uintptr // Pointer to the beginning of the data.
}
+// branchNode represents a node on a branch page.
+type branchNode struct {
+ node
+ pgno uint32
+ data uintptr // Pointer to the beginning of the data.
+}
+
// key returns a byte slice of the key data.
func (n *leafNode) key() []byte {
return (*[MaxKeySize]byte)(unsafe.Pointer(&n.data))[:n.keySize]
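
The rename from leaf_node.go to node.go pulls the shared header fields (flags, keySize) into an embedded node struct that both leafNode and branchNode reuse. A small standalone example of how embedding promotes those fields, with illustrative names:

```go
package main

import "fmt"

// header plays the role of the embedded node type above: the fields every
// node kind shares.
type header struct {
	flags   uint16
	keySize uint16
}

// leaf and branch embed header and add their own fields, as leafNode and
// branchNode do in the diff.
type leaf struct {
	header
	dataSize uint32
}

type branch struct {
	header
	pgno uint32
}

func main() {
	l := leaf{header: header{keySize: 8}, dataSize: 128}
	b := branch{header: header{keySize: 8}, pgno: 42}
	// Embedded fields are promoted, so both kinds expose keySize directly.
	fmt.Println(l.keySize, b.keySize) // 8 8
}
```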
diff --git a/page.go b/page.go
index ecbaedd..8ecf920 100644
--- a/page.go
+++ b/page.go
@@ -20,8 +20,7 @@ const (
p_leaf = 0x02
p_overflow = 0x04
p_meta = 0x08
- p_dirty = 0x10 /**< dirty page, also set for #P_SUBP pages */
- p_sub = 0x40
+ p_dirty = 0x10 /**< dirty page, also set for #P_SUBP pages */
p_keep = 0x8000 /**< leave this page alone during spill */
p_invalid = ^pgno(0)
@@ -91,13 +90,13 @@ func (p *page) init(pageSize int) {
// branchNode retrieves the branch node at the given index within the page.
func (p *page) branchNode(index indx) *branchNode {
b := (*[maxPageSize]byte)(unsafe.Pointer(&p.ptr))
- return (*branchNode)(unsafe.Pointer(&b[index * indx(unsafe.Sizeof(index))]))
+ return (*branchNode)(unsafe.Pointer(&b[index*indx(unsafe.Sizeof(index))]))
}
// leafNode retrieves the leaf node at the given index within the page.
func (p *page) leafNode(index indx) *leafNode {
b := (*[maxPageSize]byte)(unsafe.Pointer(&p.ptr))
- return (*leafNode)(unsafe.Pointer(&b[index * indx(unsafe.Sizeof(index))]))
+ return (*leafNode)(unsafe.Pointer(&b[index*indx(unsafe.Sizeof(index))]))
}
// numkeys returns the number of nodes in the page.
@@ -117,20 +116,20 @@ func (p *page) find(key []byte, pageSize int) (*node, int, bool) {
var node *node
nkeys := p.numkeys()
- low, high := 1, nkeys - 1
+ low, high := 1, nkeys-1
if (p.flags & p_leaf) != 0 {
low = 0
}
// Perform a binary search to find the correct node.
var i, rc int
- for ; low <= high; {
+ for low <= high {
i = (low + high) / 2
node = p.node(indx(i))
rc = bytes.Compare(key, node.key())
if rc == 0 {
- break;
+ break
} else if rc > 0 {
low = i + 1
} else {
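
The find hunk above is a binary search over the page's sorted nodes, starting at index 1 for branch pages (whose first key is implicit) and index 0 for leaf pages. The hunk is cut off before the function returns, so the not-found result below is an assumption; otherwise this is the same loop over a plain key slice:

```go
package main

import (
	"bytes"
	"fmt"
)

// find mirrors the binary search in page.find above, over a sorted slice of
// keys. isLeaf controls whether index 0 participates; the (low, false) return
// for a miss is an assumption, since the diff cuts off mid-function.
func find(keys [][]byte, target []byte, isLeaf bool) (int, bool) {
	low, high := 1, len(keys)-1
	if isLeaf {
		low = 0
	}
	for low <= high {
		i := (low + high) / 2
		rc := bytes.Compare(target, keys[i])
		if rc == 0 {
			return i, true
		} else if rc > 0 {
			low = i + 1
		} else {
			high = i - 1
		}
	}
	return low, false
}

func main() {
	keys := [][]byte{[]byte("a"), []byte("c"), []byte("e")}
	fmt.Println(find(keys, []byte("c"), true)) // 1 true
	fmt.Println(find(keys, []byte("d"), true)) // 2 false
}
```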
diff --git a/rwcursor.go b/rwcursor.go
index 553989f..cd5168b 100644
--- a/rwcursor.go
+++ b/rwcursor.go
@@ -1,11 +1,18 @@
package bolt
+/*
+type RWCursor interface {
+ Put([]byte, []byte) (error)
+ Delete([]byte) (error)
+}
+*/
+
// RWCursor represents a cursor that can read and write data for a bucket.
type RWCursor struct {
Cursor
transaction *RWTransaction
- reclaimed []pgno /**< Reclaimed freeDB pages, or NULL before use (was me_pghead) */
- last txnid /**< ID of last used record, or 0 if len(reclaimed) == 0 */
+ reclaimed []pgno /**< Reclaimed freeDB pages, or NULL before use (was me_pghead) */
+ last txnid /**< ID of last used record, or 0 if len(reclaimed) == 0 */
}
func (c *RWCursor) Put(key []byte, value []byte) error {
@@ -61,339 +68,339 @@ func (c *RWCursor) Put(key []byte, value []byte) error {
// If key does not exist the
if exists {
node := c.currentNode()
-
+
}
/*
- insert = rc;
- if (insert) {
- // The key does not exist
- DPRINTF(("inserting key at index %i", mc->mc_ki[mc->mc_top]));
- if ((mc->mc_db->md_flags & MDB_DUPSORT) &&
- LEAFSIZE(key, data) > env->me_nodemax)
- {
- // Too big for a node, insert in sub-DB
- fp_flags = P_LEAF|P_DIRTY;
- fp = env->me_pbuf;
- fp->mp_pad = data->mv_size; // used if MDB_DUPFIXED
- fp->mp_lower = fp->mp_upper = olddata.mv_size = PAGEHDRSZ;
- goto prep_subDB;
- }
- } else {
-
-more:
- leaf = NODEPTR(mc->mc_pg[mc->mc_top], mc->mc_ki[mc->mc_top]);
- olddata.mv_size = NODEDSZ(leaf);
- olddata.mv_data = NODEDATA(leaf);
-
- // DB has dups?
- if (F_ISSET(mc->mc_db->md_flags, MDB_DUPSORT)) {
- // Prepare (sub-)page/sub-DB to accept the new item,
- // if needed. fp: old sub-page or a header faking
- // it. mp: new (sub-)page. offset: growth in page
- // size. xdata: node data with new page or DB.
- ssize_t i, offset = 0;
- mp = fp = xdata.mv_data = env->me_pbuf;
- mp->mp_pgno = mc->mc_pg[mc->mc_top]->mp_pgno;
-
- // Was a single item before, must convert now
- if (!F_ISSET(leaf->mn_flags, F_DUPDATA)) {
- // Just overwrite the current item
- if (flags == MDB_CURRENT)
- goto current;
-
-#if UINT_MAX < SIZE_MAX
- if (mc->mc_dbx->md_dcmp == mdb_cmp_int && olddata.mv_size == sizeof(size_t))
-#ifdef MISALIGNED_OK
- mc->mc_dbx->md_dcmp = mdb_cmp_long;
-#else
- mc->mc_dbx->md_dcmp = mdb_cmp_cint;
-#endif
-#endif
- // if data matches, skip it
- if (!mc->mc_dbx->md_dcmp(data, &olddata)) {
- if (flags & MDB_NODUPDATA)
- rc = MDB_KEYEXIST;
- else if (flags & MDB_MULTIPLE)
- goto next_mult;
- else
- rc = MDB_SUCCESS;
- return rc;
- }
+ insert = rc;
+ if (insert) {
+ // The key does not exist
+ DPRINTF(("inserting key at index %i", mc->mc_ki[mc->mc_top]));
+ if ((mc->mc_db->md_flags & MDB_DUPSORT) &&
+ LEAFSIZE(key, data) > env->me_nodemax)
+ {
+ // Too big for a node, insert in sub-DB
+ fp_flags = P_LEAF|P_DIRTY;
+ fp = env->me_pbuf;
+ fp->mp_pad = data->mv_size; // used if MDB_DUPFIXED
+ fp->mp_lower = fp->mp_upper = olddata.mv_size = PAGEHDRSZ;
+ goto prep_subDB;
+ }
+ } else {
- // Back up original data item
- dkey.mv_size = olddata.mv_size;
- dkey.mv_data = memcpy(fp+1, olddata.mv_data, olddata.mv_size);
-
- // Make sub-page header for the dup items, with dummy body
- fp->mp_flags = P_LEAF|P_DIRTY|P_SUBP;
- fp->mp_lower = PAGEHDRSZ;
- xdata.mv_size = PAGEHDRSZ + dkey.mv_size + data->mv_size;
- if (mc->mc_db->md_flags & MDB_DUPFIXED) {
- fp->mp_flags |= P_LEAF2;
- fp->mp_pad = data->mv_size;
- xdata.mv_size += 2 * data->mv_size; // leave space for 2 more
- } else {
- xdata.mv_size += 2 * (sizeof(indx_t) + NODESIZE) +
- (dkey.mv_size & 1) + (data->mv_size & 1);
- }
- fp->mp_upper = xdata.mv_size;
- olddata.mv_size = fp->mp_upper; // pretend olddata is fp
- } else if (leaf->mn_flags & F_SUBDATA) {
- // Data is on sub-DB, just store it
- flags |= F_DUPDATA|F_SUBDATA;
- goto put_sub;
- } else {
- // Data is on sub-page
- fp = olddata.mv_data;
- switch (flags) {
- default:
- i = -(ssize_t)SIZELEFT(fp);
- if (!(mc->mc_db->md_flags & MDB_DUPFIXED)) {
- offset = i += (ssize_t) EVEN(
- sizeof(indx_t) + NODESIZE + data->mv_size);
+ more:
+ leaf = NODEPTR(mc->mc_pg[mc->mc_top], mc->mc_ki[mc->mc_top]);
+ olddata.mv_size = NODEDSZ(leaf);
+ olddata.mv_data = NODEDATA(leaf);
+
+ // DB has dups?
+ if (F_ISSET(mc->mc_db->md_flags, MDB_DUPSORT)) {
+ // Prepare (sub-)page/sub-DB to accept the new item,
+ // if needed. fp: old sub-page or a header faking
+ // it. mp: new (sub-)page. offset: growth in page
+ // size. xdata: node data with new page or DB.
+ ssize_t i, offset = 0;
+ mp = fp = xdata.mv_data = env->me_pbuf;
+ mp->mp_pgno = mc->mc_pg[mc->mc_top]->mp_pgno;
+
+ // Was a single item before, must convert now
+ if (!F_ISSET(leaf->mn_flags, F_DUPDATA)) {
+ // Just overwrite the current item
+ if (flags == MDB_CURRENT)
+ goto current;
+
+ #if UINT_MAX < SIZE_MAX
+ if (mc->mc_dbx->md_dcmp == mdb_cmp_int && olddata.mv_size == sizeof(size_t))
+ #ifdef MISALIGNED_OK
+ mc->mc_dbx->md_dcmp = mdb_cmp_long;
+ #else
+ mc->mc_dbx->md_dcmp = mdb_cmp_cint;
+ #endif
+ #endif
+ // if data matches, skip it
+ if (!mc->mc_dbx->md_dcmp(data, &olddata)) {
+ if (flags & MDB_NODUPDATA)
+ rc = MDB_KEYEXIST;
+ else if (flags & MDB_MULTIPLE)
+ goto next_mult;
+ else
+ rc = MDB_SUCCESS;
+ return rc;
+ }
+
+ // Back up original data item
+ dkey.mv_size = olddata.mv_size;
+ dkey.mv_data = memcpy(fp+1, olddata.mv_data, olddata.mv_size);
+
+ // Make sub-page header for the dup items, with dummy body
+ fp->mp_flags = P_LEAF|P_DIRTY|P_SUBP;
+ fp->mp_lower = PAGEHDRSZ;
+ xdata.mv_size = PAGEHDRSZ + dkey.mv_size + data->mv_size;
+ if (mc->mc_db->md_flags & MDB_DUPFIXED) {
+ fp->mp_flags |= P_LEAF2;
+ fp->mp_pad = data->mv_size;
+ xdata.mv_size += 2 * data->mv_size; // leave space for 2 more
} else {
- i += offset = fp->mp_pad;
- offset *= 4; // space for 4 more
+ xdata.mv_size += 2 * (sizeof(indx_t) + NODESIZE) +
+ (dkey.mv_size & 1) + (data->mv_size & 1);
}
- if (i > 0)
- break;
- // FALLTHRU: Sub-page is big enough
- case MDB_CURRENT:
- fp->mp_flags |= P_DIRTY;
- COPY_PGNO(fp->mp_pgno, mp->mp_pgno);
- mc->mc_xcursor->mx_cursor.mc_pg[0] = fp;
- flags |= F_DUPDATA;
+ fp->mp_upper = xdata.mv_size;
+ olddata.mv_size = fp->mp_upper; // pretend olddata is fp
+ } else if (leaf->mn_flags & F_SUBDATA) {
+ // Data is on sub-DB, just store it
+ flags |= F_DUPDATA|F_SUBDATA;
goto put_sub;
+ } else {
+ // Data is on sub-page
+ fp = olddata.mv_data;
+ switch (flags) {
+ default:
+ i = -(ssize_t)SIZELEFT(fp);
+ if (!(mc->mc_db->md_flags & MDB_DUPFIXED)) {
+ offset = i += (ssize_t) EVEN(
+ sizeof(indx_t) + NODESIZE + data->mv_size);
+ } else {
+ i += offset = fp->mp_pad;
+ offset *= 4; // space for 4 more
+ }
+ if (i > 0)
+ break;
+ // FALLTHRU: Sub-page is big enough
+ case MDB_CURRENT:
+ fp->mp_flags |= P_DIRTY;
+ COPY_PGNO(fp->mp_pgno, mp->mp_pgno);
+ mc->mc_xcursor->mx_cursor.mc_pg[0] = fp;
+ flags |= F_DUPDATA;
+ goto put_sub;
+ }
+ xdata.mv_size = olddata.mv_size + offset;
}
- xdata.mv_size = olddata.mv_size + offset;
- }
- fp_flags = fp->mp_flags;
- if (NODESIZE + NODEKSZ(leaf) + xdata.mv_size > env->me_nodemax) {
- // Too big for a sub-page, convert to sub-DB
- fp_flags &= ~P_SUBP;
-prep_subDB:
- dummy.md_pad = 0;
- dummy.md_flags = 0;
- dummy.md_depth = 1;
- dummy.md_branch_pages = 0;
- dummy.md_leaf_pages = 1;
- dummy.md_overflow_pages = 0;
- dummy.md_entries = NUMKEYS(fp);
- xdata.mv_size = sizeof(MDB_db);
- xdata.mv_data = &dummy;
- if ((rc = mdb_page_alloc(mc, 1, &mp)))
- return rc;
- offset = env->me_psize - olddata.mv_size;
- flags |= F_DUPDATA|F_SUBDATA;
- dummy.md_root = mp->mp_pgno;
- }
- if (mp != fp) {
- mp->mp_flags = fp_flags | P_DIRTY;
- mp->mp_pad = fp->mp_pad;
- mp->mp_lower = fp->mp_lower;
- mp->mp_upper = fp->mp_upper + offset;
- if (fp_flags & P_LEAF2) {
- memcpy(METADATA(mp), METADATA(fp), NUMKEYS(fp) * fp->mp_pad);
- } else {
- memcpy((char *)mp + mp->mp_upper, (char *)fp + fp->mp_upper,
- olddata.mv_size - fp->mp_upper);
- for (i = NUMKEYS(fp); --i >= 0; )
- mp->mp_ptrs[i] = fp->mp_ptrs[i] + offset;
+ fp_flags = fp->mp_flags;
+ if (NODESIZE + NODEKSZ(leaf) + xdata.mv_size > env->me_nodemax) {
+ // Too big for a sub-page, convert to sub-DB
+ fp_flags &= ~P_SUBP;
+ prep_subDB:
+ dummy.md_pad = 0;
+ dummy.md_flags = 0;
+ dummy.md_depth = 1;
+ dummy.md_branch_pages = 0;
+ dummy.md_leaf_pages = 1;
+ dummy.md_overflow_pages = 0;
+ dummy.md_entries = NUMKEYS(fp);
+ xdata.mv_size = sizeof(MDB_db);
+ xdata.mv_data = &dummy;
+ if ((rc = mdb_page_alloc(mc, 1, &mp)))
+ return rc;
+ offset = env->me_psize - olddata.mv_size;
+ flags |= F_DUPDATA|F_SUBDATA;
+ dummy.md_root = mp->mp_pgno;
+ }
+ if (mp != fp) {
+ mp->mp_flags = fp_flags | P_DIRTY;
+ mp->mp_pad = fp->mp_pad;
+ mp->mp_lower = fp->mp_lower;
+ mp->mp_upper = fp->mp_upper + offset;
+ if (fp_flags & P_LEAF2) {
+ memcpy(METADATA(mp), METADATA(fp), NUMKEYS(fp) * fp->mp_pad);
+ } else {
+ memcpy((char *)mp + mp->mp_upper, (char *)fp + fp->mp_upper,
+ olddata.mv_size - fp->mp_upper);
+ for (i = NUMKEYS(fp); --i >= 0; )
+ mp->mp_ptrs[i] = fp->mp_ptrs[i] + offset;
+ }
}
- }
- rdata = &xdata;
- flags |= F_DUPDATA;
- do_sub = 1;
- if (!insert)
- mdb_node_del(mc, 0);
- goto new_sub;
- }
-current:
- // overflow page overwrites need special handling
- if (F_ISSET(leaf->mn_flags, F_BIGDATA)) {
- MDB_page *omp;
- pgno_t pg;
- int level, ovpages, dpages = OVPAGES(data->mv_size, env->me_psize);
-
- memcpy(&pg, olddata.mv_data, sizeof(pg));
- if ((rc2 = mdb_page_get(mc->mc_txn, pg, &omp, &level)) != 0)
- return rc2;
- ovpages = omp->mp_pages;
-
- // Is the ov page large enough?
- if (ovpages >= dpages) {
- if (!(omp->mp_flags & P_DIRTY) &&
- (level || (env->me_flags & MDB_WRITEMAP)))
- {
- rc = mdb_page_unspill(mc->mc_txn, omp, &omp);
- if (rc)
- return rc;
- level = 0; // dirty in this txn or clean
- }
- // Is it dirty?
- if (omp->mp_flags & P_DIRTY) {
- // yes, overwrite it. Note in this case we don't
- // bother to try shrinking the page if the new data
- // is smaller than the overflow threshold.
- if (level > 1) {
- // It is writable only in a parent txn
- size_t sz = (size_t) env->me_psize * ovpages, off;
- MDB_page *np = mdb_page_malloc(mc->mc_txn, ovpages);
- MDB_ID2 id2;
- if (!np)
- return ENOMEM;
- id2.mid = pg;
- id2.mptr = np;
- rc = mdb_mid2l_insert(mc->mc_txn->mt_u.dirty_list, &id2);
- mdb_cassert(mc, rc == 0);
- if (!(flags & MDB_RESERVE)) {
- // Copy end of page, adjusting alignment so
- // compiler may copy words instead of bytes.
- off = (PAGEHDRSZ + data->mv_size) & -sizeof(size_t);
- memcpy((size_t *)((char *)np + off),
- (size_t *)((char *)omp + off), sz - off);
- sz = PAGEHDRSZ;
+ rdata = &xdata;
+ flags |= F_DUPDATA;
+ do_sub = 1;
+ if (!insert)
+ mdb_node_del(mc, 0);
+ goto new_sub;
+ }
+ current:
+ // overflow page overwrites need special handling
+ if (F_ISSET(leaf->mn_flags, F_BIGDATA)) {
+ MDB_page *omp;
+ pgno_t pg;
+ int level, ovpages, dpages = OVPAGES(data->mv_size, env->me_psize);
+
+ memcpy(&pg, olddata.mv_data, sizeof(pg));
+ if ((rc2 = mdb_page_get(mc->mc_txn, pg, &omp, &level)) != 0)
+ return rc2;
+ ovpages = omp->mp_pages;
+
+ // Is the ov page large enough?
+ if (ovpages >= dpages) {
+ if (!(omp->mp_flags & P_DIRTY) &&
+ (level || (env->me_flags & MDB_WRITEMAP)))
+ {
+ rc = mdb_page_unspill(mc->mc_txn, omp, &omp);
+ if (rc)
+ return rc;
+ level = 0; // dirty in this txn or clean
+ }
+ // Is it dirty?
+ if (omp->mp_flags & P_DIRTY) {
+ // yes, overwrite it. Note in this case we don't
+ // bother to try shrinking the page if the new data
+ // is smaller than the overflow threshold.
+ if (level > 1) {
+ // It is writable only in a parent txn
+ size_t sz = (size_t) env->me_psize * ovpages, off;
+ MDB_page *np = mdb_page_malloc(mc->mc_txn, ovpages);
+ MDB_ID2 id2;
+ if (!np)
+ return ENOMEM;
+ id2.mid = pg;
+ id2.mptr = np;
+ rc = mdb_mid2l_insert(mc->mc_txn->mt_u.dirty_list, &id2);
+ mdb_cassert(mc, rc == 0);
+ if (!(flags & MDB_RESERVE)) {
+ // Copy end of page, adjusting alignment so
+ // compiler may copy words instead of bytes.
+ off = (PAGEHDRSZ + data->mv_size) & -sizeof(size_t);
+ memcpy((size_t *)((char *)np + off),
+ (size_t *)((char *)omp + off), sz - off);
+ sz = PAGEHDRSZ;
+ }
+ memcpy(np, omp, sz); // Copy beginning of page
+ omp = np;
}
- memcpy(np, omp, sz); // Copy beginning of page
- omp = np;
+ SETDSZ(leaf, data->mv_size);
+ if (F_ISSET(flags, MDB_RESERVE))
+ data->mv_data = METADATA(omp);
+ else
+ memcpy(METADATA(omp), data->mv_data, data->mv_size);
+ goto done;
+ }
}
- SETDSZ(leaf, data->mv_size);
+ if ((rc2 = mdb_ovpage_free(mc, omp)) != MDB_SUCCESS)
+ return rc2;
+ } else if (data->mv_size == olddata.mv_size) {
+ // same size, just replace it. Note that we could
+ // also reuse this node if the new data is smaller,
+ // but instead we opt to shrink the node in that case.
if (F_ISSET(flags, MDB_RESERVE))
- data->mv_data = METADATA(omp);
+ data->mv_data = olddata.mv_data;
+ else if (data->mv_size)
+ memcpy(olddata.mv_data, data->mv_data, data->mv_size);
else
- memcpy(METADATA(omp), data->mv_data, data->mv_size);
+ memcpy(NODEKEY(leaf), key->mv_data, key->mv_size);
goto done;
- }
}
- if ((rc2 = mdb_ovpage_free(mc, omp)) != MDB_SUCCESS)
- return rc2;
- } else if (data->mv_size == olddata.mv_size) {
- // same size, just replace it. Note that we could
- // also reuse this node if the new data is smaller,
- // but instead we opt to shrink the node in that case.
- if (F_ISSET(flags, MDB_RESERVE))
- data->mv_data = olddata.mv_data;
- else if (data->mv_size)
- memcpy(olddata.mv_data, data->mv_data, data->mv_size);
- else
- memcpy(NODEKEY(leaf), key->mv_data, key->mv_size);
- goto done;
+ mdb_node_del(mc, 0);
+ mc->mc_db->md_entries--;
}
- mdb_node_del(mc, 0);
- mc->mc_db->md_entries--;
- }
- rdata = data;
-
-new_sub:
- nflags = flags & NODE_ADD_FLAGS;
- nsize = IS_LEAF2(mc->mc_pg[mc->mc_top]) ? key->mv_size : mdb_leaf_size(env, key, rdata);
- if (SIZELEFT(mc->mc_pg[mc->mc_top]) < nsize) {
- if (( flags & (F_DUPDATA|F_SUBDATA)) == F_DUPDATA )
- nflags &= ~MDB_APPEND;
- if (!insert)
- nflags |= MDB_SPLIT_REPLACE;
- rc = mdb_page_split(mc, key, rdata, P_INVALID, nflags);
- } else {
- // There is room already in this leaf page.
- rc = mdb_node_add(mc, mc->mc_ki[mc->mc_top], key, rdata, 0, nflags);
- if (rc == 0 && !do_sub && insert) {
- // Adjust other cursors pointing to mp
- MDB_cursor *m2, *m3;
- MDB_dbi dbi = mc->mc_dbi;
- unsigned i = mc->mc_top;
- MDB_page *mp = mc->mc_pg[i];
-
- for (m2 = mc->mc_txn->mt_cursors[dbi]; m2; m2=m2->mc_next) {
- if (mc->mc_flags & C_SUB)
- m3 = &m2->mc_xcursor->mx_cursor;
- else
- m3 = m2;
- if (m3 == mc || m3->mc_snum < mc->mc_snum) continue;
- if (m3->mc_pg[i] == mp && m3->mc_ki[i] >= mc->mc_ki[i]) {
- m3->mc_ki[i]++;
+ rdata = data;
+
+ new_sub:
+ nflags = flags & NODE_ADD_FLAGS;
+ nsize = IS_LEAF2(mc->mc_pg[mc->mc_top]) ? key->mv_size : mdb_leaf_size(env, key, rdata);
+ if (SIZELEFT(mc->mc_pg[mc->mc_top]) < nsize) {
+ if (( flags & (F_DUPDATA|F_SUBDATA)) == F_DUPDATA )
+ nflags &= ~MDB_APPEND;
+ if (!insert)
+ nflags |= MDB_SPLIT_REPLACE;
+ rc = mdb_page_split(mc, key, rdata, P_INVALID, nflags);
+ } else {
+ // There is room already in this leaf page.
+ rc = mdb_node_add(mc, mc->mc_ki[mc->mc_top], key, rdata, 0, nflags);
+ if (rc == 0 && !do_sub && insert) {
+ // Adjust other cursors pointing to mp
+ MDB_cursor *m2, *m3;
+ MDB_dbi dbi = mc->mc_dbi;
+ unsigned i = mc->mc_top;
+ MDB_page *mp = mc->mc_pg[i];
+
+ for (m2 = mc->mc_txn->mt_cursors[dbi]; m2; m2=m2->mc_next) {
+ if (mc->mc_flags & C_SUB)
+ m3 = &m2->mc_xcursor->mx_cursor;
+ else
+ m3 = m2;
+ if (m3 == mc || m3->mc_snum < mc->mc_snum) continue;
+ if (m3->mc_pg[i] == mp && m3->mc_ki[i] >= mc->mc_ki[i]) {
+ m3->mc_ki[i]++;
+ }
}
}
}
- }
- if (rc != MDB_SUCCESS)
- mc->mc_txn->mt_flags |= MDB_TXN_ERROR;
- else {
- // Now store the actual data in the child DB. Note that we're
- // storing the user data in the keys field, so there are strict
- // size limits on dupdata. The actual data fields of the child
- // DB are all zero size.
- if (do_sub) {
- int xflags;
-put_sub:
- xdata.mv_size = 0;
- xdata.mv_data = "";
- leaf = NODEPTR(mc->mc_pg[mc->mc_top], mc->mc_ki[mc->mc_top]);
- if (flags & MDB_CURRENT) {
- xflags = MDB_CURRENT|MDB_NOSPILL;
- } else {
- mdb_xcursor_init1(mc, leaf);
- xflags = (flags & MDB_NODUPDATA) ?
- MDB_NOOVERWRITE|MDB_NOSPILL : MDB_NOSPILL;
- }
- // converted, write the original data first
- if (dkey.mv_size) {
- rc = mdb_cursor_put(&mc->mc_xcursor->mx_cursor, &dkey, &xdata, xflags);
- if (rc)
- return rc;
- {
- // Adjust other cursors pointing to mp
- MDB_cursor *m2;
- unsigned i = mc->mc_top;
- MDB_page *mp = mc->mc_pg[i];
-
- for (m2 = mc->mc_txn->mt_cursors[mc->mc_dbi]; m2; m2=m2->mc_next) {
- if (m2 == mc || m2->mc_snum < mc->mc_snum) continue;
- if (!(m2->mc_flags & C_INITIALIZED)) continue;
- if (m2->mc_pg[i] == mp && m2->mc_ki[i] == mc->mc_ki[i]) {
- mdb_xcursor_init1(m2, leaf);
+ if (rc != MDB_SUCCESS)
+ mc->mc_txn->mt_flags |= MDB_TXN_ERROR;
+ else {
+ // Now store the actual data in the child DB. Note that we're
+ // storing the user data in the keys field, so there are strict
+ // size limits on dupdata. The actual data fields of the child
+ // DB are all zero size.
+ if (do_sub) {
+ int xflags;
+ put_sub:
+ xdata.mv_size = 0;
+ xdata.mv_data = "";
+ leaf = NODEPTR(mc->mc_pg[mc->mc_top], mc->mc_ki[mc->mc_top]);
+ if (flags & MDB_CURRENT) {
+ xflags = MDB_CURRENT|MDB_NOSPILL;
+ } else {
+ mdb_xcursor_init1(mc, leaf);
+ xflags = (flags & MDB_NODUPDATA) ?
+ MDB_NOOVERWRITE|MDB_NOSPILL : MDB_NOSPILL;
+ }
+ // converted, write the original data first
+ if (dkey.mv_size) {
+ rc = mdb_cursor_put(&mc->mc_xcursor->mx_cursor, &dkey, &xdata, xflags);
+ if (rc)
+ return rc;
+ {
+ // Adjust other cursors pointing to mp
+ MDB_cursor *m2;
+ unsigned i = mc->mc_top;
+ MDB_page *mp = mc->mc_pg[i];
+
+ for (m2 = mc->mc_txn->mt_cursors[mc->mc_dbi]; m2; m2=m2->mc_next) {
+ if (m2 == mc || m2->mc_snum < mc->mc_snum) continue;
+ if (!(m2->mc_flags & C_INITIALIZED)) continue;
+ if (m2->mc_pg[i] == mp && m2->mc_ki[i] == mc->mc_ki[i]) {
+ mdb_xcursor_init1(m2, leaf);
+ }
}
}
+ // we've done our job
+ dkey.mv_size = 0;
+ }
+ if (flags & MDB_APPENDDUP)
+ xflags |= MDB_APPEND;
+ rc = mdb_cursor_put(&mc->mc_xcursor->mx_cursor, data, &xdata, xflags);
+ if (flags & F_SUBDATA) {
+ void *db = NODEDATA(leaf);
+ memcpy(db, &mc->mc_xcursor->mx_db, sizeof(MDB_db));
}
- // we've done our job
- dkey.mv_size = 0;
- }
- if (flags & MDB_APPENDDUP)
- xflags |= MDB_APPEND;
- rc = mdb_cursor_put(&mc->mc_xcursor->mx_cursor, data, &xdata, xflags);
- if (flags & F_SUBDATA) {
- void *db = NODEDATA(leaf);
- memcpy(db, &mc->mc_xcursor->mx_db, sizeof(MDB_db));
}
- }
- // sub-writes might have failed so check rc again.
- // Don't increment count if we just replaced an existing item.
- if (!rc && !(flags & MDB_CURRENT))
- mc->mc_db->md_entries++;
- if (flags & MDB_MULTIPLE) {
- if (!rc) {
-next_mult:
- mcount++;
- // let caller know how many succeeded, if any
- data[1].mv_size = mcount;
- if (mcount < dcount) {
- data[0].mv_data = (char *)data[0].mv_data + data[0].mv_size;
- goto more;
+ // sub-writes might have failed so check rc again.
+ // Don't increment count if we just replaced an existing item.
+ if (!rc && !(flags & MDB_CURRENT))
+ mc->mc_db->md_entries++;
+ if (flags & MDB_MULTIPLE) {
+ if (!rc) {
+ next_mult:
+ mcount++;
+ // let caller know how many succeeded, if any
+ data[1].mv_size = mcount;
+ if (mcount < dcount) {
+ data[0].mv_data = (char *)data[0].mv_data + data[0].mv_size;
+ goto more;
+ }
}
}
}
- }
-done:
- // If we succeeded and the key didn't exist before, make sure
- // the cursor is marked valid.
- if (!rc && insert)
- mc->mc_flags |= C_INITIALIZED;
- return rc;
+ done:
+ // If we succeeded and the key didn't exist before, make sure
+ // the cursor is marked valid.
+ if (!rc && insert)
+ mc->mc_flags |= C_INITIALIZED;
+ return rc;
*/
return nil
}
@@ -476,126 +483,126 @@ func (c *RWCursor) allocatePage(count int) (*page, error) {
// }
/*
- int rc, retry = INT_MAX;
- MDB_txn *txn = mc->mc_txn;
- MDB_env *env = txn->mt_env;
- pgno_t pgno, *mop = env->me_pghead;
- unsigned i, j, k, mop_len = mop ? mop[0] : 0, n2 = num-1;
- MDB_page *np;
- txnid_t oldest = 0, last;
- MDB_cursor_op op;
- MDB_cursor m2;
-
- *mp = NULL;
-
-
- for (op = MDB_FIRST;; op = MDB_NEXT) {
- MDB_val key, data;
- MDB_node *leaf;
- pgno_t *idl, old_id, new_id;
-
- // Seek a big enough contiguous page range. Prefer
- // pages at the tail, just truncating the list.
- if (mop_len > n2) {
- i = mop_len;
- do {
- pgno = mop[i];
- if (mop[i-n2] == pgno+n2)
- goto search_done;
- } while (--i > n2);
- if (Max_retries < INT_MAX && --retry < 0)
- break;
- }
+ int rc, retry = INT_MAX;
+ MDB_txn *txn = mc->mc_txn;
+ MDB_env *env = txn->mt_env;
+ pgno_t pgno, *mop = env->me_pghead;
+ unsigned i, j, k, mop_len = mop ? mop[0] : 0, n2 = num-1;
+ MDB_page *np;
+ txnid_t oldest = 0, last;
+ MDB_cursor_op op;
+ MDB_cursor m2;
+
+ *mp = NULL;
+
+
+ for (op = MDB_FIRST;; op = MDB_NEXT) {
+ MDB_val key, data;
+ MDB_node *leaf;
+ pgno_t *idl, old_id, new_id;
+
+ // Seek a big enough contiguous page range. Prefer
+ // pages at the tail, just truncating the list.
+ if (mop_len > n2) {
+ i = mop_len;
+ do {
+ pgno = mop[i];
+ if (mop[i-n2] == pgno+n2)
+ goto search_done;
+ } while (--i > n2);
+ if (Max_retries < INT_MAX && --retry < 0)
+ break;
+ }
- if (op == MDB_FIRST) { // 1st iteration
- // Prepare to fetch more and coalesce
- oldest = mdb_find_oldest(txn);
- last = env->me_pglast;
- mdb_cursor_init(&m2, txn, FREE_DBI, NULL);
- if (last) {
- op = MDB_SET_RANGE;
- key.mv_data = &last; // will look up last+1
- key.mv_size = sizeof(last);
+ if (op == MDB_FIRST) { // 1st iteration
+ // Prepare to fetch more and coalesce
+ oldest = mdb_find_oldest(txn);
+ last = env->me_pglast;
+ mdb_cursor_init(&m2, txn, FREE_DBI, NULL);
+ if (last) {
+ op = MDB_SET_RANGE;
+ key.mv_data = &last; // will look up last+1
+ key.mv_size = sizeof(last);
+ }
}
- }
- last++;
- // Do not fetch more if the record will be too recent
- if (oldest <= last)
- break;
- rc = mdb_cursor_get(&m2, &key, NULL, op);
- if (rc) {
- if (rc == MDB_NOTFOUND)
+ last++;
+ // Do not fetch more if the record will be too recent
+ if (oldest <= last)
break;
- goto fail;
- }
- last = *(txnid_t*)key.mv_data;
- if (oldest <= last)
- break;
- np = m2.mc_pg[m2.mc_top];
- leaf = NODEPTR(np, m2.mc_ki[m2.mc_top]);
- if ((rc = mdb_node_read(txn, leaf, &data)) != MDB_SUCCESS)
- return rc;
-
- idl = (MDB_ID *) data.mv_data;
- i = idl[0];
- if (!mop) {
- if (!(env->me_pghead = mop = mdb_midl_alloc(i))) {
- rc = ENOMEM;
+ rc = mdb_cursor_get(&m2, &key, NULL, op);
+ if (rc) {
+ if (rc == MDB_NOTFOUND)
+ break;
goto fail;
}
- } else {
- if ((rc = mdb_midl_need(&env->me_pghead, i)) != 0)
- goto fail;
- mop = env->me_pghead;
+ last = *(txnid_t*)key.mv_data;
+ if (oldest <= last)
+ break;
+ np = m2.mc_pg[m2.mc_top];
+ leaf = NODEPTR(np, m2.mc_ki[m2.mc_top]);
+ if ((rc = mdb_node_read(txn, leaf, &data)) != MDB_SUCCESS)
+ return rc;
+
+ idl = (MDB_ID *) data.mv_data;
+ i = idl[0];
+ if (!mop) {
+ if (!(env->me_pghead = mop = mdb_midl_alloc(i))) {
+ rc = ENOMEM;
+ goto fail;
+ }
+ } else {
+ if ((rc = mdb_midl_need(&env->me_pghead, i)) != 0)
+ goto fail;
+ mop = env->me_pghead;
+ }
+ env->me_pglast = last;
+
+ // Merge in descending sorted order
+ j = mop_len;
+ k = mop_len += i;
+ mop[0] = (pgno_t)-1;
+ old_id = mop[j];
+ while (i) {
+ new_id = idl[i--];
+ for (; old_id < new_id; old_id = mop[--j])
+ mop[k--] = old_id;
+ mop[k--] = new_id;
+ }
+ mop[0] = mop_len;
}
- env->me_pglast = last;
-
- // Merge in descending sorted order
- j = mop_len;
- k = mop_len += i;
- mop[0] = (pgno_t)-1;
- old_id = mop[j];
- while (i) {
- new_id = idl[i--];
- for (; old_id < new_id; old_id = mop[--j])
- mop[k--] = old_id;
- mop[k--] = new_id;
+
+ // Use new pages from the map when nothing suitable in the freeDB
+ i = 0;
+ pgno = txn->mt_next_pgno;
+ if (pgno + num >= env->me_maxpg) {
+ DPUTS("DB size maxed out");
+ rc = MDB_MAP_FULL;
+ goto fail;
}
- mop[0] = mop_len;
- }
- // Use new pages from the map when nothing suitable in the freeDB
- i = 0;
- pgno = txn->mt_next_pgno;
- if (pgno + num >= env->me_maxpg) {
- DPUTS("DB size maxed out");
- rc = MDB_MAP_FULL;
+ search_done:
+ if (!(np = mdb_page_malloc(txn, num))) {
+ rc = ENOMEM;
goto fail;
- }
-
- search_done:
- if (!(np = mdb_page_malloc(txn, num))) {
- rc = ENOMEM;
- goto fail;
- }
- if (i) {
- mop[0] = mop_len -= num;
- // Move any stragglers down
- for (j = i-num; j < mop_len; )
- mop[++j] = mop[++i];
- } else {
- txn->mt_next_pgno = pgno + num;
- }
- np->mp_pgno = pgno;
- mdb_page_dirty(txn, np);
- *mp = np;
+ }
+ if (i) {
+ mop[0] = mop_len -= num;
+ // Move any stragglers down
+ for (j = i-num; j < mop_len; )
+ mop[++j] = mop[++i];
+ } else {
+ txn->mt_next_pgno = pgno + num;
+ }
+ np->mp_pgno = pgno;
+ mdb_page_dirty(txn, np);
+ *mp = np;
- return MDB_SUCCESS;
+ return MDB_SUCCESS;
- fail:
- txn->mt_flags |= MDB_TXN_ERROR;
- return rc;
+ fail:
+ txn->mt_flags |= MDB_TXN_ERROR;
+ return rc;
*/
return nil
}
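
The commented-out C body of allocatePage follows one strategy: reuse a contiguous run of reclaimed page numbers from the freelist when one is big enough, otherwise extend the file, failing if that would exceed the maximum page count. A rough, self-contained Go sketch of that decision; the freelist representation (ascending page numbers) and all names here are assumptions, not bolt's eventual code:

```go
package main

import (
	"errors"
	"fmt"
)

// allocateSketch outlines the allocation strategy described above: prefer a
// run of `count` consecutive reclaimed page numbers, otherwise take fresh
// pages from the end of the file. All names and the freelist layout are
// illustrative assumptions.
func allocateSketch(free []int, nextPgno, maxPgno, count int) (int, []int, error) {
	run := 0
	for i := range free {
		if i > 0 && free[i] == free[i-1]+1 {
			run++
		} else {
			run = 1
		}
		if run == count {
			// Reuse the run and remove it from the freelist.
			start := free[i-count+1]
			rest := append(append([]int{}, free[:i-count+1]...), free[i+1:]...)
			return start, rest, nil
		}
	}
	// Nothing suitable reclaimed: grow the file instead.
	if nextPgno+count > maxPgno {
		return 0, free, errors.New("database size maxed out")
	}
	return nextPgno, free, nil
}

func main() {
	start, rest, _ := allocateSketch([]int{3, 7, 8, 9, 12}, 20, 100, 3)
	fmt.Println(start, rest) // 7 [3 12]
}
```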
diff --git a/rwtransaction.go b/rwtransaction.go
index 4cb7b4f..d0fe440 100644
--- a/rwtransaction.go
+++ b/rwtransaction.go
@@ -4,6 +4,4 @@ package bolt
// Only one read/write transaction can be active for a DB at a time.
type RWTransaction struct {
Transaction
- pagestate pagestate
}
-