Initial db.open.

master
Ben Johnson 2014-01-11 22:51:01 -07:00
parent df8333328f
commit ee24437bfc
12 changed files with 4872 additions and 4960 deletions

Makefile (new file, 20 lines)

@ -0,0 +1,20 @@
PKG=./...
TEST=.
BENCH=.
COVERPROFILE=/tmp/c.out

bench: benchpreq
	go test -v -test.bench=$(BENCH) ./.bench

cover: fmt
	go test -coverprofile=$(COVERPROFILE) .
	go tool cover -html=$(COVERPROFILE)
	rm $(COVERPROFILE)

fmt:
	@go fmt ./...

test: fmt
	@go test -v -cover -test.run=$(TEST) $(PKG)

.PHONY: bench cover fmt test


@ -7,12 +7,12 @@ package bolt
// TODO: #define MAIN_DBI 1
type Bucket struct {
	pad       uint32
	flags     uint16
	depth     uint16
	branches  pgno
	leafs     pgno
	overflows pgno
	entries   uint64
	root      pgno
}

cursor.go (5763 lines changed)

File diff suppressed because it is too large.

db.go (1432 lines changed)

File diff suppressed because it is too large.

db_test.go (new file, 27 lines)

@ -0,0 +1,27 @@
package bolt

import (
	"io/ioutil"
	"os"
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestDBOpen(t *testing.T) {
	withDB(func(db *DB, path string) {
		err := db.Open(path, 0666)
		assert.NoError(t, err)
	})
}

func withDB(fn func(*DB, string)) {
	f, _ := ioutil.TempFile("", "bolt-")
	path := f.Name()
	f.Close()
	os.Remove(path)
	defer os.RemoveAll(path)

	db := NewDB()
	fn(db, path)
}
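The withDB helper makes it easy to grow this suite against a throwaway file. A minimal sketch of a follow-up test, assuming only the Open signature used above (illustrative, not part of the commit):

// Hypothetical extra test reusing withDB; not part of this commit.
func TestDBOpenReadWriteMode(t *testing.T) {
	withDB(func(db *DB, path string) {
		// The mode argument is assumed to be passed through to the OS as-is.
		assert.NoError(t, db.Open(path, 0644))
	})
}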


@ -1,30 +1,34 @@
package bolt
var (
KeyExistError = &Error{"Key/data pair already exists"}
NotFoundError = &Error{"No matching key/data pair found"}
PageNotFoundError = &Error{"Requested page not found"}
CorruptedError = &Error{"Located page was wrong type"}
PanicError = &Error{"Update of meta page failed"}
VersionMismatchError = &Error{"Database environment version mismatch"}
InvalidError = &Error{"File is not an MDB file"}
MapFullError = &Error{"Environment mapsize limit reached"}
BucketFullError = &Error{"Environment maxdbs limit reached"}
ReadersFullError = &Error{"Environment maxreaders limit reached"}
TransactionFullError = &Error{"Transaction has too many dirty pages - transaction too big"}
CursorFullError = &Error{"Internal error - cursor stack limit reached"}
PageFullError = &Error{"Internal error - page has no more space"}
MapResizedError = &Error{"Database contents grew beyond environment mapsize"}
IncompatibleError = &Error{"Operation and DB incompatible, or DB flags changed"}
BadReaderSlotError = &Error{"Invalid reuse of reader locktable slot"}
BadTransactionError = &Error{"Transaction cannot recover - it must be aborted"}
BadValueSizeError = &Error{"Too big key/data, key is empty, or wrong DUPFIXED size"}
KeyExistError = &Error{"key/data pair already exists", nil}
NotFoundError = &Error{"no matching key/data pair found", nil}
PageNotFoundError = &Error{"requested page not found", nil}
CorruptedError = &Error{"located page was wrong type", nil}
PanicError = &Error{"update of meta page failed", nil}
VersionMismatchError = &Error{"database environment version mismatch", nil}
InvalidError = &Error{"file is not a bolt file", nil}
MapFullError = &Error{"environment mapsize limit reached", nil}
BucketFullError = &Error{"environment maxdbs limit reached", nil}
ReadersFullError = &Error{"environment maxreaders limit reached", nil}
TransactionFullError = &Error{"transaction has too many dirty pages - transaction too big", nil}
CursorFullError = &Error{"internal error - cursor stack limit reached", nil}
PageFullError = &Error{"internal error - page has no more space", nil}
MapResizedError = &Error{"database contents grew beyond environment mapsize", nil}
IncompatibleError = &Error{"operation and db incompatible, or db flags changed", nil}
BadReaderSlotError = &Error{"invalid reuse of reader locktable slot", nil}
BadTransactionError = &Error{"transaction cannot recover - it must be aborted", nil}
BadValueSizeError = &Error{"too big key/data or key is empty", nil}
)
type Error struct {
	message string
	cause   error
}

func (e *Error) Error() {
func (e *Error) Error() string {
	if e.cause != nil {
		return e.message + ": " + e.cause.Error()
	}
	return e.message
}
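The new cause field lets these sentinel errors carry an underlying error, which Error() appends to the message. A small hedged sketch of how the chaining reads (the helper is hypothetical, would live inside package bolt, and is not code from this commit):

// Hypothetical sketch, not part of this diff.
func wrapExample(cause error) string {
	err := &Error{"requested page not found", cause}
	// With a non-nil cause, Error() yields "requested page not found: <cause>".
	return err.Error()
}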

meta.go (29 lines changed)

@ -1,7 +1,7 @@
package bolt
var (
InvalidMetaPageError = &Error{"Invalid meta page"}
InvalidMetaPageError = &Error{"Invalid meta page", nil}
)
// TODO: #define mm_psize mm_dbs[0].md_pad
@ -25,18 +25,16 @@ var (
// void *md_relctx; /**< user-provided context for md_rel */
// } MDB_dbx;
const magic int32 = 0xBEEFC0DE
const magic uint32 = 0xC0DEC0DE
const version uint32 = 1
type meta struct {
magic int32
version int32
mapsize int
free bucket
main bucket
pgno int
txnid int
magic uint32
version uint32
free Bucket
main Bucket
pgno int
txnid int
}
// validate checks the marker bytes and version of the meta page to ensure it matches this binary.
@ -49,7 +47,6 @@ func (m *meta) validate() error {
return nil
}
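Only the tail of validate is visible in this hunk. A hedged guess at the shape of such a check, using the magic and version constants and the sentinel errors defined elsewhere in this commit (the real body is elided here):

// Hypothetical sketch; the actual validate body is not shown in this diff.
func (m *meta) validateSketch() error {
	if m.magic != magic {
		return InvalidError
	}
	if m.version != version {
		return VersionMismatchError
	}
	return nil
}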
// Read the environment parameters of a DB environment before
// mapping it into memory.
// @param[in] env the environment handle
@ -57,10 +54,10 @@ func (m *meta) validate() error {
// @return 0 on success, non-zero on failure.
func (m *meta) read(p *page) error {
	/*
		if (off == 0 || m->mm_txnid > meta->mm_txnid)
			*meta = *m;
		}
		return 0;
	*/
	return nil
}

node.go (76 lines changed)

@ -44,48 +44,48 @@ func (n *node) size() int {
// @param[in] indx The index of the subpage on the main page.
func (n *node) shrink(index int) {
	/*
		MDB_node *node;
		MDB_page *sp, *xp;
		char *base;
		int nsize, delta;
		indx_t i, numkeys, ptr;

		node = NODEPTR(mp, indx);
		sp = (MDB_page *)NODEDATA(node);
		delta = SIZELEFT(sp);
		xp = (MDB_page *)((char *)sp + delta);

		// shift subpage upward
		if (IS_LEAF2(sp)) {
			nsize = NUMKEYS(sp) * sp->mp_pad;
			if (nsize & 1)
				return; // do not make the node uneven-sized
			memmove(METADATA(xp), METADATA(sp), nsize);
		} else {
			int i;
			numkeys = NUMKEYS(sp);
			for (i=numkeys-1; i>=0; i--)
				xp->mp_ptrs[i] = sp->mp_ptrs[i] - delta;
		}
		xp->mp_upper = sp->mp_lower;
		xp->mp_lower = sp->mp_lower;
		xp->mp_flags = sp->mp_flags;
		xp->mp_pad = sp->mp_pad;
		COPY_PGNO(xp->mp_pgno, mp->mp_pgno);

		nsize = NODEDSZ(node) - delta;
		SETDSZ(node, nsize);

		// shift lower nodes upward
		ptr = mp->mp_ptrs[indx];
		numkeys = NUMKEYS(mp);
		for (i = 0; i < numkeys; i++) {
			if (mp->mp_ptrs[i] <= ptr)
				mp->mp_ptrs[i] += delta;
		}

		base = (char *)mp + mp->mp_upper;
		memmove(base + delta, base, ptr - mp->mp_upper + NODESIZE + NODEKSZ(node));
		mp->mp_upper += delta;
	*/
}

os.go (new file, 5 lines)

@ -0,0 +1,5 @@
package bolt

import (
	_ "os"
)

page.go (48 lines changed)

@ -5,9 +5,11 @@ import (
)
const maxPageSize = 0x8000
const minKeyCount = 2
var _page page
const headerSize = unsafe.Offsetof(_page.ptr)
const pageHeaderSize = int(unsafe.Offsetof(_page.ptr))
const minPageKeys = 2
const fillThreshold = 250 // 25%
@ -20,13 +22,15 @@ const (
p_dirty = 0x10 /**< dirty page, also set for #P_SUBP pages */
p_sub = 0x40
p_keep = 0x8000 /**< leave this page alone during spill */
p_invalid = ^pgno(0)
)
// maxCommitPages is the maximum number of pages to commit in one writev() call.
const maxCommitPages 64
const maxCommitPages = 64
/* max bytes to write in one call */
const maxWriteByteCount 0x80000000U // TODO: #define MAX_WRITE 0x80000000U >> (sizeof(ssize_t) == 4))
const maxWriteByteCount uint = 0x80000000 // TODO: #define MAX_WRITE 0x80000000U >> (sizeof(ssize_t) == 4))
// TODO:
// #if defined(IOV_MAX) && IOV_MAX < MDB_COMMIT_PAGES
@ -42,26 +46,28 @@ const maxWriteByteCount 0x80000000U // TODO: #define MAX_WRITE 0x80000000U >>
// TODO: #define MDB_SPLIT_REPLACE MDB_APPENDDUP /**< newkey is not new */
type pgno uint64
type txnid uint64
type indx uint16
type page struct {
id pgno
flags int
lower int
upper int
lower indx
upper indx
overflow int
ptr int
}
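The ptr field marks where a page's data begins, which is why pageHeaderSize is derived from unsafe.Offsetof(_page.ptr). A hedged sketch of how such a struct is typically overlaid on a raw buffer (the helper name and the buffer handling are assumptions, not part of this diff):

// Hypothetical helper, not from this commit: view page id within a raw,
// page-aligned byte buffer as a *page by casting at the right offset.
func pageInBuffer(buf []byte, id pgno, pageSize int) *page {
	p := (*page)(unsafe.Pointer(&buf[int(id)*pageSize]))
	p.id = id
	return p
}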
type pageState struct {
	head int /**< Reclaimed freeDB pages, or NULL before use */
	last int /**< ID of last used record, or 0 if !mf_pghead */
}
// meta returns a pointer to the metadata section of the page.
func (p *page) meta() (*meta, error) {
// Exit if page is not a meta page.
if (p.flags & p_meta) != 0 {
return InvalidMetaPageError
if (p.flags & p_meta) == 0 {
return nil, InvalidMetaPageError
}
// Cast the meta section and validate before returning.
@ -72,12 +78,17 @@ func (p *page) meta() (*meta, error) {
return m, nil
}
// initMeta initializes a page as a new meta page.
func (p *page) initMeta(pageSize int) {
	p.flags = p_meta
	m := (*meta)(unsafe.Pointer(&p.ptr))
	m.magic = magic
	m.version = version
	m.free.pad = uint32(pageSize)
	m.pgno = 1
	m.free.root = p_invalid
	m.main.root = p_invalid
}
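For orientation, a hedged sketch of how initMeta and meta() might be exercised together; the function name and the in-memory buffer are assumptions rather than code from this commit:

// Hypothetical usage sketch, not part of this diff: build a meta page in a
// fresh buffer and read it back through the typed accessor.
func newMetaPageSketch(pageSize int) (*meta, error) {
	buf := make([]byte, pageSize)
	p := (*page)(unsafe.Pointer(&buf[0]))
	p.initMeta(pageSize) // stamps magic, version, and the p_meta flag
	return p.meta()      // checks the flag and validates before returning *meta
}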
// nodeCount returns the number of nodes on the page.
func (p *page) nodeCount() int {
@ -86,10 +97,5 @@ func (p *page) nodeCount() int {
// remainingSize returns the number of bytes left in the page.
func (p *page) remainingSize() int {
return p.header.upper - p.header.lower
}
// remainingSize returns the number of bytes left in the page.
func (p *page) remainingSize() int {
return p.header.upper - p.header.lower
return int(p.upper - p.lower)
}


@ -1,5 +1,5 @@
package bolt
type reader struct {
int transactionID
txnid int
}

File diff suppressed because it is too large.