mirror of https://github.com/hak5/bolt.git

commit 698b07b074
parent feb84e39be
branch master

Add nested buckets.

This commit adds the ability to create buckets inside of other buckets. It also replaces the buckets page with a root bucket. Fixes #56.
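For context, this is roughly how the new API reads once the commit is applied — a minimal sketch written in the style of the package's own tests below (so it assumes package-internal types *DB and *Tx); the function name, bucket names, and keys are illustrative, and error handling is abbreviated:

func exampleNestedBuckets(db *DB) error {
	return db.Update(func(tx *Tx) error {
		// Top-level buckets now hang off the transaction's root bucket.
		if err := tx.CreateBucket([]byte("widgets")); err != nil {
			return err
		}

		// Buckets can be created inside other buckets.
		widgets := tx.Bucket([]byte("widgets"))
		if err := widgets.CreateBucket([]byte("foo")); err != nil {
			return err
		}

		// Keys and nested buckets live side by side in the parent bucket.
		return widgets.Bucket([]byte("foo")).Put([]byte("bar"), []byte("baz"))
	})
}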
Makefile | 4

@@ -10,7 +10,7 @@ cloc:
 	@cloc --not-match-f='Makefile|_test.go' .
 
 cover: fmt
-	go test -coverprofile=$(COVERPROFILE) -test.run=$(TEST) .
+	go test -coverprofile=$(COVERPROFILE) -test.run=$(TEST) $(COVERFLAG) .
 	go tool cover -html=$(COVERPROFILE)
 	rm $(COVERPROFILE)
 
@@ -32,6 +32,6 @@ test: fmt errcheck
 	@echo ""
 	@echo ""
 	@echo "=== RACE DETECTOR ==="
-	@go test -v -race -test.run=Parallel
+	@go test -v -race -test.run="TestSimulate_(100op|1000op|10000op)"
 
 .PHONY: bench cloc cover cpuprofile fmt memprofile test
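The narrowed race-detector target can also be run by hand; the equivalent invocation (test names taken from the new simulation file below) is:

	go test -v -race -test.run="TestSimulate_(100op|1000op|10000op)"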
simulation_test.go | 327 (new file)

@@ -0,0 +1,327 @@
package bolt

import (
	"bytes"
	"fmt"
	"math/rand"
	"sync"
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestSimulate_1op_1p(t *testing.T)     { testSimulate(t, 1, 1) }
func TestSimulate_10op_1p(t *testing.T)    { testSimulate(t, 10, 1) }
func TestSimulate_100op_1p(t *testing.T)   { testSimulate(t, 100, 1) }
func TestSimulate_1000op_1p(t *testing.T)  { testSimulate(t, 1000, 1) }
func TestSimulate_10000op_1p(t *testing.T) { testSimulate(t, 10000, 1) }

func TestSimulate_10op_10p(t *testing.T)    { testSimulate(t, 10, 10) }
func TestSimulate_100op_10p(t *testing.T)   { testSimulate(t, 100, 10) }
func TestSimulate_1000op_10p(t *testing.T)  { testSimulate(t, 1000, 10) }
func TestSimulate_10000op_10p(t *testing.T) { testSimulate(t, 10000, 10) }

func TestSimulate_100op_100p(t *testing.T)   { testSimulate(t, 100, 100) }
func TestSimulate_1000op_100p(t *testing.T)  { testSimulate(t, 1000, 100) }
func TestSimulate_10000op_100p(t *testing.T) { testSimulate(t, 10000, 100) }

func TestSimulate_10000op_1000p(t *testing.T) { testSimulate(t, 10000, 1000) }

// Randomly generate operations on a given database with multiple clients to ensure consistency and thread safety.
func testSimulate(t *testing.T, threadCount, parallelism int) {
	if testing.Short() {
		t.Skip("skipping test in short mode.")
	}

	rand.Seed(int64(qseed))

	// A list of operations that readers and writers can perform.
	var readerHandlers = []simulateHandler{simulateGetHandler}
	var writerHandlers = []simulateHandler{simulateGetHandler, simulatePutHandler}

	var versions = make(map[txid]*QuickDB)
	versions[1] = NewQuickDB()
	withOpenDB(func(db *DB, path string) {
		var mutex sync.Mutex

		// Run n threads in parallel, each with their own operation.
		var wg sync.WaitGroup
		var threads = make(chan bool, parallelism)
		var i int
		for {
			threads <- true
			wg.Add(1)
			writable := ((rand.Int() % 100) < 20) // 20% writers

			// Choose an operation to execute.
			var handler simulateHandler
			if writable {
				handler = writerHandlers[rand.Intn(len(writerHandlers))]
			} else {
				handler = readerHandlers[rand.Intn(len(readerHandlers))]
			}

			// Execute a thread for the given operation.
			go func(writable bool, handler simulateHandler) {
				defer wg.Done()

				// Start transaction.
				tx, err := db.Begin(writable)
				if err != nil {
					t.Fatal("tx begin: ", err)
				}

				// Obtain current state of the dataset.
				mutex.Lock()
				var qdb = versions[tx.id()]
				if writable {
					qdb = versions[tx.id()-1].Copy()
				}
				mutex.Unlock()

				// Make sure we commit/rollback the tx at the end and update the state.
				if writable {
					defer func() {
						mutex.Lock()
						versions[tx.id()] = qdb
						mutex.Unlock()

						assert.NoError(t, tx.Commit())
					}()
				} else {
					defer tx.Rollback()
				}

				// Ignore operation if we don't have data yet.
				if qdb == nil {
					return
				}

				// Execute handler.
				handler(tx, qdb)

				// Release a thread back to the scheduling loop.
				<-threads
			}(writable, handler)

			i++
			if i > threadCount {
				break
			}
		}

		// Wait until all threads are done.
		wg.Wait()
	})
}

type simulateHandler func(tx *Tx, qdb *QuickDB)

// Retrieves a key from the database and verifies that it is what is expected.
func simulateGetHandler(tx *Tx, qdb *QuickDB) {
	// Randomly retrieve an existing key.
	keys := qdb.Rand()
	if len(keys) == 0 {
		return
	}

	// Retrieve root bucket.
	b := tx.Bucket(keys[0])
	if b == nil {
		panic(fmt.Sprintf("bucket[0] expected: %v\n", keys[0]))
	}

	// Drill into nested buckets.
	for _, key := range keys[1 : len(keys)-1] {
		b = b.Bucket(key)
		if b == nil {
			panic(fmt.Sprintf("bucket[n] expected: %v -> %v\n", keys, key))
		}
	}

	// Verify key/value on the final bucket.
	expected := qdb.Get(keys)
	actual := b.Get(keys[len(keys)-1])
	if !bytes.Equal(actual, expected) {
		fmt.Println("=== EXPECTED ===")
		fmt.Println(expected)
		fmt.Println("=== ACTUAL ===")
		fmt.Println(actual)
		fmt.Println("=== END ===")
		panic("value mismatch")
	}
}

// Inserts a key into the database.
func simulatePutHandler(tx *Tx, qdb *QuickDB) {
	keys, value := randKeys(), randValue()

	// Retrieve root bucket.
	b := tx.Bucket(keys[0])
	if b == nil {
		if err := tx.CreateBucket(keys[0]); err != nil {
			panic("create bucket: " + err.Error())
		}
		b = tx.Bucket(keys[0])
		if b == nil {
			panic(fmt.Sprintf("bucket[0] nil: %v", keys[0]))
		}
	}

	// Create nested buckets, if necessary.
	for _, key := range keys[1 : len(keys)-1] {
		child := b.Bucket(key)
		if child != nil {
			b = child
		} else {
			if err := b.CreateBucket(key); err != nil {
				panic("create bucket: " + err.Error())
			}
			b = b.Bucket(key)
		}
	}

	// Insert into database.
	if err := b.Put(keys[len(keys)-1], value); err != nil {
		panic("put: " + err.Error())
	}

	// Insert into in-memory database.
	qdb.Put(keys, value)
}

// QuickDB is an in-memory database that replicates the functionality of the
// Bolt DB type except that it is entirely in-memory. It is meant for testing
// that the Bolt database is consistent.
type QuickDB struct {
	sync.RWMutex
	m map[string]interface{}
}

// NewQuickDB returns an instance of QuickDB.
func NewQuickDB() *QuickDB {
	return &QuickDB{m: make(map[string]interface{})}
}

// Get retrieves the value at a key path.
func (db *QuickDB) Get(keys [][]byte) []byte {
	db.RLock()
	defer db.RUnlock()

	m := db.m
	for _, key := range keys[:len(keys)-1] {
		value := m[string(key)]
		if value == nil {
			return nil
		}
		switch value := value.(type) {
		case map[string]interface{}:
			m = value
		case []byte:
			return nil
		}
	}

	// Only return if it's a simple value.
	if value, ok := m[string(keys[len(keys)-1])].([]byte); ok {
		return value
	}
	return nil
}

// Put inserts a value into a key path.
func (db *QuickDB) Put(keys [][]byte, value []byte) {
	db.Lock()
	defer db.Unlock()

	// Build buckets all the way down the key path.
	m := db.m
	for _, key := range keys[:len(keys)-1] {
		if _, ok := m[string(key)].([]byte); ok {
			return // Keypath intersects with a simple value. Do nothing.
		}

		if m[string(key)] == nil {
			m[string(key)] = make(map[string]interface{})
		}
		m = m[string(key)].(map[string]interface{})
	}

	// Insert value into the last key.
	m[string(keys[len(keys)-1])] = value
}

// Rand returns a random key path that points to a simple value.
func (db *QuickDB) Rand() [][]byte {
	db.RLock()
	defer db.RUnlock()
	if len(db.m) == 0 {
		return nil
	}
	var keys [][]byte
	db.rand(db.m, &keys)
	return keys
}

func (db *QuickDB) rand(m map[string]interface{}, keys *[][]byte) {
	i, index := 0, rand.Intn(len(m))
	for k, v := range m {
		if i == index {
			*keys = append(*keys, []byte(k))
			if v, ok := v.(map[string]interface{}); ok {
				db.rand(v, keys)
			}
			return
		}
		i++
	}
	panic("quickdb rand: out-of-range")
}

// Copy copies the entire database.
func (db *QuickDB) Copy() *QuickDB {
	db.RLock()
	defer db.RUnlock()
	return &QuickDB{m: db.copy(db.m)}
}

func (db *QuickDB) copy(m map[string]interface{}) map[string]interface{} {
	clone := make(map[string]interface{}, len(m))
	for k, v := range m {
		switch v := v.(type) {
		case map[string]interface{}:
			clone[k] = db.copy(v)
		default:
			clone[k] = v
		}
	}
	return clone
}

func randKey() []byte {
	var min, max = 1, 1024
	n := rand.Intn(max-min) + min
	b := make([]byte, n)
	for i := 0; i < n; i++ {
		b[i] = byte(rand.Intn(255))
	}
	return b
}

func randKeys() [][]byte {
	var keys [][]byte
	var count = rand.Intn(2) + 2
	for i := 0; i < count; i++ {
		keys = append(keys, randKey())
	}
	return keys
}

func randValue() []byte {
	n := rand.Intn(8192)
	b := make([]byte, n)
	for i := 0; i < n; i++ {
		b[i] = byte(rand.Intn(255))
	}
	return b
}
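QuickDB's model is just a nested map where a non-terminal key selects a child map (a "bucket") and the final key holds a []byte. A tiny self-contained sketch of that shape (illustrative only; it has no Bolt dependencies):

package main

import "fmt"

func main() {
	// Put(["a","b","k"], "v"): build buckets down the key path.
	m := map[string]interface{}{
		"a": map[string]interface{}{
			"b": map[string]interface{}{"k": []byte("v")},
		},
	}

	// Get(["a","b","k"]): walk the path, then type-assert the leaf.
	leaf := m["a"].(map[string]interface{})["b"].(map[string]interface{})
	fmt.Printf("%s\n", leaf["k"].([]byte)) // prints "v"
}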
bucket.go | 368

@@ -3,6 +3,9 @@ package bolt
 import (
 	"bytes"
 	"errors"
+	"fmt"
+	"sort"
+	"unsafe"
 )
 
 var (
@@ -16,14 +19,6 @@ var (
 	// ErrBucketNameRequired is returned when creating a bucket with a blank name.
 	ErrBucketNameRequired = errors.New("bucket name required")
 
-	// ErrBucketNameTooLarge is returned when creating a bucket with a name
-	// that is longer than MaxBucketNameSize.
-	ErrBucketNameTooLarge = errors.New("bucket name too large")
-
-	// ErrBucketNotWritable is returned when changing data on a bucket
-	// reference that was created from a read-only transaction.
-	ErrBucketNotWritable = errors.New("bucket not writable")
-
 	// ErrKeyRequired is returned when inserting a zero-length key.
 	ErrKeyRequired = errors.New("key required")
 
@@ -33,6 +28,11 @@ var (
 	// ErrValueTooLarge is returned when inserting a value that is larger than MaxValueSize.
 	ErrValueTooLarge = errors.New("value too large")
 
+	// ErrIncompatibleValue is returned when trying to create or delete a bucket
+	// on an existing non-bucket key or when trying to create or delete a
+	// non-bucket key on an existing bucket key.
+	ErrIncompatibleValue = errors.New("incompatible value")
+
 	// ErrSequenceOverflow is returned when the next sequence number will be
 	// larger than the maximum integer size.
 	ErrSequenceOverflow = errors.New("sequence overflow")
@@ -41,8 +41,10 @@ var (
 // Bucket represents a collection of key/value pairs inside the database.
 type Bucket struct {
 	*bucket
-	name string
-	tx   *Tx
+	tx      *Tx
+	buckets map[string]*Bucket
+	nodes   map[pgid]*node
+	pending []*node
 }
 
 // bucket represents the on-file representation of a bucket.
@@ -51,9 +53,14 @@ type bucket struct {
 	sequence uint64
 }
 
-// Name returns the name of the bucket.
-func (b *Bucket) Name() string {
-	return b.name
+// newBucket returns a new bucket associated with a transaction.
+func newBucket(tx *Tx) Bucket {
+	var b = Bucket{tx: tx}
+	b.buckets = make(map[string]*Bucket)
+	if tx.writable {
+		b.nodes = make(map[pgid]*node)
+	}
+	return b
 }
 
 // Writable returns whether the bucket is writable.
@@ -70,17 +77,145 @@ func (b *Bucket) Cursor() *Cursor {
 	// Allocate and return a cursor.
 	return &Cursor{
-		tx:    b.tx,
-		root:  b.root,
+		bucket: b,
 		stack: make([]elemRef, 0),
 	}
 }
 
-// Get retrieves the value for a key in the bucket.
-// Returns a nil value if the key does not exist.
-func (b *Bucket) Get(key []byte) []byte {
+// Bucket retrieves a nested bucket by name.
+// Returns nil if the bucket does not exist.
+func (b *Bucket) Bucket(name []byte) *Bucket {
+	if child := b.buckets[string(name)]; child != nil {
+		return child
+	}
+
+	// Move cursor to key.
 	c := b.Cursor()
-	k, v := c.Seek(key)
+	k, v, flags := c.seek(name)
+
+	// Return nil if the key doesn't exist or it is not a bucket.
+	if !bytes.Equal(name, k) || (flags&bucketLeafFlag) == 0 {
+		return nil
+	}
+
+	// Otherwise create a bucket and cache it.
+	var child = newBucket(b.tx)
+	child.bucket = &bucket{}
+	*child.bucket = *(*bucket)(unsafe.Pointer(&v[0]))
+	b.buckets[string(name)] = &child
+
+	return &child
+}
+
+// CreateBucket creates a new bucket at the given key.
+// Returns an error if the key already exists, if the bucket name is blank, or if the bucket name is too long.
+func (b *Bucket) CreateBucket(key []byte) error {
+	if b.tx.db == nil {
+		return ErrTxClosed
+	} else if !b.tx.writable {
+		return ErrTxNotWritable
+	} else if len(key) == 0 {
+		return ErrBucketNameRequired
+	}
+
+	// Move cursor to correct position.
+	c := b.Cursor()
+	k, _, flags := c.seek(key)
+
+	// Return an error if there is an existing key.
+	if bytes.Equal(key, k) {
+		if (flags & bucketLeafFlag) != 0 {
+			return ErrBucketExists
+		} else {
+			return ErrIncompatibleValue
+		}
+	}
+
+	// Create a blank root leaf page.
+	p, err := b.tx.allocate(1)
+	if err != nil {
+		return err
+	}
+	p.flags = leafPageFlag
+
+	// Insert key/value.
+	value := make([]byte, unsafe.Sizeof(bucket{}))
+	bucket := (*bucket)(unsafe.Pointer(&value[0]))
+	bucket.root = p.id
+
+	// Insert into node.
+	c.node().put(key, key, value, 0, bucketLeafFlag)
+
+	return nil
+}
+
+// CreateBucketIfNotExists creates a new bucket if it doesn't already exist.
+// Returns an error if the bucket name is blank, or if the bucket name is too long.
+func (b *Bucket) CreateBucketIfNotExists(key []byte) error {
+	err := b.CreateBucket(key)
+	if err != nil && err != ErrBucketExists {
+		return err
+	}
+	return nil
+}
+
+// DeleteBucket deletes a bucket at the given key.
+// Returns an error if the bucket does not exist, or if the key represents a non-bucket value.
+func (b *Bucket) DeleteBucket(key []byte) error {
+	if b.tx.db == nil {
+		return ErrTxClosed
+	} else if !b.Writable() {
+		return ErrTxNotWritable
+	}
+
+	// Move cursor to correct position.
+	c := b.Cursor()
+	k, _, flags := c.seek(key)
+
+	// Return an error if bucket doesn't exist or is not a bucket.
+	if !bytes.Equal(key, k) {
+		return ErrBucketNotFound
+	} else if (flags & bucketLeafFlag) == 0 {
+		return ErrIncompatibleValue
+	}
+
+	// Recursively delete all child buckets.
+	child := b.Bucket(key)
+	err := child.ForEach(func(k, v []byte) error {
+		if v == nil {
+			if err := child.DeleteBucket(k); err != nil {
+				return fmt.Errorf("delete bucket: %s", err)
+			}
+		}
+		return nil
+	})
+	if err != nil {
+		return err
+	}
+
+	// Remove cached copy.
+	delete(b.buckets, string(key))
+
+	// Release all bucket pages to freelist.
+	b.tx.forEachPage(child.root, 0, func(p *page, _ int) {
+		b.tx.db.freelist.free(b.tx.id(), p)
+	})
+
+	// Delete the node if we have a matching key.
+	c.node().del(key)
+
+	return nil
+}
+
+// Get retrieves the value for a key in the bucket.
+// Returns a nil value if the key does not exist or if the key is a nested bucket.
+func (b *Bucket) Get(key []byte) []byte {
+	k, v, flags := b.Cursor().seek(key)
+
+	// Return nil if this is a bucket.
+	if (flags & bucketLeafFlag) != 0 {
+		return nil
+	}
+
 	// If our target node isn't the same key as what's passed in then return nil.
 	if !bytes.Equal(key, k) {
@@ -96,11 +231,8 @@ func (b *Bucket) Put(key []byte, value []byte) error {
 	if b.tx.db == nil {
 		return ErrTxClosed
 	} else if !b.Writable() {
-		return ErrBucketNotWritable
-	}
-
-	// Validate the key and data size.
-	if len(key) == 0 {
+		return ErrTxNotWritable
+	} else if len(key) == 0 {
 		return ErrKeyRequired
 	} else if len(key) > MaxKeySize {
 		return ErrKeyTooLarge
@@ -110,10 +242,15 @@ func (b *Bucket) Put(key []byte, value []byte) error {
 	// Move cursor to correct position.
 	c := b.Cursor()
-	c.Seek(key)
+	k, _, flags := c.seek(key)
 
-	// Insert the key/value.
-	c.node(b.tx).put(key, key, value, 0)
+	// Return an error if there is an existing key with a bucket value.
+	if bytes.Equal(key, k) && (flags&bucketLeafFlag) != 0 {
+		return ErrIncompatibleValue
+	}
+
+	// Insert into node.
+	c.node().put(key, key, value, 0, 0)
 
 	return nil
 }
@@ -125,15 +262,20 @@ func (b *Bucket) Delete(key []byte) error {
 	if b.tx.db == nil {
 		return ErrTxClosed
 	} else if !b.Writable() {
-		return ErrBucketNotWritable
+		return ErrTxNotWritable
 	}
 
 	// Move cursor to correct position.
 	c := b.Cursor()
-	c.Seek(key)
+	_, _, flags := c.seek(key)
+
+	// Return an error if there is already an existing bucket value.
+	if (flags & bucketLeafFlag) != 0 {
+		return ErrIncompatibleValue
+	}
 
 	// Delete the node if we have a matching key.
-	c.node(b.tx).del(key)
+	c.node().del(key)
 
 	return nil
 }
@@ -143,7 +285,7 @@ func (b *Bucket) NextSequence() (int, error) {
 	if b.tx.db == nil {
 		return 0, ErrTxClosed
 	} else if !b.Writable() {
-		return 0, ErrBucketNotWritable
+		return 0, ErrTxNotWritable
 	}
 
 	// Make sure next sequence number will not be larger than the maximum
@@ -194,6 +336,162 @@ func (b *Bucket) Stat() *BucketStat {
 	return s
 }
 
+// spill writes all the nodes for this bucket to dirty pages.
+func (b *Bucket) spill() error {
+	// Spill all child buckets first.
+	for name, child := range b.buckets {
+		if err := child.spill(); err != nil {
+			return err
+		}
+
+		// Update the child bucket header in this bucket.
+		value := make([]byte, unsafe.Sizeof(bucket{}))
+		bucket := (*bucket)(unsafe.Pointer(&value[0]))
+		*bucket = *child.bucket
+
+		// Update parent node.
+		c := b.Cursor()
+		k, _, flags := c.seek([]byte(name))
+		_assert(bytes.Equal([]byte(name), k), "misplaced bucket header: %x -> %x", []byte(name), k)
+		_assert(flags&bucketLeafFlag != 0, "unexpected bucket header flag: %x", flags)
+		c.node().put([]byte(name), []byte(name), value, 0, bucketLeafFlag)
+	}
+
+	// Ignore if there are no nodes to spill.
+	if len(b.nodes) == 0 {
+		return nil
+	}
+
+	// Sort nodes by highest depth first.
+	nodes := make(nodesByDepth, 0, len(b.nodes))
+	for _, n := range b.nodes {
+		nodes = append(nodes, n)
+	}
+	sort.Sort(nodes)
+
+	// Spill nodes by deepest first.
+	for i := 0; i < len(nodes); i++ {
+		n := nodes[i]
+
+		// Split nodes into appropriate sized nodes.
+		// The first node in this list will be a reference to n to preserve ancestry.
+		newNodes := n.split(b.tx.db.pageSize)
+		b.pending = newNodes
+
+		// If this is a root node that split then create a parent node.
+		if n.parent == nil && len(newNodes) > 1 {
+			n.parent = &node{bucket: b, isLeaf: false}
+			nodes = append(nodes, n.parent)
+		}
+
+		// Add node's page to the freelist.
+		if n.pgid > 0 {
+			b.tx.db.freelist.free(b.tx.id(), b.tx.page(n.pgid))
+		}
+
+		// Write nodes to dirty pages.
+		for i, newNode := range newNodes {
+			// Allocate contiguous space for the node.
+			p, err := b.tx.allocate((newNode.size() / b.tx.db.pageSize) + 1)
+			if err != nil {
+				return err
+			}
+
+			// Write the node to the page.
+			newNode.write(p)
+			newNode.pgid = p.id
+			newNode.parent = n.parent
+
+			// The first node should use the existing entry, other nodes are inserts.
+			var oldKey []byte
+			if i == 0 {
+				oldKey = n.key
+			} else {
+				oldKey = newNode.inodes[0].key
+			}
+
+			// Update the parent entry.
+			if newNode.parent != nil {
+				newNode.parent.put(oldKey, newNode.inodes[0].key, nil, newNode.pgid, 0)
+			}
+
+			// Update the statistics.
+			b.tx.stats.Spill++
+		}
+
+		b.pending = nil
+	}
+
+	// Clear out nodes now that they are all spilled.
+	b.nodes = make(map[pgid]*node)
+
+	// Update the root node for this bucket.
+	b.root = nodes[len(nodes)-1].pgid
+
+	return nil
+}
+
+// rebalance attempts to balance all nodes.
+func (b *Bucket) rebalance() {
+	for _, n := range b.nodes {
+		n.rebalance()
+	}
+	for _, child := range b.buckets {
+		child.rebalance()
+	}
+}
+
+// node creates a node from a page and associates it with a given parent.
+func (b *Bucket) node(pgid pgid, parent *node) *node {
+	_assert(b.nodes != nil, "nodes map expected")
+	// Retrieve node if it's already been created.
+	if n := b.nodes[pgid]; n != nil {
+		return n
+	}
+
+	// Otherwise create a branch and cache it.
+	n := &node{bucket: b, parent: parent}
+	if n.parent != nil {
+		n.depth = n.parent.depth + 1
+	}
+	n.read(b.tx.page(pgid))
+	b.nodes[pgid] = n
+
+	// Update statistics.
+	b.tx.stats.NodeCount++
+
+	return n
+}
+
+// dereference removes all references to the old mmap.
+func (b *Bucket) dereference() {
+	for _, n := range b.nodes {
+		n.dereference()
+	}
+
+	for _, n := range b.pending {
+		n.dereference()
+	}
+
+	for _, child := range b.buckets {
+		child.dereference()
+	}
+
+	// Update statistics.
+	b.tx.stats.NodeDeref += len(b.nodes) + len(b.pending)
+}
+
+// pageNode returns the in-memory node, if it exists.
+// Otherwise returns the underlying page.
+func (b *Bucket) pageNode(id pgid) (*page, *node) {
+	if b.nodes != nil {
+		if n := b.nodes[id]; n != nil {
+			return nil, n
+		}
+	}
+	return b.tx.page(id), nil
+}
+
 // BucketStat represents stats on a bucket such as branch pages and leaf pages.
 type BucketStat struct {
 	BranchPageCount int
@@ -202,9 +500,3 @@ type BucketStat struct {
 	KeyCount int
 	MaxDepth int
 }
-
-type bucketsByName []*Bucket
-
-func (s bucketsByName) Len() int           { return len(s) }
-func (s bucketsByName) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
-func (s bucketsByName) Less(i, j int) bool { return s[i].name < s[j].name }
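The key/bucket collision rules introduced above can be summarized with a short sketch in the style of the package's own tests (the helper name, bucket names, and keys are illustrative):

func demoIncompatibleValue(db *DB) {
	db.Update(func(tx *Tx) error {
		tx.CreateBucket([]byte("widgets"))
		b := tx.Bucket([]byte("widgets"))

		// A nested bucket and a plain key cannot share a name.
		b.CreateBucket([]byte("foo"))
		err := b.Put([]byte("foo"), []byte("bar")) // err == ErrIncompatibleValue
		err = b.Delete([]byte("foo"))              // err == ErrIncompatibleValue

		// Likewise, a plain key cannot be turned into a bucket.
		b.Put([]byte("baz"), []byte("quux"))
		err = b.CreateBucket([]byte("baz")) // err == ErrIncompatibleValue
		_ = err
		return nil
	})
}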
bucket_test.go | 530

@@ -14,11 +14,11 @@ import (
 )
 
 // Ensure that a bucket that gets a non-existent key returns nil.
-func TestBucketGetNonExistent(t *testing.T) {
+func TestBucket_Get_NonExistent(t *testing.T) {
 	withOpenDB(func(db *DB, path string) {
 		db.Update(func(tx *Tx) error {
-			tx.CreateBucket("widgets")
-			value := tx.Bucket("widgets").Get([]byte("foo"))
+			tx.CreateBucket([]byte("widgets"))
+			value := tx.Bucket([]byte("widgets")).Get([]byte("foo"))
 			assert.Nil(t, value)
 			return nil
 		})
@@ -26,11 +26,11 @@ func TestBucketGetNonExistent(t *testing.T) {
 }
 
 // Ensure that a bucket can read a value that is not flushed yet.
-func TestBucketGetFromNode(t *testing.T) {
+func TestBucket_Get_FromNode(t *testing.T) {
 	withOpenDB(func(db *DB, path string) {
 		db.Update(func(tx *Tx) error {
-			tx.CreateBucket("widgets")
-			b := tx.Bucket("widgets")
+			tx.CreateBucket([]byte("widgets"))
+			b := tx.Bucket([]byte("widgets"))
 			b.Put([]byte("foo"), []byte("bar"))
 			value := b.Get([]byte("foo"))
 			assert.Equal(t, value, []byte("bar"))
@@ -39,84 +39,278 @@ func TestBucketGetFromNode(t *testing.T) {
 	})
 }
 
-// Ensure that a bucket can write a key/value.
-func TestBucketPut(t *testing.T) {
+// Ensure that a bucket retrieved via Get() returns nil.
+func TestBucket_Get_IncompatibleValue(t *testing.T) {
 	withOpenDB(func(db *DB, path string) {
 		db.Update(func(tx *Tx) error {
-			tx.CreateBucket("widgets")
-			err := tx.Bucket("widgets").Put([]byte("foo"), []byte("bar"))
+			tx.CreateBucket([]byte("widgets"))
+			assert.NoError(t, tx.Bucket([]byte("widgets")).CreateBucket([]byte("foo")))
+			assert.Nil(t, tx.Bucket([]byte("widgets")).Get([]byte("foo")))
+			return nil
+		})
+	})
+}
+
+// Ensure that a bucket can write a key/value.
+func TestBucket_Put(t *testing.T) {
+	withOpenDB(func(db *DB, path string) {
+		db.Update(func(tx *Tx) error {
+			tx.CreateBucket([]byte("widgets"))
+			err := tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar"))
 			assert.NoError(t, err)
-			value := tx.Bucket("widgets").Get([]byte("foo"))
+			value := tx.Bucket([]byte("widgets")).Get([]byte("foo"))
 			assert.Equal(t, value, []byte("bar"))
 			return nil
 		})
 	})
 }
 
-// Ensure that setting a value on a read-only bucket returns an error.
-func TestBucketPutReadOnly(t *testing.T) {
+// Ensure that a bucket can rewrite a key in the same transaction.
+func TestBucket_Put_Repeat(t *testing.T) {
 	withOpenDB(func(db *DB, path string) {
 		db.Update(func(tx *Tx) error {
-			tx.CreateBucket("widgets")
+			tx.CreateBucket([]byte("widgets"))
+			b := tx.Bucket([]byte("widgets"))
+			assert.NoError(t, b.Put([]byte("foo"), []byte("bar")))
+			assert.NoError(t, b.Put([]byte("foo"), []byte("baz")))
+			value := tx.Bucket([]byte("widgets")).Get([]byte("foo"))
+			assert.Equal(t, value, []byte("baz"))
+			return nil
+		})
+	})
+}
+
+// Ensure that a bucket can write a bunch of large values.
+func TestBucket_Put_Large(t *testing.T) {
+	var count = 100
+	var factor = 200
+	withOpenDB(func(db *DB, path string) {
+		db.Update(func(tx *Tx) error {
+			tx.CreateBucket([]byte("widgets"))
+			b := tx.Bucket([]byte("widgets"))
+			for i := 1; i < count; i++ {
+				assert.NoError(t, b.Put([]byte(strings.Repeat("0", i*factor)), []byte(strings.Repeat("X", (count-i)*factor))))
+			}
 			return nil
 		})
 		db.View(func(tx *Tx) error {
-			b := tx.Bucket("widgets")
+			b := tx.Bucket([]byte("widgets"))
+			for i := 1; i < count; i++ {
+				value := b.Get([]byte(strings.Repeat("0", i*factor)))
+				assert.Equal(t, []byte(strings.Repeat("X", (count-i)*factor)), value)
+			}
+			return nil
+		})
+	})
+}
+
+// Ensure that setting a value on a key with a bucket value returns an error.
+func TestBucket_Put_IncompatibleValue(t *testing.T) {
+	withOpenDB(func(db *DB, path string) {
+		db.Update(func(tx *Tx) error {
+			tx.CreateBucket([]byte("widgets"))
+			assert.NoError(t, tx.Bucket([]byte("widgets")).CreateBucket([]byte("foo")))
+			assert.Equal(t, ErrIncompatibleValue, tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar")))
+			return nil
+		})
+	})
+}
+
+// Ensure that setting a value while the transaction is closed returns an error.
+func TestBucket_Put_Closed(t *testing.T) {
+	withOpenDB(func(db *DB, path string) {
+		tx, _ := db.Begin(true)
+		tx.CreateBucket([]byte("widgets"))
+		b := tx.Bucket([]byte("widgets"))
+		tx.Rollback()
+		assert.Equal(t, ErrTxClosed, b.Put([]byte("foo"), []byte("bar")))
+	})
+}
+
+// Ensure that setting a value on a read-only bucket returns an error.
+func TestBucket_Put_ReadOnly(t *testing.T) {
+	withOpenDB(func(db *DB, path string) {
+		db.Update(func(tx *Tx) error {
+			assert.NoError(t, tx.CreateBucket([]byte("widgets")))
+			return nil
+		})
+		db.View(func(tx *Tx) error {
+			b := tx.Bucket([]byte("widgets"))
 			err := b.Put([]byte("foo"), []byte("bar"))
-			assert.Equal(t, err, ErrBucketNotWritable)
+			assert.Equal(t, err, ErrTxNotWritable)
 			return nil
 		})
 	})
 }
 
 // Ensure that a bucket can delete an existing key.
-func TestBucketDelete(t *testing.T) {
+func TestBucket_Delete(t *testing.T) {
 	withOpenDB(func(db *DB, path string) {
 		db.Update(func(tx *Tx) error {
-			tx.CreateBucket("widgets")
-			tx.Bucket("widgets").Put([]byte("foo"), []byte("bar"))
-			err := tx.Bucket("widgets").Delete([]byte("foo"))
+			tx.CreateBucket([]byte("widgets"))
+			tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar"))
+			err := tx.Bucket([]byte("widgets")).Delete([]byte("foo"))
 			assert.NoError(t, err)
-			value := tx.Bucket("widgets").Get([]byte("foo"))
+			value := tx.Bucket([]byte("widgets")).Get([]byte("foo"))
 			assert.Nil(t, value)
 			return nil
 		})
 	})
 }
 
-// Ensure that deleting a key on a read-only bucket returns an error.
-func TestBucketDeleteReadOnly(t *testing.T) {
+// Ensure that deleting a bucket using Delete() returns an error.
+func TestBucket_Delete_Bucket(t *testing.T) {
 	withOpenDB(func(db *DB, path string) {
 		db.Update(func(tx *Tx) error {
-			tx.CreateBucket("widgets")
+			tx.CreateBucket([]byte("widgets"))
+			b := tx.Bucket([]byte("widgets"))
+			assert.NoError(t, b.CreateBucket([]byte("foo")))
+			assert.Equal(t, ErrIncompatibleValue, b.Delete([]byte("foo")))
+			return nil
+		})
+	})
+}
+
+// Ensure that deleting a key on a read-only bucket returns an error.
+func TestBucket_Delete_ReadOnly(t *testing.T) {
+	withOpenDB(func(db *DB, path string) {
+		db.Update(func(tx *Tx) error {
+			tx.CreateBucket([]byte("widgets"))
 			return nil
 		})
 		db.View(func(tx *Tx) error {
-			b := tx.Bucket("widgets")
+			b := tx.Bucket([]byte("widgets"))
 			err := b.Delete([]byte("foo"))
-			assert.Equal(t, err, ErrBucketNotWritable)
+			assert.Equal(t, err, ErrTxNotWritable)
+			return nil
+		})
+	})
+}
+
+// Ensure that deleting a value while the transaction is closed returns an error.
+func TestBucket_Delete_Closed(t *testing.T) {
+	withOpenDB(func(db *DB, path string) {
+		tx, _ := db.Begin(true)
+		tx.CreateBucket([]byte("widgets"))
+		b := tx.Bucket([]byte("widgets"))
+		tx.Rollback()
+		assert.Equal(t, ErrTxClosed, b.Delete([]byte("foo")))
+	})
+}
+
+// Ensure that deleting a bucket causes nested buckets to be deleted.
+func TestBucket_DeleteBucket_Nested(t *testing.T) {
+	withOpenDB(func(db *DB, path string) {
+		db.Update(func(tx *Tx) error {
+			tx.CreateBucket([]byte("widgets"))
+			assert.NoError(t, tx.Bucket([]byte("widgets")).CreateBucket([]byte("foo")))
+			assert.NoError(t, tx.Bucket([]byte("widgets")).Bucket([]byte("foo")).CreateBucket([]byte("bar")))
+			assert.NoError(t, tx.Bucket([]byte("widgets")).Bucket([]byte("foo")).Bucket([]byte("bar")).Put([]byte("baz"), []byte("bat")))
+			assert.NoError(t, tx.Bucket([]byte("widgets")).DeleteBucket([]byte("foo")))
+			return nil
+		})
+	})
+}
+
+// Ensure that deleting a bucket causes nested buckets to be deleted after they have been committed.
+func TestBucket_DeleteBucket_Nested2(t *testing.T) {
+	withOpenDB(func(db *DB, path string) {
+		db.Update(func(tx *Tx) error {
+			tx.CreateBucket([]byte("widgets"))
+			assert.NoError(t, tx.Bucket([]byte("widgets")).CreateBucket([]byte("foo")))
+			assert.NoError(t, tx.Bucket([]byte("widgets")).Bucket([]byte("foo")).CreateBucket([]byte("bar")))
+			assert.NoError(t, tx.Bucket([]byte("widgets")).Bucket([]byte("foo")).Bucket([]byte("bar")).Put([]byte("baz"), []byte("bat")))
+			return nil
+		})
+		db.Update(func(tx *Tx) error {
+			assert.NotNil(t, tx.Bucket([]byte("widgets")))
+			assert.NotNil(t, tx.Bucket([]byte("widgets")).Bucket([]byte("foo")))
+			assert.NotNil(t, tx.Bucket([]byte("widgets")).Bucket([]byte("foo")).Bucket([]byte("bar")))
+			assert.Equal(t, []byte("bat"), tx.Bucket([]byte("widgets")).Bucket([]byte("foo")).Bucket([]byte("bar")).Get([]byte("baz")))
+			assert.NoError(t, tx.DeleteBucket([]byte("widgets")))
+			return nil
+		})
+		db.View(func(tx *Tx) error {
+			assert.Nil(t, tx.Bucket([]byte("widgets")))
+			return nil
+		})
+	})
+}
+
+// Ensure that deleting a child bucket with multiple pages causes all pages to get collected.
+func TestBucket_DeleteBucket_Large(t *testing.T) {
+	withOpenDB(func(db *DB, path string) {
+		db.Update(func(tx *Tx) error {
+			assert.NoError(t, tx.CreateBucket([]byte("widgets")))
+			assert.NoError(t, tx.Bucket([]byte("widgets")).CreateBucket([]byte("foo")))
+			b := tx.Bucket([]byte("widgets")).Bucket([]byte("foo"))
+			for i := 0; i < 1000; i++ {
+				assert.NoError(t, b.Put([]byte(fmt.Sprintf("%d", i)), []byte(fmt.Sprintf("%0100d", i))))
+			}
+			return nil
+		})
+		db.Update(func(tx *Tx) error {
+			assert.NoError(t, tx.DeleteBucket([]byte("widgets")))
+			return nil
+		})
+
+		// NOTE: Consistency check in withOpenDB() will error if pages not freed properly.
+	})
+}
+
+// Ensure that a simple value retrieved via Bucket() returns nil.
+func TestBucket_Bucket_IncompatibleValue(t *testing.T) {
+	withOpenDB(func(db *DB, path string) {
+		db.Update(func(tx *Tx) error {
+			tx.CreateBucket([]byte("widgets"))
+			assert.NoError(t, tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar")))
+			assert.Nil(t, tx.Bucket([]byte("widgets")).Bucket([]byte("foo")))
+			return nil
+		})
+	})
+}
+
+// Ensure that creating a bucket on an existing non-bucket key returns an error.
+func TestBucket_CreateBucket_IncompatibleValue(t *testing.T) {
+	withOpenDB(func(db *DB, path string) {
+		db.Update(func(tx *Tx) error {
+			assert.NoError(t, tx.CreateBucket([]byte("widgets")))
+			assert.NoError(t, tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar")))
+			assert.Equal(t, ErrIncompatibleValue, tx.Bucket([]byte("widgets")).CreateBucket([]byte("foo")))
+			return nil
+		})
+	})
+}
+
+// Ensure that deleting a bucket on an existing non-bucket key returns an error.
+func TestBucket_DeleteBucket_IncompatibleValue(t *testing.T) {
+	withOpenDB(func(db *DB, path string) {
+		db.Update(func(tx *Tx) error {
+			assert.NoError(t, tx.CreateBucket([]byte("widgets")))
+			assert.NoError(t, tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar")))
+			assert.Equal(t, ErrIncompatibleValue, tx.Bucket([]byte("widgets")).DeleteBucket([]byte("foo")))
 			return nil
 		})
 	})
 }
 
 // Ensure that a bucket can return an autoincrementing sequence.
-func TestBucketNextSequence(t *testing.T) {
+func TestBucket_NextSequence(t *testing.T) {
 	withOpenDB(func(db *DB, path string) {
 		db.Update(func(tx *Tx) error {
-			tx.CreateBucket("widgets")
-			tx.CreateBucket("woojits")
+			tx.CreateBucket([]byte("widgets"))
+			tx.CreateBucket([]byte("woojits"))
 
 			// Make sure sequence increments.
-			seq, err := tx.Bucket("widgets").NextSequence()
+			seq, err := tx.Bucket([]byte("widgets")).NextSequence()
 			assert.NoError(t, err)
 			assert.Equal(t, seq, 1)
-			seq, err = tx.Bucket("widgets").NextSequence()
+			seq, err = tx.Bucket([]byte("widgets")).NextSequence()
 			assert.NoError(t, err)
 			assert.Equal(t, seq, 2)
 
 			// Buckets should be separate.
-			seq, err = tx.Bucket("woojits").NextSequence()
+			seq, err = tx.Bucket([]byte("woojits")).NextSequence()
 			assert.NoError(t, err)
 			assert.Equal(t, seq, 1)
 			return nil
@@ -125,31 +319,31 @@ func TestBucketNextSequence(t *testing.T) {
 }
 
 // Ensure that retrieving the next sequence on a read-only bucket returns an error.
-func TestBucketNextSequenceReadOnly(t *testing.T) {
+func TestBucket_NextSequence_ReadOnly(t *testing.T) {
 	withOpenDB(func(db *DB, path string) {
 		db.Update(func(tx *Tx) error {
-			tx.CreateBucket("widgets")
+			tx.CreateBucket([]byte("widgets"))
 			return nil
 		})
 		db.View(func(tx *Tx) error {
-			b := tx.Bucket("widgets")
+			b := tx.Bucket([]byte("widgets"))
 			i, err := b.NextSequence()
 			assert.Equal(t, i, 0)
-			assert.Equal(t, err, ErrBucketNotWritable)
+			assert.Equal(t, err, ErrTxNotWritable)
 			return nil
 		})
 	})
 }
 
 // Ensure that incrementing past the maximum sequence number will return an error.
-func TestBucketNextSequenceOverflow(t *testing.T) {
+func TestBucket_NextSequence_Overflow(t *testing.T) {
 	withOpenDB(func(db *DB, path string) {
 		db.Update(func(tx *Tx) error {
-			tx.CreateBucket("widgets")
+			tx.CreateBucket([]byte("widgets"))
 			return nil
 		})
 		db.Update(func(tx *Tx) error {
-			b := tx.Bucket("widgets")
+			b := tx.Bucket([]byte("widgets"))
 			b.bucket.sequence = uint64(maxInt)
 			seq, err := b.NextSequence()
 			assert.Equal(t, err, ErrSequenceOverflow)
@@ -159,17 +353,29 @@ func TestBucketNextSequenceOverflow(t *testing.T) {
 	})
 }
 
-// Ensure a database can loop over all key/value pairs in a bucket.
-func TestBucketForEach(t *testing.T) {
+// Ensure that retrieving the next sequence for a bucket on a closed database returns an error.
+func TestBucket_NextSequence_Closed(t *testing.T) {
+	withOpenDB(func(db *DB, path string) {
+		tx, _ := db.Begin(true)
+		tx.CreateBucket([]byte("widgets"))
+		b := tx.Bucket([]byte("widgets"))
+		tx.Rollback()
+		_, err := b.NextSequence()
+		assert.Equal(t, ErrTxClosed, err)
+	})
+}
+
+// Ensure a user can loop over all key/value pairs in a bucket.
+func TestBucket_ForEach(t *testing.T) {
 	withOpenDB(func(db *DB, path string) {
 		db.Update(func(tx *Tx) error {
-			tx.CreateBucket("widgets")
-			tx.Bucket("widgets").Put([]byte("foo"), []byte("0000"))
-			tx.Bucket("widgets").Put([]byte("baz"), []byte("0001"))
-			tx.Bucket("widgets").Put([]byte("bar"), []byte("0002"))
+			tx.CreateBucket([]byte("widgets"))
+			tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("0000"))
+			tx.Bucket([]byte("widgets")).Put([]byte("baz"), []byte("0001"))
+			tx.Bucket([]byte("widgets")).Put([]byte("bar"), []byte("0002"))
 
 			var index int
-			err := tx.Bucket("widgets").ForEach(func(k, v []byte) error {
+			err := tx.Bucket([]byte("widgets")).ForEach(func(k, v []byte) error {
 				switch index {
 				case 0:
 					assert.Equal(t, k, []byte("bar"))
@@ -192,16 +398,16 @@ func TestBucketForEach(t *testing.T) {
 }
 
 // Ensure a database can stop iteration early.
-func TestBucketForEachShortCircuit(t *testing.T) {
+func TestBucket_ForEach_ShortCircuit(t *testing.T) {
 	withOpenDB(func(db *DB, path string) {
 		db.Update(func(tx *Tx) error {
-			tx.CreateBucket("widgets")
-			tx.Bucket("widgets").Put([]byte("bar"), []byte("0000"))
-			tx.Bucket("widgets").Put([]byte("baz"), []byte("0000"))
-			tx.Bucket("widgets").Put([]byte("foo"), []byte("0000"))
+			tx.CreateBucket([]byte("widgets"))
+			tx.Bucket([]byte("widgets")).Put([]byte("bar"), []byte("0000"))
+			tx.Bucket([]byte("widgets")).Put([]byte("baz"), []byte("0000"))
+			tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("0000"))
 
 			var index int
-			err := tx.Bucket("widgets").ForEach(func(k, v []byte) error {
+			err := tx.Bucket([]byte("widgets")).ForEach(func(k, v []byte) error {
 				index++
 				if bytes.Equal(k, []byte("baz")) {
 					return errors.New("marker")
@@ -215,14 +421,26 @@ func TestBucketForEachShortCircuit(t *testing.T) {
 	})
 }
 
+// Ensure that looping over a bucket on a closed database returns an error.
+func TestBucket_ForEach_Closed(t *testing.T) {
+	withOpenDB(func(db *DB, path string) {
+		tx, _ := db.Begin(true)
+		tx.CreateBucket([]byte("widgets"))
+		b := tx.Bucket([]byte("widgets"))
+		tx.Rollback()
+		err := b.ForEach(func(k, v []byte) error { return nil })
+		assert.Equal(t, ErrTxClosed, err)
+	})
+}
+
 // Ensure that an error is returned when inserting with an empty key.
-func TestBucketPutEmptyKey(t *testing.T) {
+func TestBucket_Put_EmptyKey(t *testing.T) {
 	withOpenDB(func(db *DB, path string) {
 		db.Update(func(tx *Tx) error {
-			tx.CreateBucket("widgets")
-			err := tx.Bucket("widgets").Put([]byte(""), []byte("bar"))
+			tx.CreateBucket([]byte("widgets"))
+			err := tx.Bucket([]byte("widgets")).Put([]byte(""), []byte("bar"))
 			assert.Equal(t, err, ErrKeyRequired)
-			err = tx.Bucket("widgets").Put(nil, []byte("bar"))
+			err = tx.Bucket([]byte("widgets")).Put(nil, []byte("bar"))
 			assert.Equal(t, err, ErrKeyRequired)
 			return nil
 		})
@@ -230,11 +448,11 @@ func TestBucketPutEmptyKey(t *testing.T) {
 }
 
 // Ensure that an error is returned when inserting with a key that's too large.
-func TestBucketPutKeyTooLarge(t *testing.T) {
+func TestBucket_Put_KeyTooLarge(t *testing.T) {
 	withOpenDB(func(db *DB, path string) {
 		db.Update(func(tx *Tx) error {
-			tx.CreateBucket("widgets")
-			err := tx.Bucket("widgets").Put(make([]byte, 32769), []byte("bar"))
+			tx.CreateBucket([]byte("widgets"))
+			err := tx.Bucket([]byte("widgets")).Put(make([]byte, 32769), []byte("bar"))
 			assert.Equal(t, err, ErrKeyTooLarge)
 			return nil
 		})
@@ -242,54 +460,35 @@ func TestBucketPutKeyTooLarge(t *testing.T) {
 }
 
 // Ensure a bucket can calculate stats.
-func TestBucketStat(t *testing.T) {
-	if testing.Short() {
-		t.Skip("skipping test in short mode.")
-	}
-
+func TestBucket_Stat(t *testing.T) {
 	withOpenDB(func(db *DB, path string) {
 		db.Update(func(tx *Tx) error {
-			// Add bucket with lots of keys.
-			tx.CreateBucket("widgets")
-			b := tx.Bucket("widgets")
-			for i := 0; i < 100000; i++ {
-				b.Put([]byte(strconv.Itoa(i)), []byte(strconv.Itoa(i)))
-			}
-
 			// Add bucket with fewer keys but one big value.
-			tx.CreateBucket("woojits")
-			b = tx.Bucket("woojits")
+			assert.NoError(t, tx.CreateBucket([]byte("woojits")))
+			b := tx.Bucket([]byte("woojits"))
 			for i := 0; i < 500; i++ {
 				b.Put([]byte(strconv.Itoa(i)), []byte(strconv.Itoa(i)))
 			}
 			b.Put([]byte("really-big-value"), []byte(strings.Repeat("*", 10000)))
 
 			// Add a bucket that fits on a single root leaf.
-			tx.CreateBucket("whozawhats")
-			b = tx.Bucket("whozawhats")
+			assert.NoError(t, tx.CreateBucket([]byte("whozawhats")))
+			b = tx.Bucket([]byte("whozawhats"))
 			b.Put([]byte("foo"), []byte("bar"))
 
 			return nil
 		})
 		mustCheck(db)
 		db.View(func(tx *Tx) error {
-			b := tx.Bucket("widgets")
+			b := tx.Bucket([]byte("woojits"))
 			stat := b.Stat()
-			assert.Equal(t, stat.BranchPageCount, 15)
-			assert.Equal(t, stat.LeafPageCount, 1281)
-			assert.Equal(t, stat.OverflowPageCount, 0)
-			assert.Equal(t, stat.KeyCount, 100000)
-			assert.Equal(t, stat.MaxDepth, 3)
-
-			b = tx.Bucket("woojits")
-			stat = b.Stat()
 			assert.Equal(t, stat.BranchPageCount, 1)
 			assert.Equal(t, stat.LeafPageCount, 6)
 			assert.Equal(t, stat.OverflowPageCount, 2)
 			assert.Equal(t, stat.KeyCount, 501)
 			assert.Equal(t, stat.MaxDepth, 2)
 
-			b = tx.Bucket("whozawhats")
+			b = tx.Bucket([]byte("whozawhats"))
 			stat = b.Stat()
 			assert.Equal(t, stat.BranchPageCount, 0)
 			assert.Equal(t, stat.LeafPageCount, 1)
@@ -302,8 +501,38 @@ func TestBucketStat(t *testing.T) {
 	})
 }
 
+// Ensure a large bucket can calculate stats.
+func TestBucket_Stat_Large(t *testing.T) {
+	if testing.Short() {
+		t.Skip("skipping test in short mode.")
+	}
+
+	withOpenDB(func(db *DB, path string) {
+		db.Update(func(tx *Tx) error {
+			// Add bucket with lots of keys.
+			tx.CreateBucket([]byte("widgets"))
+			b := tx.Bucket([]byte("widgets"))
+			for i := 0; i < 100000; i++ {
+				b.Put([]byte(strconv.Itoa(i)), []byte(strconv.Itoa(i)))
+			}
+			return nil
+		})
+		mustCheck(db)
+		db.View(func(tx *Tx) error {
+			b := tx.Bucket([]byte("widgets"))
+			stat := b.Stat()
+			assert.Equal(t, stat.BranchPageCount, 15)
+			assert.Equal(t, stat.LeafPageCount, 1281)
+			assert.Equal(t, stat.OverflowPageCount, 0)
+			assert.Equal(t, stat.KeyCount, 100000)
+			assert.Equal(t, stat.MaxDepth, 3)
+			return nil
+		})
+	})
+}
+
 // Ensure that a bucket can write random keys and values across multiple transactions.
-func TestBucketPutSingle(t *testing.T) {
+func TestBucket_Put_Single(t *testing.T) {
 	if testing.Short() {
 		t.Skip("skipping test in short mode.")
 	}
@@ -314,11 +543,11 @@ func TestBucketPutSingle(t *testing.T) {
 		m := make(map[string][]byte)
 
 		db.Update(func(tx *Tx) error {
-			return tx.CreateBucket("widgets")
+			return tx.CreateBucket([]byte("widgets"))
 		})
 		for _, item := range items {
 			db.Update(func(tx *Tx) error {
-				if err := tx.Bucket("widgets").Put(item.Key, item.Value); err != nil {
+				if err := tx.Bucket([]byte("widgets")).Put(item.Key, item.Value); err != nil {
 					panic("put error: " + err.Error())
 				}
 				m[string(item.Key)] = item.Value
@@ -329,10 +558,10 @@ func TestBucketPutSingle(t *testing.T) {
 		db.View(func(tx *Tx) error {
 			i := 0
 			for k, v := range m {
-				value := tx.Bucket("widgets").Get([]byte(k))
+				value := tx.Bucket([]byte("widgets")).Get([]byte(k))
 				if !bytes.Equal(value, v) {
-					db.CopyFile("/tmp/bolt.put.single.db", 0666)
-					t.Fatalf("value mismatch [run %d] (%d of %d):\nkey: %x\ngot: %x\nexp: %x", index, i, len(m), []byte(k), value, v)
+					t.Logf("value mismatch [run %d] (%d of %d):\nkey: %x\ngot: %x\nexp: %x", index, i, len(m), []byte(k), value, v)
+					copyAndFailNow(t, db)
 				}
 				i++
 			}
@@ -347,11 +576,10 @@ func TestBucketPutSingle(t *testing.T) {
 		if err := quick.Check(f, qconfig()); err != nil {
 			t.Error(err)
 		}
-		fmt.Fprint(os.Stderr, "\n")
 	}
 }
 
 // Ensure that a transaction can insert multiple key/value pairs at once.
-func TestBucketPutMultiple(t *testing.T) {
+func TestBucket_Put_Multiple(t *testing.T) {
|
||||||
if testing.Short() {
|
if testing.Short() {
|
||||||
t.Skip("skipping test in short mode.")
|
t.Skip("skipping test in short mode.")
|
||||||
}
|
}
|
||||||
|
@ -360,10 +588,10 @@ func TestBucketPutMultiple(t *testing.T) {
|
||||||
withOpenDB(func(db *DB, path string) {
|
withOpenDB(func(db *DB, path string) {
|
||||||
// Bulk insert all values.
|
// Bulk insert all values.
|
||||||
db.Update(func(tx *Tx) error {
|
db.Update(func(tx *Tx) error {
|
||||||
return tx.CreateBucket("widgets")
|
return tx.CreateBucket([]byte("widgets"))
|
||||||
})
|
})
|
||||||
err := db.Update(func(tx *Tx) error {
|
err := db.Update(func(tx *Tx) error {
|
||||||
b := tx.Bucket("widgets")
|
b := tx.Bucket([]byte("widgets"))
|
||||||
for _, item := range items {
|
for _, item := range items {
|
||||||
assert.NoError(t, b.Put(item.Key, item.Value))
|
assert.NoError(t, b.Put(item.Key, item.Value))
|
||||||
}
|
}
|
||||||
|
@ -373,12 +601,11 @@ func TestBucketPutMultiple(t *testing.T) {
|
||||||
|
|
||||||
// Verify all items exist.
|
// Verify all items exist.
|
||||||
db.View(func(tx *Tx) error {
|
db.View(func(tx *Tx) error {
|
||||||
b := tx.Bucket("widgets")
|
b := tx.Bucket([]byte("widgets"))
|
||||||
for _, item := range items {
|
for _, item := range items {
|
||||||
value := b.Get(item.Key)
|
value := b.Get(item.Key)
|
||||||
if !assert.Equal(t, item.Value, value) {
|
if !assert.Equal(t, item.Value, value) {
|
||||||
db.CopyFile("/tmp/bolt.put.multiple.db", 0666)
|
copyAndFailNow(t, db)
|
||||||
t.FailNow()
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
|
@ -389,11 +616,10 @@ func TestBucketPutMultiple(t *testing.T) {
|
||||||
if err := quick.Check(f, qconfig()); err != nil {
|
if err := quick.Check(f, qconfig()); err != nil {
|
||||||
t.Error(err)
|
t.Error(err)
|
||||||
}
|
}
|
||||||
fmt.Fprint(os.Stderr, "\n")
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Ensure that a transaction can delete all key/value pairs and return to a single leaf page.
|
// Ensure that a transaction can delete all key/value pairs and return to a single leaf page.
|
||||||
func TestBucketDeleteQuick(t *testing.T) {
|
func TestBucket_Delete_Quick(t *testing.T) {
|
||||||
if testing.Short() {
|
if testing.Short() {
|
||||||
t.Skip("skipping test in short mode.")
|
t.Skip("skipping test in short mode.")
|
||||||
}
|
}
|
||||||
|
@ -402,10 +628,10 @@ func TestBucketDeleteQuick(t *testing.T) {
|
||||||
withOpenDB(func(db *DB, path string) {
|
withOpenDB(func(db *DB, path string) {
|
||||||
// Bulk insert all values.
|
// Bulk insert all values.
|
||||||
db.Update(func(tx *Tx) error {
|
db.Update(func(tx *Tx) error {
|
||||||
return tx.CreateBucket("widgets")
|
return tx.CreateBucket([]byte("widgets"))
|
||||||
})
|
})
|
||||||
err := db.Update(func(tx *Tx) error {
|
err := db.Update(func(tx *Tx) error {
|
||||||
b := tx.Bucket("widgets")
|
b := tx.Bucket([]byte("widgets"))
|
||||||
for _, item := range items {
|
for _, item := range items {
|
||||||
assert.NoError(t, b.Put(item.Key, item.Value))
|
assert.NoError(t, b.Put(item.Key, item.Value))
|
||||||
}
|
}
|
||||||
|
@ -416,13 +642,13 @@ func TestBucketDeleteQuick(t *testing.T) {
|
||||||
// Remove items one at a time and check consistency.
|
// Remove items one at a time and check consistency.
|
||||||
for i, item := range items {
|
for i, item := range items {
|
||||||
err := db.Update(func(tx *Tx) error {
|
err := db.Update(func(tx *Tx) error {
|
||||||
return tx.Bucket("widgets").Delete(item.Key)
|
return tx.Bucket([]byte("widgets")).Delete(item.Key)
|
||||||
})
|
})
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
|
|
||||||
// Anything before our deletion index should be nil.
|
// Anything before our deletion index should be nil.
|
||||||
db.View(func(tx *Tx) error {
|
db.View(func(tx *Tx) error {
|
||||||
b := tx.Bucket("widgets")
|
b := tx.Bucket([]byte("widgets"))
|
||||||
for j, exp := range items {
|
for j, exp := range items {
|
||||||
if j > i {
|
if j > i {
|
||||||
value := b.Get(exp.Key)
|
value := b.Get(exp.Key)
|
||||||
|
@ -445,5 +671,99 @@ func TestBucketDeleteQuick(t *testing.T) {
|
||||||
if err := quick.Check(f, qconfig()); err != nil {
|
if err := quick.Check(f, qconfig()); err != nil {
|
||||||
t.Error(err)
|
t.Error(err)
|
||||||
}
|
}
|
||||||
fmt.Fprint(os.Stderr, "\n")
|
}
|
||||||
|
|
||||||
|
func ExampleBucket_Put() {
|
||||||
|
// Open the database.
|
||||||
|
db, _ := Open(tempfile(), 0666)
|
||||||
|
defer os.Remove(db.Path())
|
||||||
|
defer db.Close()
|
||||||
|
|
||||||
|
// Start a write transaction.
|
||||||
|
db.Update(func(tx *Tx) error {
|
||||||
|
// Create a bucket.
|
||||||
|
tx.CreateBucket([]byte("widgets"))
|
||||||
|
|
||||||
|
// Set the value "bar" for the key "foo".
|
||||||
|
tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar"))
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
|
||||||
|
// Read value back in a different read-only transaction.
|
||||||
|
db.Update(func(tx *Tx) error {
|
||||||
|
value := tx.Bucket([]byte("widgets")).Get([]byte("foo"))
|
||||||
|
fmt.Printf("The value of 'foo' is: %s\n", string(value))
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
|
||||||
|
// Output:
|
||||||
|
// The value of 'foo' is: bar
|
||||||
|
}
|
||||||
|
|
||||||
|
func ExampleBucket_Delete() {
|
||||||
|
// Open the database.
|
||||||
|
db, _ := Open(tempfile(), 0666)
|
||||||
|
defer os.Remove(db.Path())
|
||||||
|
defer db.Close()
|
||||||
|
|
||||||
|
// Start a write transaction.
|
||||||
|
db.Update(func(tx *Tx) error {
|
||||||
|
// Create a bucket.
|
||||||
|
tx.CreateBucket([]byte("widgets"))
|
||||||
|
b := tx.Bucket([]byte("widgets"))
|
||||||
|
|
||||||
|
// Set the value "bar" for the key "foo".
|
||||||
|
b.Put([]byte("foo"), []byte("bar"))
|
||||||
|
|
||||||
|
// Retrieve the key back from the database and verify it.
|
||||||
|
value := b.Get([]byte("foo"))
|
||||||
|
fmt.Printf("The value of 'foo' was: %s\n", string(value))
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
|
||||||
|
// Delete the key in a different write transaction.
|
||||||
|
db.Update(func(tx *Tx) error {
|
||||||
|
return tx.Bucket([]byte("widgets")).Delete([]byte("foo"))
|
||||||
|
})
|
||||||
|
|
||||||
|
// Retrieve the key again.
|
||||||
|
db.View(func(tx *Tx) error {
|
||||||
|
value := tx.Bucket([]byte("widgets")).Get([]byte("foo"))
|
||||||
|
if value == nil {
|
||||||
|
fmt.Printf("The value of 'foo' is now: nil\n")
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
|
||||||
|
// Output:
|
||||||
|
// The value of 'foo' was: bar
|
||||||
|
// The value of 'foo' is now: nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func ExampleBucket_ForEach() {
|
||||||
|
// Open the database.
|
||||||
|
db, _ := Open(tempfile(), 0666)
|
||||||
|
defer os.Remove(db.Path())
|
||||||
|
defer db.Close()
|
||||||
|
|
||||||
|
// Insert data into a bucket.
|
||||||
|
db.Update(func(tx *Tx) error {
|
||||||
|
tx.CreateBucket([]byte("animals"))
|
||||||
|
b := tx.Bucket([]byte("animals"))
|
||||||
|
b.Put([]byte("dog"), []byte("fun"))
|
||||||
|
b.Put([]byte("cat"), []byte("lame"))
|
||||||
|
b.Put([]byte("liger"), []byte("awesome"))
|
||||||
|
|
||||||
|
// Iterate over items in sorted key order.
|
||||||
|
b.ForEach(func(k, v []byte) error {
|
||||||
|
fmt.Printf("A %s is %s.\n", string(k), string(v))
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
|
||||||
|
// Output:
|
||||||
|
// A cat is lame.
|
||||||
|
// A dog is fun.
|
||||||
|
// A liger is awesome.
|
||||||
}
|
}
|
||||||
|
|
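The reworked tests above all exercise the new []byte-keyed API. As a quick orientation for readers of this diff, here is a minimal sketch (not part of the commit) of the nested-bucket calls the tests rely on, namely Tx.CreateBucket/Tx.Bucket plus Bucket.CreateBucket/Bucket.Bucket; the bucket names "accounts" and "user-1" are illustrative, not from the commit:

	// Sketch only: create a bucket inside another bucket and write to it.
	db.Update(func(tx *Tx) error {
		if err := tx.CreateBucket([]byte("accounts")); err != nil {
			return err
		}
		parent := tx.Bucket([]byte("accounts"))
		if err := parent.CreateBucket([]byte("user-1")); err != nil { // nested bucket
			return err
		}
		return parent.Bucket([]byte("user-1")).Put([]byte("email"), []byte("x@example.com"))
	})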
110	buckets.go

@ -1,110 +0,0 @@
-package bolt
-
-import (
-	"sort"
-	"unsafe"
-)
-
-// buckets represents a in-memory buckets page.
-type buckets struct {
-	pgid  pgid
-	items map[string]*bucket
-}
-
-// size returns the size of the page after serialization.
-func (b *buckets) size() int {
-	var size = pageHeaderSize
-	for key := range b.items {
-		size += int(unsafe.Sizeof(bucket{})) + len(key)
-	}
-	return size
-}
-
-// get retrieves a bucket by name.
-func (b *buckets) get(key string) *bucket {
-	return b.items[key]
-}
-
-// put sets a new value for a bucket.
-func (b *buckets) put(key string, item *bucket) {
-	b.items[key] = item
-}
-
-// del deletes a bucket by name.
-func (b *buckets) del(key string) {
-	if item := b.items[key]; item != nil {
-		delete(b.items, key)
-	}
-}
-
-// read initializes the data from an on-disk page.
-func (b *buckets) read(p *page) {
-	b.pgid = p.id
-	b.items = make(map[string]*bucket)
-
-	var items []*bucket
-	var keys []string
-
-	// Read items.
-	nodes := (*[maxNodesPerPage]bucket)(unsafe.Pointer(&p.ptr))
-	for i := 0; i < int(p.count); i++ {
-		node := &nodes[i]
-		items = append(items, node)
-	}
-
-	// Read keys.
-	buf := (*[maxAllocSize]byte)(unsafe.Pointer(&nodes[p.count]))[:]
-	for i := 0; i < int(p.count); i++ {
-		size := int(buf[0])
-		buf = buf[1:]
-		keys = append(keys, string(buf[:size]))
-		buf = buf[size:]
-	}
-
-	// Associate keys and items.
-	for index, key := range keys {
-		b.items[key] = &bucket{
-			root:     items[index].root,
-			sequence: items[index].sequence,
-		}
-	}
-}
-
-// write writes the items onto a page.
-func (b *buckets) write(p *page) {
-	// Initialize page.
-	p.flags |= bucketsPageFlag
-	p.count = uint16(len(b.items))
-
-	// Sort keys.
-	var keys []string
-	for key := range b.items {
-		keys = append(keys, key)
-	}
-	sort.StringSlice(keys).Sort()
-
-	// Write each bucket to the page.
-	items := (*[maxNodesPerPage]bucket)(unsafe.Pointer(&p.ptr))
-	for index, key := range keys {
-		items[index] = *b.items[key]
-	}
-
-	// Write each key to the page.
-	buf := (*[maxAllocSize]byte)(unsafe.Pointer(&items[p.count]))[:]
-	for _, key := range keys {
-		buf[0] = byte(len(key))
-		buf = buf[1:]
-		copy(buf, []byte(key))
-		buf = buf[len(key):]
-	}
-}
-
-// updateRoot finds a bucket by root id and then updates it to point to a new root.
-func (b *buckets) updateRoot(oldid, newid pgid) {
-	for _, b := range b.items {
-		if b.root == oldid {
-			b.root = newid
-			return
-		}
-	}
-}
@ -1,70 +0,0 @@
-package bolt
-
-import (
-	"testing"
-	"unsafe"
-
-	"github.com/stretchr/testify/assert"
-)
-
-// Ensure that a buckets page can set a bucket.
-func TestBucketsPut(t *testing.T) {
-	b := &buckets{items: make(map[string]*bucket)}
-	b.put("foo", &bucket{root: 2})
-	b.put("bar", &bucket{root: 3})
-	b.put("foo", &bucket{root: 4})
-	assert.Equal(t, len(b.items), 2)
-	assert.Equal(t, b.get("foo").root, pgid(4))
-	assert.Equal(t, b.get("bar").root, pgid(3))
-	assert.Nil(t, b.get("no_such_bucket"))
-}
-
-// Ensure that a buckets page can deserialize from a page.
-func TestBucketsRead(t *testing.T) {
-	// Create a page.
-	var buf [4096]byte
-	page := (*page)(unsafe.Pointer(&buf[0]))
-	page.count = 2
-
-	// Insert 2 items at the beginning.
-	s := (*[3]bucket)(unsafe.Pointer(&page.ptr))
-	s[0] = bucket{root: 3}
-	s[1] = bucket{root: 4}
-
-	// Write data for the nodes at the end.
-	data := (*[4096]byte)(unsafe.Pointer(&s[2]))
-	data[0] = 3
-	copy(data[1:], []byte("bar"))
-	data[4] = 10
-	copy(data[5:], []byte("helloworld"))
-
-	// Deserialize page into a buckets page.
-	b := &buckets{items: make(map[string]*bucket)}
-	b.read(page)
-
-	// Check that there are two items with correct data.
-	assert.Equal(t, len(b.items), 2)
-	assert.Equal(t, b.get("bar").root, pgid(3))
-	assert.Equal(t, b.get("helloworld").root, pgid(4))
-}
-
-// Ensure that a buckets page can serialize itself.
-func TestBucketsWrite(t *testing.T) {
-	b := &buckets{items: make(map[string]*bucket)}
-	b.put("foo", &bucket{root: 2})
-	b.put("bar", &bucket{root: 3})
-
-	// Write it to a page.
-	var buf [4096]byte
-	p := (*page)(unsafe.Pointer(&buf[0]))
-	b.write(p)
-
-	// Read the page back in.
-	b2 := &buckets{items: make(map[string]*bucket)}
-	b2.read(p)
-
-	// Check that the two pages are the same.
-	assert.Equal(t, len(b.items), 2)
-	assert.Equal(t, b.get("foo").root, pgid(2))
-	assert.Equal(t, b.get("bar").root, pgid(3))
-}
@ -21,10 +21,10 @@ func Buckets(path string) {
 	defer db.Close()

 	err = db.View(func(tx *bolt.Tx) error {
-		for _, b := range tx.Buckets() {
-			println(b.Name())
-		}
-		return nil
+		return tx.ForEach(func(name []byte, _ *bolt.Bucket) error {
+			println(string(name))
+			return nil
+		})
 	})
 	if err != nil {
 		fatal(err)
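With the buckets page gone there is no Buckets() slice to range over; the CLI change above switches to Tx.ForEach. A minimal usage sketch of that callback, assuming only the signature visible in this hunk:

	// Sketch only: print each top-level bucket name.
	err := db.View(func(tx *bolt.Tx) error {
		return tx.ForEach(func(name []byte, _ *bolt.Bucket) error {
			fmt.Println(string(name)) // callback runs once per top-level bucket
			return nil
		})
	})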
@ -22,7 +22,7 @@ func Get(path, name, key string) {

 	err = db.View(func(tx *bolt.Tx) error {
 		// Find bucket.
-		b := tx.Bucket(name)
+		b := tx.Bucket([]byte(name))
 		if b == nil {
 			fatalf("bucket not found: %s", name)
 			return nil
@ -22,7 +22,7 @@ func Keys(path, name string) {

 	err = db.View(func(tx *bolt.Tx) error {
 		// Find bucket.
-		b := tx.Bucket(name)
+		b := tx.Bucket([]byte(name))
 		if b == nil {
 			fatalf("bucket not found: %s", name)
 			return nil
@ -22,7 +22,7 @@ func Set(path, name, key, value string) {

 	err = db.Update(func(tx *bolt.Tx) error {
 		// Find bucket.
-		b := tx.Bucket(name)
+		b := tx.Bucket([]byte(name))
 		if b == nil {
 			fatalf("bucket not found: %s", name)
 			return nil
5	const.go

@ -1,6 +1,6 @@
 package bolt

-const version = 1
+const version = 2

 const (
 	maxUint = ^uint(0)

@ -10,9 +10,6 @@ const (
 )

 const (
-	// MaxBucketNameSize is the maximum length of a bucket name, in bytes.
-	MaxBucketNameSize = 255
-
 	// MaxKeySize is the maximum length of a key, in bytes.
 	MaxKeySize = 32768
80	cursor.go

@ -8,39 +8,47 @@ import (
 // Cursor represents an iterator that can traverse over all key/value pairs in a bucket in sorted order.
 // Cursors can be obtained from a transaction and are valid as long as the transaction is open.
 type Cursor struct {
-	tx    *Tx
-	root  pgid
-	stack []elemRef
+	bucket *Bucket
+	stack  []elemRef
 }

 // First moves the cursor to the first item in the bucket and returns its key and value.
 // If the bucket is empty then a nil key and value are returned.
 func (c *Cursor) First() (key []byte, value []byte) {
-	_assert(c.tx.db != nil, "tx closed")
+	_assert(c.bucket.tx.db != nil, "tx closed")
 	c.stack = c.stack[:0]
-	p, n := c.tx.pageNode(c.root)
+	p, n := c.bucket.pageNode(c.bucket.root)
 	c.stack = append(c.stack, elemRef{page: p, node: n, index: 0})
 	c.first()
-	return c.keyValue()
+	k, v, flags := c.keyValue()
+	if (flags & uint32(bucketLeafFlag)) != 0 {
+		return k, nil
+	}
+	return k, v
 }

 // Last moves the cursor to the last item in the bucket and returns its key and value.
 // If the bucket is empty then a nil key and value are returned.
 func (c *Cursor) Last() (key []byte, value []byte) {
-	_assert(c.tx.db != nil, "tx closed")
+	_assert(c.bucket.tx.db != nil, "tx closed")
 	c.stack = c.stack[:0]
-	p, n := c.tx.pageNode(c.root)
+	p, n := c.bucket.pageNode(c.bucket.root)
 	ref := elemRef{page: p, node: n}
 	ref.index = ref.count() - 1
 	c.stack = append(c.stack, ref)
 	c.last()
-	return c.keyValue()
+	k, v, flags := c.keyValue()
+	if (flags & uint32(bucketLeafFlag)) != 0 {
+		return k, nil
+	}
+	return k, v
 }

 // Next moves the cursor to the next item in the bucket and returns its key and value.
 // If the cursor is at the end of the bucket then a nil key and value are returned.
 func (c *Cursor) Next() (key []byte, value []byte) {
-	_assert(c.tx.db != nil, "tx closed")
+	_assert(c.bucket.tx.db != nil, "tx closed")

 	// Attempt to move over one element until we're successful.
 	// Move up the stack as we hit the end of each page in our stack.

@ -60,13 +68,17 @@ func (c *Cursor) Next() (key []byte, value []byte) {

 	// Move down the stack to find the first element of the first leaf under this branch.
 	c.first()
-	return c.keyValue()
+	k, v, flags := c.keyValue()
+	if (flags & uint32(bucketLeafFlag)) != 0 {
+		return k, nil
+	}
+	return k, v
 }

 // Prev moves the cursor to the previous item in the bucket and returns its key and value.
 // If the cursor is at the beginning of the bucket then a nil key and value are returned.
 func (c *Cursor) Prev() (key []byte, value []byte) {
-	_assert(c.tx.db != nil, "tx closed")
+	_assert(c.bucket.tx.db != nil, "tx closed")

 	// Attempt to move back one element until we're successful.
 	// Move up the stack as we hit the beginning of each page in our stack.

@ -86,25 +98,43 @@ func (c *Cursor) Prev() (key []byte, value []byte) {

 	// Move down the stack to find the last element of the last leaf under this branch.
 	c.last()
-	return c.keyValue()
+	k, v, flags := c.keyValue()
+	if (flags & uint32(bucketLeafFlag)) != 0 {
+		return k, nil
+	}
+	return k, v
 }

 // Seek moves the cursor to a given key and returns it.
 // If the key does not exist then the next key is used. If no keys
 // follow, a nil value is returned.
 func (c *Cursor) Seek(seek []byte) (key []byte, value []byte) {
-	_assert(c.tx.db != nil, "tx closed")
+	k, v, flags := c.seek(seek)
+	if k == nil {
+		return nil, nil
+	} else if (flags & uint32(bucketLeafFlag)) != 0 {
+		return k, nil
+	}
+	return k, v
+}
+
+// seek moves the cursor to a given key and returns it.
+// If the key does not exist then the next key is used. If no keys
+// follow, a nil value is returned.
+func (c *Cursor) seek(seek []byte) (key []byte, value []byte, flags uint32) {
+	_assert(c.bucket.tx.db != nil, "tx closed")

 	// Start from root page/node and traverse to correct page.
 	c.stack = c.stack[:0]
-	c.search(seek, c.root)
+	c.search(seek, c.bucket.root)
 	ref := &c.stack[len(c.stack)-1]

 	// If the cursor is pointing to the end of page/node then return nil.
 	if ref.index >= ref.count() {
-		return nil, nil
+		return nil, nil, 0
 	}

+	// If this is a bucket then return a nil value.
 	return c.keyValue()
 }

@ -124,7 +154,7 @@ func (c *Cursor) first() {
 		} else {
 			pgid = ref.page.branchPageElement(uint16(ref.index)).pgid
 		}
-		p, n := c.tx.pageNode(pgid)
+		p, n := c.bucket.pageNode(pgid)
 		c.stack = append(c.stack, elemRef{page: p, node: n, index: 0})
 	}
 }

@ -145,7 +175,7 @@ func (c *Cursor) last() {
 		} else {
 			pgid = ref.page.branchPageElement(uint16(ref.index)).pgid
 		}
-		p, n := c.tx.pageNode(pgid)
+		p, n := c.bucket.pageNode(pgid)

 		var nextRef = elemRef{page: p, node: n}
 		nextRef.index = nextRef.count() - 1

@ -155,7 +185,7 @@ func (c *Cursor) last() {

 // search recursively performs a binary search against a given page/node until it finds a given key.
 func (c *Cursor) search(key []byte, pgid pgid) {
-	p, n := c.tx.pageNode(pgid)
+	p, n := c.bucket.pageNode(pgid)
 	if p != nil {
 		_assert((p.flags&(branchPageFlag|leafPageFlag)) != 0, "invalid page type: "+p.typ())
 	}

@ -241,25 +271,25 @@ func (c *Cursor) nsearch(key []byte) {
 }

 // keyValue returns the key and value of the current leaf element.
-func (c *Cursor) keyValue() ([]byte, []byte) {
+func (c *Cursor) keyValue() ([]byte, []byte, uint32) {
 	ref := &c.stack[len(c.stack)-1]
 	if ref.count() == 0 || ref.index >= ref.count() {
-		return nil, nil
+		return nil, nil, 0
 	}

 	// Retrieve value from node.
 	if ref.node != nil {
 		inode := &ref.node.inodes[ref.index]
-		return inode.key, inode.value
+		return inode.key, inode.value, inode.flags
 	}

 	// Or retrieve value from page.
 	elem := ref.page.leafPageElement(uint16(ref.index))
-	return elem.key(), elem.value()
+	return elem.key(), elem.value(), elem.flags
 }

 // node returns the node that the cursor is currently positioned on.
-func (c *Cursor) node(tx *Tx) *node {
+func (c *Cursor) node() *node {
 	_assert(len(c.stack) > 0, "accessing a node with a zero-length cursor stack")

 	// If the top of the stack is a leaf node then just return it.

@ -270,7 +300,7 @@ func (c *Cursor) node(tx *Tx) *node {
 	// Start from root and traverse down the hierarchy.
 	var n = c.stack[0].node
 	if n == nil {
-		n = tx.node(c.stack[0].page.id, nil)
+		n = c.bucket.node(c.stack[0].page.id, nil)
 	}
 	for _, ref := range c.stack[:len(c.stack)-1] {
 		_assert(!n.isLeaf, "expected branch node")
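The bucketLeafFlag branches added above mean that, from a caller's point of view, a nested bucket shows up during iteration as its key with a nil value. A minimal sketch of how client code can use that (the bucket name "widgets" is illustrative):

	// Sketch only: separate plain keys from nested buckets while scanning.
	db.View(func(tx *Tx) error {
		c := tx.Bucket([]byte("widgets")).Cursor()
		for k, v := c.First(); k != nil; k, v = c.Next() {
			if v == nil {
				fmt.Printf("nested bucket: %s\n", k) // nil value marks a sub-bucket
			} else {
				fmt.Printf("key: %s = %s\n", k, v)
			}
		}
		return nil
	})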
@ -0,0 +1,307 @@
+package bolt
+
+import (
+	"sort"
+	"testing"
+	"testing/quick"
+
+	"github.com/stretchr/testify/assert"
+)
+
+// Ensure that a Tx cursor can seek to the appropriate keys.
+func TestCursor_Seek(t *testing.T) {
+	withOpenDB(func(db *DB, path string) {
+		db.Update(func(tx *Tx) error {
+			assert.NoError(t, tx.CreateBucket([]byte("widgets")))
+			b := tx.Bucket([]byte("widgets"))
+			assert.NoError(t, b.Put([]byte("foo"), []byte("0001")))
+			assert.NoError(t, b.Put([]byte("bar"), []byte("0002")))
+			assert.NoError(t, b.Put([]byte("baz"), []byte("0003")))
+			assert.NoError(t, b.CreateBucket([]byte("bkt")))
+			return nil
+		})
+		db.View(func(tx *Tx) error {
+			c := tx.Bucket([]byte("widgets")).Cursor()
+
+			// Exact match should go to the key.
+			k, v := c.Seek([]byte("bar"))
+			assert.Equal(t, []byte("bar"), k)
+			assert.Equal(t, []byte("0002"), v)
+
+			// Inexact match should go to the next key.
+			k, v = c.Seek([]byte("bas"))
+			assert.Equal(t, []byte("baz"), k)
+			assert.Equal(t, []byte("0003"), v)
+
+			// Low key should go to the first key.
+			k, v = c.Seek([]byte(""))
+			assert.Equal(t, []byte("bar"), k)
+			assert.Equal(t, []byte("0002"), v)
+
+			// High key should return no key.
+			k, v = c.Seek([]byte("zzz"))
+			assert.Nil(t, k)
+			assert.Nil(t, v)
+
+			// Buckets should return their key but no value.
+			k, v = c.Seek([]byte("bkt"))
+			assert.Equal(t, []byte("bkt"), k)
+			assert.Nil(t, v)
+
+			return nil
+		})
+	})
+}
+
+// Ensure that a cursor can iterate over an empty bucket without error.
+func TestCursor_EmptyBucket(t *testing.T) {
+	withOpenDB(func(db *DB, path string) {
+		db.Update(func(tx *Tx) error {
+			return tx.CreateBucket([]byte("widgets"))
+		})
+		db.View(func(tx *Tx) error {
+			c := tx.Bucket([]byte("widgets")).Cursor()
+			k, v := c.First()
+			assert.Nil(t, k)
+			assert.Nil(t, v)
+			return nil
+		})
+	})
+}
+
+// Ensure that a Tx cursor can reverse iterate over an empty bucket without error.
+func TestCursor_EmptyBucketReverse(t *testing.T) {
+	withOpenDB(func(db *DB, path string) {
+		db.Update(func(tx *Tx) error {
+			return tx.CreateBucket([]byte("widgets"))
+		})
+		db.View(func(tx *Tx) error {
+			c := tx.Bucket([]byte("widgets")).Cursor()
+			k, v := c.Last()
+			assert.Nil(t, k)
+			assert.Nil(t, v)
+			return nil
+		})
+	})
+}
+
+// Ensure that a Tx cursor can iterate over a single root with a couple elements.
+func TestCursor_LeafRoot(t *testing.T) {
+	withOpenDB(func(db *DB, path string) {
+		db.Update(func(tx *Tx) error {
+			tx.CreateBucket([]byte("widgets"))
+			tx.Bucket([]byte("widgets")).Put([]byte("baz"), []byte{})
+			tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte{0})
+			tx.Bucket([]byte("widgets")).Put([]byte("bar"), []byte{1})
+			return nil
+		})
+		tx, _ := db.Begin(false)
+		c := tx.Bucket([]byte("widgets")).Cursor()
+
+		k, v := c.First()
+		assert.Equal(t, string(k), "bar")
+		assert.Equal(t, v, []byte{1})
+
+		k, v = c.Next()
+		assert.Equal(t, string(k), "baz")
+		assert.Equal(t, v, []byte{})
+
+		k, v = c.Next()
+		assert.Equal(t, string(k), "foo")
+		assert.Equal(t, v, []byte{0})
+
+		k, v = c.Next()
+		assert.Nil(t, k)
+		assert.Nil(t, v)
+
+		k, v = c.Next()
+		assert.Nil(t, k)
+		assert.Nil(t, v)
+
+		tx.Rollback()
+	})
+}
+
+// Ensure that a Tx cursor can iterate in reverse over a single root with a couple elements.
+func TestCursor_LeafRootReverse(t *testing.T) {
+	withOpenDB(func(db *DB, path string) {
+		db.Update(func(tx *Tx) error {
+			tx.CreateBucket([]byte("widgets"))
+			tx.Bucket([]byte("widgets")).Put([]byte("baz"), []byte{})
+			tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte{0})
+			tx.Bucket([]byte("widgets")).Put([]byte("bar"), []byte{1})
+			return nil
+		})
+		tx, _ := db.Begin(false)
+		c := tx.Bucket([]byte("widgets")).Cursor()
+
+		k, v := c.Last()
+		assert.Equal(t, string(k), "foo")
+		assert.Equal(t, v, []byte{0})
+
+		k, v = c.Prev()
+		assert.Equal(t, string(k), "baz")
+		assert.Equal(t, v, []byte{})
+
+		k, v = c.Prev()
+		assert.Equal(t, string(k), "bar")
+		assert.Equal(t, v, []byte{1})
+
+		k, v = c.Prev()
+		assert.Nil(t, k)
+		assert.Nil(t, v)
+
+		k, v = c.Prev()
+		assert.Nil(t, k)
+		assert.Nil(t, v)
+
+		tx.Rollback()
+	})
+}
+
+// Ensure that a Tx cursor can restart from the beginning.
+func TestCursor_Restart(t *testing.T) {
+	withOpenDB(func(db *DB, path string) {
+		db.Update(func(tx *Tx) error {
+			tx.CreateBucket([]byte("widgets"))
+			tx.Bucket([]byte("widgets")).Put([]byte("bar"), []byte{})
+			tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte{})
+			return nil
+		})
+
+		tx, _ := db.Begin(false)
+		c := tx.Bucket([]byte("widgets")).Cursor()
+
+		k, _ := c.First()
+		assert.Equal(t, string(k), "bar")
+
+		k, _ = c.Next()
+		assert.Equal(t, string(k), "foo")
+
+		k, _ = c.First()
+		assert.Equal(t, string(k), "bar")
+
+		k, _ = c.Next()
+		assert.Equal(t, string(k), "foo")
+
+		tx.Rollback()
+	})
+}
+
+// Ensure that a Tx can iterate over all elements in a bucket.
+func TestCursor_Iterate(t *testing.T) {
+	f := func(items testdata) bool {
+		withOpenDB(func(db *DB, path string) {
+			// Bulk insert all values.
+			tx, _ := db.Begin(true)
+			tx.CreateBucket([]byte("widgets"))
+			b := tx.Bucket([]byte("widgets"))
+			for _, item := range items {
+				assert.NoError(t, b.Put(item.Key, item.Value))
+			}
+			assert.NoError(t, tx.Commit())
+
+			// Sort test data.
+			sort.Sort(items)
+
+			// Iterate over all items and check consistency.
+			var index = 0
+			tx, _ = db.Begin(false)
+			c := tx.Bucket([]byte("widgets")).Cursor()
+			for k, v := c.First(); k != nil && index < len(items); k, v = c.Next() {
+				assert.Equal(t, k, items[index].Key)
+				assert.Equal(t, v, items[index].Value)
+				index++
+			}
+			assert.Equal(t, len(items), index)
+			tx.Rollback()
+		})
+		return true
+	}
+	if err := quick.Check(f, qconfig()); err != nil {
+		t.Error(err)
+	}
+}
+
+// Ensure that a transaction can iterate over all elements in a bucket in reverse.
+func TestCursor_Iterate_Reverse(t *testing.T) {
+	f := func(items testdata) bool {
+		withOpenDB(func(db *DB, path string) {
+			// Bulk insert all values.
+			tx, _ := db.Begin(true)
+			tx.CreateBucket([]byte("widgets"))
+			b := tx.Bucket([]byte("widgets"))
+			for _, item := range items {
+				assert.NoError(t, b.Put(item.Key, item.Value))
+			}
+			assert.NoError(t, tx.Commit())
+
+			// Sort test data.
+			sort.Sort(revtestdata(items))
+
+			// Iterate over all items and check consistency.
+			var index = 0
+			tx, _ = db.Begin(false)
+			c := tx.Bucket([]byte("widgets")).Cursor()
+			for k, v := c.Last(); k != nil && index < len(items); k, v = c.Prev() {
+				assert.Equal(t, k, items[index].Key)
+				assert.Equal(t, v, items[index].Value)
+				index++
+			}
+			assert.Equal(t, len(items), index)
+			tx.Rollback()
+		})
+		return true
+	}
+	if err := quick.Check(f, qconfig()); err != nil {
+		t.Error(err)
+	}
+}
+
+// Ensure that a Tx cursor can iterate over subbuckets.
+func TestCursor_Iterate_BucketsOnly(t *testing.T) {
+	withOpenDB(func(db *DB, path string) {
+		db.Update(func(tx *Tx) error {
+			assert.NoError(t, tx.CreateBucket([]byte("widgets")))
+			b := tx.Bucket([]byte("widgets"))
+			assert.NoError(t, b.CreateBucket([]byte("foo")))
+			assert.NoError(t, b.CreateBucket([]byte("bar")))
+			assert.NoError(t, b.CreateBucket([]byte("baz")))
+			return nil
+		})
+		db.View(func(tx *Tx) error {
+			var names []string
+			c := tx.Bucket([]byte("widgets")).Cursor()
+			for k, v := c.First(); k != nil; k, v = c.Next() {
+				names = append(names, string(k))
+				assert.Nil(t, v)
+			}
+			assert.Equal(t, names, []string{"bar", "baz", "foo"})
+			return nil
+		})
+	})
+}
+
+// Ensure that a Tx cursor can reverse iterate over subbuckets.
+func TestCursor_Iterate_BucketsOnly_Reverse(t *testing.T) {
+	withOpenDB(func(db *DB, path string) {
+		db.Update(func(tx *Tx) error {
+			assert.NoError(t, tx.CreateBucket([]byte("widgets")))
+			b := tx.Bucket([]byte("widgets"))
+			assert.NoError(t, b.CreateBucket([]byte("foo")))
+			assert.NoError(t, b.CreateBucket([]byte("bar")))
+			assert.NoError(t, b.CreateBucket([]byte("baz")))
+			return nil
+		})
+		db.View(func(tx *Tx) error {
+			var names []string
+			c := tx.Bucket([]byte("widgets")).Cursor()
+			for k, v := c.Last(); k != nil; k, v = c.Prev() {
+				names = append(names, string(k))
+				assert.Nil(t, v)
+			}
+			assert.Equal(t, names, []string{"foo", "baz", "bar"})
+			return nil
+		})
+	})
+}
107	db.go

@ -45,6 +45,7 @@ type DB struct {
 	rwlock   sync.Mutex   // Allows only one writer at a time.
 	metalock sync.Mutex   // Protects meta page access.
 	mmaplock sync.RWMutex // Protects mmap access during remapping.
+	statlock sync.RWMutex // Protects stats access.

 	ops struct {
 		writeAt func(b []byte, off int64) (n int, err error)

@ -133,7 +134,7 @@ func (db *DB) mmap(minsz int) error {

 	// Dereference all mmap references before unmapping.
 	if db.rwtx != nil {
-		db.rwtx.dereference()
+		db.rwtx.root.dereference()
 	}

 	// Unmap existing data before continuing.

@ -224,7 +225,7 @@ func (db *DB) init() error {
 		m.pageSize = uint32(db.pageSize)
 		m.version = version
 		m.freelist = 2
-		m.buckets = 3
+		m.root = bucket{root: 3}
 		m.pgid = 4
 		m.txid = txid(i)
 	}

@ -238,7 +239,7 @@ func (db *DB) init() error {
 	// Write an empty leaf page at page 4.
 	p = db.pageInBuffer(buf[:], pgid(3))
 	p.id = pgid(3)
-	p.flags = bucketsPageFlag
+	p.flags = leafPageFlag
 	p.count = 0

 	// Write the buffer to our data file.

@ -305,16 +306,18 @@ func (db *DB) Begin(writable bool) (*Tx, error) {
 }

 func (db *DB) beginTx() (*Tx, error) {
-	db.metalock.Lock()
-	defer db.metalock.Unlock()
-
 	// Obtain a read-only lock on the mmap. When the mmap is remapped it will
 	// obtain a write lock so all transactions must finish before it can be
 	// remapped.
 	db.mmaplock.RLock()

+	// Lock the meta pages while we initialize the transaction.
+	db.metalock.Lock()
+	defer db.metalock.Unlock()
+
 	// Exit if the database is not open yet.
 	if !db.opened {
+		db.mmaplock.RUnlock()
 		return nil, ErrDatabaseNotOpen
 	}

@ -329,12 +332,15 @@ func (db *DB) beginTx() (*Tx, error) {
 }

 func (db *DB) beginRWTx() (*Tx, error) {
+	// Obtain writer lock. This is released by the transaction when it closes.
+	// This enforces only one writer transaction at a time.
+	db.rwlock.Lock()
+
+	// Once we have the writer lock then we can lock the meta pages so that
+	// we can set up the transaction.
 	db.metalock.Lock()
 	defer db.metalock.Unlock()

-	// Obtain writer lock. This is released by the transaction when it closes.
-	db.rwlock.Lock()
-
 	// Exit if the database is not open yet.
 	if !db.opened {
 		db.rwlock.Unlock()

@ -363,7 +369,6 @@ func (db *DB) beginRWTx() (*Tx, error) {
 // removeTx removes a transaction from the database.
 func (db *DB) removeTx(tx *Tx) {
 	db.metalock.Lock()
-	defer db.metalock.Unlock()

 	// Release the read lock on the mmap.
 	db.mmaplock.RUnlock()

@ -376,8 +381,13 @@ func (db *DB) removeTx(tx *Tx) {
 		}
 	}

+	// Unlock the meta pages.
+	db.metalock.Unlock()
+
 	// Merge statistics.
+	db.statlock.Lock()
 	db.stats.TxStats.add(&tx.stats)
+	db.statlock.Unlock()
 }

 // Update executes a function within the context of a read-write managed transaction.

@ -497,8 +507,8 @@ func (db *DB) CopyFile(path string, mode os.FileMode) error {
 // Stats retrieves ongoing performance stats for the database.
 // This is only updated when a transaction closes.
 func (db *DB) Stats() Stats {
-	db.metalock.Lock()
-	defer db.metalock.Unlock()
+	db.statlock.RLock()
+	defer db.statlock.RUnlock()
 	return db.stats
 }

@ -510,40 +520,14 @@ func (db *DB) Check() error {

 	// Track every reachable page.
 	reachable := make(map[pgid]*page)
-	reachable[0] = tx.page(0) // meta0
-	reachable[1] = tx.page(1) // meta1
-	for i := uint32(0); i <= tx.page(tx.meta.buckets).overflow; i++ {
-		reachable[tx.meta.buckets+pgid(i)] = tx.page(tx.meta.buckets)
-	}
-	for i := uint32(0); i <= tx.page(tx.meta.freelist).overflow; i++ {
-		reachable[tx.meta.freelist+pgid(i)] = tx.page(tx.meta.freelist)
-	}
+	reachable[0] = db.page(0) // meta0
+	reachable[1] = db.page(1) // meta1
+	for i := uint32(0); i <= db.page(tx.meta.freelist).overflow; i++ {
+		reachable[tx.meta.freelist+pgid(i)] = db.page(tx.meta.freelist)
+	}

-	// Check each reachable page within each bucket.
-	for _, bucket := range tx.Buckets() {
-		// warnf("[bucket] %s", bucket.name)
-		tx.forEachPage(bucket.root, 0, func(p *page, _ int) {
-			// Ensure each page is only referenced once.
-			for i := pgid(0); i <= pgid(p.overflow); i++ {
-				var id = p.id + i
-				if _, ok := reachable[id]; ok {
-					errors = append(errors, fmt.Errorf("page %d: multiple references", int(id)))
-				}
-				reachable[id] = p
-			}
-
-			// Retrieve page info.
-			info, err := tx.Page(int(p.id))
-			// warnf("[page] %d + %d (%s)", p.id, p.overflow, info.Type)
-			if err != nil {
-				errors = append(errors, err)
-			} else if info == nil {
-				errors = append(errors, fmt.Errorf("page %d: out of bounds: %d", int(p.id), int(tx.meta.pgid)))
-			} else if info.Type != "branch" && info.Type != "leaf" {
-				errors = append(errors, fmt.Errorf("page %d: invalid type: %s", int(p.id), info.Type))
-			}
-		})
-	}
+	// Recursively check buckets.
+	db.checkBucket(&tx.root, reachable, &errors)

 	// Ensure all pages below high water mark are either reachable or freed.
 	for i := pgid(0); i < tx.meta.pgid; i++ {

@ -553,8 +537,6 @@ func (db *DB) Check() error {
 		}
 	}

-	// TODO(benbjohnson): Ensure that only one buckets page exists.
-
 	if len(errors) > 0 {
 		return errors
 	}

@ -563,6 +545,39 @@ func (db *DB) Check() error {
 	})
 }

+func (db *DB) checkBucket(b *Bucket, reachable map[pgid]*page, errors *ErrorList) {
+	// Check every page used by this bucket.
+	b.tx.forEachPage(b.root, 0, func(p *page, _ int) {
+		// Ensure each page is only referenced once.
+		for i := pgid(0); i <= pgid(p.overflow); i++ {
+			var id = p.id + i
+			if _, ok := reachable[id]; ok {
+				*errors = append(*errors, fmt.Errorf("page %d: multiple references", int(id)))
+			}
+			reachable[id] = p
+		}
+
+		// Retrieve page info.
+		info, err := b.tx.Page(int(p.id))
+		// warnf("[page] %d + %d (%s)", p.id, p.overflow, info.Type)
+		if err != nil {
+			*errors = append(*errors, err)
+		} else if info == nil {
+			*errors = append(*errors, fmt.Errorf("page %d: out of bounds: %d", int(p.id), int(b.tx.meta.pgid)))
+		} else if info.Type != "branch" && info.Type != "leaf" {
+			*errors = append(*errors, fmt.Errorf("page %d: invalid type: %s", int(p.id), info.Type))
+		}
+	})
+
+	// Check each bucket within this bucket.
+	_ = b.ForEach(func(k, v []byte) error {
+		if child := b.Bucket(k); child != nil {
+			db.checkBucket(child, reachable, errors)
+		}
+		return nil
+	})
+}
+
 // page retrieves a page reference from the mmap based on the current page size.
 func (db *DB) page(id pgid) *page {
 	pos := id * pgid(db.pageSize)
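The checkBucket helper added above also serves as a template for any recursive walk over nested buckets. A minimal sketch of the same traversal pattern using only the calls visible in this diff (Bucket.ForEach and Bucket.Bucket); the walk function itself is hypothetical, not part of the commit:

	// Sketch only: visit every bucket depth-first, mirroring checkBucket.
	var walk func(name []byte, b *Bucket)
	walk = func(name []byte, b *Bucket) {
		fmt.Printf("bucket: %s\n", name)
		b.ForEach(func(k, v []byte) error {
			if child := b.Bucket(k); child != nil {
				walk(k, child) // v is nil for sub-bucket entries
			}
			return nil
		})
	}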
276	db_test.go

@ -34,14 +34,14 @@ func TestOpen(t *testing.T) {
 }

 // Ensure that opening a database with a bad path returns an error.
-func TestOpenBadPath(t *testing.T) {
+func TestOpen_BadPath(t *testing.T) {
 	db, err := Open("/../bad-path", 0666)
 	assert.Error(t, err)
 	assert.Nil(t, db)
 }

 // Ensure that a database can be opened without error.
-func TestDBOpen(t *testing.T) {
+func TestDB_Open(t *testing.T) {
 	withTempPath(func(path string) {
 		db, err := Open(path, 0666)
 		assert.NotNil(t, db)

@ -52,7 +52,7 @@ func TestDBOpen(t *testing.T) {
 }

 // Ensure that a re-opened database is consistent.
-func TestOpenCheck(t *testing.T) {
+func TestOpen_Check(t *testing.T) {
 	withTempPath(func(path string) {
 		db, err := Open(path, 0666)
 		assert.NoError(t, err)

@ -67,7 +67,7 @@ func TestOpenCheck(t *testing.T) {
 }

 // Ensure that the database returns an error if the file handle cannot be open.
-func TestDBOpenFileError(t *testing.T) {
+func TestDB_Open_FileError(t *testing.T) {
 	withTempPath(func(path string) {
 		_, err := Open(path+"/youre-not-my-real-parent", 0666)
 		if err, _ := err.(*os.PathError); assert.Error(t, err) {

@ -78,12 +78,12 @@ func TestDBOpenFileError(t *testing.T) {
 }

 // Ensure that write errors to the meta file handler during initialization are returned.
-func TestDBMetaInitWriteError(t *testing.T) {
+func TestDB_Open_MetaInitWriteError(t *testing.T) {
 	t.Skip("pending")
 }

 // Ensure that a database that is too small returns an error.
-func TestDBFileTooSmall(t *testing.T) {
+func TestDB_Open_FileTooSmall(t *testing.T) {
 	withTempPath(func(path string) {
 		db, err := Open(path, 0666)
 		assert.NoError(t, err)

@ -98,7 +98,7 @@ func TestDBFileTooSmall(t *testing.T) {
 }

 // Ensure that corrupt meta0 page errors get returned.
-func TestDBCorruptMeta0(t *testing.T) {
+func TestDB_Open_CorruptMeta0(t *testing.T) {
 	withTempPath(func(path string) {
 		var m meta
 		m.magic = magic

@ -122,16 +122,16 @@ func TestDBCorruptMeta0(t *testing.T) {
 }

 // Ensure that a corrupt meta page checksum causes the open to fail.
-func TestDBMetaChecksumError(t *testing.T) {
+func TestDB_Open_MetaChecksumError(t *testing.T) {
 	for i := 0; i < 2; i++ {
 		withTempPath(func(path string) {
 			db, err := Open(path, 0600)
 			pageSize := db.pageSize
 			db.Update(func(tx *Tx) error {
-				return tx.CreateBucket("widgets")
+				return tx.CreateBucket([]byte("widgets"))
 			})
 			db.Update(func(tx *Tx) error {
-				return tx.CreateBucket("woojits")
+				return tx.CreateBucket([]byte("woojits"))
 			})
 			db.Close()

@ -155,7 +155,7 @@ func TestDBMetaChecksumError(t *testing.T) {
 }

 // Ensure that a database cannot open a transaction when it's not open.
-func TestDBTxErrDatabaseNotOpen(t *testing.T) {
+func TestDB_Begin_DatabaseNotOpen(t *testing.T) {
 	var db DB
 	tx, err := db.Begin(false)
 	assert.Nil(t, tx)

@ -163,7 +163,7 @@ func TestDBTxErrDatabaseNotOpen(t *testing.T) {
 }

 // Ensure that a read-write transaction can be retrieved.
-func TestDBBeginRW(t *testing.T) {
+func TestDB_BeginRW(t *testing.T) {
 	withOpenDB(func(db *DB, path string) {
 		tx, err := db.Begin(true)
 		assert.NotNil(t, tx)

@ -175,7 +175,7 @@ func TestDBBeginRW(t *testing.T) {
 }

 // Ensure that opening a transaction while the DB is closed returns an error.
-func TestDBRWTxOpenWithClosedDB(t *testing.T) {
+func TestDB_BeginRW_Closed(t *testing.T) {
 	var db DB
 	tx, err := db.Begin(true)
 	assert.Equal(t, err, ErrDatabaseNotOpen)

@ -183,11 +183,11 @@ func TestDBRWTxOpenWithClosedDB(t *testing.T) {
 }

 // Ensure a database can provide a transactional block.
-func TestDBTxBlock(t *testing.T) {
+func TestDB_Update(t *testing.T) {
 	withOpenDB(func(db *DB, path string) {
 		err := db.Update(func(tx *Tx) error {
-			tx.CreateBucket("widgets")
-			b := tx.Bucket("widgets")
+			tx.CreateBucket([]byte("widgets"))
+			b := tx.Bucket([]byte("widgets"))
 			b.Put([]byte("foo"), []byte("bar"))
 			b.Put([]byte("baz"), []byte("bat"))
 			b.Delete([]byte("foo"))

@ -195,8 +195,8 @@ func TestDBTxBlock(t *testing.T) {
 		})
 		assert.NoError(t, err)
 		err = db.View(func(tx *Tx) error {
-			assert.Nil(t, tx.Bucket("widgets").Get([]byte("foo")))
-			assert.Equal(t, []byte("bat"), tx.Bucket("widgets").Get([]byte("baz")))
+			assert.Nil(t, tx.Bucket([]byte("widgets")).Get([]byte("foo")))
+			assert.Equal(t, []byte("bat"), tx.Bucket([]byte("widgets")).Get([]byte("baz")))
 			return nil
 		})
 		assert.NoError(t, err)

@ -204,20 +204,20 @@ func TestDBTxBlock(t *testing.T) {
 }

 // Ensure a closed database returns an error while running a transaction block
-func TestDBTxBlockWhileClosed(t *testing.T) {
+func TestDB_Update_Closed(t *testing.T) {
 	var db DB
 	err := db.Update(func(tx *Tx) error {
-		tx.CreateBucket("widgets")
+		tx.CreateBucket([]byte("widgets"))
 		return nil
 	})
 	assert.Equal(t, err, ErrDatabaseNotOpen)
 }

 // Ensure a panic occurs while trying to commit a managed transaction.
|
// Ensure a panic occurs while trying to commit a managed transaction.
|
||||||
func TestDBTxBlockWithManualCommitAndRollback(t *testing.T) {
|
func TestDB_Update_ManualCommitAndRollback(t *testing.T) {
|
||||||
var db DB
|
var db DB
|
||||||
db.Update(func(tx *Tx) error {
|
db.Update(func(tx *Tx) error {
|
||||||
tx.CreateBucket("widgets")
|
tx.CreateBucket([]byte("widgets"))
|
||||||
assert.Panics(t, func() { tx.Commit() })
|
assert.Panics(t, func() { tx.Commit() })
|
||||||
assert.Panics(t, func() { tx.Rollback() })
|
assert.Panics(t, func() { tx.Rollback() })
|
||||||
return nil
|
return nil
|
||||||
|
@ -229,37 +229,58 @@ func TestDBTxBlockWithManualCommitAndRollback(t *testing.T) {
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
// Ensure that the database can be copied to a file path.
|
// Ensure a database can return an error through a read-only transactional block.
|
||||||
func TestDBCopyFile(t *testing.T) {
|
func TestDB_View_Error(t *testing.T) {
|
||||||
withOpenDB(func(db *DB, path string) {
|
withOpenDB(func(db *DB, path string) {
|
||||||
|
err := db.View(func(tx *Tx) error {
|
||||||
|
return errors.New("xxx")
|
||||||
|
})
|
||||||
|
assert.Equal(t, errors.New("xxx"), err)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// Ensure that the database can be copied to a file path.
|
||||||
|
func TestDB_CopyFile(t *testing.T) {
|
||||||
|
withOpenDB(func(db *DB, path string) {
|
||||||
|
var dest = tempfile()
|
||||||
db.Update(func(tx *Tx) error {
|
db.Update(func(tx *Tx) error {
|
||||||
tx.CreateBucket("widgets")
|
tx.CreateBucket([]byte("widgets"))
|
||||||
tx.Bucket("widgets").Put([]byte("foo"), []byte("bar"))
|
tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar"))
|
||||||
tx.Bucket("widgets").Put([]byte("baz"), []byte("bat"))
|
tx.Bucket([]byte("widgets")).Put([]byte("baz"), []byte("bat"))
|
||||||
return nil
|
return nil
|
||||||
})
|
})
|
||||||
assert.NoError(t, os.RemoveAll("/tmp/bolt.copyfile.db"))
|
assert.NoError(t, db.CopyFile(dest, 0600))
|
||||||
assert.NoError(t, db.CopyFile("/tmp/bolt.copyfile.db", 0666))
|
|
||||||
|
|
||||||
db2, err := Open("/tmp/bolt.copyfile.db", 0666)
|
db2, err := Open(dest, 0600)
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
defer db2.Close()
|
defer db2.Close()
|
||||||
|
|
||||||
db2.View(func(tx *Tx) error {
|
db2.View(func(tx *Tx) error {
|
||||||
assert.Equal(t, []byte("bar"), tx.Bucket("widgets").Get([]byte("foo")))
|
assert.Equal(t, []byte("bar"), tx.Bucket([]byte("widgets")).Get([]byte("foo")))
|
||||||
assert.Equal(t, []byte("bat"), tx.Bucket("widgets").Get([]byte("baz")))
|
assert.Equal(t, []byte("bat"), tx.Bucket([]byte("widgets")).Get([]byte("baz")))
|
||||||
return nil
|
return nil
|
||||||
})
|
})
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
// Ensure that an error is returned when a database write fails.
|
// Ensure that an error is returned when a database write fails.
|
||||||
func TestDBWriteFail(t *testing.T) {
|
func TestDB_Commit_WriteFail(t *testing.T) {
|
||||||
t.Skip("pending") // TODO(benbjohnson)
|
t.Skip("pending") // TODO(benbjohnson)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Ensure that DB stats can be returned.
|
||||||
|
func TestDB_Stats(t *testing.T) {
|
||||||
|
withOpenDB(func(db *DB, path string) {
|
||||||
|
db.Update(func(tx *Tx) error {
|
||||||
|
return tx.CreateBucket([]byte("widgets"))
|
||||||
|
})
|
||||||
|
stats := db.Stats()
|
||||||
|
assert.Equal(t, 3, stats.TxStats.PageCount)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
// Ensure that the mmap grows appropriately.
|
// Ensure that the mmap grows appropriately.
|
||||||
func TestDBMmapSize(t *testing.T) {
|
func TestDB_mmapSize(t *testing.T) {
|
||||||
db := &DB{pageSize: 4096}
|
db := &DB{pageSize: 4096}
|
||||||
assert.Equal(t, db.mmapSize(0), minMmapSize)
|
assert.Equal(t, db.mmapSize(0), minMmapSize)
|
||||||
assert.Equal(t, db.mmapSize(16384), minMmapSize)
|
assert.Equal(t, db.mmapSize(16384), minMmapSize)
|
||||||
|
@ -271,15 +292,15 @@ func TestDBMmapSize(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
// Ensure that database pages are in expected order and type.
|
// Ensure that database pages are in expected order and type.
|
||||||
func TestDBConsistency(t *testing.T) {
|
func TestDB_Consistency(t *testing.T) {
|
||||||
withOpenDB(func(db *DB, path string) {
|
withOpenDB(func(db *DB, path string) {
|
||||||
db.Update(func(tx *Tx) error {
|
db.Update(func(tx *Tx) error {
|
||||||
return tx.CreateBucket("widgets")
|
return tx.CreateBucket([]byte("widgets"))
|
||||||
})
|
})
|
||||||
|
|
||||||
for i := 0; i < 10; i++ {
|
for i := 0; i < 10; i++ {
|
||||||
db.Update(func(tx *Tx) error {
|
db.Update(func(tx *Tx) error {
|
||||||
assert.NoError(t, tx.Bucket("widgets").Put([]byte("foo"), []byte("bar")))
|
assert.NoError(t, tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar")))
|
||||||
return nil
|
return nil
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
@ -300,7 +321,7 @@ func TestDBConsistency(t *testing.T) {
|
||||||
assert.Equal(t, "freelist", p.Type)
|
assert.Equal(t, "freelist", p.Type)
|
||||||
}
|
}
|
||||||
if p, _ := tx.Page(5); assert.NotNil(t, p) {
|
if p, _ := tx.Page(5); assert.NotNil(t, p) {
|
||||||
assert.Equal(t, "buckets", p.Type)
|
assert.Equal(t, "leaf", p.Type) // root leaf
|
||||||
}
|
}
|
||||||
if p, _ := tx.Page(6); assert.NotNil(t, p) {
|
if p, _ := tx.Page(6); assert.NotNil(t, p) {
|
||||||
assert.Equal(t, "leaf", p.Type)
|
assert.Equal(t, "leaf", p.Type)
|
||||||
|
@ -316,51 +337,187 @@ func TestDBConsistency(t *testing.T) {
|
||||||
}
|
}
|
||||||
|
|
||||||
// Ensure that a database can return a string representation of itself.
|
// Ensure that a database can return a string representation of itself.
|
||||||
func TestDBString(t *testing.T) {
|
func TestDB_String(t *testing.T) {
|
||||||
db := &DB{path: "/tmp/foo"}
|
db := &DB{path: "/foo/bar"}
|
||||||
assert.Equal(t, db.String(), `DB<"/tmp/foo">`)
|
assert.Equal(t, db.String(), `DB<"/foo/bar">`)
|
||||||
assert.Equal(t, db.GoString(), `bolt.DB{path:"/tmp/foo"}`)
|
assert.Equal(t, db.GoString(), `bolt.DB{path:"/foo/bar"}`)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Ensure that DB stats can be substracted from one another.
|
||||||
|
func TestDBStats_Sub(t *testing.T) {
|
||||||
|
var a, b Stats
|
||||||
|
a.TxStats.PageCount = 3
|
||||||
|
b.TxStats.PageCount = 10
|
||||||
|
diff := b.Sub(&a)
|
||||||
|
assert.Equal(t, 7, diff.TxStats.PageCount)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Benchmark the performance of single put transactions in random order.
|
// Benchmark the performance of single put transactions in random order.
|
||||||
func BenchmarkDBPutSequential(b *testing.B) {
|
func BenchmarkDB_Put_Sequential(b *testing.B) {
|
||||||
value := []byte(strings.Repeat("0", 64))
|
value := []byte(strings.Repeat("0", 64))
|
||||||
withOpenDB(func(db *DB, path string) {
|
withOpenDB(func(db *DB, path string) {
|
||||||
db.Update(func(tx *Tx) error {
|
db.Update(func(tx *Tx) error {
|
||||||
return tx.CreateBucket("widgets")
|
return tx.CreateBucket([]byte("widgets"))
|
||||||
})
|
})
|
||||||
for i := 0; i < b.N; i++ {
|
for i := 0; i < b.N; i++ {
|
||||||
db.Update(func(tx *Tx) error {
|
db.Update(func(tx *Tx) error {
|
||||||
return tx.Bucket("widgets").Put([]byte(strconv.Itoa(i)), value)
|
return tx.Bucket([]byte("widgets")).Put([]byte(strconv.Itoa(i)), value)
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
// Benchmark the performance of single put transactions in random order.
|
// Benchmark the performance of single put transactions in random order.
|
||||||
func BenchmarkDBPutRandom(b *testing.B) {
|
func BenchmarkDB_Put_Random(b *testing.B) {
|
||||||
indexes := rand.Perm(b.N)
|
indexes := rand.Perm(b.N)
|
||||||
value := []byte(strings.Repeat("0", 64))
|
value := []byte(strings.Repeat("0", 64))
|
||||||
withOpenDB(func(db *DB, path string) {
|
withOpenDB(func(db *DB, path string) {
|
||||||
db.Update(func(tx *Tx) error {
|
db.Update(func(tx *Tx) error {
|
||||||
return tx.CreateBucket("widgets")
|
return tx.CreateBucket([]byte("widgets"))
|
||||||
})
|
})
|
||||||
for i := 0; i < b.N; i++ {
|
for i := 0; i < b.N; i++ {
|
||||||
db.Update(func(tx *Tx) error {
|
db.Update(func(tx *Tx) error {
|
||||||
return tx.Bucket("widgets").Put([]byte(strconv.Itoa(indexes[i])), value)
|
return tx.Bucket([]byte("widgets")).Put([]byte(strconv.Itoa(indexes[i])), value)
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func ExampleDB_Update() {
|
||||||
|
// Open the database.
|
||||||
|
db, _ := Open(tempfile(), 0666)
|
||||||
|
defer os.Remove(db.Path())
|
||||||
|
defer db.Close()
|
||||||
|
|
||||||
|
// Execute several commands within a write transaction.
|
||||||
|
err := db.Update(func(tx *Tx) error {
|
||||||
|
if err := tx.CreateBucket([]byte("widgets")); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
b := tx.Bucket([]byte("widgets"))
|
||||||
|
if err := b.Put([]byte("foo"), []byte("bar")); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
|
||||||
|
// If our transactional block didn't return an error then our data is saved.
|
||||||
|
if err == nil {
|
||||||
|
db.View(func(tx *Tx) error {
|
||||||
|
value := tx.Bucket([]byte("widgets")).Get([]byte("foo"))
|
||||||
|
fmt.Printf("The value of 'foo' is: %s\n", string(value))
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// Output:
|
||||||
|
// The value of 'foo' is: bar
|
||||||
|
}
|
||||||
|
|
||||||
|
func ExampleDB_View() {
|
||||||
|
// Open the database.
|
||||||
|
db, _ := Open(tempfile(), 0666)
|
||||||
|
defer os.Remove(db.Path())
|
||||||
|
defer db.Close()
|
||||||
|
|
||||||
|
// Insert data into a bucket.
|
||||||
|
db.Update(func(tx *Tx) error {
|
||||||
|
tx.CreateBucket([]byte("people"))
|
||||||
|
b := tx.Bucket([]byte("people"))
|
||||||
|
b.Put([]byte("john"), []byte("doe"))
|
||||||
|
b.Put([]byte("susy"), []byte("que"))
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
|
||||||
|
// Access data from within a read-only transactional block.
|
||||||
|
db.View(func(tx *Tx) error {
|
||||||
|
v := tx.Bucket([]byte("people")).Get([]byte("john"))
|
||||||
|
fmt.Printf("John's last name is %s.\n", string(v))
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
|
||||||
|
// Output:
|
||||||
|
// John's last name is doe.
|
||||||
|
}
|
||||||
|
|
||||||
|
func ExampleDB_Begin_ReadOnly() {
|
||||||
|
// Open the database.
|
||||||
|
db, _ := Open(tempfile(), 0666)
|
||||||
|
defer os.Remove(db.Path())
|
||||||
|
defer db.Close()
|
||||||
|
|
||||||
|
// Create a bucket.
|
||||||
|
db.Update(func(tx *Tx) error {
|
||||||
|
return tx.CreateBucket([]byte("widgets"))
|
||||||
|
})
|
||||||
|
|
||||||
|
// Create several keys in a transaction.
|
||||||
|
tx, _ := db.Begin(true)
|
||||||
|
b := tx.Bucket([]byte("widgets"))
|
||||||
|
b.Put([]byte("john"), []byte("blue"))
|
||||||
|
b.Put([]byte("abby"), []byte("red"))
|
||||||
|
b.Put([]byte("zephyr"), []byte("purple"))
|
||||||
|
tx.Commit()
|
||||||
|
|
||||||
|
// Iterate over the values in sorted key order.
|
||||||
|
tx, _ = db.Begin(false)
|
||||||
|
c := tx.Bucket([]byte("widgets")).Cursor()
|
||||||
|
for k, v := c.First(); k != nil; k, v = c.Next() {
|
||||||
|
fmt.Printf("%s likes %s\n", string(k), string(v))
|
||||||
|
}
|
||||||
|
tx.Rollback()
|
||||||
|
|
||||||
|
// Output:
|
||||||
|
// abby likes red
|
||||||
|
// john likes blue
|
||||||
|
// zephyr likes purple
|
||||||
|
}
|
||||||
|
|
||||||
|
func ExampleDB_CopyFile() {
|
||||||
|
// Open the database.
|
||||||
|
db, _ := Open(tempfile(), 0666)
|
||||||
|
defer os.Remove(db.Path())
|
||||||
|
defer db.Close()
|
||||||
|
|
||||||
|
// Create a bucket and a key.
|
||||||
|
db.Update(func(tx *Tx) error {
|
||||||
|
tx.CreateBucket([]byte("widgets"))
|
||||||
|
tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar"))
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
|
||||||
|
// Copy the database to another file.
|
||||||
|
toFile := tempfile()
|
||||||
|
db.CopyFile(toFile, 0666)
|
||||||
|
defer os.Remove(toFile)
|
||||||
|
|
||||||
|
// Open the cloned database.
|
||||||
|
db2, _ := Open(toFile, 0666)
|
||||||
|
defer db2.Close()
|
||||||
|
|
||||||
|
// Ensure that the key exists in the copy.
|
||||||
|
db2.View(func(tx *Tx) error {
|
||||||
|
value := tx.Bucket([]byte("widgets")).Get([]byte("foo"))
|
||||||
|
fmt.Printf("The value for 'foo' in the clone is: %s\n", string(value))
|
||||||
|
return nil
|
||||||
|
})
|
||||||
|
|
||||||
|
// Output:
|
||||||
|
// The value for 'foo' in the clone is: bar
|
||||||
|
}
|
||||||
|
|
||||||
|
// tempfile returns a temporary file path.
|
||||||
|
func tempfile() string {
|
||||||
|
f, _ := ioutil.TempFile("", "bolt-")
|
||||||
|
f.Close()
|
||||||
|
os.Remove(f.Name())
|
||||||
|
return f.Name()
|
||||||
|
}
|
||||||
|
|
||||||
// withTempPath executes a function with a database reference.
|
// withTempPath executes a function with a database reference.
|
||||||
func withTempPath(fn func(string)) {
|
func withTempPath(fn func(string)) {
|
||||||
f, _ := ioutil.TempFile("", "bolt-")
|
path := tempfile()
|
||||||
path := f.Name()
|
|
||||||
f.Close()
|
|
||||||
os.Remove(path)
|
|
||||||
defer os.RemoveAll(path)
|
defer os.RemoveAll(path)
|
||||||
|
|
||||||
fn(path)
|
fn(path)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -388,7 +545,8 @@ func withOpenDB(fn func(*DB, string)) {
|
||||||
func mustCheck(db *DB) {
|
func mustCheck(db *DB) {
|
||||||
if err := db.Check(); err != nil {
|
if err := db.Check(); err != nil {
|
||||||
// Copy db off first.
|
// Copy db off first.
|
||||||
db.CopyFile("/tmp/check.db", 0600)
|
var path = tempfile()
|
||||||
|
db.CopyFile(path, 0600)
|
||||||
|
|
||||||
if errors, ok := err.(ErrorList); ok {
|
if errors, ok := err.(ErrorList); ok {
|
||||||
for _, err := range errors {
|
for _, err := range errors {
|
||||||
|
@ -396,7 +554,7 @@ func mustCheck(db *DB) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
warn(err)
|
warn(err)
|
||||||
panic("check failure: see /tmp/check.db")
|
panic("check failure: " + path)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -425,3 +583,11 @@ func logStats(db *DB) {
|
||||||
func truncDuration(d time.Duration) string {
|
func truncDuration(d time.Duration) string {
|
||||||
return regexp.MustCompile(`^(\d+)(\.\d+)`).ReplaceAllString(d.String(), "$1")
|
return regexp.MustCompile(`^(\d+)(\.\d+)`).ReplaceAllString(d.String(), "$1")
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// copyAndFailNow copies a database to a new location and then fails then test.
|
||||||
|
func copyAndFailNow(t *testing.T, db *DB) {
|
||||||
|
path := tempfile()
|
||||||
|
db.CopyFile(path, 0600)
|
||||||
|
fmt.Println("db copied to: ", path)
|
||||||
|
t.FailNow()
|
||||||
|
}
|
||||||
|
|
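The recurring change in db_test.go above is that bucket names are now []byte values rather than strings. A minimal sketch of the new call shape, using only this commit's in-package API (withOpenDB is the helper defined in this file):

	withOpenDB(func(db *DB, path string) {
		db.Update(func(tx *Tx) error {
			// Bucket names are byte slices after this commit.
			if err := tx.CreateBucket([]byte("widgets")); err != nil {
				return err
			}
			return tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar"))
		})
	})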
example_test.go (251)
@@ -1,251 +0,0 @@
-package bolt
-
-import (
-	"fmt"
-	"os"
-)
-
-func init() {
-	os.RemoveAll("/tmp/bolt")
-	os.MkdirAll("/tmp/bolt", 0777)
-}
-
-func ExampleDB_Update() {
-	// Open the database.
-	db, _ := Open("/tmp/bolt/db_do.db", 0666)
-	defer db.Close()
-
-	// Execute several commands within a write transaction.
-	err := db.Update(func(tx *Tx) error {
-		if err := tx.CreateBucket("widgets"); err != nil {
-			return err
-		}
-		b := tx.Bucket("widgets")
-		if err := b.Put([]byte("foo"), []byte("bar")); err != nil {
-			return err
-		}
-		return nil
-	})
-
-	// If our transactional block didn't return an error then our data is saved.
-	if err == nil {
-		db.View(func(tx *Tx) error {
-			value := tx.Bucket("widgets").Get([]byte("foo"))
-			fmt.Printf("The value of 'foo' is: %s\n", string(value))
-			return nil
-		})
-	}
-
-	// Output:
-	// The value of 'foo' is: bar
-}
-
-func ExampleDB_View() {
-	// Open the database.
-	db, _ := Open("/tmp/bolt/db_with.db", 0666)
-	defer db.Close()
-
-	// Insert data into a bucket.
-	db.Update(func(tx *Tx) error {
-		tx.CreateBucket("people")
-		tx.Bucket("people").Put([]byte("john"), []byte("doe"))
-		tx.Bucket("people").Put([]byte("susy"), []byte("que"))
-		return nil
-	})
-
-	// Access data from within a read-only transactional block.
-	db.View(func(tx *Tx) error {
-		v := tx.Bucket("people").Get([]byte("john"))
-		fmt.Printf("John's last name is %s.\n", string(v))
-		return nil
-	})
-
-	// Output:
-	// John's last name is doe.
-}
-
-func ExampleTx_Put() {
-	// Open the database.
-	db, _ := Open("/tmp/bolt/db_put.db", 0666)
-	defer db.Close()
-
-	// Start a write transaction.
-	db.Update(func(tx *Tx) error {
-		// Create a bucket.
-		tx.CreateBucket("widgets")
-
-		// Set the value "bar" for the key "foo".
-		tx.Bucket("widgets").Put([]byte("foo"), []byte("bar"))
-		return nil
-	})
-
-	// Read value back in a different read-only transaction.
-	db.Update(func(tx *Tx) error {
-		value := tx.Bucket("widgets").Get([]byte("foo"))
-		fmt.Printf("The value of 'foo' is: %s\n", string(value))
-		return nil
-	})
-
-	// Output:
-	// The value of 'foo' is: bar
-}
-
-func ExampleTx_Delete() {
-	// Open the database.
-	db, _ := Open("/tmp/bolt/db_delete.db", 0666)
-	defer db.Close()
-
-	// Start a write transaction.
-	db.Update(func(tx *Tx) error {
-		// Create a bucket.
-		tx.CreateBucket("widgets")
-		b := tx.Bucket("widgets")
-
-		// Set the value "bar" for the key "foo".
-		b.Put([]byte("foo"), []byte("bar"))
-
-		// Retrieve the key back from the database and verify it.
-		value := b.Get([]byte("foo"))
-		fmt.Printf("The value of 'foo' was: %s\n", string(value))
-		return nil
-	})
-
-	// Delete the key in a different write transaction.
-	db.Update(func(tx *Tx) error {
-		return tx.Bucket("widgets").Delete([]byte("foo"))
-	})
-
-	// Retrieve the key again.
-	db.View(func(tx *Tx) error {
-		value := tx.Bucket("widgets").Get([]byte("foo"))
-		if value == nil {
-			fmt.Printf("The value of 'foo' is now: nil\n")
-		}
-		return nil
-	})
-
-	// Output:
-	// The value of 'foo' was: bar
-	// The value of 'foo' is now: nil
-}
-
-func ExampleTx_ForEach() {
-	// Open the database.
-	db, _ := Open("/tmp/bolt/tx_foreach.db", 0666)
-	defer db.Close()
-
-	// Insert data into a bucket.
-	db.Update(func(tx *Tx) error {
-		tx.CreateBucket("animals")
-		b := tx.Bucket("animals")
-		b.Put([]byte("dog"), []byte("fun"))
-		b.Put([]byte("cat"), []byte("lame"))
-		b.Put([]byte("liger"), []byte("awesome"))
-
-		// Iterate over items in sorted key order.
-		b.ForEach(func(k, v []byte) error {
-			fmt.Printf("A %s is %s.\n", string(k), string(v))
-			return nil
-		})
-		return nil
-	})
-
-	// Output:
-	// A cat is lame.
-	// A dog is fun.
-	// A liger is awesome.
-}
-
-func ExampleBegin_ReadOnly() {
-	// Open the database.
-	db, _ := Open("/tmp/bolt/tx.db", 0666)
-	defer db.Close()
-
-	// Create a bucket.
-	db.Update(func(tx *Tx) error {
-		return tx.CreateBucket("widgets")
-	})
-
-	// Create several keys in a transaction.
-	tx, _ := db.Begin(true)
-	b := tx.Bucket("widgets")
-	b.Put([]byte("john"), []byte("blue"))
-	b.Put([]byte("abby"), []byte("red"))
-	b.Put([]byte("zephyr"), []byte("purple"))
-	tx.Commit()
-
-	// Iterate over the values in sorted key order.
-	tx, _ = db.Begin(false)
-	c := tx.Bucket("widgets").Cursor()
-	for k, v := c.First(); k != nil; k, v = c.Next() {
-		fmt.Printf("%s likes %s\n", string(k), string(v))
-	}
-	tx.Rollback()
-
-	// Output:
-	// abby likes red
-	// john likes blue
-	// zephyr likes purple
-}
-
-func ExampleTx_rollback() {
-	// Open the database.
-	db, _ := Open("/tmp/bolt/tx_rollback.db", 0666)
-	defer db.Close()
-
-	// Create a bucket.
-	db.Update(func(tx *Tx) error {
-		return tx.CreateBucket("widgets")
-	})
-
-	// Set a value for a key.
-	db.Update(func(tx *Tx) error {
-		return tx.Bucket("widgets").Put([]byte("foo"), []byte("bar"))
-	})
-
-	// Update the key but rollback the transaction so it never saves.
-	tx, _ := db.Begin(true)
-	b := tx.Bucket("widgets")
-	b.Put([]byte("foo"), []byte("baz"))
-	tx.Rollback()
-
-	// Ensure that our original value is still set.
-	db.View(func(tx *Tx) error {
-		value := tx.Bucket("widgets").Get([]byte("foo"))
-		fmt.Printf("The value for 'foo' is still: %s\n", string(value))
-		return nil
-	})
-
-	// Output:
-	// The value for 'foo' is still: bar
-}
-
-func ExampleDB_CopyFile() {
-	// Open the database.
-	db, _ := Open("/tmp/bolt/db_copy.db", 0666)
-	defer db.Close()
-
-	// Create a bucket and a key.
-	db.Update(func(tx *Tx) error {
-		tx.CreateBucket("widgets")
-		tx.Bucket("widgets").Put([]byte("foo"), []byte("bar"))
-		return nil
-	})
-
-	// Copy the database to another file.
-	db.CopyFile("/tmp/bolt/db_copy_2.db", 0666)
-
-	// Open the cloned database.
-	db2, _ := Open("/tmp/bolt/db_copy_2.db", 0666)
-	defer db2.Close()
-
-	// Ensure that the key exists in the copy.
-	db2.View(func(tx *Tx) error {
-		value := tx.Bucket("widgets").Get([]byte("foo"))
-		fmt.Printf("The value for 'foo' in the clone is: %s\n", string(value))
-		return nil
-	})
-
-	// Output:
-	// The value for 'foo' in the clone is: bar
-}
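The deleted examples above pinned their databases to fixed /tmp/bolt/... paths; their replacements in db_test.go run against a fresh path each time. A sketch of the replacement pattern, assuming the tempfile helper defined at the end of db_test.go:

	// Open the database at a fresh temporary path instead of a fixed one.
	db, _ := Open(tempfile(), 0666)
	defer os.Remove(db.Path())
	defer db.Close()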
freelist.go (25)
@@ -62,6 +62,8 @@ func (f *freelist) free(txid txid, p *page) {
		ids = append(ids, p.id+pgid(i))
	}
	f.pending[txid] = ids
+
+	// DEBUG ONLY: f.check()
}

// release moves all page ids for a transaction id (or older) to the freelist.

@@ -109,6 +111,29 @@ func (f *freelist) write(p *page) {
	copy(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[:], ids)
}

+// check verifies there are no double free pages.
+// This is slow so it should only be used while debugging.
+// If errors are found then a panic is invoked.
+/*
+func (f *freelist) check() {
+	var lookup = make(map[pgid]txid)
+	for _, id := range f.ids {
+		if _, ok := lookup[id]; ok {
+			panic(fmt.Sprintf("page %d already freed", id))
+		}
+		lookup[id] = 0
+	}
+	for txid, m := range f.pending {
+		for _, id := range m {
+			if _, ok := lookup[id]; ok {
+				panic(fmt.Sprintf("tx %d: page %d already freed in tx %d", txid, id, lookup[id]))
+			}
+			lookup[id] = txid
+		}
+	}
+}
+*/
+
type reverseSortedPgids []pgid

func (s reverseSortedPgids) Len() int { return len(s) }
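free records a page, plus any overflow pages, under the freeing transaction's id, and the commented-out check walks exactly that bookkeeping. A sketch of the resulting state, using only calls that appear in this diff and in freelist_test.go below:

	// Freeing a page with three overflow pages queues the whole run of ids.
	f := &freelist{pending: make(map[txid][]pgid)}
	f.free(100, &page{id: 12, overflow: 3})
	// f.pending[100] == []pgid{12, 13, 14, 15}; release would later move every
	// pending id recorded under txid 100 or older onto f.ids for reuse.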
freelist_test.go

@@ -8,21 +8,21 @@ import (
)

// Ensure that a page is added to a transaction's freelist.
-func TestFreelistFree(t *testing.T) {
+func TestFreelist_free(t *testing.T) {
	f := &freelist{pending: make(map[txid][]pgid)}
	f.free(100, &page{id: 12})
	assert.Equal(t, f.pending[100], []pgid{12})
}

// Ensure that a page and its overflow are added to a transaction's freelist.
-func TestFreelistFreeOverflow(t *testing.T) {
+func TestFreelist_free_overflow(t *testing.T) {
	f := &freelist{pending: make(map[txid][]pgid)}
	f.free(100, &page{id: 12, overflow: 3})
	assert.Equal(t, f.pending[100], []pgid{12, 13, 14, 15})
}

// Ensure that a transaction's free pages can be released.
-func TestFreelistRelease(t *testing.T) {
+func TestFreelist_release(t *testing.T) {
	f := &freelist{pending: make(map[txid][]pgid)}
	f.free(100, &page{id: 12, overflow: 1})
	f.free(100, &page{id: 9})
@@ -35,7 +35,7 @@ func TestFreelistRelease(t *testing.T) {
}

// Ensure that a freelist can find contiguous blocks of pages.
-func TestFreelistAllocate(t *testing.T) {
+func TestFreelist_allocate(t *testing.T) {
	f := &freelist{ids: []pgid{18, 13, 12, 9, 7, 6, 5, 4, 3}}
	assert.Equal(t, f.allocate(2), pgid(12))
	assert.Equal(t, f.allocate(1), pgid(18))
@@ -48,7 +48,7 @@ func TestFreelistAllocate(t *testing.T) {
}

// Ensure that a freelist can deserialize from a freelist page.
-func TestFreelistRead(t *testing.T) {
+func TestFreelist_read(t *testing.T) {
	// Create a page.
	var buf [4096]byte
	page := (*page)(unsafe.Pointer(&buf[0]))
@@ -71,7 +71,7 @@ func TestFreelistRead(t *testing.T) {
}

// Ensure that a freelist can serialize into a freelist page.
-func TestFreelistWrite(t *testing.T) {
+func TestFreelist_write(t *testing.T) {
	// Create a freelist and write it to a page.
	var buf [4096]byte
	f := &freelist{ids: []pgid{12, 39}, pending: make(map[txid][]pgid)}
@@ -1,144 +0,0 @@
-package bolt
-
-import (
-	"fmt"
-	"os"
-	"sync"
-	"testing"
-	"testing/quick"
-	"time"
-
-	"github.com/stretchr/testify/assert"
-)
-
-// Ensure that multiple threads can use the DB without race detector errors.
-func TestParallelTxs(t *testing.T) {
-	if testing.Short() {
-		t.Skip("skipping test in short mode.")
-	}
-
-	var mutex sync.RWMutex
-
-	err := quick.Check(func(numReaders, batchSize uint, items testdata) bool {
-		// Limit the readers & writers to something reasonable.
-		numReaders = (numReaders % 10) + 1
-		batchSize = (batchSize % 50) + 1
-
-		// warn("")
-		// warn("================================================================")
-		// warn("numReaders:", numReaders, "batchSize", batchSize)
-
-		// Maintain the current dataset.
-		var current testdata
-
-		withOpenDB(func(db *DB, path string) {
-			db.Update(func(tx *Tx) error {
-				return tx.CreateBucket("widgets")
-			})
-
-			// Maintain a set of concurrent readers.
-			var wg sync.WaitGroup
-			var c = make(chan bool, 0)
-			go func() {
-				var readers = make(chan int, numReaders)
-				for {
-					wg.Add(1)
-
-					// Attempt to start a new reader unless we're stopped.
-					select {
-					case readers <- 0:
-					case <-c:
-						wg.Done()
-						return
-					}
-
-					go func() {
-						mutex.RLock()
-						local := make(map[string][]byte)
-						for _, item := range current {
-							local[string(item.Key)] = item.Value
-						}
-						tx, err := db.Begin(false)
-						mutex.RUnlock()
-						if err == ErrDatabaseNotOpen {
-							wg.Done()
-							return
-						} else if !assert.NoError(t, err) {
-							t.FailNow()
-						}
-
-						// Verify all data is in for local data list.
-						for k, v := range local {
-							value := tx.Bucket("widgets").Get([]byte(k))
-							if !assert.NoError(t, err) || !assert.Equal(t, value, v, fmt.Sprintf("reader (%p)", tx)) {
-								tx.Rollback()
-								wg.Done()
-								t.FailNow()
-							}
-						}
-
-						tx.Rollback()
-						wg.Done()
-						<-readers
-					}()
-				}
-			}()
-
-			// Batch insert items.
-			pending := items
-			for {
-				// Determine next batch.
-				currentBatchSize := int(batchSize)
-				if currentBatchSize > len(pending) {
-					currentBatchSize = len(pending)
-				}
-				batchItems := pending[0:currentBatchSize]
-				pending = pending[currentBatchSize:]
-
-				// Start write transaction.
-				tx, err := db.Begin(true)
-				if !assert.NoError(t, err) {
-					t.FailNow()
-				}
-
-				// warnf("[writer] BEGIN (%d)", currentBatchSize)
-
-				// Insert whole batch.
-				b := tx.Bucket("widgets")
-				for _, item := range batchItems {
-					// warnf("[writer] PUT %x: %x", trunc(item.Key, 3), trunc(item.Value, 3))
-					err := b.Put(item.Key, item.Value)
-					if !assert.NoError(t, err) {
-						t.FailNow()
-					}
-				}
-
-				// Commit and update the current list.
-				mutex.Lock()
-				// warnf("[writer] COMMIT\n\n")
-				err = tx.Commit()
-				current = append(current, batchItems...)
-				mutex.Unlock()
-				if !assert.NoError(t, err) {
-					t.FailNow()
-				}
-
-				// If there are no more left then exit.
-				if len(pending) == 0 {
-					break
-				}
-
-				time.Sleep(1 * time.Millisecond)
-			}
-
-			// Notify readers to stop.
-			close(c)
-
-			// Wait for readers to finish.
-			wg.Wait()
-		})
-		return true
-	}, qconfig())
-	assert.NoError(t, err)
-	fmt.Fprint(os.Stderr, "\n")
-}
meta.go (2)
@@ -25,7 +25,7 @@ type meta struct {
	version  uint32
	pageSize uint32
	flags    uint32
-	buckets  pgid
+	root     bucket
	freelist pgid
	pgid     pgid
	txid     txid
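The meta page now embeds the root bucket directly where it previously held only a page id pointing at a dedicated buckets page. The bucket type itself lives in bucket.go and is not part of this excerpt; a hypothetical sketch, assuming the on-disk bucket header carries at least the root page id of the bucket's tree:

	// Hypothetical: the real definition is in bucket.go, outside this diff.
	type bucket struct {
		root pgid // page id of the bucket's root page
	}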
meta_test.go

@@ -6,13 +6,13 @@ import (
)

// Ensure that meta with bad magic is invalid.
-func TestMetaValidateMagic(t *testing.T) {
+func TestMeta_validate_magic(t *testing.T) {
	m := &meta{magic: 0x01234567}
	assert.Equal(t, m.validate(), ErrInvalid)
}

// Ensure that meta with a bad version is invalid.
-func TestMetaValidateVersion(t *testing.T) {
+func TestMeta_validate_version(t *testing.T) {
	m := &meta{magic: magic, version: 200}
	assert.Equal(t, m.validate(), ErrVersionMismatch)
}
node.go (50)
@@ -8,7 +8,7 @@ import (

// node represents an in-memory, deserialized page.
type node struct {
-	tx         *Tx
+	bucket     *Bucket
	isLeaf     bool
	unbalanced bool
	key        []byte

@@ -45,18 +45,10 @@ func (n *node) pageElementSize() int {
	return branchPageElementSize
}

-// root returns the root node in the tree.
-func (n *node) root() *node {
-	if n.parent == nil {
-		return n
-	}
-	return n.parent.root()
-}
-
// childAt returns the child node at a given index.
func (n *node) childAt(index int) *node {
	_assert(!n.isLeaf, "invalid childAt(%d) on a leaf node", index)
-	return n.tx.node(n.inodes[index].pgid, n)
+	return n.bucket.node(n.inodes[index].pgid, n)
}

// childIndex returns the index of a given child node.

@@ -95,7 +87,7 @@ func (n *node) prevSibling() *node {
}

// put inserts a key/value.
-func (n *node) put(oldKey, newKey, value []byte, pgid pgid) {
+func (n *node) put(oldKey, newKey, value []byte, pgid pgid, flags uint32) {
	// Find insertion index.
	index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, oldKey) != -1 })

@@ -107,6 +99,7 @@ func (n *node) put(oldKey, newKey, value []byte, pgid pgid) {
	}

	inode := &n.inodes[index]
+	inode.flags = flags
	inode.key = newKey
	inode.value = value
	inode.pgid = pgid

@@ -139,6 +132,7 @@ func (n *node) read(p *page) {
		inode := &n.inodes[i]
		if n.isLeaf {
			elem := p.leafPageElement(uint16(i))
+			inode.flags = elem.flags
			inode.key = elem.key()
			inode.value = elem.value()
		} else {

@@ -173,6 +167,7 @@ func (n *node) write(p *page) {
		if n.isLeaf {
			elem := p.leafPageElement(uint16(i))
			elem.pos = uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem)))
+			elem.flags = item.flags
			elem.ksize = uint32(len(item.key))
			elem.vsize = uint32(len(item.value))
		} else {

@@ -214,7 +209,7 @@ func (n *node) split(pageSize int) []*node {
		if len(current.inodes) >= minKeysPerPage && i < len(inodes)-minKeysPerPage && size+elemSize > threshold {
			size = pageHeaderSize
			nodes = append(nodes, current)
-			current = &node{tx: n.tx, isLeaf: n.isLeaf}
+			current = &node{bucket: n.bucket, isLeaf: n.isLeaf}
		}

		size += elemSize

@@ -234,10 +229,10 @@ func (n *node) rebalance() {
	n.unbalanced = false

	// Update statistics.
-	n.tx.stats.Rebalance++
+	n.bucket.tx.stats.Rebalance++

	// Ignore if node is above threshold (25%) and has enough keys.
-	var threshold = n.tx.db.pageSize / 4
+	var threshold = n.bucket.tx.db.pageSize / 4
	if n.size() > threshold && len(n.inodes) > n.minKeys() {
		return
	}

@@ -247,20 +242,20 @@ func (n *node) rebalance() {
	// If root node is a branch and only has one node then collapse it.
	if !n.isLeaf && len(n.inodes) == 1 {
		// Move child's children up.
-		child := n.tx.nodes[n.inodes[0].pgid]
+		child := n.bucket.nodes[n.inodes[0].pgid]
		n.isLeaf = child.isLeaf
		n.inodes = child.inodes[:]

		// Reparent all child nodes being moved.
		for _, inode := range n.inodes {
-			if child, ok := n.tx.nodes[inode.pgid]; ok {
+			if child, ok := n.bucket.nodes[inode.pgid]; ok {
				child.parent = n
			}
		}

		// Remove old child.
		child.parent = nil
-		delete(n.tx.nodes, child.pgid)
+		delete(n.bucket.nodes, child.pgid)
		child.free()
	}

@@ -282,18 +277,18 @@ func (n *node) rebalance() {
	if target.numChildren() > target.minKeys() {
		if useNextSibling {
			// Reparent and move node.
-			if child, ok := n.tx.nodes[target.inodes[0].pgid]; ok {
+			if child, ok := n.bucket.nodes[target.inodes[0].pgid]; ok {
				child.parent = n
			}
			n.inodes = append(n.inodes, target.inodes[0])
			target.inodes = target.inodes[1:]

			// Update target key on parent.
-			target.parent.put(target.key, target.inodes[0].key, nil, target.pgid)
+			target.parent.put(target.key, target.inodes[0].key, nil, target.pgid, 0)
			target.key = target.inodes[0].key
		} else {
			// Reparent and move node.
-			if child, ok := n.tx.nodes[target.inodes[len(target.inodes)-1].pgid]; ok {
+			if child, ok := n.bucket.nodes[target.inodes[len(target.inodes)-1].pgid]; ok {
				child.parent = n
			}
			n.inodes = append(n.inodes, inode{})

@@ -303,7 +298,7 @@ func (n *node) rebalance() {
		}

		// Update parent key for node.
-		n.parent.put(n.key, n.inodes[0].key, nil, n.pgid)
+		n.parent.put(n.key, n.inodes[0].key, nil, n.pgid, 0)
		n.key = n.inodes[0].key

		return

@@ -313,7 +308,7 @@ func (n *node) rebalance() {
	if useNextSibling {
		// Reparent all child nodes being moved.
		for _, inode := range target.inodes {
-			if child, ok := n.tx.nodes[inode.pgid]; ok {
+			if child, ok := n.bucket.nodes[inode.pgid]; ok {
				child.parent = n
			}
		}

@@ -321,12 +316,12 @@ func (n *node) rebalance() {
		// Copy over inodes from target and remove target.
		n.inodes = append(n.inodes, target.inodes...)
		n.parent.del(target.key)
-		delete(n.tx.nodes, target.pgid)
+		delete(n.bucket.nodes, target.pgid)
		target.free()
	} else {
		// Reparent all child nodes being moved.
		for _, inode := range n.inodes {
-			if child, ok := n.tx.nodes[inode.pgid]; ok {
+			if child, ok := n.bucket.nodes[inode.pgid]; ok {
				child.parent = target
			}
		}

@@ -334,8 +329,8 @@ func (n *node) rebalance() {
		// Copy over inodes to target and remove node.
		target.inodes = append(target.inodes, n.inodes...)
		n.parent.del(n.key)
-		n.parent.put(target.key, target.inodes[0].key, nil, target.pgid)
-		delete(n.tx.nodes, n.pgid)
+		n.parent.put(target.key, target.inodes[0].key, nil, target.pgid, 0)
+		delete(n.bucket.nodes, n.pgid)
		n.free()
	}

@@ -366,7 +361,7 @@ func (n *node) dereference() {
// free adds the node's underlying page to the freelist.
func (n *node) free() {
	if n.pgid != 0 {
-		n.tx.db.freelist.free(n.tx.id(), n.tx.page(n.pgid))
+		n.bucket.tx.db.freelist.free(n.bucket.tx.id(), n.bucket.tx.page(n.pgid))
	}
}

@@ -381,6 +376,7 @@ func (s nodesByDepth) Less(i, j int) bool { return s[i].depth > s[j].depth }
// It can be used to point to elements in a page or point
// to an element which hasn't been added to a page yet.
type inode struct {
+	flags uint32
	pgid  pgid
	key   []byte
	value []byte
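Every node.put call site gains a trailing flags argument, which lands in the new inode.flags field and round-trips through leafPageElement.flags on read and write. A sketch of how a nested-bucket entry could be marked apart from a plain key, assuming, as its name suggests, that the bucketLeafFlag constant added to page.go below is the marker for bucket elements:

	n := &node{isLeaf: true, inodes: make(inodes, 0)}
	// A plain key/value carries zero flags.
	n.put([]byte("foo"), []byte("foo"), []byte("bar"), 0, 0)
	// A key whose value is a serialized bucket header is flagged
	// (the value bytes here are a stand-in for that header).
	n.put([]byte("child"), []byte("child"), []byte{0, 0, 0, 0, 0, 0, 0, 0}, 0, bucketLeafFlag)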
node_test.go (51)
@@ -8,12 +8,12 @@ import (
)

// Ensure that a node can insert a key/value.
-func TestNodePut(t *testing.T) {
+func TestNode_put(t *testing.T) {
	n := &node{inodes: make(inodes, 0)}
-	n.put([]byte("baz"), []byte("baz"), []byte("2"), 0)
-	n.put([]byte("foo"), []byte("foo"), []byte("0"), 0)
-	n.put([]byte("bar"), []byte("bar"), []byte("1"), 0)
-	n.put([]byte("foo"), []byte("foo"), []byte("3"), 0)
+	n.put([]byte("baz"), []byte("baz"), []byte("2"), 0, 0)
+	n.put([]byte("foo"), []byte("foo"), []byte("0"), 0, 0)
+	n.put([]byte("bar"), []byte("bar"), []byte("1"), 0, 0)
+	n.put([]byte("foo"), []byte("foo"), []byte("3"), 0, leafPageFlag)
	assert.Equal(t, len(n.inodes), 3)
	assert.Equal(t, n.inodes[0].key, []byte("bar"))
	assert.Equal(t, n.inodes[0].value, []byte("1"))
@@ -21,10 +21,11 @@ func TestNodePut(t *testing.T) {
	assert.Equal(t, n.inodes[1].value, []byte("2"))
	assert.Equal(t, n.inodes[2].key, []byte("foo"))
	assert.Equal(t, n.inodes[2].value, []byte("3"))
+	assert.Equal(t, n.inodes[2].flags, uint32(leafPageFlag))
}

// Ensure that a node can deserialize from a leaf page.
-func TestNodeReadLeafPage(t *testing.T) {
+func TestNode_read_LeafPage(t *testing.T) {
	// Create a page.
	var buf [4096]byte
	page := (*page)(unsafe.Pointer(&buf[0]))
@@ -55,12 +56,12 @@ func TestNodeReadLeafPage(t *testing.T) {
}

// Ensure that a node can serialize into a leaf page.
-func TestNodeWriteLeafPage(t *testing.T) {
+func TestNode_write_LeafPage(t *testing.T) {
	// Create a node.
	n := &node{isLeaf: true, inodes: make(inodes, 0)}
-	n.put([]byte("susy"), []byte("susy"), []byte("que"), 0)
-	n.put([]byte("ricki"), []byte("ricki"), []byte("lake"), 0)
-	n.put([]byte("john"), []byte("john"), []byte("johnson"), 0)
+	n.put([]byte("susy"), []byte("susy"), []byte("que"), 0, 0)
+	n.put([]byte("ricki"), []byte("ricki"), []byte("lake"), 0, 0)
+	n.put([]byte("john"), []byte("john"), []byte("johnson"), 0, 0)

	// Write it to a page.
	var buf [4096]byte
@@ -82,14 +83,14 @@ func TestNodeWriteLeafPage(t *testing.T) {
}

// Ensure that a node can split into appropriate subgroups.
-func TestNodeSplit(t *testing.T) {
+func TestNode_split(t *testing.T) {
	// Create a node.
	n := &node{inodes: make(inodes, 0)}
-	n.put([]byte("00000001"), []byte("00000001"), []byte("0123456701234567"), 0)
-	n.put([]byte("00000002"), []byte("00000002"), []byte("0123456701234567"), 0)
-	n.put([]byte("00000003"), []byte("00000003"), []byte("0123456701234567"), 0)
-	n.put([]byte("00000004"), []byte("00000004"), []byte("0123456701234567"), 0)
-	n.put([]byte("00000005"), []byte("00000005"), []byte("0123456701234567"), 0)
+	n.put([]byte("00000001"), []byte("00000001"), []byte("0123456701234567"), 0, 0)
+	n.put([]byte("00000002"), []byte("00000002"), []byte("0123456701234567"), 0, 0)
+	n.put([]byte("00000003"), []byte("00000003"), []byte("0123456701234567"), 0, 0)
+	n.put([]byte("00000004"), []byte("00000004"), []byte("0123456701234567"), 0, 0)
+	n.put([]byte("00000005"), []byte("00000005"), []byte("0123456701234567"), 0, 0)

	// Split between 2 & 3.
	nodes := n.split(100)
@@ -100,11 +101,11 @@ func TestNodeSplit(t *testing.T) {
}

// Ensure that a page with the minimum number of inodes just returns a single node.
-func TestNodeSplitWithMinKeys(t *testing.T) {
+func TestNode_split_MinKeys(t *testing.T) {
	// Create a node.
	n := &node{inodes: make(inodes, 0)}
-	n.put([]byte("00000001"), []byte("00000001"), []byte("0123456701234567"), 0)
-	n.put([]byte("00000002"), []byte("00000002"), []byte("0123456701234567"), 0)
+	n.put([]byte("00000001"), []byte("00000001"), []byte("0123456701234567"), 0, 0)
+	n.put([]byte("00000002"), []byte("00000002"), []byte("0123456701234567"), 0, 0)

	// Split.
	nodes := n.split(20)
@@ -113,14 +114,14 @@ func TestNodeSplitWithMinKeys(t *testing.T) {
}

// Ensure that a node that has keys that all fit on a page just returns one leaf.
-func TestNodeSplitFitsInPage(t *testing.T) {
+func TestNode_split_SinglePage(t *testing.T) {
	// Create a node.
	n := &node{inodes: make(inodes, 0)}
-	n.put([]byte("00000001"), []byte("00000001"), []byte("0123456701234567"), 0)
-	n.put([]byte("00000002"), []byte("00000002"), []byte("0123456701234567"), 0)
-	n.put([]byte("00000003"), []byte("00000003"), []byte("0123456701234567"), 0)
-	n.put([]byte("00000004"), []byte("00000004"), []byte("0123456701234567"), 0)
-	n.put([]byte("00000005"), []byte("00000005"), []byte("0123456701234567"), 0)
+	n.put([]byte("00000001"), []byte("00000001"), []byte("0123456701234567"), 0, 0)
+	n.put([]byte("00000002"), []byte("00000002"), []byte("0123456701234567"), 0, 0)
+	n.put([]byte("00000003"), []byte("00000003"), []byte("0123456701234567"), 0, 0)
+	n.put([]byte("00000004"), []byte("00000004"), []byte("0123456701234567"), 0, 0)
+	n.put([]byte("00000005"), []byte("00000005"), []byte("0123456701234567"), 0, 0)

	// Split.
	nodes := n.split(4096)
page.go (7)
@@ -19,10 +19,13 @@ const (
	branchPageFlag   = 0x01
	leafPageFlag     = 0x02
	metaPageFlag     = 0x04
-	bucketsPageFlag  = 0x08
	freelistPageFlag = 0x10
)

+const (
+	bucketLeafFlag = 0x01
+)
+
type pgid uint64

type page struct {

@@ -41,8 +44,6 @@ func (p *page) typ() string {
		return "leaf"
	} else if (p.flags & metaPageFlag) != 0 {
		return "meta"
-	} else if (p.flags & bucketsPageFlag) != 0 {
-		return "buckets"
	} else if (p.flags & freelistPageFlag) != 0 {
		return "freelist"
	}
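Two distinct flag namespaces now coexist: the page-level type flags (branchPageFlag through freelistPageFlag, with bucketsPageFlag gone) and the element-level bucketLeafFlag in its own const block. A short sketch of checking each, using only identifiers from this diff:

	// Page-level: describes the whole page.
	p := &page{flags: leafPageFlag}
	isLeafPage := (p.flags & leafPageFlag) != 0
	// Element-level: describes one entry inside a leaf page.
	in := inode{flags: bucketLeafFlag}
	isBucketEntry := (in.flags & bucketLeafFlag) != 0
	_, _ = isLeafPage, isBucketEntry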
page_test.go

@@ -6,16 +6,15 @@ import (
)

// Ensure that the page type can be returned in human readable format.
-func TestPageTyp(t *testing.T) {
+func TestPage_typ(t *testing.T) {
	assert.Equal(t, (&page{flags: branchPageFlag}).typ(), "branch")
	assert.Equal(t, (&page{flags: leafPageFlag}).typ(), "leaf")
	assert.Equal(t, (&page{flags: metaPageFlag}).typ(), "meta")
-	assert.Equal(t, (&page{flags: bucketsPageFlag}).typ(), "buckets")
	assert.Equal(t, (&page{flags: freelistPageFlag}).typ(), "freelist")
	assert.Equal(t, (&page{flags: 20000}).typ(), "unknown<4e20>")
}

// Ensure that the hexdump debugging function doesn't blow up.
-func TestPageDump(t *testing.T) {
+func TestPage_dump(t *testing.T) {
	(&page{id: 256}).hexdump(16)
}
tx.go (294 changed lines)

@@ -33,10 +33,8 @@ type Tx struct {
     managed bool
     db *DB
     meta *meta
-    buckets *buckets
-    nodes map[pgid]*node
+    root Bucket
     pages map[pgid]*page
-    pending []*node
     stats TxStats
     commitHandlers []func()
 }
@@ -50,15 +48,14 @@ func (tx *Tx) init(db *DB) {
     tx.meta = &meta{}
     db.meta().copy(tx.meta)
 
-    // Read in the buckets page.
-    tx.buckets = &buckets{}
-    tx.buckets.read(tx.page(tx.meta.buckets))
+    // Copy over the root bucket.
+    tx.root = newBucket(tx)
+    tx.root.bucket = &bucket{}
+    *tx.root.bucket = tx.meta.root
 
+    // Increment the transaction id and add a page cache for writable transactions.
     if tx.writable {
         tx.pages = make(map[pgid]*page)
-        tx.nodes = make(map[pgid]*node)
-
-        // Increment the transaction id.
         tx.meta.txid += txid(1)
     }
 }
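Note: each Tx now copies the root bucket header out of the meta page by value instead of parsing a dedicated buckets page. The bucket struct itself lives in the bucket.go half of this commit (not shown here); the sketch below infers its minimal shape from the tx.meta.root.root assignment in the Commit hunk further down:

// Inferred shape only: meta.root embeds a small bucket header whose root
// field is the page id of the root bucket's B+tree. Copying it by value
// gives every transaction an isolated snapshot to mutate.
type bucketHeaderSketch struct {
    root pgid // page id of the bucket's root page
}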
@@ -85,95 +82,38 @@ func (tx *Tx) Stats() TxStats {
 
 // Bucket retrieves a bucket by name.
 // Returns nil if the bucket does not exist.
-func (tx *Tx) Bucket(name string) *Bucket {
-    b := tx.buckets.get(name)
-    if b == nil {
-        return nil
-    }
-
-    return &Bucket{
-        bucket: b,
-        name:   name,
-        tx:     tx,
-    }
-}
-
-// Buckets retrieves a list of all buckets.
-func (tx *Tx) Buckets() []*Bucket {
-    buckets := make([]*Bucket, 0, len(tx.buckets.items))
-    for name, b := range tx.buckets.items {
-        bucket := &Bucket{
-            bucket: b,
-            name:   name,
-            tx:     tx,
-        }
-        buckets = append(buckets, bucket)
-    }
-    sort.Sort(bucketsByName(buckets))
-    return buckets
+func (tx *Tx) Bucket(name []byte) *Bucket {
+    return tx.root.Bucket(name)
 }
 
 // CreateBucket creates a new bucket.
 // Returns an error if the bucket already exists, if the bucket name is blank, or if the bucket name is too long.
-func (tx *Tx) CreateBucket(name string) error {
-    if tx.db == nil {
-        return ErrTxClosed
-    } else if !tx.writable {
-        return ErrTxNotWritable
-    } else if b := tx.Bucket(name); b != nil {
-        return ErrBucketExists
-    } else if len(name) == 0 {
-        return ErrBucketNameRequired
-    } else if len(name) > MaxBucketNameSize {
-        return ErrBucketNameTooLarge
-    }
-
-    // Create a blank root leaf page.
-    p, err := tx.allocate(1)
-    if err != nil {
-        return err
-    }
-    p.flags = leafPageFlag
-
-    // Add bucket to buckets page.
-    tx.buckets.put(name, &bucket{root: p.id})
-
-    return nil
+func (tx *Tx) CreateBucket(name []byte) error {
+    return tx.root.CreateBucket(name)
 }
 
 // CreateBucketIfNotExists creates a new bucket if it doesn't already exist.
 // Returns an error if the bucket name is blank, or if the bucket name is too long.
-func (tx *Tx) CreateBucketIfNotExists(name string) error {
-    err := tx.CreateBucket(name)
-    if err != nil && err != ErrBucketExists {
-        return err
-    }
-    return nil
+func (tx *Tx) CreateBucketIfNotExists(name []byte) error {
+    return tx.root.CreateBucketIfNotExists(name)
 }
 
 // DeleteBucket deletes a bucket.
-// Returns an error if the bucket cannot be found.
-func (tx *Tx) DeleteBucket(name string) error {
-    if tx.db == nil {
-        return ErrTxClosed
-    } else if !tx.writable {
-        return ErrTxNotWritable
-    }
-
-    b := tx.Bucket(name)
-    if b == nil {
-        return ErrBucketNotFound
-    }
-
-    // Remove from buckets page.
-    tx.buckets.del(name)
-
-    // Free all pages.
-    tx.forEachPage(b.root, 0, func(p *page, depth int) {
-        tx.db.freelist.free(tx.id(), p)
-    })
-
-    return nil
+// Returns an error if the bucket cannot be found or if the key represents a non-bucket value.
+func (tx *Tx) DeleteBucket(name []byte) error {
+    return tx.root.DeleteBucket(name)
+}
+
+// ForEach executes a function for each bucket in the root.
+// If the provided function returns an error then the iteration is stopped and
+// the error is returned to the caller.
+func (tx *Tx) ForEach(fn func(name []byte, b *Bucket) error) error {
+    return tx.root.ForEach(func(k, v []byte) error {
+        if err := fn(k, tx.root.Bucket(k)); err != nil {
+            return err
+        }
+        return nil
+    })
 }
 
 // OnCommit adds a handler function to be executed after the transaction successfully commits.
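Note: with bucket names now []byte and every Tx bucket method delegating to the root bucket, ForEach becomes the way to enumerate top-level buckets (it replaces the removed Buckets slice API). A usage sketch in the style of the ExampleTx_Rollback function added at the end of tx_test.go below; the output ordering assumes ForEach walks keys in byte order:

func ExampleTx_ForEach() {
    db, _ := Open(tempfile(), 0666)
    defer os.Remove(db.Path())
    defer db.Close()

    db.Update(func(tx *Tx) error {
        // Create two top-level buckets, then walk them by name.
        tx.CreateBucketIfNotExists([]byte("accounts"))
        tx.CreateBucketIfNotExists([]byte("widgets"))
        return tx.ForEach(func(name []byte, b *Bucket) error {
            fmt.Printf("bucket: %s\n", name)
            return nil
        })
    })

    // Output:
    // bucket: accounts
    // bucket: widgets
}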
@@ -184,9 +124,8 @@ func (tx *Tx) OnCommit(fn func()) {
 // Commit writes all changes to disk and updates the meta page.
 // Returns an error if a disk write error occurs.
 func (tx *Tx) Commit() error {
-    if tx.managed {
-        panic("managed tx commit not allowed")
-    } else if tx.db == nil {
+    _assert(!tx.managed, "managed tx commit not allowed")
+    if tx.db == nil {
         return ErrTxClosed
     } else if !tx.writable {
         return ErrTxNotWritable
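Note: the open-coded managed-transaction panic is replaced by an _assert helper that is not shown in this diff. A minimal sketch of what such a helper presumably does (the real one may accept format arguments):

// _assert panics with the given message when the invariant does not hold.
func _assert(condition bool, msg string) {
    if !condition {
        panic(msg)
    }
}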
@@ -196,33 +135,24 @@ func (tx *Tx) Commit() error {
 
     // Rebalance nodes which have had deletions.
     var startTime = time.Now()
-    tx.rebalance()
+    tx.root.rebalance()
     tx.stats.RebalanceTime += time.Since(startTime)
 
     // spill data onto dirty pages.
     startTime = time.Now()
-    if err := tx.spill(); err != nil {
+    if err := tx.root.spill(); err != nil {
         tx.close()
         return err
     }
     tx.stats.SpillTime += time.Since(startTime)
 
-    // Spill buckets page.
-    p, err := tx.allocate((tx.buckets.size() / tx.db.pageSize) + 1)
-    if err != nil {
-        tx.close()
-        return err
-    }
-    tx.buckets.write(p)
-
-    // Free previous bucket page and update meta.
-    tx.db.freelist.free(tx.id(), tx.page(tx.meta.buckets))
-    tx.meta.buckets = p.id
+    // Free the old root bucket.
+    tx.meta.root.root = tx.root.root
 
     // Free the freelist and allocate new pages for it. This will overestimate
     // the size of the freelist but not underestimate the size (which would be bad).
-    tx.db.freelist.free(tx.id(), tx.page(tx.meta.freelist))
-    p, err = tx.allocate((tx.db.freelist.size() / tx.db.pageSize) + 1)
+    tx.db.freelist.free(tx.id(), tx.db.page(tx.meta.freelist))
+    p, err := tx.allocate((tx.db.freelist.size() / tx.db.pageSize) + 1)
     if err != nil {
         tx.close()
         return err
@@ -257,9 +187,8 @@ func (tx *Tx) Commit() error {
 
 // Rollback closes the transaction and ignores all previous updates.
 func (tx *Tx) Rollback() error {
-    if tx.managed {
-        panic("managed tx rollback not allowed")
-    } else if tx.db == nil {
+    _assert(!tx.managed, "managed tx rollback not allowed")
+    if tx.db == nil {
         return ErrTxClosed
     }
     tx.close()
@@ -268,13 +197,13 @@ func (tx *Tx) Rollback() error {
 
 func (tx *Tx) close() {
     if tx.writable {
-        // Merge statistics.
-        tx.db.metalock.Lock()
-        tx.db.stats.TxStats.add(&tx.stats)
-        tx.db.metalock.Unlock()
-
         // Remove writer lock.
         tx.db.rwlock.Unlock()
+
+        // Merge statistics.
+        tx.db.statlock.Lock()
+        tx.db.stats.TxStats.add(&tx.stats)
+        tx.db.statlock.Unlock()
     } else {
         tx.db.removeTx(tx)
     }
@@ -298,99 +227,6 @@ func (tx *Tx) allocate(count int) (*page, error) {
     return p, nil
 }
 
-// rebalance attempts to balance all nodes.
-func (tx *Tx) rebalance() {
-    for _, n := range tx.nodes {
-        n.rebalance()
-    }
-}
-
-// spill writes all the nodes to dirty pages.
-func (tx *Tx) spill() error {
-    // Keep track of the current root nodes.
-    // We will update this at the end once all nodes are created.
-    type root struct {
-        node *node
-        pgid pgid
-    }
-    var roots []root
-
-    // Sort nodes by highest depth first.
-    nodes := make(nodesByDepth, 0, len(tx.nodes))
-    for _, n := range tx.nodes {
-        nodes = append(nodes, n)
-    }
-    sort.Sort(nodes)
-
-    // Spill nodes by deepest first.
-    for i := 0; i < len(nodes); i++ {
-        n := nodes[i]
-
-        // Save existing root buckets for later.
-        if n.parent == nil && n.pgid != 0 {
-            roots = append(roots, root{n, n.pgid})
-        }
-
-        // Split nodes into appropriate sized nodes.
-        // The first node in this list will be a reference to n to preserve ancestry.
-        newNodes := n.split(tx.db.pageSize)
-        tx.pending = newNodes
-
-        // If this is a root node that split then create a parent node.
-        if n.parent == nil && len(newNodes) > 1 {
-            n.parent = &node{tx: tx, isLeaf: false}
-            nodes = append(nodes, n.parent)
-        }
-
-        // Add node's page to the freelist.
-        if n.pgid > 0 {
-            tx.db.freelist.free(tx.id(), tx.page(n.pgid))
-        }
-
-        // Write nodes to dirty pages.
-        for i, newNode := range newNodes {
-            // Allocate contiguous space for the node.
-            p, err := tx.allocate((newNode.size() / tx.db.pageSize) + 1)
-            if err != nil {
-                return err
-            }
-
-            // Write the node to the page.
-            newNode.write(p)
-            newNode.pgid = p.id
-            newNode.parent = n.parent
-
-            // The first node should use the existing entry, other nodes are inserts.
-            var oldKey []byte
-            if i == 0 {
-                oldKey = n.key
-            } else {
-                oldKey = newNode.inodes[0].key
-            }
-
-            // Update the parent entry.
-            if newNode.parent != nil {
-                newNode.parent.put(oldKey, newNode.inodes[0].key, nil, newNode.pgid)
-            }
-
-            // Update the statistics.
-            tx.stats.Spill++
-        }
-
-        tx.pending = nil
-    }
-
-    // Update roots with new roots.
-    for _, root := range roots {
-        tx.buckets.updateRoot(root.pgid, root.node.root().pgid)
-    }
-
-    // Clear out nodes now that they are all spilled.
-    tx.nodes = make(map[pgid]*node)
-
-    return nil
-}
-
 // write writes any dirty pages to disk.
 func (tx *Tx) write() error {
     // Sort pages by id.
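Note: the node cache and the rebalance/spill machinery removed here presumably move into Bucket, since Commit now calls tx.root.rebalance() and tx.root.spill(). A rough sketch of the relocated rebalance under that assumption (the bucket.go half of this commit is not shown on this page):

// Sketch only: a bucket that owns the node cache formerly held by Tx.
type bucketSketch struct {
    nodes map[pgid]*node // per-bucket cache of materialized nodes
}

// rebalance attempts to balance all cached nodes, as tx.rebalance did.
func (b *bucketSketch) rebalance() {
    for _, n := range b.nodes {
        n.rebalance()
    }
}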
@@ -443,43 +279,6 @@ func (tx *Tx) writeMeta() error {
     return nil
 }
 
-// node creates a node from a page and associates it with a given parent.
-func (tx *Tx) node(pgid pgid, parent *node) *node {
-    // Retrieve node if it's already been created.
-    if tx.nodes == nil {
-        return nil
-    } else if n := tx.nodes[pgid]; n != nil {
-        return n
-    }
-
-    // Otherwise create a branch and cache it.
-    n := &node{tx: tx, parent: parent}
-    if n.parent != nil {
-        n.depth = n.parent.depth + 1
-    }
-    n.read(tx.page(pgid))
-    tx.nodes[pgid] = n
-
-    // Update statistics.
-    tx.stats.NodeCount++
-
-    return n
-}
-
-// dereference removes all references to the old mmap.
-func (tx *Tx) dereference() {
-    for _, n := range tx.nodes {
-        n.dereference()
-    }
-
-    for _, n := range tx.pending {
-        n.dereference()
-    }
-
-    // Update statistics
-    tx.stats.NodeDeref += len(tx.nodes) + len(tx.pending)
-}
-
 // page returns a reference to the page with a given id.
 // If page has been written to then a temporary bufferred page is returned.
 func (tx *Tx) page(id pgid) *page {
@@ -494,17 +293,6 @@ func (tx *Tx) page(id pgid) *page {
     return tx.db.page(id)
 }
 
-// pageNode returns the in-memory node, if it exists.
-// Otherwise returns the underlying page.
-func (tx *Tx) pageNode(id pgid) (*page, *node) {
-    if tx.nodes != nil {
-        if n := tx.nodes[id]; n != nil {
-            return nil, n
-        }
-    }
-    return tx.page(id), nil
-}
-
 // forEachPage iterates over every page within a given page and executes a function.
 func (tx *Tx) forEachPage(pgid pgid, depth int, fn func(*page, int)) {
     p := tx.page(pgid)
@@ -533,7 +321,7 @@ func (tx *Tx) Page(id int) (*PageInfo, error) {
     }
 
     // Build the page info.
-    p := tx.page(pgid(id))
+    p := tx.db.page(pgid(id))
     info := &PageInfo{
         ID: id,
         Count: int(p.count),
tx_test.go (413 changed lines)

@@ -5,27 +5,25 @@ import (
     "fmt"
     "math/rand"
     "os"
-    "sort"
     "strconv"
     "strings"
     "testing"
-    "testing/quick"
 
     "github.com/stretchr/testify/assert"
 )
 
 // Ensure that committing a closed transaction returns an error.
-func TestTxCommitClosed(t *testing.T) {
+func TestTx_Commit_Closed(t *testing.T) {
     withOpenDB(func(db *DB, path string) {
         tx, _ := db.Begin(true)
-        tx.CreateBucket("foo")
+        tx.CreateBucket([]byte("foo"))
         assert.NoError(t, tx.Commit())
         assert.Equal(t, tx.Commit(), ErrTxClosed)
     })
 }
 
 // Ensure that rolling back a closed transaction returns an error.
-func TestTxRollbackClosed(t *testing.T) {
+func TestTx_Rollback_Closed(t *testing.T) {
     withOpenDB(func(db *DB, path string) {
         tx, _ := db.Begin(true)
         assert.NoError(t, tx.Rollback())
@@ -34,114 +32,69 @@ func TestTxRollbackClosed(t *testing.T) {
 }
 
 // Ensure that committing a read-only transaction returns an error.
-func TestTxCommitReadOnly(t *testing.T) {
+func TestTx_Commit_ReadOnly(t *testing.T) {
     withOpenDB(func(db *DB, path string) {
         tx, _ := db.Begin(false)
         assert.Equal(t, tx.Commit(), ErrTxNotWritable)
     })
 }
 
-// Ensure that the database can retrieve a list of buckets.
-func TestTxBuckets(t *testing.T) {
-    withOpenDB(func(db *DB, path string) {
-        db.Update(func(tx *Tx) error {
-            tx.CreateBucket("foo")
-            tx.CreateBucket("bar")
-            tx.CreateBucket("baz")
-            buckets := tx.Buckets()
-            if assert.Equal(t, len(buckets), 3) {
-                assert.Equal(t, buckets[0].Name(), "bar")
-                assert.Equal(t, buckets[1].Name(), "baz")
-                assert.Equal(t, buckets[2].Name(), "foo")
-            }
-            return nil
-        })
-    })
-}
-
 // Ensure that creating a bucket with a read-only transaction returns an error.
-func TestTxCreateBucketReadOnly(t *testing.T) {
+func TestTx_CreateBucket_ReadOnly(t *testing.T) {
     withOpenDB(func(db *DB, path string) {
         db.View(func(tx *Tx) error {
-            assert.Equal(t, tx.CreateBucket("foo"), ErrTxNotWritable)
+            assert.Equal(t, tx.CreateBucket([]byte("foo")), ErrTxNotWritable)
             return nil
         })
     })
 }
 
 // Ensure that creating a bucket on a closed transaction returns an error.
-func TestTxCreateBucketClosed(t *testing.T) {
+func TestTx_CreateBucket_Closed(t *testing.T) {
     withOpenDB(func(db *DB, path string) {
         tx, _ := db.Begin(true)
         tx.Commit()
-        assert.Equal(t, tx.CreateBucket("foo"), ErrTxClosed)
+        assert.Equal(t, tx.CreateBucket([]byte("foo")), ErrTxClosed)
     })
 }
 
 // Ensure that a Tx can retrieve a bucket.
-func TestTxBucket(t *testing.T) {
+func TestTx_Bucket(t *testing.T) {
     withOpenDB(func(db *DB, path string) {
         db.Update(func(tx *Tx) error {
-            tx.CreateBucket("widgets")
-            b := tx.Bucket("widgets")
-            if assert.NotNil(t, b) {
-                assert.Equal(t, "widgets", b.Name())
-            }
+            tx.CreateBucket([]byte("widgets"))
+            b := tx.Bucket([]byte("widgets"))
+            assert.NotNil(t, b)
             return nil
         })
     })
 }
 
 // Ensure that a Tx retrieving a non-existent key returns nil.
-func TestTxGetMissing(t *testing.T) {
+func TestTx_Get_Missing(t *testing.T) {
     withOpenDB(func(db *DB, path string) {
         db.Update(func(tx *Tx) error {
-            tx.CreateBucket("widgets")
-            tx.Bucket("widgets").Put([]byte("foo"), []byte("bar"))
-            value := tx.Bucket("widgets").Get([]byte("no_such_key"))
+            tx.CreateBucket([]byte("widgets"))
+            tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar"))
+            value := tx.Bucket([]byte("widgets")).Get([]byte("no_such_key"))
             assert.Nil(t, value)
             return nil
        })
    })
 }
 
-// Ensure that retrieving all buckets returns writable buckets.
-func TestTxWritableBuckets(t *testing.T) {
-    withOpenDB(func(db *DB, path string) {
-        db.Update(func(tx *Tx) error {
-            tx.CreateBucket("widgets")
-            tx.CreateBucket("woojits")
-            return nil
-        })
-        db.Update(func(tx *Tx) error {
-            buckets := tx.Buckets()
-            assert.Equal(t, len(buckets), 2)
-            assert.Equal(t, buckets[0].Name(), "widgets")
-            assert.Equal(t, buckets[1].Name(), "woojits")
-            buckets[0].Put([]byte("foo"), []byte("0000"))
-            buckets[1].Put([]byte("bar"), []byte("0001"))
-            return nil
-        })
-        db.View(func(tx *Tx) error {
-            assert.Equal(t, []byte("0000"), tx.Bucket("widgets").Get([]byte("foo")))
-            assert.Equal(t, []byte("0001"), tx.Bucket("woojits").Get([]byte("bar")))
-            return nil
-        })
-    })
-}
-
 // Ensure that a bucket can be created and retrieved.
-func TestTxCreateBucket(t *testing.T) {
+func TestTx_CreateBucket(t *testing.T) {
     withOpenDB(func(db *DB, path string) {
         // Create a bucket.
         db.Update(func(tx *Tx) error {
-            assert.NoError(t, tx.CreateBucket("widgets"))
+            assert.NoError(t, tx.CreateBucket([]byte("widgets")))
             return nil
         })
 
         // Read the bucket through a separate transaction.
         db.View(func(tx *Tx) error {
-            b := tx.Bucket("widgets")
+            b := tx.Bucket([]byte("widgets"))
             assert.NotNil(t, b)
             return nil
         })
@@ -149,18 +102,19 @@ func TestTxCreateBucket(t *testing.T) {
 }
 
 // Ensure that a bucket can be created if it doesn't already exist.
-func TestTxCreateBucketIfNotExists(t *testing.T) {
+func TestTx_CreateBucketIfNotExists(t *testing.T) {
     withOpenDB(func(db *DB, path string) {
         db.Update(func(tx *Tx) error {
-            assert.NoError(t, tx.CreateBucketIfNotExists("widgets"))
-            assert.NoError(t, tx.CreateBucketIfNotExists("widgets"))
-            assert.Equal(t, tx.CreateBucketIfNotExists(""), ErrBucketNameRequired)
+            assert.NoError(t, tx.CreateBucketIfNotExists([]byte("widgets")))
+            assert.NoError(t, tx.CreateBucketIfNotExists([]byte("widgets")))
+            assert.Equal(t, ErrBucketNameRequired, tx.CreateBucketIfNotExists([]byte{}))
+            assert.Equal(t, ErrBucketNameRequired, tx.CreateBucketIfNotExists(nil))
             return nil
         })
 
         // Read the bucket through a separate transaction.
         db.View(func(tx *Tx) error {
-            b := tx.Bucket("widgets")
+            b := tx.Bucket([]byte("widgets"))
             assert.NotNil(t, b)
             return nil
         })
@@ -168,64 +122,53 @@ func TestTxCreateBucketIfNotExists(t *testing.T) {
 }
 
 // Ensure that a bucket cannot be created twice.
-func TestTxRecreateBucket(t *testing.T) {
+func TestTx_CreateBucket_Exists(t *testing.T) {
     withOpenDB(func(db *DB, path string) {
         // Create a bucket.
         db.Update(func(tx *Tx) error {
-            assert.NoError(t, tx.CreateBucket("widgets"))
+            assert.NoError(t, tx.CreateBucket([]byte("widgets")))
             return nil
         })
 
         // Create the same bucket again.
         db.Update(func(tx *Tx) error {
-            assert.Equal(t, ErrBucketExists, tx.CreateBucket("widgets"))
+            assert.Equal(t, ErrBucketExists, tx.CreateBucket([]byte("widgets")))
             return nil
         })
     })
 }
 
 // Ensure that a bucket is created with a non-blank name.
-func TestTxCreateBucketWithoutName(t *testing.T) {
+func TestTx_CreateBucket_NameRequired(t *testing.T) {
     withOpenDB(func(db *DB, path string) {
         db.Update(func(tx *Tx) error {
-            assert.Equal(t, ErrBucketNameRequired, tx.CreateBucket(""))
-            return nil
-        })
-    })
-}
-
-// Ensure that a bucket name is not too long.
-func TestTxCreateBucketWithLongName(t *testing.T) {
-    withOpenDB(func(db *DB, path string) {
-        db.Update(func(tx *Tx) error {
-            assert.NoError(t, tx.CreateBucket(strings.Repeat("X", 255)))
-            assert.Equal(t, ErrBucketNameTooLarge, tx.CreateBucket(strings.Repeat("X", 256)))
+            assert.Equal(t, ErrBucketNameRequired, tx.CreateBucket(nil))
             return nil
         })
     })
 }
 
 // Ensure that a bucket can be deleted.
-func TestTxDeleteBucket(t *testing.T) {
+func TestTx_DeleteBucket(t *testing.T) {
     withOpenDB(func(db *DB, path string) {
         // Create a bucket and add a value.
         db.Update(func(tx *Tx) error {
-            tx.CreateBucket("widgets")
-            tx.Bucket("widgets").Put([]byte("foo"), []byte("bar"))
+            tx.CreateBucket([]byte("widgets"))
+            tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar"))
             return nil
         })
 
         // Save root page id.
         var root pgid
         db.View(func(tx *Tx) error {
-            root = tx.Bucket("widgets").root
+            root = tx.Bucket([]byte("widgets")).root
             return nil
         })
 
         // Delete the bucket and make sure we can't get the value.
         db.Update(func(tx *Tx) error {
-            assert.NoError(t, tx.DeleteBucket("widgets"))
-            assert.Nil(t, tx.Bucket("widgets"))
+            assert.NoError(t, tx.DeleteBucket([]byte("widgets")))
+            assert.Nil(t, tx.Bucket([]byte("widgets")))
            return nil
        })
 
@@ -234,257 +177,42 @@ func TestTxDeleteBucket(t *testing.T) {
         assert.Equal(t, []pgid{7, 6, root, 2}, db.freelist.all())
 
         // Create the bucket again and make sure there's not a phantom value.
-        assert.NoError(t, tx.CreateBucket("widgets"))
-        assert.Nil(t, tx.Bucket("widgets").Get([]byte("foo")))
+        assert.NoError(t, tx.CreateBucket([]byte("widgets")))
+        assert.Nil(t, tx.Bucket([]byte("widgets")).Get([]byte("foo")))
         return nil
         })
     })
 }
 
 // Ensure that deleting a bucket on a closed transaction returns an error.
-func TestTxDeleteBucketClosed(t *testing.T) {
+func TestTx_DeleteBucket_Closed(t *testing.T) {
     withOpenDB(func(db *DB, path string) {
         tx, _ := db.Begin(true)
         tx.Commit()
-        assert.Equal(t, tx.DeleteBucket("foo"), ErrTxClosed)
+        assert.Equal(t, tx.DeleteBucket([]byte("foo")), ErrTxClosed)
     })
 }
 
 // Ensure that deleting a bucket with a read-only transaction returns an error.
-func TestTxDeleteBucketReadOnly(t *testing.T) {
+func TestTx_DeleteBucket_ReadOnly(t *testing.T) {
     withOpenDB(func(db *DB, path string) {
         db.View(func(tx *Tx) error {
-            assert.Equal(t, tx.DeleteBucket("foo"), ErrTxNotWritable)
+            assert.Equal(t, tx.DeleteBucket([]byte("foo")), ErrTxNotWritable)
             return nil
         })
     })
 }
 
-// Ensure that an error is returned when deleting from a bucket that doesn't exist.
-func TestTxDeleteBucketNotFound(t *testing.T) {
+// Ensure that nothing happens when deleting a bucket that doesn't exist.
+func TestTx_DeleteBucket_NotFound(t *testing.T) {
     withOpenDB(func(db *DB, path string) {
         db.Update(func(tx *Tx) error {
-            assert.Equal(t, ErrBucketNotFound, tx.DeleteBucket("widgets"))
+            assert.Equal(t, ErrBucketNotFound, tx.DeleteBucket([]byte("widgets")))
             return nil
         })
     })
 }
 
-// Ensure that a Tx cursor can iterate over an empty bucket without error.
-func TestTxCursorEmptyBucket(t *testing.T) {
-    withOpenDB(func(db *DB, path string) {
-        db.Update(func(tx *Tx) error {
-            return tx.CreateBucket("widgets")
-        })
-        db.View(func(tx *Tx) error {
-            c := tx.Bucket("widgets").Cursor()
-            k, v := c.First()
-            assert.Nil(t, k)
-            assert.Nil(t, v)
-            return nil
-        })
-    })
-}
-
-// Ensure that a Tx cursor can reverse iterate over an empty bucket without error.
-func TestCursorEmptyBucketReverse(t *testing.T) {
-    withOpenDB(func(db *DB, path string) {
-        db.Update(func(tx *Tx) error {
-            return tx.CreateBucket("widgets")
-        })
-        db.View(func(tx *Tx) error {
-            c := tx.Bucket("widgets").Cursor()
-            k, v := c.Last()
-            assert.Nil(t, k)
-            assert.Nil(t, v)
-            return nil
-        })
-    })
-}
-
-// Ensure that a Tx cursor can iterate over a single root with a couple elements.
-func TestTxCursorLeafRoot(t *testing.T) {
-    withOpenDB(func(db *DB, path string) {
-        db.Update(func(tx *Tx) error {
-            tx.CreateBucket("widgets")
-            tx.Bucket("widgets").Put([]byte("baz"), []byte{})
-            tx.Bucket("widgets").Put([]byte("foo"), []byte{0})
-            tx.Bucket("widgets").Put([]byte("bar"), []byte{1})
-            return nil
-        })
-        tx, _ := db.Begin(false)
-        c := tx.Bucket("widgets").Cursor()
-
-        k, v := c.First()
-        assert.Equal(t, string(k), "bar")
-        assert.Equal(t, v, []byte{1})
-
-        k, v = c.Next()
-        assert.Equal(t, string(k), "baz")
-        assert.Equal(t, v, []byte{})
-
-        k, v = c.Next()
-        assert.Equal(t, string(k), "foo")
-        assert.Equal(t, v, []byte{0})
-
-        k, v = c.Next()
-        assert.Nil(t, k)
-        assert.Nil(t, v)
-
-        k, v = c.Next()
-        assert.Nil(t, k)
-        assert.Nil(t, v)
-
-        tx.Rollback()
-    })
-}
-
-// Ensure that a Tx cursor can iterate in reverse over a single root with a couple elements.
-func TestTxCursorLeafRootReverse(t *testing.T) {
-    withOpenDB(func(db *DB, path string) {
-        db.Update(func(tx *Tx) error {
-            tx.CreateBucket("widgets")
-            tx.Bucket("widgets").Put([]byte("baz"), []byte{})
-            tx.Bucket("widgets").Put([]byte("foo"), []byte{0})
-            tx.Bucket("widgets").Put([]byte("bar"), []byte{1})
-            return nil
-        })
-        tx, _ := db.Begin(false)
-        c := tx.Bucket("widgets").Cursor()
-
-        k, v := c.Last()
-        assert.Equal(t, string(k), "foo")
-        assert.Equal(t, v, []byte{0})
-
-        k, v = c.Prev()
-        assert.Equal(t, string(k), "baz")
-        assert.Equal(t, v, []byte{})
-
-        k, v = c.Prev()
-        assert.Equal(t, string(k), "bar")
-        assert.Equal(t, v, []byte{1})
-
-        k, v = c.Prev()
-        assert.Nil(t, k)
-        assert.Nil(t, v)
-
-        k, v = c.Prev()
-        assert.Nil(t, k)
-        assert.Nil(t, v)
-
-        tx.Rollback()
-    })
-}
-
-// Ensure that a Tx cursor can restart from the beginning.
-func TestTxCursorRestart(t *testing.T) {
-    withOpenDB(func(db *DB, path string) {
-        db.Update(func(tx *Tx) error {
-            tx.CreateBucket("widgets")
-            tx.Bucket("widgets").Put([]byte("bar"), []byte{})
-            tx.Bucket("widgets").Put([]byte("foo"), []byte{})
-            return nil
-        })
-
-        tx, _ := db.Begin(false)
-        c := tx.Bucket("widgets").Cursor()
-
-        k, _ := c.First()
-        assert.Equal(t, string(k), "bar")
-
-        k, _ = c.Next()
-        assert.Equal(t, string(k), "foo")
-
-        k, _ = c.First()
-        assert.Equal(t, string(k), "bar")
-
-        k, _ = c.Next()
-        assert.Equal(t, string(k), "foo")
-
-        tx.Rollback()
-    })
-}
-
-// Ensure that a Tx can iterate over all elements in a bucket.
-func TestTxCursorIterate(t *testing.T) {
-    if testing.Short() {
-        t.Skip("skipping test in short mode.")
-    }
-
-    f := func(items testdata) bool {
-        withOpenDB(func(db *DB, path string) {
-            // Bulk insert all values.
-            tx, _ := db.Begin(true)
-            tx.CreateBucket("widgets")
-            b := tx.Bucket("widgets")
-            for _, item := range items {
-                assert.NoError(t, b.Put(item.Key, item.Value))
-            }
-            assert.NoError(t, tx.Commit())
-
-            // Sort test data.
-            sort.Sort(items)
-
-            // Iterate over all items and check consistency.
-            var index = 0
-            tx, _ = db.Begin(false)
-            c := tx.Bucket("widgets").Cursor()
-            for k, v := c.First(); k != nil && index < len(items); k, v = c.Next() {
-                assert.Equal(t, k, items[index].Key)
-                assert.Equal(t, v, items[index].Value)
-                index++
-            }
-            assert.Equal(t, len(items), index)
-            tx.Rollback()
-        })
-        return true
-    }
-    if err := quick.Check(f, qconfig()); err != nil {
-        t.Error(err)
-    }
-    fmt.Fprint(os.Stderr, "\n")
-}
-
-// Ensure that a transaction can iterate over all elements in a bucket in reverse.
-func TestTxCursorIterateReverse(t *testing.T) {
-    if testing.Short() {
-        t.Skip("skipping test in short mode.")
-    }
-
-    f := func(items testdata) bool {
-        withOpenDB(func(db *DB, path string) {
-            // Bulk insert all values.
-            tx, _ := db.Begin(true)
-            tx.CreateBucket("widgets")
-            b := tx.Bucket("widgets")
-            for _, item := range items {
-                assert.NoError(t, b.Put(item.Key, item.Value))
-            }
-            assert.NoError(t, tx.Commit())
-
-            // Sort test data.
-            sort.Sort(revtestdata(items))
-
-            // Iterate over all items and check consistency.
-            var index = 0
-            tx, _ = db.Begin(false)
-            c := tx.Bucket("widgets").Cursor()
-            for k, v := c.Last(); k != nil && index < len(items); k, v = c.Prev() {
-                assert.Equal(t, k, items[index].Key)
-                assert.Equal(t, v, items[index].Value)
-                index++
-            }
-            assert.Equal(t, len(items), index)
-            tx.Rollback()
-        })
-        return true
-    }
-    if err := quick.Check(f, qconfig()); err != nil {
-        t.Error(err)
-    }
-    fmt.Fprint(os.Stderr, "\n")
-}
-
 // Ensure that Tx commit handlers are called after a transaction successfully commits.
 func TestTx_OnCommit(t *testing.T) {
     var x int
@@ -492,7 +220,7 @@ func TestTx_OnCommit(t *testing.T) {
     db.Update(func(tx *Tx) error {
         tx.OnCommit(func() { x += 1 })
         tx.OnCommit(func() { x += 2 })
-        return tx.CreateBucket("widgets")
+        return tx.CreateBucket([]byte("widgets"))
        })
    })
    assert.Equal(t, 3, x)
@@ -505,7 +233,7 @@ func TestTx_OnCommit_Rollback(t *testing.T) {
     db.Update(func(tx *Tx) error {
         tx.OnCommit(func() { x += 1 })
         tx.OnCommit(func() { x += 2 })
-        tx.CreateBucket("widgets")
+        tx.CreateBucket([]byte("widgets"))
         return errors.New("rollback this commit")
        })
    })
@@ -522,8 +250,8 @@ func BenchmarkTxCursor(b *testing.B) {
     withOpenDB(func(db *DB, path string) {
         // Write data to bucket.
         db.Update(func(tx *Tx) error {
-            tx.CreateBucket("widgets")
-            bucket := tx.Bucket("widgets")
+            tx.CreateBucket([]byte("widgets"))
+            bucket := tx.Bucket([]byte("widgets"))
             for i := 0; i < total; i++ {
                 bucket.Put([]byte(fmt.Sprintf("%016d", indexes[i])), value)
             }
@@ -535,7 +263,7 @@ func BenchmarkTxCursor(b *testing.B) {
     for i := 0; i < b.N; i++ {
         db.View(func(tx *Tx) error {
             count := 0
-            c := tx.Bucket("widgets").Cursor()
+            c := tx.Bucket([]byte("widgets")).Cursor()
             for k, _ := c.First(); k != nil; k, _ = c.Next() {
                 count++
             }
@@ -554,7 +282,7 @@ func BenchmarkTxPutRandom(b *testing.B) {
     value := []byte(strings.Repeat("0", 64))
     withOpenDB(func(db *DB, path string) {
         db.Update(func(tx *Tx) error {
-            return tx.CreateBucket("widgets")
+            return tx.CreateBucket([]byte("widgets"))
         })
         var tx *Tx
         var bucket *Bucket
@@ -564,7 +292,7 @@ func BenchmarkTxPutRandom(b *testing.B) {
                 tx.Commit()
             }
             tx, _ = db.Begin(true)
-            bucket = tx.Bucket("widgets")
+            bucket = tx.Bucket([]byte("widgets"))
         }
         bucket.Put([]byte(strconv.Itoa(indexes[i])), value)
     }
@@ -577,10 +305,10 @@ func BenchmarkTxPutSequential(b *testing.B) {
     value := []byte(strings.Repeat("0", 64))
     withOpenDB(func(db *DB, path string) {
         db.Update(func(tx *Tx) error {
-            return tx.CreateBucket("widgets")
+            return tx.CreateBucket([]byte("widgets"))
         })
         db.Update(func(tx *Tx) error {
-            bucket := tx.Bucket("widgets")
+            bucket := tx.Bucket([]byte("widgets"))
             for i := 0; i < b.N; i++ {
                 bucket.Put([]byte(strconv.Itoa(i)), value)
             }
@@ -588,3 +316,36 @@ func BenchmarkTxPutSequential(b *testing.B) {
         })
     })
 }
+
+func ExampleTx_Rollback() {
+    // Open the database.
+    db, _ := Open(tempfile(), 0666)
+    defer os.Remove(db.Path())
+    defer db.Close()
+
+    // Create a bucket.
+    db.Update(func(tx *Tx) error {
+        return tx.CreateBucket([]byte("widgets"))
+    })
+
+    // Set a value for a key.
+    db.Update(func(tx *Tx) error {
+        return tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar"))
+    })
+
+    // Update the key but rollback the transaction so it never saves.
+    tx, _ := db.Begin(true)
+    b := tx.Bucket([]byte("widgets"))
+    b.Put([]byte("foo"), []byte("baz"))
+    tx.Rollback()
+
+    // Ensure that our original value is still set.
+    db.View(func(tx *Tx) error {
+        value := tx.Bucket([]byte("widgets")).Get([]byte("foo"))
+        fmt.Printf("The value for 'foo' is still: %s\n", string(value))
+        return nil
+    })
+
+    // Output:
+    // The value for 'foo' is still: bar
+}
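Note: the diff above covers only tx.go and its tests; the nested-bucket behavior itself comes from the bucket.go half of this commit, which is not shown on this page. A hedged usage sketch, assuming Bucket gained CreateBucket/Bucket methods mirroring the Tx ones shown above:

func ExampleBucket_CreateBucket() {
    db, _ := Open(tempfile(), 0666)
    defer os.Remove(db.Path())
    defer db.Close()

    db.Update(func(tx *Tx) error {
        // Create a bucket, then a bucket inside it.
        if err := tx.CreateBucket([]byte("users")); err != nil {
            return err
        }
        users := tx.Bucket([]byte("users"))
        if err := users.CreateBucket([]byte("alice")); err != nil {
            return err
        }
        return users.Bucket([]byte("alice")).Put([]byte("email"), []byte("alice@example.com"))
    })

    db.View(func(tx *Tx) error {
        v := tx.Bucket([]byte("users")).Bucket([]byte("alice")).Get([]byte("email"))
        fmt.Printf("email: %s\n", v)
        return nil
    })

    // Output:
    // email: alice@example.com
}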