Remove wrapping test closures.

Branch: master
Author: Ben Johnson, 2014-07-26 14:44:04 -06:00
parent 06222e06de
commit ca2339d7cb
5 changed files with 1496 additions and 1461 deletions
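The change is mechanical: each test drops the old `withOpenDB(func(db *DB, path string) { ... })` wrapper and instead opens the database through the new `TestDB` helper, deferring cleanup. A minimal sketch of the pattern, assuming it lives in the bolt package alongside the helpers added in db_test.go below; the test name is illustrative and not part of the commit:

package bolt

import "testing"

// Illustrative only: the shape this commit gives every test.
// Previously the same body would have been wrapped as
//
//	withOpenDB(func(db *DB, path string) {
//		db.Update(func(tx *Tx) error { ... })
//	})
func TestExample_NewStyle(t *testing.T) {
    db := NewTestDB() // opens a Bolt DB backed by a temporary file
    defer db.Close()  // TestDB.Close: optional stats, consistency check, close, remove temp file

    db.Update(func(tx *Tx) error {
        _, err := tx.CreateBucket([]byte("widgets"))
        return err
    })
}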

File diff suppressed because it is too large.


@@ -12,98 +12,99 @@ import (
// Ensure that a cursor can return a reference to the bucket that created it.
func TestCursor_Bucket(t *testing.T) {
    db := NewTestDB()
    defer db.Close()
    db.Update(func(tx *Tx) error {
        b, _ := tx.CreateBucket([]byte("widgets"))
        c := b.Cursor()
        assert.Equal(t, b, c.Bucket())
        return nil
    })
}

// Ensure that a Tx cursor can seek to the appropriate keys.
func TestCursor_Seek(t *testing.T) {
    db := NewTestDB()
    defer db.Close()
    db.Update(func(tx *Tx) error {
        b, err := tx.CreateBucket([]byte("widgets"))
        assert.NoError(t, err)
        assert.NoError(t, b.Put([]byte("foo"), []byte("0001")))
        assert.NoError(t, b.Put([]byte("bar"), []byte("0002")))
        assert.NoError(t, b.Put([]byte("baz"), []byte("0003")))
        _, err = b.CreateBucket([]byte("bkt"))
        assert.NoError(t, err)
        return nil
    })
    db.View(func(tx *Tx) error {
        c := tx.Bucket([]byte("widgets")).Cursor()

        // Exact match should go to the key.
        k, v := c.Seek([]byte("bar"))
        assert.Equal(t, []byte("bar"), k)
        assert.Equal(t, []byte("0002"), v)

        // Inexact match should go to the next key.
        k, v = c.Seek([]byte("bas"))
        assert.Equal(t, []byte("baz"), k)
        assert.Equal(t, []byte("0003"), v)

        // Low key should go to the first key.
        k, v = c.Seek([]byte(""))
        assert.Equal(t, []byte("bar"), k)
        assert.Equal(t, []byte("0002"), v)

        // High key should return no key.
        k, v = c.Seek([]byte("zzz"))
        assert.Nil(t, k)
        assert.Nil(t, v)

        // Buckets should return their key but no value.
        k, v = c.Seek([]byte("bkt"))
        assert.Equal(t, []byte("bkt"), k)
        assert.Nil(t, v)

        return nil
    })
}

func TestCursor_Delete(t *testing.T) {
    db := NewTestDB()
    defer db.Close()

    var count = 1000

    // Insert every other key between 0 and $count.
    db.Update(func(tx *Tx) error {
        b, _ := tx.CreateBucket([]byte("widgets"))
        for i := 0; i < count; i += 1 {
            k := make([]byte, 8)
            binary.BigEndian.PutUint64(k, uint64(i))
            b.Put(k, make([]byte, 100))
        }
        b.CreateBucket([]byte("sub"))
        return nil
    })
    db.Update(func(tx *Tx) error {
        c := tx.Bucket([]byte("widgets")).Cursor()
        bound := make([]byte, 8)
        binary.BigEndian.PutUint64(bound, uint64(count/2))
        for key, _ := c.First(); bytes.Compare(key, bound) < 0; key, _ = c.Next() {
            if err := c.Delete(); err != nil {
                return err
            }
        }
        c.Seek([]byte("sub"))
        err := c.Delete()
        assert.Equal(t, err, ErrIncompatibleValue)
        return nil
    })
    db.View(func(tx *Tx) error {
        b := tx.Bucket([]byte("widgets"))
        assert.Equal(t, b.Stats().KeyN, count/2+1)
        return nil
    })
}
@@ -113,216 +114,223 @@ func TestCursor_Delete(t *testing.T) {
//
// Related: https://github.com/boltdb/bolt/pull/187
func TestCursor_Seek_Large(t *testing.T) {
    db := NewTestDB()
    defer db.Close()

    var count = 10000

    // Insert every other key between 0 and $count.
    db.Update(func(tx *Tx) error {
        b, _ := tx.CreateBucket([]byte("widgets"))
        for i := 0; i < count; i += 100 {
            for j := i; j < i+100; j += 2 {
                k := make([]byte, 8)
                binary.BigEndian.PutUint64(k, uint64(j))
                b.Put(k, make([]byte, 100))
            }
        }
        return nil
    })
    db.View(func(tx *Tx) error {
        c := tx.Bucket([]byte("widgets")).Cursor()
        for i := 0; i < count; i++ {
            seek := make([]byte, 8)
            binary.BigEndian.PutUint64(seek, uint64(i))

            k, _ := c.Seek(seek)

            // The last seek is beyond the end of the range so
            // it should return nil.
            if i == count-1 {
                assert.Nil(t, k)
                continue
            }

            // Otherwise we should seek to the exact key or the next key.
            num := binary.BigEndian.Uint64(k)
            if i%2 == 0 {
                assert.Equal(t, uint64(i), num)
            } else {
                assert.Equal(t, uint64(i+1), num)
            }
        }
        return nil
    })
}

// Ensure that a cursor can iterate over an empty bucket without error.
func TestCursor_EmptyBucket(t *testing.T) {
    db := NewTestDB()
    defer db.Close()
    db.Update(func(tx *Tx) error {
        _, err := tx.CreateBucket([]byte("widgets"))
        return err
    })
    db.View(func(tx *Tx) error {
        c := tx.Bucket([]byte("widgets")).Cursor()
        k, v := c.First()
        assert.Nil(t, k)
        assert.Nil(t, v)
        return nil
    })
}

// Ensure that a Tx cursor can reverse iterate over an empty bucket without error.
func TestCursor_EmptyBucketReverse(t *testing.T) {
    db := NewTestDB()
    defer db.Close()
    db.Update(func(tx *Tx) error {
        _, err := tx.CreateBucket([]byte("widgets"))
        return err
    })
    db.View(func(tx *Tx) error {
        c := tx.Bucket([]byte("widgets")).Cursor()
        k, v := c.Last()
        assert.Nil(t, k)
        assert.Nil(t, v)
        return nil
    })
}

// Ensure that a Tx cursor can iterate over a single root with a couple elements.
func TestCursor_Iterate_Leaf(t *testing.T) {
    db := NewTestDB()
    defer db.Close()
    db.Update(func(tx *Tx) error {
        tx.CreateBucket([]byte("widgets"))
        tx.Bucket([]byte("widgets")).Put([]byte("baz"), []byte{})
        tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte{0})
        tx.Bucket([]byte("widgets")).Put([]byte("bar"), []byte{1})
        return nil
    })
    tx, _ := db.Begin(false)
    c := tx.Bucket([]byte("widgets")).Cursor()

    k, v := c.First()
    assert.Equal(t, string(k), "bar")
    assert.Equal(t, v, []byte{1})

    k, v = c.Next()
    assert.Equal(t, string(k), "baz")
    assert.Equal(t, v, []byte{})

    k, v = c.Next()
    assert.Equal(t, string(k), "foo")
    assert.Equal(t, v, []byte{0})

    k, v = c.Next()
    assert.Nil(t, k)
    assert.Nil(t, v)

    k, v = c.Next()
    assert.Nil(t, k)
    assert.Nil(t, v)

    tx.Rollback()
}

// Ensure that a Tx cursor can iterate in reverse over a single root with a couple elements.
func TestCursor_LeafRootReverse(t *testing.T) {
    db := NewTestDB()
    defer db.Close()
    db.Update(func(tx *Tx) error {
        tx.CreateBucket([]byte("widgets"))
        tx.Bucket([]byte("widgets")).Put([]byte("baz"), []byte{})
        tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte{0})
        tx.Bucket([]byte("widgets")).Put([]byte("bar"), []byte{1})
        return nil
    })
    tx, _ := db.Begin(false)
    c := tx.Bucket([]byte("widgets")).Cursor()

    k, v := c.Last()
    assert.Equal(t, string(k), "foo")
    assert.Equal(t, v, []byte{0})

    k, v = c.Prev()
    assert.Equal(t, string(k), "baz")
    assert.Equal(t, v, []byte{})

    k, v = c.Prev()
    assert.Equal(t, string(k), "bar")
    assert.Equal(t, v, []byte{1})

    k, v = c.Prev()
    assert.Nil(t, k)
    assert.Nil(t, v)

    k, v = c.Prev()
    assert.Nil(t, k)
    assert.Nil(t, v)

    tx.Rollback()
}

// Ensure that a Tx cursor can restart from the beginning.
func TestCursor_Restart(t *testing.T) {
    db := NewTestDB()
    defer db.Close()
    db.Update(func(tx *Tx) error {
        tx.CreateBucket([]byte("widgets"))
        tx.Bucket([]byte("widgets")).Put([]byte("bar"), []byte{})
        tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte{})
        return nil
    })

    tx, _ := db.Begin(false)
    c := tx.Bucket([]byte("widgets")).Cursor()

    k, _ := c.First()
    assert.Equal(t, string(k), "bar")

    k, _ = c.Next()
    assert.Equal(t, string(k), "foo")

    k, _ = c.First()
    assert.Equal(t, string(k), "bar")

    k, _ = c.Next()
    assert.Equal(t, string(k), "foo")

    tx.Rollback()
}

// Ensure that a Tx can iterate over all elements in a bucket.
func TestCursor_QuickCheck(t *testing.T) {
    f := func(items testdata) bool {
        db := NewTestDB()
        defer db.Close()

        // Bulk insert all values.
        tx, _ := db.Begin(true)
        tx.CreateBucket([]byte("widgets"))
        b := tx.Bucket([]byte("widgets"))
        for _, item := range items {
            assert.NoError(t, b.Put(item.Key, item.Value))
        }
        assert.NoError(t, tx.Commit())

        // Sort test data.
        sort.Sort(items)

        // Iterate over all items and check consistency.
        var index = 0
        tx, _ = db.Begin(false)
        c := tx.Bucket([]byte("widgets")).Cursor()
        for k, v := c.First(); k != nil && index < len(items); k, v = c.Next() {
            assert.Equal(t, k, items[index].Key)
            assert.Equal(t, v, items[index].Value)
            index++
        }
        assert.Equal(t, len(items), index)
        tx.Rollback()

        return true
    }
    if err := quick.Check(f, qconfig()); err != nil {
@@ -333,31 +341,33 @@ func TestCursor_QuickCheck(t *testing.T) {
// Ensure that a transaction can iterate over all elements in a bucket in reverse.
func TestCursor_QuickCheck_Reverse(t *testing.T) {
    f := func(items testdata) bool {
        db := NewTestDB()
        defer db.Close()

        // Bulk insert all values.
        tx, _ := db.Begin(true)
        tx.CreateBucket([]byte("widgets"))
        b := tx.Bucket([]byte("widgets"))
        for _, item := range items {
            assert.NoError(t, b.Put(item.Key, item.Value))
        }
        assert.NoError(t, tx.Commit())

        // Sort test data.
        sort.Sort(revtestdata(items))

        // Iterate over all items and check consistency.
        var index = 0
        tx, _ = db.Begin(false)
        c := tx.Bucket([]byte("widgets")).Cursor()
        for k, v := c.Last(); k != nil && index < len(items); k, v = c.Prev() {
            assert.Equal(t, k, items[index].Key)
            assert.Equal(t, v, items[index].Value)
            index++
        }
        assert.Equal(t, len(items), index)
        tx.Rollback()

        return true
    }
    if err := quick.Check(f, qconfig()); err != nil {
@@ -367,54 +377,56 @@ func TestCursor_QuickCheck_Reverse(t *testing.T) {
// Ensure that a Tx cursor can iterate over subbuckets.
func TestCursor_QuickCheck_BucketsOnly(t *testing.T) {
    db := NewTestDB()
    defer db.Close()

    db.Update(func(tx *Tx) error {
        b, err := tx.CreateBucket([]byte("widgets"))
        assert.NoError(t, err)
        _, err = b.CreateBucket([]byte("foo"))
        assert.NoError(t, err)
        _, err = b.CreateBucket([]byte("bar"))
        assert.NoError(t, err)
        _, err = b.CreateBucket([]byte("baz"))
        assert.NoError(t, err)
        return nil
    })
    db.View(func(tx *Tx) error {
        var names []string
        c := tx.Bucket([]byte("widgets")).Cursor()
        for k, v := c.First(); k != nil; k, v = c.Next() {
            names = append(names, string(k))
            assert.Nil(t, v)
        }
        assert.Equal(t, names, []string{"bar", "baz", "foo"})
        return nil
    })
}

// Ensure that a Tx cursor can reverse iterate over subbuckets.
func TestCursor_QuickCheck_BucketsOnly_Reverse(t *testing.T) {
    db := NewTestDB()
    defer db.Close()

    db.Update(func(tx *Tx) error {
        b, err := tx.CreateBucket([]byte("widgets"))
        assert.NoError(t, err)
        _, err = b.CreateBucket([]byte("foo"))
        assert.NoError(t, err)
        _, err = b.CreateBucket([]byte("bar"))
        assert.NoError(t, err)
        _, err = b.CreateBucket([]byte("baz"))
        assert.NoError(t, err)
        return nil
    })
    db.View(func(tx *Tx) error {
        var names []string
        c := tx.Bucket([]byte("widgets")).Cursor()
        for k, v := c.Last(); k != nil; k, v = c.Prev() {
            names = append(names, string(k))
            assert.Nil(t, v)
        }
        assert.Equal(t, names, []string{"foo", "baz", "bar"})
        return nil
    })
}


@@ -206,14 +206,14 @@ func TestDB_Begin_DatabaseNotOpen(t *testing.T) {
// Ensure that a read-write transaction can be retrieved.
func TestDB_BeginRW(t *testing.T) {
    db := NewTestDB()
    defer db.Close()
    tx, err := db.Begin(true)
    assert.NotNil(t, tx)
    assert.NoError(t, err)
    assert.Equal(t, tx.DB(), db.DB)
    assert.Equal(t, tx.Writable(), true)
    assert.NoError(t, tx.Commit())
}
// Ensure that opening a transaction while the DB is closed returns an error.
@@ -226,23 +226,23 @@ func TestDB_BeginRW_Closed(t *testing.T) {
// Ensure a database can provide a transactional block.
func TestDB_Update(t *testing.T) {
    db := NewTestDB()
    defer db.Close()
    err := db.Update(func(tx *Tx) error {
        tx.CreateBucket([]byte("widgets"))
        b := tx.Bucket([]byte("widgets"))
        b.Put([]byte("foo"), []byte("bar"))
        b.Put([]byte("baz"), []byte("bat"))
        b.Delete([]byte("foo"))
        return nil
    })
    assert.NoError(t, err)
    err = db.View(func(tx *Tx) error {
        assert.Nil(t, tx.Bucket([]byte("widgets")).Get([]byte("foo")))
        assert.Equal(t, []byte("bat"), tx.Bucket([]byte("widgets")).Get([]byte("baz")))
        return nil
    })
    assert.NoError(t, err)
}
// Ensure a closed database returns an error while running a transaction block
@@ -273,69 +273,70 @@ func TestDB_Update_ManualCommitAndRollback(t *testing.T) {
// Ensure a write transaction that panics does not hold open locks.
func TestDB_Update_Panic(t *testing.T) {
    db := NewTestDB()
    defer db.Close()

    func() {
        defer func() {
            if r := recover(); r != nil {
                warn("recover: update", r)
            }
        }()
        db.Update(func(tx *Tx) error {
            tx.CreateBucket([]byte("widgets"))
            panic("omg")
        })
    }()

    // Verify we can update again.
    err := db.Update(func(tx *Tx) error {
        _, err := tx.CreateBucket([]byte("widgets"))
        return err
    })
    assert.NoError(t, err)

    // Verify that our change persisted.
    err = db.Update(func(tx *Tx) error {
        assert.NotNil(t, tx.Bucket([]byte("widgets")))
        return nil
    })
}

// Ensure a database can return an error through a read-only transactional block.
func TestDB_View_Error(t *testing.T) {
    db := NewTestDB()
    defer db.Close()
    err := db.View(func(tx *Tx) error {
        return errors.New("xxx")
    })
    assert.Equal(t, errors.New("xxx"), err)
}

// Ensure a read transaction that panics does not hold open locks.
func TestDB_View_Panic(t *testing.T) {
    db := NewTestDB()
    defer db.Close()
    db.Update(func(tx *Tx) error {
        tx.CreateBucket([]byte("widgets"))
        return nil
    })

    func() {
        defer func() {
            if r := recover(); r != nil {
                warn("recover: view", r)
            }
        }()
        db.View(func(tx *Tx) error {
            assert.NotNil(t, tx.Bucket([]byte("widgets")))
            panic("omg")
        })
    }()

    // Verify that we can still use read transactions.
    db.View(func(tx *Tx) error {
        assert.NotNil(t, tx.Bucket([]byte("widgets")))
        return nil
    })
}
@@ -346,16 +347,16 @@ func TestDB_Commit_WriteFail(t *testing.T) {
// Ensure that DB stats can be returned.
func TestDB_Stats(t *testing.T) {
    db := NewTestDB()
    defer db.Close()
    db.Update(func(tx *Tx) error {
        _, err := tx.CreateBucket([]byte("widgets"))
        return err
    })
    stats := db.Stats()
    assert.Equal(t, 2, stats.TxStats.PageCount, "PageCount")
    assert.Equal(t, 0, stats.FreePageN, "FreePageN")
    assert.Equal(t, 2, stats.PendingPageN, "PendingPageN")
}
// Ensure that the mmap grows appropriately.
@@ -373,41 +374,41 @@ func TestDB_mmapSize(t *testing.T) {
// Ensure that database pages are in expected order and type.
func TestDB_Consistency(t *testing.T) {
    db := NewTestDB()
    defer db.Close()
    db.Update(func(tx *Tx) error {
        _, err := tx.CreateBucket([]byte("widgets"))
        return err
    })

    for i := 0; i < 10; i++ {
        db.Update(func(tx *Tx) error {
            assert.NoError(t, tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar")))
            return nil
        })
    }
    db.Update(func(tx *Tx) error {
        if p, _ := tx.Page(0); assert.NotNil(t, p) {
            assert.Equal(t, "meta", p.Type)
        }
        if p, _ := tx.Page(1); assert.NotNil(t, p) {
            assert.Equal(t, "meta", p.Type)
        }
        if p, _ := tx.Page(2); assert.NotNil(t, p) {
            assert.Equal(t, "free", p.Type)
        }
        if p, _ := tx.Page(3); assert.NotNil(t, p) {
            assert.Equal(t, "free", p.Type)
        }
        if p, _ := tx.Page(4); assert.NotNil(t, p) {
            assert.Equal(t, "leaf", p.Type) // root leaf
        }
        if p, _ := tx.Page(5); assert.NotNil(t, p) {
            assert.Equal(t, "freelist", p.Type)
        }
        p, _ := tx.Page(6)
        assert.Nil(t, p)
        return nil
    })
}
@@ -451,16 +452,17 @@ func TestDB_StrictMode(t *testing.T) {
            msg = fmt.Sprintf("%s", recover())
        }()

        db := NewTestDB()
        defer db.Close()

        db.StrictMode = true
        db.Update(func(tx *Tx) error {
            tx.CreateBucket([]byte("foo"))

            // Corrupt the DB by extending the high water mark.
            tx.meta.pgid++

            return nil
        })
    }()
@@ -474,15 +476,18 @@ func TestDB_DoubleFree(t *testing.T) {
        defer func() {
            msg = fmt.Sprintf("%s", recover())
        }()

        db := NewTestDB()
        defer os.Remove(db.DB.Path())
        defer db.DB.Close()

        db.Update(func(tx *Tx) error {
            tx.CreateBucket([]byte("foo"))

            // Corrupt the DB by adding a page to the freelist.
            db.freelist.free(0, tx.page(3))
            return nil
        })
    }()
@@ -580,37 +585,53 @@ func ExampleDB_Begin_ReadOnly() {
    // zephyr likes purple
}

// TestDB represents a wrapper around a Bolt DB to handle temporary file
// creation and automatic cleanup on close.
type TestDB struct {
    *DB
}

// NewTestDB returns a new instance of TestDB.
func NewTestDB() *TestDB {
    db, err := Open(tempfile(), 0666, nil)
    if err != nil {
        panic("cannot open db: " + err.Error())
    }
    return &TestDB{db}
}

// Close closes the database and deletes the underlying file.
func (db *TestDB) Close() {
    // Log statistics.
    if *statsFlag {
        db.PrintStats()
    }

    // Check database consistency after every test.
    db.MustCheck()

    // Close database and remove file.
    defer os.Remove(db.Path())
    db.DB.Close()
}

// PrintStats prints the database stats
func (db *TestDB) PrintStats() {
    var stats = db.Stats()
    fmt.Printf("[db] %-20s %-20s %-20s\n",
        fmt.Sprintf("pg(%d/%d)", stats.TxStats.PageCount, stats.TxStats.PageAlloc),
        fmt.Sprintf("cur(%d)", stats.TxStats.CursorCount),
        fmt.Sprintf("node(%d/%d)", stats.TxStats.NodeCount, stats.TxStats.NodeDeref),
    )
    fmt.Printf(" %-20s %-20s %-20s\n",
        fmt.Sprintf("rebal(%d/%v)", stats.TxStats.Rebalance, truncDuration(stats.TxStats.RebalanceTime)),
        fmt.Sprintf("spill(%d/%v)", stats.TxStats.Spill, truncDuration(stats.TxStats.SpillTime)),
        fmt.Sprintf("w(%d/%v)", stats.TxStats.Write, truncDuration(stats.TxStats.WriteTime)),
    )
}

// MustCheck runs a consistency check on the database and panics if any errors are found.
func (db *TestDB) MustCheck() {
    db.View(func(tx *Tx) error {
        // Collect all the errors.
        var errors []error
@@ -643,6 +664,21 @@ func mustCheck(db *DB) {
    })
}

// CopyTempFile copies a database to a temporary file.
func (db *TestDB) CopyTempFile() {
    path := tempfile()
    db.View(func(tx *Tx) error { return tx.CopyFile(path, 0600) })
    fmt.Println("db copied to: ", path)
}

// tempfile returns a temporary file path.
func tempfile() string {
    f, _ := ioutil.TempFile("", "bolt-")
    f.Close()
    os.Remove(f.Name())
    return f.Name()
}

// mustContainKeys checks that a bucket contains a given set of keys.
func mustContainKeys(b *Bucket, m map[string]string) {
    found := make(map[string]string)
@@ -682,29 +718,6 @@ func trunc(b []byte, length int) []byte {
     return b
 }

-// writes the current database stats to the testing log.
-func logStats(db *DB) {
-    var stats = db.Stats()
-    fmt.Printf("[db] %-20s %-20s %-20s\n",
-        fmt.Sprintf("pg(%d/%d)", stats.TxStats.PageCount, stats.TxStats.PageAlloc),
-        fmt.Sprintf("cur(%d)", stats.TxStats.CursorCount),
-        fmt.Sprintf("node(%d/%d)", stats.TxStats.NodeCount, stats.TxStats.NodeDeref),
-    )
-    fmt.Printf(" %-20s %-20s %-20s\n",
-        fmt.Sprintf("rebal(%d/%v)", stats.TxStats.Rebalance, truncDuration(stats.TxStats.RebalanceTime)),
-        fmt.Sprintf("spill(%d/%v)", stats.TxStats.Spill, truncDuration(stats.TxStats.SpillTime)),
-        fmt.Sprintf("w(%d/%v)", stats.TxStats.Write, truncDuration(stats.TxStats.WriteTime)),
-    )
-}

 func truncDuration(d time.Duration) string {
     return regexp.MustCompile(`^(\d+)(\.\d+)`).ReplaceAllString(d.String(), "$1")
 }

-// copyAndFailNow copies a database to a new location and then fails the test.
-func copyAndFailNow(t *testing.T, db *DB) {
-    path := tempfile()
-    db.View(func(tx *Tx) error { return tx.CopyFile(path, 0600) })
-    fmt.Println("db copied to: ", path)
-    t.FailNow()
-}
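Because TestDB embeds *DB, the existing DB methods (Update, View, Begin, Stats, Path) are promoted onto the wrapper, which is why the test bodies above keep calling them on db unchanged; only Close gains the new behavior. A small illustration of that promotion, assuming the bolt package with the helpers above; the test name is hypothetical and not part of the commit:

package bolt

import "testing"

// Hypothetical test: methods promoted from the embedded *DB and the
// TestDB-specific Close coexist on the same value.
func TestTestDB_Promotion(t *testing.T) {
    db := NewTestDB()
    defer db.Close() // resolves to TestDB.Close, not the embedded DB.Close

    // Update and Stats are promoted from the embedded *DB.
    if err := db.Update(func(tx *Tx) error {
        _, err := tx.CreateBucket([]byte("widgets"))
        return err
    }); err != nil {
        t.Fatal(err)
    }
    _ = db.Stats()

    // The raw handle is still reachable when needed, as TestDB_DoubleFree
    // above does with db.DB.Close() and db.DB.Path().
    _ = db.DB
}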


@@ -41,78 +41,80 @@ func testSimulate(t *testing.T, threadCount, parallelism int) {
    var versions = make(map[txid]*QuickDB)
    versions[1] = NewQuickDB()

    db := NewTestDB()
    defer db.Close()

    var mutex sync.Mutex

    // Run n threads in parallel, each with their own operation.
    var wg sync.WaitGroup
    var threads = make(chan bool, parallelism)
    var i int
    for {
        threads <- true
        wg.Add(1)
        writable := ((rand.Int() % 100) < 20) // 20% writers

        // Choose an operation to execute.
        var handler simulateHandler
        if writable {
            handler = writerHandlers[rand.Intn(len(writerHandlers))]
        } else {
            handler = readerHandlers[rand.Intn(len(readerHandlers))]
        }

        // Execute a thread for the given operation.
        go func(writable bool, handler simulateHandler) {
            defer wg.Done()

            // Start transaction.
            tx, err := db.Begin(writable)
            if err != nil {
                t.Fatal("tx begin: ", err)
            }

            // Obtain current state of the dataset.
            mutex.Lock()
            var qdb = versions[tx.id()]
            if writable {
                qdb = versions[tx.id()-1].Copy()
            }
            mutex.Unlock()

            // Make sure we commit/rollback the tx at the end and update the state.
            if writable {
                defer func() {
                    mutex.Lock()
                    versions[tx.id()] = qdb
                    mutex.Unlock()

                    assert.NoError(t, tx.Commit())
                }()
            } else {
                defer tx.Rollback()
            }

            // Ignore operation if we don't have data yet.
            if qdb == nil {
                return
            }

            // Execute handler.
            handler(tx, qdb)

            // Release a thread back to the scheduling loop.
            <-threads
        }(writable, handler)

        i++
        if i > threadCount {
            break
        }
    }

    // Wait until all threads are done.
    wg.Wait()
}
type simulateHandler func(tx *Tx, qdb *QuickDB)


@@ -11,265 +11,267 @@ import (
// Ensure that committing a closed transaction returns an error.
func TestTx_Commit_Closed(t *testing.T) {
    db := NewTestDB()
    defer db.Close()
    tx, _ := db.Begin(true)
    tx.CreateBucket([]byte("foo"))
    assert.NoError(t, tx.Commit())
    assert.Equal(t, tx.Commit(), ErrTxClosed)
}

// Ensure that rolling back a closed transaction returns an error.
func TestTx_Rollback_Closed(t *testing.T) {
    db := NewTestDB()
    defer db.Close()
    tx, _ := db.Begin(true)
    assert.NoError(t, tx.Rollback())
    assert.Equal(t, tx.Rollback(), ErrTxClosed)
}

// Ensure that committing a read-only transaction returns an error.
func TestTx_Commit_ReadOnly(t *testing.T) {
    db := NewTestDB()
    defer db.Close()
    tx, _ := db.Begin(false)
    assert.Equal(t, tx.Commit(), ErrTxNotWritable)
}

// Ensure that a transaction can retrieve a cursor on the root bucket.
func TestTx_Cursor(t *testing.T) {
    db := NewTestDB()
    defer db.Close()
    db.Update(func(tx *Tx) error {
        tx.CreateBucket([]byte("widgets"))
        tx.CreateBucket([]byte("woojits"))
        c := tx.Cursor()

        k, v := c.First()
        assert.Equal(t, "widgets", string(k))
        assert.Nil(t, v)

        k, v = c.Next()
        assert.Equal(t, "woojits", string(k))
        assert.Nil(t, v)

        k, v = c.Next()
        assert.Nil(t, k)
        assert.Nil(t, v)

        return nil
    })
}

// Ensure that creating a bucket with a read-only transaction returns an error.
func TestTx_CreateBucket_ReadOnly(t *testing.T) {
    db := NewTestDB()
    defer db.Close()
    db.View(func(tx *Tx) error {
        b, err := tx.CreateBucket([]byte("foo"))
        assert.Nil(t, b)
        assert.Equal(t, ErrTxNotWritable, err)
        return nil
    })
}

// Ensure that creating a bucket on a closed transaction returns an error.
func TestTx_CreateBucket_Closed(t *testing.T) {
    db := NewTestDB()
    defer db.Close()
    tx, _ := db.Begin(true)
    tx.Commit()
    b, err := tx.CreateBucket([]byte("foo"))
    assert.Nil(t, b)
    assert.Equal(t, ErrTxClosed, err)
}

// Ensure that a Tx can retrieve a bucket.
func TestTx_Bucket(t *testing.T) {
    db := NewTestDB()
    defer db.Close()
    db.Update(func(tx *Tx) error {
        tx.CreateBucket([]byte("widgets"))
        b := tx.Bucket([]byte("widgets"))
        assert.NotNil(t, b)
        return nil
    })
}

// Ensure that a Tx retrieving a non-existent key returns nil.
func TestTx_Get_Missing(t *testing.T) {
    db := NewTestDB()
    defer db.Close()
    db.Update(func(tx *Tx) error {
        tx.CreateBucket([]byte("widgets"))
        tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar"))
        value := tx.Bucket([]byte("widgets")).Get([]byte("no_such_key"))
        assert.Nil(t, value)
        return nil
    })
}

// Ensure that a bucket can be created and retrieved.
func TestTx_CreateBucket(t *testing.T) {
    db := NewTestDB()
    defer db.Close()

    // Create a bucket.
    db.Update(func(tx *Tx) error {
        b, err := tx.CreateBucket([]byte("widgets"))
        assert.NotNil(t, b)
        assert.NoError(t, err)
        return nil
    })

    // Read the bucket through a separate transaction.
    db.View(func(tx *Tx) error {
        b := tx.Bucket([]byte("widgets"))
        assert.NotNil(t, b)
        return nil
    })
}

// Ensure that a bucket can be created if it doesn't already exist.
func TestTx_CreateBucketIfNotExists(t *testing.T) {
    db := NewTestDB()
    defer db.Close()
    db.Update(func(tx *Tx) error {
        b, err := tx.CreateBucketIfNotExists([]byte("widgets"))
        assert.NotNil(t, b)
        assert.NoError(t, err)

        b, err = tx.CreateBucketIfNotExists([]byte("widgets"))
        assert.NotNil(t, b)
        assert.NoError(t, err)

        b, err = tx.CreateBucketIfNotExists([]byte{})
        assert.Nil(t, b)
        assert.Equal(t, ErrBucketNameRequired, err)

        b, err = tx.CreateBucketIfNotExists(nil)
        assert.Nil(t, b)
        assert.Equal(t, ErrBucketNameRequired, err)
        return nil
    })

    // Read the bucket through a separate transaction.
    db.View(func(tx *Tx) error {
        b := tx.Bucket([]byte("widgets"))
        assert.NotNil(t, b)
        return nil
    })
}

// Ensure that a bucket cannot be created twice.
func TestTx_CreateBucket_Exists(t *testing.T) {
    db := NewTestDB()
    defer db.Close()

    // Create a bucket.
    db.Update(func(tx *Tx) error {
        b, err := tx.CreateBucket([]byte("widgets"))
        assert.NotNil(t, b)
        assert.NoError(t, err)
        return nil
    })

    // Create the same bucket again.
    db.Update(func(tx *Tx) error {
        b, err := tx.CreateBucket([]byte("widgets"))
        assert.Nil(t, b)
        assert.Equal(t, ErrBucketExists, err)
        return nil
    })
}

// Ensure that a bucket is created with a non-blank name.
func TestTx_CreateBucket_NameRequired(t *testing.T) {
    db := NewTestDB()
    defer db.Close()
    db.Update(func(tx *Tx) error {
        b, err := tx.CreateBucket(nil)
        assert.Nil(t, b)
        assert.Equal(t, ErrBucketNameRequired, err)
        return nil
    })
}

// Ensure that a bucket can be deleted.
func TestTx_DeleteBucket(t *testing.T) {
    db := NewTestDB()
    defer db.Close()

    // Create a bucket and add a value.
    db.Update(func(tx *Tx) error {
        tx.CreateBucket([]byte("widgets"))
        tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar"))
        return nil
    })

    // Save root page id.
    var root pgid
    db.View(func(tx *Tx) error {
        root = tx.Bucket([]byte("widgets")).root
        return nil
    })

    // Delete the bucket and make sure we can't get the value.
    db.Update(func(tx *Tx) error {
        assert.NoError(t, tx.DeleteBucket([]byte("widgets")))
        assert.Nil(t, tx.Bucket([]byte("widgets")))
        return nil
    })

    db.Update(func(tx *Tx) error {
        // Verify that the bucket's page is free.
        assert.Equal(t, []pgid{4, 5}, db.freelist.all())

        // Create the bucket again and make sure there's not a phantom value.
        b, err := tx.CreateBucket([]byte("widgets"))
        assert.NotNil(t, b)
        assert.NoError(t, err)
        assert.Nil(t, tx.Bucket([]byte("widgets")).Get([]byte("foo")))
        return nil
    })
}

// Ensure that deleting a bucket on a closed transaction returns an error.
func TestTx_DeleteBucket_Closed(t *testing.T) {
    db := NewTestDB()
    defer db.Close()
    tx, _ := db.Begin(true)
    tx.Commit()
    assert.Equal(t, tx.DeleteBucket([]byte("foo")), ErrTxClosed)
}

// Ensure that deleting a bucket with a read-only transaction returns an error.
func TestTx_DeleteBucket_ReadOnly(t *testing.T) {
    db := NewTestDB()
    defer db.Close()
    db.View(func(tx *Tx) error {
        assert.Equal(t, tx.DeleteBucket([]byte("foo")), ErrTxNotWritable)
        return nil
    })
}

// Ensure that nothing happens when deleting a bucket that doesn't exist.
func TestTx_DeleteBucket_NotFound(t *testing.T) {
    db := NewTestDB()
    defer db.Close()
    db.Update(func(tx *Tx) error {
        assert.Equal(t, ErrBucketNotFound, tx.DeleteBucket([]byte("widgets")))
        return nil
    })
}

// Ensure that Tx commit handlers are called after a transaction successfully commits.
func TestTx_OnCommit(t *testing.T) {
    var x int
    db := NewTestDB()
    defer db.Close()
    db.Update(func(tx *Tx) error {
        tx.OnCommit(func() { x += 1 })
        tx.OnCommit(func() { x += 2 })
        _, err := tx.CreateBucket([]byte("widgets"))
        return err
    })
    assert.Equal(t, 3, x)
}
@@ -277,39 +279,39 @@ func TestTx_OnCommit(t *testing.T) {
// Ensure that Tx commit handlers are NOT called after a transaction rolls back.
func TestTx_OnCommit_Rollback(t *testing.T) {
    var x int
    db := NewTestDB()
    defer db.Close()
    db.Update(func(tx *Tx) error {
        tx.OnCommit(func() { x += 1 })
        tx.OnCommit(func() { x += 2 })
        tx.CreateBucket([]byte("widgets"))
        return errors.New("rollback this commit")
    })
    assert.Equal(t, 0, x)
}

// Ensure that the database can be copied to a file path.
func TestTx_CopyFile(t *testing.T) {
    db := NewTestDB()
    defer db.Close()
    var dest = tempfile()
    db.Update(func(tx *Tx) error {
        tx.CreateBucket([]byte("widgets"))
        tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar"))
        tx.Bucket([]byte("widgets")).Put([]byte("baz"), []byte("bat"))
        return nil
    })

    assert.NoError(t, db.View(func(tx *Tx) error { return tx.CopyFile(dest, 0600) }))

    db2, err := Open(dest, 0600, nil)
    assert.NoError(t, err)
    defer db2.Close()

    db2.View(func(tx *Tx) error {
        assert.Equal(t, []byte("bar"), tx.Bucket([]byte("widgets")).Get([]byte("foo")))
        assert.Equal(t, []byte("bat"), tx.Bucket([]byte("widgets")).Get([]byte("baz")))
        return nil
    })
}
@@ -336,32 +338,32 @@ func (f *failWriter) Write(p []byte) (n int, err error) {
// Ensure that Copy handles write errors right.
func TestTx_CopyFile_Error_Meta(t *testing.T) {
    db := NewTestDB()
    defer db.Close()
    db.Update(func(tx *Tx) error {
        tx.CreateBucket([]byte("widgets"))
        tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar"))
        tx.Bucket([]byte("widgets")).Put([]byte("baz"), []byte("bat"))
        return nil
    })

    err := db.View(func(tx *Tx) error { return tx.Copy(&failWriter{}) })
    assert.EqualError(t, err, "meta copy: error injected for tests")
}

// Ensure that Copy handles write errors right.
func TestTx_CopyFile_Error_Normal(t *testing.T) {
    db := NewTestDB()
    defer db.Close()
    db.Update(func(tx *Tx) error {
        tx.CreateBucket([]byte("widgets"))
        tx.Bucket([]byte("widgets")).Put([]byte("foo"), []byte("bar"))
        tx.Bucket([]byte("widgets")).Put([]byte("baz"), []byte("bat"))
        return nil
    })

    err := db.View(func(tx *Tx) error { return tx.Copy(&failWriter{3 * db.pageSize}) })
    assert.EqualError(t, err, "error injected for tests")
}
func ExampleTx_Rollback() {