Add 'bolt bench'.

This commit adds a flexible benchmarking tool to the 'bolt' CLI. It allows
the user to separately specify the write mode and read mode (e.g. sequential,
random, etc.). It also allows the user to isolate profiling to either the
reads or the writes.

Currently the bench tool supports only "seq" read and write modes. It does
not yet support streaming of Bolt counters.
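
As an example, a run that profiles only the read phase might look like the
following (a hypothetical invocation; the flags and defaults come from the
new 'bench' command definition below):

    bolt bench --write-mode seq --read-mode seq --count 10000 --profile-mode r --cpuprofile cpu.out

This writes 10,000 sequentially-keyed items into a temporary database, then
iterates over them repeatedly for at least one second while capturing a CPU
profile of the read phase only, and prints per-operation duration and op/sec
summaries for both the write and read phases.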

Fixes #95.

/cc @snormore
master
Ben Johnson 2014-04-18 21:37:45 -05:00
parent 71e91e24b0
commit a42d74da7e
9 changed files with 283 additions and 498 deletions

@ -24,7 +24,7 @@ cpuprofile: fmt
# go get github.com/kisielk/errcheck
errcheck:
@echo "=== errcheck ==="
@.go/bin/errcheck github.com/boltdb/bolt
@errcheck github.com/boltdb/bolt
fmt:
@go fmt ./...
@ -34,8 +34,7 @@ get:
build: get
@mkdir -p bin
@go build -ldflags=$(GOLDFLAGS) -a -o bin/bolt-`git rev-parse --short HEAD` ./cmd/bolt
@echo "writing bin/bolt-`git rev-parse --short HEAD`"
@go build -ldflags=$(GOLDFLAGS) -a -o bin/bolt ./cmd/bolt
test: fmt errcheck
@go get github.com/stretchr/testify/assert

@ -1,31 +1,271 @@
package main
import (
"testing"
"encoding/binary"
"errors"
"fmt"
"io/ioutil"
"os"
"runtime"
"runtime/pprof"
"time"
"github.com/boltdb/bolt"
"github.com/boltdb/bolt/bench"
)
// Bench runs a benchmark against the given database.
// readWriteMode: 'read' or 'write'
// traversalPattern: 'sequential' or 'random'
// parallelism: integer representing number of concurrent reads/writes
func Bench(inputPath string, readWriteMode string, traversalPattern string, parallelism int) {
// File handlers for the various profiles.
var cpuprofile, memprofile, blockprofile *os.File
// Open the database.
db, err := bolt.Open(inputPath, 0600)
var benchBucketName = []byte("bench")
// Bench executes a customizable, synthetic benchmark against Bolt.
func Bench(options *BenchOptions) {
var results BenchResults
// Find temporary location.
path := tempfile()
defer os.Remove(path)
// Create database.
db, err := bolt.Open(path, 0600)
if err != nil {
fatalf("error: %+v", err)
fatal(err)
return
}
defer db.Close()
b := bench.New(db, &bench.Config{
ReadWriteMode: readWriteMode,
TraversalPattern: traversalPattern,
Parallelism: parallelism,
})
// Start profiling for writes.
if options.ProfileMode == "rw" || options.ProfileMode == "w" {
benchStartProfiling(options)
}
println(testing.Benchmark(b.Run))
// Write to the database.
if err := benchWrite(db, options, &results); err != nil {
fatal("bench: write: ", err)
}
// Stop profiling for writes only.
if options.ProfileMode == "w" {
benchStopProfiling()
}
// Start profiling for reads.
if options.ProfileMode == "r" {
benchStartProfiling(options)
}
// Read from the database.
if err := benchRead(db, options, &results); err != nil {
fatal("bench: read: ", err)
}
// Stop profiling for reads (and for combined read/write profiling).
if options.ProfileMode == "rw" || options.ProfileMode == "r" {
benchStopProfiling()
}
// Print results.
fmt.Printf("# Write\t%v\t(%v/op)\t(%v op/sec)\n", results.WriteDuration, results.WriteOpDuration(), results.WriteOpsPerSecond())
fmt.Printf("# Read\t%v\t(%v/op)\t(%v op/sec)\n", results.ReadDuration, results.ReadOpDuration(), results.ReadOpsPerSecond())
fmt.Println("")
}
// Writes to the database.
func benchWrite(db *bolt.DB, options *BenchOptions, results *BenchResults) error {
var err error
var t = time.Now()
switch options.WriteMode {
case "seq":
err = benchWriteSequential(db, options, results)
default:
return fmt.Errorf("invalid write mode: %s", options.WriteMode)
}
results.WriteDuration = time.Since(t)
return err
}
func benchWriteSequential(db *bolt.DB, options *BenchOptions, results *BenchResults) error {
results.WriteOps = options.Iterations
return db.Update(func(tx *bolt.Tx) error {
b, _ := tx.CreateBucketIfNotExists(benchBucketName)
for i := 0; i < options.Iterations; i++ {
var key = make([]byte, options.KeySize)
var value = make([]byte, options.ValueSize)
binary.BigEndian.PutUint32(key, uint32(i))
if err := b.Put(key, value); err != nil {
return err
}
}
return nil
})
}
// Reads from the database.
func benchRead(db *bolt.DB, options *BenchOptions, results *BenchResults) error {
var err error
var t = time.Now()
switch options.ReadMode {
case "seq":
err = benchReadSequential(db, options, results)
default:
return fmt.Errorf("invalid read mode: %s", options.ReadMode)
}
results.ReadDuration = time.Since(t)
return err
}
func benchReadSequential(db *bolt.DB, options *BenchOptions, results *BenchResults) error {
return db.View(func(tx *bolt.Tx) error {
var t = time.Now()
for {
c := tx.Bucket(benchBucketName).Cursor()
var count int
for k, v := c.First(); k != nil; k, v = c.Next() {
if v == nil {
return errors.New("invalid value")
}
count++
}
if count != options.Iterations {
return fmt.Errorf("read seq: iter mismatch: expected %d, got %d", options.Iterations, count)
}
results.ReadOps += count
// Make sure we do this for at least a second.
if time.Since(t) >= time.Second {
break
}
}
return nil
})
}
// Starts all profiles set on the options.
func benchStartProfiling(options *BenchOptions) {
var err error
// Start CPU profiling.
if options.CPUProfile != "" {
cpuprofile, err = os.Create(options.CPUProfile)
if err != nil {
fatal("bench: could not create cpu profile %q: %v", options.CPUProfile, err)
}
pprof.StartCPUProfile(cpuprofile)
}
// Start memory profiling.
if options.MemProfile != "" {
memprofile, err = os.Create(options.MemProfile)
if err != nil {
fatal("bench: could not create memory profile %q: %v", options.MemProfile, err)
}
runtime.MemProfileRate = 4096
}
// Start block profiling.
if options.BlockProfile != "" {
blockprofile, err = os.Create(options.BlockProfile)
if err != nil {
fatal("bench: could not create block profile %q: %v", options.BlockProfile, err)
}
runtime.SetBlockProfileRate(1)
}
}
// Stops all profiles.
func benchStopProfiling() {
if cpuprofile != nil {
pprof.StopCPUProfile()
cpuprofile.Close()
cpuprofile = nil
}
if memprofile != nil {
pprof.Lookup("heap").WriteTo(memprofile, 0)
memprofile.Close()
memprofile = nil
}
if blockprofile != nil {
pprof.Lookup("block").WriteTo(blockprofile, 0)
blockprofile.Close()
blockprofile = nil
runtime.SetBlockProfileRate(0)
}
}
// BenchOptions represents the set of options that can be passed to Bench().
type BenchOptions struct {
ProfileMode string
WriteMode string
ReadMode string
Iterations int
KeySize int
ValueSize int
CPUProfile string
MemProfile string
BlockProfile string
}
// BenchResults represents the performance results of the benchmark.
type BenchResults struct {
WriteOps int
WriteDuration time.Duration
ReadOps int
ReadDuration time.Duration
}
// Returns the duration for a single write operation.
func (r *BenchResults) WriteOpDuration() time.Duration {
if r.WriteOps == 0 {
return 0
}
return r.WriteDuration / time.Duration(r.WriteOps)
}
// Returns average number of write operations that can be performed per second.
func (r *BenchResults) WriteOpsPerSecond() int {
var op = r.WriteOpDuration()
if op == 0 {
return 0
}
return int(time.Second) / int(op)
}
// Returns the duration for a single read operation.
func (r *BenchResults) ReadOpDuration() time.Duration {
if r.ReadOps == 0 {
return 0
}
return r.ReadDuration / time.Duration(r.ReadOps)
}
// Returns average number of read operations that can be performed per second.
func (r *BenchResults) ReadOpsPerSecond() int {
var op = r.ReadOpDuration()
if op == 0 {
return 0
}
return int(time.Second) / int(op)
}
// tempfile returns a temporary file path.
func tempfile() string {
f, _ := ioutil.TempFile("", "bolt-bench-")
f.Close()
os.Remove(f.Name())
return f.Name()
}

@ -1,126 +0,0 @@
package bench
import (
"errors"
"fmt"
"sync"
"testing"
"github.com/boltdb/bolt"
)
const (
BenchReadMode = "read"
BenchWriteMode = "write"
BenchSequentialTraversal = "sequential"
BenchRandomTraversal = "random"
)
type Benchmark struct {
db *bolt.DB
config *Config
}
func New(db *bolt.DB, config *Config) *Benchmark {
b := new(Benchmark)
b.db = db
b.config = config
return b
}
func (bm *Benchmark) Run(b *testing.B) {
// Read buckets and keys before the benchmark begins so we don't skew the
// results.
buckets, err := buckets(bm.db)
if err != nil {
b.Fatalf("error: %+v", err)
}
bucketsWithKeys := make(map[string][]string)
for _, bucket := range buckets {
keys, err := keys(bm.db, bucket)
if err != nil {
b.Fatalf("error: %+v", err)
}
bucketsWithKeys[bucket] = keys
}
b.ResetTimer()
// Keep running a fixed number of parallel reads until we run out of time.
for i := 0; i < b.N; i++ {
var wg sync.WaitGroup
for j := 0; j < bm.config.Parallelism; j++ {
wg.Add(1)
go func() {
defer wg.Done()
if err := bm.readBuckets(b, bm.db, bucketsWithKeys); err != nil {
b.Fatalf("error: %+v", err)
}
}()
}
wg.Wait()
}
}
// Run benchmark(s) for each of the given buckets.
func (bm *Benchmark) readBuckets(b *testing.B, db *bolt.DB, bucketsWithKeys map[string][]string) error {
return db.View(func(tx *bolt.Tx) error {
bucketsCount := len(bucketsWithKeys)
count := 0
for bucket, keys := range bucketsWithKeys {
bucket := tx.Bucket([]byte(bucket))
if err := bm.readKeys(b, bucket, keys); err != nil {
return err
}
count++
}
if count != bucketsCount {
return errors.New(fmt.Sprintf("wrong count: %d; expected: %d", count, bucketsCount))
}
return nil
})
}
func (bm *Benchmark) readKeys(b *testing.B, bucket *bolt.Bucket, keys []string) error {
c := bucket.Cursor()
keysCount := len(keys)
count := 0
for k, _ := c.First(); k != nil; k, _ = c.Next() {
count++
}
if count != keysCount {
return errors.New(fmt.Sprintf("wrong count: %d; expected: %d", count, keysCount))
}
return nil
}
func buckets(db *bolt.DB) ([]string, error) {
buckets := []string{}
err := db.View(func(tx *bolt.Tx) error {
// Iterate over each bucket.
return tx.ForEach(func(name []byte, _ *bolt.Bucket) error {
buckets = append(buckets, string(name))
return nil
})
})
return buckets, err
}
func keys(db *bolt.DB, bucket string) ([]string, error) {
keys := []string{}
err := db.View(func(tx *bolt.Tx) error {
// Find bucket.
b := tx.Bucket([]byte(bucket))
if b == nil {
return errors.New(fmt.Sprintf("bucket %+v not found", b))
}
// Iterate over each key.
return b.ForEach(func(key, _ []byte) error {
keys = append(keys, string(key))
return nil
})
})
return keys, err
}

@ -1,7 +0,0 @@
package bench
type Config struct {
ReadWriteMode string
TraversalPattern string
Parallelism int
}

@ -1,24 +0,0 @@
package bench
import (
"fmt"
"strings"
"github.com/boltdb/bolt"
)
// Generate and write data to specified number of buckets/items.
func GenerateDB(db *bolt.DB, numBuckets, numItemsPerBucket int) error {
return db.Update(func(tx *bolt.Tx) error {
for bucketIndex := 0; bucketIndex < numBuckets; bucketIndex++ {
bucketName := fmt.Sprintf("bucket%08d", bucketIndex)
tx.CreateBucket([]byte(bucketName))
bucket := tx.Bucket([]byte(bucketName))
for i := 0; i < numItemsPerBucket; i++ {
value := []byte(strings.Repeat("0", 100))
bucket.Put([]byte(fmt.Sprintf("key%08d", i)), value)
}
}
return nil
})
}

@ -1,55 +0,0 @@
package main
import (
"fmt"
"github.com/boltdb/bolt"
)
// Generate data for benchmarks.
func Generate(destPath string, numBuckets, numItems int) {
// Open the database.
db, err := bolt.Open(destPath, 0600)
if err != nil {
fatalf("open db:", err)
return
}
defer db.Close()
for bucketIndex := 0; bucketIndex < numBuckets; bucketIndex++ {
bucketName := fmt.Sprintf("bucket%03d", bucketIndex)
err = db.Update(func(tx *bolt.Tx) error {
// Create the bucket if it doesn't exist.
if err := tx.CreateBucketIfNotExists([]byte(bucketName)); err != nil {
fatalf("create bucket: %s", err)
return nil
}
// Find bucket.
b := tx.Bucket([]byte(bucketName))
if b == nil {
fatalf("bucket not found: %s", bucketName)
return nil
}
for i := 0; i < numItems; i++ {
key := fmt.Sprintf("key%03d", i)
value := fmt.Sprintf("value%03d", i)
// Set value for a given key.
if err := b.Put([]byte(key), []byte(value)); err != nil {
return err
}
}
return nil
})
}
if err != nil {
fatal(err)
return
}
}

@ -6,7 +6,6 @@ import (
"fmt"
"log"
"os"
"strconv"
"github.com/codegangsta/cli"
)
@ -91,31 +90,34 @@ func NewApp() *cli.App {
Check(path)
},
},
{
Name: "generate",
Usage: "Generate data for benchmarks",
Action: func(c *cli.Context) {
destPath := c.Args().Get(0)
numBuckets, err := strconv.Atoi(c.Args().Get(1))
if err != nil {
fatal(err)
}
numItems, err := strconv.Atoi(c.Args().Get(2))
if err != nil {
fatal(err)
}
Generate(destPath, numBuckets, numItems)
},
},
{
Name: "bench",
Usage: "Run benchmarks on a given dataset",
Action: func(c *cli.Context) {
srcPath := c.Args().Get(0)
Bench(srcPath, "read", "sequential", 1)
Usage: "Performs a synthetic benchmark",
Flags: []cli.Flag{
&cli.StringFlag{Name: "profile-mode", Value: "rw", Usage: "Profile mode"},
&cli.StringFlag{Name: "write-mode", Value: "seq", Usage: "Write mode"},
&cli.StringFlag{Name: "read-mode", Value: "seq", Usage: "Read mode"},
&cli.IntFlag{Name: "count", Value: 1000, Usage: "Item count"},
&cli.IntFlag{Name: "key-size", Value: 8, Usage: "Key size"},
&cli.IntFlag{Name: "value-size", Value: 32, Usage: "Value size"},
&cli.StringFlag{Name: "cpuprofile", Usage: "CPU profile output path"},
&cli.StringFlag{Name: "memprofile", Usage: "Memory profile output path"},
&cli.StringFlag{Name: "blockprofile", Usage: "Block profile output path"},
},
},
}
Action: func(c *cli.Context) {
Bench(&BenchOptions{
ProfileMode: c.String("profile-mode"),
WriteMode: c.String("write-mode"),
ReadMode: c.String("read-mode"),
Iterations: c.Int("count"),
KeySize: c.Int("key-size"),
ValueSize: c.Int("value-size"),
CPUProfile: c.String("cpuprofile"),
MemProfile: c.String("memprofile"),
BlockProfile: c.String("blockprofile"),
})
},
}}
return app
}

@ -5,11 +5,8 @@ import (
"flag"
"fmt"
"io/ioutil"
"math/rand"
"os"
"regexp"
"strconv"
"strings"
"testing"
"time"
"unsafe"
@ -356,39 +353,6 @@ func TestDBStats_Sub(t *testing.T) {
assert.Equal(t, 7, diff.TxStats.PageCount)
}
// Benchmark the performance of single put transactions in random order.
func BenchmarkDB_Put_Sequential(b *testing.B) {
value := []byte(strings.Repeat("0", 64))
withOpenDB(func(db *DB, path string) {
db.Update(func(tx *Tx) error {
_, err := tx.CreateBucket([]byte("widgets"))
return err
})
for i := 0; i < b.N; i++ {
db.Update(func(tx *Tx) error {
return tx.Bucket([]byte("widgets")).Put([]byte(strconv.Itoa(i)), value)
})
}
})
}
// Benchmark the performance of single put transactions in random order.
func BenchmarkDB_Put_Random(b *testing.B) {
indexes := rand.Perm(b.N)
value := []byte(strings.Repeat("0", 64))
withOpenDB(func(db *DB, path string) {
db.Update(func(tx *Tx) error {
_, err := tx.CreateBucket([]byte("widgets"))
return err
})
for i := 0; i < b.N; i++ {
db.Update(func(tx *Tx) error {
return tx.Bucket([]byte("widgets")).Put([]byte(strconv.Itoa(indexes[i])), value)
})
}
})
}
func ExampleDB_Update() {
// Open the database.
db, _ := Open(tempfile(), 0666)

@ -3,13 +3,9 @@ package bolt
import (
"errors"
"fmt"
"math/rand"
"os"
"strconv"
"strings"
"testing"
"github.com/boltdb/bolt/bench"
"github.com/stretchr/testify/assert"
)
@ -267,210 +263,6 @@ func TestTx_OnCommit_Rollback(t *testing.T) {
assert.Equal(t, 0, x)
}
// func BenchmarkReadSequential_1Concurrency_1Buckets_1Items(b *testing.B) {
// benchmarkReadSequential(b, 1, 1, 1)
// }
// func BenchmarkReadSequential_1Concurrency_1Buckets_10Items(b *testing.B) {
// benchmarkReadSequential(b, 1, 10, 1)
// }
// func BenchmarkReadSequential_1Concurrency_1Buckets_100Items(b *testing.B) {
// benchmarkReadSequential(b, 1, 100, 1)
// }
// func BenchmarkReadSequential_1Concurrency_1Buckets_1000Items(b *testing.B) {
// benchmarkReadSequential(b, 1, 1000, 1)
// }
// func BenchmarkReadSequential_1Concurrency_1Buckets_10000Items(b *testing.B) {
// benchmarkReadSequential(b, 1, 10000, 1)
// }
// func BenchmarkReadSequential_10Concurrency_1Buckets_1Items(b *testing.B) {
// benchmarkReadSequential(b, 1, 1, 10)
// }
// func BenchmarkReadSequential_10Concurrency_1Buckets_10Items(b *testing.B) {
// benchmarkReadSequential(b, 1, 10, 10)
// }
// func BenchmarkReadSequential_10Concurrency_1Buckets_100Items(b *testing.B) {
// benchmarkReadSequential(b, 1, 100, 10)
// }
// func BenchmarkReadSequential_10Concurrency_1Buckets_1000Items(b *testing.B) {
// benchmarkReadSequential(b, 1, 1000, 10)
// }
// func BenchmarkReadSequential_10Concurrency_1Buckets_10000Items(b *testing.B) {
// benchmarkReadSequential(b, 1, 10000, 10)
// }
// func BenchmarkReadSequential_100Concurrency_1Buckets_1Items(b *testing.B) {
// benchmarkReadSequential(b, 1, 1, 100)
// }
// func BenchmarkReadSequential_100Concurrency_1Buckets_10Items(b *testing.B) {
// benchmarkReadSequential(b, 1, 10, 100)
// }
// func BenchmarkReadSequential_100Concurrency_1Buckets_100Items(b *testing.B) {
// benchmarkReadSequential(b, 1, 100, 100)
// }
// func BenchmarkReadSequential_100Concurrency_1Buckets_1000Items(b *testing.B) {
// benchmarkReadSequential(b, 1, 1000, 100)
// }
// func BenchmarkReadSequential_100Concurrency_1Buckets_10000Items(b *testing.B) {
// benchmarkReadSequential(b, 1, 10000, 100)
// }
// func BenchmarkReadSequential_1000Concurrency_1Buckets_1Items(b *testing.B) {
// benchmarkReadSequential(b, 1, 1, 1000)
// }
// func BenchmarkReadSequential_1000Concurrency_1Buckets_10Items(b *testing.B) {
// benchmarkReadSequential(b, 1, 10, 1000)
// }
// func BenchmarkReadSequential_1000Concurrency_1Buckets_100Items(b *testing.B) {
// benchmarkReadSequential(b, 1, 100, 1000)
// }
// func BenchmarkReadSequential_1000Concurrency_1Buckets_1000Items(b *testing.B) {
// benchmarkReadSequential(b, 1, 1000, 1000)
// }
// func BenchmarkReadSequential_1000Concurrency_1Buckets_10000Items(b *testing.B) {
// benchmarkReadSequential(b, 1, 10000, 1000)
// }
// func BenchmarkReadSequential_10000Concurrency_1Buckets_1Items(b *testing.B) {
// benchmarkReadSequential(b, 1, 1, 10000)
// }
// func BenchmarkReadSequential_10000Concurrency_1Buckets_10Items(b *testing.B) {
// benchmarkReadSequential(b, 1, 10, 10000)
// }
// func BenchmarkReadSequential_10000Concurrency_1Buckets_100Items(b *testing.B) {
// benchmarkReadSequential(b, 1, 100, 10000)
// }
// func BenchmarkReadSequential_10000Concurrency_1Buckets_1000Items(b *testing.B) {
// benchmarkReadSequential(b, 1, 1000, 10000)
// }
// func BenchmarkReadSequential_10000Concurrency_1Buckets_10000Items(b *testing.B) {
// benchmarkReadSequential(b, 1, 10000, 10000)
// }
// func benchmark(b *testing.B, readWriteMode, traversalPattern string, numBuckets, numItemsPerBucket, parallelism int) {
// withOpenDB(func(db *DB, path string) {
// if err := bench.GenerateDB(db, numBuckets, numItemsPerBucket); err != nil {
// b.Fatal(err)
// }
// bench.New(db, &bench.Config{
// ReadWriteMode: readWriteMode,
// TraversalPattern: traversalPattern,
// Parallelism: parallelism,
// }).Run(b)
// })
// }
// func benchmarkRead(b *testing.B, traversalPattern string, numBuckets, numItemsPerBucket, parallelism int) {
// benchmark(b, bench.BenchReadMode, traversalPattern, numBuckets, numItemsPerBucket, parallelism)
// }
// func benchmarkReadSequential(b *testing.B, numBuckets, numItemsPerBucket, parallelism int) {
// benchmark(b, bench.BenchReadMode, bench.BenchSequentialTraversal, numBuckets, numItemsPerBucket, parallelism)
// }
// func benchmarkReadRandom(b *testing.B, numBuckets, numItemsPerBucket, parallelism int) {
// benchmark(b, bench.BenchReadMode, bench.BenchRandomTraversal, numBuckets, numItemsPerBucket, parallelism)
// }
// Benchmark the performance iterating over a cursor.
func BenchmarkTxCursor1(b *testing.B) { benchmarkTxCursor(b, 1) }
func BenchmarkTxCursor10(b *testing.B) { benchmarkTxCursor(b, 10) }
func BenchmarkTxCursor100(b *testing.B) { benchmarkTxCursor(b, 100) }
func BenchmarkTxCursor1000(b *testing.B) { benchmarkTxCursor(b, 1000) }
func BenchmarkTxCursor10000(b *testing.B) { benchmarkTxCursor(b, 10000) }
func benchmarkTxCursor(b *testing.B, total int) {
indexes := rand.Perm(total)
value := []byte(strings.Repeat("0", 100))
withOpenDB(func(db *DB, path string) {
// Write data to bucket.
db.Update(func(tx *Tx) error {
tx.CreateBucket([]byte("widgets"))
bucket := tx.Bucket([]byte("widgets"))
for i := 0; i < total; i++ {
bucket.Put([]byte(fmt.Sprintf("%016d", indexes[i])), value)
}
return nil
})
b.ResetTimer()
// Iterate over bucket using cursor.
for i := 0; i < b.N; i++ {
db.View(func(tx *Tx) error {
count := 0
c := tx.Bucket([]byte("widgets")).Cursor()
for k, _ := c.First(); k != nil; k, _ = c.Next() {
count++
}
if count != total {
b.Fatalf("wrong count: %d; expected: %d", count, total)
}
return nil
})
}
})
}
// Benchmark the performance of bulk put transactions in random order.
func BenchmarkTxPutRandom1(b *testing.B) { benchmarkTxPutRandom(b, 1) }
func BenchmarkTxPutRandom10(b *testing.B) { benchmarkTxPutRandom(b, 10) }
func BenchmarkTxPutRandom100(b *testing.B) { benchmarkTxPutRandom(b, 100) }
func BenchmarkTxPutRandom1000(b *testing.B) { benchmarkTxPutRandom(b, 1000) }
func BenchmarkTxPutRandom10000(b *testing.B) { benchmarkTxPutRandom(b, 10000) }
func benchmarkTxPutRandom(b *testing.B, total int) {
indexes := rand.Perm(total)
value := []byte(strings.Repeat("0", 64))
withOpenDB(func(db *DB, path string) {
db.Update(func(tx *Tx) error {
_, err := tx.CreateBucket([]byte("widgets"))
return err
})
var tx *Tx
var bucket *Bucket
for j := 0; j < b.N; j++ {
for i := 0; i < total; i++ {
if i%1000 == 0 {
if tx != nil {
tx.Commit()
}
tx, _ = db.Begin(true)
bucket = tx.Bucket([]byte("widgets"))
}
bucket.Put([]byte(strconv.Itoa(indexes[i])), value)
}
}
tx.Commit()
})
}
// Benchmark the performance of bulk put transactions in sequential order.
func BenchmarkTxPutSequential1(b *testing.B) { benchmarkTxPutSequential(b, 1) }
func BenchmarkTxPutSequential10(b *testing.B) { benchmarkTxPutSequential(b, 10) }
func BenchmarkTxPutSequential100(b *testing.B) { benchmarkTxPutSequential(b, 100) }
func BenchmarkTxPutSequential1000(b *testing.B) { benchmarkTxPutSequential(b, 1000) }
func BenchmarkTxPutSequential10000(b *testing.B) { benchmarkTxPutSequential(b, 10000) }
func benchmarkTxPutSequential(b *testing.B, total int) {
value := []byte(strings.Repeat("0", 64))
withOpenDB(func(db *DB, path string) {
db.Update(func(tx *Tx) error {
_, err := tx.CreateBucket([]byte("widgets"))
return err
})
db.Update(func(tx *Tx) error {
bucket := tx.Bucket([]byte("widgets"))
for j := 0; j < b.N; j++ {
for i := 0; i < total; i++ {
bucket.Put([]byte(strconv.Itoa(i)), value)
}
}
return nil
})
})
}
func ExampleTx_Rollback() {
// Open the database.
db, _ := Open(tempfile(), 0666)