moar bench package

master
Steven Normore 2014-04-11 13:55:14 +00:00 committed by Ben Johnson
parent 6957c9d534
commit fdde1bcb06
9 changed files with 208 additions and 45 deletions

View File

@ -5,7 +5,7 @@ BRANCH=`git rev-parse --abbrev-ref HEAD`
COMMIT=`git rev-parse --short HEAD`
GOLDFLAGS="-X main.branch $(BRANCH) -X main.commit $(COMMIT)"
bench: benchpreq
bench:
go test -v -test.bench=$(BENCH)
# http://cloc.sourceforge.net/
@ -35,6 +35,7 @@ get:
build: get
@mkdir -p bin
@go build -ldflags=$(GOLDFLAGS) -a -o bin/bolt-`git rev-parse --short HEAD` ./cmd/bolt
@echo "writing bin/bolt-`git rev-parse --short HEAD`"
test: fmt errcheck
@go get github.com/stretchr/testify/assert

55
bench.go Normal file
View File

@ -0,0 +1,55 @@
package bolt
import (
"sync"
"testing"
)
const (
	// Read/write modes accepted in Benchmark.ReadWriteMode.
	BenchReadMode  = "read"
	BenchWriteMode = "write"
	// Traversal patterns accepted in Benchmark.TraversalPattern.
	BenchSequentialTraversal = "sequential"
	BenchRandomTraversal     = "random"
)
// Benchmark describes a single benchmark configuration: which database
// file to open, whether to read or write, how keys are traversed, and
// how many goroutines run concurrently.
type Benchmark struct {
	InputPath        string
	ReadWriteMode    string
	TraversalPattern string
	Parallelism      int
}

// NewBenchmark returns a Benchmark configured with the given database
// path, read/write mode, traversal pattern, and parallelism level.
func NewBenchmark(inputPath, readWriteMode, traversalPattern string, parallelism int) *Benchmark {
	return &Benchmark{
		InputPath:        inputPath,
		ReadWriteMode:    readWriteMode,
		TraversalPattern: traversalPattern,
		Parallelism:      parallelism,
	}
}
// Run opens the database at bm.InputPath and, for each benchmark
// iteration, launches bm.Parallelism goroutines and waits for them all
// to finish.
//
// NOTE(review): the per-goroutine traversal bodies are still stubs —
// only the concurrency scaffolding is implemented.
func (bm *Benchmark) Run(b *testing.B) {
	// Open the database.
	db, err := Open(bm.InputPath, 0600)
	if err != nil {
		// Fail the benchmark rather than panicking; the original had an
		// unreachable `return` after panic(err).
		b.Fatal(err)
	}
	defer db.Close()

	b.ResetTimer()

	// Keep running a fixed number of parallel reads until we run out of time.
	for i := 0; i < b.N; i++ {
		var wg sync.WaitGroup
		for j := 0; j < bm.Parallelism; j++ {
			wg.Add(1)
			go func() {
				// defer so the WaitGroup is released even if the
				// traversal body panics.
				defer wg.Done()
				if bm.TraversalPattern == BenchRandomTraversal {
					// Perform all reads in random order.
					// indexes := rand.Perm(total)
				} else {
					// Perform all reads in sequential order.
				}
			}()
		}
		wg.Wait()
	}
}

42
bench/bench.go Normal file
View File

@ -0,0 +1,42 @@
package bench
import (
"encoding/json"
"fmt"
"io/ioutil"
"os"
)
// bucketItems maps a key to its value within a single bucket.
type bucketItems map[string]string

// buckets maps a bucket name to the key/value pairs it holds.
type buckets map[string]bucketItems

// Benchmark holds a dataset loaded from a JSON input file.
type Benchmark struct {
	buckets buckets
}

// New loads a benchmark dataset from the JSON file at filePath.
func New(filePath string) (*Benchmark, error) {
	// BUG FIX: the original assigned readFromFile's two results to a
	// single unused variable and never returned, which does not compile.
	return readFromFile(filePath)
}

// readFromFile reads filePath and unmarshals its JSON contents
// (an object of bucket-name -> {key: value} objects) into a new
// Benchmark.
func readFromFile(filePath string) (*Benchmark, error) {
	if _, err := os.Stat(filePath); os.IsNotExist(err) {
		return nil, err
	}
	file, err := ioutil.ReadFile(filePath)
	if err != nil {
		return nil, err
	}
	b := new(Benchmark)
	if err := json.Unmarshal(file, &b.buckets); err != nil {
		return nil, err
	}
	return b, nil
}
// Run executes the loaded benchmarks. Currently a placeholder that
// only announces itself and always succeeds.
func (b *Benchmark) Run() error {
	const msg = "Do things, run benchmarks, tell people..."
	fmt.Println(msg)
	return nil
}

View File

@ -1,43 +1,43 @@
package main
import (
"os"
"testing"
"github.com/boltdb/bolt"
)
// Run benchmarks on a given dataset.
func Bench() {
path := "bench"
if _, err := os.Stat(path); os.IsNotExist(err) {
fatal(err)
return
}
// Import converts an exported database dump into a new database.
// parallelism: integer representing number of concurrent reads/writes
// readWriteMode: 'read' or 'write'
// traversalPattern: 'sequential' or 'random'
func Bench(inputPath string, readWriteMode string, traversalPattern string, parallelism int) {
db, err := bolt.Open(path, 0600)
if err != nil {
fatal(err)
return
}
defer db.Close()
// cursor/sequential reads
// random reads
bucketName := "widgets"
key := "key1"
value := "value1"
// sequential writes
// random writes
err = db.Update(func(tx *bolt.Tx) error {
// Find bucket.
b := tx.Bucket(bucketName)
if b == nil {
fatalf("bucket not found: %s", bucketName)
return nil
}
// reading from many buckets
// writing to many buckets
// Set value for a given key.
return b.Put([]byte(key), []byte(value))
})
if err != nil {
fatal(err)
return
}
// read from many paths
// writing to many paths
// bucket size/messages
// bucket depth
// concurrency
// chart/graph
// profile
// benchmarks for getting all keys
b := bolt.NewBenchmark(inputPath, readWriteMode, traversalPattern, parallelism)
result := testing.Benchmark(b.Run)
println(result)
}

View File

@ -23,6 +23,11 @@ func Import(path string, input string) {
fatal(err)
}
// Import all of the buckets.
importBuckets(path, root)
}
func importBuckets(path string, root []*rawMessage) {
// Open the database.
db, err := bolt.Open(path, 0600)
if err != nil {

View File

@ -1,6 +1,8 @@
package main
import (
"errors"
"fmt"
"os"
"github.com/boltdb/bolt"
@ -8,34 +10,44 @@ import (
// Keys retrieves a list of keys for a given bucket.
func Keys(path, name string) {
if _, err := os.Stat(path); os.IsNotExist(err) {
fatal(err)
return
}
keys, err := keys(path, name)
db, err := bolt.Open(path, 0600)
if err != nil {
fatal(err)
return
}
for _, key := range keys {
println(key)
}
}
func keys(path, name string) ([]string, error) {
if _, err := os.Stat(path); os.IsNotExist(err) {
return nil, err
}
db, err := bolt.Open(path, 0600)
if err != nil {
return nil, err
}
defer db.Close()
keys := []string{}
err = db.View(func(tx *bolt.Tx) error {
// Find bucket.
b := tx.Bucket([]byte(name))
if b == nil {
fatalf("bucket not found: %s", name)
return nil
return errors.New(fmt.Sprintf("bucket %+v not found", b))
}
// Iterate over each key.
return b.ForEach(func(key, _ []byte) error {
println(string(key))
keys = append(keys, string(key))
return nil
})
})
if err != nil {
fatal(err)
return
}
return keys, err
}

View File

@ -107,7 +107,8 @@ func NewApp() *cli.App {
Name: "bench",
Usage: "Run benchmarks on a given dataset",
Action: func(c *cli.Context) {
Bench()
srcPath := c.Args().Get(0)
Bench(srcPath, "read", "sequential", 1)
},
},
}

View File

@ -21,6 +21,12 @@ func Set(path, name, key, value string) {
defer db.Close()
err = db.Update(func(tx *bolt.Tx) error {
// Create the bucket if it doesn't exist.
if err := tx.CreateBucketIfNotExists([]byte(name)); err != nil {
fatalf("create bucket: %s", err)
return nil
}
// Find bucket.
b := tx.Bucket([]byte(name))
if b == nil {

View File

@ -365,6 +365,47 @@ func benchmarkTxPutSequential(b *testing.B, total int) {
})
}
// func BenchmarkParallel_1items_1threads(b *testing.B) { benchmarkParallel(1, 1) }
// func BenchmarkParallel_1items_10threads(b *testing.B) { benchmarkParallel(1, 10) }
// func BenchmarkParallel_1items_100threads(b *testing.B) { benchmarkParallel(1, 100) }
// func BenchmarkParallel_1items_1000threads(b *testing.B) { benchmarkParallel(1, 1000) }
// func BenchmarkParallel_10items_1threads(b *testing.B) { benchmarkParallel(10, 1) }
// func BenchmarkParallel_10items_10threads(b *testing.B) { benchmarkParallel(10, 10) }
// func BenchmarkParallel_10items_100threads(b *testing.B) { benchmarkParallel(10, 100) }
// func BenchmarkParallel_10items_1000threads(b *testing.B) { benchmarkParallel(10, 1000) }
// func BenchmarkParallel_100items_1threads(b *testing.B) { benchmarkParallel(100, 1) }
// func BenchmarkParallel_100items_10threads(b *testing.B) { benchmarkParallel(100, 10) }
// func BenchmarkParallel_100items_100threads(b *testing.B) { benchmarkParallel(100, 100) }
// func BenchmarkParallel_100items_1000threads(b *testing.B) { benchmarkParallel(100, 1000) }
// func BenchmarkParallel_1000items_1threads(b *testing.B) { benchmarkParallel(1000, 1) }
// func BenchmarkParallel_1000items_10threads(b *testing.B) { benchmarkParallel(1000, 10) }
// func BenchmarkParallel_1000items_100threads(b *testing.B) { benchmarkParallel(1000, 100) }
// func BenchmarkParallel_1000items_1000threads(b *testing.B) { benchmarkParallel(1000, 1000) }
// func benchmarkParallel(b *testing.B, itemCount, parallelism int) {
// // Setup database.
// for i := 0; i < itemCount; i++ {
// // ... insert key/values here ...
// }
// b.ResetTimer()
// // Keep running a fixed number of parallel reads until we run out of time.
// for i := 0; i < b.N; i++ {
// var wg sync.WaitGroup
// for j := 0; j < parallelism; j++ {
// wg.Add(1)
// go func() {
// // ... execute read here ...
// wg.Done()
// }()
// }
// wg.Wait()
// }
// }
func ExampleTx_Rollback() {
// Open the database.
db, _ := Open(tempfile(), 0666)