package bolt

import (
	"sort"
	"unsafe"
)

// txid represents the internal transaction identifier.
type txid uint64

// Tx represents a read-only or read/write transaction on the database.
// Read-only transactions can be used for retrieving values for keys and creating cursors.
// Read/write transactions can create and remove buckets and create and remove keys.
//
// IMPORTANT: You must commit or rollback transactions when you are done with
// them. Pages cannot be reclaimed by the writer until no more transactions
// are using them. A long-running read transaction can cause the database to
// quickly grow.
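//
// A minimal write sketch (how the *Tx is obtained from the DB is outside the
// scope of this file; assume tx is a writable transaction):
//
//	if err := tx.CreateBucket("widgets"); err != nil {
//		tx.Rollback()
//		return err
//	}
//	return tx.Commit()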
type Tx struct {
	writable bool
	db       *DB
	meta     *meta
	buckets  *buckets
	nodes    map[pgid]*node
	pages    map[pgid]*page
	pending  []*node
}

// init initializes the transaction.
func (t *Tx) init(db *DB) {
	t.db = db
	t.pages = nil

	// Copy the meta page since it can be changed by the writer.
	t.meta = &meta{}
	db.meta().copy(t.meta)

	// Read in the buckets page.
	t.buckets = &buckets{}
	t.buckets.read(t.page(t.meta.buckets))

	if t.writable {
		t.pages = make(map[pgid]*page)
		t.nodes = make(map[pgid]*node)

		// Increment the transaction id.
		t.meta.txid += txid(1)
	}
}

// id returns the transaction id.
func (t *Tx) id() txid {
	return t.meta.txid
}

// DB returns a reference to the database that created the transaction.
func (t *Tx) DB() *DB {
	return t.db
}

// Writable returns whether the transaction can perform write operations.
func (t *Tx) Writable() bool {
	return t.writable
}

// Bucket retrieves a bucket by name.
// Returns nil if the bucket does not exist.
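//
// A short sketch, assuming tx is an open transaction:
//
//	if b := tx.Bucket("widgets"); b != nil {
//		// use the bucket ...
//	}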
func (t *Tx) Bucket(name string) *Bucket {
	b := t.buckets.get(name)
	if b == nil {
		return nil
	}

	return &Bucket{
		bucket: b,
		name:   name,
		tx:     t,
	}
}

// Buckets retrieves a list of all buckets, sorted by name.
func (t *Tx) Buckets() []*Bucket {
	buckets := make([]*Bucket, 0, len(t.buckets.items))
	for name, b := range t.buckets.items {
		bucket := &Bucket{
			bucket: b,
			name:   name,
			tx:     t,
		}
		buckets = append(buckets, bucket)
	}
	sort.Sort(bucketsByName(buckets))
	return buckets
}

// CreateBucket creates a new bucket.
// Returns an error if the bucket already exists, if the bucket name is blank, or if the bucket name is too long.
func (t *Tx) CreateBucket(name string) error {
	if !t.writable {
		return ErrTxNotWritable
	} else if b := t.Bucket(name); b != nil {
		return ErrBucketExists
	} else if len(name) == 0 {
		return ErrBucketNameRequired
	} else if len(name) > MaxBucketNameSize {
		return ErrBucketNameTooLarge
	}

	// Create a blank root leaf page.
	p, err := t.allocate(1)
	if err != nil {
		return err
	}
	p.flags = leafPageFlag

	// Add bucket to buckets page.
	t.buckets.put(name, &bucket{root: p.id})

	return nil
}

// CreateBucketIfNotExists creates a new bucket if it doesn't already exist.
// Returns an error if the bucket name is blank, or if the bucket name is too long.
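//
// A short sketch, assuming tx is a writable transaction:
//
//	if err := tx.CreateBucketIfNotExists("widgets"); err != nil {
//		return err
//	}
//	b := tx.Bucket("widgets") // non-nil after the call above succeeds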
func (t *Tx) CreateBucketIfNotExists(name string) error {
	err := t.CreateBucket(name)
	if err != nil && err != ErrBucketExists {
		return err
	}
	return nil
}

// DeleteBucket deletes a bucket.
// Returns an error if the bucket cannot be found.
func (t *Tx) DeleteBucket(name string) error {
	if !t.writable {
		return ErrTxNotWritable
	}

	b := t.Bucket(name)
	if b == nil {
		return ErrBucketNotFound
	}

	// Remove from buckets page.
	t.buckets.del(name)

	// Free all pages.
	t.forEachPage(b.root, 0, func(p *page, depth int) {
		t.db.freelist.free(t.id(), p)
	})

	return nil
}

// Commit writes all changes to disk and updates the meta page.
// Returns an error if a disk write error occurs.
func (t *Tx) Commit() error {
	if t.db == nil {
		return nil
	} else if !t.writable {
		t.Rollback()
		return nil
	}
	defer t.close()

	// TODO(benbjohnson): Use vectorized I/O to write out dirty pages.

	// Rebalance and spill data onto dirty pages.
	t.rebalance()
	if err := t.spill(); err != nil {
		return err
	}

	// Spill buckets page.
	p, err := t.allocate((t.buckets.size() / t.db.pageSize) + 1)
	if err != nil {
		return err
	}
	t.buckets.write(p)

	// Write dirty pages to disk.
	if err := t.write(); err != nil {
		return err
	}

	// Update the meta.
	t.meta.buckets = p.id

	// Write meta to disk.
	if err := t.writeMeta(); err != nil {
		return err
	}

	return nil
}

// Rollback closes the transaction and ignores all previous updates.
func (t *Tx) Rollback() {
	t.close()
}

// close releases the database's write lock (for writable transactions) or
// unregisters the read transaction, then detaches the transaction from the DB.
func (t *Tx) close() {
	if t.db != nil {
		if t.writable {
			t.db.rwlock.Unlock()
		} else {
			t.db.removeTx(t)
		}
		t.db = nil
	}
}

// allocate returns a contiguous block of memory starting at a given page.
func (t *Tx) allocate(count int) (*page, error) {
	p, err := t.db.allocate(count)
	if err != nil {
		return nil, err
	}

	// Save to our page cache.
	t.pages[p.id] = p

	return p, nil
}

// rebalance attempts to balance all nodes.
func (t *Tx) rebalance() {
	for _, n := range t.nodes {
		n.rebalance()
	}
}

// spill writes all the nodes to dirty pages.
func (t *Tx) spill() error {
	// Keep track of the current root nodes.
	// We will update this at the end once all nodes are created.
	type root struct {
		node *node
		pgid pgid
	}
	var roots []root

	// Sort nodes by highest depth first.
	nodes := make(nodesByDepth, 0, len(t.nodes))
	for _, n := range t.nodes {
		nodes = append(nodes, n)
	}
	sort.Sort(nodes)

	// Spill nodes, deepest first.
	for i := 0; i < len(nodes); i++ {
		n := nodes[i]

		// Save existing root buckets for later.
		if n.parent == nil && n.pgid != 0 {
			roots = append(roots, root{n, n.pgid})
		}

		// Split nodes into appropriately sized nodes.
		// The first node in this list will be a reference to n to preserve ancestry.
		newNodes := n.split(t.db.pageSize)
		t.pending = newNodes

		// If this is a root node that split then create a parent node.
		if n.parent == nil && len(newNodes) > 1 {
			n.parent = &node{tx: t, isLeaf: false}
			nodes = append(nodes, n.parent)
		}

		// Add node's page to the freelist.
		if n.pgid > 0 {
			t.db.freelist.free(t.id(), t.page(n.pgid))
		}

		// Write nodes to dirty pages.
		for i, newNode := range newNodes {
			// Allocate contiguous space for the node.
			p, err := t.allocate((newNode.size() / t.db.pageSize) + 1)
			if err != nil {
				return err
			}

			// Write the node to the page.
			newNode.write(p)
			newNode.pgid = p.id
			newNode.parent = n.parent

			// The first node should use the existing entry, other nodes are inserts.
			var oldKey []byte
			if i == 0 {
				oldKey = n.key
			} else {
				oldKey = newNode.inodes[0].key
			}

			// Update the parent entry.
			if newNode.parent != nil {
				newNode.parent.put(oldKey, newNode.inodes[0].key, nil, newNode.pgid)
			}
		}

		t.pending = nil
	}

	// Update roots with new roots.
	for _, root := range roots {
		t.buckets.updateRoot(root.pgid, root.node.root().pgid)
	}

	// Clear out nodes now that they are all spilled.
	t.nodes = make(map[pgid]*node)

	return nil
}

// write writes any dirty pages to disk.
func (t *Tx) write() error {
	// Sort pages by id.
	pages := make(pages, 0, len(t.pages))
	for _, p := range t.pages {
		pages = append(pages, p)
	}
	sort.Sort(pages)

	// Write pages to disk in order.
	for _, p := range pages {
		size := (int(p.overflow) + 1) * t.db.pageSize

		// Reinterpret the in-memory page (plus any overflow) as a byte slice
		// so it can be written without copying.
		buf := (*[maxAllocSize]byte)(unsafe.Pointer(p))[:size]
		offset := int64(p.id) * int64(t.db.pageSize)
		if _, err := t.db.file.WriteAt(buf, offset); err != nil {
			return err
		}
	}

	// Clear out page cache.
	t.pages = make(map[pgid]*page)

	return nil
}

// writeMeta writes the meta page to disk.
func (t *Tx) writeMeta() error {
	// Create a temporary buffer for the meta page.
	buf := make([]byte, t.db.pageSize)
	p := t.db.pageInBuffer(buf, 0)
	t.meta.write(p)

	// Write the meta page to file.
	if _, err := t.db.metafile.WriteAt(buf, int64(p.id)*int64(t.db.pageSize)); err != nil {
		return err
	}

	return nil
}

// node creates a node from a page and associates it with a given parent.
func (t *Tx) node(pgid pgid, parent *node) *node {
	// Retrieve node if it's already been created.
	if t.nodes == nil {
		return nil
	} else if n := t.nodes[pgid]; n != nil {
		return n
	}

	// Otherwise create a node and cache it.
	n := &node{tx: t, parent: parent}
	if n.parent != nil {
		n.depth = n.parent.depth + 1
	}
	n.read(t.page(pgid))
	t.nodes[pgid] = n

	return n
}

// dereference removes all references to the old mmap.
func (t *Tx) dereference() {
	for _, n := range t.nodes {
		n.dereference()
	}

	for _, n := range t.pending {
		n.dereference()
	}
}

// page returns a reference to the page with a given id.
// If the page has been written to then a temporary buffered page is returned.
func (t *Tx) page(id pgid) *page {
	// Check the dirty pages first.
	if t.pages != nil {
		if p, ok := t.pages[id]; ok {
			return p
		}
	}

	// Otherwise return directly from the mmap.
	return t.db.page(id)
}

// pageNode returns the in-memory node, if it exists.
// Otherwise returns the underlying page.
func (t *Tx) pageNode(id pgid) (*page, *node) {
	if t.nodes != nil {
		if n := t.nodes[id]; n != nil {
			return nil, n
		}
	}
	return t.page(id), nil
}

// forEachPage iterates over every page in the subtree rooted at a given page
// and executes a function for each one.
func (t *Tx) forEachPage(pgid pgid, depth int, fn func(*page, int)) {
	p := t.page(pgid)

	// Execute function.
	fn(p, depth)

	// Recursively loop over children.
	if (p.flags & branchPageFlag) != 0 {
		for i := 0; i < int(p.count); i++ {
			elem := p.branchPageElement(uint16(i))
			t.forEachPage(elem.pgid, depth+1, fn)
		}
	}
}