2014-02-23 05:54:54 +00:00
|
|
|
package bolt
|
|
|
|
|
|
|
|
import (
|
2014-05-15 00:08:55 +00:00
|
|
|
"fmt"
|
2014-05-21 15:08:37 +00:00
|
|
|
"io"
|
|
|
|
"os"
|
2014-02-23 05:54:54 +00:00
|
|
|
"sort"
|
2014-04-02 21:36:53 +00:00
|
|
|
"time"
|
2014-02-23 05:54:54 +00:00
|
|
|
"unsafe"
|
|
|
|
)
|
|
|
|
|
2014-03-09 03:25:37 +00:00
|
|
|
// txid represents the internal transaction identifier.
type txid uint64
|
|
|
|
|
|
|
|
// Tx represents a read-only or read/write transaction on the database.
// Read-only transactions can be used for retrieving values for keys and creating cursors.
// Read/write transactions can create and remove buckets and create and remove keys.
//
// IMPORTANT: You must commit or rollback transactions when you are done with
// them. Pages can not be reclaimed by the writer until no more transactions
// are using them. A long running read transaction can cause the database to
// quickly grow.
type Tx struct {
	writable       bool           // true for read/write transactions
	managed        bool           // true while controlled by the DB (Commit/Rollback disallowed)
	db             *DB            // owning database; set to nil when the tx closes
	meta           *meta          // private copy of the meta page (writer may change the original)
	root           Bucket         // root bucket of this transaction
	pages          map[pgid]*page // dirty-page cache; non-nil only for writable transactions
	stats          TxStats        // per-transaction statistics, merged into DB stats on close
	commitHandlers []func()       // callbacks executed after a successful commit

	// WriteFlag specifies the flag for write-related methods like WriteTo().
	// Tx opens the database file with the specified flag to copy the data.
	//
	// By default, the flag is unset, which works well for mostly in-memory
	// workloads. For databases that are much larger than available RAM,
	// set the flag to syscall.O_DIRECT to avoid trashing the page cache.
	WriteFlag int
}
|
|
|
|
|
|
|
|
// init initializes the transaction against db, taking a private snapshot of
// the meta page and root bucket so readers are isolated from the writer.
func (tx *Tx) init(db *DB) {
	tx.db = db
	tx.pages = nil

	// Copy the meta page since it can be changed by the writer.
	tx.meta = &meta{}
	db.meta().copy(tx.meta)

	// Copy over the root bucket.
	tx.root = newBucket(tx)
	tx.root.bucket = &bucket{}
	*tx.root.bucket = tx.meta.root

	// Increment the transaction id and add a page cache for writable transactions.
	if tx.writable {
		tx.pages = make(map[pgid]*page)
		tx.meta.txid += txid(1)
	}
}
|
|
|
|
|
2014-07-26 21:11:47 +00:00
|
|
|
// ID returns the transaction id.
|
|
|
|
func (tx *Tx) ID() int {
|
|
|
|
return int(tx.meta.txid)
|
2014-03-09 03:25:37 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// DB returns a reference to the database that created the transaction.
|
2014-04-04 18:03:04 +00:00
|
|
|
func (tx *Tx) DB() *DB {
|
|
|
|
return tx.db
|
2014-03-09 03:25:37 +00:00
|
|
|
}
|
|
|
|
|
2014-05-21 16:57:29 +00:00
|
|
|
// Size returns current database size in bytes as seen by this transaction.
|
2014-05-21 15:43:11 +00:00
|
|
|
func (tx *Tx) Size() int64 {
|
|
|
|
return int64(tx.meta.pgid) * int64(tx.db.pageSize)
|
|
|
|
}
|
|
|
|
|
2014-03-09 03:25:37 +00:00
|
|
|
// Writable returns whether the transaction can perform write operations.
|
2014-04-04 18:03:04 +00:00
|
|
|
func (tx *Tx) Writable() bool {
|
|
|
|
return tx.writable
|
2014-03-09 03:25:37 +00:00
|
|
|
}
|
2014-02-23 05:54:54 +00:00
|
|
|
|
2014-04-29 13:25:14 +00:00
|
|
|
// Cursor creates a cursor associated with the root bucket.
|
|
|
|
// All items in the cursor will return a nil value because all root bucket keys point to buckets.
|
|
|
|
// The cursor is only valid as long as the transaction is open.
|
|
|
|
// Do not use a cursor after the transaction is closed.
|
|
|
|
func (tx *Tx) Cursor() *Cursor {
|
|
|
|
return tx.root.Cursor()
|
|
|
|
}
|
|
|
|
|
2014-04-02 21:36:53 +00:00
|
|
|
// Stats retrieves a copy of the current transaction statistics.
|
2014-04-04 18:03:04 +00:00
|
|
|
func (tx *Tx) Stats() TxStats {
|
|
|
|
return tx.stats
|
2014-04-02 21:36:53 +00:00
|
|
|
}
|
|
|
|
|
2014-03-09 03:25:37 +00:00
|
|
|
// Bucket retrieves a bucket by name.
|
|
|
|
// Returns nil if the bucket does not exist.
|
2015-10-21 08:55:50 +00:00
|
|
|
// The bucket instance is only valid for the lifetime of the transaction.
|
2014-04-07 22:24:51 +00:00
|
|
|
func (tx *Tx) Bucket(name []byte) *Bucket {
|
|
|
|
return tx.root.Bucket(name)
|
2014-02-23 05:54:54 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// CreateBucket creates a new bucket.
|
|
|
|
// Returns an error if the bucket already exists, if the bucket name is blank, or if the bucket name is too long.
|
2015-10-21 08:55:50 +00:00
|
|
|
// The bucket instance is only valid for the lifetime of the transaction.
|
2014-04-16 03:45:06 +00:00
|
|
|
func (tx *Tx) CreateBucket(name []byte) (*Bucket, error) {
|
2014-04-07 22:24:51 +00:00
|
|
|
return tx.root.CreateBucket(name)
|
2014-02-23 05:54:54 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// CreateBucketIfNotExists creates a new bucket if it doesn't already exist.
|
|
|
|
// Returns an error if the bucket name is blank, or if the bucket name is too long.
|
2015-10-21 08:55:50 +00:00
|
|
|
// The bucket instance is only valid for the lifetime of the transaction.
|
2014-04-16 03:45:06 +00:00
|
|
|
func (tx *Tx) CreateBucketIfNotExists(name []byte) (*Bucket, error) {
|
2014-04-07 22:24:51 +00:00
|
|
|
return tx.root.CreateBucketIfNotExists(name)
|
2014-02-23 05:54:54 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// DeleteBucket deletes a bucket.
|
2014-04-07 22:24:51 +00:00
|
|
|
// Returns an error if the bucket cannot be found or if the key represents a non-bucket value.
|
|
|
|
func (tx *Tx) DeleteBucket(name []byte) error {
|
|
|
|
return tx.root.DeleteBucket(name)
|
|
|
|
}
|
2014-02-23 05:54:54 +00:00
|
|
|
|
2014-04-07 22:24:51 +00:00
|
|
|
// ForEach executes a function for each bucket in the root.
|
|
|
|
// If the provided function returns an error then the iteration is stopped and
|
|
|
|
// the error is returned to the caller.
|
|
|
|
func (tx *Tx) ForEach(fn func(name []byte, b *Bucket) error) error {
|
|
|
|
return tx.root.ForEach(func(k, v []byte) error {
|
|
|
|
if err := fn(k, tx.root.Bucket(k)); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
return nil
|
2014-02-27 18:55:44 +00:00
|
|
|
})
|
2014-02-23 05:54:54 +00:00
|
|
|
}
|
|
|
|
|
2014-04-04 13:51:01 +00:00
|
|
|
// OnCommit adds a handler function to be executed after the transaction successfully commits.
func (tx *Tx) OnCommit(fn func()) {
	tx.commitHandlers = append(tx.commitHandlers, fn)
}
|
|
|
|
|
2014-02-23 05:54:54 +00:00
|
|
|
// Commit writes all changes to disk and updates the meta page.
// Returns an error if a disk write error occurs, or if Commit is
// called on a read-only transaction.
func (tx *Tx) Commit() error {
	_assert(!tx.managed, "managed tx commit not allowed")
	if tx.db == nil {
		return ErrTxClosed
	} else if !tx.writable {
		return ErrTxNotWritable
	}

	// TODO(benbjohnson): Use vectorized I/O to write out dirty pages.

	// Rebalance nodes which have had deletions.
	var startTime = time.Now()
	tx.root.rebalance()
	if tx.stats.Rebalance > 0 {
		tx.stats.RebalanceTime += time.Since(startTime)
	}

	// spill data onto dirty pages.
	startTime = time.Now()
	if err := tx.root.spill(); err != nil {
		tx.rollback()
		return err
	}
	tx.stats.SpillTime += time.Since(startTime)

	// Free the old root bucket.
	tx.meta.root.root = tx.root.root

	// Remember the current high water mark so growth can be detected below.
	opgid := tx.meta.pgid

	// Free the freelist and allocate new pages for it. This will overestimate
	// the size of the freelist but not underestimate the size (which would be bad).
	tx.db.freelist.free(tx.meta.txid, tx.db.page(tx.meta.freelist))
	p, err := tx.allocate((tx.db.freelist.size() / tx.db.pageSize) + 1)
	if err != nil {
		tx.rollback()
		return err
	}
	if err := tx.db.freelist.write(p); err != nil {
		tx.rollback()
		return err
	}
	tx.meta.freelist = p.id

	// If the high water mark has moved up then attempt to grow the database.
	if tx.meta.pgid > opgid {
		if err := tx.db.grow(int(tx.meta.pgid+1) * tx.db.pageSize); err != nil {
			tx.rollback()
			return err
		}
	}

	// Write dirty pages to disk.
	startTime = time.Now()
	if err := tx.write(); err != nil {
		tx.rollback()
		return err
	}

	// If strict mode is enabled then perform a consistency check.
	// Only the first consistency error is reported in the panic.
	if tx.db.StrictMode {
		if err, ok := <-tx.Check(); ok {
			panic("check fail: " + err.Error())
		}
	}

	// Write meta to disk. The meta write is what makes the commit durable;
	// any failure before this point leaves the previous meta intact.
	if err := tx.writeMeta(); err != nil {
		tx.rollback()
		return err
	}
	tx.stats.WriteTime += time.Since(startTime)

	// Finalize the transaction.
	tx.close()

	// Execute commit handlers now that the locks have been removed.
	for _, fn := range tx.commitHandlers {
		fn()
	}

	return nil
}
|
|
|
|
|
2015-04-23 14:49:55 +00:00
|
|
|
// Rollback closes the transaction and ignores all previous updates. Read-only
// transactions must be rolled back and not committed.
func (tx *Tx) Rollback() error {
	_assert(!tx.managed, "managed tx rollback not allowed")
	if tx.db == nil {
		return ErrTxClosed
	}
	tx.rollback()
	return nil
}
|
|
|
|
|
2014-06-13 21:50:47 +00:00
|
|
|
// rollback discards the transaction's changes and closes it. Unlike Rollback,
// it performs no managed-tx assertion, so it is safe to call internally from
// Commit's error paths. Calling it on an already-closed tx is a no-op.
func (tx *Tx) rollback() {
	if tx.db == nil {
		return
	}
	if tx.writable {
		// Undo any frees made under this txid and reload the freelist
		// from the on-disk copy referenced by the current meta page.
		tx.db.freelist.rollback(tx.meta.txid)
		tx.db.freelist.reload(tx.db.page(tx.db.meta().freelist))
	}
	tx.close()
}
|
|
|
|
|
2014-04-04 18:03:04 +00:00
|
|
|
// close releases locks held by the transaction, merges its statistics into
// the database, and clears all references so the tx cannot be reused.
// Calling close on an already-closed tx is a no-op.
func (tx *Tx) close() {
	if tx.db == nil {
		return
	}
	if tx.writable {
		// Grab freelist stats before releasing the writer lock.
		var freelistFreeN = tx.db.freelist.free_count()
		var freelistPendingN = tx.db.freelist.pending_count()
		var freelistAlloc = tx.db.freelist.size()

		// Remove transaction ref & writer lock.
		tx.db.rwtx = nil
		tx.db.rwlock.Unlock()

		// Merge statistics.
		tx.db.statlock.Lock()
		tx.db.stats.FreePageN = freelistFreeN
		tx.db.stats.PendingPageN = freelistPendingN
		tx.db.stats.FreeAlloc = (freelistFreeN + freelistPendingN) * tx.db.pageSize
		tx.db.stats.FreelistInuse = freelistAlloc
		tx.db.stats.TxStats.add(&tx.stats)
		tx.db.statlock.Unlock()
	} else {
		// Read-only transactions just deregister themselves.
		tx.db.removeTx(tx)
	}

	// Clear all references.
	tx.db = nil
	tx.meta = nil
	tx.root = Bucket{tx: tx}
	tx.pages = nil
}
|
|
|
|
|
2014-05-21 15:08:37 +00:00
|
|
|
// Copy writes the entire database to a writer.
|
2015-11-23 01:53:11 +00:00
|
|
|
// This function exists for backwards compatibility. Use WriteTo() instead.
|
2014-05-21 15:08:37 +00:00
|
|
|
func (tx *Tx) Copy(w io.Writer) error {
|
2015-03-17 21:23:31 +00:00
|
|
|
_, err := tx.WriteTo(w)
|
|
|
|
return err
|
|
|
|
}
|
2014-06-05 15:58:41 +00:00
|
|
|
|
2015-03-17 21:23:31 +00:00
|
|
|
// WriteTo writes the entire database to a writer.
|
|
|
|
// If err == nil then exactly tx.Size() bytes will be written into the writer.
|
|
|
|
func (tx *Tx) WriteTo(w io.Writer) (n int64, err error) {
|
2015-11-06 18:49:46 +00:00
|
|
|
// Attempt to open reader with WriteFlag
|
2015-11-06 20:18:58 +00:00
|
|
|
f, err := os.OpenFile(tx.db.path, os.O_RDONLY|tx.WriteFlag, 0)
|
|
|
|
if err != nil {
|
2015-11-06 18:49:46 +00:00
|
|
|
return 0, err
|
2014-05-21 15:08:37 +00:00
|
|
|
}
|
2015-12-27 22:27:39 +00:00
|
|
|
defer func() { _ = f.Close() }()
|
2014-05-21 15:08:37 +00:00
|
|
|
|
|
|
|
// Copy the meta pages.
|
|
|
|
tx.db.metalock.Lock()
|
2015-03-17 21:23:31 +00:00
|
|
|
n, err = io.CopyN(w, f, int64(tx.db.pageSize*2))
|
2014-05-21 15:08:37 +00:00
|
|
|
tx.db.metalock.Unlock()
|
|
|
|
if err != nil {
|
2015-03-17 21:23:31 +00:00
|
|
|
return n, fmt.Errorf("meta copy: %s", err)
|
2014-05-21 15:08:37 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// Copy data pages.
|
2015-03-17 21:23:31 +00:00
|
|
|
wn, err := io.CopyN(w, f, tx.Size()-int64(tx.db.pageSize*2))
|
|
|
|
n += wn
|
|
|
|
if err != nil {
|
|
|
|
return n, err
|
2014-05-21 15:08:37 +00:00
|
|
|
}
|
|
|
|
|
2015-03-17 21:23:31 +00:00
|
|
|
return n, f.Close()
|
2014-05-21 15:08:37 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// CopyFile copies the entire database to file at the given path.
|
|
|
|
// A reader transaction is maintained during the copy so it is safe to continue
|
|
|
|
// using the database while a copy is in progress.
|
|
|
|
func (tx *Tx) CopyFile(path string, mode os.FileMode) error {
|
|
|
|
f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_TRUNC, mode)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
err = tx.Copy(f)
|
|
|
|
if err != nil {
|
|
|
|
_ = f.Close()
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
return f.Close()
|
|
|
|
}
|
|
|
|
|
2014-05-15 00:08:55 +00:00
|
|
|
// Check performs several consistency checks on the database for this transaction.
|
2014-05-27 17:31:55 +00:00
|
|
|
// An error is returned if any inconsistency is found.
|
|
|
|
//
|
|
|
|
// It can be safely run concurrently on a writable transaction. However, this
|
|
|
|
// incurs a high cost for large databases and databases with a lot of subbuckets
|
|
|
|
// because of caching. This overhead can be removed if running on a read-only
|
|
|
|
// transaction, however, it is not safe to execute other writer transactions at
|
|
|
|
// the same time.
|
2014-05-28 16:28:15 +00:00
|
|
|
func (tx *Tx) Check() <-chan error {
|
|
|
|
ch := make(chan error)
|
|
|
|
go tx.check(ch)
|
|
|
|
return ch
|
|
|
|
}
|
2014-05-15 00:08:55 +00:00
|
|
|
|
2014-05-28 16:28:15 +00:00
|
|
|
func (tx *Tx) check(ch chan error) {
|
2014-05-15 00:08:55 +00:00
|
|
|
// Check if any pages are double freed.
|
|
|
|
freed := make(map[pgid]bool)
|
|
|
|
for _, id := range tx.db.freelist.all() {
|
|
|
|
if freed[id] {
|
2014-05-28 16:28:15 +00:00
|
|
|
ch <- fmt.Errorf("page %d: already freed", id)
|
2014-05-15 00:08:55 +00:00
|
|
|
}
|
|
|
|
freed[id] = true
|
|
|
|
}
|
|
|
|
|
|
|
|
// Track every reachable page.
|
|
|
|
reachable := make(map[pgid]*page)
|
|
|
|
reachable[0] = tx.page(0) // meta0
|
|
|
|
reachable[1] = tx.page(1) // meta1
|
|
|
|
for i := uint32(0); i <= tx.page(tx.meta.freelist).overflow; i++ {
|
|
|
|
reachable[tx.meta.freelist+pgid(i)] = tx.page(tx.meta.freelist)
|
|
|
|
}
|
|
|
|
|
|
|
|
// Recursively check buckets.
|
2014-05-28 18:50:46 +00:00
|
|
|
tx.checkBucket(&tx.root, reachable, freed, ch)
|
2014-05-15 00:08:55 +00:00
|
|
|
|
|
|
|
// Ensure all pages below high water mark are either reachable or freed.
|
|
|
|
for i := pgid(0); i < tx.meta.pgid; i++ {
|
|
|
|
_, isReachable := reachable[i]
|
|
|
|
if !isReachable && !freed[i] {
|
2014-05-28 16:28:15 +00:00
|
|
|
ch <- fmt.Errorf("page %d: unreachable unfreed", int(i))
|
2014-05-15 00:08:55 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-05-28 16:28:15 +00:00
|
|
|
// Close the channel to signal completion.
|
|
|
|
close(ch)
|
2014-05-15 00:08:55 +00:00
|
|
|
}
|
|
|
|
|
2014-05-28 18:50:46 +00:00
|
|
|
// checkBucket walks every page used by bucket b, recording each in reachable
// and reporting inconsistencies on ch: out-of-bounds page ids, pages
// referenced more than once, reachable-but-freed pages, and pages that are
// neither branch nor leaf. It then recurses into each child bucket.
func (tx *Tx) checkBucket(b *Bucket, reachable map[pgid]*page, freed map[pgid]bool, ch chan error) {
	// Ignore inline buckets.
	if b.root == 0 {
		return
	}

	// Check every page used by this bucket.
	b.tx.forEachPage(b.root, 0, func(p *page, _ int) {
		if p.id > tx.meta.pgid {
			ch <- fmt.Errorf("page %d: out of bounds: %d", int(p.id), int(b.tx.meta.pgid))
		}

		// Ensure each page (including its overflow pages) is only referenced once.
		for i := pgid(0); i <= pgid(p.overflow); i++ {
			var id = p.id + i
			if _, ok := reachable[id]; ok {
				ch <- fmt.Errorf("page %d: multiple references", int(id))
			}
			reachable[id] = p
		}

		// We should only encounter un-freed leaf and branch pages.
		if freed[p.id] {
			ch <- fmt.Errorf("page %d: reachable freed", int(p.id))
		} else if (p.flags&branchPageFlag) == 0 && (p.flags&leafPageFlag) == 0 {
			ch <- fmt.Errorf("page %d: invalid type: %s", int(p.id), p.typ())
		}
	})

	// Check each bucket within this bucket. Iteration errors are ignored
	// deliberately: individual problems are reported via ch above.
	_ = b.ForEach(func(k, v []byte) error {
		if child := b.Bucket(k); child != nil {
			tx.checkBucket(child, reachable, freed, ch)
		}
		return nil
	})
}
|
|
|
|
|
2014-02-23 05:54:54 +00:00
|
|
|
// allocate returns a contiguous block of memory starting at a given page.
|
2014-04-04 18:03:04 +00:00
|
|
|
func (tx *Tx) allocate(count int) (*page, error) {
|
|
|
|
p, err := tx.db.allocate(count)
|
2014-02-23 05:54:54 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
// Save to our page cache.
|
2014-04-04 18:03:04 +00:00
|
|
|
tx.pages[p.id] = p
|
2014-02-23 05:54:54 +00:00
|
|
|
|
2014-04-02 21:36:53 +00:00
|
|
|
// Update statistics.
|
2014-04-04 18:03:04 +00:00
|
|
|
tx.stats.PageCount++
|
|
|
|
tx.stats.PageAlloc += count * tx.db.pageSize
|
2014-04-02 21:36:53 +00:00
|
|
|
|
2014-02-23 05:54:54 +00:00
|
|
|
return p, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// write writes any dirty pages to disk in page-id order, optionally syncing
// the file, and then resets the dirty-page cache.
func (tx *Tx) write() error {
	// Sort pages by id.
	pages := make(pages, 0, len(tx.pages))
	for _, p := range tx.pages {
		pages = append(pages, p)
	}
	sort.Sort(pages)

	// Write pages to disk in order.
	for _, p := range pages {
		size := (int(p.overflow) + 1) * tx.db.pageSize
		offset := int64(p.id) * int64(tx.db.pageSize)

		// Write out page in "max allocation" sized chunks. Slicing a
		// *[maxAllocSize]byte view across the maxAllocSize boundary panics
		// ("slice bounds out of range"), so large pages are written in
		// chunks and the view is re-based between chunks.
		ptr := (*[maxAllocSize]byte)(unsafe.Pointer(p))
		for {
			// Limit our write to our max allocation size.
			sz := size
			if sz > maxAllocSize-1 {
				sz = maxAllocSize - 1
			}

			// Write chunk to disk.
			buf := ptr[:sz]
			if _, err := tx.db.ops.writeAt(buf, offset); err != nil {
				return err
			}

			// Update statistics.
			tx.stats.Write++

			// Exit inner for loop if we've written all the chunks.
			size -= sz
			if size == 0 {
				break
			}

			// Otherwise move offset forward and move pointer to next chunk.
			offset += int64(sz)
			ptr = (*[maxAllocSize]byte)(unsafe.Pointer(&ptr[sz]))
		}
	}

	// Ignore file sync if flag is set on DB (IgnoreNoSync overrides NoSync).
	if !tx.db.NoSync || IgnoreNoSync {
		if err := fdatasync(tx.db); err != nil {
			return err
		}
	}

	// Clear out page cache.
	tx.pages = make(map[pgid]*page)

	return nil
}
|
|
|
|
|
|
|
|
// writeMeta writes the meta to the disk.
|
2014-04-04 18:03:04 +00:00
|
|
|
func (tx *Tx) writeMeta() error {
|
2014-02-23 05:54:54 +00:00
|
|
|
// Create a temporary buffer for the meta page.
|
2014-04-04 18:03:04 +00:00
|
|
|
buf := make([]byte, tx.db.pageSize)
|
|
|
|
p := tx.db.pageInBuffer(buf, 0)
|
|
|
|
tx.meta.write(p)
|
2014-02-23 05:54:54 +00:00
|
|
|
|
|
|
|
// Write the meta page to file.
|
2014-04-04 18:03:04 +00:00
|
|
|
if _, err := tx.db.ops.writeAt(buf, int64(p.id)*int64(tx.db.pageSize)); err != nil {
|
2014-04-02 19:50:03 +00:00
|
|
|
return err
|
|
|
|
}
|
2014-09-18 20:15:52 +00:00
|
|
|
if !tx.db.NoSync || IgnoreNoSync {
|
|
|
|
if err := fdatasync(tx.db); err != nil {
|
2014-07-15 13:37:46 +00:00
|
|
|
return err
|
|
|
|
}
|
2014-03-23 17:38:35 +00:00
|
|
|
}
|
2014-02-23 05:54:54 +00:00
|
|
|
|
2014-04-02 21:36:53 +00:00
|
|
|
// Update statistics.
|
2014-04-04 18:03:04 +00:00
|
|
|
tx.stats.Write++
|
2014-04-02 21:36:53 +00:00
|
|
|
|
2014-02-23 05:54:54 +00:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2014-03-09 03:25:37 +00:00
|
|
|
// page returns a reference to the page with a given id.
|
2016-01-08 07:33:40 +00:00
|
|
|
// If page has been written to then a temporary buffered page is returned.
|
2014-04-04 18:03:04 +00:00
|
|
|
func (tx *Tx) page(id pgid) *page {
|
2014-03-09 03:25:37 +00:00
|
|
|
// Check the dirty pages first.
|
2014-04-04 18:03:04 +00:00
|
|
|
if tx.pages != nil {
|
|
|
|
if p, ok := tx.pages[id]; ok {
|
2014-03-09 03:25:37 +00:00
|
|
|
return p
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Otherwise return directly from the mmap.
|
2014-04-04 18:03:04 +00:00
|
|
|
return tx.db.page(id)
|
2014-03-09 03:25:37 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// forEachPage iterates over every page within a given page and executes a function.
|
2014-04-04 18:03:04 +00:00
|
|
|
func (tx *Tx) forEachPage(pgid pgid, depth int, fn func(*page, int)) {
|
|
|
|
p := tx.page(pgid)
|
2014-03-09 03:25:37 +00:00
|
|
|
|
|
|
|
// Execute function.
|
|
|
|
fn(p, depth)
|
|
|
|
|
|
|
|
// Recursively loop over children.
|
|
|
|
if (p.flags & branchPageFlag) != 0 {
|
|
|
|
for i := 0; i < int(p.count); i++ {
|
|
|
|
elem := p.branchPageElement(uint16(i))
|
2014-04-04 18:03:04 +00:00
|
|
|
tx.forEachPage(elem.pgid, depth+1, fn)
|
2014-03-09 03:25:37 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2014-03-22 04:34:54 +00:00
|
|
|
|
|
|
|
// Page returns page information for a given page number.
|
2014-05-27 17:31:55 +00:00
|
|
|
// This is only safe for concurrent use when used by a writable transaction.
|
2014-04-04 18:03:04 +00:00
|
|
|
func (tx *Tx) Page(id int) (*PageInfo, error) {
|
|
|
|
if tx.db == nil {
|
2014-03-23 18:17:30 +00:00
|
|
|
return nil, ErrTxClosed
|
2014-04-04 18:03:04 +00:00
|
|
|
} else if pgid(id) >= tx.meta.pgid {
|
2014-03-22 04:34:54 +00:00
|
|
|
return nil, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Build the page info.
|
2014-04-07 22:24:51 +00:00
|
|
|
p := tx.db.page(pgid(id))
|
2014-03-22 04:34:54 +00:00
|
|
|
info := &PageInfo{
|
|
|
|
ID: id,
|
|
|
|
Count: int(p.count),
|
|
|
|
OverflowCount: int(p.overflow),
|
|
|
|
}
|
|
|
|
|
|
|
|
// Determine the type (or if it's free).
|
2014-06-30 14:01:41 +00:00
|
|
|
if tx.db.freelist.freed(pgid(id)) {
|
2014-03-22 04:34:54 +00:00
|
|
|
info.Type = "free"
|
|
|
|
} else {
|
|
|
|
info.Type = p.typ()
|
|
|
|
}
|
|
|
|
|
|
|
|
return info, nil
|
|
|
|
}
|
2014-04-02 21:36:53 +00:00
|
|
|
|
|
|
|
// TxStats represents statistics about the actions performed by the transaction.
|
|
|
|
type TxStats struct {
|
|
|
|
// Page statistics.
|
|
|
|
PageCount int // number of page allocations
|
|
|
|
PageAlloc int // total bytes allocated
|
|
|
|
|
|
|
|
// Cursor statistics.
|
|
|
|
CursorCount int // number of cursors created
|
|
|
|
|
|
|
|
// Node statistics
|
|
|
|
NodeCount int // number of node allocations
|
|
|
|
NodeDeref int // number of node dereferences
|
|
|
|
|
|
|
|
// Rebalance statistics.
|
|
|
|
Rebalance int // number of node rebalances
|
|
|
|
RebalanceTime time.Duration // total time spent rebalancing
|
|
|
|
|
2014-05-02 19:59:23 +00:00
|
|
|
// Split/Spill statistics.
|
|
|
|
Split int // number of nodes split
|
|
|
|
Spill int // number of nodes spilled
|
2014-04-02 21:36:53 +00:00
|
|
|
SpillTime time.Duration // total time spent spilling
|
|
|
|
|
|
|
|
// Write statistics.
|
|
|
|
Write int // number of writes performed
|
|
|
|
WriteTime time.Duration // total time spent writing to disk
|
|
|
|
}
|
|
|
|
|
|
|
|
func (s *TxStats) add(other *TxStats) {
|
|
|
|
s.PageCount += other.PageCount
|
|
|
|
s.PageAlloc += other.PageAlloc
|
|
|
|
s.CursorCount += other.CursorCount
|
|
|
|
s.NodeCount += other.NodeCount
|
|
|
|
s.NodeDeref += other.NodeDeref
|
|
|
|
s.Rebalance += other.Rebalance
|
|
|
|
s.RebalanceTime += other.RebalanceTime
|
2014-05-02 19:59:23 +00:00
|
|
|
s.Split += other.Split
|
2014-04-02 21:36:53 +00:00
|
|
|
s.Spill += other.Spill
|
|
|
|
s.SpillTime += other.SpillTime
|
|
|
|
s.Write += other.Write
|
|
|
|
s.WriteTime += other.WriteTime
|
|
|
|
}
|
|
|
|
|
|
|
|
// Sub calculates and returns the difference between two sets of transaction stats.
|
|
|
|
// This is useful when obtaining stats at two different points and time and
|
|
|
|
// you need the performance counters that occurred within that time span.
|
|
|
|
func (s *TxStats) Sub(other *TxStats) TxStats {
|
|
|
|
var diff TxStats
|
|
|
|
diff.PageCount = s.PageCount - other.PageCount
|
|
|
|
diff.PageAlloc = s.PageAlloc - other.PageAlloc
|
|
|
|
diff.CursorCount = s.CursorCount - other.CursorCount
|
|
|
|
diff.NodeCount = s.NodeCount - other.NodeCount
|
|
|
|
diff.NodeDeref = s.NodeDeref - other.NodeDeref
|
|
|
|
diff.Rebalance = s.Rebalance - other.Rebalance
|
|
|
|
diff.RebalanceTime = s.RebalanceTime - other.RebalanceTime
|
2014-05-02 19:59:23 +00:00
|
|
|
diff.Split = s.Split - other.Split
|
2014-04-02 21:36:53 +00:00
|
|
|
diff.Spill = s.Spill - other.Spill
|
|
|
|
diff.SpillTime = s.SpillTime - other.SpillTime
|
|
|
|
diff.Write = s.Write - other.Write
|
|
|
|
diff.WriteTime = s.WriteTime - other.WriteTime
|
|
|
|
return diff
|
|
|
|
}
|