Add inline documentation for bdc109b.

This commit adds inline comments to the change provided by sasha-s that
fixes the "slice out of bounds" errors.
master
Ben Johnson 2015-05-18 10:14:47 -06:00
parent bdc109bdc7
commit bf5458de2f
3 changed files with 22 additions and 11 deletions

@@ -5,7 +5,3 @@ const maxMapSize = 0xFFFFFFFFFFFF // 256TB
// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0x7FFFFFFF
// Setting
// const maxAllocSize = 70000
// reveals the index out of bound bug(s)
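
The hunk above documents maxAllocSize, which bounds the synthetic array type that raw pointers are cast to before being sliced. A minimal sketch of that pattern (not from this commit; the helper name asByteSlice and the buffer sizes are illustrative):

package main

import (
	"fmt"
	"unsafe"
)

const maxAllocSize = 0x7FFFFFFF

// asByteSlice reinterprets p as a pointer to a huge fixed-size array and then
// slices off the first n bytes. The array type only bounds the slice; no
// memory of that size is actually allocated.
func asByteSlice(p unsafe.Pointer, n int) []byte {
	return (*[maxAllocSize]byte)(p)[:n:n]
}

func main() {
	buf := [8]byte{1, 2, 3, 4, 5, 6, 7, 8}
	fmt.Println(asByteSlice(unsafe.Pointer(&buf[0]), len(buf)))
}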

node.go

@@ -220,16 +220,21 @@ func (n *node) write(p *page) {
elem.pgid = item.pgid
_assert(elem.pgid != p.id, "write: circular dependency occurred")
}
lk, lv := len(item.key), len(item.value)
if len(b) < lk+lv {
// If the length of key+value is larger than the max allocation size
// then we need to reallocate the byte array pointer.
//
// See: https://github.com/boltdb/bolt/pull/335
klen, vlen := len(item.key), len(item.value)
if len(b) < klen+vlen {
b = (*[maxAllocSize]byte)(unsafe.Pointer(&b[0]))[:]
}
// Write data for the element to the end of the page.
copy(b[0:], item.key)
b = b[lk:]
b = b[klen:]
copy(b[0:], item.value)
b = b[lv:]
b = b[vlen:]
}
// DEBUG ONLY: n.dump()
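
The write() hunk above copies each key/value pair into the page buffer and advances b past what was written, re-deriving the slice from its base pointer with the maxAllocSize bound whenever the remaining window is too small. A simplified sketch of just the copy-and-advance part (the item type and writeItems are stand-ins, not bolt code; it assumes the buffer is already large enough, so the re-derivation step is omitted):

package main

import "fmt"

type item struct{ key, value []byte }

// writeItems copies each key/value pair into buf back to back, re-slicing b
// past what was just written, and returns the number of bytes used.
func writeItems(buf []byte, items []item) int {
	b := buf
	n := 0
	for _, it := range items {
		klen, vlen := len(it.key), len(it.value)
		copy(b[0:], it.key)
		b = b[klen:]
		copy(b[0:], it.value)
		b = b[vlen:]
		n += klen + vlen
	}
	return n
}

func main() {
	buf := make([]byte, 64)
	n := writeItems(buf, []item{
		{[]byte("foo"), []byte("bar")},
		{[]byte("k"), []byte("value")},
	})
	fmt.Println(n, string(buf[:n]))
}
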
@@ -355,9 +360,7 @@ func (n *node) spill() error {
}
// Allocate contiguous space for the node.
// sz := node.size() + n.pageElementSize()*len(n.inodes)
sz := node.size()
p, err := tx.allocate((sz / tx.db.pageSize) + 1)
p, err := tx.allocate((node.size() / tx.db.pageSize) + 1)
if err != nil {
return err
}
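
The allocation above asks for node.size()/pageSize + 1 pages, which always reserves at least one page and rounds up when the node does not fit exactly. A small illustration (the helper name and page sizes are only examples):

package main

import "fmt"

// pagesNeeded mirrors the size/pageSize + 1 calculation used above.
func pagesNeeded(size, pageSize int) int {
	return (size / pageSize) + 1
}

func main() {
	fmt.Println(pagesNeeded(100, 4096))  // 1
	fmt.Println(pagesNeeded(4096, 4096)) // 2: an extra page when size is an exact multiple
	fmt.Println(pagesNeeded(9000, 4096)) // 3
}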

tx.go
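
The hunk below writes each dirty page out in chunks of at most maxAllocSize-1 bytes, advancing both the file offset and the source pointer between writes. A hedged sketch of the same chunking loop over an ordinary byte slice (writeChunked, maxChunk, and the injected writeAt callback are stand-ins, not the bolt API):

package main

import "fmt"

const maxChunk = 1 << 20 // illustrative cap, standing in for maxAllocSize - 1

// writeChunked writes buf at the given offset in pieces no larger than
// maxChunk, advancing the offset and the slice after each piece.
func writeChunked(buf []byte, offset int64, writeAt func([]byte, int64) error) error {
	for len(buf) > 0 {
		sz := len(buf)
		if sz > maxChunk {
			sz = maxChunk
		}
		if err := writeAt(buf[:sz], offset); err != nil {
			return err
		}
		offset += int64(sz)
		buf = buf[sz:]
	}
	return nil
}

func main() {
	data := make([]byte, 3*maxChunk+123)
	var calls int
	_ = writeChunked(data, 0, func(p []byte, off int64) error {
		calls++
		fmt.Printf("write %d bytes at offset %d\n", len(p), off)
		return nil
	})
	fmt.Println("calls:", calls) // 4
}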

@@ -422,26 +422,38 @@ func (tx *Tx) write() error {
for _, p := range pages {
size := (int(p.overflow) + 1) * tx.db.pageSize
offset := int64(p.id) * int64(tx.db.pageSize)
// Write out page in "max allocation" sized chunks.
ptr := (*[maxAllocSize]byte)(unsafe.Pointer(p))
for {
// Limit our write to our max allocation size.
sz := size
if sz > maxAllocSize-1 {
sz = maxAllocSize - 1
}
// Write chunk to disk.
buf := ptr[:sz]
if _, err := tx.db.ops.writeAt(buf, offset); err != nil {
return err
}
// Update statistics.
tx.stats.Write++
// Exit inner for loop if we've written all the chunks.
size -= sz
if size == 0 {
break
}
// Otherwise move offset forward and move pointer to next chunk.
offset += int64(sz)
ptr = (*[maxAllocSize]byte)(unsafe.Pointer(&ptr[sz]))
}
}
// Ignore file sync if flag is set on DB.
if !tx.db.NoSync || IgnoreNoSync {
if err := fdatasync(tx.db); err != nil {
return err