contenthash: add lru for context objects

Signed-off-by: Tonis Tiigi <tonistiigi@gmail.com>
Branch: docker-18.09
Author: Tonis Tiigi
Date:   2017-07-31 13:22:35 -07:00
Commit: 6690f79d35 (parent ce86ec18bd)

5 changed files with 1034 additions and 165 deletions

cache/contenthash/checksum.go

@@ -9,10 +9,13 @@ import (
"path/filepath"
"sync"
"github.com/BurntSushi/locker"
"github.com/docker/docker/pkg/pools"
"github.com/docker/docker/pkg/symlink"
iradix "github.com/hashicorp/go-immutable-radix"
"github.com/hashicorp/golang-lru/simplelru"
"github.com/moby/buildkit/cache"
"github.com/moby/buildkit/cache/metadata"
"github.com/moby/buildkit/snapshot"
digest "github.com/opencontainers/go-digest"
"github.com/pkg/errors"
@@ -21,7 +24,18 @@ import (
var errNotFound = errors.Errorf("not found")
var defaultManager = &cacheManager{actives: map[string]*cacheContext{}}
var defaultManager *cacheManager
var defaultManagerOnce sync.Once
const keyContentHash = "buildkit.contenthash.v0"
func getDefaultManager() *cacheManager {
defaultManagerOnce.Do(func() {
lru, _ := simplelru.NewLRU(20, nil) // error is impossible on positive size
defaultManager = &cacheManager{lru: lru, locker: locker.NewLocker()}
})
return defaultManager
}
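The actives map keyed by ref ID is replaced by a fixed-size LRU, so up to 20 cacheContext objects (and their in-memory radix trees) stay warm between Checksum calls instead of being rebuilt each time. A minimal, standalone sketch of the simplelru behavior the manager relies on (keys and values are made up for illustration, not from the commit):

package main

import (
	"fmt"

	"github.com/hashicorp/golang-lru/simplelru"
)

func main() {
	// NewLRU only fails on a non-positive size, as the comment above notes.
	lru, _ := simplelru.NewLRU(2, nil)

	lru.Add("ref-a", "cacheContext A")
	lru.Add("ref-b", "cacheContext B")
	lru.Get("ref-a")                   // touch A so it becomes most recently used
	lru.Add("ref-c", "cacheContext C") // capacity 2: evicts B, the least recently used

	_, ok := lru.Get("ref-b")
	fmt.Println(ok) // false: B was evicted and would be rebuilt on next use
}

In the manager itself, access to the LRU is additionally serialized with a per-ref locker before Get/Add are called.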
// Layout in the radix tree: Every path is saved by cleaned absolute unix path.
// Directories have 2 records, one contains digest for directory header, other
@@ -30,79 +44,76 @@ var defaultManager = &cacheManager{actives: map[string]*cacheContext{}}
// key for root, "/" for the root header
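The layout comment above is easier to see on a toy tree. This sketch uses the same go-immutable-radix package; the paths and record values are invented for illustration and are not part of the commit:

package main

import (
	"fmt"

	iradix "github.com/hashicorp/go-immutable-radix"
)

func main() {
	tree := iradix.New()
	txn := tree.Txn()
	// A directory gets two records: "<path>" for its contents digest and
	// "<path>/" for its header; files get a single "<path>" record.
	txn.Insert([]byte("/dir"), "directory contents record")
	txn.Insert([]byte("/dir/"), "directory header record")
	txn.Insert([]byte("/dir/file"), "file record")
	tree = txn.Commit()

	// Walking the "<path>/" prefix visits the header plus everything under the
	// directory, which is how a deleted directory is purged in HandleChange.
	tree.Root().WalkPrefix([]byte("/dir/"), func(k []byte, v interface{}) bool {
		fmt.Printf("%s => %v\n", k, v)
		return false
	})
}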
func Checksum(ctx context.Context, ref cache.ImmutableRef, path string) (digest.Digest, error) {
return defaultManager.Checksum(ctx, ref, path)
return getDefaultManager().Checksum(ctx, ref, path)
}
// func GetCacheContext(ctx context.Context, ref cache.ImmutableRef) (CacheContext, error) {
//
// }
//
// func SetCacheContext(ctx context.Context, ref cache.ImmutableRef, cc CacheContext) error {
//
// }
func GetCacheContext(ctx context.Context, ref cache.ImmutableRef) (CacheContext, error) {
return getDefaultManager().GetCacheContext(ctx, ref)
}
func SetCacheContext(ctx context.Context, ref cache.ImmutableRef, cc CacheContext) error {
return getDefaultManager().SetCacheContext(ctx, ref, cc)
}
type CacheContext interface {
HandleChange(kind fsutil.ChangeKind, p string, fi os.FileInfo, err error)
// Reset(p string)
Marshal() ([]byte, error)
}
type CacheRecord struct {
Type CacheRecordType
Link string
Digest digest.Digest
Checksum(ctx context.Context, ref cache.Mountable, p string) (digest.Digest, error)
HandleChange(kind fsutil.ChangeKind, p string, fi os.FileInfo, err error) error
}
type Hashed interface {
Digest() digest.Digest
}
type CacheRecordType int
const (
CacheRecordFile CacheRecordType = iota
CacheRecordDir
CacheRecordDirHeader
CacheRecordSymlink
)
type cacheManager struct {
mu sync.Mutex
actives map[string]*cacheContext
locker *locker.Locker
lru *simplelru.LRU
}
func (cm *cacheManager) Checksum(ctx context.Context, ref cache.ImmutableRef, p string) (digest.Digest, error) {
cm.mu.Lock()
cc, ok := cm.actives[ref.ID()]
if !ok {
cc = newCacheContext(ref)
cm.actives[ref.ID()] = cc
cc, err := cm.GetCacheContext(ctx, ref)
if err != nil {
return "", nil
}
cc.refs++
cm.mu.Unlock()
return cc.Checksum(ctx, ref, p)
}
defer func() {
cm.mu.Lock()
cc.refs--
if cc.refs == 0 {
cc.save() // TODO: do this on background, BUT need to unmount before releasing, possibly wrap ref
cc.clean()
delete(cm.actives, ref.ID())
}
cm.mu.Unlock()
}()
func (cm *cacheManager) GetCacheContext(ctx context.Context, ref cache.ImmutableRef) (CacheContext, error) {
cm.locker.Lock(ref.ID())
v, ok := cm.lru.Get(ref.ID())
if ok {
cm.locker.Unlock(ref.ID())
return v.(*cacheContext), nil
}
cc, err := newCacheContext(ref.Metadata())
if err != nil {
cm.locker.Unlock(ref.ID())
return nil, err
}
cm.lru.Add(ref.ID(), cc)
cm.locker.Unlock(ref.ID())
return cc, nil
}
return cc.Checksum(ctx, p)
func (cm *cacheManager) SetCacheContext(ctx context.Context, ref cache.ImmutableRef, cci CacheContext) error {
cc, ok := cci.(*cacheContext)
if !ok {
return errors.Errorf("invalid cachecontext: %T", cc)
}
if ref.ID() != cc.md.ID() {
return errors.New("saving cachecontext under different ID not supported")
}
if err := cc.save(); err != nil {
return err
}
cm.lru.Add(ref.ID(), cc)
return nil
}
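GetCacheContext and SetCacheContext give callers a load-modify-persist cycle around the LRU. A hypothetical caller (the function name and error handling are assumptions, not part of this commit) would look roughly like:

package example

import (
	"context"

	"github.com/moby/buildkit/cache"
	"github.com/moby/buildkit/cache/contenthash"
	digest "github.com/opencontainers/go-digest"
)

// checksumAndPersist reuses (or lazily builds) the content-hash context for
// ref, hashes one path, then writes the updated records back so they survive
// an LRU eviction or a daemon restart.
func checksumAndPersist(ctx context.Context, ref cache.ImmutableRef, p string) (digest.Digest, error) {
	cc, err := contenthash.GetCacheContext(ctx, ref)
	if err != nil {
		return "", err
	}
	dgst, err := cc.Checksum(ctx, ref, p)
	if err != nil {
		return "", err
	}
	if err := contenthash.SetCacheContext(ctx, ref, cc); err != nil {
		return "", err
	}
	return dgst, nil
}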
type cacheContext struct {
mu sync.RWMutex
mountPath string
unmount func() error
ref cache.ImmutableRef
refs int
tree *iradix.Tree
// isDirty bool
mu sync.RWMutex
md *metadata.StorageItem
tree *iradix.Tree
dirty bool // needs to be persisted to disk
// used in HandleChange
txn *iradix.Txn
@@ -110,18 +121,96 @@ type cacheContext struct {
dirtyMap map[string]struct{}
}
func newCacheContext(ref cache.ImmutableRef) *cacheContext {
type mount struct {
mountable cache.Mountable
mountPath string
unmount func() error
}
func (m *mount) mount(ctx context.Context) (string, error) {
if m.mountPath != "" {
return m.mountPath, nil
}
mounts, err := m.mountable.Mount(ctx, true)
if err != nil {
return "", err
}
lm := snapshot.LocalMounter(mounts)
mp, err := lm.Mount()
if err != nil {
return "", err
}
m.mountPath = mp
m.unmount = lm.Unmount
return mp, nil
}
func (m *mount) clean() error {
if m.mountPath != "" {
if err := m.unmount(); err != nil {
return err
}
m.mountPath = ""
}
return nil
}
func newCacheContext(md *metadata.StorageItem) (*cacheContext, error) {
cc := &cacheContext{
ref: ref,
md: md,
tree: iradix.New(),
dirtyMap: map[string]struct{}{},
}
// cc.Load(md)
return cc
if err := cc.load(); err != nil {
return nil, err
}
return cc, nil
}
func (cc *cacheContext) save() {
// TODO:
func (cc *cacheContext) load() error {
dt, err := cc.md.GetExternal(keyContentHash)
if err != nil {
return nil
}
var l CacheRecords
if err := l.Unmarshal(dt); err != nil {
return err
}
txn := cc.tree.Txn()
for _, p := range l.Paths {
txn.Insert([]byte(p.Path), p.Record)
}
cc.tree = txn.Commit()
return nil
}
func (cc *cacheContext) save() error {
cc.mu.Lock()
defer cc.mu.Unlock()
cc.dirty = true
var l CacheRecords
node := cc.tree.Root()
node.Walk(func(k []byte, v interface{}) bool {
l.Paths = append(l.Paths, &CacheRecordWithPath{
Path: string(k),
Record: v.(*CacheRecord),
})
return false
})
dt, err := l.Marshal()
if err != nil {
return err
}
return cc.md.SetExternal(keyContentHash, dt)
}
// HandleChange notifies the source about a modification operation
@@ -133,7 +222,7 @@ func (cc *cacheContext) HandleChange(kind fsutil.ChangeKind, p string, fi os.Fil
k := []byte(p)
deleteDir := func(cr *CacheRecord) {
if cr.Type == CacheRecordDir {
if cr.Type == CacheRecordTypeDir {
cc.node.WalkPrefix(append(k, []byte("/")...), func(k []byte, v interface{}) bool {
cc.txn.Delete(k)
return false
@@ -177,16 +266,16 @@ func (cc *cacheContext) HandleChange(kind fsutil.ChangeKind, p string, fi os.Fil
}
cr := &CacheRecord{
Type: CacheRecordFile,
Type: CacheRecordTypeFile,
}
if fi.Mode()&os.ModeSymlink != 0 {
cr.Type = CacheRecordSymlink
cr.Link = filepath.ToSlash(stat.Linkname)
cr.Type = CacheRecordTypeSymlink
cr.Linkname = filepath.ToSlash(stat.Linkname)
}
if fi.IsDir() {
cr.Type = CacheRecordDirHeader
cr.Type = CacheRecordTypeDirHeader
cr2 := &CacheRecord{
Type: CacheRecordDir,
Type: CacheRecordTypeDir,
}
cc.txn.Insert(k, cr2)
k = append(k, []byte("/")...)
@@ -203,20 +292,23 @@ func (cc *cacheContext) HandleChange(kind fsutil.ChangeKind, p string, fi os.Fil
return nil
}
func (cc *cacheContext) Checksum(ctx context.Context, p string) (digest.Digest, error) {
func (cc *cacheContext) Checksum(ctx context.Context, mountable cache.Mountable, p string) (digest.Digest, error) {
m := &mount{mountable: mountable}
defer m.clean()
const maxSymlinkLimit = 255
i := 0
for {
if i > maxSymlinkLimit {
return "", errors.Errorf("too many symlinks: %s", p)
}
cr, err := cc.ChecksumNoFollow(ctx, p)
cr, err := cc.checksumNoFollow(ctx, m, p)
if err != nil {
return "", err
}
if cr.Type == CacheRecordSymlink {
link := cr.Link
if !path.IsAbs(cr.Link) {
if cr.Type == CacheRecordTypeSymlink {
link := cr.Linkname
if !path.IsAbs(cr.Linkname) {
link = path.Join(path.Dir(p), link)
}
i++
@@ -227,7 +319,7 @@ func (cc *cacheContext) Checksum(ctx context.Context, p string) (digest.Digest,
}
}
func (cc *cacheContext) ChecksumNoFollow(ctx context.Context, p string) (*CacheRecord, error) {
func (cc *cacheContext) checksumNoFollow(ctx context.Context, m *mount, p string) (*CacheRecord, error) {
p = path.Join("/", filepath.ToSlash(p))
if p == "/" {
p = ""
@@ -255,7 +347,14 @@ func (cc *cacheContext) ChecksumNoFollow(ctx context.Context, p string) (*CacheR
cc.commitActiveTransaction()
}
return cc.lazyChecksum(ctx, p)
defer func() {
if cc.dirty {
go cc.save()
cc.dirty = false
}
}()
return cc.lazyChecksum(ctx, m, p)
}
func (cc *cacheContext) commitActiveTransaction() {
@@ -263,7 +362,7 @@ func (cc *cacheContext) commitActiveTransaction() {
addParentToMap(d, cc.dirtyMap)
}
for d := range cc.dirtyMap {
cc.txn.Insert([]byte(d), &CacheRecord{Type: CacheRecordDir})
cc.txn.Insert([]byte(d), &CacheRecord{Type: CacheRecordTypeDir})
}
cc.tree = cc.txn.Commit()
cc.node = nil
@@ -271,39 +370,40 @@ func (cc *cacheContext) commitActiveTransaction() {
cc.txn = nil
}
func (cc *cacheContext) lazyChecksum(ctx context.Context, p string) (*CacheRecord, error) {
func (cc *cacheContext) lazyChecksum(ctx context.Context, m *mount, p string) (*CacheRecord, error) {
root := cc.tree.Root()
if cc.needsScan(root, p) {
if err := cc.scanPath(ctx, p); err != nil {
if err := cc.scanPath(ctx, m, p); err != nil {
return nil, err
}
}
k := []byte(p)
root = cc.tree.Root()
txn := cc.tree.Txn()
cr, err := cc.checksum(ctx, root, txn, k)
cr, updated, err := cc.checksum(ctx, root, txn, m, k)
if err != nil {
return nil, err
}
cc.tree = txn.Commit()
cc.dirty = updated
return cr, err
}
func (cc *cacheContext) checksum(ctx context.Context, root *iradix.Node, txn *iradix.Txn, k []byte) (*CacheRecord, error) {
func (cc *cacheContext) checksum(ctx context.Context, root *iradix.Node, txn *iradix.Txn, m *mount, k []byte) (*CacheRecord, bool, error) {
v, ok := root.Get(k)
if !ok {
return nil, errors.Wrapf(errNotFound, "%s not found", string(k))
return nil, false, errors.Wrapf(errNotFound, "%s not found", string(k))
}
cr := v.(*CacheRecord)
if cr.Digest != "" {
return cr, nil
return cr, false, nil
}
var dgst digest.Digest
switch cr.Type {
case CacheRecordDir:
case CacheRecordTypeDir:
h := sha256.New()
iter := root.Iterator()
next := append(k, []byte("/")...)
@@ -315,13 +415,13 @@ func (cc *cacheContext) checksum(ctx context.Context, root *iradix.Node, txn *ir
}
h.Write(bytes.TrimPrefix(subk, k))
subcr, err := cc.checksum(ctx, root, txn, subk)
subcr, _, err := cc.checksum(ctx, root, txn, m, subk)
if err != nil {
return nil, err
return nil, false, err
}
h.Write([]byte(subcr.Digest))
if subcr.Type == CacheRecordDir { // skip subfiles
if subcr.Type == CacheRecordTypeDir { // skip subfiles
next = append(k, []byte("/\xff")...)
iter.SeekPrefix(next)
}
@@ -330,9 +430,9 @@ func (cc *cacheContext) checksum(ctx context.Context, root *iradix.Node, txn *ir
default:
p := string(bytes.TrimSuffix(k, []byte("/")))
target, err := cc.root(ctx)
target, err := m.mount(ctx)
if err != nil {
return nil, err
return nil, false, err
}
// no FollowSymlinkInScope because invalid paths should not be inserted
@@ -340,24 +440,24 @@ func (cc *cacheContext) checksum(ctx context.Context, root *iradix.Node, txn *ir
fi, err := os.Lstat(fp)
if err != nil {
return nil, err
return nil, false, err
}
dgst, err = prepareDigest(fp, p, fi)
if err != nil {
return nil, err
return nil, false, err
}
}
cr2 := &CacheRecord{
Digest: dgst,
Type: cr.Type,
Link: cr.Link,
Digest: dgst,
Type: cr.Type,
Linkname: cr.Linkname,
}
txn.Insert(k, cr2)
return cr2, nil
return cr2, true, nil
}
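In the directory case above, the digest is built from each child's path relative to the directory interleaved with that child's digest, so any rename or content change underneath changes the directory digest as well. A simplified standalone illustration (not the commit's code: it ignores the header record and the subtree-skipping optimization, and sorts keys to stand in for radix iteration order):

package main

import (
	"crypto/sha256"
	"fmt"
	"sort"
	"strings"

	digest "github.com/opencontainers/go-digest"
)

func dirDigest(dir string, children map[string]digest.Digest) digest.Digest {
	paths := make([]string, 0, len(children))
	for p := range children {
		paths = append(paths, p)
	}
	sort.Strings(paths)

	h := sha256.New()
	for _, p := range paths {
		h.Write([]byte(strings.TrimPrefix(p, dir))) // e.g. "/abc" for "/d0/abc" under "/d0"
		h.Write([]byte(children[p]))
	}
	return digest.NewDigest(digest.SHA256, h)
}

func main() {
	fmt.Println(dirDigest("/d0", map[string]digest.Digest{
		"/d0/abc": digest.FromString("data0"),
		"/d0/def": digest.FromString("data0"),
	}))
}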
func (cc *cacheContext) needsScan(root *iradix.Node, p string) bool {
@@ -373,42 +473,11 @@ func (cc *cacheContext) needsScan(root *iradix.Node, p string) bool {
return false
}
func (cc *cacheContext) root(ctx context.Context) (string, error) {
if cc.mountPath != "" {
return cc.mountPath, nil
}
mounts, err := cc.ref.Mount(ctx, true)
if err != nil {
return "", err
}
lm := snapshot.LocalMounter(mounts)
mp, err := lm.Mount()
if err != nil {
return "", err
}
cc.mountPath = mp
cc.unmount = lm.Unmount
return mp, nil
}
func (cc *cacheContext) clean() error {
if cc.mountPath != "" {
err := cc.unmount()
cc.mountPath = ""
cc.unmount = nil
return err
}
return nil
}
func (cc *cacheContext) scanPath(ctx context.Context, p string) (retErr error) {
func (cc *cacheContext) scanPath(ctx context.Context, m *mount, p string) (retErr error) {
p = path.Join("/", p)
d, _ := path.Split(p)
mp, err := cc.root(ctx)
mp, err := m.mount(ctx)
if err != nil {
return err
}
@@ -435,20 +504,20 @@ func (cc *cacheContext) scanPath(ctx context.Context, p string) (retErr error) {
}
if _, ok := n.Get(k); !ok {
cr := &CacheRecord{
Type: CacheRecordFile,
Type: CacheRecordTypeFile,
}
if fi.Mode()&os.ModeSymlink != 0 {
cr.Type = CacheRecordSymlink
cr.Type = CacheRecordTypeSymlink
link, err := os.Readlink(path)
if err != nil {
return err
}
cr.Link = filepath.ToSlash(link)
cr.Linkname = filepath.ToSlash(link)
}
if fi.IsDir() {
cr.Type = CacheRecordDirHeader
cr.Type = CacheRecordTypeDirHeader
cr2 := &CacheRecord{
Type: CacheRecordDir,
Type: CacheRecordTypeDir,
}
txn.Insert(k, cr2)
k = append(k, []byte("/")...)

cache/contenthash/checksum.pb.go (vendored, new file, +773)

@@ -0,0 +1,773 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: checksum.proto
/*
Package contenthash is a generated protocol buffer package.
It is generated from these files:
checksum.proto
It has these top-level messages:
CacheRecord
CacheRecordWithPath
CacheRecords
*/
package contenthash
import proto "github.com/gogo/protobuf/proto"
import fmt "fmt"
import math "math"
import _ "github.com/gogo/protobuf/gogoproto"
import github_com_opencontainers_go_digest "github.com/opencontainers/go-digest"
import io "io"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
type CacheRecordType int32
const (
CacheRecordTypeFile CacheRecordType = 0
CacheRecordTypeDir CacheRecordType = 1
CacheRecordTypeDirHeader CacheRecordType = 2
CacheRecordTypeSymlink CacheRecordType = 3
)
var CacheRecordType_name = map[int32]string{
0: "FILE",
1: "DIR",
2: "DIR_HEADER",
3: "SYMLINK",
}
var CacheRecordType_value = map[string]int32{
"FILE": 0,
"DIR": 1,
"DIR_HEADER": 2,
"SYMLINK": 3,
}
func (x CacheRecordType) String() string {
return proto.EnumName(CacheRecordType_name, int32(x))
}
func (CacheRecordType) EnumDescriptor() ([]byte, []int) { return fileDescriptorChecksum, []int{0} }
type CacheRecord struct {
Digest github_com_opencontainers_go_digest.Digest `protobuf:"bytes,1,opt,name=digest,proto3,customtype=github.com/opencontainers/go-digest.Digest" json:"digest"`
Type CacheRecordType `protobuf:"varint,2,opt,name=type,proto3,enum=contenthash.CacheRecordType" json:"type,omitempty"`
Linkname string `protobuf:"bytes,3,opt,name=linkname,proto3" json:"linkname,omitempty"`
}
func (m *CacheRecord) Reset() { *m = CacheRecord{} }
func (m *CacheRecord) String() string { return proto.CompactTextString(m) }
func (*CacheRecord) ProtoMessage() {}
func (*CacheRecord) Descriptor() ([]byte, []int) { return fileDescriptorChecksum, []int{0} }
func (m *CacheRecord) GetType() CacheRecordType {
if m != nil {
return m.Type
}
return CacheRecordTypeFile
}
func (m *CacheRecord) GetLinkname() string {
if m != nil {
return m.Linkname
}
return ""
}
type CacheRecordWithPath struct {
Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"`
Record *CacheRecord `protobuf:"bytes,2,opt,name=record" json:"record,omitempty"`
}
func (m *CacheRecordWithPath) Reset() { *m = CacheRecordWithPath{} }
func (m *CacheRecordWithPath) String() string { return proto.CompactTextString(m) }
func (*CacheRecordWithPath) ProtoMessage() {}
func (*CacheRecordWithPath) Descriptor() ([]byte, []int) { return fileDescriptorChecksum, []int{1} }
func (m *CacheRecordWithPath) GetPath() string {
if m != nil {
return m.Path
}
return ""
}
func (m *CacheRecordWithPath) GetRecord() *CacheRecord {
if m != nil {
return m.Record
}
return nil
}
type CacheRecords struct {
Paths []*CacheRecordWithPath `protobuf:"bytes,1,rep,name=paths" json:"paths,omitempty"`
}
func (m *CacheRecords) Reset() { *m = CacheRecords{} }
func (m *CacheRecords) String() string { return proto.CompactTextString(m) }
func (*CacheRecords) ProtoMessage() {}
func (*CacheRecords) Descriptor() ([]byte, []int) { return fileDescriptorChecksum, []int{2} }
func (m *CacheRecords) GetPaths() []*CacheRecordWithPath {
if m != nil {
return m.Paths
}
return nil
}
func init() {
proto.RegisterType((*CacheRecord)(nil), "contenthash.CacheRecord")
proto.RegisterType((*CacheRecordWithPath)(nil), "contenthash.CacheRecordWithPath")
proto.RegisterType((*CacheRecords)(nil), "contenthash.CacheRecords")
proto.RegisterEnum("contenthash.CacheRecordType", CacheRecordType_name, CacheRecordType_value)
}
func (m *CacheRecord) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *CacheRecord) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if len(m.Digest) > 0 {
dAtA[i] = 0xa
i++
i = encodeVarintChecksum(dAtA, i, uint64(len(m.Digest)))
i += copy(dAtA[i:], m.Digest)
}
if m.Type != 0 {
dAtA[i] = 0x10
i++
i = encodeVarintChecksum(dAtA, i, uint64(m.Type))
}
if len(m.Linkname) > 0 {
dAtA[i] = 0x1a
i++
i = encodeVarintChecksum(dAtA, i, uint64(len(m.Linkname)))
i += copy(dAtA[i:], m.Linkname)
}
return i, nil
}
func (m *CacheRecordWithPath) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *CacheRecordWithPath) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if len(m.Path) > 0 {
dAtA[i] = 0xa
i++
i = encodeVarintChecksum(dAtA, i, uint64(len(m.Path)))
i += copy(dAtA[i:], m.Path)
}
if m.Record != nil {
dAtA[i] = 0x12
i++
i = encodeVarintChecksum(dAtA, i, uint64(m.Record.Size()))
n1, err := m.Record.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n1
}
return i, nil
}
func (m *CacheRecords) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *CacheRecords) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if len(m.Paths) > 0 {
for _, msg := range m.Paths {
dAtA[i] = 0xa
i++
i = encodeVarintChecksum(dAtA, i, uint64(msg.Size()))
n, err := msg.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n
}
}
return i, nil
}
func encodeFixed64Checksum(dAtA []byte, offset int, v uint64) int {
dAtA[offset] = uint8(v)
dAtA[offset+1] = uint8(v >> 8)
dAtA[offset+2] = uint8(v >> 16)
dAtA[offset+3] = uint8(v >> 24)
dAtA[offset+4] = uint8(v >> 32)
dAtA[offset+5] = uint8(v >> 40)
dAtA[offset+6] = uint8(v >> 48)
dAtA[offset+7] = uint8(v >> 56)
return offset + 8
}
func encodeFixed32Checksum(dAtA []byte, offset int, v uint32) int {
dAtA[offset] = uint8(v)
dAtA[offset+1] = uint8(v >> 8)
dAtA[offset+2] = uint8(v >> 16)
dAtA[offset+3] = uint8(v >> 24)
return offset + 4
}
func encodeVarintChecksum(dAtA []byte, offset int, v uint64) int {
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
dAtA[offset] = uint8(v)
return offset + 1
}
func (m *CacheRecord) Size() (n int) {
var l int
_ = l
l = len(m.Digest)
if l > 0 {
n += 1 + l + sovChecksum(uint64(l))
}
if m.Type != 0 {
n += 1 + sovChecksum(uint64(m.Type))
}
l = len(m.Linkname)
if l > 0 {
n += 1 + l + sovChecksum(uint64(l))
}
return n
}
func (m *CacheRecordWithPath) Size() (n int) {
var l int
_ = l
l = len(m.Path)
if l > 0 {
n += 1 + l + sovChecksum(uint64(l))
}
if m.Record != nil {
l = m.Record.Size()
n += 1 + l + sovChecksum(uint64(l))
}
return n
}
func (m *CacheRecords) Size() (n int) {
var l int
_ = l
if len(m.Paths) > 0 {
for _, e := range m.Paths {
l = e.Size()
n += 1 + l + sovChecksum(uint64(l))
}
}
return n
}
func sovChecksum(x uint64) (n int) {
for {
n++
x >>= 7
if x == 0 {
break
}
}
return n
}
func sozChecksum(x uint64) (n int) {
return sovChecksum(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
func (m *CacheRecord) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowChecksum
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: CacheRecord: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: CacheRecord: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Digest", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowChecksum
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthChecksum
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Digest = github_com_opencontainers_go_digest.Digest(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 2:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
}
m.Type = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowChecksum
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.Type |= (CacheRecordType(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
case 3:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Linkname", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowChecksum
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthChecksum
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Linkname = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipChecksum(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthChecksum
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *CacheRecordWithPath) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowChecksum
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: CacheRecordWithPath: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: CacheRecordWithPath: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowChecksum
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthChecksum
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Path = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Record", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowChecksum
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthChecksum
}
postIndex := iNdEx + msglen
if postIndex > l {
return io.ErrUnexpectedEOF
}
if m.Record == nil {
m.Record = &CacheRecord{}
}
if err := m.Record.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipChecksum(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthChecksum
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *CacheRecords) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowChecksum
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: CacheRecords: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: CacheRecords: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Paths", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowChecksum
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthChecksum
}
postIndex := iNdEx + msglen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Paths = append(m.Paths, &CacheRecordWithPath{})
if err := m.Paths[len(m.Paths)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipChecksum(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthChecksum
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func skipChecksum(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowChecksum
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
wireType := int(wire & 0x7)
switch wireType {
case 0:
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowChecksum
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
iNdEx++
if dAtA[iNdEx-1] < 0x80 {
break
}
}
return iNdEx, nil
case 1:
iNdEx += 8
return iNdEx, nil
case 2:
var length int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowChecksum
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
length |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
iNdEx += length
if length < 0 {
return 0, ErrInvalidLengthChecksum
}
return iNdEx, nil
case 3:
for {
var innerWire uint64
var start int = iNdEx
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowChecksum
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
innerWire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
innerWireType := int(innerWire & 0x7)
if innerWireType == 4 {
break
}
next, err := skipChecksum(dAtA[start:])
if err != nil {
return 0, err
}
iNdEx = start + next
}
return iNdEx, nil
case 4:
return iNdEx, nil
case 5:
iNdEx += 4
return iNdEx, nil
default:
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
}
}
panic("unreachable")
}
var (
ErrInvalidLengthChecksum = fmt.Errorf("proto: negative length found during unmarshaling")
ErrIntOverflowChecksum = fmt.Errorf("proto: integer overflow")
)
func init() { proto.RegisterFile("checksum.proto", fileDescriptorChecksum) }
var fileDescriptorChecksum = []byte{
// 418 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x92, 0xc1, 0x6a, 0xd4, 0x40,
0x18, 0xc7, 0x77, 0xba, 0xeb, 0xaa, 0xdf, 0x4a, 0x0d, 0x53, 0x68, 0xc3, 0x50, 0xb2, 0xe3, 0x5e,
0x5c, 0x8a, 0xcd, 0x96, 0x08, 0xde, 0xad, 0xd9, 0xa5, 0xd1, 0x2a, 0x32, 0x15, 0x44, 0x3c, 0x48,
0x36, 0x3b, 0x66, 0x42, 0x9b, 0x4c, 0x48, 0x66, 0x0f, 0xfb, 0x06, 0x92, 0x93, 0x2f, 0x90, 0x93,
0x82, 0xef, 0xe0, 0x5d, 0xe8, 0xd1, 0xb3, 0x87, 0x22, 0xeb, 0x8b, 0x48, 0x26, 0x55, 0x42, 0xca,
0x9e, 0xe6, 0xfb, 0x66, 0x7e, 0xdf, 0xff, 0xff, 0x9f, 0x61, 0x60, 0x3b, 0x10, 0x3c, 0x38, 0xcf,
0x97, 0xb1, 0x9d, 0x66, 0x52, 0x49, 0x3c, 0x08, 0x64, 0xa2, 0x78, 0xa2, 0x84, 0x9f, 0x0b, 0x72,
0x18, 0x46, 0x4a, 0x2c, 0xe7, 0x76, 0x20, 0xe3, 0x49, 0x28, 0x43, 0x39, 0xd1, 0xcc, 0x7c, 0xf9,
0x51, 0x77, 0xba, 0xd1, 0x55, 0x3d, 0x3b, 0xfa, 0x86, 0x60, 0xf0, 0xcc, 0x0f, 0x04, 0x67, 0x3c,
0x90, 0xd9, 0x02, 0x3f, 0x87, 0xfe, 0x22, 0x0a, 0x79, 0xae, 0x4c, 0x44, 0xd1, 0xf8, 0xee, 0xb1,
0x73, 0x79, 0x35, 0xec, 0xfc, 0xba, 0x1a, 0x1e, 0x34, 0x64, 0x65, 0xca, 0x93, 0xca, 0xd2, 0x8f,
0x12, 0x9e, 0xe5, 0x93, 0x50, 0x1e, 0xd6, 0x23, 0xb6, 0xab, 0x17, 0x76, 0xad, 0x80, 0x8f, 0xa0,
0xa7, 0x56, 0x29, 0x37, 0xb7, 0x28, 0x1a, 0x6f, 0x3b, 0xfb, 0x76, 0x23, 0xa6, 0xdd, 0xf0, 0x7c,
0xb3, 0x4a, 0x39, 0xd3, 0x24, 0x26, 0x70, 0xe7, 0x22, 0x4a, 0xce, 0x13, 0x3f, 0xe6, 0x66, 0xb7,
0xf2, 0x67, 0xff, 0xfb, 0xd1, 0x7b, 0xd8, 0x69, 0x0c, 0xbd, 0x8d, 0x94, 0x78, 0xed, 0x2b, 0x81,
0x31, 0xf4, 0x52, 0x5f, 0x89, 0x3a, 0x2e, 0xd3, 0x35, 0x3e, 0x82, 0x7e, 0xa6, 0x29, 0x6d, 0x3d,
0x70, 0xcc, 0x4d, 0xd6, 0xec, 0x9a, 0x1b, 0xcd, 0xe0, 0x5e, 0x63, 0x3b, 0xc7, 0x4f, 0xe0, 0x56,
0xa5, 0x94, 0x9b, 0x88, 0x76, 0xc7, 0x03, 0x87, 0x6e, 0x12, 0xf8, 0x17, 0x83, 0xd5, 0xf8, 0xc1,
0x0f, 0x04, 0xf7, 0x5b, 0x57, 0xc3, 0x0f, 0xa0, 0x37, 0xf3, 0x4e, 0xa7, 0x46, 0x87, 0xec, 0x15,
0x25, 0xdd, 0x69, 0x1d, 0xcf, 0xa2, 0x0b, 0x8e, 0x87, 0xd0, 0x75, 0x3d, 0x66, 0x20, 0xb2, 0x5b,
0x94, 0x14, 0xb7, 0x08, 0x37, 0xca, 0xf0, 0x23, 0x00, 0xd7, 0x63, 0x1f, 0x4e, 0xa6, 0x4f, 0xdd,
0x29, 0x33, 0xb6, 0xc8, 0x7e, 0x51, 0x52, 0xf3, 0x26, 0x77, 0xc2, 0xfd, 0x05, 0xcf, 0xf0, 0x43,
0xb8, 0x7d, 0xf6, 0xee, 0xe5, 0xa9, 0xf7, 0xea, 0x85, 0xd1, 0x25, 0xa4, 0x28, 0xe9, 0x6e, 0x0b,
0x3d, 0x5b, 0xc5, 0xd5, 0xbb, 0x92, 0xbd, 0x4f, 0x5f, 0xac, 0xce, 0xf7, 0xaf, 0x56, 0x3b, 0xf3,
0xb1, 0x71, 0xb9, 0xb6, 0xd0, 0xcf, 0xb5, 0x85, 0x7e, 0xaf, 0x2d, 0xf4, 0xf9, 0x8f, 0xd5, 0x99,
0xf7, 0xf5, 0x7f, 0x79, 0xfc, 0x37, 0x00, 0x00, 0xff, 0xff, 0x55, 0xf2, 0x2e, 0x06, 0x7d, 0x02,
0x00, 0x00,
}

cache/contenthash/checksum.proto (vendored, new file, +30)

@@ -0,0 +1,30 @@
syntax = "proto3";
package contenthash;
import "github.com/gogo/protobuf/gogoproto/gogo.proto";
enum CacheRecordType {
option (gogoproto.goproto_enum_prefix) = false;
option (gogoproto.enum_customname) = "CacheRecordType";
FILE = 0 [(gogoproto.enumvalue_customname) = "CacheRecordTypeFile"];
DIR = 1 [(gogoproto.enumvalue_customname) = "CacheRecordTypeDir"];
DIR_HEADER = 2 [(gogoproto.enumvalue_customname) = "CacheRecordTypeDirHeader"];
SYMLINK = 3 [(gogoproto.enumvalue_customname) = "CacheRecordTypeSymlink"];
}
message CacheRecord {
string digest = 1 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
CacheRecordType type = 2;
string linkname = 3;
}
message CacheRecordWithPath {
string path = 1;
CacheRecord record = 2;
}
message CacheRecords {
repeated CacheRecordWithPath paths = 1;
}
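The generated Go types round-trip with the usual gogo Marshal/Unmarshal pair; this is the payload that cacheContext.save and load store under the buildkit.contenthash.v0 metadata key. A small sketch with made-up values:

package main

import (
	"fmt"

	"github.com/moby/buildkit/cache/contenthash"
	digest "github.com/opencontainers/go-digest"
)

func main() {
	recs := contenthash.CacheRecords{
		Paths: []*contenthash.CacheRecordWithPath{{
			Path: "/foo",
			Record: &contenthash.CacheRecord{
				Type:   contenthash.CacheRecordTypeFile,
				Digest: digest.FromString("data0"),
			},
		}},
	}

	dt, err := recs.Marshal()
	if err != nil {
		panic(err)
	}

	var out contenthash.CacheRecords
	if err := out.Unmarshal(dt); err != nil {
		panic(err)
	}
	fmt.Println(out.Paths[0].Path, out.Paths[0].Record.Digest)
}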

cache/contenthash/checksum_test.go

@@ -49,56 +49,55 @@ func TestChecksumBasicFile(t *testing.T) {
// for the digest values, the actual values are not important in development
// phase but consistency is
cc := newCacheContext(ref)
cc, err := newCacheContext(ref.Metadata())
assert.NoError(t, err)
_, err = cc.Checksum(context.TODO(), "nosuch")
_, err = cc.Checksum(context.TODO(), ref, "nosuch")
assert.Error(t, err)
dgst, err := cc.Checksum(context.TODO(), "foo")
dgst, err := cc.Checksum(context.TODO(), ref, "foo")
assert.NoError(t, err)
assert.Equal(t, dgstFileData0, dgst)
// second file returns different hash
dgst, err = cc.Checksum(context.TODO(), "bar")
dgst, err = cc.Checksum(context.TODO(), ref, "bar")
assert.NoError(t, err)
assert.Equal(t, digest.Digest("sha256:cb62966e6dc11e3252ce1a14ed51c6ed0cf112de9c5d23104dc6dcc708f914f1"), dgst)
// same file inside a directory
dgst, err = cc.Checksum(context.TODO(), "d0/abc")
dgst, err = cc.Checksum(context.TODO(), ref, "d0/abc")
assert.NoError(t, err)
assert.Equal(t, dgstFileData0, dgst)
// repeat because codepath is different
dgst, err = cc.Checksum(context.TODO(), "d0/abc")
dgst, err = cc.Checksum(context.TODO(), ref, "d0/abc")
assert.NoError(t, err)
assert.Equal(t, dgstFileData0, dgst)
// symlink to the same file is followed, returns same hash
dgst, err = cc.Checksum(context.TODO(), "d0/def")
dgst, err = cc.Checksum(context.TODO(), ref, "d0/def")
assert.NoError(t, err)
assert.Equal(t, dgstFileData0, dgst)
_, err = cc.Checksum(context.TODO(), "d0/ghi")
_, err = cc.Checksum(context.TODO(), ref, "d0/ghi")
assert.Error(t, err)
assert.Equal(t, errNotFound, errors.Cause(err))
dgst, err = cc.Checksum(context.TODO(), "/")
dgst, err = cc.Checksum(context.TODO(), ref, "/")
assert.NoError(t, err)
assert.Equal(t, digest.Digest("sha256:0d87c8c2a606f961483cd4c5dc0350a4136a299b4066eea4a969d6ed756614cd"), dgst)
dgst, err = cc.Checksum(context.TODO(), "d0")
dgst, err = cc.Checksum(context.TODO(), ref, "d0")
assert.NoError(t, err)
assert.Equal(t, dgstDirD0, dgst)
cc.clean()
err = ref.Release(context.TODO())
require.NoError(t, err)
@@ -111,15 +110,14 @@ func TestChecksumBasicFile(t *testing.T) {
ref = createRef(t, cm, ch)
cc = newCacheContext(ref)
cc, err = newCacheContext(ref.Metadata())
assert.NoError(t, err)
dgst, err = cc.Checksum(context.TODO(), "/")
dgst, err = cc.Checksum(context.TODO(), ref, "/")
assert.NoError(t, err)
assert.Equal(t, dgstDirD0, dgst)
cc.clean()
err = ref.Release(context.TODO())
require.NoError(t, err)
@@ -131,16 +129,15 @@ func TestChecksumBasicFile(t *testing.T) {
ref = createRef(t, cm, ch)
cc = newCacheContext(ref)
cc, err = newCacheContext(ref.Metadata())
assert.NoError(t, err)
dgst, err = cc.Checksum(context.TODO(), "/")
dgst, err = cc.Checksum(context.TODO(), ref, "/")
assert.NoError(t, err)
assert.Equal(t, dgstDirD0Modified, dgst)
assert.NotEqual(t, dgstDirD0, dgst)
cc.clean()
err = ref.Release(context.TODO())
require.NoError(t, err)
@@ -158,25 +155,23 @@ func TestChecksumBasicFile(t *testing.T) {
ref = createRef(t, cm, ch)
cc = newCacheContext(ref)
cc, err = newCacheContext(ref.Metadata())
assert.NoError(t, err)
dgst, err = cc.Checksum(context.TODO(), "abc/aa/foo")
dgst, err = cc.Checksum(context.TODO(), ref, "abc/aa/foo")
assert.NoError(t, err)
assert.Equal(t, digest.Digest("sha256:e1e22281a1ebb637e46aa0781c7fceaca817f1268dd2047dfbce4a23a6cf50ad"), dgst)
assert.NotEqual(t, dgstDirD0, dgst)
// this will force rescan
dgst, err = cc.Checksum(context.TODO(), "d0")
dgst, err = cc.Checksum(context.TODO(), ref, "d0")
assert.NoError(t, err)
assert.Equal(t, dgstDirD0, dgst)
cc.clean()
err = ref.Release(context.TODO())
require.NoError(t, err)
}
func TestHandleChange(t *testing.T) {
@@ -201,24 +196,25 @@ func TestHandleChange(t *testing.T) {
// for the digest values, the actual values are not important in development
// phase but consistency is
cc := newCacheContext(ref)
cc, err := newCacheContext(ref.Metadata())
assert.NoError(t, err)
err = emit(cc.HandleChange, changeStream(ch))
assert.NoError(t, err)
dgstFoo, err := cc.Checksum(context.TODO(), "foo")
dgstFoo, err := cc.Checksum(context.TODO(), ref, "foo")
assert.NoError(t, err)
assert.Equal(t, dgstFileData0, dgstFoo)
// symlink to the same file is followed, returns same hash
dgst, err := cc.Checksum(context.TODO(), "d0/def")
dgst, err := cc.Checksum(context.TODO(), ref, "d0/def")
assert.NoError(t, err)
assert.Equal(t, dgstFoo, dgst)
// symlink to the same file is followed, returns same hash
dgst, err = cc.Checksum(context.TODO(), "d0")
dgst, err = cc.Checksum(context.TODO(), ref, "d0")
assert.NoError(t, err)
assert.Equal(t, dgstDirD0, dgst)
@@ -230,7 +226,7 @@ func TestHandleChange(t *testing.T) {
err = emit(cc.HandleChange, changeStream(ch))
assert.NoError(t, err)
dgst, err = cc.Checksum(context.TODO(), "d0")
dgst, err = cc.Checksum(context.TODO(), ref, "d0")
assert.NoError(t, err)
assert.Equal(t, dgstDirD0Modified, dgst)
@@ -241,16 +237,14 @@ func TestHandleChange(t *testing.T) {
err = emit(cc.HandleChange, changeStream(ch))
assert.NoError(t, err)
_, err = cc.Checksum(context.TODO(), "d0")
_, err = cc.Checksum(context.TODO(), ref, "d0")
assert.Error(t, err)
assert.Equal(t, errNotFound, errors.Cause(err))
_, err = cc.Checksum(context.TODO(), "d0/abc")
_, err = cc.Checksum(context.TODO(), ref, "d0/abc")
assert.Error(t, err)
assert.Equal(t, errNotFound, errors.Cause(err))
cc.clean()
err = ref.Release(context.TODO())
require.NoError(t, err)
}

cache/contenthash/generate.go (vendored, new file, +3)

@@ -0,0 +1,3 @@
package contenthash
//go:generate protoc -I=. -I=../../vendor/ --gogofaster_out=. checksum.proto