contenthash: fix ignored files from path separator

Signed-off-by: Tonis Tiigi <tonistiigi@gmail.com>
docker-18.09
Tonis Tiigi 2018-03-13 14:36:34 -07:00
parent b403f71a62
commit 8f8221d173
2 changed files with 85 additions and 19 deletions
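Why the path separator matters here: the content-hash cache keeps one CacheRecord per path in a radix tree keyed by the path string, and iteration follows the raw byte order of those keys. Because '-' (0x2D) and '.' (0x2E) sort before '/' (0x2F), a sibling such as d0/foo-subdir lands between the directory d0/foo and its children d0/foo/*, and the directory-checksum walk, which seeks past a subdirectory's contents, skips those siblings as well, so they never contribute to the parent digest. Replacing '/' with NUL (0x00) in the keys, which sorts below every byte allowed in a path component, keeps a directory's children contiguous and directly after the directory itself. A minimal, self-contained sketch of the two orderings, using the same file set as the new TestChecksumUnorderedFiles below (an illustration only, not code from this commit):

package main

import (
	"fmt"
	"sort"
	"strings"
)

func main() {
	paths := []string{"d0", "d0/foo", "d0/foo/bar", "d0/foo-subdir", "d0/foo.subdir"}

	// Build the tree keys for a given separator and return them in byte order.
	sortKeys := func(sep string) []string {
		keys := make([]string, len(paths))
		for i, p := range paths {
			keys[i] = strings.Replace(p, "/", sep, -1)
		}
		sort.Strings(keys)
		return keys
	}

	// Old scheme: '/' stays in the key. Byte order interleaves siblings with
	// children: d0, d0/foo, d0/foo-subdir, d0/foo.subdir, d0/foo/bar.
	for _, k := range sortKeys("/") {
		fmt.Printf("%q\n", k)
	}

	// New scheme: '/' is replaced with NUL. Children of "d0/foo" come first,
	// siblings after: d0, d0\x00foo, d0\x00foo\x00bar, d0\x00foo-subdir, d0\x00foo.subdir.
	for _, k := range sortKeys("\x00") {
		fmt.Printf("%q\n", k)
	}
}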


@@ -233,11 +233,11 @@ func (cc *cacheContext) HandleChange(kind fsutil.ChangeKind, p string, fi os.Fil
if p == "/" {
p = ""
}
k := []byte(p)
k := convertPathToKey([]byte(p))
deleteDir := func(cr *CacheRecord) {
if cr.Type == CacheRecordTypeDir {
cc.node.WalkPrefix(append(k, []byte("/")...), func(k []byte, v interface{}) bool {
cc.node.WalkPrefix(append(k, 0), func(k []byte, v interface{}) bool {
cc.txn.Delete(k)
return false
})
@@ -251,8 +251,8 @@ func (cc *cacheContext) HandleChange(kind fsutil.ChangeKind, p string, fi os.Fil
cc.node = cc.tree.Root()
// root is not called by HandleChange. need to fake it
if _, ok := cc.node.Get([]byte("/")); !ok {
cc.txn.Insert([]byte("/"), &CacheRecord{
if _, ok := cc.node.Get([]byte{0}); !ok {
cc.txn.Insert([]byte{0}, &CacheRecord{
Type: CacheRecordTypeDirHeader,
Digest: digest.FromBytes(nil),
})
@@ -267,7 +267,7 @@ func (cc *cacheContext) HandleChange(kind fsutil.ChangeKind, p string, fi os.Fil
if ok {
deleteDir(v.(*CacheRecord))
}
d := path.Dir(string(k))
d := path.Dir(p)
if d == "/" {
d = ""
}
@@ -303,11 +303,12 @@ func (cc *cacheContext) HandleChange(kind fsutil.ChangeKind, p string, fi os.Fil
Type: CacheRecordTypeDir,
}
cc.txn.Insert(k, cr2)
k = append(k, []byte("/")...)
k = append(k, 0)
p += "/"
}
cr.Digest = h.Digest()
cc.txn.Insert(k, cr)
d := path.Dir(string(k))
d := path.Dir(p)
if d == "/" {
d = ""
}
@@ -353,7 +354,7 @@ func (cc *cacheContext) checksumNoFollow(ctx context.Context, m *mount, p string
if cc.txn == nil {
root := cc.tree.Root()
cc.mu.RUnlock()
v, ok := root.Get([]byte(p))
v, ok := root.Get(convertPathToKey([]byte(p)))
if ok {
cr := v.(*CacheRecord)
if cr.Digest != "" {
@@ -386,8 +387,9 @@ func (cc *cacheContext) commitActiveTransaction() {
addParentToMap(d, cc.dirtyMap)
}
for d := range cc.dirtyMap {
if _, ok := cc.txn.Get([]byte(d)); ok {
cc.txn.Insert([]byte(d), &CacheRecord{Type: CacheRecordTypeDir})
k := convertPathToKey([]byte(d))
if _, ok := cc.txn.Get(k); ok {
cc.txn.Insert(k, &CacheRecord{Type: CacheRecordTypeDir})
}
}
cc.tree = cc.txn.Commit()
@@ -403,7 +405,7 @@ func (cc *cacheContext) lazyChecksum(ctx context.Context, m *mount, p string) (*
return nil, err
}
}
k := []byte(p)
k := convertPathToKey([]byte(p))
txn := cc.tree.Txn()
root = txn.Root()
cr, updated, err := cc.checksum(ctx, root, txn, m, k)
@@ -431,7 +433,7 @@ func (cc *cacheContext) checksum(ctx context.Context, root *iradix.Node, txn *ir
switch cr.Type {
case CacheRecordTypeDir:
h := sha256.New()
next := append(k, []byte("/")...)
next := append(k, 0)
iter := root.Seek(next)
subk := next
ok := true
@@ -447,15 +449,17 @@ func (cc *cacheContext) checksum(ctx context.Context, root *iradix.Node, txn *ir
}
h.Write([]byte(subcr.Digest))
if subcr.Type == CacheRecordTypeDir { // skip subfiles
next := append(subk, []byte("/\xff")...)
next := append(subk, 0, 0xff)
iter = root.Seek(next)
}
subk, _, ok = iter.Next()
}
dgst = digest.NewDigest(digest.SHA256, h)
default:
p := string(bytes.TrimSuffix(k, []byte("/")))
p := string(convertKeyToPath(bytes.TrimSuffix(k, []byte{0})))
target, err := m.mount(ctx)
if err != nil {
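The "// skip subfiles" branch in the hunk above is where the old keys caused files to be ignored: after hashing a subdirectory's record, the walk seeks the iterator to the first key at or beyond the subdirectory key plus the separator and 0xff, so it does not descend into that subdirectory. With '/' as the separator, that seek also jumps over any sibling whose next byte sorts below '/'. Below is a rough simulation of that seek over a sorted key slice; sort.SearchStrings stands in for the radix-tree iterator, which is an assumption of this sketch, not how the real code is structured:

package main

import (
	"fmt"
	"sort"
	"strings"
)

// nextAfterSkip returns the first key at or after the position the checksum
// walk seeks to once it has hashed the directory record dir.
func nextAfterSkip(keys []string, dir, sep string) string {
	seek := dir + sep + "\xff"
	i := sort.SearchStrings(keys, seek)
	if i == len(keys) {
		return "<end of iteration>"
	}
	return keys[i]
}

func main() {
	paths := []string{"d0/foo", "d0/foo/bar", "d0/foo-subdir", "d0/foo.subdir"}

	// Old keys, '/' separator: seeking to "d0/foo/\xff" skips the two siblings
	// along with the child, so they are never hashed into d0's digest.
	oldKeys := append([]string(nil), paths...)
	sort.Strings(oldKeys)
	fmt.Println(nextAfterSkip(oldKeys, "d0/foo", "/")) // <end of iteration>

	// New keys, NUL separator: seeking to "d0\x00foo\x00\xff" lands on the next
	// sibling, so d0/foo-subdir and d0/foo.subdir are still visited.
	newKeys := make([]string, len(paths))
	for i, p := range paths {
		newKeys[i] = strings.Replace(p, "/", "\x00", -1)
	}
	sort.Strings(newKeys)
	fmt.Printf("%q\n", nextAfterSkip(newKeys, "d0\x00foo", "\x00")) // "d0\x00foo-subdir"
}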
@@ -491,7 +495,7 @@ func (cc *cacheContext) needsScan(root *iradix.Node, p string) bool {
if p == "/" {
p = ""
}
if _, ok := root.Get([]byte(p)); !ok {
if _, ok := root.Get(convertPathToKey([]byte(p))); !ok {
if p == "" {
return true
}
@@ -529,6 +533,7 @@ func (cc *cacheContext) scanPath(ctx context.Context, m *mount, p string) (retEr
if string(k) == "/" {
k = []byte{}
}
k = convertPathToKey(k)
if _, ok := n.Get(k); !ok {
cr := &CacheRecord{
Type: CacheRecordTypeFile,
@@ -547,7 +552,7 @@ func (cc *cacheContext) scanPath(ctx context.Context, m *mount, p string) (retEr
Type: CacheRecordTypeDir,
}
txn.Insert(k, cr2)
k = append(k, []byte("/")...)
k = append(k, 0)
}
txn.Insert(k, cr)
}
@@ -619,3 +624,11 @@ func poolsCopy(dst io.Writer, src io.Reader) (written int64, err error) {
pool32K.Put(buf)
return
}
func convertPathToKey(p []byte) []byte {
return bytes.Replace([]byte(p), []byte("/"), []byte{0}, -1)
}
func convertKeyToPath(p []byte) []byte {
return bytes.Replace([]byte(p), []byte{0}, []byte("/"), -1)
}
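convertPathToKey and convertKeyToPath are a plain byte-for-byte swap between '/' and NUL, so the mapping is lossless as long as path components never contain NUL (which they cannot on POSIX filesystems). A quick self-contained round-trip check, with the helpers copied here so the snippet compiles on its own (the redundant []byte conversion from the commit is dropped, since the parameter is already a []byte):

package main

import (
	"bytes"
	"fmt"
)

// Same behavior as the helpers added in this commit.
func convertPathToKey(p []byte) []byte {
	return bytes.Replace(p, []byte("/"), []byte{0}, -1)
}

func convertKeyToPath(k []byte) []byte {
	return bytes.Replace(k, []byte{0}, []byte("/"), -1)
}

func main() {
	p := []byte("d0/foo/bar")
	k := convertPathToKey(p)
	fmt.Printf("%q\n", k)                            // "d0\x00foo\x00bar"
	fmt.Println(bytes.Equal(convertKeyToPath(k), p)) // true

	// The root path "/" becomes the single byte 0x00, which is why
	// HandleChange above now fakes the root record under []byte{0}.
	fmt.Printf("%q\n", convertPathToKey([]byte("/"))) // "\x00"
}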


@@ -25,8 +25,8 @@ import (
const (
dgstFileData0 = digest.Digest("sha256:cd8e75bca50f2d695f220d0cb0997d8ead387e4f926e8669a92d7f104cc9885b")
dgstDirD0 = digest.Digest("sha256:5f8fc802a74ea165f2dfa25d356a581a3d8282a343192421b670079819f4afa7")
dgstDirD0Modified = digest.Digest("sha256:4071993a2baf46d92cf3ea64a3fac9f7ab5d0b3876aed0333769ed99756f968b")
dgstDirD0 = digest.Digest("sha256:311457c20a9b6bfc7b368282be86a0c98b7be882a268967605559c9b5acd7068")
dgstDirD0Modified = digest.Digest("sha256:a0da3975efcd81ddec35ba1481f7b57a46af1c1e42a14b6024323d3fe2e7b2d8")
)
func TestChecksumBasicFile(t *testing.T) {
@@ -96,7 +96,7 @@ func TestChecksumBasicFile(t *testing.T) {
dgst, err = cc.Checksum(context.TODO(), ref, "/")
assert.NoError(t, err)
assert.Equal(t, digest.Digest("sha256:6308f8be7bb12f5f6c99635cfa09e9d7055a6c03033e0f6b034cb48849906180"), dgst)
assert.Equal(t, digest.Digest("sha256:f57ab28e15b8dadb573ef097f2f99967f3acc4c44accc4888f4df510f9e9d2de"), dgst)
dgst, err = cc.Checksum(context.TODO(), ref, "d0")
assert.NoError(t, err)
@@ -306,6 +306,59 @@ func TestHandleRecursiveDir(t *testing.T) {
assert.NoError(t, err)
}
func TestChecksumUnorderedFiles(t *testing.T) {
t.Parallel()
tmpdir, err := ioutil.TempDir("", "buildkit-state")
require.NoError(t, err)
defer os.RemoveAll(tmpdir)
snapshotter, err := naive.NewSnapshotter(filepath.Join(tmpdir, "snapshots"))
require.NoError(t, err)
cm := setupCacheManager(t, tmpdir, snapshotter)
defer cm.Close()
ch := []string{
"ADD d0 dir",
"ADD d0/foo dir",
"ADD d0/foo/bar file data0",
"ADD d0/foo-subdir dir",
"ADD d0/foo.subdir file data1",
}
ref := createRef(t, cm, nil)
cc, err := newCacheContext(ref.Metadata())
assert.NoError(t, err)
err = emit(cc.HandleChange, changeStream(ch))
assert.NoError(t, err)
dgst, err := cc.Checksum(context.TODO(), ref, "d0")
assert.NoError(t, err)
assert.Equal(t, dgst, digest.Digest("sha256:67bed5f4c5ec9cd367b89962f6b1836740e1694e35a127fa4af58b0c339a7b7b"))
// check regression from earlier version that didn't track some files
ch = []string{
"ADD d0 dir",
"ADD d0/foo dir",
"ADD d0/foo/bar file data0",
}
ref = createRef(t, cm, nil)
cc, err = newCacheContext(ref.Metadata())
assert.NoError(t, err)
err = emit(cc.HandleChange, changeStream(ch))
assert.NoError(t, err)
dgst2, err := cc.Checksum(context.TODO(), ref, "d0")
assert.NoError(t, err)
assert.NotEqual(t, dgst, dgst2)
}
func TestPersistence(t *testing.T) {
t.Parallel()
tmpdir, err := ioutil.TempDir("", "buildkit-state")