commit
a67ba78d9e
|
@ -47,6 +47,10 @@ func Checksum(ctx context.Context, ref cache.ImmutableRef, path string, followLi
|
|||
return getDefaultManager().Checksum(ctx, ref, path, followLinks)
|
||||
}
|
||||
|
||||
// ChecksumWildcard computes the checksum for the given wildcard path inside
// ref by delegating to the process-wide default cache manager.
func ChecksumWildcard(ctx context.Context, ref cache.ImmutableRef, path string, followLinks bool) (digest.Digest, error) {
	return getDefaultManager().ChecksumWildcard(ctx, ref, path, followLinks)
}
|
||||
|
||||
// GetCacheContext returns the CacheContext associated with the given storage
// item, delegating to the process-wide default cache manager.
func GetCacheContext(ctx context.Context, md *metadata.StorageItem) (CacheContext, error) {
	return getDefaultManager().GetCacheContext(ctx, md)
}
|
||||
|
@ -84,6 +88,14 @@ func (cm *cacheManager) Checksum(ctx context.Context, ref cache.ImmutableRef, p
|
|||
return cc.Checksum(ctx, ref, p, followLinks)
|
||||
}
|
||||
|
||||
func (cm *cacheManager) ChecksumWildcard(ctx context.Context, ref cache.ImmutableRef, p string, followLinks bool) (digest.Digest, error) {
|
||||
cc, err := cm.GetCacheContext(ctx, ensureOriginMetadata(ref.Metadata()))
|
||||
if err != nil {
|
||||
return "", nil
|
||||
}
|
||||
return cc.ChecksumWildcard(ctx, ref, p, followLinks)
|
||||
}
|
||||
|
||||
func (cm *cacheManager) GetCacheContext(ctx context.Context, md *metadata.StorageItem) (CacheContext, error) {
|
||||
cm.locker.Lock(md.ID())
|
||||
cm.lruMu.Lock()
|
||||
|
@ -343,6 +355,9 @@ func (cc *cacheContext) ChecksumWildcard(ctx context.Context, mountable cache.Mo
|
|||
}
|
||||
}
|
||||
}
|
||||
if len(wildcards) == 0 {
|
||||
return digest.FromBytes([]byte{}), nil
|
||||
}
|
||||
|
||||
if len(wildcards) > 1 {
|
||||
digester := digest.Canonical.Digester()
|
||||
|
@ -543,12 +558,13 @@ func (cc *cacheContext) lazyChecksum(ctx context.Context, m *mount, p string) (*
|
|||
}
|
||||
|
||||
func (cc *cacheContext) checksum(ctx context.Context, root *iradix.Node, txn *iradix.Txn, m *mount, k []byte, follow bool) (*CacheRecord, bool, error) {
|
||||
origk := k
|
||||
k, cr, err := getFollowLinks(root, k, follow)
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
if cr == nil {
|
||||
return nil, false, errors.Wrapf(errNotFound, "%s not found", convertKeyToPath(k))
|
||||
return nil, false, errors.Wrapf(errNotFound, "%q not found", convertKeyToPath(origk))
|
||||
}
|
||||
if cr.Digest != "" {
|
||||
return cr, false, nil
|
||||
|
|
|
@ -311,7 +311,7 @@ func (sr *mutableRef) updateLastUsed() bool {
|
|||
|
||||
func (sr *mutableRef) commit(ctx context.Context) (ImmutableRef, error) {
|
||||
if !sr.mutable || len(sr.refs) == 0 {
|
||||
return nil, errors.Wrapf(errInvalid, "invalid mutable ref")
|
||||
return nil, errors.Wrapf(errInvalid, "invalid mutable ref %p", sr)
|
||||
}
|
||||
|
||||
id := identity.NewID()
|
||||
|
|
|
@ -51,6 +51,8 @@ func (nopWriteCloser) Close() error { return nil }
|
|||
func TestClientIntegration(t *testing.T) {
|
||||
integration.Run(t, []integration.Test{
|
||||
testRelativeWorkDir,
|
||||
testFileOpMkdirMkfile,
|
||||
testFileOpCopyRm,
|
||||
testCallDiskUsage,
|
||||
testBuildMultiMount,
|
||||
testBuildHTTPSource,
|
||||
|
@ -653,6 +655,111 @@ func testRelativeWorkDir(t *testing.T, sb integration.Sandbox) {
|
|||
require.Equal(t, []byte("/test1/test2\n"), dt)
|
||||
}
|
||||
|
||||
// testFileOpMkdirMkfile builds a scratch state with a chained Mkdir+Mkfile
// file op, exports the result to a local directory, and verifies that both
// the directory and the file contents arrive as expected.
func testFileOpMkdirMkfile(t *testing.T, sb integration.Sandbox) {
	requiresLinux(t)
	c, err := New(context.TODO(), sb.Address())
	require.NoError(t, err)
	defer c.Close()

	// Single FileOp with two chained actions: mkdir /foo, then mkfile bar.
	st := llb.Scratch().
		File(llb.Mkdir("/foo", 0700).Mkfile("bar", 0600, []byte("contents")))

	def, err := st.Marshal()
	require.NoError(t, err)

	destDir, err := ioutil.TempDir("", "buildkit")
	require.NoError(t, err)
	defer os.RemoveAll(destDir)

	// Solve and export the build result to destDir via the local exporter.
	_, err = c.Solve(context.TODO(), def, SolveOpt{
		Exports: []ExportEntry{
			{
				Type:      ExporterLocal,
				OutputDir: destDir,
			},
		},
	}, nil)
	require.NoError(t, err)

	// The directory created by Mkdir must exist in the export...
	fi, err := os.Stat(filepath.Join(destDir, "foo"))
	require.NoError(t, err)
	require.Equal(t, true, fi.IsDir())

	// ...and the file created by Mkfile must carry the expected data.
	dt, err := ioutil.ReadFile(filepath.Join(destDir, "bar"))
	require.NoError(t, err)
	require.Equal(t, []byte("contents"), dt)
}
|
||||
|
||||
func testFileOpCopyRm(t *testing.T, sb integration.Sandbox) {
|
||||
requiresLinux(t)
|
||||
c, err := New(context.TODO(), sb.Address())
|
||||
require.NoError(t, err)
|
||||
defer c.Close()
|
||||
|
||||
dir, err := tmpdir(
|
||||
fstest.CreateFile("myfile", []byte("data0"), 0600),
|
||||
fstest.CreateDir("sub", 0700),
|
||||
fstest.CreateFile("sub/foo", []byte("foo0"), 0600),
|
||||
fstest.CreateFile("sub/bar", []byte("bar0"), 0600),
|
||||
)
|
||||
require.NoError(t, err)
|
||||
defer os.RemoveAll(dir)
|
||||
|
||||
dir2, err := tmpdir(
|
||||
fstest.CreateFile("file2", []byte("file2"), 0600),
|
||||
)
|
||||
require.NoError(t, err)
|
||||
defer os.RemoveAll(dir)
|
||||
|
||||
st := llb.Scratch().
|
||||
File(
|
||||
llb.Copy(llb.Local("mylocal"), "myfile", "myfile2").
|
||||
Copy(llb.Local("mylocal"), "sub", "out").
|
||||
Rm("out/foo").
|
||||
Copy(llb.Local("mylocal2"), "file2", "/"))
|
||||
|
||||
def, err := st.Marshal()
|
||||
require.NoError(t, err)
|
||||
|
||||
destDir, err := ioutil.TempDir("", "buildkit")
|
||||
require.NoError(t, err)
|
||||
defer os.RemoveAll(destDir)
|
||||
|
||||
_, err = c.Solve(context.TODO(), def, SolveOpt{
|
||||
Exports: []ExportEntry{
|
||||
{
|
||||
Type: ExporterLocal,
|
||||
OutputDir: destDir,
|
||||
},
|
||||
},
|
||||
LocalDirs: map[string]string{
|
||||
"mylocal": dir,
|
||||
"mylocal2": dir2,
|
||||
},
|
||||
}, nil)
|
||||
require.NoError(t, err)
|
||||
|
||||
dt, err := ioutil.ReadFile(filepath.Join(destDir, "myfile2"))
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, []byte("data0"), dt)
|
||||
|
||||
fi, err := os.Stat(filepath.Join(destDir, "out"))
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, fi.IsDir())
|
||||
|
||||
dt, err = ioutil.ReadFile(filepath.Join(destDir, "out/bar"))
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, []byte("bar0"), dt)
|
||||
|
||||
_, err = os.Stat(filepath.Join(destDir, "out/foo"))
|
||||
require.Equal(t, true, os.IsNotExist(err))
|
||||
|
||||
dt, err = ioutil.ReadFile(filepath.Join(destDir, "file2"))
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, []byte("file2"), dt)
|
||||
|
||||
}
|
||||
|
||||
func testCallDiskUsage(t *testing.T, sb integration.Sandbox) {
|
||||
c, err := New(context.TODO(), sb.Address())
|
||||
require.NoError(t, err)
|
||||
|
|
|
@ -0,0 +1,725 @@
|
|||
package llb
|
||||
|
||||
import (
|
||||
_ "crypto/sha256"
|
||||
"os"
|
||||
"path"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/moby/buildkit/solver/pb"
|
||||
digest "github.com/opencontainers/go-digest"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// Examples:
|
||||
// local := llb.Local(...)
|
||||
// llb.Image().Dir("/abc").File(Mkdir("./foo").Mkfile("/abc/foo/bar", []byte("data")))
|
||||
// llb.Image().File(Mkdir("/foo").Mkfile("/foo/bar", []byte("data")))
|
||||
// llb.Image().File(Copy(local, "/foo", "/bar")).File(Copy(local, "/foo2", "/bar2"))
|
||||
//
|
||||
// a := Mkdir("./foo") // *FileAction /ced/foo
|
||||
// b := Mkdir("./bar") // /abc/bar
|
||||
// c := b.Copy(a.WithState(llb.Scratch().Dir("/ced")), "./foo", "./baz") // /abc/baz
|
||||
// llb.Image().Dir("/abc").File(c)
|
||||
//
|
||||
// In future this can be extended to multiple outputs with:
|
||||
// a := Mkdir("./foo")
|
||||
// b, id := a.GetSelector()
|
||||
// c := b.Mkdir("./bar")
|
||||
// filestate = state.File(c)
|
||||
// filestate.GetOutput(id).Exec()
|
||||
|
||||
// NewFileOp binds action to state s and wraps it in a FileOp vertex with a
// single output at index 0.
func NewFileOp(s State, action *FileAction, c Constraints) *FileOp {
	action = action.bind(s)

	f := &FileOp{
		action:      action,
		constraints: c,
	}

	// A file op currently produces exactly one output (index 0).
	f.output = &output{vertex: f, getIndex: func() (pb.OutputIndex, error) {
		return pb.OutputIndex(0), nil
	}}

	return f
}
|
||||
|
||||
// CopyInput is either llb.State or *FileActionWithState
type CopyInput interface {
	isFileOpCopyInput()
}

// subAction converts one chained file action into its wire representation.
type subAction interface {
	toProtoAction(string, pb.InputIndex) pb.IsFileAction
}

// FileAction is one link in a chain of file operations. prev points to the
// preceding action (nil for the first), state is the state the chain was
// bound to, and err records a construction error surfaced at marshal time.
type FileAction struct {
	state  *State
	prev   *FileAction
	action subAction
	err    error
}
|
||||
|
||||
// Mkdir appends a directory-creation action to the chain.
func (fa *FileAction) Mkdir(p string, m os.FileMode, opt ...MkdirOption) *FileAction {
	a := Mkdir(p, m, opt...)
	a.prev = fa
	return a
}

// Mkfile appends a file-creation action to the chain.
func (fa *FileAction) Mkfile(p string, m os.FileMode, dt []byte, opt ...MkfileOption) *FileAction {
	a := Mkfile(p, m, dt, opt...)
	a.prev = fa
	return a
}

// Rm appends a removal action to the chain.
func (fa *FileAction) Rm(p string, opt ...RmOption) *FileAction {
	a := Rm(p, opt...)
	a.prev = fa
	return a
}

// Copy appends a copy action to the chain.
func (fa *FileAction) Copy(input CopyInput, src, dest string, opt ...CopyOption) *FileAction {
	a := Copy(input, src, dest, opt...)
	a.prev = fa
	return a
}
|
||||
|
||||
// allOutputs collects into m the outputs of every state referenced by this
// action chain: the bound state of each action and, for copy actions, the
// copy source state (or the source action chain, recursively).
func (fa *FileAction) allOutputs(m map[Output]struct{}) {
	if fa == nil {
		return
	}
	if fa.state != nil && fa.state.Output() != nil {
		m[fa.state.Output()] = struct{}{}
	}

	if a, ok := fa.action.(*fileActionCopy); ok {
		if a.state != nil {
			if out := a.state.Output(); out != nil {
				m[out] = struct{}{}
			}
		} else if a.fas != nil {
			// Copy source is itself an action chain; recurse into it.
			a.fas.allOutputs(m)
		}
	}
	// Walk back through the preceding actions of the chain.
	fa.prev.allOutputs(m)
}
|
||||
|
||||
// bind returns a copy of the action chain with every action's state set to
// s. The receiver is left unmodified; nil receivers pass through as nil.
func (fa *FileAction) bind(s State) *FileAction {
	if fa == nil {
		return nil
	}
	fa2 := *fa
	fa2.prev = fa.prev.bind(s)
	fa2.state = &s
	return &fa2
}

// WithState binds the action chain to s so it can be used as a Copy input.
func (fa *FileAction) WithState(s State) CopyInput {
	return &fileActionWithState{FileAction: fa.bind(s)}
}

// fileActionWithState is a bound FileAction chain usable as a Copy source.
type fileActionWithState struct {
	*FileAction
}

func (fas *fileActionWithState) isFileOpCopyInput() {}
|
||||
|
||||
// Mkdir creates a new action that makes directory p with mode m.
func Mkdir(p string, m os.FileMode, opt ...MkdirOption) *FileAction {
	var mi MkdirInfo
	for _, o := range opt {
		o.SetMkdirOption(&mi)
	}
	return &FileAction{
		action: &fileActionMkdir{
			file: p,
			mode: m,
			info: mi,
		},
	}
}

// fileActionMkdir holds the parameters of a single mkdir action.
type fileActionMkdir struct {
	file string
	mode os.FileMode
	info MkdirInfo
}

// toProtoAction converts the mkdir action to wire form, resolving the path
// against the bound state's working directory (parent).
func (a *fileActionMkdir) toProtoAction(parent string, base pb.InputIndex) pb.IsFileAction {
	return &pb.FileAction_Mkdir{
		Mkdir: &pb.FileActionMkDir{
			Path:        normalizePath(parent, a.file, false),
			Mode:        int32(a.mode & 0777),
			MakeParents: a.info.MakeParents,
			Owner:       a.info.ChownOpt.marshal(base),
			Timestamp:   marshalTime(a.info.CreatedTime),
		},
	}
}
|
||||
|
||||
// MkdirOption modifies the settings of a Mkdir action.
type MkdirOption interface {
	SetMkdirOption(*MkdirInfo)
}

// ChownOption can set ownership on mkdir, mkfile and copy actions.
type ChownOption interface {
	MkdirOption
	MkfileOption
	CopyOption
}

// mkdirOptionFunc adapts a plain function to the MkdirOption interface.
type mkdirOptionFunc func(*MkdirInfo)

func (fn mkdirOptionFunc) SetMkdirOption(mi *MkdirInfo) {
	fn(mi)
}

var _ MkdirOption = &MkdirInfo{}

// WithParents toggles creation of missing parent directories (mkdir -p).
func WithParents(b bool) MkdirOption {
	return mkdirOptionFunc(func(mi *MkdirInfo) {
		mi.MakeParents = b
	})
}

// MkdirInfo carries the optional settings of a Mkdir action.
type MkdirInfo struct {
	MakeParents bool
	ChownOpt    *ChownOpt
	CreatedTime *time.Time
}

// SetMkdirOption copies mi over the target info, so a MkdirInfo can be used
// directly as an option.
func (mi *MkdirInfo) SetMkdirOption(mi2 *MkdirInfo) {
	*mi2 = *mi
}
|
||||
|
||||
// WithUser parses a "user[:group]" specification and returns a ChownOption
// for it. Components that parse as integers (or "root") are treated as
// numeric IDs; anything else is kept as a name.
func WithUser(name string) ChownOption {
	opt := ChownOpt{}

	parts := strings.SplitN(name, ":", 2)
	for i, v := range parts {
		switch i {
		case 0:
			// First component is the user.
			uid, err := parseUID(v)
			if err != nil {
				opt.User = &UserOpt{Name: v}
			} else {
				opt.User = &UserOpt{UID: uid}
			}
		case 1:
			// Optional second component is the group.
			gid, err := parseUID(v)
			if err != nil {
				opt.Group = &UserOpt{Name: v}
			} else {
				opt.Group = &UserOpt{UID: gid}
			}
		}
	}

	return opt
}
|
||||
|
||||
// parseUID converts str to a numeric user ID. The literal "root" maps to 0;
// any other value must be a base-10 integer that fits in 32 bits.
func parseUID(str string) (int, error) {
	if str == "root" {
		return 0, nil
	}
	parsed, err := strconv.ParseInt(str, 10, 32)
	if err != nil {
		return 0, err
	}
	return int(parsed), nil
}
|
||||
|
||||
// WithUIDGID sets ownership by numeric user and group IDs.
func WithUIDGID(uid, gid int) ChownOption {
	return ChownOpt{
		User:  &UserOpt{UID: uid},
		Group: &UserOpt{UID: gid},
	}
}
|
||||
|
||||
// ChownOpt is an ownership specification; either side may be nil to keep
// the respective default.
type ChownOpt struct {
	User  *UserOpt
	Group *UserOpt
}

func (co ChownOpt) SetMkdirOption(mi *MkdirInfo) {
	mi.ChownOpt = &co
}
func (co ChownOpt) SetMkfileOption(mi *MkfileInfo) {
	mi.ChownOpt = &co
}
func (co ChownOpt) SetCopyOption(mi *CopyInfo) {
	mi.ChownOpt = &co
}

// marshal converts the ownership pair to wire form; a nil receiver maps to
// nil (no ownership change).
func (cp *ChownOpt) marshal(base pb.InputIndex) *pb.ChownOpt {
	if cp == nil {
		return nil
	}
	return &pb.ChownOpt{
		User:  cp.User.marshal(base),
		Group: cp.Group.marshal(base),
	}
}

// UserOpt identifies a user either by numeric UID or by name.
type UserOpt struct {
	UID  int
	Name string
}

// marshal converts the user reference to wire form. A named user is
// resolved against the input at index base; otherwise the numeric ID wins.
func (up *UserOpt) marshal(base pb.InputIndex) *pb.UserOpt {
	if up == nil {
		return nil
	}
	if up.Name != "" {
		return &pb.UserOpt{User: &pb.UserOpt_ByName{ByName: &pb.NamedUserOpt{
			Name: up.Name, Input: base}}}
	}
	return &pb.UserOpt{User: &pb.UserOpt_ByID{ByID: uint32(up.UID)}}
}
|
||||
|
||||
// Mkfile creates a new action that writes dt to file p with mode m.
func Mkfile(p string, m os.FileMode, dt []byte, opts ...MkfileOption) *FileAction {
	var mi MkfileInfo
	for _, o := range opts {
		o.SetMkfileOption(&mi)
	}

	return &FileAction{
		action: &fileActionMkfile{
			file: p,
			mode: m,
			dt:   dt,
			info: mi,
		},
	}
}

// MkfileOption modifies the settings of a Mkfile action.
type MkfileOption interface {
	SetMkfileOption(*MkfileInfo)
}

// MkfileInfo carries the optional settings of a Mkfile action.
type MkfileInfo struct {
	ChownOpt    *ChownOpt
	CreatedTime *time.Time
}

// SetMkfileOption copies mi over the target info, so a MkfileInfo can be
// used directly as an option.
func (mi *MkfileInfo) SetMkfileOption(mi2 *MkfileInfo) {
	*mi2 = *mi
}

var _ MkfileOption = &MkfileInfo{}

// fileActionMkfile holds the parameters of a single mkfile action.
type fileActionMkfile struct {
	file string
	mode os.FileMode
	dt   []byte
	info MkfileInfo
}

// toProtoAction converts the mkfile action to wire form, resolving the path
// against the bound state's working directory (parent).
func (a *fileActionMkfile) toProtoAction(parent string, base pb.InputIndex) pb.IsFileAction {
	return &pb.FileAction_Mkfile{
		Mkfile: &pb.FileActionMkFile{
			Path:      normalizePath(parent, a.file, false),
			Mode:      int32(a.mode & 0777),
			Data:      a.dt,
			Owner:     a.info.ChownOpt.marshal(base),
			Timestamp: marshalTime(a.info.CreatedTime),
		},
	}
}
||||
|
||||
// Rm creates a new action that removes path p.
func Rm(p string, opts ...RmOption) *FileAction {
	var mi RmInfo
	for _, o := range opts {
		o.SetRmOption(&mi)
	}

	return &FileAction{
		action: &fileActionRm{
			file: p,
			info: mi,
		},
	}
}

// RmOption modifies the settings of an Rm action.
type RmOption interface {
	SetRmOption(*RmInfo)
}

// rmOptionFunc adapts a plain function to the RmOption interface.
type rmOptionFunc func(*RmInfo)

func (fn rmOptionFunc) SetRmOption(mi *RmInfo) {
	fn(mi)
}

// RmInfo carries the optional settings of an Rm action.
type RmInfo struct {
	AllowNotFound bool
	AllowWildcard bool
}

// SetRmOption copies mi over the target info, so an RmInfo can be used
// directly as an option.
func (mi *RmInfo) SetRmOption(mi2 *RmInfo) {
	*mi2 = *mi
}

var _ RmOption = &RmInfo{}

// WithAllowNotFound makes removal of a missing path a no-op rather than an
// error.
func WithAllowNotFound(b bool) RmOption {
	return rmOptionFunc(func(mi *RmInfo) {
		mi.AllowNotFound = b
	})
}

// WithAllowWildcard enables wildcard matching in the removal path.
func WithAllowWildcard(b bool) RmOption {
	return rmOptionFunc(func(mi *RmInfo) {
		mi.AllowWildcard = b
	})
}

// fileActionRm holds the parameters of a single rm action.
type fileActionRm struct {
	file string
	info RmInfo
}

// toProtoAction converts the rm action to wire form, resolving the path
// against the bound state's working directory (parent).
func (a *fileActionRm) toProtoAction(parent string, base pb.InputIndex) pb.IsFileAction {
	return &pb.FileAction_Rm{
		Rm: &pb.FileActionRm{
			Path:          normalizePath(parent, a.file, false),
			AllowNotFound: a.info.AllowNotFound,
			AllowWildcard: a.info.AllowWildcard,
		},
	}
}
||||
|
||||
// Copy creates a copy action from input (either a State or the result of
// FileAction.WithState) at src to dest. An unsupported input type is
// recorded in the action and surfaced as an error at marshal time.
func Copy(input CopyInput, src, dest string, opts ...CopyOption) *FileAction {
	var state *State
	var fas *fileActionWithState
	var err error
	if st, ok := input.(State); ok {
		state = &st
	} else if v, ok := input.(*fileActionWithState); ok {
		fas = v
	} else {
		err = errors.Errorf("invalid input type %T for copy", input)
	}

	var mi CopyInfo
	for _, o := range opts {
		o.SetCopyOption(&mi)
	}

	return &FileAction{
		action: &fileActionCopy{
			state: state,
			fas:   fas,
			src:   src,
			dest:  dest,
			info:  mi,
		},
		err: err,
	}
}
|
||||
|
||||
// CopyOption modifies the settings of a Copy action.
type CopyOption interface {
	SetCopyOption(*CopyInfo)
}

// CopyInfo carries the optional settings of a Copy action.
type CopyInfo struct {
	Mode                *os.FileMode
	FollowSymlinks      bool
	CopyDirContentsOnly bool
	AttemptUnpack       bool
	CreateDestPath      bool
	AllowWildcard       bool
	AllowEmptyWildcard  bool
	ChownOpt            *ChownOpt
	CreatedTime         *time.Time
}

// SetCopyOption copies mi over the target info, so a CopyInfo can be used
// directly as an option.
func (mi *CopyInfo) SetCopyOption(mi2 *CopyInfo) {
	*mi2 = *mi
}

var _ CopyOption = &CopyInfo{}

// fileActionCopy holds the parameters of a single copy action. At most one
// of state/fas identifies the copy source.
type fileActionCopy struct {
	state *State
	fas   *fileActionWithState
	src   string
	dest  string
	info  CopyInfo
}

// toProtoAction converts the copy action to wire form. Mode -1 on the wire
// means no explicit mode was set.
func (a *fileActionCopy) toProtoAction(parent string, base pb.InputIndex) pb.IsFileAction {
	c := &pb.FileActionCopy{
		Src:                              a.sourcePath(),
		Dest:                             normalizePath(parent, a.dest, true),
		Owner:                            a.info.ChownOpt.marshal(base),
		AllowWildcard:                    a.info.AllowWildcard,
		AllowEmptyWildcard:               a.info.AllowEmptyWildcard,
		FollowSymlink:                    a.info.FollowSymlinks,
		DirCopyContents:                  a.info.CopyDirContentsOnly,
		AttemptUnpackDockerCompatibility: a.info.AttemptUnpack,
		CreateDestPath:                   a.info.CreateDestPath,
		Timestamp:                        marshalTime(a.info.CreatedTime),
	}
	if a.info.Mode != nil {
		c.Mode = int32(*a.info.Mode)
	} else {
		c.Mode = -1
	}
	return &pb.FileAction_Copy{
		Copy: c,
	}
}

// sourcePath resolves the copy source against the working directory of the
// source state (or source action chain) when the path is relative.
func (c *fileActionCopy) sourcePath() string {
	p := path.Clean(c.src)
	if !path.IsAbs(p) {
		if c.state != nil {
			p = path.Join("/", c.state.GetDir(), p)
		} else if c.fas != nil {
			p = path.Join("/", c.fas.state.GetDir(), p)
		}
	}
	return p
}
|
||||
|
||||
// CreatedTime sets the creation timestamp on mkdir, mkfile and copy
// actions.
type CreatedTime time.Time

// WithCreatedTime wraps t as a CreatedTime option.
func WithCreatedTime(t time.Time) CreatedTime {
	return CreatedTime(t)
}

func (c CreatedTime) SetMkdirOption(mi *MkdirInfo) {
	mi.CreatedTime = (*time.Time)(&c)
}

func (c CreatedTime) SetMkfileOption(mi *MkfileInfo) {
	mi.CreatedTime = (*time.Time)(&c)
}

func (c CreatedTime) SetCopyOption(mi *CopyInfo) {
	mi.CreatedTime = (*time.Time)(&c)
}
|
||||
|
||||
func marshalTime(t *time.Time) int64 {
|
||||
if t == nil {
|
||||
return -1
|
||||
}
|
||||
return t.UnixNano()
|
||||
}
|
||||
|
||||
// FileOp is an LLB vertex performing a chain of file actions on top of its
// inputs. It exposes a single output.
type FileOp struct {
	MarshalCache
	action *FileAction
	output Output

	constraints Constraints
	// isValidated caches a successful Validate call.
	isValidated bool
}
|
||||
|
||||
func (f *FileOp) Validate() error {
|
||||
if f.isValidated {
|
||||
return nil
|
||||
}
|
||||
if f.action == nil {
|
||||
return errors.Errorf("action is required")
|
||||
}
|
||||
f.isValidated = true
|
||||
return nil
|
||||
}
|
||||
|
||||
// marshalState accumulates wire-level inputs and per-action records while
// walking a FileAction chain during marshaling.
type marshalState struct {
	// visited deduplicates action chains that are referenced more than once.
	visited map[*FileAction]*fileActionState
	inputs  []*pb.Input
	actions []*fileActionState
}

func newMarshalState() *marshalState {
	return &marshalState{
		visited: map[*FileAction]*fileActionState{},
	}
}

// fileActionState is the marshal-time record of one action: its resolved
// input indexes (or action-relative references) and its position (target)
// in the emitted action list. Index fields use -1 for "unset".
type fileActionState struct {
	base           pb.InputIndex
	input          pb.InputIndex
	inputRelative  *int
	input2         pb.InputIndex
	input2Relative *int
	target         int
	action         subAction
	fa             *FileAction
}
|
||||
|
||||
// addInput converts output o into a pb.Input and returns its index in
// ms.inputs, reusing an existing slot when an identical input was already
// added.
func (ms *marshalState) addInput(st *fileActionState, c *Constraints, o Output) (pb.InputIndex, error) {
	inp, err := o.ToInput(c)
	if err != nil {
		return 0, err
	}
	// Deduplicate: identical inputs share one index.
	for i, inp2 := range ms.inputs {
		if *inp == *inp2 {
			return pb.InputIndex(i), nil
		}
	}
	i := pb.InputIndex(len(ms.inputs))
	ms.inputs = append(ms.inputs, inp)
	return i, nil
}
|
||||
|
||||
// add registers action fa — and, recursively, its predecessors and copy
// sources — with the marshal state and returns its record. Chains that are
// referenced more than once are deduplicated via ms.visited.
func (ms *marshalState) add(fa *FileAction, c *Constraints) (*fileActionState, error) {
	if st, ok := ms.visited[fa]; ok {
		return st, nil
	}

	// Surface construction-time errors (e.g. a bad Copy input) here.
	if fa.err != nil {
		return nil, fa.err
	}

	// Register the preceding action first so this one can reference its
	// output position.
	var prevState *fileActionState
	if parent := fa.prev; parent != nil {
		var err error
		prevState, err = ms.add(parent, c)
		if err != nil {
			return nil, err
		}
	}

	// -1 marks "unset" for all index fields.
	st := &fileActionState{
		action: fa.action,
		input:  -1,
		input2: -1,
		base:   -1,
		fa:     fa,
	}

	if source := fa.state.Output(); source != nil {
		inp, err := ms.addInput(st, c, source)
		if err != nil {
			return nil, err
		}
		st.base = inp
	}

	if fa.prev == nil {
		// The first action in a chain operates directly on the bound state.
		st.input = st.base
	} else {
		// Later actions consume the previous action's output, referenced
		// relative to the action list.
		st.inputRelative = &prevState.target
	}

	// Copy actions additionally need their source registered as a second
	// input: either a state output or another action chain.
	if a, ok := fa.action.(*fileActionCopy); ok {
		if a.state != nil {
			if out := a.state.Output(); out != nil {
				inp, err := ms.addInput(st, c, out)
				if err != nil {
					return nil, err
				}
				st.input2 = inp
			}
		} else if a.fas != nil {
			src, err := ms.add(a.fas.FileAction, c)
			if err != nil {
				return nil, err
			}
			st.input2Relative = &src.target
		} else {
			return nil, errors.Errorf("invalid empty source for copy")
		}
	}

	st.target = len(ms.actions)

	ms.visited[fa] = st
	ms.actions = append(ms.actions, st)

	return st, nil
}
|
||||
|
||||
// Marshal serializes the file op — constraints, deduplicated inputs and the
// full action chain — to its wire form, caching the result per constraint
// set.
func (f *FileOp) Marshal(c *Constraints) (digest.Digest, []byte, *pb.OpMetadata, error) {
	if f.Cached(c) {
		return f.Load()
	}
	if err := f.Validate(); err != nil {
		return "", nil, nil, err
	}

	pfo := &pb.FileOp{}

	pop, md := MarshalConstraints(c, &f.constraints)
	pop.Op = &pb.Op_File{
		File: pfo,
	}

	// Walk the action chain, collecting inputs and per-action records.
	state := newMarshalState()
	_, err := state.add(f.action, c)
	if err != nil {
		return "", nil, nil, err
	}
	pop.Inputs = state.inputs

	for i, st := range state.actions {
		// Only the final action exposes the op's output (index 0); all
		// intermediate actions use -1.
		output := pb.OutputIndex(-1)
		if i+1 == len(state.actions) {
			output = 0
		}

		// Relative paths resolve against the bound state's working dir.
		var parent string
		if st.fa.state != nil {
			parent = st.fa.state.GetDir()
		}

		pfo.Actions = append(pfo.Actions, &pb.FileAction{
			Input:          getIndex(st.input, len(state.inputs), st.inputRelative),
			SecondaryInput: getIndex(st.input2, len(state.inputs), st.input2Relative),
			Output:         output,
			Action:         st.action.toProtoAction(parent, st.base),
		})
	}

	dt, err := pop.Marshal()
	if err != nil {
		return "", nil, nil, err
	}
	f.Store(dt, md, c)
	return f.Load()
}
|
||||
|
||||
// normalizePath cleans p and resolves it against parent when it is
// relative. With keepSlash set, a trailing "/" or "/." present in the
// original input is restored after cleaning (path.Clean strips them).
func normalizePath(parent, p string, keepSlash bool) string {
	raw := p
	cleaned := path.Clean(p)
	if !path.IsAbs(cleaned) {
		cleaned = path.Join("/", parent, cleaned)
	}
	if !keepSlash {
		return cleaned
	}
	switch {
	case strings.HasSuffix(raw, "/") && !strings.HasSuffix(cleaned, "/"):
		cleaned += "/"
	case strings.HasSuffix(raw, "/."):
		if cleaned != "/" {
			cleaned += "/"
		}
		cleaned += "."
	}
	return cleaned
}
|
||||
|
||||
// Output returns the single output of the file op.
func (f *FileOp) Output() Output {
	return f.output
}
|
||||
|
||||
func (f *FileOp) Inputs() (inputs []Output) {
|
||||
mm := map[Output]struct{}{}
|
||||
|
||||
f.action.allOutputs(mm)
|
||||
|
||||
for o := range mm {
|
||||
inputs = append(inputs, o)
|
||||
}
|
||||
return inputs
|
||||
}
|
||||
|
||||
func getIndex(input pb.InputIndex, len int, relative *int) pb.InputIndex {
|
||||
if relative != nil {
|
||||
return pb.InputIndex(len + *relative)
|
||||
}
|
||||
return input
|
||||
}
|
|
@ -0,0 +1,666 @@
|
|||
package llb
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/moby/buildkit/solver/pb"
|
||||
digest "github.com/opencontainers/go-digest"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// TestFileMkdir verifies the wire form of a single-action Mkdir file op on
// top of an image source.
func TestFileMkdir(t *testing.T) {
	t.Parallel()

	st := Image("foo").File(Mkdir("/foo", 0700))
	def, err := st.Marshal()

	require.NoError(t, err)

	// Expect three ops: image source, file op, terminal op.
	m, arr := parseDef(t, def.Def)
	require.Equal(t, 3, len(arr))

	dgst, idx := last(t, arr)
	require.Equal(t, 0, idx)
	require.Equal(t, m[dgst], arr[1])

	// The file op has one input: output 0 of the image op.
	f := arr[1].Op.(*pb.Op_File).File
	require.Equal(t, len(arr[1].Inputs), 1)
	require.Equal(t, m[arr[1].Inputs[0].Digest], arr[0])
	require.Equal(t, 0, int(arr[1].Inputs[0].Index))

	require.Equal(t, 1, len(f.Actions))

	action := f.Actions[0]
	require.Equal(t, 0, int(action.Input))
	require.Equal(t, -1, int(action.SecondaryInput))
	require.Equal(t, 0, int(action.Output))

	mkdir := action.Action.(*pb.FileAction_Mkdir).Mkdir

	require.Equal(t, "/foo", mkdir.Path)
	require.Equal(t, 0700, int(mkdir.Mode))
	// -1 means no explicit creation timestamp was set.
	require.Equal(t, int64(-1), mkdir.Timestamp)
}
|
||||
|
||||
// TestFileMkdirChain verifies that three chained Mkdir actions marshal into
// one file op whose actions feed each other, that relative paths resolve
// against the state's working dir, and that per-action options stick.
func TestFileMkdirChain(t *testing.T) {
	t.Parallel()

	st := Image("foo").Dir("/etc").File(Mkdir("/foo", 0700).Mkdir("bar", 0600, WithParents(true)).Mkdir("bar/baz", 0701, WithParents(false)))
	def, err := st.Marshal()

	require.NoError(t, err)

	m, arr := parseDef(t, def.Def)
	require.Equal(t, 3, len(arr))

	dgst, idx := last(t, arr)
	require.Equal(t, 0, idx)
	require.Equal(t, m[dgst], arr[1])

	f := arr[1].Op.(*pb.Op_File).File
	require.Equal(t, len(arr[1].Inputs), 1)
	require.Equal(t, m[arr[1].Inputs[0].Digest], arr[0])
	require.Equal(t, 0, int(arr[1].Inputs[0].Index))

	require.Equal(t, 3, len(f.Actions))

	// First action reads input 0 (the image); intermediate outputs are -1.
	action := f.Actions[0]
	require.Equal(t, 0, int(action.Input))
	require.Equal(t, -1, int(action.SecondaryInput))
	require.Equal(t, -1, int(action.Output))
	mkdir := action.Action.(*pb.FileAction_Mkdir).Mkdir
	require.Equal(t, "/foo", mkdir.Path)
	require.Equal(t, 0700, int(mkdir.Mode))
	require.Equal(t, false, mkdir.MakeParents)
	require.Nil(t, mkdir.Owner)

	// Second action consumes the first action's output (index 1 = one past
	// the single real input). Relative "bar" resolves under /etc.
	action = f.Actions[1]
	require.Equal(t, 1, int(action.Input))
	require.Equal(t, -1, int(action.SecondaryInput))
	require.Equal(t, -1, int(action.Output))
	mkdir = action.Action.(*pb.FileAction_Mkdir).Mkdir
	require.Equal(t, "/etc/bar", mkdir.Path)
	require.Equal(t, 0600, int(mkdir.Mode))
	require.Equal(t, true, mkdir.MakeParents)
	require.Nil(t, mkdir.Owner)

	// Last action exposes the op's output (index 0).
	action = f.Actions[2]
	require.Equal(t, 2, int(action.Input))
	require.Equal(t, -1, int(action.SecondaryInput))
	require.Equal(t, 0, int(action.Output))
	mkdir = action.Action.(*pb.FileAction_Mkdir).Mkdir
	require.Equal(t, "/etc/bar/baz", mkdir.Path)
	require.Equal(t, 0701, int(mkdir.Mode))
	require.Equal(t, false, mkdir.MakeParents)
	require.Nil(t, mkdir.Owner)
}
|
||||
|
||||
// TestFileMkdirMkfile verifies that a Mkdir+Mkfile chain on Scratch
// marshals into a single no-input file op with two actions.
func TestFileMkdirMkfile(t *testing.T) {
	t.Parallel()

	st := Scratch().File(Mkdir("/foo", 0700).Mkfile("bar", 0700, []byte("data")))
	def, err := st.Marshal()

	require.NoError(t, err)

	m, arr := parseDef(t, def.Def)
	require.Equal(t, 2, len(arr))

	dgst, idx := last(t, arr)
	require.Equal(t, 0, idx)
	require.Equal(t, m[dgst], arr[0])

	f := arr[0].Op.(*pb.Op_File).File
	// NOTE(review): the three assertions below index arr[1] (the terminal
	// op) while f above comes from arr[0] — this looks copy-pasted from the
	// image-based tests; confirm they check what was intended.
	require.Equal(t, len(arr[1].Inputs), 1)
	require.Equal(t, m[arr[1].Inputs[0].Digest], arr[0])
	require.Equal(t, 0, int(arr[1].Inputs[0].Index))

	require.Equal(t, 2, len(f.Actions))

	// Scratch has no inputs, so the first action's input is -1.
	action := f.Actions[0]
	require.Equal(t, -1, int(action.Input))
	require.Equal(t, -1, int(action.SecondaryInput))
	require.Equal(t, -1, int(action.Output))

	mkdir := action.Action.(*pb.FileAction_Mkdir).Mkdir

	require.Equal(t, "/foo", mkdir.Path)
	require.Equal(t, 0700, int(mkdir.Mode))
	require.Equal(t, int64(-1), mkdir.Timestamp)

	// Second action consumes the first one's output and exposes output 0.
	action = f.Actions[1]
	require.Equal(t, 0, int(action.Input))
	require.Equal(t, -1, int(action.SecondaryInput))
	require.Equal(t, 0, int(action.Output))

	mkfile := action.Action.(*pb.FileAction_Mkfile).Mkfile

	require.Equal(t, "/bar", mkfile.Path)
	require.Equal(t, 0700, int(mkfile.Mode))
	require.Equal(t, "data", string(mkfile.Data))
	require.Equal(t, int64(-1), mkfile.Timestamp)
}
|
||||
|
||||
func TestFileMkfile(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
st := Image("foo").File(Mkfile("/foo", 0700, []byte("data")))
|
||||
def, err := st.Marshal()
|
||||
|
||||
require.NoError(t, err)
|
||||
|
||||
m, arr := parseDef(t, def.Def)
|
||||
require.Equal(t, 3, len(arr))
|
||||
|
||||
dgst, idx := last(t, arr)
|
||||
require.Equal(t, 0, idx)
|
||||
require.Equal(t, m[dgst], arr[1])
|
||||
|
||||
f := arr[1].Op.(*pb.Op_File).File
|
||||
require.Equal(t, len(arr[1].Inputs), 1)
|
||||
require.Equal(t, m[arr[1].Inputs[0].Digest], arr[0])
|
||||
require.Equal(t, 0, int(arr[1].Inputs[0].Index))
|
||||
|
||||
require.Equal(t, 1, len(f.Actions))
|
||||
|
||||
action := f.Actions[0]
|
||||
require.Equal(t, 0, int(action.Input))
|
||||
require.Equal(t, -1, int(action.SecondaryInput))
|
||||
require.Equal(t, 0, int(action.Output))
|
||||
|
||||
mkdir := action.Action.(*pb.FileAction_Mkfile).Mkfile
|
||||
|
||||
require.Equal(t, "/foo", mkdir.Path)
|
||||
require.Equal(t, 0700, int(mkdir.Mode))
|
||||
require.Equal(t, "data", string(mkdir.Data))
|
||||
require.Equal(t, int64(-1), mkdir.Timestamp)
|
||||
}
|
||||
|
||||
// TestFileRm verifies the wire form of a single-action Rm file op on top
// of an image source.
func TestFileRm(t *testing.T) {
	t.Parallel()

	st := Image("foo").File(Rm("/foo"))
	def, err := st.Marshal()

	require.NoError(t, err)

	// Expect three ops: image source, file op, terminal op.
	m, arr := parseDef(t, def.Def)
	require.Equal(t, 3, len(arr))

	dgst, idx := last(t, arr)
	require.Equal(t, 0, idx)
	require.Equal(t, m[dgst], arr[1])

	f := arr[1].Op.(*pb.Op_File).File
	require.Equal(t, len(arr[1].Inputs), 1)
	require.Equal(t, m[arr[1].Inputs[0].Digest], arr[0])
	require.Equal(t, 0, int(arr[1].Inputs[0].Index))

	require.Equal(t, 1, len(f.Actions))

	action := f.Actions[0]
	require.Equal(t, 0, int(action.Input))
	require.Equal(t, -1, int(action.SecondaryInput))
	require.Equal(t, 0, int(action.Output))

	rm := action.Action.(*pb.FileAction_Rm).Rm
	require.Equal(t, "/foo", rm.Path)
}
|
||||
|
||||
// TestFileSimpleChains verifies that two chained File() calls marshal to two
// separate FileOp vertices, that relative action paths are resolved against
// the state's working directory at the time each action was added, and that
// intermediate actions get Output -1 while the final action gets Output 0.
func TestFileSimpleChains(t *testing.T) {
	t.Parallel()

	st := Image("foo").Dir("/tmp").
		File(
			Mkdir("foo/bar/", 0700).
				Rm("abc").
				Mkfile("foo/bar/baz", 0777, []byte("d0")),
		).
		Dir("sub").
		File(
			Rm("foo").
				Mkfile("/abc", 0701, []byte("d1")),
		)
	def, err := st.Marshal()

	require.NoError(t, err)

	m, arr := parseDef(t, def.Def)
	// 4 ops expected: image source + two file ops + final pointer op.
	require.Equal(t, 4, len(arr))

	dgst, idx := last(t, arr)
	require.Equal(t, 0, idx)
	// Terminal op points at the second FileOp (arr[2]).
	require.Equal(t, m[dgst], arr[2])

	// Second FileOp: Rm("foo") + Mkfile("/abc", ...), chained onto the first.
	f := arr[2].Op.(*pb.Op_File).File
	require.Equal(t, len(arr[2].Inputs), 1)
	require.Equal(t, m[arr[2].Inputs[0].Digest], arr[1])
	require.Equal(t, 0, int(arr[2].Inputs[0].Index))
	require.Equal(t, 2, len(f.Actions))

	action := f.Actions[0]
	require.Equal(t, 0, int(action.Input))
	require.Equal(t, -1, int(action.SecondaryInput))
	// Intermediate action: Output is -1 (not exported).
	require.Equal(t, -1, int(action.Output))

	rm := action.Action.(*pb.FileAction_Rm).Rm
	// Relative "foo" is resolved against /tmp/sub (Dir("sub") after Dir("/tmp")).
	require.Equal(t, "/tmp/sub/foo", rm.Path)

	action = f.Actions[1]
	// Input 1 refers to the previous action's result within this FileOp.
	require.Equal(t, 1, int(action.Input))
	require.Equal(t, -1, int(action.SecondaryInput))
	require.Equal(t, 0, int(action.Output))

	mkfile := action.Action.(*pb.FileAction_Mkfile).Mkfile
	// Absolute path is kept as-is.
	require.Equal(t, "/abc", mkfile.Path)

	// First FileOp: Mkdir + Rm + Mkfile, input is the image source.
	f = arr[1].Op.(*pb.Op_File).File
	require.Equal(t, len(arr[1].Inputs), 1)
	require.Equal(t, m[arr[1].Inputs[0].Digest], arr[0])
	require.Equal(t, 0, int(arr[1].Inputs[0].Index))
	require.Equal(t, 3, len(f.Actions))

	action = f.Actions[0]
	require.Equal(t, 0, int(action.Input))
	require.Equal(t, -1, int(action.SecondaryInput))
	require.Equal(t, -1, int(action.Output))

	mkdir := action.Action.(*pb.FileAction_Mkdir).Mkdir
	// Relative "foo/bar/" resolved against /tmp; trailing slash is dropped.
	require.Equal(t, "/tmp/foo/bar", mkdir.Path)

	action = f.Actions[1]
	require.Equal(t, 1, int(action.Input))
	require.Equal(t, -1, int(action.SecondaryInput))
	require.Equal(t, -1, int(action.Output))

	rm = action.Action.(*pb.FileAction_Rm).Rm
	require.Equal(t, "/tmp/abc", rm.Path)

	action = f.Actions[2]
	require.Equal(t, 2, int(action.Input))
	require.Equal(t, -1, int(action.SecondaryInput))
	// Last action in the op exports its result.
	require.Equal(t, 0, int(action.Output))

	mkfile = action.Action.(*pb.FileAction_Mkfile).Mkfile
	require.Equal(t, "/tmp/foo/bar/baz", mkfile.Path)
}
|
||||
|
||||
// TestFileCopy verifies that Copy from another state marshals to a FileOp
// with two inputs (destination state first, source state second), that Src
// is resolved against the source state's working dir and Dest against the
// destination state's working dir, and that the default timestamp is -1.
func TestFileCopy(t *testing.T) {
	t.Parallel()

	st := Image("foo").Dir("/tmp").File(Copy(Image("bar").Dir("/etc"), "foo", "bar"))
	def, err := st.Marshal()

	require.NoError(t, err)

	m, arr := parseDef(t, def.Def)
	// 4 ops expected: two image sources + file op + final pointer op.
	require.Equal(t, 4, len(arr))

	dgst, idx := last(t, arr)
	require.Equal(t, 0, idx)
	require.Equal(t, m[dgst], arr[2])

	f := arr[2].Op.(*pb.Op_File).File
	require.Equal(t, 2, len(arr[2].Inputs))
	// Input 0 is the destination image ("foo"), input 1 the copy source ("bar").
	require.Equal(t, "docker-image://docker.io/library/foo:latest", m[arr[2].Inputs[0].Digest].Op.(*pb.Op_Source).Source.Identifier)
	require.Equal(t, 0, int(arr[2].Inputs[0].Index))
	require.Equal(t, "docker-image://docker.io/library/bar:latest", m[arr[2].Inputs[1].Digest].Op.(*pb.Op_Source).Source.Identifier)
	require.Equal(t, 0, int(arr[2].Inputs[1].Index))

	require.Equal(t, 1, len(f.Actions))

	action := f.Actions[0]
	require.Equal(t, 0, int(action.Input))
	// SecondaryInput references the copy source (op input 1).
	require.Equal(t, 1, int(action.SecondaryInput))
	require.Equal(t, 0, int(action.Output))

	copy := action.Action.(*pb.FileAction_Copy).Copy

	// Src resolved against the source state's /etc, Dest against /tmp.
	require.Equal(t, "/etc/foo", copy.Src)
	require.Equal(t, "/tmp/bar", copy.Dest)
	require.Equal(t, int64(-1), copy.Timestamp)
}
|
||||
|
||||
// TestFileCopyFromAction verifies copying from the result of another file
// action chain (WithState on Scratch): the source chain's actions are folded
// into the same FileOp, source-chain actions start with Input -1 (scratch),
// and the final Copy references the source chain result via SecondaryInput.
func TestFileCopyFromAction(t *testing.T) {
	t.Parallel()

	st := Image("foo").Dir("/out").File(
		Copy(
			Mkdir("foo", 0700).
				Mkfile("foo/bar", 0600, []byte("dt")).
				WithState(Scratch().Dir("/tmp")),
			"foo/bar", "baz"))
	def, err := st.Marshal()

	require.NoError(t, err)

	m, arr := parseDef(t, def.Def)
	// 3 ops expected: image source + file op + final pointer op
	// (Scratch contributes no op).
	require.Equal(t, 3, len(arr))

	dgst, idx := last(t, arr)
	require.Equal(t, 0, idx)
	require.Equal(t, m[dgst], arr[1])

	f := arr[1].Op.(*pb.Op_File).File
	require.Equal(t, 1, len(arr[1].Inputs))
	require.Equal(t, m[arr[1].Inputs[0].Digest], arr[0])
	require.Equal(t, 0, int(arr[1].Inputs[0].Index))

	require.Equal(t, 3, len(f.Actions))

	action := f.Actions[0]
	// Input -1 = scratch (no op input backs the source chain).
	require.Equal(t, -1, int(action.Input))
	require.Equal(t, -1, int(action.SecondaryInput))
	require.Equal(t, -1, int(action.Output))

	mkdir := action.Action.(*pb.FileAction_Mkdir).Mkdir

	// Paths in the source chain resolve against its own working dir (/tmp).
	require.Equal(t, "/tmp/foo", mkdir.Path)
	require.Equal(t, 0700, int(mkdir.Mode))

	action = f.Actions[1]
	require.Equal(t, 1, int(action.Input))
	require.Equal(t, -1, int(action.SecondaryInput))
	require.Equal(t, -1, int(action.Output))

	mkfile := action.Action.(*pb.FileAction_Mkfile).Mkfile

	require.Equal(t, "/tmp/foo/bar", mkfile.Path)
	require.Equal(t, 0600, int(mkfile.Mode))
	require.Equal(t, "dt", string(mkfile.Data))

	action = f.Actions[2]
	require.Equal(t, 0, int(action.Input))
	// SecondaryInput 2 = result of action index 1 (offset past the op inputs).
	require.Equal(t, 2, int(action.SecondaryInput))
	require.Equal(t, 0, int(action.Output))

	copy := action.Action.(*pb.FileAction_Copy).Copy

	require.Equal(t, "/tmp/foo/bar", copy.Src)
	require.Equal(t, "/out/baz", copy.Dest)
}
|
||||
|
||||
// TestFilePipeline verifies a multi-stage pipeline: two chained File() calls
// where the first copies from a separate action chain (WithState on an image)
// and the second copies from two more images. It checks the full input wiring
// of both FileOps, including action Input/SecondaryInput indices that refer
// either to op inputs or to prior action results.
func TestFilePipeline(t *testing.T) {
	t.Parallel()

	st := Image("foo").Dir("/out").
		File(
			Copy(
				Mkdir("foo", 0700).
					Mkfile("foo/bar", 0600, []byte("dt")).
					WithState(Image("bar").Dir("/tmp")),
				"foo/bar", "baz").
				Rm("foo/bax"),
		).
		File(
			Mkdir("/bar", 0701).
				Copy(Image("foo"), "in", "out").
				Copy(Image("baz").Dir("/base"), "in2", "out2"),
		)
	def, err := st.Marshal()

	require.NoError(t, err)

	m, arr := parseDef(t, def.Def)

	require.Equal(t, 6, len(arr)) // 3 img + 2 file + pointer

	dgst, idx := last(t, arr)
	require.Equal(t, 0, idx)
	// Terminal op points at the second FileOp (arr[4]).
	require.Equal(t, m[dgst], arr[4])

	// Second FileOp: inputs are first FileOp + "foo" image + "baz" image.
	f := arr[4].Op.(*pb.Op_File).File
	require.Equal(t, 3, len(arr[4].Inputs))

	require.Equal(t, "docker-image://docker.io/library/foo:latest", m[arr[4].Inputs[1].Digest].Op.(*pb.Op_Source).Source.Identifier)
	require.Equal(t, 0, int(arr[4].Inputs[1].Index))
	require.Equal(t, "docker-image://docker.io/library/baz:latest", m[arr[4].Inputs[2].Digest].Op.(*pb.Op_Source).Source.Identifier)
	require.Equal(t, 0, int(arr[4].Inputs[2].Index))

	require.Equal(t, 3, len(f.Actions))

	action := f.Actions[0]
	require.Equal(t, 0, int(action.Input))
	require.Equal(t, -1, int(action.SecondaryInput))
	require.Equal(t, -1, int(action.Output))

	mkdir := action.Action.(*pb.FileAction_Mkdir).Mkdir

	require.Equal(t, "/bar", mkdir.Path)
	require.Equal(t, 0701, int(mkdir.Mode))

	action = f.Actions[1]
	// Input 3 = result of action 0 (indices past the 3 op inputs).
	require.Equal(t, 3, int(action.Input))
	// SecondaryInput 1 = the "foo" image op input.
	require.Equal(t, 1, int(action.SecondaryInput))
	require.Equal(t, -1, int(action.Output))

	copy := action.Action.(*pb.FileAction_Copy).Copy

	require.Equal(t, "/in", copy.Src)
	require.Equal(t, "/out/out", copy.Dest)

	action = f.Actions[2]
	require.Equal(t, 4, int(action.Input))
	require.Equal(t, 2, int(action.SecondaryInput))
	require.Equal(t, 0, int(action.Output))

	copy = action.Action.(*pb.FileAction_Copy).Copy

	// Src resolved against the "baz" image state's /base working dir.
	require.Equal(t, "/base/in2", copy.Src)
	require.Equal(t, "/out/out2", copy.Dest)

	// First FileOp: inputs are "foo" image (dest) + "bar" image (copy source).
	f = m[arr[4].Inputs[0].Digest].Op.(*pb.Op_File).File
	op := m[arr[4].Inputs[0].Digest]
	require.Equal(t, 2, len(op.Inputs))
	require.Equal(t, 4, len(f.Actions))

	action = f.Actions[0]
	// Input 1 = the "bar" image backing the WithState chain.
	require.Equal(t, 1, int(action.Input))
	require.Equal(t, -1, int(action.SecondaryInput))
	require.Equal(t, -1, int(action.Output))
	require.Equal(t, "docker-image://docker.io/library/bar:latest", m[op.Inputs[1].Digest].Op.(*pb.Op_Source).Source.Identifier)
	mkdir = action.Action.(*pb.FileAction_Mkdir).Mkdir

	require.Equal(t, "/tmp/foo", mkdir.Path)
	require.Equal(t, 0700, int(mkdir.Mode))

	action = f.Actions[1]
	require.Equal(t, 2, int(action.Input))
	require.Equal(t, -1, int(action.SecondaryInput))
	require.Equal(t, -1, int(action.Output))

	mkfile := action.Action.(*pb.FileAction_Mkfile).Mkfile

	require.Equal(t, "/tmp/foo/bar", mkfile.Path)
	require.Equal(t, 0600, int(mkfile.Mode))
	require.Equal(t, "dt", string(mkfile.Data))

	action = f.Actions[2]
	require.Equal(t, 0, int(action.Input))
	require.Equal(t, 3, int(action.SecondaryInput))
	require.Equal(t, -1, int(action.Output))
	// Both FileOps share the same "foo" image source op.
	require.Equal(t, arr[4].Inputs[1].Digest, op.Inputs[0].Digest)

	copy = action.Action.(*pb.FileAction_Copy).Copy

	require.Equal(t, "/tmp/foo/bar", copy.Src)
	require.Equal(t, "/out/baz", copy.Dest)

	action = f.Actions[3]
	require.Equal(t, 4, int(action.Input))
	require.Equal(t, -1, int(action.SecondaryInput))
	require.Equal(t, 0, int(action.Output))

	rm := action.Action.(*pb.FileAction_Rm).Rm
	require.Equal(t, "/out/foo/bax", rm.Path)
}
|
||||
|
||||
// TestFileOwner verifies the ownership options on file actions: no option
// leaves Owner nil, WithUIDGID sets numeric user and group IDs, and WithUser
// sets a by-name user (Input 0 meaning resolve against the op's first input)
// with no group.
func TestFileOwner(t *testing.T) {
	t.Parallel()

	st := Image("foo").File(Mkdir("/foo", 0700).Mkdir("bar", 0600, WithUIDGID(123, 456)).Mkdir("bar/baz", 0701, WithUser("foouser")))
	def, err := st.Marshal()

	require.NoError(t, err)

	m, arr := parseDef(t, def.Def)
	require.Equal(t, 3, len(arr))

	dgst, idx := last(t, arr)
	require.Equal(t, 0, idx)
	require.Equal(t, m[dgst], arr[1])

	f := arr[1].Op.(*pb.Op_File).File
	require.Equal(t, len(arr[1].Inputs), 1)
	require.Equal(t, m[arr[1].Inputs[0].Digest], arr[0])
	require.Equal(t, 0, int(arr[1].Inputs[0].Index))

	require.Equal(t, 3, len(f.Actions))

	action := f.Actions[0]
	mkdir := action.Action.(*pb.FileAction_Mkdir).Mkdir
	// No ownership option: Owner stays unset.
	require.Nil(t, mkdir.Owner)

	action = f.Actions[1]
	mkdir = action.Action.(*pb.FileAction_Mkdir).Mkdir
	// WithUIDGID produces numeric by-ID user and group.
	require.Equal(t, 123, int(mkdir.Owner.User.User.(*pb.UserOpt_ByID).ByID))
	require.Equal(t, 456, int(mkdir.Owner.Group.User.(*pb.UserOpt_ByID).ByID))

	action = f.Actions[2]
	mkdir = action.Action.(*pb.FileAction_Mkdir).Mkdir

	// WithUser produces a by-name user resolved against op input 0; no group.
	require.Equal(t, "foouser", mkdir.Owner.User.User.(*pb.UserOpt_ByName).ByName.Name)
	require.Equal(t, 0, int(mkdir.Owner.User.User.(*pb.UserOpt_ByName).ByName.Input))
	require.Nil(t, mkdir.Owner.Group)
}
|
||||
|
||||
// TestFileCopyOwner verifies ownership resolution for copy actions: the
// ByName Input index records which op input the user name is resolved
// against (-1 when the base is Scratch, 0 when it is the "foo" image), and
// WithUIDGID sets numeric IDs for both user and group.
func TestFileCopyOwner(t *testing.T) {
	t.Parallel()

	st := Scratch().
		File(Mkdir("/foo", 0700, WithUser("user1")).
			Copy(Image("foo"), "src1", "dst", WithUser("user2")).
			Copy(
				Copy(Scratch(), "src0", "src2", WithUser("user3")).WithState(Image("foo")),
				"src2", "dst", WithUser("user4")).
			Copy(Image("foo"), "src3", "dst", WithUIDGID(1, 2)),
		)
	def, err := st.Marshal()

	require.NoError(t, err)

	m, arr := parseDef(t, def.Def)
	require.Equal(t, 3, len(arr))

	dgst, idx := last(t, arr)
	require.Equal(t, 0, idx)
	require.Equal(t, m[dgst], arr[1])

	f := arr[1].Op.(*pb.Op_File).File
	require.Equal(t, len(arr[1].Inputs), 1)
	require.Equal(t, m[arr[1].Inputs[0].Digest], arr[0])
	require.Equal(t, 0, int(arr[1].Inputs[0].Index))

	require.Equal(t, 5, len(f.Actions))

	action := f.Actions[0]
	mkdir := action.Action.(*pb.FileAction_Mkdir).Mkdir
	require.Equal(t, "user1", mkdir.Owner.User.User.(*pb.UserOpt_ByName).ByName.Name)
	// Base state is Scratch, so there is no input to resolve the name against.
	require.Equal(t, -1, int(mkdir.Owner.User.User.(*pb.UserOpt_ByName).ByName.Input))
	require.Nil(t, mkdir.Owner.Group)

	action = f.Actions[1]
	copy := action.Action.(*pb.FileAction_Copy).Copy
	require.Equal(t, "/src1", copy.Src)
	require.Equal(t, "user2", copy.Owner.User.User.(*pb.UserOpt_ByName).ByName.Name)
	require.Equal(t, -1, int(copy.Owner.User.User.(*pb.UserOpt_ByName).ByName.Input))
	require.Nil(t, copy.Owner.Group)

	action = f.Actions[2]
	copy = action.Action.(*pb.FileAction_Copy).Copy
	require.Equal(t, "/src0", copy.Src)
	require.Equal(t, "user3", copy.Owner.User.User.(*pb.UserOpt_ByName).ByName.Name)
	// This nested copy runs WithState(Image("foo")): name resolves against input 0.
	require.Equal(t, 0, int(copy.Owner.User.User.(*pb.UserOpt_ByName).ByName.Input))
	require.Nil(t, copy.Owner.Group)

	action = f.Actions[3]
	copy = action.Action.(*pb.FileAction_Copy).Copy
	require.Equal(t, "/src2", copy.Src)
	require.Equal(t, "user4", copy.Owner.User.User.(*pb.UserOpt_ByName).ByName.Name)
	require.Equal(t, -1, int(copy.Owner.User.User.(*pb.UserOpt_ByName).ByName.Input))
	require.Nil(t, copy.Owner.Group)

	action = f.Actions[4]
	copy = action.Action.(*pb.FileAction_Copy).Copy
	require.Equal(t, "/src3", copy.Src)
	// WithUIDGID(1, 2): numeric user and group IDs.
	require.Equal(t, 1, int(copy.Owner.User.User.(*pb.UserOpt_ByID).ByID))
	require.Equal(t, 2, int(copy.Owner.Group.User.(*pb.UserOpt_ByID).ByID))
}
|
||||
|
||||
// TestFileCreatedTime verifies that WithCreatedTime stores the given time as
// a UnixNano timestamp on Mkdir, Mkfile, and Copy actions.
func TestFileCreatedTime(t *testing.T) {
	t.Parallel()

	dt := time.Now()
	dt2 := time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC)
	dt3 := time.Date(2019, time.November, 10, 23, 0, 0, 0, time.UTC)

	st := Image("foo").File(
		Mkdir("/foo", 0700, WithCreatedTime(dt)).
			Mkfile("bar", 0600, []byte{}, WithCreatedTime(dt2)).
			Copy(Scratch(), "src", "dst", WithCreatedTime(dt3)))
	def, err := st.Marshal()

	require.NoError(t, err)

	m, arr := parseDef(t, def.Def)
	require.Equal(t, 3, len(arr))

	dgst, idx := last(t, arr)
	require.Equal(t, 0, idx)
	require.Equal(t, m[dgst], arr[1])

	f := arr[1].Op.(*pb.Op_File).File
	require.Equal(t, len(arr[1].Inputs), 1)
	require.Equal(t, m[arr[1].Inputs[0].Digest], arr[0])
	require.Equal(t, 0, int(arr[1].Inputs[0].Index))

	require.Equal(t, 3, len(f.Actions))

	action := f.Actions[0]
	mkdir := action.Action.(*pb.FileAction_Mkdir).Mkdir
	// Each action carries its own nanosecond timestamp.
	require.Equal(t, dt.UnixNano(), mkdir.Timestamp)

	action = f.Actions[1]
	mkfile := action.Action.(*pb.FileAction_Mkfile).Mkfile
	require.Equal(t, dt2.UnixNano(), mkfile.Timestamp)

	action = f.Actions[2]
	copy := action.Action.(*pb.FileAction_Copy).Copy
	require.Equal(t, dt3.UnixNano(), copy.Timestamp)
}
|
||||
|
||||
func parseDef(t *testing.T, def [][]byte) (map[digest.Digest]pb.Op, []pb.Op) {
|
||||
m := map[digest.Digest]pb.Op{}
|
||||
arr := make([]pb.Op, 0, len(def))
|
||||
|
||||
for _, dt := range def {
|
||||
var op pb.Op
|
||||
err := (&op).Unmarshal(dt)
|
||||
require.NoError(t, err)
|
||||
dgst := digest.FromBytes(dt)
|
||||
m[dgst] = op
|
||||
arr = append(arr, op)
|
||||
// fmt.Printf(":: %T %+v\n", op.Op, op)
|
||||
}
|
||||
|
||||
return m, arr
|
||||
}
|
||||
|
||||
func last(t *testing.T, arr []pb.Op) (digest.Digest, int) {
|
||||
require.True(t, len(arr) > 1)
|
||||
|
||||
op := arr[len(arr)-1]
|
||||
require.Equal(t, 1, len(op.Inputs))
|
||||
return op.Inputs[0].Digest, int(op.Inputs[0].Index)
|
||||
}
|
|
@ -229,6 +229,15 @@ func (s State) Run(ro ...RunOption) ExecState {
|
|||
}
|
||||
}
|
||||
|
||||
// File returns a new State with the file action a (mkdir, mkfile, rm,
// copy, ...) applied on top of s as a single FileOp vertex. Any opts are
// collected into the op's Constraints before construction.
func (s State) File(a *FileAction, opts ...ConstraintsOpt) State {
	var c Constraints
	for _, o := range opts {
		o.SetConstraintsOption(&c)
	}

	return s.WithOutput(NewFileOp(s, a, c).Output())
}
|
||||
|
||||
func (s State) AddEnv(key, value string) State {
|
||||
return s.AddEnvf(key, value)
|
||||
}
|
||||
|
@ -295,6 +304,8 @@ func (s State) AddExtraHost(host string, ip net.IP) State {
|
|||
return extraHost(host, ip)(s)
|
||||
}
|
||||
|
||||
func (s State) isFileOpCopyInput() {}
|
||||
|
||||
type output struct {
|
||||
vertex Vertex
|
||||
getIndex func() (pb.OutputIndex, error)
|
||||
|
|
|
@ -0,0 +1,11 @@
|
|||
// +build !windows
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
)
|
||||
|
||||
func init() {
|
||||
syscall.Umask(0)
|
||||
}
|
|
@ -19,6 +19,7 @@ import (
|
|||
"github.com/containerd/containerd/pkg/seed"
|
||||
"github.com/containerd/containerd/platforms"
|
||||
"github.com/containerd/containerd/sys"
|
||||
"github.com/docker/docker/pkg/reexec"
|
||||
"github.com/docker/go-connections/sockets"
|
||||
"github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc"
|
||||
"github.com/moby/buildkit/cache/remotecache"
|
||||
|
@ -55,6 +56,7 @@ import (
|
|||
func init() {
|
||||
apicaps.ExportedProduct = "buildkit"
|
||||
seed.WithTimeAndRand()
|
||||
reexec.Init()
|
||||
}
|
||||
|
||||
type workerInitializerOpt struct {
|
||||
|
|
|
@ -0,0 +1,11 @@
|
|||
// +build !windows
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
)
|
||||
|
||||
func init() {
|
||||
syscall.Umask(0)
|
||||
}
|
|
@ -9,6 +9,7 @@ import (
|
|||
"github.com/moby/buildkit/client/llb"
|
||||
"github.com/moby/buildkit/client/llb/imagemetaresolver"
|
||||
"github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb"
|
||||
"github.com/moby/buildkit/solver/pb"
|
||||
"github.com/moby/buildkit/util/appcontext"
|
||||
)
|
||||
|
||||
|
@ -26,9 +27,12 @@ func main() {
|
|||
panic(err)
|
||||
}
|
||||
|
||||
caps := pb.Caps.CapSet(pb.Caps.All())
|
||||
|
||||
state, img, err := dockerfile2llb.Dockerfile2LLB(appcontext.Context(), df, dockerfile2llb.ConvertOpt{
|
||||
MetaResolver: imagemetaresolver.Default(),
|
||||
Target: opt.target,
|
||||
LLBCaps: &caps,
|
||||
})
|
||||
if err != nil {
|
||||
log.Printf("err: %+v", err)
|
||||
|
|
|
@ -151,6 +151,10 @@ func Dockerfile2LLB(ctx context.Context, dt []byte, opt ConvertOpt) (*llb.State,
|
|||
switch cmd.(type) {
|
||||
case *instructions.AddCommand, *instructions.CopyCommand, *instructions.RunCommand:
|
||||
total++
|
||||
case *instructions.WorkdirCommand:
|
||||
if useFileOp(opt.BuildArgs, opt.LLBCaps) {
|
||||
total++
|
||||
}
|
||||
}
|
||||
}
|
||||
ds.cmdTotal = total
|
||||
|
@ -307,7 +311,7 @@ func Dockerfile2LLB(ctx context.Context, dt []byte, opt ConvertOpt) (*llb.State,
|
|||
d.state = d.state.AddEnv(k, v)
|
||||
}
|
||||
if d.image.Config.WorkingDir != "" {
|
||||
if err = dispatchWorkdir(d, &instructions.WorkdirCommand{Path: d.image.Config.WorkingDir}, false); err != nil {
|
||||
if err = dispatchWorkdir(d, &instructions.WorkdirCommand{Path: d.image.Config.WorkingDir}, false, nil); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
}
|
||||
|
@ -468,9 +472,9 @@ func dispatch(d *dispatchState, cmd command, opt dispatchOpt) error {
|
|||
case *instructions.RunCommand:
|
||||
err = dispatchRun(d, c, opt.proxyEnv, cmd.sources, opt)
|
||||
case *instructions.WorkdirCommand:
|
||||
err = dispatchWorkdir(d, c, true)
|
||||
err = dispatchWorkdir(d, c, true, &opt)
|
||||
case *instructions.AddCommand:
|
||||
err = dispatchCopy(d, c.SourcesAndDest, opt.buildContext, true, c, "", opt)
|
||||
err = dispatchCopy(d, c.SourcesAndDest, opt.buildContext, true, c, c.Chown, opt)
|
||||
if err == nil {
|
||||
for _, src := range c.Sources() {
|
||||
if !strings.HasPrefix(src, "http://") && !strings.HasPrefix(src, "https://") {
|
||||
|
@ -648,7 +652,7 @@ func dispatchRun(d *dispatchState, c *instructions.RunCommand, proxy *llb.ProxyE
|
|||
return commitToHistory(&d.image, "RUN "+runCommandString(args, d.buildArgs), true, &d.state)
|
||||
}
|
||||
|
||||
func dispatchWorkdir(d *dispatchState, c *instructions.WorkdirCommand, commit bool) error {
|
||||
func dispatchWorkdir(d *dispatchState, c *instructions.WorkdirCommand, commit bool, opt *dispatchOpt) error {
|
||||
d.state = d.state.Dir(c.Path)
|
||||
wd := c.Path
|
||||
if !path.IsAbs(c.Path) {
|
||||
|
@ -656,13 +660,115 @@ func dispatchWorkdir(d *dispatchState, c *instructions.WorkdirCommand, commit bo
|
|||
}
|
||||
d.image.Config.WorkingDir = wd
|
||||
if commit {
|
||||
return commitToHistory(&d.image, "WORKDIR "+wd, false, nil)
|
||||
withLayer := false
|
||||
if wd != "/" && opt != nil && useFileOp(opt.buildArgValues, opt.llbCaps) {
|
||||
mkdirOpt := []llb.MkdirOption{llb.WithParents(true)}
|
||||
if user := d.image.Config.User; user != "" {
|
||||
mkdirOpt = append(mkdirOpt, llb.WithUser(user))
|
||||
}
|
||||
platform := opt.targetPlatform
|
||||
if d.platform != nil {
|
||||
platform = *d.platform
|
||||
}
|
||||
d.state = d.state.File(llb.Mkdir(wd, 0755, mkdirOpt...), llb.WithCustomName(prefixCommand(d, uppercaseCmd(processCmdEnv(opt.shlex, c.String(), d.state.Env())), d.prefixPlatform, &platform)))
|
||||
withLayer = true
|
||||
}
|
||||
return commitToHistory(&d.image, "WORKDIR "+wd, withLayer, nil)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func dispatchCopyFileOp(d *dispatchState, c instructions.SourcesAndDest, sourceState llb.State, isAddCommand bool, cmdToPrint fmt.Stringer, chown string, opt dispatchOpt) error {
|
||||
dest := path.Join("/", pathRelativeToWorkingDir(d.state, c.Dest()))
|
||||
if c.Dest() == "." || c.Dest() == "" || c.Dest()[len(c.Dest())-1] == filepath.Separator {
|
||||
dest += string(filepath.Separator)
|
||||
}
|
||||
|
||||
var copyOpt []llb.CopyOption
|
||||
|
||||
if chown != "" {
|
||||
copyOpt = append(copyOpt, llb.WithUser(chown))
|
||||
}
|
||||
|
||||
commitMessage := bytes.NewBufferString("")
|
||||
if isAddCommand {
|
||||
commitMessage.WriteString("ADD")
|
||||
} else {
|
||||
commitMessage.WriteString("COPY")
|
||||
}
|
||||
|
||||
var a *llb.FileAction
|
||||
|
||||
for _, src := range c.Sources() {
|
||||
commitMessage.WriteString(" " + src)
|
||||
if strings.HasPrefix(src, "http://") || strings.HasPrefix(src, "https://") {
|
||||
if !isAddCommand {
|
||||
return errors.New("source can't be a URL for COPY")
|
||||
}
|
||||
|
||||
// Resources from remote URLs are not decompressed.
|
||||
// https://docs.docker.com/engine/reference/builder/#add
|
||||
//
|
||||
// Note: mixing up remote archives and local archives in a single ADD instruction
|
||||
// would result in undefined behavior: https://github.com/moby/buildkit/pull/387#discussion_r189494717
|
||||
u, err := url.Parse(src)
|
||||
f := "__unnamed__"
|
||||
if err == nil {
|
||||
if base := path.Base(u.Path); base != "." && base != "/" {
|
||||
f = base
|
||||
}
|
||||
}
|
||||
|
||||
st := llb.HTTP(src, llb.Filename(f), dfCmd(c))
|
||||
|
||||
opts := append([]llb.CopyOption{&llb.CopyInfo{
|
||||
CreateDestPath: true,
|
||||
}}, copyOpt...)
|
||||
|
||||
if a == nil {
|
||||
a = llb.Copy(st, f, dest, opts...)
|
||||
} else {
|
||||
a = a.Copy(st, f, dest, opts...)
|
||||
}
|
||||
} else {
|
||||
opts := append([]llb.CopyOption{&llb.CopyInfo{
|
||||
FollowSymlinks: true,
|
||||
CopyDirContentsOnly: true,
|
||||
AttemptUnpack: isAddCommand,
|
||||
CreateDestPath: true,
|
||||
AllowWildcard: true,
|
||||
AllowEmptyWildcard: true,
|
||||
}}, copyOpt...)
|
||||
|
||||
if a == nil {
|
||||
a = llb.Copy(sourceState, src, dest, opts...)
|
||||
} else {
|
||||
a = a.Copy(sourceState, src, dest, opts...)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
commitMessage.WriteString(" " + c.Dest())
|
||||
|
||||
platform := opt.targetPlatform
|
||||
if d.platform != nil {
|
||||
platform = *d.platform
|
||||
}
|
||||
|
||||
fileOpt := []llb.ConstraintsOpt{llb.WithCustomName(prefixCommand(d, uppercaseCmd(processCmdEnv(opt.shlex, cmdToPrint.String(), d.state.Env())), d.prefixPlatform, &platform))}
|
||||
if d.ignoreCache {
|
||||
fileOpt = append(fileOpt, llb.IgnoreCache)
|
||||
}
|
||||
|
||||
d.state = d.state.File(a, fileOpt...)
|
||||
return commitToHistory(&d.image, commitMessage.String(), true, &d.state)
|
||||
}
|
||||
|
||||
func dispatchCopy(d *dispatchState, c instructions.SourcesAndDest, sourceState llb.State, isAddCommand bool, cmdToPrint fmt.Stringer, chown string, opt dispatchOpt) error {
|
||||
// TODO: this should use CopyOp instead. Current implementation is inefficient
|
||||
if useFileOp(opt.buildArgValues, opt.llbCaps) {
|
||||
return dispatchCopyFileOp(d, c, sourceState, isAddCommand, cmdToPrint, chown, opt)
|
||||
}
|
||||
|
||||
img := llb.Image(opt.copyImage, llb.MarkImageInternal, llb.Platform(opt.buildPlatforms[0]), WithInternalName("helper image for file operations"))
|
||||
|
||||
dest := path.Join(".", pathRelativeToWorkingDir(d.state, c.Dest()))
|
||||
|
@ -1176,3 +1282,13 @@ func prefixCommand(ds *dispatchState, str string, prefixPlatform bool, platform
|
|||
out += fmt.Sprintf("%d/%d] ", ds.cmdIndex, ds.cmdTotal)
|
||||
return out + str
|
||||
}
|
||||
|
||||
func useFileOp(args map[string]string, caps *apicaps.CapSet) bool {
|
||||
enabled := true
|
||||
if v, ok := args["BUILDKIT_DISABLE_FILEOP"]; ok {
|
||||
if b, err := strconv.ParseBool(v); err == nil {
|
||||
enabled = !b
|
||||
}
|
||||
}
|
||||
return enabled && caps != nil && caps.Supports(pb.CapFileBase) == nil
|
||||
}
|
||||
|
|
|
@ -15,6 +15,7 @@ import (
|
|||
"path/filepath"
|
||||
"runtime"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
@ -38,7 +39,6 @@ import (
|
|||
)
|
||||
|
||||
var allTests = []integration.Test{
|
||||
testNoSnapshotLeak,
|
||||
testCmdShell,
|
||||
testGlobalArg,
|
||||
testDockerfileDirs,
|
||||
|
@ -52,11 +52,7 @@ var allTests = []integration.Test{
|
|||
testDockerignore,
|
||||
testDockerignoreInvalid,
|
||||
testDockerfileFromGit,
|
||||
testCopyChown,
|
||||
testCopyWildcards,
|
||||
testCopyOverrideFiles,
|
||||
testMultiStageImplicitFrom,
|
||||
testCopyVarSubstitution,
|
||||
testMultiStageCaseInsensitive,
|
||||
testLabels,
|
||||
testCacheImportExport,
|
||||
|
@ -68,19 +64,33 @@ var allTests = []integration.Test{
|
|||
testPullScratch,
|
||||
testSymlinkDestination,
|
||||
testHTTPDockerfile,
|
||||
testNoSnapshotLeak,
|
||||
testCopySymlinks,
|
||||
testContextChangeDirToFile,
|
||||
testPlatformArgsImplicit,
|
||||
testPlatformArgsExplicit,
|
||||
testExportMultiPlatform,
|
||||
testQuotedMetaArgs,
|
||||
testIgnoreEntrypoint,
|
||||
testSymlinkedDockerfile,
|
||||
testDockerfileAddArchiveWildcard,
|
||||
testEmptyWildcard,
|
||||
testWorkdirCreatesDir,
|
||||
testDockerfileAddArchiveWildcard,
|
||||
testCopyChownExistingDir,
|
||||
testCopyWildcardCache,
|
||||
}
|
||||
|
||||
var fileOpTests = []integration.Test{
|
||||
testEmptyDestDir,
|
||||
testCopyChownCreateDest,
|
||||
testCopyThroughSymlinkContext,
|
||||
testCopyThroughSymlinkMultiStage,
|
||||
testCopyChownCreateDest,
|
||||
testEmptyDestDir,
|
||||
testSymlinkedDockerfile,
|
||||
testContextChangeDirToFile,
|
||||
testNoSnapshotLeak,
|
||||
testCopySymlinks,
|
||||
testCopyChown,
|
||||
testCopyOverrideFiles,
|
||||
testCopyVarSubstitution,
|
||||
testCopyWildcards,
|
||||
testCopyRelative,
|
||||
}
|
||||
|
||||
var opts []integration.TestOpt
|
||||
|
@ -120,10 +130,15 @@ func init() {
|
|||
|
||||
func TestIntegration(t *testing.T) {
|
||||
integration.Run(t, allTests, opts...)
|
||||
integration.Run(t, fileOpTests, append(opts, integration.WithMatrix("fileop", map[string]interface{}{
|
||||
"true": true,
|
||||
"false": false,
|
||||
}))...)
|
||||
}
|
||||
|
||||
func testEmptyDestDir(t *testing.T, sb integration.Sandbox) {
|
||||
f := getFrontend(t, sb)
|
||||
isFileOp := getFileOp(t, sb)
|
||||
|
||||
dockerfile := []byte(`
|
||||
FROM busybox
|
||||
|
@ -144,6 +159,9 @@ RUN [ "$(cat testfile)" == "contents0" ]
|
|||
defer c.Close()
|
||||
|
||||
_, err = f.Solve(context.TODO(), c, client.SolveOpt{
|
||||
FrontendAttrs: map[string]string{
|
||||
"build-arg:BUILDKIT_DISABLE_FILEOP": strconv.FormatBool(!isFileOp),
|
||||
},
|
||||
LocalDirs: map[string]string{
|
||||
builder.DefaultLocalNameDockerfile: dir,
|
||||
builder.DefaultLocalNameContext: dir,
|
||||
|
@ -152,6 +170,48 @@ RUN [ "$(cat testfile)" == "contents0" ]
|
|||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
func testWorkdirCreatesDir(t *testing.T, sb integration.Sandbox) {
|
||||
f := getFrontend(t, sb)
|
||||
|
||||
dockerfile := []byte(`
|
||||
FROM scratch
|
||||
WORKDIR /foo
|
||||
WORKDIR /
|
||||
`)
|
||||
|
||||
dir, err := tmpdir(
|
||||
fstest.CreateFile("Dockerfile", dockerfile, 0600),
|
||||
)
|
||||
require.NoError(t, err)
|
||||
defer os.RemoveAll(dir)
|
||||
|
||||
c, err := client.New(context.TODO(), sb.Address())
|
||||
require.NoError(t, err)
|
||||
defer c.Close()
|
||||
|
||||
destDir, err := ioutil.TempDir("", "buildkit")
|
||||
require.NoError(t, err)
|
||||
defer os.RemoveAll(destDir)
|
||||
|
||||
_, err = f.Solve(context.TODO(), c, client.SolveOpt{
|
||||
Exports: []client.ExportEntry{
|
||||
{
|
||||
Type: client.ExporterLocal,
|
||||
OutputDir: destDir,
|
||||
},
|
||||
},
|
||||
LocalDirs: map[string]string{
|
||||
builder.DefaultLocalNameDockerfile: dir,
|
||||
builder.DefaultLocalNameContext: dir,
|
||||
},
|
||||
}, nil)
|
||||
require.NoError(t, err)
|
||||
|
||||
fi, err := os.Lstat(filepath.Join(destDir, "foo"))
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, fi.IsDir())
|
||||
}
|
||||
|
||||
func testSymlinkedDockerfile(t *testing.T, sb integration.Sandbox) {
|
||||
f := getFrontend(t, sb)
|
||||
|
||||
|
@ -180,8 +240,242 @@ ENV foo bar
|
|||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
func testCopyChownExistingDir(t *testing.T, sb integration.Sandbox) {
|
||||
f := getFrontend(t, sb)
|
||||
|
||||
dockerfile := []byte(`
|
||||
# Set up files and directories with known ownership
|
||||
FROM busybox AS source
|
||||
RUN touch /file && chown 100:200 /file \
|
||||
&& mkdir -p /dir/subdir \
|
||||
&& touch /dir/subdir/nestedfile \
|
||||
&& chown 100:200 /dir \
|
||||
&& chown 101:201 /dir/subdir \
|
||||
&& chown 102:202 /dir/subdir/nestedfile
|
||||
|
||||
FROM busybox AS test_base
|
||||
RUN mkdir -p /existingdir/existingsubdir \
|
||||
&& touch /existingdir/existingfile \
|
||||
&& chown 500:600 /existingdir \
|
||||
&& chown 501:601 /existingdir/existingsubdir \
|
||||
&& chown 501:601 /existingdir/existingfile
|
||||
|
||||
|
||||
# Copy files from the source stage
|
||||
FROM test_base AS copy_from
|
||||
COPY --from=source /file .
|
||||
# Copy to a non-existing target directory creates the target directory (as root), then copies the _contents_ of the source directory into it
|
||||
COPY --from=source /dir /dir
|
||||
# Copying to an existing target directory will copy the _contents_ of the source directory into it
|
||||
COPY --from=source /dir/. /existingdir
|
||||
|
||||
RUN e="100:200"; p="/file" ; a=` + "`" + `stat -c "%u:%g" "$p"` + "`" + `; if [ "$a" != "$e" ]; then echo "incorrect ownership on $p. expected $e, got $a"; exit 1; fi \
|
||||
&& e="0:0"; p="/dir" ; a=` + "`" + `stat -c "%u:%g" "$p"` + "`" + `; if [ "$a" != "$e" ]; then echo "incorrect ownership on $p. expected $e, got $a"; exit 1; fi \
|
||||
&& e="101:201"; p="/dir/subdir" ; a=` + "`" + `stat -c "%u:%g" "$p"` + "`" + `; if [ "$a" != "$e" ]; then echo "incorrect ownership on $p. expected $e, got $a"; exit 1; fi \
|
||||
&& e="102:202"; p="/dir/subdir/nestedfile" ; a=` + "`" + `stat -c "%u:%g" "$p"` + "`" + `; if [ "$a" != "$e" ]; then echo "incorrect ownership on $p. expected $e, got $a"; exit 1; fi \
|
||||
# Existing files and directories ownership should not be modified
|
||||
&& e="500:600"; p="/existingdir" ; a=` + "`" + `stat -c "%u:%g" "$p"` + "`" + `; if [ "$a" != "$e" ]; then echo "incorrect ownership on $p. expected $e, got $a"; exit 1; fi \
|
||||
&& e="501:601"; p="/existingdir/existingsubdir" ; a=` + "`" + `stat -c "%u:%g" "$p"` + "`" + `; if [ "$a" != "$e" ]; then echo "incorrect ownership on $p. expected $e, got $a"; exit 1; fi \
|
||||
&& e="501:601"; p="/existingdir/existingfile" ; a=` + "`" + `stat -c "%u:%g" "$p"` + "`" + `; if [ "$a" != "$e" ]; then echo "incorrect ownership on $p. expected $e, got $a"; exit 1; fi \
|
||||
# But new files and directories should maintain their ownership
|
||||
&& e="101:201"; p="/existingdir/subdir" ; a=` + "`" + `stat -c "%u:%g" "$p"` + "`" + `; if [ "$a" != "$e" ]; then echo "incorrect ownership on $p. expected $e, got $a"; exit 1; fi \
|
||||
&& e="102:202"; p="/existingdir/subdir/nestedfile"; a=` + "`" + `stat -c "%u:%g" "$p"` + "`" + `; if [ "$a" != "$e" ]; then echo "incorrect ownership on $p. expected $e, got $a"; exit 1; fi
|
||||
|
||||
|
||||
# Copy files from the source stage and chown them.
|
||||
FROM test_base AS copy_from_chowned
|
||||
COPY --from=source --chown=300:400 /file .
|
||||
# Copy to a non-existing target directory creates the target directory (as root), then copies the _contents_ of the source directory into it
|
||||
COPY --from=source --chown=300:400 /dir /dir
|
||||
# Copying to an existing target directory copies the _contents_ of the source directory into it
|
||||
COPY --from=source --chown=300:400 /dir/. /existingdir
|
||||
|
||||
RUN e="300:400"; p="/file" ; a=` + "`" + `stat -c "%u:%g" "$p"` + "`" + `; if [ "$a" != "$e" ]; then echo "incorrect ownership on $p. expected $e, got $a"; exit 1; fi \
|
||||
&& e="300:400"; p="/dir" ; a=` + "`" + `stat -c "%u:%g" "$p"` + "`" + `; if [ "$a" != "$e" ]; then echo "incorrect ownership on $p. expected $e, got $a"; exit 1; fi \
|
||||
&& e="300:400"; p="/dir/subdir" ; a=` + "`" + `stat -c "%u:%g" "$p"` + "`" + `; if [ "$a" != "$e" ]; then echo "incorrect ownership on $p. expected $e, got $a"; exit 1; fi \
|
||||
&& e="300:400"; p="/dir/subdir/nestedfile" ; a=` + "`" + `stat -c "%u:%g" "$p"` + "`" + `; if [ "$a" != "$e" ]; then echo "incorrect ownership on $p. expected $e, got $a"; exit 1; fi \
|
||||
# Existing files and directories ownership should not be modified
|
||||
&& e="500:600"; p="/existingdir" ; a=` + "`" + `stat -c "%u:%g" "$p"` + "`" + `; if [ "$a" != "$e" ]; then echo "incorrect ownership on $p. expected $e, got $a"; exit 1; fi \
|
||||
&& e="501:601"; p="/existingdir/existingsubdir" ; a=` + "`" + `stat -c "%u:%g" "$p"` + "`" + `; if [ "$a" != "$e" ]; then echo "incorrect ownership on $p. expected $e, got $a"; exit 1; fi \
|
||||
&& e="501:601"; p="/existingdir/existingfile" ; a=` + "`" + `stat -c "%u:%g" "$p"` + "`" + `; if [ "$a" != "$e" ]; then echo "incorrect ownership on $p. expected $e, got $a"; exit 1; fi \
|
||||
# But new files and directories should be chowned
|
||||
&& e="300:400"; p="/existingdir/subdir" ; a=` + "`" + `stat -c "%u:%g" "$p"` + "`" + `; if [ "$a" != "$e" ]; then echo "incorrect ownership on $p. expected $e, got $a"; exit 1; fi \
|
||||
&& e="300:400"; p="/existingdir/subdir/nestedfile"; a=` + "`" + `stat -c "%u:%g" "$p"` + "`" + `; if [ "$a" != "$e" ]; then echo "incorrect ownership on $p. expected $e, got $a"; exit 1; fi
|
||||
`)
|
||||
|
||||
dir, err := tmpdir(
|
||||
fstest.CreateFile("Dockerfile.web", dockerfile, 0600),
|
||||
fstest.Symlink("Dockerfile.web", "Dockerfile"),
|
||||
)
|
||||
require.NoError(t, err)
|
||||
defer os.RemoveAll(dir)
|
||||
|
||||
c, err := client.New(context.TODO(), sb.Address())
|
||||
require.NoError(t, err)
|
||||
defer c.Close()
|
||||
|
||||
_, err = f.Solve(context.TODO(), c, client.SolveOpt{
|
||||
LocalDirs: map[string]string{
|
||||
builder.DefaultLocalNameDockerfile: dir,
|
||||
builder.DefaultLocalNameContext: dir,
|
||||
},
|
||||
}, nil)
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = f.Solve(context.TODO(), c, client.SolveOpt{
|
||||
FrontendAttrs: map[string]string{
|
||||
"target": "copy_from",
|
||||
},
|
||||
LocalDirs: map[string]string{
|
||||
builder.DefaultLocalNameDockerfile: dir,
|
||||
builder.DefaultLocalNameContext: dir,
|
||||
},
|
||||
}, nil)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
func testCopyWildcardCache(t *testing.T, sb integration.Sandbox) {
|
||||
f := getFrontend(t, sb)
|
||||
|
||||
dockerfile := []byte(`
|
||||
FROM busybox AS base
|
||||
COPY foo* files/
|
||||
RUN cat /dev/urandom | head -c 100 | sha256sum > unique
|
||||
COPY bar files/
|
||||
FROM scratch
|
||||
COPY --from=base unique /
|
||||
`)
|
||||
|
||||
dir, err := tmpdir(
|
||||
fstest.CreateFile("Dockerfile", dockerfile, 0600),
|
||||
fstest.CreateFile("foo1", []byte("foo1-data"), 0600),
|
||||
fstest.CreateFile("foo2", []byte("foo2-data"), 0600),
|
||||
fstest.CreateFile("bar", []byte("bar-data"), 0600),
|
||||
)
|
||||
require.NoError(t, err)
|
||||
defer os.RemoveAll(dir)
|
||||
|
||||
c, err := client.New(context.TODO(), sb.Address())
|
||||
require.NoError(t, err)
|
||||
defer c.Close()
|
||||
|
||||
destDir, err := ioutil.TempDir("", "buildkit")
|
||||
require.NoError(t, err)
|
||||
defer os.RemoveAll(destDir)
|
||||
|
||||
_, err = f.Solve(context.TODO(), c, client.SolveOpt{
|
||||
Exports: []client.ExportEntry{
|
||||
{
|
||||
Type: client.ExporterLocal,
|
||||
OutputDir: destDir,
|
||||
},
|
||||
},
|
||||
LocalDirs: map[string]string{
|
||||
builder.DefaultLocalNameDockerfile: dir,
|
||||
builder.DefaultLocalNameContext: dir,
|
||||
},
|
||||
}, nil)
|
||||
require.NoError(t, err)
|
||||
|
||||
dt, err := ioutil.ReadFile(filepath.Join(destDir, "unique"))
|
||||
require.NoError(t, err)
|
||||
|
||||
err = ioutil.WriteFile(filepath.Join(dir, "bar"), []byte("bar-data-mod"), 0600)
|
||||
require.NoError(t, err)
|
||||
|
||||
destDir, err = ioutil.TempDir("", "buildkit")
|
||||
require.NoError(t, err)
|
||||
defer os.RemoveAll(destDir)
|
||||
|
||||
_, err = f.Solve(context.TODO(), c, client.SolveOpt{
|
||||
Exports: []client.ExportEntry{
|
||||
{
|
||||
Type: client.ExporterLocal,
|
||||
OutputDir: destDir,
|
||||
},
|
||||
},
|
||||
LocalDirs: map[string]string{
|
||||
builder.DefaultLocalNameDockerfile: dir,
|
||||
builder.DefaultLocalNameContext: dir,
|
||||
},
|
||||
}, nil)
|
||||
require.NoError(t, err)
|
||||
|
||||
dt2, err := ioutil.ReadFile(filepath.Join(destDir, "unique"))
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, string(dt), string(dt2))
|
||||
|
||||
err = ioutil.WriteFile(filepath.Join(dir, "foo2"), []byte("foo2-data-mod"), 0600)
|
||||
require.NoError(t, err)
|
||||
|
||||
destDir, err = ioutil.TempDir("", "buildkit")
|
||||
require.NoError(t, err)
|
||||
defer os.RemoveAll(destDir)
|
||||
|
||||
_, err = f.Solve(context.TODO(), c, client.SolveOpt{
|
||||
Exports: []client.ExportEntry{
|
||||
{
|
||||
Type: client.ExporterLocal,
|
||||
OutputDir: destDir,
|
||||
},
|
||||
},
|
||||
LocalDirs: map[string]string{
|
||||
builder.DefaultLocalNameDockerfile: dir,
|
||||
builder.DefaultLocalNameContext: dir,
|
||||
},
|
||||
}, nil)
|
||||
require.NoError(t, err)
|
||||
|
||||
dt2, err = ioutil.ReadFile(filepath.Join(destDir, "unique"))
|
||||
require.NoError(t, err)
|
||||
require.NotEqual(t, string(dt), string(dt2))
|
||||
}
|
||||
|
||||
func testEmptyWildcard(t *testing.T, sb integration.Sandbox) {
|
||||
f := getFrontend(t, sb)
|
||||
|
||||
dockerfile := []byte(`
|
||||
FROM scratch
|
||||
COPY foo nomatch* /
|
||||
`)
|
||||
|
||||
dir, err := tmpdir(
|
||||
fstest.CreateFile("Dockerfile", dockerfile, 0600),
|
||||
fstest.CreateFile("foo", []byte("contents0"), 0600),
|
||||
)
|
||||
require.NoError(t, err)
|
||||
defer os.RemoveAll(dir)
|
||||
|
||||
c, err := client.New(context.TODO(), sb.Address())
|
||||
require.NoError(t, err)
|
||||
defer c.Close()
|
||||
|
||||
destDir, err := ioutil.TempDir("", "buildkit")
|
||||
require.NoError(t, err)
|
||||
defer os.RemoveAll(destDir)
|
||||
|
||||
_, err = f.Solve(context.TODO(), c, client.SolveOpt{
|
||||
Exports: []client.ExportEntry{
|
||||
{
|
||||
Type: client.ExporterLocal,
|
||||
OutputDir: destDir,
|
||||
},
|
||||
},
|
||||
LocalDirs: map[string]string{
|
||||
builder.DefaultLocalNameDockerfile: dir,
|
||||
builder.DefaultLocalNameContext: dir,
|
||||
},
|
||||
}, nil)
|
||||
require.NoError(t, err)
|
||||
|
||||
dt, err := ioutil.ReadFile(filepath.Join(destDir, "foo"))
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, "contents0", string(dt))
|
||||
}
|
||||
|
||||
func testCopyChownCreateDest(t *testing.T, sb integration.Sandbox) {
|
||||
f := getFrontend(t, sb)
|
||||
isFileOp := getFileOp(t, sb)
|
||||
|
||||
dockerfile := []byte(`
|
||||
FROM busybox
|
||||
|
@ -201,6 +495,9 @@ RUN [ "$(stat -c "%U %G" /dest)" == "user user" ]
|
|||
defer c.Close()
|
||||
|
||||
_, err = f.Solve(context.TODO(), c, client.SolveOpt{
|
||||
FrontendAttrs: map[string]string{
|
||||
"build-arg:BUILDKIT_DISABLE_FILEOP": strconv.FormatBool(!isFileOp),
|
||||
},
|
||||
LocalDirs: map[string]string{
|
||||
builder.DefaultLocalNameDockerfile: dir,
|
||||
builder.DefaultLocalNameContext: dir,
|
||||
|
@ -211,6 +508,7 @@ RUN [ "$(stat -c "%U %G" /dest)" == "user user" ]
|
|||
|
||||
func testCopyThroughSymlinkContext(t *testing.T, sb integration.Sandbox) {
|
||||
f := getFrontend(t, sb)
|
||||
isFileOp := getFileOp(t, sb)
|
||||
|
||||
dockerfile := []byte(`
|
||||
FROM scratch
|
||||
|
@ -241,6 +539,9 @@ COPY link/foo .
|
|||
OutputDir: destDir,
|
||||
},
|
||||
},
|
||||
FrontendAttrs: map[string]string{
|
||||
"build-arg:BUILDKIT_DISABLE_FILEOP": strconv.FormatBool(!isFileOp),
|
||||
},
|
||||
LocalDirs: map[string]string{
|
||||
builder.DefaultLocalNameDockerfile: dir,
|
||||
builder.DefaultLocalNameContext: dir,
|
||||
|
@ -255,6 +556,7 @@ COPY link/foo .
|
|||
|
||||
func testCopyThroughSymlinkMultiStage(t *testing.T, sb integration.Sandbox) {
|
||||
f := getFrontend(t, sb)
|
||||
isFileOp := getFileOp(t, sb)
|
||||
|
||||
dockerfile := []byte(`
|
||||
FROM busybox AS build
|
||||
|
@ -285,6 +587,9 @@ COPY --from=build /sub2/foo bar
|
|||
OutputDir: destDir,
|
||||
},
|
||||
},
|
||||
FrontendAttrs: map[string]string{
|
||||
"build-arg:BUILDKIT_DISABLE_FILEOP": strconv.FormatBool(!isFileOp),
|
||||
},
|
||||
LocalDirs: map[string]string{
|
||||
builder.DefaultLocalNameDockerfile: dir,
|
||||
builder.DefaultLocalNameContext: dir,
|
||||
|
@ -514,6 +819,7 @@ COPY arch-$TARGETARCH whoami
|
|||
// tonistiigi/fsutil#46
|
||||
func testContextChangeDirToFile(t *testing.T, sb integration.Sandbox) {
|
||||
f := getFrontend(t, sb)
|
||||
isFileOp := getFileOp(t, sb)
|
||||
|
||||
dockerfile := []byte(`
|
||||
FROM scratch
|
||||
|
@ -533,6 +839,9 @@ COPY foo /
|
|||
defer c.Close()
|
||||
|
||||
_, err = f.Solve(context.TODO(), c, client.SolveOpt{
|
||||
FrontendAttrs: map[string]string{
|
||||
"build-arg:BUILDKIT_DISABLE_FILEOP": strconv.FormatBool(!isFileOp),
|
||||
},
|
||||
LocalDirs: map[string]string{
|
||||
builder.DefaultLocalNameDockerfile: dir,
|
||||
builder.DefaultLocalNameContext: dir,
|
||||
|
@ -558,6 +867,9 @@ COPY foo /
|
|||
OutputDir: destDir,
|
||||
},
|
||||
},
|
||||
FrontendAttrs: map[string]string{
|
||||
"build-arg:BUILDKIT_DISABLE_FILEOP": strconv.FormatBool(!isFileOp),
|
||||
},
|
||||
LocalDirs: map[string]string{
|
||||
builder.DefaultLocalNameDockerfile: dir,
|
||||
builder.DefaultLocalNameContext: dir,
|
||||
|
@ -572,6 +884,7 @@ COPY foo /
|
|||
|
||||
func testNoSnapshotLeak(t *testing.T, sb integration.Sandbox) {
|
||||
f := getFrontend(t, sb)
|
||||
isFileOp := getFileOp(t, sb)
|
||||
|
||||
dockerfile := []byte(`
|
||||
FROM scratch
|
||||
|
@ -590,6 +903,9 @@ COPY foo /
|
|||
defer c.Close()
|
||||
|
||||
_, err = f.Solve(context.TODO(), c, client.SolveOpt{
|
||||
FrontendAttrs: map[string]string{
|
||||
"build-arg:BUILDKIT_DISABLE_FILEOP": strconv.FormatBool(!isFileOp),
|
||||
},
|
||||
LocalDirs: map[string]string{
|
||||
builder.DefaultLocalNameDockerfile: dir,
|
||||
builder.DefaultLocalNameContext: dir,
|
||||
|
@ -601,6 +917,9 @@ COPY foo /
|
|||
require.NoError(t, err)
|
||||
|
||||
_, err = f.Solve(context.TODO(), c, client.SolveOpt{
|
||||
FrontendAttrs: map[string]string{
|
||||
"build-arg:BUILDKIT_DISABLE_FILEOP": strconv.FormatBool(!isFileOp),
|
||||
},
|
||||
LocalDirs: map[string]string{
|
||||
builder.DefaultLocalNameDockerfile: dir,
|
||||
builder.DefaultLocalNameContext: dir,
|
||||
|
@ -616,6 +935,7 @@ COPY foo /
|
|||
|
||||
func testCopySymlinks(t *testing.T, sb integration.Sandbox) {
|
||||
f := getFrontend(t, sb)
|
||||
isFileOp := getFileOp(t, sb)
|
||||
|
||||
dockerfile := []byte(`
|
||||
FROM scratch
|
||||
|
@ -628,7 +948,7 @@ COPY sub/l* alllinks/
|
|||
fstest.CreateFile("bar", []byte(`bar-contents`), 0600),
|
||||
fstest.Symlink("bar", "foo"),
|
||||
fstest.CreateDir("sub", 0700),
|
||||
fstest.CreateFile("sub/lfile", []byte(`baz-contents`), 0600),
|
||||
fstest.CreateFile("sub/lfile", []byte(`lfile-contents`), 0600),
|
||||
fstest.Symlink("subfile", "sub/l0"),
|
||||
fstest.CreateFile("sub/subfile", []byte(`subfile-contents`), 0600),
|
||||
fstest.Symlink("second", "sub/l1"),
|
||||
|
@ -642,13 +962,42 @@ COPY sub/l* alllinks/
|
|||
require.NoError(t, err)
|
||||
defer c.Close()
|
||||
|
||||
destDir, err := ioutil.TempDir("", "buildkit")
|
||||
require.NoError(t, err)
|
||||
defer os.RemoveAll(destDir)
|
||||
|
||||
_, err = f.Solve(context.TODO(), c, client.SolveOpt{
|
||||
Exports: []client.ExportEntry{
|
||||
{
|
||||
Type: client.ExporterLocal,
|
||||
OutputDir: destDir,
|
||||
},
|
||||
},
|
||||
FrontendAttrs: map[string]string{
|
||||
"build-arg:BUILDKIT_DISABLE_FILEOP": strconv.FormatBool(!isFileOp),
|
||||
},
|
||||
LocalDirs: map[string]string{
|
||||
builder.DefaultLocalNameDockerfile: dir,
|
||||
builder.DefaultLocalNameContext: dir,
|
||||
},
|
||||
}, nil)
|
||||
require.NoError(t, err)
|
||||
|
||||
dt, err := ioutil.ReadFile(filepath.Join(destDir, "foo"))
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, "bar-contents", string(dt))
|
||||
|
||||
dt, err = ioutil.ReadFile(filepath.Join(destDir, "alllinks/l0"))
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, "subfile-contents", string(dt))
|
||||
|
||||
dt, err = ioutil.ReadFile(filepath.Join(destDir, "alllinks/lfile"))
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, "lfile-contents", string(dt))
|
||||
|
||||
dt, err = ioutil.ReadFile(filepath.Join(destDir, "alllinks/l1"))
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, "baz-contents", string(dt))
|
||||
}
|
||||
|
||||
func testHTTPDockerfile(t *testing.T, sb integration.Sandbox) {
|
||||
|
@ -1309,6 +1658,83 @@ ADD %s /newname.tar.gz
|
|||
require.Equal(t, buf2.Bytes(), dt)
|
||||
}
|
||||
|
||||
func testDockerfileAddArchiveWildcard(t *testing.T, sb integration.Sandbox) {
|
||||
f := getFrontend(t, sb)
|
||||
|
||||
buf := bytes.NewBuffer(nil)
|
||||
tw := tar.NewWriter(buf)
|
||||
expectedContent := []byte("content0")
|
||||
err := tw.WriteHeader(&tar.Header{
|
||||
Name: "foo",
|
||||
Typeflag: tar.TypeReg,
|
||||
Size: int64(len(expectedContent)),
|
||||
Mode: 0644,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
_, err = tw.Write(expectedContent)
|
||||
require.NoError(t, err)
|
||||
err = tw.Close()
|
||||
require.NoError(t, err)
|
||||
|
||||
buf2 := bytes.NewBuffer(nil)
|
||||
tw = tar.NewWriter(buf2)
|
||||
expectedContent = []byte("content1")
|
||||
err = tw.WriteHeader(&tar.Header{
|
||||
Name: "bar",
|
||||
Typeflag: tar.TypeReg,
|
||||
Size: int64(len(expectedContent)),
|
||||
Mode: 0644,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
_, err = tw.Write(expectedContent)
|
||||
require.NoError(t, err)
|
||||
err = tw.Close()
|
||||
require.NoError(t, err)
|
||||
|
||||
dockerfile := []byte(`
|
||||
FROM scratch
|
||||
ADD *.tar /dest
|
||||
`)
|
||||
|
||||
dir, err := tmpdir(
|
||||
fstest.CreateFile("Dockerfile", dockerfile, 0600),
|
||||
fstest.CreateFile("t.tar", buf.Bytes(), 0600),
|
||||
fstest.CreateFile("b.tar", buf2.Bytes(), 0600),
|
||||
)
|
||||
require.NoError(t, err)
|
||||
defer os.RemoveAll(dir)
|
||||
|
||||
destDir, err := ioutil.TempDir("", "buildkit")
|
||||
require.NoError(t, err)
|
||||
defer os.RemoveAll(destDir)
|
||||
|
||||
c, err := client.New(context.TODO(), sb.Address())
|
||||
require.NoError(t, err)
|
||||
defer c.Close()
|
||||
|
||||
_, err = f.Solve(context.TODO(), c, client.SolveOpt{
|
||||
Exports: []client.ExportEntry{
|
||||
{
|
||||
Type: client.ExporterLocal,
|
||||
OutputDir: destDir,
|
||||
},
|
||||
},
|
||||
LocalDirs: map[string]string{
|
||||
builder.DefaultLocalNameDockerfile: dir,
|
||||
builder.DefaultLocalNameContext: dir,
|
||||
},
|
||||
}, nil)
|
||||
require.NoError(t, err)
|
||||
|
||||
dt, err := ioutil.ReadFile(filepath.Join(destDir, "dest/foo"))
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, "content0", string(dt))
|
||||
|
||||
dt, err = ioutil.ReadFile(filepath.Join(destDir, "dest/bar"))
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, "content1", string(dt))
|
||||
}
|
||||
|
||||
func testSymlinkDestination(t *testing.T, sb integration.Sandbox) {
|
||||
f := getFrontend(t, sb)
|
||||
f.RequiresBuildctl(t)
|
||||
|
@ -1848,6 +2274,7 @@ USER nobody
|
|||
|
||||
func testCopyChown(t *testing.T, sb integration.Sandbox) {
|
||||
f := getFrontend(t, sb)
|
||||
isFileOp := getFileOp(t, sb)
|
||||
|
||||
dockerfile := []byte(`
|
||||
FROM busybox AS base
|
||||
|
@ -1884,6 +2311,9 @@ COPY --from=base /out /
|
|||
OutputDir: destDir,
|
||||
},
|
||||
},
|
||||
FrontendAttrs: map[string]string{
|
||||
"build-arg:BUILDKIT_DISABLE_FILEOP": strconv.FormatBool(!isFileOp),
|
||||
},
|
||||
LocalDirs: map[string]string{
|
||||
builder.DefaultLocalNameDockerfile: dir,
|
||||
builder.DefaultLocalNameContext: dir,
|
||||
|
@ -1902,6 +2332,7 @@ COPY --from=base /out /
|
|||
|
||||
func testCopyOverrideFiles(t *testing.T, sb integration.Sandbox) {
|
||||
f := getFrontend(t, sb)
|
||||
isFileOp := getFileOp(t, sb)
|
||||
|
||||
dockerfile := []byte(`
|
||||
FROM scratch AS base
|
||||
|
@ -1939,6 +2370,9 @@ COPY files dest
|
|||
OutputDir: destDir,
|
||||
},
|
||||
},
|
||||
FrontendAttrs: map[string]string{
|
||||
"build-arg:BUILDKIT_DISABLE_FILEOP": strconv.FormatBool(!isFileOp),
|
||||
},
|
||||
LocalDirs: map[string]string{
|
||||
builder.DefaultLocalNameDockerfile: dir,
|
||||
builder.DefaultLocalNameContext: dir,
|
||||
|
@ -1957,6 +2391,7 @@ COPY files dest
|
|||
|
||||
func testCopyVarSubstitution(t *testing.T, sb integration.Sandbox) {
|
||||
f := getFrontend(t, sb)
|
||||
isFileOp := getFileOp(t, sb)
|
||||
|
||||
dockerfile := []byte(`
|
||||
FROM scratch AS base
|
||||
|
@ -1987,6 +2422,9 @@ COPY $FOO baz
|
|||
OutputDir: destDir,
|
||||
},
|
||||
},
|
||||
FrontendAttrs: map[string]string{
|
||||
"build-arg:BUILDKIT_DISABLE_FILEOP": strconv.FormatBool(!isFileOp),
|
||||
},
|
||||
LocalDirs: map[string]string{
|
||||
builder.DefaultLocalNameDockerfile: dir,
|
||||
builder.DefaultLocalNameContext: dir,
|
||||
|
@ -2001,6 +2439,7 @@ COPY $FOO baz
|
|||
|
||||
func testCopyWildcards(t *testing.T, sb integration.Sandbox) {
|
||||
f := getFrontend(t, sb)
|
||||
isFileOp := getFileOp(t, sb)
|
||||
|
||||
dockerfile := []byte(`
|
||||
FROM scratch AS base
|
||||
|
@ -2042,6 +2481,9 @@ COPY sub/dir1 subdest6
|
|||
OutputDir: destDir,
|
||||
},
|
||||
},
|
||||
FrontendAttrs: map[string]string{
|
||||
"build-arg:BUILDKIT_DISABLE_FILEOP": strconv.FormatBool(!isFileOp),
|
||||
},
|
||||
LocalDirs: map[string]string{
|
||||
builder.DefaultLocalNameDockerfile: dir,
|
||||
builder.DefaultLocalNameContext: dir,
|
||||
|
@ -2061,9 +2503,11 @@ COPY sub/dir1 subdest6
|
|||
require.NoError(t, err)
|
||||
require.Equal(t, "foo-contents", string(dt))
|
||||
|
||||
dt, err = ioutil.ReadFile(filepath.Join(destDir, "subdest/dir1/dir2/foo"))
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, "foo-contents", string(dt))
|
||||
if isFileOp { // non-fileop implementation is historically buggy
|
||||
dt, err = ioutil.ReadFile(filepath.Join(destDir, "subdest/dir2/foo"))
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, "foo-contents", string(dt))
|
||||
}
|
||||
|
||||
dt, err = ioutil.ReadFile(filepath.Join(destDir, "subdest2/foo"))
|
||||
require.NoError(t, err)
|
||||
|
@ -2090,6 +2534,57 @@ COPY sub/dir1 subdest6
|
|||
require.Equal(t, "foo-contents", string(dt))
|
||||
}
|
||||
|
||||
func testCopyRelative(t *testing.T, sb integration.Sandbox) {
|
||||
f := getFrontend(t, sb)
|
||||
isFileOp := getFileOp(t, sb)
|
||||
|
||||
dockerfile := []byte(`
|
||||
FROM busybox
|
||||
WORKDIR /test1
|
||||
WORKDIR test2
|
||||
RUN sh -c "[ "$PWD" = '/test1/test2' ]"
|
||||
COPY foo ./
|
||||
RUN sh -c "[ $(cat /test1/test2/foo) = 'hello' ]"
|
||||
ADD foo ./bar/baz
|
||||
RUN sh -c "[ $(cat /test1/test2/bar/baz) = 'hello' ]"
|
||||
COPY foo ./bar/baz2
|
||||
RUN sh -c "[ $(cat /test1/test2/bar/baz2) = 'hello' ]"
|
||||
WORKDIR ..
|
||||
COPY foo ./
|
||||
RUN sh -c "[ $(cat /test1/foo) = 'hello' ]"
|
||||
COPY foo /test3/
|
||||
RUN sh -c "[ $(cat /test3/foo) = 'hello' ]"
|
||||
WORKDIR /test4
|
||||
COPY . .
|
||||
RUN sh -c "[ $(cat /test4/foo) = 'hello' ]"
|
||||
WORKDIR /test5/test6
|
||||
COPY foo ../
|
||||
RUN sh -c "[ $(cat /test5/foo) = 'hello' ]"
|
||||
`)
|
||||
|
||||
dir, err := tmpdir(
|
||||
fstest.CreateFile("Dockerfile", dockerfile, 0600),
|
||||
fstest.CreateFile("foo", []byte(`hello`), 0600),
|
||||
)
|
||||
require.NoError(t, err)
|
||||
defer os.RemoveAll(dir)
|
||||
|
||||
c, err := client.New(context.TODO(), sb.Address())
|
||||
require.NoError(t, err)
|
||||
defer c.Close()
|
||||
|
||||
_, err = f.Solve(context.TODO(), c, client.SolveOpt{
|
||||
FrontendAttrs: map[string]string{
|
||||
"build-arg:BUILDKIT_DISABLE_FILEOP": strconv.FormatBool(!isFileOp),
|
||||
},
|
||||
LocalDirs: map[string]string{
|
||||
builder.DefaultLocalNameDockerfile: dir,
|
||||
builder.DefaultLocalNameContext: dir,
|
||||
},
|
||||
}, nil)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
func testDockerfileFromGit(t *testing.T, sb integration.Sandbox) {
|
||||
f := getFrontend(t, sb)
|
||||
|
||||
|
@ -3140,3 +3635,11 @@ func getFrontend(t *testing.T, sb integration.Sandbox) frontend {
|
|||
require.True(t, ok)
|
||||
return fn
|
||||
}
|
||||
|
||||
func getFileOp(t *testing.T, sb integration.Sandbox) bool {
|
||||
v := sb.Value("fileop")
|
||||
require.NotNil(t, v)
|
||||
vv, ok := v.(bool)
|
||||
require.True(t, ok)
|
||||
return vv
|
||||
}
|
||||
|
|
2
go.mod
2
go.mod
|
@ -49,7 +49,7 @@ require (
|
|||
github.com/sirupsen/logrus v1.0.3
|
||||
github.com/stretchr/testify v1.3.0
|
||||
github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8 // indirect
|
||||
github.com/tonistiigi/fsutil v0.0.0-20190314220245-1ec1983587cd
|
||||
github.com/tonistiigi/fsutil v0.0.0-20190316003333-2a10686c7e92
|
||||
github.com/tonistiigi/units v0.0.0-20180711220420-6950e57a87ea
|
||||
github.com/uber/jaeger-client-go v0.0.0-20180103221425-e02c85f9069e
|
||||
github.com/uber/jaeger-lib v1.2.1 // indirect
|
||||
|
|
4
go.sum
4
go.sum
|
@ -118,8 +118,8 @@ github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0
|
|||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8 h1:zLV6q4e8Jv9EHjNg/iHfzwDkCve6Ua5jCygptrtXHvI=
|
||||
github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
|
||||
github.com/tonistiigi/fsutil v0.0.0-20190314220245-1ec1983587cd h1:TT5pfgTCocfXTnVeEZRKSEsO8vAGM+OMZOSSXEO6ixw=
|
||||
github.com/tonistiigi/fsutil v0.0.0-20190314220245-1ec1983587cd/go.mod h1:pzh7kdwkDRh+Bx8J30uqaKJ1M4QrSH/um8fcIXeM8rc=
|
||||
github.com/tonistiigi/fsutil v0.0.0-20190316003333-2a10686c7e92 h1:+Njk7pGJkAqK0k007oRFmr9xSmZUA+VjV0SdW0ctqXs=
|
||||
github.com/tonistiigi/fsutil v0.0.0-20190316003333-2a10686c7e92/go.mod h1:pzh7kdwkDRh+Bx8J30uqaKJ1M4QrSH/um8fcIXeM8rc=
|
||||
github.com/tonistiigi/go-immutable-radix v0.0.0-20170803185627-826af9ccf0fe h1:pd7hrFSqUPxYS9IB+UMG1AB/8EXGXo17ssx0bSQ5L6Y=
|
||||
github.com/tonistiigi/go-immutable-radix v0.0.0-20170803185627-826af9ccf0fe/go.mod h1:/+MCh11CJf2oz0BXmlmqyopK/ad1rKkcOXPoYuPCJYU=
|
||||
github.com/tonistiigi/units v0.0.0-20180711220420-6950e57a87ea h1:SXhTLE6pb6eld/v/cCndK0AMpt1wiVFb/YYmqB3/QG0=
|
||||
|
|
|
@ -0,0 +1,266 @@
|
|||
package file
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/containerd/continuity/fs"
|
||||
"github.com/moby/buildkit/snapshot"
|
||||
"github.com/moby/buildkit/solver/llbsolver/ops/fileoptypes"
|
||||
"github.com/moby/buildkit/solver/pb"
|
||||
"github.com/pkg/errors"
|
||||
copy "github.com/tonistiigi/fsutil/copy"
|
||||
)
|
||||
|
||||
func timestampToTime(ts int64) *time.Time {
|
||||
if ts == -1 {
|
||||
return nil
|
||||
}
|
||||
tm := time.Unix(ts/1e9, ts%1e9)
|
||||
return &tm
|
||||
}
|
||||
|
||||
func mkdir(ctx context.Context, d string, action pb.FileActionMkDir, user *copy.ChownOpt) error {
|
||||
p, err := fs.RootPath(d, filepath.Join(filepath.Join("/", action.Path)))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if action.MakeParents {
|
||||
if err := copy.MkdirAll(p, os.FileMode(action.Mode)&0777, user, timestampToTime(action.Timestamp)); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
if err := os.Mkdir(p, os.FileMode(action.Mode)&0777); err != nil {
|
||||
if os.IsExist(err) {
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
if err := copy.Chown(p, user); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := copy.Utimes(p, timestampToTime(action.Timestamp)); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func mkfile(ctx context.Context, d string, action pb.FileActionMkFile, user *copy.ChownOpt) error {
|
||||
p, err := fs.RootPath(d, filepath.Join(filepath.Join("/", action.Path)))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := ioutil.WriteFile(p, action.Data, os.FileMode(action.Mode)&0777); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := copy.Chown(p, user); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := copy.Utimes(p, timestampToTime(action.Timestamp)); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func rm(ctx context.Context, d string, action pb.FileActionRm) error {
|
||||
p, err := fs.RootPath(d, filepath.Join(filepath.Join("/", action.Path)))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := os.RemoveAll(p); err != nil {
|
||||
if os.IsNotExist(errors.Cause(err)) && action.AllowNotFound {
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func docopy(ctx context.Context, src, dest string, action pb.FileActionCopy, u *copy.ChownOpt) error {
|
||||
srcPath := cleanPath(action.Src)
|
||||
destPath := cleanPath(action.Dest)
|
||||
|
||||
if !action.CreateDestPath {
|
||||
p, err := fs.RootPath(dest, filepath.Join(filepath.Join("/", action.Dest)))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := os.Lstat(filepath.Dir(p)); err != nil {
|
||||
return errors.Wrapf(err, "failed to stat %s", action.Dest)
|
||||
}
|
||||
}
|
||||
|
||||
xattrErrorHandler := func(dst, src, key string, err error) error {
|
||||
log.Println(err)
|
||||
return nil
|
||||
}
|
||||
|
||||
opt := []copy.Opt{
|
||||
func(ci *copy.CopyInfo) {
|
||||
ci.Chown = u
|
||||
ci.Utime = timestampToTime(action.Timestamp)
|
||||
if m := int(action.Mode); m != -1 {
|
||||
ci.Mode = &m
|
||||
}
|
||||
ci.CopyDirContents = action.DirCopyContents
|
||||
ci.FollowLinks = action.FollowSymlink
|
||||
},
|
||||
copy.WithXAttrErrorHandler(xattrErrorHandler),
|
||||
}
|
||||
|
||||
if !action.AllowWildcard {
|
||||
if action.AttemptUnpackDockerCompatibility {
|
||||
if ok, err := unpack(ctx, src, srcPath, dest, destPath, u, timestampToTime(action.Timestamp)); err != nil {
|
||||
return err
|
||||
} else if ok {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
return copy.Copy(ctx, src, srcPath, dest, destPath, opt...)
|
||||
}
|
||||
|
||||
m, err := copy.ResolveWildcards(src, srcPath, action.FollowSymlink)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if len(m) == 0 {
|
||||
if action.AllowEmptyWildcard {
|
||||
return nil
|
||||
}
|
||||
return errors.Errorf("%s not found", srcPath)
|
||||
}
|
||||
|
||||
for _, s := range m {
|
||||
if action.AttemptUnpackDockerCompatibility {
|
||||
if ok, err := unpack(ctx, src, s, dest, destPath, u, timestampToTime(action.Timestamp)); err != nil {
|
||||
return err
|
||||
} else if ok {
|
||||
continue
|
||||
}
|
||||
}
|
||||
if err := copy.Copy(ctx, src, s, dest, destPath, opt...); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func cleanPath(s string) string {
|
||||
s2 := filepath.Join("/", s)
|
||||
if strings.HasSuffix(s, "/.") {
|
||||
if s2 != "/" {
|
||||
s2 += "/"
|
||||
}
|
||||
s2 += "."
|
||||
} else if strings.HasSuffix(s, "/") && s2 != "/" {
|
||||
s2 += "/"
|
||||
}
|
||||
return s2
|
||||
}
|
||||
|
||||
type Backend struct {
|
||||
}
|
||||
|
||||
func (fb *Backend) Mkdir(ctx context.Context, m, user, group fileoptypes.Mount, action pb.FileActionMkDir) error {
|
||||
mnt, ok := m.(*Mount)
|
||||
if !ok {
|
||||
return errors.Errorf("invalid mount type %T", m)
|
||||
}
|
||||
|
||||
lm := snapshot.LocalMounter(mnt.m)
|
||||
dir, err := lm.Mount()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer lm.Unmount()
|
||||
|
||||
u, err := readUser(action.Owner, user, group)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return mkdir(ctx, dir, action, u)
|
||||
}
|
||||
|
||||
// Mkfile mounts m locally and delegates to mkfile to create the file
// described by action. user and group are optional mounts backing
// chown-by-name lookups (see readUser).
func (fb *Backend) Mkfile(ctx context.Context, m, user, group fileoptypes.Mount, action pb.FileActionMkFile) error {
	mnt, ok := m.(*Mount)
	if !ok {
		return errors.Errorf("invalid mount type %T", m)
	}

	// Expose the snapshot as a local directory for plain file ops.
	lm := snapshot.LocalMounter(mnt.m)
	dir, err := lm.Mount()
	if err != nil {
		return err
	}
	defer lm.Unmount()

	// Resolve the requested ownership (may read passwd/group files
	// from the user/group mounts).
	u, err := readUser(action.Owner, user, group)
	if err != nil {
		return err
	}

	return mkfile(ctx, dir, action, u)
}
|
||||
func (fb *Backend) Rm(ctx context.Context, m fileoptypes.Mount, action pb.FileActionRm) error {
|
||||
mnt, ok := m.(*Mount)
|
||||
if !ok {
|
||||
return errors.Errorf("invalid mount type %T", m)
|
||||
}
|
||||
|
||||
lm := snapshot.LocalMounter(mnt.m)
|
||||
dir, err := lm.Mount()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer lm.Unmount()
|
||||
|
||||
return rm(ctx, dir, action)
|
||||
}
|
||||
// Copy mounts the source (m1) and destination (m2) snapshots locally
// and delegates to docopy to perform the copy described by action.
// user and group are optional mounts backing chown-by-name lookups
// (see readUser).
func (fb *Backend) Copy(ctx context.Context, m1, m2, user, group fileoptypes.Mount, action pb.FileActionCopy) error {
	mnt1, ok := m1.(*Mount)
	if !ok {
		return errors.Errorf("invalid mount type %T", m1)
	}
	mnt2, ok := m2.(*Mount)
	if !ok {
		return errors.Errorf("invalid mount type %T", m2)
	}

	// Mount the source; unmounted after the destination via LIFO defers.
	lm := snapshot.LocalMounter(mnt1.m)
	src, err := lm.Mount()
	if err != nil {
		return err
	}
	defer lm.Unmount()

	// Mount the destination.
	lm2 := snapshot.LocalMounter(mnt2.m)
	dest, err := lm2.Mount()
	if err != nil {
		return err
	}
	defer lm2.Unmount()

	// Resolve the requested ownership (may read passwd/group files
	// from the user/group mounts).
	u, err := readUser(action.Owner, user, group)
	if err != nil {
		return err
	}

	return docopy(ctx, src, dest, action, u)
}
|
|
@ -0,0 +1,71 @@
|
|||
package file
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/moby/buildkit/cache"
|
||||
"github.com/moby/buildkit/snapshot"
|
||||
"github.com/moby/buildkit/solver/llbsolver/ops/fileoptypes"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
func NewRefManager(cm cache.Manager) *RefManager {
|
||||
return &RefManager{cm: cm}
|
||||
}
|
||||
|
||||
// RefManager turns cache refs into locally usable mounts for fileop
// execution and commits mutated mounts back into new refs.
type RefManager struct {
	cm cache.Manager // backing cache used to allocate new mutable refs
}
|
||||
|
||||
// Prepare turns ref into a Mount usable by the backend. A nil ref
// means scratch (empty) input. A readonly immutable ref is mounted
// directly with no mutable ref attached; otherwise a new mutable ref
// is allocated on top of ref so the fileop can modify it and later
// Commit the result.
func (rm *RefManager) Prepare(ctx context.Context, ref fileoptypes.Ref, readonly bool) (fileoptypes.Mount, error) {
	ir, ok := ref.(cache.ImmutableRef)
	if !ok && ref != nil {
		return nil, errors.Errorf("invalid ref type: %T", ref)
	}

	// Readonly immutable input: mount as-is, nothing to commit later.
	if ir != nil && readonly {
		m, err := ir.Mount(ctx, readonly)
		if err != nil {
			return nil, err
		}
		return &Mount{m: m}, nil
	}

	// Writable or scratch case: layer a new mutable ref on ir
	// (nil ir yields an empty base).
	mr, err := rm.cm.New(ctx, ir, cache.WithDescription("fileop target"), cache.CachePolicyRetain)
	if err != nil {
		return nil, err
	}
	m, err := mr.Mount(ctx, readonly)
	if err != nil {
		return nil, err
	}
	return &Mount{m: m, mr: mr}, nil
}
|
||||
|
||||
func (rm *RefManager) Commit(ctx context.Context, mount fileoptypes.Mount) (fileoptypes.Ref, error) {
|
||||
m, ok := mount.(*Mount)
|
||||
if !ok {
|
||||
return nil, errors.Errorf("invalid mount type %T", mount)
|
||||
}
|
||||
if err := m.m.Release(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if m.mr == nil {
|
||||
return nil, errors.Errorf("invalid mount without active ref for commit")
|
||||
}
|
||||
return m.mr.Commit(ctx)
|
||||
}
|
||||
|
||||
// Mount pairs a locally mountable snapshot with the mutable ref that
// backs it. mr is nil for readonly mounts prepared directly from an
// immutable ref (such mounts cannot be committed).
type Mount struct {
	m  snapshot.Mountable
	mr cache.MutableRef
}
|
||||
|
||||
// Release releases the local mountable and, if this Mount owns an
// active mutable ref, releases that too.
// NOTE(review): the error from m.m.Release() is discarded and only
// the ref release error is surfaced — confirm this is intentional.
func (m *Mount) Release(ctx context.Context) error {
	m.m.Release()
	if m.mr != nil {
		return m.mr.Release(ctx)
	}
	return nil
}
|
||||
// IsFileOpMount is a marker method identifying *Mount as a fileop mount.
func (m *Mount) IsFileOpMount() {}
|
|
@ -0,0 +1,61 @@
|
|||
package file
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"context"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/containerd/continuity/fs"
|
||||
"github.com/docker/docker/pkg/archive"
|
||||
"github.com/docker/docker/pkg/chrootarchive"
|
||||
copy "github.com/tonistiigi/fsutil/copy"
|
||||
)
|
||||
|
||||
// unpack checks whether src (resolved inside srcRoot) is a local
// archive file and, if so, extracts it into dest (resolved inside
// destRoot), creating dest first with the given ownership and
// timestamp. It returns true when extraction was performed and false
// when src is not an archive, in which case the caller falls back to
// a plain copy. This implements the Dockerfile-compatible ADD
// auto-extraction behavior.
func unpack(ctx context.Context, srcRoot string, src string, destRoot string, dest string, user *copy.ChownOpt, tm *time.Time) (bool, error) {
	// Resolve src within srcRoot without escaping the root.
	src, err := fs.RootPath(srcRoot, src)
	if err != nil {
		return false, err
	}
	if !isArchivePath(src) {
		return false, nil
	}

	dest, err = fs.RootPath(destRoot, dest)
	if err != nil {
		return false, err
	}
	// Create the destination directory tree with requested owner/time.
	if err := copy.MkdirAll(dest, 0755, user, tm); err != nil {
		return false, err
	}

	file, err := os.Open(src)
	if err != nil {
		return false, err
	}
	defer file.Close()

	// Untar inside a chroot to guard against path traversal during
	// extraction.
	return true, chrootarchive.Untar(file, dest, nil)
}
|
||||
|
||||
func isArchivePath(path string) bool {
|
||||
fi, err := os.Lstat(path)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
if fi.Mode()&os.ModeType != 0 {
|
||||
return false
|
||||
}
|
||||
file, err := os.Open(path)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
defer file.Close()
|
||||
rdr, err := archive.DecompressStream(file)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
r := tar.NewReader(rdr)
|
||||
_, err = r.Next()
|
||||
return err == nil
|
||||
}
|
|
@ -0,0 +1,119 @@
|
|||
package file
|
||||
|
||||
import (
|
||||
"os"
|
||||
|
||||
"github.com/containerd/continuity/fs"
|
||||
"github.com/moby/buildkit/snapshot"
|
||||
"github.com/moby/buildkit/solver/llbsolver/ops/fileoptypes"
|
||||
"github.com/moby/buildkit/solver/pb"
|
||||
"github.com/opencontainers/runc/libcontainer/user"
|
||||
"github.com/pkg/errors"
|
||||
copy "github.com/tonistiigi/fsutil/copy"
|
||||
)
|
||||
|
||||
// readUser resolves a pb.ChownOpt into concrete uid/gid values.
// By-ID entries are used directly; by-name entries are looked up in
// the passwd/group files inside the mu (user) and mg (group) mounts.
// Returns nil when no ownership was requested.
// NOTE(review): when a by-name lookup finds no matching entry, uid
// and gid silently remain 0 (root) — confirm this fallback is
// intended rather than an error.
func readUser(chopt *pb.ChownOpt, mu, mg fileoptypes.Mount) (*copy.ChownOpt, error) {
	if chopt == nil {
		return nil, nil
	}
	var us copy.ChownOpt
	if chopt.User != nil {
		switch u := chopt.User.User.(type) {
		case *pb.UserOpt_ByName:
			// Name lookup requires a mount containing /etc/passwd.
			if mu == nil {
				return nil, errors.Errorf("invalid missing user mount")
			}
			mmu, ok := mu.(*Mount)
			if !ok {
				return nil, errors.Errorf("invalid mount type %T", mu)
			}
			lm := snapshot.LocalMounter(mmu.m)
			dir, err := lm.Mount()
			if err != nil {
				return nil, err
			}
			defer lm.Unmount()

			passwdPath, err := user.GetPasswdPath()
			if err != nil {
				return nil, err
			}

			// Resolve the passwd path inside the mounted root so
			// symlinks cannot escape it.
			passwdPath, err = fs.RootPath(dir, passwdPath)
			if err != nil {
				return nil, err
			}

			ufile, err := os.Open(passwdPath)
			if err != nil {
				return nil, err
			}
			defer ufile.Close()

			// Scan passwd entries for the requested user name.
			users, err := user.ParsePasswdFilter(ufile, func(uu user.User) bool {
				return uu.Name == u.ByName.Name
			})
			if err != nil {
				return nil, err
			}

			if len(users) > 0 {
				// The user's primary group is the default gid; it may
				// be overridden by the Group branch below.
				us.Uid = users[0].Uid
				us.Gid = users[0].Gid
			}
		case *pb.UserOpt_ByID:
			// Numeric id: use it for both uid and gid by default.
			us.Uid = int(u.ByID)
			us.Gid = int(u.ByID)
		}
	}

	if chopt.Group != nil {
		switch u := chopt.Group.User.(type) {
		case *pb.UserOpt_ByName:
			// Name lookup requires a mount containing /etc/group.
			if mg == nil {
				return nil, errors.Errorf("invalid missing group mount")
			}
			mmg, ok := mg.(*Mount)
			if !ok {
				return nil, errors.Errorf("invalid mount type %T", mg)
			}
			lm := snapshot.LocalMounter(mmg.m)
			dir, err := lm.Mount()
			if err != nil {
				return nil, err
			}
			defer lm.Unmount()

			groupPath, err := user.GetGroupPath()
			if err != nil {
				return nil, err
			}

			// Resolve the group path inside the mounted root so
			// symlinks cannot escape it.
			groupPath, err = fs.RootPath(dir, groupPath)
			if err != nil {
				return nil, err
			}

			gfile, err := os.Open(groupPath)
			if err != nil {
				return nil, err
			}
			defer gfile.Close()

			// Scan group entries for the requested group name.
			groups, err := user.ParseGroupFilter(gfile, func(g user.Group) bool {
				return g.Name == u.ByName.Name
			})
			if err != nil {
				return nil, err
			}

			if len(groups) > 0 {
				us.Gid = groups[0].Gid
			}
		case *pb.UserOpt_ByID:
			us.Gid = int(u.ByID)
		}
	}

	return &us, nil
}
|
|
@ -149,7 +149,7 @@ func (e *execOp) CacheMap(ctx context.Context, index int) (*solver.CacheMap, boo
|
|||
cm.Deps[i].Selector = digest.FromBytes(bytes.Join(dgsts, []byte{0}))
|
||||
}
|
||||
if !dep.NoContentBasedHash {
|
||||
cm.Deps[i].ComputeDigestFunc = llbsolver.NewContentHashFunc(dedupePaths(dep.Selectors))
|
||||
cm.Deps[i].ComputeDigestFunc = llbsolver.NewContentHashFunc(toSelectors(dedupePaths(dep.Selectors)))
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -180,6 +180,14 @@ func dedupePaths(inp []string) []string {
|
|||
return paths
|
||||
}
|
||||
|
||||
func toSelectors(p []string) []llbsolver.Selector {
|
||||
sel := make([]llbsolver.Selector, 0, len(p))
|
||||
for _, p := range p {
|
||||
sel = append(sel, llbsolver.Selector{Path: p, FollowLinks: true})
|
||||
}
|
||||
return sel
|
||||
}
|
||||
|
||||
type dep struct {
|
||||
Selectors []string
|
||||
NoContentBasedHash bool
|
||||
|
|
|
@ -0,0 +1,580 @@
|
|||
package ops
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"path"
|
||||
"runtime"
|
||||
"sort"
|
||||
"sync"
|
||||
|
||||
"github.com/moby/buildkit/cache"
|
||||
"github.com/moby/buildkit/cache/metadata"
|
||||
"github.com/moby/buildkit/solver"
|
||||
"github.com/moby/buildkit/solver/llbsolver"
|
||||
"github.com/moby/buildkit/solver/llbsolver/file"
|
||||
"github.com/moby/buildkit/solver/llbsolver/ops/fileoptypes"
|
||||
"github.com/moby/buildkit/solver/pb"
|
||||
"github.com/moby/buildkit/util/flightcontrol"
|
||||
"github.com/moby/buildkit/worker"
|
||||
digest "github.com/opencontainers/go-digest"
|
||||
"github.com/pkg/errors"
|
||||
"golang.org/x/sync/errgroup"
|
||||
)
|
||||
|
||||
const fileCacheType = "buildkit.file.v0"
|
||||
|
||||
// fileOp is the solver op for pb.FileOp: a sequence of file actions
// (mkdir, mkfile, rm, copy) executed over the vertex's input refs.
type fileOp struct {
	op     *pb.FileOp
	md     *metadata.Store
	w      worker.Worker
	solver *FileOpSolver
	// numInputs is the number of vertex inputs, used to validate
	// action input indexes in CacheMap.
	numInputs int
}
|
||||
|
||||
// NewFileOp constructs a fileOp for vertex v, wiring a FileOpSolver
// backed by the local-mount Backend and a RefManager over cm.
func NewFileOp(v solver.Vertex, op *pb.Op_File, cm cache.Manager, md *metadata.Store, w worker.Worker) (solver.Op, error) {
	return &fileOp{
		op:        op.File,
		md:        md,
		numInputs: len(v.Inputs()),
		w:         w,
		solver:    NewFileOpSolver(&file.Backend{}, file.NewRefManager(cm)),
	}, nil
}
|
||||
|
||||
func (f *fileOp) CacheMap(ctx context.Context, index int) (*solver.CacheMap, bool, error) {
|
||||
selectors := map[int]map[llbsolver.Selector]struct{}{}
|
||||
invalidSelectors := map[int]struct{}{}
|
||||
|
||||
actions := make([][]byte, 0, len(f.op.Actions))
|
||||
|
||||
markInvalid := func(idx pb.InputIndex) {
|
||||
if idx != -1 {
|
||||
invalidSelectors[int(idx)] = struct{}{}
|
||||
}
|
||||
}
|
||||
|
||||
for _, action := range f.op.Actions {
|
||||
var dt []byte
|
||||
var err error
|
||||
switch a := action.Action.(type) {
|
||||
case *pb.FileAction_Mkdir:
|
||||
p := *a.Mkdir
|
||||
markInvalid(action.Input)
|
||||
processOwner(p.Owner, selectors)
|
||||
dt, err = json.Marshal(p)
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
case *pb.FileAction_Mkfile:
|
||||
p := *a.Mkfile
|
||||
markInvalid(action.Input)
|
||||
processOwner(p.Owner, selectors)
|
||||
dt, err = json.Marshal(p)
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
case *pb.FileAction_Rm:
|
||||
p := *a.Rm
|
||||
markInvalid(action.Input)
|
||||
dt, err = json.Marshal(p)
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
case *pb.FileAction_Copy:
|
||||
p := *a.Copy
|
||||
markInvalid(action.Input)
|
||||
processOwner(p.Owner, selectors)
|
||||
if action.SecondaryInput != -1 && int(action.SecondaryInput) < f.numInputs {
|
||||
addSelector(selectors, int(action.SecondaryInput), p.Src, p.AllowWildcard, p.FollowSymlink)
|
||||
p.Src = path.Base(p.Src)
|
||||
}
|
||||
dt, err = json.Marshal(p)
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
}
|
||||
|
||||
actions = append(actions, dt)
|
||||
}
|
||||
|
||||
dt, err := json.Marshal(struct {
|
||||
Type string
|
||||
Actions [][]byte
|
||||
}{
|
||||
Type: fileCacheType,
|
||||
Actions: actions,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
|
||||
cm := &solver.CacheMap{
|
||||
Digest: digest.FromBytes(dt),
|
||||
Deps: make([]struct {
|
||||
Selector digest.Digest
|
||||
ComputeDigestFunc solver.ResultBasedCacheFunc
|
||||
}, f.numInputs),
|
||||
}
|
||||
|
||||
for idx, m := range selectors {
|
||||
if _, ok := invalidSelectors[idx]; ok {
|
||||
continue
|
||||
}
|
||||
dgsts := make([][]byte, 0, len(m))
|
||||
for k := range m {
|
||||
dgsts = append(dgsts, []byte(k.Path))
|
||||
}
|
||||
sort.Slice(dgsts, func(i, j int) bool {
|
||||
return bytes.Compare(dgsts[i], dgsts[j]) > 0
|
||||
})
|
||||
cm.Deps[idx].Selector = digest.FromBytes(bytes.Join(dgsts, []byte{0}))
|
||||
|
||||
cm.Deps[idx].ComputeDigestFunc = llbsolver.NewContentHashFunc(dedupeSelectors(m))
|
||||
}
|
||||
|
||||
return cm, true, nil
|
||||
}
|
||||
|
||||
func (f *fileOp) Exec(ctx context.Context, inputs []solver.Result) ([]solver.Result, error) {
|
||||
inpRefs := make([]fileoptypes.Ref, 0, len(inputs))
|
||||
for _, inp := range inputs {
|
||||
workerRef, ok := inp.Sys().(*worker.WorkerRef)
|
||||
if !ok {
|
||||
return nil, errors.Errorf("invalid reference for exec %T", inp.Sys())
|
||||
}
|
||||
inpRefs = append(inpRefs, workerRef.ImmutableRef)
|
||||
}
|
||||
|
||||
outs, err := f.solver.Solve(ctx, inpRefs, f.op.Actions)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
outResults := make([]solver.Result, 0, len(outs))
|
||||
for _, out := range outs {
|
||||
outResults = append(outResults, worker.NewWorkerRefResult(out.(cache.ImmutableRef), f.w))
|
||||
}
|
||||
|
||||
return outResults, nil
|
||||
}
|
||||
|
||||
func addSelector(m map[int]map[llbsolver.Selector]struct{}, idx int, sel string, wildcard, followLinks bool) {
|
||||
mm, ok := m[idx]
|
||||
if !ok {
|
||||
mm = map[llbsolver.Selector]struct{}{}
|
||||
m[idx] = mm
|
||||
}
|
||||
s := llbsolver.Selector{Path: sel}
|
||||
|
||||
if wildcard && containsWildcards(sel) {
|
||||
s.Wildcard = true
|
||||
}
|
||||
if followLinks {
|
||||
s.FollowLinks = true
|
||||
}
|
||||
mm[s] = struct{}{}
|
||||
}
|
||||
|
||||
// containsWildcards reports whether name contains an unescaped glob
// metacharacter (*, ? or [). On non-Windows platforms a backslash
// escapes the following byte, so an escaped metacharacter does not
// count; on Windows the backslash is a path separator and escapes
// nothing.
func containsWildcards(name string) bool {
	escapes := runtime.GOOS != "windows"
	for i := 0; i < len(name); i++ {
		switch name[i] {
		case '\\':
			if escapes {
				i++ // skip the escaped byte
			}
		case '*', '?', '[':
			return true
		}
	}
	return false
}
|
||||
|
||||
// dedupeSelectors flattens the selector set into a slice sorted by
// path. Non-wildcard paths are deduplicated via dedupePaths (dropping
// paths covered by a parent), separately for the plain and
// follow-links groups; wildcard selectors are kept as-is.
func dedupeSelectors(m map[llbsolver.Selector]struct{}) []llbsolver.Selector {
	paths := make([]string, 0, len(m))
	pathsFollow := make([]string, 0, len(m))
	for sel := range m {
		if !sel.Wildcard {
			if sel.FollowLinks {
				pathsFollow = append(pathsFollow, sel.Path)
			} else {
				paths = append(paths, sel.Path)
			}
		}
	}
	paths = dedupePaths(paths)
	pathsFollow = dedupePaths(pathsFollow)
	selectors := make([]llbsolver.Selector, 0, len(m))

	for _, p := range paths {
		selectors = append(selectors, llbsolver.Selector{Path: p})
	}
	for _, p := range pathsFollow {
		selectors = append(selectors, llbsolver.Selector{Path: p, FollowLinks: true})
	}

	for sel := range m {
		if sel.Wildcard {
			selectors = append(selectors, sel)
		}
	}

	// NOTE(review): sort.Slice is unstable and compares Path only, so
	// selectors sharing a path (e.g. wildcard vs non-wildcard) may
	// order nondeterministically — confirm downstream hashing is
	// insensitive to that.
	sort.Slice(selectors, func(i, j int) bool {
		return selectors[i].Path < selectors[j].Path
	})

	return selectors
}
|
||||
|
||||
func processOwner(chopt *pb.ChownOpt, selectors map[int]map[llbsolver.Selector]struct{}) error {
|
||||
if chopt == nil {
|
||||
return nil
|
||||
}
|
||||
if chopt.User != nil {
|
||||
if u, ok := chopt.User.User.(*pb.UserOpt_ByName); ok {
|
||||
if u.ByName.Input < 0 {
|
||||
return errors.Errorf("invalid user index %d", u.ByName.Input)
|
||||
}
|
||||
addSelector(selectors, int(u.ByName.Input), "/etc/passwd", false, true)
|
||||
}
|
||||
}
|
||||
if chopt.Group != nil {
|
||||
if u, ok := chopt.Group.User.(*pb.UserOpt_ByName); ok {
|
||||
if u.ByName.Input < 0 {
|
||||
return errors.Errorf("invalid user index %d", u.ByName.Input)
|
||||
}
|
||||
addSelector(selectors, int(u.ByName.Input), "/etc/group", false, true)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func NewFileOpSolver(b fileoptypes.Backend, r fileoptypes.RefManager) *FileOpSolver {
|
||||
return &FileOpSolver{
|
||||
b: b,
|
||||
r: r,
|
||||
outs: map[int]int{},
|
||||
ins: map[int]input{},
|
||||
}
|
||||
}
|
||||
|
||||
// FileOpSolver executes a DAG of file actions, resolving each
// action's inputs on demand and deduplicating concurrent resolution
// of the same index.
type FileOpSolver struct {
	b fileoptypes.Backend    // executes individual actions
	r fileoptypes.RefManager // prepares mounts and commits refs

	mu   sync.Mutex          // guards ins and outs
	outs map[int]int         // output index -> input/action index
	ins  map[int]input       // resolved (or pending) state per index
	g    flightcontrol.Group // dedupes concurrent getInput calls
}
|
||||
|
||||
// input is the resolved state of one index in the action graph:
// either a committed ref or a live mount, plus a flag recorded before
// resolution.
type input struct {
	requiresCommit bool              // some consumer needs a committed ref, not a mount
	mount          fileoptypes.Mount // live mount result (when no commit required)
	ref            fileoptypes.Ref   // committed result
}
|
||||
|
||||
func (s *FileOpSolver) Solve(ctx context.Context, inputs []fileoptypes.Ref, actions []*pb.FileAction) ([]fileoptypes.Ref, error) {
|
||||
for i, a := range actions {
|
||||
if int(a.Input) < -1 || int(a.Input) >= len(inputs)+len(actions) {
|
||||
return nil, errors.Errorf("invalid input index %d, %d provided", a.Input, len(inputs)+len(actions))
|
||||
}
|
||||
if int(a.SecondaryInput) < -1 || int(a.SecondaryInput) >= len(inputs)+len(actions) {
|
||||
return nil, errors.Errorf("invalid secondary input index %d, %d provided", a.Input, len(inputs))
|
||||
}
|
||||
|
||||
inp, ok := s.ins[int(a.Input)]
|
||||
if ok {
|
||||
inp.requiresCommit = true
|
||||
}
|
||||
s.ins[int(a.Input)] = inp
|
||||
|
||||
inp, ok = s.ins[int(a.SecondaryInput)]
|
||||
if ok {
|
||||
inp.requiresCommit = true
|
||||
}
|
||||
s.ins[int(a.SecondaryInput)] = inp
|
||||
|
||||
if a.Output != -1 {
|
||||
if _, ok := s.outs[int(a.Output)]; ok {
|
||||
return nil, errors.Errorf("duplicate output %d", a.Output)
|
||||
}
|
||||
idx := len(inputs) + i
|
||||
s.outs[int(a.Output)] = idx
|
||||
s.ins[idx] = input{requiresCommit: true}
|
||||
}
|
||||
}
|
||||
|
||||
if len(s.outs) == 0 {
|
||||
return nil, errors.Errorf("no outputs specified")
|
||||
}
|
||||
|
||||
for i := 0; i < len(s.outs); i++ {
|
||||
if _, ok := s.outs[i]; !ok {
|
||||
return nil, errors.Errorf("missing output index %d", i)
|
||||
}
|
||||
}
|
||||
|
||||
defer func() {
|
||||
for _, in := range s.ins {
|
||||
if in.ref == nil && in.mount != nil {
|
||||
in.mount.Release(context.TODO())
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
outs := make([]fileoptypes.Ref, len(s.outs))
|
||||
|
||||
eg, ctx := errgroup.WithContext(ctx)
|
||||
for i, idx := range s.outs {
|
||||
func(i, idx int) {
|
||||
eg.Go(func() error {
|
||||
if err := s.validate(idx, inputs, actions, nil); err != nil {
|
||||
return err
|
||||
}
|
||||
inp, err := s.getInput(ctx, idx, inputs, actions)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
outs[i] = inp.ref
|
||||
return nil
|
||||
})
|
||||
}(i, idx)
|
||||
}
|
||||
|
||||
if err := eg.Wait(); err != nil {
|
||||
for _, r := range outs {
|
||||
if r != nil {
|
||||
r.Release(context.TODO())
|
||||
}
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return outs, nil
|
||||
}
|
||||
|
||||
func (s *FileOpSolver) validate(idx int, inputs []fileoptypes.Ref, actions []*pb.FileAction, loaded []int) error {
|
||||
for _, check := range loaded {
|
||||
if idx == check {
|
||||
return errors.Errorf("loop from index %d", idx)
|
||||
}
|
||||
}
|
||||
if idx < len(inputs) {
|
||||
return nil
|
||||
}
|
||||
loaded = append(loaded, idx)
|
||||
action := actions[idx-len(inputs)]
|
||||
for _, inp := range []int{int(action.Input), int(action.SecondaryInput)} {
|
||||
if err := s.validate(inp, inputs, actions, loaded); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// getInput resolves the state at index idx. For idx < len(inputs) it
// is the corresponding input ref; otherwise action idx-len(inputs) is
// executed after recursively resolving its inputs, producing either a
// committed ref (when a consumer requires it) or a live mount.
// Concurrent calls for the same index are deduplicated through the
// flightcontrol group.
func (s *FileOpSolver) getInput(ctx context.Context, idx int, inputs []fileoptypes.Ref, actions []*pb.FileAction) (input, error) {
	inp, err := s.g.Do(ctx, fmt.Sprintf("inp-%d", idx), func(ctx context.Context) (_ interface{}, err error) {
		s.mu.Lock()
		inp := s.ins[idx]
		s.mu.Unlock()
		// Already resolved by an earlier call.
		if inp.mount != nil || inp.ref != nil {
			return inp, nil
		}

		// Plain input ref: record and return it directly.
		if idx < len(inputs) {
			inp.ref = inputs[idx]
			s.mu.Lock()
			s.ins[idx] = inp
			s.mu.Unlock()
			return inp, nil
		}

		var inpMount, inpMountSecondary fileoptypes.Mount
		var toRelease []fileoptypes.Mount
		var inpMountPrepared bool
		// Always release temporary (readonly) mounts; release the
		// primary mount only on failure and only if this call
		// prepared it (a mount inherited from another action is
		// released by Solve's cleanup).
		defer func() {
			for _, m := range toRelease {
				m.Release(context.TODO())
			}
			if err != nil && inpMount != nil && inpMountPrepared {
				inpMount.Release(context.TODO())
			}
		}()

		action := actions[idx-len(inputs)]

		// loadInput prepares the primary (writable) mount for
		// action.Input.
		loadInput := func(ctx context.Context) func() error {
			return func() error {
				inp, err := s.getInput(ctx, int(action.Input), inputs, actions)
				if err != nil {
					return err
				}
				if inp.ref != nil {
					m, err := s.r.Prepare(ctx, inp.ref, false)
					if err != nil {
						return err
					}
					inpMount = m
					inpMountPrepared = true
					return nil
				}
				inpMount = inp.mount
				return nil
			}
		}

		// loadSecondaryInput prepares the readonly source mount for
		// copy actions.
		loadSecondaryInput := func(ctx context.Context) func() error {
			return func() error {
				inp, err := s.getInput(ctx, int(action.SecondaryInput), inputs, actions)
				if err != nil {
					return err
				}
				if inp.ref != nil {
					m, err := s.r.Prepare(ctx, inp.ref, true)
					if err != nil {
						return err
					}
					inpMountSecondary = m
					toRelease = append(toRelease, m)
					return nil
				}
				inpMountSecondary = inp.mount
				return nil
			}
		}

		// loadUser resolves the mount backing a chown-by-name lookup;
		// by-id (and nil) ownership needs no mount.
		loadUser := func(ctx context.Context, uopt *pb.UserOpt) (fileoptypes.Mount, error) {
			if uopt == nil {
				return nil, nil
			}
			switch u := uopt.User.(type) {
			case *pb.UserOpt_ByName:
				var m fileoptypes.Mount
				if u.ByName.Input < 0 {
					return nil, errors.Errorf("invalid user index: %d", u.ByName.Input)
				}
				inp, err := s.getInput(ctx, int(u.ByName.Input), inputs, actions)
				if err != nil {
					return nil, err
				}
				if inp.ref != nil {
					mm, err := s.r.Prepare(ctx, inp.ref, true)
					if err != nil {
						return nil, err
					}
					toRelease = append(toRelease, mm)
					m = mm
				} else {
					m = inp.mount
				}
				return m, nil
			default:
				return nil, nil
			}
		}

		// loadOwner resolves both the user and group mounts for a
		// chown option.
		loadOwner := func(ctx context.Context, chopt *pb.ChownOpt) (fileoptypes.Mount, fileoptypes.Mount, error) {
			if chopt == nil {
				return nil, nil, nil
			}
			um, err := loadUser(ctx, chopt.User)
			if err != nil {
				return nil, nil, err
			}
			gm, err := loadUser(ctx, chopt.Group)
			if err != nil {
				return nil, nil, err
			}
			return um, gm, nil
		}

		// Resolve both inputs in parallel when both are present.
		if action.Input != -1 && action.SecondaryInput != -1 {
			eg, ctx := errgroup.WithContext(ctx)
			eg.Go(loadInput(ctx))
			eg.Go(loadSecondaryInput(ctx))
			if err := eg.Wait(); err != nil {
				return nil, err
			}
		} else {
			if action.Input != -1 {
				if err := loadInput(ctx)(); err != nil {
					return nil, err
				}
			}
			if action.SecondaryInput != -1 {
				if err := loadSecondaryInput(ctx)(); err != nil {
					return nil, err
				}
			}
		}

		// No primary input: start from a scratch mount.
		if inpMount == nil {
			m, err := s.r.Prepare(ctx, nil, false)
			if err != nil {
				return nil, err
			}
			inpMount = m
			inpMountPrepared = true
		}

		switch a := action.Action.(type) {
		case *pb.FileAction_Mkdir:
			user, group, err := loadOwner(ctx, a.Mkdir.Owner)
			if err != nil {
				return nil, err
			}
			if err := s.b.Mkdir(ctx, inpMount, user, group, *a.Mkdir); err != nil {
				return nil, err
			}
		case *pb.FileAction_Mkfile:
			user, group, err := loadOwner(ctx, a.Mkfile.Owner)
			if err != nil {
				return nil, err
			}
			if err := s.b.Mkfile(ctx, inpMount, user, group, *a.Mkfile); err != nil {
				return nil, err
			}
		case *pb.FileAction_Rm:
			if err := s.b.Rm(ctx, inpMount, *a.Rm); err != nil {
				return nil, err
			}
		case *pb.FileAction_Copy:
			// Copy from scratch when there is no secondary input.
			if inpMountSecondary == nil {
				m, err := s.r.Prepare(ctx, nil, true)
				if err != nil {
					return nil, err
				}
				inpMountSecondary = m
			}
			user, group, err := loadOwner(ctx, a.Copy.Owner)
			if err != nil {
				return nil, err
			}
			if err := s.b.Copy(ctx, inpMountSecondary, inpMount, user, group, *a.Copy); err != nil {
				return nil, err
			}
		default:
			return nil, errors.Errorf("invalid action type %T", action.Action)
		}

		// Commit when a downstream consumer needs a ref; otherwise
		// hand the live mount forward.
		if inp.requiresCommit {
			ref, err := s.r.Commit(ctx, inpMount)
			if err != nil {
				return nil, err
			}
			inp.ref = ref
		} else {
			inp.mount = inpMount
		}
		s.mu.Lock()
		s.ins[idx] = inp
		s.mu.Unlock()
		return inp, nil
	})
	if err != nil {
		return input{}, err
	}
	return inp.(input), err
}
|
|
@ -0,0 +1,703 @@
|
|||
package ops
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync/atomic"
|
||||
"testing"
|
||||
|
||||
"github.com/moby/buildkit/solver/llbsolver/ops/fileoptypes"
|
||||
"github.com/moby/buildkit/solver/pb"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestMkdirMkfile(t *testing.T) {
|
||||
fo := &pb.FileOp{
|
||||
Actions: []*pb.FileAction{
|
||||
{
|
||||
Input: 0,
|
||||
SecondaryInput: -1,
|
||||
Output: -1,
|
||||
Action: &pb.FileAction_Mkdir{
|
||||
Mkdir: &pb.FileActionMkDir{
|
||||
Path: "/foo/bar",
|
||||
MakeParents: true,
|
||||
Mode: 0700,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Input: 1,
|
||||
SecondaryInput: -1,
|
||||
Output: 0,
|
||||
Action: &pb.FileAction_Mkfile{
|
||||
Mkfile: &pb.FileActionMkFile{
|
||||
Path: "/foo/bar/baz",
|
||||
Mode: 0700,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
s, rb := newTestFileSolver()
|
||||
inp := rb.NewRef("ref1")
|
||||
outs, err := s.Solve(context.TODO(), []fileoptypes.Ref{inp}, fo.Actions)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, len(outs), 1)
|
||||
rb.checkReleased(t, append(outs, inp))
|
||||
|
||||
o := outs[0].(*testFileRef)
|
||||
require.Equal(t, "mount-ref1-mkdir-mkfile-commit", o.id)
|
||||
require.Equal(t, 2, len(o.mount.chain))
|
||||
require.Equal(t, fo.Actions[0].Action.(*pb.FileAction_Mkdir).Mkdir, o.mount.chain[0].mkdir)
|
||||
require.Equal(t, fo.Actions[1].Action.(*pb.FileAction_Mkfile).Mkfile, o.mount.chain[1].mkfile)
|
||||
}
|
||||
|
||||
func TestChownOpt(t *testing.T) {
|
||||
fo := &pb.FileOp{
|
||||
Actions: []*pb.FileAction{
|
||||
{
|
||||
Input: 0,
|
||||
SecondaryInput: -1,
|
||||
Output: -1,
|
||||
Action: &pb.FileAction_Mkdir{
|
||||
Mkdir: &pb.FileActionMkDir{
|
||||
Path: "/foo/bar",
|
||||
MakeParents: true,
|
||||
Mode: 0700,
|
||||
Owner: &pb.ChownOpt{
|
||||
User: &pb.UserOpt{
|
||||
User: &pb.UserOpt_ByName{
|
||||
ByName: &pb.NamedUserOpt{
|
||||
Input: 1,
|
||||
Name: "myuser",
|
||||
},
|
||||
},
|
||||
},
|
||||
Group: &pb.UserOpt{
|
||||
User: &pb.UserOpt_ByName{
|
||||
ByName: &pb.NamedUserOpt{
|
||||
Input: 1,
|
||||
Name: "myuser",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Input: 2,
|
||||
SecondaryInput: -1,
|
||||
Output: 0,
|
||||
Action: &pb.FileAction_Mkfile{
|
||||
Mkfile: &pb.FileActionMkFile{
|
||||
Path: "/foo/bar/baz",
|
||||
Mode: 0700,
|
||||
Owner: &pb.ChownOpt{
|
||||
User: &pb.UserOpt{
|
||||
User: &pb.UserOpt_ByID{
|
||||
ByID: 100,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
s, rb := newTestFileSolver()
|
||||
inp := rb.NewRef("ref1")
|
||||
inp2 := rb.NewRef("usermount")
|
||||
outs, err := s.Solve(context.TODO(), []fileoptypes.Ref{inp, inp2}, fo.Actions)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, len(outs), 1)
|
||||
rb.checkReleased(t, append(outs, inp, inp2))
|
||||
|
||||
o := outs[0].(*testFileRef)
|
||||
require.Equal(t, "mount-ref1-mkdir#u(mount-usermount)#g(mount-usermount)-mkfile-commit", o.id)
|
||||
require.Equal(t, 2, len(o.mount.chain))
|
||||
require.Equal(t, fo.Actions[0].Action.(*pb.FileAction_Mkdir).Mkdir, o.mount.chain[0].mkdir)
|
||||
require.Equal(t, fo.Actions[1].Action.(*pb.FileAction_Mkfile).Mkfile, o.mount.chain[1].mkfile)
|
||||
}
|
||||
|
||||
func TestChownCopy(t *testing.T) {
|
||||
fo := &pb.FileOp{
|
||||
Actions: []*pb.FileAction{
|
||||
{
|
||||
Input: -1,
|
||||
SecondaryInput: -1,
|
||||
Output: -1,
|
||||
Action: &pb.FileAction_Mkfile{
|
||||
Mkfile: &pb.FileActionMkFile{
|
||||
Path: "/foo/bar/baz",
|
||||
Mode: 0700,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Input: 1,
|
||||
SecondaryInput: 0,
|
||||
Output: 0,
|
||||
Action: &pb.FileAction_Copy{
|
||||
Copy: &pb.FileActionCopy{
|
||||
Src: "/src",
|
||||
Dest: "/dest",
|
||||
Owner: &pb.ChownOpt{
|
||||
User: &pb.UserOpt{
|
||||
User: &pb.UserOpt_ByName{
|
||||
ByName: &pb.NamedUserOpt{
|
||||
Input: 1,
|
||||
Name: "myuser",
|
||||
},
|
||||
},
|
||||
},
|
||||
Group: &pb.UserOpt{
|
||||
User: &pb.UserOpt_ByName{
|
||||
ByName: &pb.NamedUserOpt{
|
||||
Input: 2,
|
||||
Name: "mygroup",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
s, rb := newTestFileSolver()
|
||||
inpSrc := rb.NewRef("src")
|
||||
inpDest := rb.NewRef("dest")
|
||||
outs, err := s.Solve(context.TODO(), []fileoptypes.Ref{inpSrc, inpDest}, fo.Actions)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, len(outs), 1)
|
||||
rb.checkReleased(t, append(outs, inpSrc, inpDest))
|
||||
|
||||
o := outs[0].(*testFileRef)
|
||||
require.Equal(t, "mount-dest-copy(mount-src)#u(mount-dest)#g(mount-scratch-mkfile)-commit", o.id)
|
||||
require.Equal(t, 1, len(o.mount.chain))
|
||||
require.Equal(t, fo.Actions[1].Action.(*pb.FileAction_Copy).Copy, o.mount.chain[0].copy)
|
||||
}
|
||||
|
||||
func TestInvalidNoOutput(t *testing.T) {
|
||||
fo := &pb.FileOp{
|
||||
Actions: []*pb.FileAction{
|
||||
{
|
||||
Input: 0,
|
||||
SecondaryInput: -1,
|
||||
Output: -1,
|
||||
Action: &pb.FileAction_Mkdir{
|
||||
Mkdir: &pb.FileActionMkDir{
|
||||
Path: "/foo/bar",
|
||||
MakeParents: true,
|
||||
Mode: 0700,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
s, rb := newTestFileSolver()
|
||||
outs, err := s.Solve(context.TODO(), []fileoptypes.Ref{}, fo.Actions)
|
||||
rb.checkReleased(t, outs)
|
||||
require.Error(t, err)
|
||||
require.Contains(t, err.Error(), "no outputs specified")
|
||||
}
|
||||
|
||||
func TestInvalidDuplicateOutput(t *testing.T) {
|
||||
fo := &pb.FileOp{
|
||||
Actions: []*pb.FileAction{
|
||||
{
|
||||
Input: 0,
|
||||
SecondaryInput: -1,
|
||||
Output: 0,
|
||||
Action: &pb.FileAction_Mkdir{
|
||||
Mkdir: &pb.FileActionMkDir{
|
||||
Path: "/foo/bar",
|
||||
MakeParents: true,
|
||||
Mode: 0700,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Input: 1,
|
||||
SecondaryInput: -1,
|
||||
Output: 0,
|
||||
Action: &pb.FileAction_Mkfile{
|
||||
Mkfile: &pb.FileActionMkFile{
|
||||
Path: "/foo/bar/baz",
|
||||
Mode: 0700,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
s, rb := newTestFileSolver()
|
||||
_, err := s.Solve(context.TODO(), []fileoptypes.Ref{}, fo.Actions)
|
||||
require.Error(t, err)
|
||||
require.Contains(t, err.Error(), "duplicate output")
|
||||
rb.checkReleased(t, nil)
|
||||
}
|
||||
|
||||
// TestActionInvalidIndex verifies the self-referencing case: with zero op
// inputs, Input: 0 refers to action 0's own output (inputs beyond the real
// input count index into action outputs), which is detected as a loop.
func TestActionInvalidIndex(t *testing.T) {
	fo := &pb.FileOp{
		Actions: []*pb.FileAction{
			{
				Input:          0, // no real inputs exist -> points at this very action
				SecondaryInput: -1,
				Output:         0,
				Action: &pb.FileAction_Mkdir{
					Mkdir: &pb.FileActionMkDir{
						Path:        "/foo/bar",
						MakeParents: true,
						Mode:        0700,
					},
				},
			},
		},
	}

	s, rb := newTestFileSolver()
	_, err := s.Solve(context.TODO(), []fileoptypes.Ref{}, fo.Actions)
	require.Error(t, err)
	require.Contains(t, err.Error(), "loop from index")
	rb.checkReleased(t, nil)
}
|
||||
|
||||
// TestActionLoop verifies cycle detection between two actions: with no op
// inputs, action 0 consumes action 1's output and action 1 consumes
// action 0's output — a dependency cycle the solver must refuse.
func TestActionLoop(t *testing.T) {
	fo := &pb.FileOp{
		Actions: []*pb.FileAction{
			{
				Input:          1, // -> output of action 0... index 1 = action 1's output
				SecondaryInput: -1,
				Output:         -1,
				Action: &pb.FileAction_Mkdir{
					Mkdir: &pb.FileActionMkDir{
						Path:        "/foo/bar",
						MakeParents: true,
						Mode:        0700,
					},
				},
			},
			{
				Input:          0, // -> action 0's output; closes the cycle
				SecondaryInput: -1,
				Output:         0,
				Action: &pb.FileAction_Mkfile{
					Mkfile: &pb.FileActionMkFile{
						Path: "/foo/bar/baz",
						Mode: 0700,
					},
				},
			},
		},
	}

	s, rb := newTestFileSolver()
	_, err := s.Solve(context.TODO(), []fileoptypes.Ref{}, fo.Actions)
	require.Error(t, err)
	require.Contains(t, err.Error(), "loop from index")
	rb.checkReleased(t, nil)
}
|
||||
|
||||
// TestMultiOutput verifies that a chain of two actions can export both the
// intermediate and the final state as separate outputs, with the second
// output's mod chain containing both actions.
func TestMultiOutput(t *testing.T) {
	fo := &pb.FileOp{
		Actions: []*pb.FileAction{
			{
				Input:          0, // real input ref1
				SecondaryInput: -1,
				Output:         0, // exported after mkdir
				Action: &pb.FileAction_Mkdir{
					Mkdir: &pb.FileActionMkDir{
						Path:        "/foo/bar",
						MakeParents: true,
						Mode:        0700,
					},
				},
			},
			{
				Input:          1, // action 0's output (index past real inputs)
				SecondaryInput: -1,
				Output:         1, // exported after mkdir+mkfile
				Action: &pb.FileAction_Mkfile{
					Mkfile: &pb.FileActionMkFile{
						Path: "/foo/bar/baz",
						Mode: 0700,
					},
				},
			},
		},
	}

	s, rb := newTestFileSolver()
	inp := rb.NewRef("ref1")
	outs, err := s.Solve(context.TODO(), []fileoptypes.Ref{inp}, fo.Actions)
	require.NoError(t, err)
	require.Equal(t, len(outs), 2)
	rb.checkReleased(t, append(outs, inp))

	// first output: only the mkdir applied
	o := outs[0].(*testFileRef)
	require.Equal(t, "mount-ref1-mkdir-commit", o.id)
	require.Equal(t, 1, len(o.mount.chain))
	require.Equal(t, fo.Actions[0].Action.(*pb.FileAction_Mkdir).Mkdir, o.mount.chain[0].mkdir)

	// second output: mkdir then mkfile
	o = outs[1].(*testFileRef)
	require.Equal(t, "mount-ref1-mkdir-mkfile-commit", o.id)
	require.Equal(t, 2, len(o.mount.chain))
	require.Equal(t, fo.Actions[0].Action.(*pb.FileAction_Mkdir).Mkdir, o.mount.chain[0].mkdir)
	require.Equal(t, fo.Actions[1].Action.(*pb.FileAction_Mkfile).Mkfile, o.mount.chain[1].mkfile)
}
|
||||
|
||||
// TestFileFromScratch verifies that Input: -1 starts the chain from a
// scratch (empty) mount rather than a provided ref.
func TestFileFromScratch(t *testing.T) {
	fo := &pb.FileOp{
		Actions: []*pb.FileAction{
			{
				Input:          -1, // scratch
				SecondaryInput: -1,
				Output:         -1,
				Action: &pb.FileAction_Mkdir{
					Mkdir: &pb.FileActionMkDir{
						Path:        "/foo/bar",
						MakeParents: true,
						Mode:        0700,
					},
				},
			},
			{
				Input:          0, // action 0's output (no real inputs)
				SecondaryInput: -1,
				Output:         0,
				Action: &pb.FileAction_Mkfile{
					Mkfile: &pb.FileActionMkFile{
						Path: "/foo/bar/baz",
						Mode: 0700,
					},
				},
			},
		},
	}

	s, rb := newTestFileSolver()
	outs, err := s.Solve(context.TODO(), []fileoptypes.Ref{}, fo.Actions)
	require.NoError(t, err)
	require.Equal(t, len(outs), 1)
	rb.checkReleased(t, outs)

	o := outs[0].(*testFileRef)

	// id records scratch origin followed by both actions
	require.Equal(t, "mount-scratch-mkdir-mkfile-commit", o.id)
	require.Equal(t, 2, len(o.mount.chain))
	require.Equal(t, fo.Actions[0].Action.(*pb.FileAction_Mkdir).Mkdir, o.mount.chain[0].mkdir)
	require.Equal(t, fo.Actions[1].Action.(*pb.FileAction_Mkfile).Mkfile, o.mount.chain[1].mkfile)
}
|
||||
|
||||
// TestFileCopyInputSrc verifies a copy between two real inputs: Input is
// the destination mount, SecondaryInput the source. The resulting id
// ("mount-destref-copy(mount-srcref)") pins that orientation.
func TestFileCopyInputSrc(t *testing.T) {
	fo := &pb.FileOp{
		Actions: []*pb.FileAction{
			{
				Input:          1,  // dest = second real input
				SecondaryInput: 0, // src = first real input
				Output:         0,
				Action: &pb.FileAction_Copy{
					Copy: &pb.FileActionCopy{
						Src:  "/src",
						Dest: "/dest",
					},
				},
			},
		},
	}

	s, rb := newTestFileSolver()
	inp0 := rb.NewRef("srcref")
	inp1 := rb.NewRef("destref")
	outs, err := s.Solve(context.TODO(), []fileoptypes.Ref{inp0, inp1}, fo.Actions)
	require.NoError(t, err)
	require.Equal(t, len(outs), 1)
	rb.checkReleased(t, append(outs, inp0, inp1))

	o := outs[0].(*testFileRef)
	require.Equal(t, "mount-destref-copy(mount-srcref)-commit", o.id)
	require.Equal(t, 1, len(o.mount.chain))
	require.Equal(t, fo.Actions[0].Action.(*pb.FileAction_Copy).Copy, o.mount.chain[0].copy)
}
|
||||
|
||||
// TestFileCopyInputRm chains three actions across action outputs: mkdir on
// the src input, a copy whose SecondaryInput is that mkdir's output
// (index 2 = first action output after the 2 real inputs), then rm on the
// copy's output (index 3). Only the rm's result is exported.
func TestFileCopyInputRm(t *testing.T) {
	fo := &pb.FileOp{
		Actions: []*pb.FileAction{
			{
				Input:          0, // srcref
				SecondaryInput: -1,
				Output:         -1,
				Action: &pb.FileAction_Mkdir{
					Mkdir: &pb.FileActionMkDir{
						Path:        "/foo/bar",
						MakeParents: true,
						Mode:        0700,
					},
				},
			},
			{
				Input:          1, // destref
				SecondaryInput: 2, // output of action 0 (srcref+mkdir)
				Output:         -1,
				Action: &pb.FileAction_Copy{
					Copy: &pb.FileActionCopy{
						Src:  "/src",
						Dest: "/dest",
					},
				},
			},
			{
				Input:          3, // output of action 1 (the copy)
				SecondaryInput: -1,
				Output:         0,
				Action: &pb.FileAction_Rm{
					Rm: &pb.FileActionRm{
						Path: "/foo/bar/baz",
					},
				},
			},
		},
	}

	s, rb := newTestFileSolver()
	inp0 := rb.NewRef("srcref")
	inp1 := rb.NewRef("destref")
	outs, err := s.Solve(context.TODO(), []fileoptypes.Ref{inp0, inp1}, fo.Actions)
	require.NoError(t, err)
	require.Equal(t, len(outs), 1)
	rb.checkReleased(t, append(outs, inp0, inp1))

	o := outs[0].(*testFileRef)
	require.Equal(t, "mount-destref-copy(mount-srcref-mkdir)-rm-commit", o.id)
	require.Equal(t, 2, len(o.mount.chain))
	require.Equal(t, fo.Actions[0].Action.(*pb.FileAction_Mkdir).Mkdir, o.mount.chain[0].copySrc[0].mkdir)
	require.Equal(t, fo.Actions[1].Action.(*pb.FileAction_Copy).Copy, o.mount.chain[0].copy)
	require.Equal(t, fo.Actions[2].Action.(*pb.FileAction_Rm).Rm, o.mount.chain[1].rm)
}
|
||||
|
||||
// TestFileParallelActions checks that independent actions run concurrently.
// Two mkdirs branch off the same input; each blocks in the mount callback
// until both have started (sem reaches 2 and ch closes), so the solve can
// only finish if the solver executes them in parallel.
func TestFileParallelActions(t *testing.T) {
	// two mkdirs from scratch copied over each other. mkdirs should happen in parallel
	fo := &pb.FileOp{
		Actions: []*pb.FileAction{
			{
				Input:          0,
				SecondaryInput: -1,
				Output:         -1,
				Action: &pb.FileAction_Mkdir{
					Mkdir: &pb.FileActionMkDir{
						Path: "/foo",
					},
				},
			},
			{
				Input:          0,
				SecondaryInput: -1,
				Output:         -1,
				Action: &pb.FileAction_Mkdir{
					Mkdir: &pb.FileActionMkDir{
						Path: "/bar",
					},
				},
			},
			{
				Input:          2, // output of action 1
				SecondaryInput: 1, // output of action 0
				Output:         0,
				Action: &pb.FileAction_Copy{
					Copy: &pb.FileActionCopy{
						Src:  "/src",
						Dest: "/dest",
					},
				},
			},
		},
	}

	s, rb := newTestFileSolver()
	inp := rb.NewRef("inpref")

	// rendezvous: both mkdir callbacks must arrive before either proceeds
	ch := make(chan struct{})
	var sem int64
	inp.callback = func() {
		if atomic.AddInt64(&sem, 1) == 2 {
			close(ch)
		}
		<-ch
	}

	outs, err := s.Solve(context.TODO(), []fileoptypes.Ref{inp}, fo.Actions)
	require.NoError(t, err)
	require.Equal(t, len(outs), 1)

	// NOTE(review): this test intentionally only asserts concurrency (sem == 2);
	// release bookkeeping is covered by the other tests.
	require.Equal(t, int64(2), sem)
}
|
||||
|
||||
func newTestFileSolver() (*FileOpSolver, *testFileRefBackend) {
|
||||
trb := &testFileRefBackend{refs: map[*testFileRef]struct{}{}, mounts: map[string]*testMount{}}
|
||||
return NewFileOpSolver(&testFileBackend{}, trb), trb
|
||||
}
|
||||
|
||||
// testFileRef is a refcounted fake fileoptypes.Ref used by the tests.
type testFileRef struct {
	id       string     // human-readable identity, accumulated by backend ops
	mount    *testMount // snapshot of the mount this ref was committed from
	refcount int        // starts at 1; Release decrements
	callback func()     // copied onto the mount; invoked by Mkdir for sync tests
}
|
||||
|
||||
func (r *testFileRef) Release(context.Context) error {
|
||||
if r.refcount == 0 {
|
||||
return errors.Errorf("ref already released")
|
||||
}
|
||||
r.refcount--
|
||||
return nil
|
||||
}
|
||||
|
||||
// testMount is a fake fileoptypes.Mount that records every file action
// applied to it (in id and chain) instead of touching a filesystem.
type testMount struct {
	b         *testFileRefBackend // owning backend, for canonical-mount lookup
	id        string              // accumulates "-mkdir", "-copy(...)", etc.
	initID    string              // id at Prepare time; key into b.mounts
	chain     []mod               // structured log of applied actions
	callback  func()              // invoked by Mkdir; used for concurrency tests
	unmounted bool                // set by Release; double-release is an error
	active    *testFileRef        // scratch ref to release with the mount, if any
}
|
||||
|
||||
func (tm *testMount) addUser(user, group fileoptypes.Mount) {
|
||||
if user != nil {
|
||||
um := user.(*testMount)
|
||||
tm.id += "#u(" + um.id + ")"
|
||||
}
|
||||
if group != nil {
|
||||
gm := group.(*testMount)
|
||||
tm.id += "#g(" + gm.id + ")"
|
||||
}
|
||||
}
|
||||
|
||||
// mod is one recorded file action; exactly one of the action fields is set.
type mod struct {
	mkdir   *pb.FileActionMkDir
	rm      *pb.FileActionRm
	mkfile  *pb.FileActionMkFile
	copy    *pb.FileActionCopy
	copySrc []mod // for copy: the source mount's chain at copy time
}
|
||||
|
||||
func (m *testMount) IsFileOpMount() {}
|
||||
func (m *testMount) Release(ctx context.Context) error {
|
||||
if m.b.mounts[m.initID] != m {
|
||||
return m.b.mounts[m.initID].Release(ctx)
|
||||
}
|
||||
if m.unmounted {
|
||||
return errors.Errorf("already unmounted")
|
||||
}
|
||||
m.unmounted = true
|
||||
if m.active != nil {
|
||||
return m.active.Release(ctx)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// testFileBackend is a fake fileoptypes.Backend: each operation only
// appends to the target testMount's id string and chain.
type testFileBackend struct {
}
|
||||
|
||||
func (b *testFileBackend) Mkdir(_ context.Context, m, user, group fileoptypes.Mount, a pb.FileActionMkDir) error {
|
||||
mm := m.(*testMount)
|
||||
if mm.callback != nil {
|
||||
mm.callback()
|
||||
}
|
||||
mm.id += "-mkdir"
|
||||
mm.addUser(user, group)
|
||||
mm.chain = append(mm.chain, mod{mkdir: &a})
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *testFileBackend) Mkfile(_ context.Context, m, user, group fileoptypes.Mount, a pb.FileActionMkFile) error {
|
||||
mm := m.(*testMount)
|
||||
mm.id += "-mkfile"
|
||||
mm.addUser(user, group)
|
||||
mm.chain = append(mm.chain, mod{mkfile: &a})
|
||||
return nil
|
||||
}
|
||||
func (b *testFileBackend) Rm(_ context.Context, m fileoptypes.Mount, a pb.FileActionRm) error {
|
||||
mm := m.(*testMount)
|
||||
mm.id += "-rm"
|
||||
mm.chain = append(mm.chain, mod{rm: &a})
|
||||
return nil
|
||||
}
|
||||
func (b *testFileBackend) Copy(_ context.Context, m1, m, user, group fileoptypes.Mount, a pb.FileActionCopy) error {
|
||||
mm := m.(*testMount)
|
||||
mm1 := m1.(*testMount)
|
||||
mm.id += "-copy(" + mm1.id + ")"
|
||||
mm.addUser(user, group)
|
||||
mm.chain = append(mm.chain, mod{copy: &a, copySrc: mm1.chain})
|
||||
return nil
|
||||
}
|
||||
|
||||
// testFileRefBackend is a fake fileoptypes.RefManager that tracks every
// ref it creates and the canonical mount for each Prepare, so tests can
// verify nothing leaks.
type testFileRefBackend struct {
	refs   map[*testFileRef]struct{} // all refs ever created
	mounts map[string]*testMount     // canonical mount per initID
}
|
||||
|
||||
func (b *testFileRefBackend) NewRef(id string) *testFileRef {
|
||||
r := &testFileRef{refcount: 1, id: id}
|
||||
b.refs[r] = struct{}{}
|
||||
return r
|
||||
}
|
||||
|
||||
// Prepare returns a mount for ref. A nil ref means scratch: a fresh
// "scratch" ref is created and attached as the mount's active ref so it is
// released together with the mount. The (possibly reused) mount is
// registered as canonical under its current id, and a shallow copy with
// its own chain slice is returned so callers can mutate it independently.
func (b *testFileRefBackend) Prepare(ctx context.Context, ref fileoptypes.Ref, readonly bool) (fileoptypes.Mount, error) {
	var active *testFileRef
	if ref == nil {
		active = b.NewRef("scratch")
		ref = active
	}
	rr := ref.(*testFileRef)
	m := rr.mount
	if m == nil {
		m = &testMount{b: b, id: "mount-" + rr.id, callback: rr.callback}
	}
	m.initID = m.id
	m.active = active
	b.mounts[m.initID] = m
	// hand out a copy; copies forward Release to the canonical mount
	m2 := *m
	m2.chain = append([]mod{}, m2.chain...)
	return &m2, nil
}
|
||||
func (b *testFileRefBackend) Commit(ctx context.Context, mount fileoptypes.Mount) (fileoptypes.Ref, error) {
|
||||
m := mount.(*testMount)
|
||||
if err := b.mounts[m.initID].Release(context.TODO()); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
m2 := *m
|
||||
m2.unmounted = false
|
||||
m2.callback = nil
|
||||
r := b.NewRef(m2.id + "-commit")
|
||||
r.mount = &m2
|
||||
return r, nil
|
||||
}
|
||||
|
||||
// checkReleased asserts the bookkeeping invariants after a solve: every
// ref returned in outs still holds exactly one reference, every other ref
// is fully released, outs only contains refs this backend created, and
// every canonical mount has been unmounted.
func (b *testFileRefBackend) checkReleased(t *testing.T, outs []fileoptypes.Ref) {
loop0:
	for r := range b.refs {
		for _, o := range outs {
			if o.(*testFileRef) == r {
				require.Equal(t, 1, r.refcount)
				continue loop0
			}
		}
		require.Equal(t, 0, r.refcount, "%s not released", r.id)
	}
	for _, o := range outs {
		_, ok := b.refs[o.(*testFileRef)]
		require.True(t, ok)
	}

	for _, m := range b.mounts {
		require.True(t, m.unmounted, "%s %p still mounted", m.id, m)
	}
}
|
|
@ -0,0 +1,28 @@
|
|||
// Package fileoptypes defines the abstractions the FileOp solver works
// against: refs, mounts, the backend that executes file actions, and the
// manager that converts between refs and mounts.
package fileoptypes

import (
	"context"

	"github.com/moby/buildkit/solver/pb"
)

// Ref is a releasable handle to an immutable snapshot.
type Ref interface {
	Release(context.Context) error
}

// Mount is a releasable, mounted view of a snapshot that file actions
// are applied to.
type Mount interface {
	IsFileOpMount()
	Release(context.Context) error
}

// Backend executes individual file actions against mounts. For Mkdir and
// Mkfile the two trailing Mounts carry optional user/group chown sources;
// Copy takes (src, dest, user, group).
type Backend interface {
	Mkdir(context.Context, Mount, Mount, Mount, pb.FileActionMkDir) error
	Mkfile(context.Context, Mount, Mount, Mount, pb.FileActionMkFile) error
	Rm(context.Context, Mount, pb.FileActionRm) error
	Copy(context.Context, Mount, Mount, Mount, Mount, pb.FileActionCopy) error
}

// RefManager converts refs to mounts and committed mounts back to refs.
type RefManager interface {
	Prepare(ctx context.Context, ref Ref, readonly bool) (Mount, error)
	Commit(ctx context.Context, mount Mount) (Ref, error)
}
|
|
@ -13,7 +13,13 @@ import (
|
|||
"golang.org/x/sync/errgroup"
|
||||
)
|
||||
|
||||
func NewContentHashFunc(selectors []string) solver.ResultBasedCacheFunc {
|
||||
// Selector describes one path to include in a content-hash cache key.
type Selector struct {
	Path        string // path inside the ref, joined to "/" before hashing
	Wildcard    bool   // if set, Path may contain wildcards (ChecksumWildcard)
	FollowLinks bool   // resolve symlinks while checksumming
}
|
||||
|
||||
func NewContentHashFunc(selectors []Selector) solver.ResultBasedCacheFunc {
|
||||
return func(ctx context.Context, res solver.Result) (digest.Digest, error) {
|
||||
ref, ok := res.Sys().(*worker.WorkerRef)
|
||||
if !ok {
|
||||
|
@ -21,7 +27,7 @@ func NewContentHashFunc(selectors []string) solver.ResultBasedCacheFunc {
|
|||
}
|
||||
|
||||
if len(selectors) == 0 {
|
||||
selectors = []string{""}
|
||||
selectors = []Selector{{}}
|
||||
}
|
||||
|
||||
dgsts := make([][]byte, len(selectors))
|
||||
|
@ -32,11 +38,19 @@ func NewContentHashFunc(selectors []string) solver.ResultBasedCacheFunc {
|
|||
// FIXME(tonistiigi): enabling this parallelization seems to create wrong results for some big inputs(like gobuild)
|
||||
// func(i int) {
|
||||
// eg.Go(func() error {
|
||||
dgst, err := contenthash.Checksum(ctx, ref.ImmutableRef, path.Join("/", sel), true)
|
||||
if err != nil {
|
||||
return "", err
|
||||
if !sel.Wildcard {
|
||||
dgst, err := contenthash.Checksum(ctx, ref.ImmutableRef, path.Join("/", sel.Path), sel.FollowLinks)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
dgsts[i] = []byte(dgst)
|
||||
} else {
|
||||
dgst, err := contenthash.ChecksumWildcard(ctx, ref.ImmutableRef, path.Join("/", sel.Path), sel.FollowLinks)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
dgsts[i] = []byte(dgst)
|
||||
}
|
||||
dgsts[i] = []byte(dgst)
|
||||
// return nil
|
||||
// })
|
||||
// }(i)
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
package llbsolver
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/containerd/containerd/platforms"
|
||||
|
@ -228,9 +229,29 @@ func llbOpName(op *pb.Op) string {
|
|||
return op.Source.Identifier
|
||||
case *pb.Op_Exec:
|
||||
return strings.Join(op.Exec.Meta.Args, " ")
|
||||
case *pb.Op_File:
|
||||
return fileOpName(op.File.Actions)
|
||||
case *pb.Op_Build:
|
||||
return "build"
|
||||
default:
|
||||
return "unknown"
|
||||
}
|
||||
}
|
||||
|
||||
func fileOpName(actions []*pb.FileAction) string {
|
||||
names := make([]string, 0, len(actions))
|
||||
for _, action := range actions {
|
||||
switch a := action.Action.(type) {
|
||||
case *pb.FileAction_Mkdir:
|
||||
names = append(names, fmt.Sprintf("mkdir %s", a.Mkdir.Path))
|
||||
case *pb.FileAction_Mkfile:
|
||||
names = append(names, fmt.Sprintf("mkfile %s", a.Mkfile.Path))
|
||||
case *pb.FileAction_Rm:
|
||||
names = append(names, fmt.Sprintf("rm %s", a.Rm.Path))
|
||||
case *pb.FileAction_Copy:
|
||||
names = append(names, fmt.Sprintf("copy %s %s", a.Copy.Src, a.Copy.Dest))
|
||||
}
|
||||
}
|
||||
|
||||
return strings.Join(names, ", ")
|
||||
}
|
||||
|
|
|
@ -21,3 +21,5 @@ const AttrImageResolveModeDefault = "default"
|
|||
const AttrImageResolveModeForcePull = "pull"
|
||||
const AttrImageResolveModePreferLocal = "local"
|
||||
const AttrImageRecordType = "image.recordtype"
|
||||
|
||||
type IsFileAction = isFileAction_Action
|
||||
|
|
|
@ -43,6 +43,8 @@ const (
|
|||
CapExecMountSSH apicaps.CapID = "exec.mount.ssh"
|
||||
CapExecCgroupsMounted apicaps.CapID = "exec.cgroup"
|
||||
|
||||
CapFileBase apicaps.CapID = "file.base"
|
||||
|
||||
CapConstraints apicaps.CapID = "constraints"
|
||||
CapPlatform apicaps.CapID = "platform"
|
||||
|
||||
|
@ -226,6 +228,12 @@ func init() {
|
|||
Status: apicaps.CapStatusExperimental,
|
||||
})
|
||||
|
||||
Caps.Init(apicaps.Cap{
|
||||
ID: CapFileBase,
|
||||
Enabled: true,
|
||||
Status: apicaps.CapStatusPrerelease,
|
||||
})
|
||||
|
||||
Caps.Init(apicaps.Cap{
|
||||
ID: CapConstraints,
|
||||
Enabled: true,
|
||||
|
|
3712
solver/pb/ops.pb.go
3712
solver/pb/ops.pb.go
File diff suppressed because it is too large
Load Diff
|
@ -15,7 +15,7 @@ message Op {
|
|||
oneof op {
|
||||
ExecOp exec = 2;
|
||||
SourceOp source = 3;
|
||||
CopyOp copy = 4;
|
||||
FileOp file = 4;
|
||||
BuildOp build = 5;
|
||||
}
|
||||
Platform platform = 10;
|
||||
|
@ -134,18 +134,6 @@ message SSHOpt {
|
|||
bool optional = 5;
|
||||
}
|
||||
|
||||
// CopyOp copies files across Ops.
|
||||
message CopyOp {
|
||||
repeated CopySource src = 1;
|
||||
string dest = 2;
|
||||
}
|
||||
|
||||
// CopySource specifies a source for CopyOp.
|
||||
message CopySource {
|
||||
int64 input = 1 [(gogoproto.customtype) = "InputIndex", (gogoproto.nullable) = false];
|
||||
string selector = 2;
|
||||
}
|
||||
|
||||
// SourceOp specifies a source such as build contexts and images.
|
||||
message SourceOp {
|
||||
// TODO: use source type or any type instead of URL protocol.
|
||||
|
@ -211,4 +199,101 @@ message Definition {
|
|||
message HostIP {
|
||||
string Host = 1;
|
||||
string IP = 2;
|
||||
}
|
||||
|
||||
// FileOp performs a sequence of file actions (copy/mkfile/mkdir/rm).
message FileOp {
	repeated FileAction actions = 2;
}

// FileAction is one step of a FileOp. Input indices below the op's real
// input count refer to op inputs; higher indices refer to outputs of
// earlier actions; -1 means scratch/none.
message FileAction {
	int64 input = 1 [(gogoproto.customtype) = "InputIndex", (gogoproto.nullable) = false]; // could be real input or target (target index + max input index)
	int64 secondaryInput = 2 [(gogoproto.customtype) = "InputIndex", (gogoproto.nullable) = false]; // --//--
	int64 output = 3 [(gogoproto.customtype) = "OutputIndex", (gogoproto.nullable) = false];
	oneof action {
		// FileActionCopy copies files from secondaryInput on top of input
		FileActionCopy copy = 4;
		// FileActionMkFile creates a new file
		FileActionMkFile mkfile = 5;
		// FileActionMkDir creates a new directory
		FileActionMkDir mkdir = 6;
		// FileActionRm removes a file
		FileActionRm rm = 7;
	}
}

message FileActionCopy {
	// src is the source path
	string src = 1;
	// dest path
	string dest = 2;
	// optional owner override
	ChownOpt owner = 3;
	// optional permission bits override
	int32 mode = 4;
	// followSymlink resolves symlinks in src
	bool followSymlink = 5;
	// dirCopyContents only copies contents if src is a directory
	bool dirCopyContents = 6;
	// attemptUnpackDockerCompatibility detects if src is an archive to unpack it instead
	bool attemptUnpackDockerCompatibility = 7;
	// createDestPath creates dest path directories if needed
	bool createDestPath = 8;
	// allowWildcard allows filepath.Match wildcards in src path
	bool allowWildcard = 9;
	// allowEmptyWildcard doesn't fail the whole copy if wildcard doesn't resolve to files
	bool allowEmptyWildcard = 10;
	// optional created time override
	int64 timestamp = 11;
}

message FileActionMkFile {
	// path for the new file
	string path = 1;
	// permission bits
	int32 mode = 2;
	// data is the new file contents
	bytes data = 3;
	// optional owner for the new file
	ChownOpt owner = 4;
	// optional created time override
	int64 timestamp = 5;
}

message FileActionMkDir {
	// path for the new directory
	string path = 1;
	// permission bits
	int32 mode = 2;
	// makeParents creates parent directories as well if needed
	bool makeParents = 3;
	// optional owner for the new directory
	ChownOpt owner = 4;
	// optional created time override
	int64 timestamp = 5;
}

message FileActionRm {
	// path to remove
	string path = 1;
	// allowNotFound doesn't fail the rm if file is not found
	bool allowNotFound = 2;
	// allowWildcard allows filepath.Match wildcards in path
	bool allowWildcard = 3;
}

// ChownOpt carries optional user/group ownership overrides.
message ChownOpt {
	UserOpt user = 1;
	UserOpt group = 2;
}

// UserOpt identifies a user either by name or by numeric id.
message UserOpt {
	oneof user {
		NamedUserOpt byName = 1;
		uint32 byID = 2;
	}
}

// NamedUserOpt resolves a user name against the passwd data of the
// snapshot at the given input index.
message NamedUserOpt {
	string name = 1;
	int64 input = 2 [(gogoproto.customtype) = "InputIndex", (gogoproto.nullable) = false];
}
|
|
@ -0,0 +1 @@
|
|||
This code provides helper functions for dealing with archive files.
|
File diff suppressed because it is too large
Load Diff
|
@ -0,0 +1,92 @@
|
|||
package archive // import "github.com/docker/docker/pkg/archive"
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/docker/docker/pkg/system"
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
func getWhiteoutConverter(format WhiteoutFormat) tarWhiteoutConverter {
|
||||
if format == OverlayWhiteoutFormat {
|
||||
return overlayWhiteoutConverter{}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// overlayWhiteoutConverter translates between overlayfs whiteout
// representation (char devices, trusted.overlay.opaque xattr) and the
// AUFS representation used inside tar archives (.wh. prefixed files).
type overlayWhiteoutConverter struct{}

// ConvertWrite rewrites an overlay whiteout entry into AUFS form while
// writing a tar archive: 0/0 char devices become empty ".wh.<name>"
// regular files, and opaque directories additionally yield a second
// header (wo) for the ".wh..wh..opq" marker file inside the directory.
func (overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi os.FileInfo) (wo *tar.Header, err error) {
	// convert whiteouts to AUFS format
	if fi.Mode()&os.ModeCharDevice != 0 && hdr.Devmajor == 0 && hdr.Devminor == 0 {
		// we just rename the file and make it normal
		dir, filename := filepath.Split(hdr.Name)
		hdr.Name = filepath.Join(dir, WhiteoutPrefix+filename)
		hdr.Mode = 0600
		hdr.Typeflag = tar.TypeReg
		hdr.Size = 0
	}

	if fi.Mode()&os.ModeDir != 0 {
		// convert opaque dirs to AUFS format by writing an empty file with the prefix
		opaque, err := system.Lgetxattr(path, "trusted.overlay.opaque")
		if err != nil {
			return nil, err
		}
		if len(opaque) == 1 && opaque[0] == 'y' {
			// the xattr must not leak into the archive once converted
			if hdr.Xattrs != nil {
				delete(hdr.Xattrs, "trusted.overlay.opaque")
			}

			// create a header for the whiteout file
			// it should inherit some properties from the parent, but be a regular file
			wo = &tar.Header{
				Typeflag:   tar.TypeReg,
				Mode:       hdr.Mode & int64(os.ModePerm),
				Name:       filepath.Join(hdr.Name, WhiteoutOpaqueDir),
				Size:       0,
				Uid:        hdr.Uid,
				Uname:      hdr.Uname,
				Gid:        hdr.Gid,
				Gname:      hdr.Gname,
				AccessTime: hdr.AccessTime,
				ChangeTime: hdr.ChangeTime,
			}
		}
	}

	return
}
|
||||
|
||||
// ConvertRead rewrites AUFS whiteout entries back into overlay form while
// extracting a tar archive: the opaque-dir marker sets the
// trusted.overlay.opaque xattr on its parent, and ".wh."-prefixed entries
// become 0/0 char devices. It returns false when the tar entry itself
// should not be written to disk.
func (overlayWhiteoutConverter) ConvertRead(hdr *tar.Header, path string) (bool, error) {
	base := filepath.Base(path)
	dir := filepath.Dir(path)

	// if a directory is marked as opaque by the AUFS special file, we need to translate that to overlay
	if base == WhiteoutOpaqueDir {
		err := unix.Setxattr(dir, "trusted.overlay.opaque", []byte{'y'}, 0)
		// don't write the file itself
		return false, err
	}

	// if a file was deleted and we are using overlay, we need to create a character device
	if strings.HasPrefix(base, WhiteoutPrefix) {
		originalBase := base[len(WhiteoutPrefix):]
		originalPath := filepath.Join(dir, originalBase)

		if err := unix.Mknod(originalPath, unix.S_IFCHR, 0); err != nil {
			return false, err
		}
		// preserve ownership from the tar header on the device node
		if err := os.Chown(originalPath, hdr.Uid, hdr.Gid); err != nil {
			return false, err
		}

		// don't write the file itself
		return false, nil
	}

	return true, nil
}
|
|
@ -0,0 +1,7 @@
|
|||
// +build !linux

package archive // import "github.com/docker/docker/pkg/archive"

// getWhiteoutConverter returns nil on non-Linux platforms: overlay
// whiteout conversion only applies to the Linux overlay storage driver.
func getWhiteoutConverter(format WhiteoutFormat) tarWhiteoutConverter {
	return nil
}
|
|
@ -0,0 +1,114 @@
|
|||
// +build !windows
|
||||
|
||||
package archive // import "github.com/docker/docker/pkg/archive"
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"errors"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"syscall"
|
||||
|
||||
"github.com/docker/docker/pkg/idtools"
|
||||
"github.com/docker/docker/pkg/system"
|
||||
rsystem "github.com/opencontainers/runc/libcontainer/system"
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
// fixVolumePathPrefix does platform specific processing to ensure that if
// the path being passed in is not in a volume path format, convert it to one.
// On unix this is a no-op; only Windows needs long-path prefixing.
func fixVolumePathPrefix(srcPath string) string {
	return srcPath
}
|
||||
|
||||
// getWalkRoot calculates the root path when performing a TarWithOptions.
|
||||
// We use a separate function as this is platform specific. On Linux, we
|
||||
// can't use filepath.Join(srcPath,include) because this will clean away
|
||||
// a trailing "." or "/" which may be important.
|
||||
func getWalkRoot(srcPath string, include string) string {
|
||||
return srcPath + string(filepath.Separator) + include
|
||||
}
|
||||
|
||||
// CanonicalTarNameForPath returns platform-specific filepath
// to canonical posix-style path for tar archival. p is relative
// path. Unix paths are already posix-style, so this is the identity.
func CanonicalTarNameForPath(p string) (string, error) {
	// already unix-style
	return p, nil
}
|
||||
|
||||
// chmodTarEntry is used to adjust the file permissions used in tar header based
// on the platform the archival is done.

func chmodTarEntry(perm os.FileMode) os.FileMode {
	return perm // noop for unix as golang APIs provide perm bits correctly
}
|
||||
|
||||
func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) (err error) {
|
||||
s, ok := stat.(*syscall.Stat_t)
|
||||
|
||||
if ok {
|
||||
// Currently go does not fill in the major/minors
|
||||
if s.Mode&unix.S_IFBLK != 0 ||
|
||||
s.Mode&unix.S_IFCHR != 0 {
|
||||
hdr.Devmajor = int64(unix.Major(uint64(s.Rdev))) // nolint: unconvert
|
||||
hdr.Devminor = int64(unix.Minor(uint64(s.Rdev))) // nolint: unconvert
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func getInodeFromStat(stat interface{}) (inode uint64, err error) {
|
||||
s, ok := stat.(*syscall.Stat_t)
|
||||
|
||||
if ok {
|
||||
inode = s.Ino
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// getFileUIDGID extracts the owning uid/gid from a *syscall.Stat_t;
// any other stat type is an error.
func getFileUIDGID(stat interface{}) (idtools.IDPair, error) {
	s, ok := stat.(*syscall.Stat_t)

	if !ok {
		return idtools.IDPair{}, errors.New("cannot convert stat value to syscall.Stat_t")
	}
	return idtools.IDPair{UID: int(s.Uid), GID: int(s.Gid)}, nil
}
|
||||
|
||||
// handleTarTypeBlockCharFifo is an OS-specific helper function used by
// createTarFile to handle the following types of header: Block; Char; Fifo.
// It creates the corresponding device/fifo node via mknod.
func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error {
	if rsystem.RunningInUserNS() {
		// cannot create a device if running in user namespace
		return nil
	}

	// combine the permission bits with the node-type flag for mknod
	mode := uint32(hdr.Mode & 07777)
	switch hdr.Typeflag {
	case tar.TypeBlock:
		mode |= unix.S_IFBLK
	case tar.TypeChar:
		mode |= unix.S_IFCHR
	case tar.TypeFifo:
		mode |= unix.S_IFIFO
	}

	return system.Mknod(path, mode, int(system.Mkdev(hdr.Devmajor, hdr.Devminor)))
}
|
||||
|
||||
// handleLChmod applies the header's permissions to path. Symlinks are
// skipped (chmod would follow the link); hardlinks are only chmodded when
// the link target exists and is not itself a symlink.
func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error {
	switch hdr.Typeflag {
	case tar.TypeSymlink:
		// never chmod through a symlink
		return nil
	case tar.TypeLink:
		if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) {
			return os.Chmod(path, hdrInfo.Mode())
		}
		return nil
	default:
		return os.Chmod(path, hdrInfo.Mode())
	}
}
|
|
@ -0,0 +1,77 @@
|
|||
package archive // import "github.com/docker/docker/pkg/archive"
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/docker/docker/pkg/idtools"
|
||||
"github.com/docker/docker/pkg/longpath"
|
||||
)
|
||||
|
||||
// fixVolumePathPrefix does platform specific processing to ensure that if
// the path being passed in is not in a volume path format, convert it to one.
// On Windows this prepends the long-path prefix via longpath.AddPrefix.
func fixVolumePathPrefix(srcPath string) string {
	return longpath.AddPrefix(srcPath)
}
|
||||
|
||||
// getWalkRoot calculates the root path when performing a TarWithOptions.
// We use a separate function as this is platform specific. Unlike the unix
// variant, Windows can safely use filepath.Join's cleaning behavior.
func getWalkRoot(srcPath string, include string) string {
	return filepath.Join(srcPath, include)
}
|
||||
|
||||
// CanonicalTarNameForPath returns platform-specific filepath
// to canonical posix-style path for tar archival. p is relative
// path.
func CanonicalTarNameForPath(p string) (string, error) {
	// windows: convert windows style relative path with backslashes
	// into forward slashes. Since windows does not allow '/' or '\'
	// in file names, it is mostly safe to replace however we must
	// check just in case
	if strings.ContainsRune(p, '/') {
		return "", fmt.Errorf("Windows path contains forward slash: %s", p)
	}
	return strings.Replace(p, string(os.PathSeparator), "/", -1), nil
}
|
||||
|
||||
// chmodTarEntry is used to adjust the file permissions used in tar header based
|
||||
// on the platform the archival is done.
|
||||
func chmodTarEntry(perm os.FileMode) os.FileMode {
|
||||
//perm &= 0755 // this 0-ed out tar flags (like link, regular file, directory marker etc.)
|
||||
permPart := perm & os.ModePerm
|
||||
noPermPart := perm &^ os.ModePerm
|
||||
// Add the x bit: make everything +x from windows
|
||||
permPart |= 0111
|
||||
permPart &= 0755
|
||||
|
||||
return noPermPart | permPart
|
||||
}
|
||||
|
||||
func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) (err error) {
|
||||
// do nothing. no notion of Rdev, Nlink in stat on Windows
|
||||
return
|
||||
}
|
||||
|
||||
func getInodeFromStat(stat interface{}) (inode uint64, err error) {
|
||||
// do nothing. no notion of Inode in stat on Windows
|
||||
return
|
||||
}
|
||||
|
||||
// handleTarTypeBlockCharFifo is an OS-specific helper function used by
|
||||
// createTarFile to handle the following types of header: Block; Char; Fifo
|
||||
func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// handleLChmod applies the header's permission bits to a symlink target.
// No-op on Windows.
func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error {
	return nil
}
|
||||
|
||||
// getFileUIDGID resolves the owning user and group of the file described
// by stat. Windows has no file-ownership mapping yet, so the root pair
// (0, 0) is always returned.
func getFileUIDGID(stat interface{}) (idtools.IDPair, error) {
	// no notion of file ownership mapping yet on Windows
	return idtools.IDPair{UID: 0, GID: 0}, nil
}
|
|
@ -0,0 +1,441 @@
|
|||
package archive // import "github.com/docker/docker/pkg/archive"
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strings"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/docker/docker/pkg/idtools"
|
||||
"github.com/docker/docker/pkg/pools"
|
||||
"github.com/docker/docker/pkg/system"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// ChangeType represents the change type.
|
||||
type ChangeType int
|
||||
|
||||
const (
|
||||
// ChangeModify represents the modify operation.
|
||||
ChangeModify = iota
|
||||
// ChangeAdd represents the add operation.
|
||||
ChangeAdd
|
||||
// ChangeDelete represents the delete operation.
|
||||
ChangeDelete
|
||||
)
|
||||
|
||||
func (c ChangeType) String() string {
|
||||
switch c {
|
||||
case ChangeModify:
|
||||
return "C"
|
||||
case ChangeAdd:
|
||||
return "A"
|
||||
case ChangeDelete:
|
||||
return "D"
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// Change represents a change, it wraps the change type and path.
|
||||
// It describes changes of the files in the path respect to the
|
||||
// parent layers. The change could be modify, add, delete.
|
||||
// This is used for layer diff.
|
||||
type Change struct {
|
||||
Path string
|
||||
Kind ChangeType
|
||||
}
|
||||
|
||||
func (change *Change) String() string {
|
||||
return fmt.Sprintf("%s %s", change.Kind, change.Path)
|
||||
}
|
||||
|
||||
// for sort.Sort
|
||||
type changesByPath []Change
|
||||
|
||||
func (c changesByPath) Less(i, j int) bool { return c[i].Path < c[j].Path }
|
||||
func (c changesByPath) Len() int { return len(c) }
|
||||
func (c changesByPath) Swap(i, j int) { c[j], c[i] = c[i], c[j] }
|
||||
|
||||
// Gnu tar and the go tar writer don't have sub-second mtime
|
||||
// precision, which is problematic when we apply changes via tar
|
||||
// files, we handle this by comparing for exact times, *or* same
|
||||
// second count and either a or b having exactly 0 nanoseconds
|
||||
func sameFsTime(a, b time.Time) bool {
|
||||
return a == b ||
|
||||
(a.Unix() == b.Unix() &&
|
||||
(a.Nanosecond() == 0 || b.Nanosecond() == 0))
|
||||
}
|
||||
|
||||
func sameFsTimeSpec(a, b syscall.Timespec) bool {
|
||||
return a.Sec == b.Sec &&
|
||||
(a.Nsec == b.Nsec || a.Nsec == 0 || b.Nsec == 0)
|
||||
}
|
||||
|
||||
// Changes walks the path rw and determines changes for the files in the path,
// with respect to the parent layers. This variant is tuned for aufs:
// whiteout files mark deletions and aufs metadata entries are skipped.
func Changes(layers []string, rw string) ([]Change, error) {
	return changes(layers, rw, aufsDeletedFile, aufsMetadataSkip)
}
|
||||
|
||||
func aufsMetadataSkip(path string) (skip bool, err error) {
|
||||
skip, err = filepath.Match(string(os.PathSeparator)+WhiteoutMetaPrefix+"*", path)
|
||||
if err != nil {
|
||||
skip = true
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func aufsDeletedFile(root, path string, fi os.FileInfo) (string, error) {
|
||||
f := filepath.Base(path)
|
||||
|
||||
// If there is a whiteout, then the file was removed
|
||||
if strings.HasPrefix(f, WhiteoutPrefix) {
|
||||
originalFile := f[len(WhiteoutPrefix):]
|
||||
return filepath.Join(filepath.Dir(path), originalFile), nil
|
||||
}
|
||||
|
||||
return "", nil
|
||||
}
|
||||
|
||||
// skipChange decides whether a path inside the rw layer must be excluded
// from the change list (e.g. graph-driver metadata files).
type skipChange func(string) (bool, error)

// deleteChange maps a driver-specific deletion marker (whiteout file,
// 0/0 char device, ...) to the path it deletes, or "" when path is no marker.
type deleteChange func(string, string, os.FileInfo) (string, error)
|
||||
|
||||
// changes walks the rw layer and computes the filesystem changes relative
// to the read-only parent layers.
//
// layers lists the parent layer roots; rw is the writable layer root.
// dc translates driver-specific deletion markers into the deleted path and
// sc (optional) filters out driver metadata entries.
func changes(layers []string, rw string, dc deleteChange, sc skipChange) ([]Change, error) {
	var (
		changes     []Change
		changedDirs = make(map[string]struct{})
	)

	err := filepath.Walk(rw, func(path string, f os.FileInfo, err error) error {
		if err != nil {
			return err
		}

		// Rebase path
		path, err = filepath.Rel(rw, path)
		if err != nil {
			return err
		}

		// As this runs on the daemon side, file paths are OS specific.
		path = filepath.Join(string(os.PathSeparator), path)

		// Skip root
		if path == string(os.PathSeparator) {
			return nil
		}

		if sc != nil {
			if skip, err := sc(path); skip {
				return err
			}
		}

		change := Change{
			Path: path,
		}

		deletedFile, err := dc(rw, path, f)
		if err != nil {
			return err
		}

		// Find out what kind of modification happened
		if deletedFile != "" {
			change.Path = deletedFile
			change.Kind = ChangeDelete
		} else {
			// Otherwise, the file was added
			change.Kind = ChangeAdd

			// ...Unless it already existed in a top layer, in which case, it's a modification
			for _, layer := range layers {
				stat, err := os.Stat(filepath.Join(layer, path))
				if err != nil && !os.IsNotExist(err) {
					return err
				}
				if err == nil {
					// The file existed in the top layer, so that's a modification

					// However, if it's a directory, maybe it wasn't actually modified.
					// If you modify /foo/bar/baz, then /foo will be part of the changed files only because it's the parent of bar
					if stat.IsDir() && f.IsDir() {
						if f.Size() == stat.Size() && f.Mode() == stat.Mode() && sameFsTime(f.ModTime(), stat.ModTime()) {
							// Both directories are the same, don't record the change
							return nil
						}
					}
					change.Kind = ChangeModify
					break
				}
			}
		}

		// If /foo/bar/file.txt is modified, then /foo/bar must be part of the changed files.
		// This block is here to ensure the change is recorded even if the
		// modify time, mode and size of the parent directory in the rw and ro layers are all equal.
		// Check https://github.com/docker/docker/pull/13590 for details.
		if f.IsDir() {
			changedDirs[path] = struct{}{}
		}
		if change.Kind == ChangeAdd || change.Kind == ChangeDelete {
			parent := filepath.Dir(path)
			if _, ok := changedDirs[parent]; !ok && parent != "/" {
				changes = append(changes, Change{Path: parent, Kind: ChangeModify})
				changedDirs[parent] = struct{}{}
			}
		}

		// Record change
		changes = append(changes, change)
		return nil
	})
	// A vanished rw directory yields an empty change list, not an error.
	if err != nil && !os.IsNotExist(err) {
		return nil, err
	}
	return changes, nil
}
|
||||
|
||||
// FileInfo describes the information of a file.
type FileInfo struct {
	parent     *FileInfo            // enclosing directory; nil for the root node
	name       string               // base name (the OS path separator for the root)
	stat       *system.StatT        // lstat result captured during collection
	children   map[string]*FileInfo // directory entries, keyed by base name
	capability []byte               // raw security.capability xattr, if present
	added      bool                 // set once a Change has been emitted for this node
}
|
||||
|
||||
// LookUp looks up the file information of a file.
|
||||
func (info *FileInfo) LookUp(path string) *FileInfo {
|
||||
// As this runs on the daemon side, file paths are OS specific.
|
||||
parent := info
|
||||
if path == string(os.PathSeparator) {
|
||||
return info
|
||||
}
|
||||
|
||||
pathElements := strings.Split(path, string(os.PathSeparator))
|
||||
for _, elem := range pathElements {
|
||||
if elem != "" {
|
||||
child := parent.children[elem]
|
||||
if child == nil {
|
||||
return nil
|
||||
}
|
||||
parent = child
|
||||
}
|
||||
}
|
||||
return parent
|
||||
}
|
||||
|
||||
func (info *FileInfo) path() string {
|
||||
if info.parent == nil {
|
||||
// As this runs on the daemon side, file paths are OS specific.
|
||||
return string(os.PathSeparator)
|
||||
}
|
||||
return filepath.Join(info.parent.path(), info.name)
|
||||
}
|
||||
|
||||
// addChanges recursively diffs the tree rooted at info against oldInfo and
// appends the resulting Change entries to changes. Directories that merely
// contain changes are recorded as modified themselves so filesystem
// permissions can be saved and restored correctly.
func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) {
	// Remember where this node's changes begin so the directory entry can
	// be inserted in front of them later if needed.
	sizeAtEntry := len(*changes)

	if oldInfo == nil {
		// add
		change := Change{
			Path: info.path(),
			Kind: ChangeAdd,
		}
		*changes = append(*changes, change)
		info.added = true
	}

	// We make a copy so we can modify it to detect additions
	// also, we only recurse on the old dir if the new info is a directory
	// otherwise any previous delete/change is considered recursive
	oldChildren := make(map[string]*FileInfo)
	if oldInfo != nil && info.isDir() {
		for k, v := range oldInfo.children {
			oldChildren[k] = v
		}
	}

	for name, newChild := range info.children {
		oldChild := oldChildren[name]
		if oldChild != nil {
			// change?
			oldStat := oldChild.stat
			newStat := newChild.stat
			// Note: We can't compare inode or ctime or blocksize here, because these change
			// when copying a file into a container. However, that is not generally a problem
			// because any content change will change mtime, and any status change should
			// be visible when actually comparing the stat fields. The only time this
			// breaks down is if some code intentionally hides a change by setting
			// back mtime
			if statDifferent(oldStat, newStat) ||
				!bytes.Equal(oldChild.capability, newChild.capability) {
				change := Change{
					Path: newChild.path(),
					Kind: ChangeModify,
				}
				*changes = append(*changes, change)
				newChild.added = true
			}

			// Remove from copy so we can detect deletions
			delete(oldChildren, name)
		}

		newChild.addChanges(oldChild, changes)
	}
	// Anything still left in the copy existed only in the old tree.
	for _, oldChild := range oldChildren {
		// delete
		change := Change{
			Path: oldChild.path(),
			Kind: ChangeDelete,
		}
		*changes = append(*changes, change)
	}

	// If there were changes inside this directory, we need to add it, even if the directory
	// itself wasn't changed. This is needed to properly save and restore filesystem permissions.
	// As this runs on the daemon side, file paths are OS specific.
	if len(*changes) > sizeAtEntry && info.isDir() && !info.added && info.path() != string(os.PathSeparator) {
		change := Change{
			Path: info.path(),
			Kind: ChangeModify,
		}
		// Let's insert the directory entry before the recently added entries located inside this dir
		*changes = append(*changes, change) // just to resize the slice, will be overwritten
		copy((*changes)[sizeAtEntry+1:], (*changes)[sizeAtEntry:])
		(*changes)[sizeAtEntry] = change
	}

}
|
||||
|
||||
// Changes compares the tree rooted at info against oldInfo and returns the
// list of adds, deletes and modifications. A nil oldInfo reports every
// entry as added.
func (info *FileInfo) Changes(oldInfo *FileInfo) []Change {
	var changes []Change

	info.addChanges(oldInfo, &changes)

	return changes
}
|
||||
|
||||
func newRootFileInfo() *FileInfo {
|
||||
// As this runs on the daemon side, file paths are OS specific.
|
||||
root := &FileInfo{
|
||||
name: string(os.PathSeparator),
|
||||
children: make(map[string]*FileInfo),
|
||||
}
|
||||
return root
|
||||
}
|
||||
|
||||
// ChangesDirs compares two directories and generates an array of Change objects describing the changes.
|
||||
// If oldDir is "", then all files in newDir will be Add-Changes.
|
||||
func ChangesDirs(newDir, oldDir string) ([]Change, error) {
|
||||
var (
|
||||
oldRoot, newRoot *FileInfo
|
||||
)
|
||||
if oldDir == "" {
|
||||
emptyDir, err := ioutil.TempDir("", "empty")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer os.Remove(emptyDir)
|
||||
oldDir = emptyDir
|
||||
}
|
||||
oldRoot, newRoot, err := collectFileInfoForChanges(oldDir, newDir)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return newRoot.Changes(oldRoot), nil
|
||||
}
|
||||
|
||||
// ChangesSize calculates the size in bytes of the provided changes, based on newDir.
|
||||
func ChangesSize(newDir string, changes []Change) int64 {
|
||||
var (
|
||||
size int64
|
||||
sf = make(map[uint64]struct{})
|
||||
)
|
||||
for _, change := range changes {
|
||||
if change.Kind == ChangeModify || change.Kind == ChangeAdd {
|
||||
file := filepath.Join(newDir, change.Path)
|
||||
fileInfo, err := os.Lstat(file)
|
||||
if err != nil {
|
||||
logrus.Errorf("Can not stat %q: %s", file, err)
|
||||
continue
|
||||
}
|
||||
|
||||
if fileInfo != nil && !fileInfo.IsDir() {
|
||||
if hasHardlinks(fileInfo) {
|
||||
inode := getIno(fileInfo)
|
||||
if _, ok := sf[inode]; !ok {
|
||||
size += fileInfo.Size()
|
||||
sf[inode] = struct{}{}
|
||||
}
|
||||
} else {
|
||||
size += fileInfo.Size()
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return size
|
||||
}
|
||||
|
||||
// ExportChanges produces an Archive from the provided changes, relative to dir.
// The archive is produced asynchronously through a pipe: deletions become
// whiteout entries, adds/modifications are read from dir. Per-entry errors
// are logged and skipped rather than aborting the stream.
func ExportChanges(dir string, changes []Change, uidMaps, gidMaps []idtools.IDMap) (io.ReadCloser, error) {
	reader, writer := io.Pipe()
	go func() {
		ta := newTarAppender(idtools.NewIDMappingsFromMaps(uidMaps, gidMaps), writer, nil)

		// this buffer is needed for the duration of this piped stream
		defer pools.BufioWriter32KPool.Put(ta.Buffer)

		sort.Sort(changesByPath(changes))

		// In general we log errors here but ignore them because
		// during e.g. a diff operation the container can continue
		// mutating the filesystem and we can see transient errors
		// from this
		for _, change := range changes {
			if change.Kind == ChangeDelete {
				whiteOutDir := filepath.Dir(change.Path)
				whiteOutBase := filepath.Base(change.Path)
				whiteOut := filepath.Join(whiteOutDir, WhiteoutPrefix+whiteOutBase)
				timestamp := time.Now()
				hdr := &tar.Header{
					Name:       whiteOut[1:],
					Size:       0,
					ModTime:    timestamp,
					AccessTime: timestamp,
					ChangeTime: timestamp,
				}
				if err := ta.TarWriter.WriteHeader(hdr); err != nil {
					logrus.Debugf("Can't write whiteout header: %s", err)
				}
			} else {
				path := filepath.Join(dir, change.Path)
				if err := ta.addTarFile(path, change.Path[1:]); err != nil {
					logrus.Debugf("Can't add file %s to tar: %s", path, err)
				}
			}
		}

		// Make sure to check the error on Close.
		if err := ta.TarWriter.Close(); err != nil {
			logrus.Debugf("Can't close layer: %s", err)
		}
		if err := writer.Close(); err != nil {
			logrus.Debugf("failed close Changes writer: %s", err)
		}
	}()
	return reader, nil
}
|
|
@ -0,0 +1,313 @@
|
|||
package archive // import "github.com/docker/docker/pkg/archive"
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"syscall"
|
||||
"unsafe"
|
||||
|
||||
"github.com/docker/docker/pkg/system"
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
// walker is used to implement collectFileInfoForChanges on linux. Where this
// method in general returns the entire contents of two directory trees, we
// optimize some FS calls out on linux. In particular, we take advantage of the
// fact that getdents(2) returns the inode of each file in the directory being
// walked, which, when walking two trees in parallel to generate a list of
// changes, can be used to prune subtrees without ever having to lstat(2) them
// directly. Eliminating stat calls in this way can save up to seconds on large
// images.
type walker struct {
	dir1  string    // root of the first (old) tree
	dir2  string    // root of the second (new) tree
	root1 *FileInfo // collected contents of dir1
	root2 *FileInfo // collected contents of dir2
}
|
||||
|
||||
// collectFileInfoForChanges returns a complete representation of the trees
// rooted at dir1 and dir2, with one important exception: any subtree or
// leaf where the inode and device numbers are an exact match between dir1
// and dir2 will be pruned from the results. This method is *only* to be used
// to generating a list of changes between the two directories, as it does not
// reflect the full contents.
func collectFileInfoForChanges(dir1, dir2 string) (*FileInfo, *FileInfo, error) {
	w := &walker{
		dir1:  dir1,
		dir2:  dir2,
		root1: newRootFileInfo(),
		root2: newRootFileInfo(),
	}

	// Both roots must exist before walking the pair in parallel.
	i1, err := os.Lstat(w.dir1)
	if err != nil {
		return nil, nil, err
	}
	i2, err := os.Lstat(w.dir2)
	if err != nil {
		return nil, nil, err
	}

	if err := w.walk("/", i1, i2); err != nil {
		return nil, nil, err
	}

	return w.root1, w.root2, nil
}
|
||||
|
||||
// Given a FileInfo, its path info, and a reference to the root of the tree
// being constructed, register this file with the tree. A nil fi means the
// path does not exist under this root and nothing is recorded.
func walkchunk(path string, fi os.FileInfo, dir string, root *FileInfo) error {
	if fi == nil {
		return nil
	}
	parent := root.LookUp(filepath.Dir(path))
	if parent == nil {
		return fmt.Errorf("walkchunk: Unexpectedly no parent for %s", path)
	}
	info := &FileInfo{
		name:     filepath.Base(path),
		children: make(map[string]*FileInfo),
		parent:   parent,
	}
	cpath := filepath.Join(dir, path)
	stat, err := system.FromStatT(fi.Sys().(*syscall.Stat_t))
	if err != nil {
		return err
	}
	info.stat = stat
	info.capability, _ = system.Lgetxattr(cpath, "security.capability") // lgetxattr(2): fs access
	parent.children[info.name] = info
	return nil
}
|
||||
|
||||
// Walk a subtree rooted at the same path in both trees being iterated. For
// example, /docker/overlay/1234/a/b/c/d and /docker/overlay/8888/a/b/c/d
func (w *walker) walk(path string, i1, i2 os.FileInfo) (err error) {
	// Register these nodes with the return trees, unless we're still at the
	// (already-created) roots:
	if path != "/" {
		if err := walkchunk(path, i1, w.dir1, w.root1); err != nil {
			return err
		}
		if err := walkchunk(path, i2, w.dir2, w.root2); err != nil {
			return err
		}
	}

	is1Dir := i1 != nil && i1.IsDir()
	is2Dir := i2 != nil && i2.IsDir()

	// Inode-based pruning is only valid when both entries live on the same
	// device; otherwise equal inode numbers are meaningless.
	sameDevice := false
	if i1 != nil && i2 != nil {
		si1 := i1.Sys().(*syscall.Stat_t)
		si2 := i2.Sys().(*syscall.Stat_t)
		if si1.Dev == si2.Dev {
			sameDevice = true
		}
	}

	// If these files are both non-existent, or leaves (non-dirs), we are done.
	if !is1Dir && !is2Dir {
		return nil
	}

	// Fetch the names of all the files contained in both directories being walked:
	var names1, names2 []nameIno
	if is1Dir {
		names1, err = readdirnames(filepath.Join(w.dir1, path)) // getdents(2): fs access
		if err != nil {
			return err
		}
	}
	if is2Dir {
		names2, err = readdirnames(filepath.Join(w.dir2, path)) // getdents(2): fs access
		if err != nil {
			return err
		}
	}

	// We have lists of the files contained in both parallel directories, sorted
	// in the same order. Walk them in parallel, generating a unique merged list
	// of all items present in either or both directories.
	var names []string
	ix1 := 0
	ix2 := 0

	for {
		if ix1 >= len(names1) {
			break
		}
		if ix2 >= len(names2) {
			break
		}

		ni1 := names1[ix1]
		ni2 := names2[ix2]

		switch bytes.Compare([]byte(ni1.name), []byte(ni2.name)) {
		case -1: // ni1 < ni2 -- advance ni1
			// we will not encounter ni1 in names2
			names = append(names, ni1.name)
			ix1++
		case 0: // ni1 == ni2
			// Matching inode on the same device: the subtree is unchanged
			// and is pruned here (the early-pruning optimization).
			if ni1.ino != ni2.ino || !sameDevice {
				names = append(names, ni1.name)
			}
			ix1++
			ix2++
		case 1: // ni1 > ni2 -- advance ni2
			// we will not encounter ni2 in names1
			names = append(names, ni2.name)
			ix2++
		}
	}
	for ix1 < len(names1) {
		names = append(names, names1[ix1].name)
		ix1++
	}
	for ix2 < len(names2) {
		names = append(names, names2[ix2].name)
		ix2++
	}

	// For each of the names present in either or both of the directories being
	// iterated, stat the name under each root, and recurse the pair of them:
	for _, name := range names {
		fname := filepath.Join(path, name)
		var cInfo1, cInfo2 os.FileInfo
		if is1Dir {
			cInfo1, err = os.Lstat(filepath.Join(w.dir1, fname)) // lstat(2): fs access
			if err != nil && !os.IsNotExist(err) {
				return err
			}
		}
		if is2Dir {
			cInfo2, err = os.Lstat(filepath.Join(w.dir2, fname)) // lstat(2): fs access
			if err != nil && !os.IsNotExist(err) {
				return err
			}
		}
		if err = w.walk(fname, cInfo1, cInfo2); err != nil {
			return err
		}
	}
	return nil
}
|
||||
|
||||
// {name,inode} pairs used to support the early-pruning logic of the walker type
|
||||
type nameIno struct {
|
||||
name string
|
||||
ino uint64
|
||||
}
|
||||
|
||||
type nameInoSlice []nameIno
|
||||
|
||||
func (s nameInoSlice) Len() int { return len(s) }
|
||||
func (s nameInoSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
|
||||
func (s nameInoSlice) Less(i, j int) bool { return s[i].name < s[j].name }
|
||||
|
||||
// readdirnames is a hacked-apart version of the Go stdlib code, exposing inode
// numbers further up the stack when reading directory contents. Unlike
// os.Readdirnames, which returns a list of filenames, this function returns a
// list of {filename,inode} pairs. The result is sorted by name.
func readdirnames(dirname string) (names []nameIno, err error) {
	var (
		size = 100
		buf  = make([]byte, 4096)
		nbuf int
		bufp int
		nb   int
	)

	f, err := os.Open(dirname)
	if err != nil {
		return nil, err
	}
	defer f.Close()

	names = make([]nameIno, 0, size) // Empty with room to grow.
	for {
		// Refill the buffer if necessary
		if bufp >= nbuf {
			bufp = 0
			nbuf, err = unix.ReadDirent(int(f.Fd()), buf) // getdents on linux
			if nbuf < 0 {
				nbuf = 0
			}
			if err != nil {
				return nil, os.NewSyscallError("readdirent", err)
			}
			if nbuf <= 0 {
				break // EOF
			}
		}

		// Drain the buffer
		nb, names = parseDirent(buf[bufp:nbuf], names)
		bufp += nb
	}

	sl := nameInoSlice(names)
	sort.Sort(sl)
	return sl, nil
}
|
||||
|
||||
// parseDirent is a minor modification of unix.ParseDirent (linux version)
// which returns {name,inode} pairs instead of just names. consumed is the
// number of bytes of buf that were processed (whole dirent records only).
func parseDirent(buf []byte, names []nameIno) (consumed int, newnames []nameIno) {
	origlen := len(buf)
	for len(buf) > 0 {
		dirent := (*unix.Dirent)(unsafe.Pointer(&buf[0]))
		buf = buf[dirent.Reclen:]
		if dirent.Ino == 0 { // File absent in directory.
			continue
		}
		// dirent.Name is NUL-terminated; clen finds the real length.
		bytes := (*[10000]byte)(unsafe.Pointer(&dirent.Name[0]))
		var name = string(bytes[0:clen(bytes[:])])
		if name == "." || name == ".." { // Useless names
			continue
		}
		names = append(names, nameIno{name, dirent.Ino})
	}
	return origlen - len(buf), names
}
|
||||
|
||||
// clen returns the length of the NUL-terminated string stored in n: the
// index of the first zero byte, or len(n) when no terminator is present.
// Uses the optimized stdlib bytes.IndexByte instead of a manual scan.
func clen(n []byte) int {
	if i := bytes.IndexByte(n, 0); i >= 0 {
		return i
	}
	return len(n)
}
|
||||
|
||||
// OverlayChanges walks the path rw and determines changes for the files in the path,
// with respect to the parent layers. This variant is tuned for overlayfs,
// where deletions appear as 0/0 character devices and opaque directories.
func OverlayChanges(layers []string, rw string) ([]Change, error) {
	return changes(layers, rw, overlayDeletedFile, nil)
}
|
||||
|
||||
func overlayDeletedFile(root, path string, fi os.FileInfo) (string, error) {
|
||||
if fi.Mode()&os.ModeCharDevice != 0 {
|
||||
s := fi.Sys().(*syscall.Stat_t)
|
||||
if unix.Major(uint64(s.Rdev)) == 0 && unix.Minor(uint64(s.Rdev)) == 0 { // nolint: unconvert
|
||||
return path, nil
|
||||
}
|
||||
}
|
||||
if fi.Mode()&os.ModeDir != 0 {
|
||||
opaque, err := system.Lgetxattr(filepath.Join(root, path), "trusted.overlay.opaque")
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if len(opaque) == 1 && opaque[0] == 'y' {
|
||||
return path, nil
|
||||
}
|
||||
}
|
||||
|
||||
return "", nil
|
||||
|
||||
}
|
|
@ -0,0 +1,97 @@
|
|||
// +build !linux
|
||||
|
||||
package archive // import "github.com/docker/docker/pkg/archive"
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strings"
|
||||
|
||||
"github.com/docker/docker/pkg/system"
|
||||
)
|
||||
|
||||
func collectFileInfoForChanges(oldDir, newDir string) (*FileInfo, *FileInfo, error) {
|
||||
var (
|
||||
oldRoot, newRoot *FileInfo
|
||||
err1, err2 error
|
||||
errs = make(chan error, 2)
|
||||
)
|
||||
go func() {
|
||||
oldRoot, err1 = collectFileInfo(oldDir)
|
||||
errs <- err1
|
||||
}()
|
||||
go func() {
|
||||
newRoot, err2 = collectFileInfo(newDir)
|
||||
errs <- err2
|
||||
}()
|
||||
|
||||
// block until both routines have returned
|
||||
for i := 0; i < 2; i++ {
|
||||
if err := <-errs; err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return oldRoot, newRoot, nil
|
||||
}
|
||||
|
||||
// collectFileInfo walks sourceDir and builds the full FileInfo tree of its
// contents: lstat data plus the security.capability xattr of every entry.
func collectFileInfo(sourceDir string) (*FileInfo, error) {
	root := newRootFileInfo()

	err := filepath.Walk(sourceDir, func(path string, f os.FileInfo, err error) error {
		if err != nil {
			return err
		}

		// Rebase path
		relPath, err := filepath.Rel(sourceDir, path)
		if err != nil {
			return err
		}

		// As this runs on the daemon side, file paths are OS specific.
		relPath = filepath.Join(string(os.PathSeparator), relPath)

		// See https://github.com/golang/go/issues/9168 - bug in filepath.Join.
		// Temporary workaround. If the returned path starts with two backslashes,
		// trim it down to a single backslash. Only relevant on Windows.
		if runtime.GOOS == "windows" {
			if strings.HasPrefix(relPath, `\\`) {
				relPath = relPath[1:]
			}
		}

		// Skip the root itself; it already exists in the tree.
		if relPath == string(os.PathSeparator) {
			return nil
		}

		parent := root.LookUp(filepath.Dir(relPath))
		if parent == nil {
			return fmt.Errorf("collectFileInfo: Unexpectedly no parent for %s", relPath)
		}

		info := &FileInfo{
			name:     filepath.Base(relPath),
			children: make(map[string]*FileInfo),
			parent:   parent,
		}

		s, err := system.Lstat(path)
		if err != nil {
			return err
		}
		info.stat = s

		info.capability, _ = system.Lgetxattr(path, "security.capability")

		parent.children[info.name] = info

		return nil
	})
	if err != nil {
		return nil, err
	}
	return root, nil
}
|
|
@ -0,0 +1,37 @@
|
|||
// +build !windows
|
||||
|
||||
package archive // import "github.com/docker/docker/pkg/archive"
|
||||
|
||||
import (
|
||||
"os"
|
||||
"syscall"
|
||||
|
||||
"github.com/docker/docker/pkg/system"
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool {
|
||||
// Don't look at size for dirs, its not a good measure of change
|
||||
if oldStat.Mode() != newStat.Mode() ||
|
||||
oldStat.UID() != newStat.UID() ||
|
||||
oldStat.GID() != newStat.GID() ||
|
||||
oldStat.Rdev() != newStat.Rdev() ||
|
||||
// Don't look at size for dirs, its not a good measure of change
|
||||
(oldStat.Mode()&unix.S_IFDIR != unix.S_IFDIR &&
|
||||
(!sameFsTimeSpec(oldStat.Mtim(), newStat.Mtim()) || (oldStat.Size() != newStat.Size()))) {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (info *FileInfo) isDir() bool {
|
||||
return info.parent == nil || info.stat.Mode()&unix.S_IFDIR != 0
|
||||
}
|
||||
|
||||
func getIno(fi os.FileInfo) uint64 {
|
||||
return fi.Sys().(*syscall.Stat_t).Ino
|
||||
}
|
||||
|
||||
func hasHardlinks(fi os.FileInfo) bool {
|
||||
return fi.Sys().(*syscall.Stat_t).Nlink > 1
|
||||
}
|
|
@ -0,0 +1,30 @@
|
|||
package archive // import "github.com/docker/docker/pkg/archive"
|
||||
|
||||
import (
|
||||
"os"
|
||||
|
||||
"github.com/docker/docker/pkg/system"
|
||||
)
|
||||
|
||||
// statDifferent reports whether two lstat results differ in a way that
// must be recorded as a modification (Windows variant: no UID/GID/Rdev;
// compares mtime, mode and — for non-directories — size).
func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool {

	// Don't look at size for dirs, its not a good measure of change
	if oldStat.Mtim() != newStat.Mtim() ||
		oldStat.Mode() != newStat.Mode() ||
		oldStat.Size() != newStat.Size() && !oldStat.Mode().IsDir() {
		return true
	}
	return false
}
|
||||
|
||||
// isDir reports whether this node is a directory; the synthetic root node
// (nil parent) is always treated as one.
func (info *FileInfo) isDir() bool {
	return info.parent == nil || info.stat.Mode().IsDir()
}
|
||||
|
||||
// getIno has no meaning on Windows; every file reports inode 0, which
// effectively disables hardlink deduplication in ChangesSize.
func getIno(fi os.FileInfo) uint64 {
	return 0
}
|
||||
|
||||
// hasHardlinks always reports false: hardlink detection is not supported
// on Windows.
func hasHardlinks(fi os.FileInfo) bool {
	return false
}
|
|
@ -0,0 +1,472 @@
|
|||
package archive // import "github.com/docker/docker/pkg/archive"
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"errors"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/docker/docker/pkg/system"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// Errors used or returned by this file.
var (
	// ErrNotDirectory indicates a path expected to be a directory is not one.
	ErrNotDirectory = errors.New("not a directory")
	// ErrDirNotExists indicates a required directory is absent.
	ErrDirNotExists = errors.New("no such directory")
	// ErrCannotCopyDir indicates a directory was given where only a file is allowed.
	ErrCannotCopyDir = errors.New("cannot copy directory")
	// ErrInvalidCopySource indicates the copy source content is unusable.
	ErrInvalidCopySource = errors.New("invalid copy source content")
)
|
||||
|
||||
// PreserveTrailingDotOrSeparator returns the given cleaned path (after
|
||||
// processing using any utility functions from the path or filepath stdlib
|
||||
// packages) and appends a trailing `/.` or `/` if its corresponding original
|
||||
// path (from before being processed by utility functions from the path or
|
||||
// filepath stdlib packages) ends with a trailing `/.` or `/`. If the cleaned
|
||||
// path already ends in a `.` path segment, then another is not added. If the
|
||||
// clean path already ends in the separator, then another is not added.
|
||||
func PreserveTrailingDotOrSeparator(cleanedPath string, originalPath string, sep byte) string {
|
||||
// Ensure paths are in platform semantics
|
||||
cleanedPath = strings.Replace(cleanedPath, "/", string(sep), -1)
|
||||
originalPath = strings.Replace(originalPath, "/", string(sep), -1)
|
||||
|
||||
if !specifiesCurrentDir(cleanedPath) && specifiesCurrentDir(originalPath) {
|
||||
if !hasTrailingPathSeparator(cleanedPath, sep) {
|
||||
// Add a separator if it doesn't already end with one (a cleaned
|
||||
// path would only end in a separator if it is the root).
|
||||
cleanedPath += string(sep)
|
||||
}
|
||||
cleanedPath += "."
|
||||
}
|
||||
|
||||
if !hasTrailingPathSeparator(cleanedPath, sep) && hasTrailingPathSeparator(originalPath, sep) {
|
||||
cleanedPath += string(sep)
|
||||
}
|
||||
|
||||
return cleanedPath
|
||||
}
|
||||
|
||||
// assertsDirectory returns whether the given path is
|
||||
// asserted to be a directory, i.e., the path ends with
|
||||
// a trailing '/' or `/.`, assuming a path separator of `/`.
|
||||
func assertsDirectory(path string, sep byte) bool {
|
||||
return hasTrailingPathSeparator(path, sep) || specifiesCurrentDir(path)
|
||||
}
|
||||
|
||||
// hasTrailingPathSeparator reports whether path ends with the given
// separator byte. An empty path has no trailing separator.
func hasTrailingPathSeparator(path string, sep byte) bool {
	if path == "" {
		return false
	}
	return path[len(path)-1] == sep
}
|
||||
|
||||
// specifiesCurrentDir reports whether the last path segment of path is
// `.`, i.e. the path names the "current directory".
func specifiesCurrentDir(path string) bool {
	base := filepath.Base(path)
	return base == "."
}
|
||||
|
||||
// SplitPathDirEntry splits the given path between its directory name and its
|
||||
// basename by first cleaning the path but preserves a trailing "." if the
|
||||
// original path specified the current directory.
|
||||
func SplitPathDirEntry(path string) (dir, base string) {
|
||||
cleanedPath := filepath.Clean(filepath.FromSlash(path))
|
||||
|
||||
if specifiesCurrentDir(path) {
|
||||
cleanedPath += string(os.PathSeparator) + "."
|
||||
}
|
||||
|
||||
return filepath.Dir(cleanedPath), filepath.Base(cleanedPath)
|
||||
}
|
||||
|
||||
// TarResource archives the resource described by the given CopyInfo to a Tar
|
||||
// archive. A non-nil error is returned if sourcePath does not exist or is
|
||||
// asserted to be a directory but exists as another type of file.
|
||||
//
|
||||
// This function acts as a convenient wrapper around TarWithOptions, which
|
||||
// requires a directory as the source path. TarResource accepts either a
|
||||
// directory or a file path and correctly sets the Tar options.
|
||||
func TarResource(sourceInfo CopyInfo) (content io.ReadCloser, err error) {
|
||||
return TarResourceRebase(sourceInfo.Path, sourceInfo.RebaseName)
|
||||
}
|
||||
|
||||
// TarResourceRebase is like TarResource but renames the first path element of
|
||||
// items in the resulting tar archive to match the given rebaseName if not "".
|
||||
func TarResourceRebase(sourcePath, rebaseName string) (content io.ReadCloser, err error) {
|
||||
sourcePath = normalizePath(sourcePath)
|
||||
if _, err = os.Lstat(sourcePath); err != nil {
|
||||
// Catches the case where the source does not exist or is not a
|
||||
// directory if asserted to be a directory, as this also causes an
|
||||
// error.
|
||||
return
|
||||
}
|
||||
|
||||
// Separate the source path between its directory and
|
||||
// the entry in that directory which we are archiving.
|
||||
sourceDir, sourceBase := SplitPathDirEntry(sourcePath)
|
||||
opts := TarResourceRebaseOpts(sourceBase, rebaseName)
|
||||
|
||||
logrus.Debugf("copying %q from %q", sourceBase, sourceDir)
|
||||
return TarWithOptions(sourceDir, opts)
|
||||
}
|
||||
|
||||
// TarResourceRebaseOpts does not preform the Tar, but instead just creates the rebase
|
||||
// parameters to be sent to TarWithOptions (the TarOptions struct)
|
||||
func TarResourceRebaseOpts(sourceBase string, rebaseName string) *TarOptions {
|
||||
filter := []string{sourceBase}
|
||||
return &TarOptions{
|
||||
Compression: Uncompressed,
|
||||
IncludeFiles: filter,
|
||||
IncludeSourceDir: true,
|
||||
RebaseNames: map[string]string{
|
||||
sourceBase: rebaseName,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// CopyInfo holds basic info about the source
// or destination path of a copy operation.
type CopyInfo struct {
	// Path is the (normalized) location of the resource.
	Path string
	// Exists reports whether the path was present when stat'ed.
	Exists bool
	// IsDir reports whether the path is a directory.
	IsDir bool
	// RebaseName, when non-empty, is the name archive entries should be
	// renamed to (set when symlink resolution changed the base name).
	RebaseName string
}
|
||||
|
||||
// CopyInfoSourcePath stats the given path to create a CopyInfo
|
||||
// struct representing that resource for the source of an archive copy
|
||||
// operation. The given path should be an absolute local path. A source path
|
||||
// has all symlinks evaluated that appear before the last path separator ("/"
|
||||
// on Unix). As it is to be a copy source, the path must exist.
|
||||
func CopyInfoSourcePath(path string, followLink bool) (CopyInfo, error) {
|
||||
// normalize the file path and then evaluate the symbol link
|
||||
// we will use the target file instead of the symbol link if
|
||||
// followLink is set
|
||||
path = normalizePath(path)
|
||||
|
||||
resolvedPath, rebaseName, err := ResolveHostSourcePath(path, followLink)
|
||||
if err != nil {
|
||||
return CopyInfo{}, err
|
||||
}
|
||||
|
||||
stat, err := os.Lstat(resolvedPath)
|
||||
if err != nil {
|
||||
return CopyInfo{}, err
|
||||
}
|
||||
|
||||
return CopyInfo{
|
||||
Path: resolvedPath,
|
||||
Exists: true,
|
||||
IsDir: stat.IsDir(),
|
||||
RebaseName: rebaseName,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// CopyInfoDestinationPath stats the given path to create a CopyInfo
// struct representing that resource for the destination of an archive copy
// operation. The given path should be an absolute local path.
//
// Unlike CopyInfoSourcePath, the final path element is also resolved when
// it is a symlink, and a missing destination is not an error as long as its
// parent directory exists.
func CopyInfoDestinationPath(path string) (info CopyInfo, err error) {
	maxSymlinkIter := 10 // filepath.EvalSymlinks uses 255, but 10 already seems like a lot.
	path = normalizePath(path)
	originalPath := path

	stat, err := os.Lstat(path)

	if err == nil && stat.Mode()&os.ModeSymlink == 0 {
		// The path exists and is not a symlink.
		return CopyInfo{
			Path:   path,
			Exists: true,
			IsDir:  stat.IsDir(),
		}, nil
	}

	// While the path is a symlink.
	for n := 0; err == nil && stat.Mode()&os.ModeSymlink != 0; n++ {
		if n > maxSymlinkIter {
			// Don't follow symlinks more than this arbitrary number of times.
			return CopyInfo{}, errors.New("too many symlinks in " + originalPath)
		}

		// The path is a symbolic link. We need to evaluate it so that the
		// destination of the copy operation is the link target and not the
		// link itself. This is notably different than CopyInfoSourcePath which
		// only evaluates symlinks before the last appearing path separator.
		// Also note that it is okay if the last path element is a broken
		// symlink as the copy operation should create the target.
		var linkTarget string

		linkTarget, err = os.Readlink(path)
		if err != nil {
			return CopyInfo{}, err
		}

		if !system.IsAbs(linkTarget) {
			// Join with the parent directory.
			dstParent, _ := SplitPathDirEntry(path)
			linkTarget = filepath.Join(dstParent, linkTarget)
		}

		path = linkTarget
		stat, err = os.Lstat(path)
	}

	if err != nil {
		// It's okay if the destination path doesn't exist. We can still
		// continue the copy operation if the parent directory exists.
		if !os.IsNotExist(err) {
			return CopyInfo{}, err
		}

		// Ensure destination parent dir exists.
		dstParent, _ := SplitPathDirEntry(path)

		// Note: this err deliberately shadows the outer one; both paths
		// return immediately below.
		parentDirStat, err := os.Stat(dstParent)
		if err != nil {
			return CopyInfo{}, err
		}
		if !parentDirStat.IsDir() {
			return CopyInfo{}, ErrNotDirectory
		}

		// Destination does not exist yet: Exists stays false so callers
		// know the copy must create it.
		return CopyInfo{Path: path}, nil
	}

	// The path exists after resolving symlinks.
	return CopyInfo{
		Path:   path,
		Exists: true,
		IsDir:  stat.IsDir(),
	}, nil
}
|
||||
|
||||
// PrepareArchiveCopy prepares the given srcContent archive, which should
// contain the archived resource described by srcInfo, to the destination
// described by dstInfo. Returns the possibly modified content archive along
// with the path to the destination directory which it should be extracted to.
//
// The switch cases below are ordered from most to least specific; the
// dstInfo.Exists cases must come before the non-existent-destination cases.
func PrepareArchiveCopy(srcContent io.Reader, srcInfo, dstInfo CopyInfo) (dstDir string, content io.ReadCloser, err error) {
	// Ensure in platform semantics
	srcInfo.Path = normalizePath(srcInfo.Path)
	dstInfo.Path = normalizePath(dstInfo.Path)

	// Separate the destination path between its directory and base
	// components in case the source archive contents need to be rebased.
	dstDir, dstBase := SplitPathDirEntry(dstInfo.Path)
	_, srcBase := SplitPathDirEntry(srcInfo.Path)

	switch {
	case dstInfo.Exists && dstInfo.IsDir:
		// The destination exists as a directory. No alteration
		// to srcContent is needed as its contents can be
		// simply extracted to the destination directory.
		return dstInfo.Path, ioutil.NopCloser(srcContent), nil
	case dstInfo.Exists && srcInfo.IsDir:
		// The destination exists as some type of file and the source
		// content is a directory. This is an error condition since
		// you cannot copy a directory to an existing file location.
		return "", nil, ErrCannotCopyDir
	case dstInfo.Exists:
		// The destination exists as some type of file and the source content
		// is also a file. The source content entry will have to be renamed to
		// have a basename which matches the destination path's basename.
		if len(srcInfo.RebaseName) != 0 {
			srcBase = srcInfo.RebaseName
		}
		return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil
	case srcInfo.IsDir:
		// The destination does not exist and the source content is an archive
		// of a directory. The archive should be extracted to the parent of
		// the destination path instead, and when it is, the directory that is
		// created as a result should take the name of the destination path.
		// The source content entries will have to be renamed to have a
		// basename which matches the destination path's basename.
		if len(srcInfo.RebaseName) != 0 {
			srcBase = srcInfo.RebaseName
		}
		return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil
	case assertsDirectory(dstInfo.Path, os.PathSeparator):
		// The destination does not exist and is asserted to be created as a
		// directory, but the source content is not a directory. This is an
		// error condition since you cannot create a directory from a file
		// source.
		return "", nil, ErrDirNotExists
	default:
		// The last remaining case is when the destination does not exist, is
		// not asserted to be a directory, and the source content is not an
		// archive of a directory. It this case, the destination file will need
		// to be created when the archive is extracted and the source content
		// entry will have to be renamed to have a basename which matches the
		// destination path's basename.
		if len(srcInfo.RebaseName) != 0 {
			srcBase = srcInfo.RebaseName
		}
		return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil
	}

}
|
||||
|
||||
// RebaseArchiveEntries rewrites the given srcContent archive replacing
// an occurrence of oldBase with newBase at the beginning of entry names.
// The rewrite happens in a goroutine; the returned ReadCloser is the read
// end of a pipe, so errors from srcContent surface on Read via
// CloseWithError. Only hardlink (tar.TypeLink) link names are rebased;
// symlink targets are passed through unchanged.
func RebaseArchiveEntries(srcContent io.Reader, oldBase, newBase string) io.ReadCloser {
	if oldBase == string(os.PathSeparator) {
		// If oldBase specifies the root directory, use an empty string as
		// oldBase instead so that newBase doesn't replace the path separator
		// that all paths will start with.
		oldBase = ""
	}

	rebased, w := io.Pipe()

	go func() {
		srcTar := tar.NewReader(srcContent)
		rebasedTar := tar.NewWriter(w)

		for {
			hdr, err := srcTar.Next()
			if err == io.EOF {
				// Signals end of archive.
				rebasedTar.Close()
				w.Close()
				return
			}
			if err != nil {
				w.CloseWithError(err)
				return
			}

			// Only the first occurrence is replaced, so only a leading
			// oldBase component is affected.
			hdr.Name = strings.Replace(hdr.Name, oldBase, newBase, 1)
			if hdr.Typeflag == tar.TypeLink {
				hdr.Linkname = strings.Replace(hdr.Linkname, oldBase, newBase, 1)
			}

			if err = rebasedTar.WriteHeader(hdr); err != nil {
				w.CloseWithError(err)
				return
			}

			if _, err = io.Copy(rebasedTar, srcTar); err != nil {
				w.CloseWithError(err)
				return
			}
		}
	}()

	return rebased
}
|
||||
|
||||
// TODO @gupta-ak. These might have to be changed in the future to be
|
||||
// continuity driver aware as well to support LCOW.
|
||||
|
||||
// CopyResource performs an archive copy from the given source path to the
|
||||
// given destination path. The source path MUST exist and the destination
|
||||
// path's parent directory must exist.
|
||||
func CopyResource(srcPath, dstPath string, followLink bool) error {
|
||||
var (
|
||||
srcInfo CopyInfo
|
||||
err error
|
||||
)
|
||||
|
||||
// Ensure in platform semantics
|
||||
srcPath = normalizePath(srcPath)
|
||||
dstPath = normalizePath(dstPath)
|
||||
|
||||
// Clean the source and destination paths.
|
||||
srcPath = PreserveTrailingDotOrSeparator(filepath.Clean(srcPath), srcPath, os.PathSeparator)
|
||||
dstPath = PreserveTrailingDotOrSeparator(filepath.Clean(dstPath), dstPath, os.PathSeparator)
|
||||
|
||||
if srcInfo, err = CopyInfoSourcePath(srcPath, followLink); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
content, err := TarResource(srcInfo)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer content.Close()
|
||||
|
||||
return CopyTo(content, srcInfo, dstPath)
|
||||
}
|
||||
|
||||
// CopyTo handles extracting the given content whose
|
||||
// entries should be sourced from srcInfo to dstPath.
|
||||
func CopyTo(content io.Reader, srcInfo CopyInfo, dstPath string) error {
|
||||
// The destination path need not exist, but CopyInfoDestinationPath will
|
||||
// ensure that at least the parent directory exists.
|
||||
dstInfo, err := CopyInfoDestinationPath(normalizePath(dstPath))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
dstDir, copyArchive, err := PrepareArchiveCopy(content, srcInfo, dstInfo)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer copyArchive.Close()
|
||||
|
||||
options := &TarOptions{
|
||||
NoLchown: true,
|
||||
NoOverwriteDirNonDir: true,
|
||||
}
|
||||
|
||||
return Untar(copyArchive, dstDir, options)
|
||||
}
|
||||
|
||||
// ResolveHostSourcePath decides real path need to be copied with parameters such as
|
||||
// whether to follow symbol link or not, if followLink is true, resolvedPath will return
|
||||
// link target of any symbol link file, else it will only resolve symlink of directory
|
||||
// but return symbol link file itself without resolving.
|
||||
func ResolveHostSourcePath(path string, followLink bool) (resolvedPath, rebaseName string, err error) {
|
||||
if followLink {
|
||||
resolvedPath, err = filepath.EvalSymlinks(path)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
resolvedPath, rebaseName = GetRebaseName(path, resolvedPath)
|
||||
} else {
|
||||
dirPath, basePath := filepath.Split(path)
|
||||
|
||||
// if not follow symbol link, then resolve symbol link of parent dir
|
||||
var resolvedDirPath string
|
||||
resolvedDirPath, err = filepath.EvalSymlinks(dirPath)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
// resolvedDirPath will have been cleaned (no trailing path separators) so
|
||||
// we can manually join it with the base path element.
|
||||
resolvedPath = resolvedDirPath + string(filepath.Separator) + basePath
|
||||
if hasTrailingPathSeparator(path, os.PathSeparator) &&
|
||||
filepath.Base(path) != filepath.Base(resolvedPath) {
|
||||
rebaseName = filepath.Base(path)
|
||||
}
|
||||
}
|
||||
return resolvedPath, rebaseName, nil
|
||||
}
|
||||
|
||||
// GetRebaseName normalizes and compares path and resolvedPath,
|
||||
// return completed resolved path and rebased file name
|
||||
func GetRebaseName(path, resolvedPath string) (string, string) {
|
||||
// linkTarget will have been cleaned (no trailing path separators and dot) so
|
||||
// we can manually join it with them
|
||||
var rebaseName string
|
||||
if specifiesCurrentDir(path) &&
|
||||
!specifiesCurrentDir(resolvedPath) {
|
||||
resolvedPath += string(filepath.Separator) + "."
|
||||
}
|
||||
|
||||
if hasTrailingPathSeparator(path, os.PathSeparator) &&
|
||||
!hasTrailingPathSeparator(resolvedPath, os.PathSeparator) {
|
||||
resolvedPath += string(filepath.Separator)
|
||||
}
|
||||
|
||||
if filepath.Base(path) != filepath.Base(resolvedPath) {
|
||||
// In the case where the path had a trailing separator and a symlink
|
||||
// evaluation has changed the last path component, we will need to
|
||||
// rebase the name in the archive that is being copied to match the
|
||||
// originally requested name.
|
||||
rebaseName = filepath.Base(path)
|
||||
}
|
||||
return resolvedPath, rebaseName
|
||||
}
|
|
@ -0,0 +1,11 @@
|
|||
// +build !windows
|
||||
|
||||
package archive // import "github.com/docker/docker/pkg/archive"
|
||||
|
||||
import (
|
||||
"path/filepath"
|
||||
)
|
||||
|
||||
// normalizePath converts path to platform semantics; on Unix that means
// forward slashes throughout.
func normalizePath(path string) string {
	normalized := filepath.ToSlash(path)
	return normalized
}
|
|
@ -0,0 +1,9 @@
|
|||
package archive // import "github.com/docker/docker/pkg/archive"
|
||||
|
||||
import (
|
||||
"path/filepath"
|
||||
)
|
||||
|
||||
// normalizePath converts path to platform semantics by replacing forward
// slashes with the OS separator.
func normalizePath(path string) string {
	normalized := filepath.FromSlash(path)
	return normalized
}
|
|
@ -0,0 +1,256 @@
|
|||
package archive // import "github.com/docker/docker/pkg/archive"
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strings"
|
||||
|
||||
"github.com/docker/docker/pkg/idtools"
|
||||
"github.com/docker/docker/pkg/pools"
|
||||
"github.com/docker/docker/pkg/system"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// UnpackLayer unpack `layer` to a `dest`. The stream `layer` can be
// compressed or uncompressed.
// Returns the size in bytes of the contents of the layer.
//
// Whiteout files (see whiteouts.go constants) are interpreted rather than
// extracted: a .wh.<name> entry deletes <name>, and a .wh..wh..opq entry
// deletes everything in its directory not already unpacked by this layer.
func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64, err error) {
	tr := tar.NewReader(layer)
	trBuf := pools.BufioReader32KPool.Get(tr)
	defer pools.BufioReader32KPool.Put(trBuf)

	var dirs []*tar.Header
	unpackedPaths := make(map[string]struct{})

	if options == nil {
		options = &TarOptions{}
	}
	if options.ExcludePatterns == nil {
		options.ExcludePatterns = []string{}
	}
	idMappings := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps)

	aufsTempdir := ""
	aufsHardlinks := make(map[string]*tar.Header)

	// Iterate through the files in the archive.
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			// end of tar archive
			break
		}
		if err != nil {
			return 0, err
		}

		// size accumulates declared entry sizes, including skipped entries.
		size += hdr.Size

		// Normalize name, for safety and for a simple is-root check
		hdr.Name = filepath.Clean(hdr.Name)

		// Windows does not support filenames with colons in them. Ignore
		// these files. This is not a problem though (although it might
		// appear that it is). Let's suppose a client is running docker pull.
		// The daemon it points to is Windows. Would it make sense for the
		// client to be doing a docker pull Ubuntu for example (which has files
		// with colons in the name under /usr/share/man/man3)? No, absolutely
		// not as it would really only make sense that they were pulling a
		// Windows image. However, for development, it is necessary to be able
		// to pull Linux images which are in the repository.
		//
		// TODO Windows. Once the registry is aware of what images are Windows-
		// specific or Linux-specific, this warning should be changed to an error
		// to cater for the situation where someone does manage to upload a Linux
		// image but have it tagged as Windows inadvertently.
		if runtime.GOOS == "windows" {
			if strings.Contains(hdr.Name, ":") {
				logrus.Warnf("Windows: Ignoring %s (is this a Linux image?)", hdr.Name)
				continue
			}
		}

		// Note as these operations are platform specific, so must the slash be.
		if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) {
			// Not the root directory, ensure that the parent directory exists.
			// This happened in some tests where an image had a tarfile without any
			// parent directories.
			parent := filepath.Dir(hdr.Name)
			parentPath := filepath.Join(dest, parent)

			if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
				// NOTE(review): parents are created with mode 0600; if the
				// layer later carries an entry for this directory its
				// metadata is applied then — confirm intended for dirs
				// never named in the layer.
				err = system.MkdirAll(parentPath, 0600, "")
				if err != nil {
					return 0, err
				}
			}
		}

		// Skip AUFS metadata dirs
		if strings.HasPrefix(hdr.Name, WhiteoutMetaPrefix) {
			// Regular files inside /.wh..wh.plnk can be used as hardlink targets
			// We don't want this directory, but we need the files in them so that
			// such hardlinks can be resolved.
			if strings.HasPrefix(hdr.Name, WhiteoutLinkDir) && hdr.Typeflag == tar.TypeReg {
				basename := filepath.Base(hdr.Name)
				aufsHardlinks[basename] = hdr
				if aufsTempdir == "" {
					if aufsTempdir, err = ioutil.TempDir("", "dockerplnk"); err != nil {
						return 0, err
					}
					defer os.RemoveAll(aufsTempdir)
				}
				if err := createTarFile(filepath.Join(aufsTempdir, basename), dest, hdr, tr, true, nil, options.InUserNS); err != nil {
					return 0, err
				}
			}

			// Opaque-dir markers still need processing below; everything
			// else under the metadata prefix is skipped.
			if hdr.Name != WhiteoutOpaqueDir {
				continue
			}
		}
		path := filepath.Join(dest, hdr.Name)
		rel, err := filepath.Rel(dest, path)
		if err != nil {
			return 0, err
		}

		// Note as these operations are platform specific, so must the slash be.
		if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
			// Path traversal attempt (entry would land outside dest).
			return 0, breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest))
		}
		base := filepath.Base(path)

		if strings.HasPrefix(base, WhiteoutPrefix) {
			dir := filepath.Dir(path)
			if base == WhiteoutOpaqueDir {
				_, err := os.Lstat(dir)
				if err != nil {
					return 0, err
				}
				// Remove everything in dir that this layer did not itself
				// unpack (opaque directory semantics).
				err = filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
					if err != nil {
						if os.IsNotExist(err) {
							err = nil // parent was deleted
						}
						return err
					}
					if path == dir {
						return nil
					}
					if _, exists := unpackedPaths[path]; !exists {
						err := os.RemoveAll(path)
						return err
					}
					return nil
				})
				if err != nil {
					return 0, err
				}
			} else {
				// Plain whiteout: delete the named sibling from the lower layer.
				originalBase := base[len(WhiteoutPrefix):]
				originalPath := filepath.Join(dir, originalBase)
				if err := os.RemoveAll(originalPath); err != nil {
					return 0, err
				}
			}
		} else {
			// If path exits we almost always just want to remove and replace it.
			// The only exception is when it is a directory *and* the file from
			// the layer is also a directory. Then we want to merge them (i.e.
			// just apply the metadata from the layer).
			if fi, err := os.Lstat(path); err == nil {
				if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) {
					if err := os.RemoveAll(path); err != nil {
						return 0, err
					}
				}
			}

			trBuf.Reset(tr)
			srcData := io.Reader(trBuf)
			srcHdr := hdr

			// Hard links into /.wh..wh.plnk don't work, as we don't extract that directory, so
			// we manually retarget these into the temporary files we extracted them into
			if hdr.Typeflag == tar.TypeLink && strings.HasPrefix(filepath.Clean(hdr.Linkname), WhiteoutLinkDir) {
				linkBasename := filepath.Base(hdr.Linkname)
				srcHdr = aufsHardlinks[linkBasename]
				if srcHdr == nil {
					return 0, fmt.Errorf("Invalid aufs hardlink")
				}
				// Defer inside the loop: the file stays open until
				// UnpackLayer returns, not until the iteration ends.
				tmpFile, err := os.Open(filepath.Join(aufsTempdir, linkBasename))
				if err != nil {
					return 0, err
				}
				defer tmpFile.Close()
				srcData = tmpFile
			}

			if err := remapIDs(idMappings, srcHdr); err != nil {
				return 0, err
			}

			if err := createTarFile(path, dest, srcHdr, srcData, true, nil, options.InUserNS); err != nil {
				return 0, err
			}

			// Directory mtimes must be handled at the end to avoid further
			// file creation in them to modify the directory mtime
			if hdr.Typeflag == tar.TypeDir {
				dirs = append(dirs, hdr)
			}
			unpackedPaths[path] = struct{}{}
		}
	}

	// Apply directory timestamps last so file creation above doesn't
	// clobber them.
	for _, hdr := range dirs {
		path := filepath.Join(dest, hdr.Name)
		if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil {
			return 0, err
		}
	}

	return size, nil
}
|
||||
|
||||
// ApplyLayer parses a diff in the standard layer format from `layer`,
|
||||
// and applies it to the directory `dest`. The stream `layer` can be
|
||||
// compressed or uncompressed.
|
||||
// Returns the size in bytes of the contents of the layer.
|
||||
func ApplyLayer(dest string, layer io.Reader) (int64, error) {
|
||||
return applyLayerHandler(dest, layer, &TarOptions{}, true)
|
||||
}
|
||||
|
||||
// ApplyUncompressedLayer parses a diff in the standard layer format from
|
||||
// `layer`, and applies it to the directory `dest`. The stream `layer`
|
||||
// can only be uncompressed.
|
||||
// Returns the size in bytes of the contents of the layer.
|
||||
func ApplyUncompressedLayer(dest string, layer io.Reader, options *TarOptions) (int64, error) {
|
||||
return applyLayerHandler(dest, layer, options, false)
|
||||
}
|
||||
|
||||
// do the bulk load of ApplyLayer, but allow for not calling DecompressStream
|
||||
func applyLayerHandler(dest string, layer io.Reader, options *TarOptions, decompress bool) (int64, error) {
|
||||
dest = filepath.Clean(dest)
|
||||
|
||||
// We need to be able to set any perms
|
||||
oldmask, err := system.Umask(0)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
defer system.Umask(oldmask) // ignore err, ErrNotSupportedPlatform
|
||||
|
||||
if decompress {
|
||||
layer, err = DecompressStream(layer)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
}
|
||||
return UnpackLayer(dest, layer, options)
|
||||
}
|
|
@ -0,0 +1,97 @@
|
|||
// +build ignore
|
||||
|
||||
// Simple tool to create an archive stream from an old and new directory
|
||||
//
|
||||
// By default it will stream the comparison of two temporary directories with junk files
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path"
|
||||
|
||||
"github.com/docker/docker/pkg/archive"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
var (
|
||||
flDebug = flag.Bool("D", false, "debugging output")
|
||||
flNewDir = flag.String("newdir", "", "")
|
||||
flOldDir = flag.String("olddir", "", "")
|
||||
log = logrus.New()
|
||||
)
|
||||
|
||||
func main() {
|
||||
flag.Usage = func() {
|
||||
fmt.Println("Produce a tar from comparing two directory paths. By default a demo tar is created of around 200 files (including hardlinks)")
|
||||
fmt.Printf("%s [OPTIONS]\n", os.Args[0])
|
||||
flag.PrintDefaults()
|
||||
}
|
||||
flag.Parse()
|
||||
log.Out = os.Stderr
|
||||
if (len(os.Getenv("DEBUG")) > 0) || *flDebug {
|
||||
logrus.SetLevel(logrus.DebugLevel)
|
||||
}
|
||||
var newDir, oldDir string
|
||||
|
||||
if len(*flNewDir) == 0 {
|
||||
var err error
|
||||
newDir, err = ioutil.TempDir("", "docker-test-newDir")
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
defer os.RemoveAll(newDir)
|
||||
if _, err := prepareUntarSourceDirectory(100, newDir, true); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
} else {
|
||||
newDir = *flNewDir
|
||||
}
|
||||
|
||||
if len(*flOldDir) == 0 {
|
||||
oldDir, err := ioutil.TempDir("", "docker-test-oldDir")
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
defer os.RemoveAll(oldDir)
|
||||
} else {
|
||||
oldDir = *flOldDir
|
||||
}
|
||||
|
||||
changes, err := archive.ChangesDirs(newDir, oldDir)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
a, err := archive.ExportChanges(newDir, changes)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
defer a.Close()
|
||||
|
||||
i, err := io.Copy(os.Stdout, a)
|
||||
if err != nil && err != io.EOF {
|
||||
log.Fatal(err)
|
||||
}
|
||||
fmt.Fprintf(os.Stderr, "wrote archive of %d bytes", i)
|
||||
}
|
||||
|
||||
func prepareUntarSourceDirectory(numberOfFiles int, targetPath string, makeLinks bool) (int, error) {
|
||||
fileData := []byte("fooo")
|
||||
for n := 0; n < numberOfFiles; n++ {
|
||||
fileName := fmt.Sprintf("file-%d", n)
|
||||
if err := ioutil.WriteFile(path.Join(targetPath, fileName), fileData, 0700); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
if makeLinks {
|
||||
if err := os.Link(path.Join(targetPath, fileName), path.Join(targetPath, fileName+"-link")); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
}
|
||||
}
|
||||
totalSize := numberOfFiles * len(fileData)
|
||||
return totalSize, nil
|
||||
}
|
|
@ -0,0 +1,16 @@
|
|||
package archive // import "github.com/docker/docker/pkg/archive"
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
"time"
|
||||
)
|
||||
|
||||
func timeToTimespec(time time.Time) (ts syscall.Timespec) {
|
||||
if time.IsZero() {
|
||||
// Return UTIME_OMIT special value
|
||||
ts.Sec = 0
|
||||
ts.Nsec = (1 << 30) - 2
|
||||
return
|
||||
}
|
||||
return syscall.NsecToTimespec(time.UnixNano())
|
||||
}
|
|
@ -0,0 +1,16 @@
|
|||
// +build !linux
|
||||
|
||||
package archive // import "github.com/docker/docker/pkg/archive"
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
"time"
|
||||
)
|
||||
|
||||
func timeToTimespec(time time.Time) (ts syscall.Timespec) {
|
||||
nsec := int64(0)
|
||||
if !time.IsZero() {
|
||||
nsec = time.UnixNano()
|
||||
}
|
||||
return syscall.NsecToTimespec(nsec)
|
||||
}
|
|
@ -0,0 +1,23 @@
|
|||
package archive // import "github.com/docker/docker/pkg/archive"
|
||||
|
||||
// Whiteouts are files with a special meaning for the layered filesystem.
// Docker uses AUFS whiteout files inside exported archives. In other
// filesystems these files are generated/handled on tar creation/extraction.

// WhiteoutPrefix prefix means file is a whiteout. If this is followed by a
// filename this means that file has been removed from the base layer.
const WhiteoutPrefix = ".wh."

// WhiteoutMetaPrefix prefix means whiteout has a special meaning and is not
// for removing an actual file. Normally these files are excluded from exported
// archives. (It is the whiteout prefix doubled: ".wh..wh.".)
const WhiteoutMetaPrefix = WhiteoutPrefix + WhiteoutPrefix

// WhiteoutLinkDir is a directory AUFS uses for storing hardlink links to other
// layers. Normally these should not go into exported archives and all changed
// hardlinks should be copied to the top layer. (Expands to ".wh..wh.plnk".)
const WhiteoutLinkDir = WhiteoutMetaPrefix + "plnk"

// WhiteoutOpaqueDir file means directory has been made opaque - meaning
// readdir calls to this directory do not follow to lower layers.
// (Expands to ".wh..wh..opq".)
const WhiteoutOpaqueDir = WhiteoutMetaPrefix + ".opq"
|
|
@ -0,0 +1,59 @@
|
|||
package archive // import "github.com/docker/docker/pkg/archive"
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"bytes"
|
||||
"io"
|
||||
)
|
||||
|
||||
// Generate generates a new archive from the content provided
|
||||
// as input.
|
||||
//
|
||||
// `files` is a sequence of path/content pairs. A new file is
|
||||
// added to the archive for each pair.
|
||||
// If the last pair is incomplete, the file is created with an
|
||||
// empty content. For example:
|
||||
//
|
||||
// Generate("foo.txt", "hello world", "emptyfile")
|
||||
//
|
||||
// The above call will return an archive with 2 files:
|
||||
// * ./foo.txt with content "hello world"
|
||||
// * ./empty with empty content
|
||||
//
|
||||
// FIXME: stream content instead of buffering
|
||||
// FIXME: specify permissions and other archive metadata
|
||||
func Generate(input ...string) (io.Reader, error) {
|
||||
files := parseStringPairs(input...)
|
||||
buf := new(bytes.Buffer)
|
||||
tw := tar.NewWriter(buf)
|
||||
for _, file := range files {
|
||||
name, content := file[0], file[1]
|
||||
hdr := &tar.Header{
|
||||
Name: name,
|
||||
Size: int64(len(content)),
|
||||
}
|
||||
if err := tw.WriteHeader(hdr); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if _, err := tw.Write([]byte(content)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if err := tw.Close(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return buf, nil
|
||||
}
|
||||
|
||||
// parseStringPairs groups a flat argument list into (path, content)
// pairs. A trailing unpaired element becomes a pair whose second field
// is the empty string.
func parseStringPairs(input ...string) (output [][2]string) {
	output = make([][2]string, 0, len(input)/2+1)
	for i := 0; i < len(input); i += 2 {
		pair := [2]string{input[i], ""}
		if next := i + 1; next < len(input) {
			pair[1] = input[next]
		}
		output = append(output, pair)
	}
	return output
}
|
|
@ -0,0 +1,73 @@
|
|||
package chrootarchive // import "github.com/docker/docker/pkg/chrootarchive"
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/docker/docker/pkg/archive"
|
||||
"github.com/docker/docker/pkg/idtools"
|
||||
)
|
||||
|
||||
// NewArchiver returns a new Archiver which uses chrootarchive.Untar
|
||||
func NewArchiver(idMappings *idtools.IDMappings) *archive.Archiver {
|
||||
if idMappings == nil {
|
||||
idMappings = &idtools.IDMappings{}
|
||||
}
|
||||
return &archive.Archiver{
|
||||
Untar: Untar,
|
||||
IDMappingsVar: idMappings,
|
||||
}
|
||||
}
|
||||
|
||||
// Untar reads a stream of bytes from `archive`, parses it as a tar archive,
// and unpacks it into the directory at `dest`.
// The archive may be compressed with one of the following algorithms:
// identity (uncompressed), gzip, bzip2, xz.
func Untar(tarArchive io.Reader, dest string, options *archive.TarOptions) error {
	// true: sniff the stream and decompress before unpacking.
	return untarHandler(tarArchive, dest, options, true)
}
|
||||
|
||||
// UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive,
// and unpacks it into the directory at `dest`.
// The archive must be an uncompressed stream.
func UntarUncompressed(tarArchive io.Reader, dest string, options *archive.TarOptions) error {
	// false: the stream is consumed as-is, with no decompression step.
	return untarHandler(tarArchive, dest, options, false)
}
|
||||
|
||||
// Handler for teasing out the automatic decompression
|
||||
func untarHandler(tarArchive io.Reader, dest string, options *archive.TarOptions, decompress bool) error {
|
||||
if tarArchive == nil {
|
||||
return fmt.Errorf("Empty archive")
|
||||
}
|
||||
if options == nil {
|
||||
options = &archive.TarOptions{}
|
||||
}
|
||||
if options.ExcludePatterns == nil {
|
||||
options.ExcludePatterns = []string{}
|
||||
}
|
||||
|
||||
idMappings := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps)
|
||||
rootIDs := idMappings.RootPair()
|
||||
|
||||
dest = filepath.Clean(dest)
|
||||
if _, err := os.Stat(dest); os.IsNotExist(err) {
|
||||
if err := idtools.MkdirAllAndChownNew(dest, 0755, rootIDs); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
r := ioutil.NopCloser(tarArchive)
|
||||
if decompress {
|
||||
decompressedArchive, err := archive.DecompressStream(tarArchive)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer decompressedArchive.Close()
|
||||
r = decompressedArchive
|
||||
}
|
||||
|
||||
return invokeUnpack(r, dest, options)
|
||||
}
|
|
@ -0,0 +1,88 @@
|
|||
// +build !windows
|
||||
|
||||
package chrootarchive // import "github.com/docker/docker/pkg/chrootarchive"
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"runtime"
|
||||
|
||||
"github.com/docker/docker/pkg/archive"
|
||||
"github.com/docker/docker/pkg/reexec"
|
||||
)
|
||||
|
||||
// untar is the entry-point for docker-untar on re-exec. This is not used on
// Windows as it does not support chroot, hence no point sandboxing through
// chroot and rexec.
//
// It never returns: on success it calls os.Exit(0); on any failure fatal()
// exits with status 1.
func untar() {
	runtime.LockOSThread()
	flag.Parse()

	var options *archive.TarOptions

	//read the options from the pipe "ExtraFiles"
	if err := json.NewDecoder(os.NewFile(3, "options")).Decode(&options); err != nil {
		fatal(err)
	}

	// Sandbox the rest of the process inside the destination directory.
	if err := chroot(flag.Arg(0)); err != nil {
		fatal(err)
	}

	// After the chroot, "/" is the destination directory.
	if err := archive.Unpack(os.Stdin, "/", options); err != nil {
		fatal(err)
	}
	// fully consume stdin in case it is zero padded
	if _, err := flush(os.Stdin); err != nil {
		fatal(err)
	}

	os.Exit(0)
}
|
||||
|
||||
func invokeUnpack(decompressedArchive io.Reader, dest string, options *archive.TarOptions) error {
|
||||
|
||||
// We can't pass a potentially large exclude list directly via cmd line
|
||||
// because we easily overrun the kernel's max argument/environment size
|
||||
// when the full image list is passed (e.g. when this is used by
|
||||
// `docker load`). We will marshall the options via a pipe to the
|
||||
// child
|
||||
r, w, err := os.Pipe()
|
||||
if err != nil {
|
||||
return fmt.Errorf("Untar pipe failure: %v", err)
|
||||
}
|
||||
|
||||
cmd := reexec.Command("docker-untar", dest)
|
||||
cmd.Stdin = decompressedArchive
|
||||
|
||||
cmd.ExtraFiles = append(cmd.ExtraFiles, r)
|
||||
output := bytes.NewBuffer(nil)
|
||||
cmd.Stdout = output
|
||||
cmd.Stderr = output
|
||||
|
||||
if err := cmd.Start(); err != nil {
|
||||
w.Close()
|
||||
return fmt.Errorf("Untar error on re-exec cmd: %v", err)
|
||||
}
|
||||
//write the options to the pipe for the untar exec to read
|
||||
if err := json.NewEncoder(w).Encode(options); err != nil {
|
||||
w.Close()
|
||||
return fmt.Errorf("Untar json encode to pipe failed: %v", err)
|
||||
}
|
||||
w.Close()
|
||||
|
||||
if err := cmd.Wait(); err != nil {
|
||||
// when `xz -d -c -q | docker-untar ...` failed on docker-untar side,
|
||||
// we need to exhaust `xz`'s output, otherwise the `xz` side will be
|
||||
// pending on write pipe forever
|
||||
io.Copy(ioutil.Discard, decompressedArchive)
|
||||
|
||||
return fmt.Errorf("Error processing tar file(%v): %s", err, output)
|
||||
}
|
||||
return nil
|
||||
}
|
22
vendor/github.com/docker/docker/pkg/chrootarchive/archive_windows.go
generated
vendored
Normal file
22
vendor/github.com/docker/docker/pkg/chrootarchive/archive_windows.go
generated
vendored
Normal file
|
@ -0,0 +1,22 @@
|
|||
package chrootarchive // import "github.com/docker/docker/pkg/chrootarchive"
|
||||
|
||||
import (
|
||||
"io"
|
||||
|
||||
"github.com/docker/docker/pkg/archive"
|
||||
"github.com/docker/docker/pkg/longpath"
|
||||
)
|
||||
|
||||
// chroot is not supported by Windows; it is a no-op that always
// succeeds so shared code can call it unconditionally.
func chroot(path string) error {
	return nil
}
|
||||
|
||||
// invokeUnpack unpacks the archive directly in-process on Windows.
func invokeUnpack(decompressedArchive io.ReadCloser,
	dest string,
	options *archive.TarOptions) error {
	// Windows is different to Linux here because Windows does not support
	// chroot. Hence there is no point sandboxing a chrooted process to
	// do the unpack. We call inline instead within the daemon process.
	return archive.Unpack(decompressedArchive, longpath.AddPrefix(dest), options)
}
|
113
vendor/github.com/docker/docker/pkg/chrootarchive/chroot_linux.go
generated
vendored
Normal file
113
vendor/github.com/docker/docker/pkg/chrootarchive/chroot_linux.go
generated
vendored
Normal file
|
@ -0,0 +1,113 @@
|
|||
package chrootarchive // import "github.com/docker/docker/pkg/chrootarchive"
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/docker/docker/pkg/mount"
|
||||
rsystem "github.com/opencontainers/runc/libcontainer/system"
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
// chroot on linux uses pivot_root instead of chroot
// pivot_root takes a new root and an old root.
// Old root must be a sub-dir of new root, it is where the current rootfs will reside after the call to pivot_root.
// New root is where the new rootfs is set to.
// Old root is removed after the call to pivot_root so it is no longer available under the new root.
// This is similar to how libcontainer sets up a container's rootfs
func chroot(path string) (err error) {
	// if the engine is running in a user namespace we need to use actual chroot
	if rsystem.RunningInUserNS() {
		return realChroot(path)
	}
	// New mount namespace so the mount/unmount dance below does not
	// affect the rest of the process tree.
	if err := unix.Unshare(unix.CLONE_NEWNS); err != nil {
		return fmt.Errorf("Error creating mount namespace before pivot: %v", err)
	}

	// Make everything in new ns slave.
	// Don't use `private` here as this could race where the mountns gets a
	// reference to a mount and an unmount from the host does not propagate,
	// which could potentially cause transient errors for other operations,
	// even though this should be relatively small window here `slave` should
	// not cause any problems.
	if err := mount.MakeRSlave("/"); err != nil {
		return err
	}

	// pivot_root requires the new root to be a mount point; bind-mount
	// path onto itself if it is not one already. If even that fails,
	// fall back to plain chroot.
	if mounted, _ := mount.Mounted(path); !mounted {
		if err := mount.Mount(path, path, "bind", "rbind,rw"); err != nil {
			return realChroot(path)
		}
	}

	// setup oldRoot for pivot_root
	pivotDir, err := ioutil.TempDir(path, ".pivot_root")
	if err != nil {
		return fmt.Errorf("Error setting up pivot dir: %v", err)
	}

	var mounted bool
	defer func() {
		if mounted {
			// make sure pivotDir is not mounted before we try to remove it
			if errCleanup := unix.Unmount(pivotDir, unix.MNT_DETACH); errCleanup != nil {
				if err == nil {
					err = errCleanup
				}
				return
			}
		}

		errCleanup := os.Remove(pivotDir)
		// pivotDir doesn't exist if pivot_root failed and chroot+chdir was successful
		// because we already cleaned it up on failed pivot_root
		if errCleanup != nil && !os.IsNotExist(errCleanup) {
			errCleanup = fmt.Errorf("Error cleaning up after pivot: %v", errCleanup)
			if err == nil {
				err = errCleanup
			}
		}
	}()

	if err := unix.PivotRoot(path, pivotDir); err != nil {
		// If pivot fails, fall back to the normal chroot after cleaning up temp dir
		if err := os.Remove(pivotDir); err != nil {
			return fmt.Errorf("Error cleaning up after failed pivot: %v", err)
		}
		return realChroot(path)
	}
	mounted = true

	// This is the new path for where the old root (prior to the pivot) has been moved to
	// This dir contains the rootfs of the caller, which we need to remove so it is not visible during extraction
	pivotDir = filepath.Join("/", filepath.Base(pivotDir))

	if err := unix.Chdir("/"); err != nil {
		return fmt.Errorf("Error changing to new root: %v", err)
	}

	// Make the pivotDir (where the old root lives) private so it can be unmounted without propagating to the host
	if err := unix.Mount("", pivotDir, "", unix.MS_PRIVATE|unix.MS_REC, ""); err != nil {
		return fmt.Errorf("Error making old root private after pivot: %v", err)
	}

	// Now unmount the old root so it's no longer visible from the new root
	if err := unix.Unmount(pivotDir, unix.MNT_DETACH); err != nil {
		return fmt.Errorf("Error while unmounting old root after pivot: %v", err)
	}
	mounted = false

	return nil
}
|
||||
|
||||
// realChroot enters path with the plain chroot(2) syscall and then
// switches the working directory to the new root. Used when pivot_root
// is unavailable (user namespaces) or fails.
func realChroot(path string) error {
	if err := unix.Chroot(path); err != nil {
		return fmt.Errorf("Error after fallback to chroot: %v", err)
	}
	// Without the chdir the process would keep a working directory
	// outside the new root.
	if err := unix.Chdir("/"); err != nil {
		return fmt.Errorf("Error changing to new root after chroot: %v", err)
	}
	return nil
}
|
|
@ -0,0 +1,12 @@
|
|||
// +build !windows,!linux
|
||||
|
||||
package chrootarchive // import "github.com/docker/docker/pkg/chrootarchive"
|
||||
|
||||
import "golang.org/x/sys/unix"
|
||||
|
||||
func chroot(path string) error {
|
||||
if err := unix.Chroot(path); err != nil {
|
||||
return err
|
||||
}
|
||||
return unix.Chdir("/")
|
||||
}
|
|
@ -0,0 +1,23 @@
|
|||
package chrootarchive // import "github.com/docker/docker/pkg/chrootarchive"
|
||||
|
||||
import (
|
||||
"io"
|
||||
|
||||
"github.com/docker/docker/pkg/archive"
|
||||
)
|
||||
|
||||
// ApplyLayer parses a diff in the standard layer format from `layer`,
// and applies it to the directory `dest`. The stream `layer` may be
// compressed or uncompressed (it is passed through decompression —
// note the `true` decompress flag below).
// Returns the size in bytes of the contents of the layer.
func ApplyLayer(dest string, layer io.Reader) (size int64, err error) {
	return applyLayerHandler(dest, layer, &archive.TarOptions{}, true)
}
|
||||
|
||||
// ApplyUncompressedLayer parses a diff in the standard layer format from
// `layer`, and applies it to the directory `dest`. The stream `layer`
// can only be uncompressed.
// Returns the size in bytes of the contents of the layer.
func ApplyUncompressedLayer(dest string, layer io.Reader, options *archive.TarOptions) (int64, error) {
	// false: skip the decompression step.
	return applyLayerHandler(dest, layer, options, false)
}
|
|
@ -0,0 +1,130 @@
|
|||
//+build !windows
|
||||
|
||||
package chrootarchive // import "github.com/docker/docker/pkg/chrootarchive"
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
|
||||
"github.com/docker/docker/pkg/archive"
|
||||
"github.com/docker/docker/pkg/reexec"
|
||||
"github.com/docker/docker/pkg/system"
|
||||
rsystem "github.com/opencontainers/runc/libcontainer/system"
|
||||
)
|
||||
|
||||
// applyLayerResponse is the JSON document the docker-applyLayer child
// process writes to stdout so the parent can recover the layer size.
type applyLayerResponse struct {
	LayerSize int64 `json:"layerSize"`
}
|
||||
|
||||
// applyLayer is the entry-point for docker-applylayer on re-exec. This is not
// used on Windows as it does not support chroot, hence no point sandboxing
// through chroot and rexec.
//
// It never returns: on success it writes an applyLayerResponse JSON to
// stdout and exits 0; any failure goes through fatal() (exit 1).
func applyLayer() {

	var (
		tmpDir  string
		err     error
		options *archive.TarOptions
	)
	runtime.LockOSThread()
	flag.Parse()

	// Capture this before the chroot; the result is needed afterwards.
	inUserns := rsystem.RunningInUserNS()
	if err := chroot(flag.Arg(0)); err != nil {
		fatal(err)
	}

	// We need to be able to set any perms
	oldmask, err := system.Umask(0)
	defer system.Umask(oldmask)
	if err != nil {
		fatal(err)
	}

	// Options are passed via the OPT environment variable as JSON.
	if err := json.Unmarshal([]byte(os.Getenv("OPT")), &options); err != nil {
		fatal(err)
	}

	if inUserns {
		options.InUserNS = true
	}

	// Scratch space for the unpack, created inside the chroot.
	if tmpDir, err = ioutil.TempDir("/", "temp-docker-extract"); err != nil {
		fatal(err)
	}

	os.Setenv("TMPDIR", tmpDir)
	size, err := archive.UnpackLayer("/", os.Stdin, options)
	os.RemoveAll(tmpDir)
	if err != nil {
		fatal(err)
	}

	encoder := json.NewEncoder(os.Stdout)
	if err := encoder.Encode(applyLayerResponse{size}); err != nil {
		fatal(fmt.Errorf("unable to encode layerSize JSON: %s", err))
	}

	// Drain any zero padding left on stdin so the writer is not blocked.
	if _, err := flush(os.Stdin); err != nil {
		fatal(err)
	}

	os.Exit(0)
}
|
||||
|
||||
// applyLayerHandler parses a diff in the standard layer format from `layer`, and
// applies it to the directory `dest`. Returns the size in bytes of the
// contents of the layer.
//
// The actual unpack happens in a re-exec'd "docker-applyLayer" child
// (see applyLayer) so it can be sandboxed with chroot; options travel
// via the OPT environment variable and the size comes back as JSON on
// the child's stdout.
func applyLayerHandler(dest string, layer io.Reader, options *archive.TarOptions, decompress bool) (size int64, err error) {
	dest = filepath.Clean(dest)
	if decompress {
		decompressed, err := archive.DecompressStream(layer)
		if err != nil {
			return 0, err
		}
		defer decompressed.Close()

		layer = decompressed
	}
	if options == nil {
		options = &archive.TarOptions{}
		if rsystem.RunningInUserNS() {
			options.InUserNS = true
		}
	}
	if options.ExcludePatterns == nil {
		options.ExcludePatterns = []string{}
	}

	data, err := json.Marshal(options)
	if err != nil {
		return 0, fmt.Errorf("ApplyLayer json encode: %v", err)
	}

	cmd := reexec.Command("docker-applyLayer", dest)
	cmd.Stdin = layer
	cmd.Env = append(cmd.Env, fmt.Sprintf("OPT=%s", data))

	outBuf, errBuf := new(bytes.Buffer), new(bytes.Buffer)
	cmd.Stdout, cmd.Stderr = outBuf, errBuf

	if err = cmd.Run(); err != nil {
		return 0, fmt.Errorf("ApplyLayer %s stdout: %s stderr: %s", err, outBuf, errBuf)
	}

	// Stdout should be a valid JSON struct representing an applyLayerResponse.
	response := applyLayerResponse{}
	decoder := json.NewDecoder(outBuf)
	if err = decoder.Decode(&response); err != nil {
		return 0, fmt.Errorf("unable to decode ApplyLayer JSON response: %s", err)
	}

	return response.LayerSize, nil
}
|
|
@ -0,0 +1,45 @@
|
|||
package chrootarchive // import "github.com/docker/docker/pkg/chrootarchive"
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/docker/docker/pkg/archive"
|
||||
"github.com/docker/docker/pkg/longpath"
|
||||
)
|
||||
|
||||
// applyLayerHandler parses a diff in the standard layer format from `layer`, and
// applies it to the directory `dest`. Returns the size in bytes of the
// contents of the layer.
//
// Windows has no chroot, so the unpack happens inline in this process.
func applyLayerHandler(dest string, layer io.Reader, options *archive.TarOptions, decompress bool) (size int64, err error) {
	dest = filepath.Clean(dest)

	// Ensure it is a Windows-style volume path
	dest = longpath.AddPrefix(dest)

	if decompress {
		decompressed, err := archive.DecompressStream(layer)
		if err != nil {
			return 0, err
		}
		defer decompressed.Close()

		layer = decompressed
	}

	tmpDir, err := ioutil.TempDir(os.Getenv("temp"), "temp-docker-extract")
	if err != nil {
		return 0, fmt.Errorf("ApplyLayer failed to create temp-docker-extract under %s. %s", dest, err)
	}

	// NOTE(review): the caller-supplied options are discarded here — nil
	// is passed to UnpackLayer. Confirm this is intentional on Windows.
	s, err := archive.UnpackLayer(dest, layer, nil)
	os.RemoveAll(tmpDir)
	if err != nil {
		// NOTE(review): %s on `layer` (an io.Reader) formats the reader
		// value, not its contents — message is of limited use.
		return 0, fmt.Errorf("ApplyLayer %s failed UnpackLayer to %s: %s", layer, dest, err)
	}

	return s, nil
}
|
|
@ -0,0 +1,28 @@
|
|||
// +build !windows
|
||||
|
||||
package chrootarchive // import "github.com/docker/docker/pkg/chrootarchive"
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
|
||||
"github.com/docker/docker/pkg/reexec"
|
||||
)
|
||||
|
||||
// init registers the chroot-sandboxed helpers so that re-exec'd copies
// of this binary (matched by argv[0]) run them instead of the normal
// entry point.
func init() {
	reexec.Register("docker-applyLayer", applyLayer)
	reexec.Register("docker-untar", untar)
}
|
||||
|
||||
// fatal reports err on stderr and terminates the re-exec'd helper
// process with exit status 1. It never returns.
func fatal(err error) {
	fmt.Fprint(os.Stderr, err)
	os.Exit(1)
}
|
||||
|
||||
// flush consumes all the bytes from the reader discarding
|
||||
// any errors
|
||||
func flush(r io.Reader) (bytes int64, err error) {
|
||||
return io.Copy(ioutil.Discard, r)
|
||||
}
|
|
@ -0,0 +1,4 @@
|
|||
package chrootarchive // import "github.com/docker/docker/pkg/chrootarchive"
|
||||
|
||||
// init is intentionally empty on Windows: chrootarchive does not
// re-exec helper processes here, so there is nothing to register.
func init() {
}
|
|
@ -0,0 +1,137 @@
|
|||
// Package pools provides a collection of pools which provide various
|
||||
// data types with buffers. These can be used to lower the number of
|
||||
// memory allocations and reuse buffers.
|
||||
//
|
||||
// New pools should be added to this package to allow them to be
|
||||
// shared across packages.
|
||||
//
|
||||
// Utility functions which operate on pools should be added to this
|
||||
// package to allow them to be reused.
|
||||
package pools // import "github.com/docker/docker/pkg/pools"
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"io"
|
||||
"sync"
|
||||
|
||||
"github.com/docker/docker/pkg/ioutils"
|
||||
)
|
||||
|
||||
// buffer32K is the buffer size (32 KiB) shared by all pools below.
const buffer32K = 32 * 1024

var (
	// BufioReader32KPool is a pool which returns bufio.Reader with a 32K buffer.
	BufioReader32KPool = newBufioReaderPoolWithSize(buffer32K)
	// BufioWriter32KPool is a pool which returns bufio.Writer with a 32K buffer.
	BufioWriter32KPool = newBufioWriterPoolWithSize(buffer32K)
	// buffer32KPool supplies raw 32K byte slices; used by Copy.
	buffer32KPool = newBufferPoolWithSize(buffer32K)
)
|
||||
|
||||
// BufioReaderPool is a bufio reader that uses sync.Pool.
|
||||
type BufioReaderPool struct {
|
||||
pool sync.Pool
|
||||
}
|
||||
|
||||
// newBufioReaderPoolWithSize is unexported because new pools should be
|
||||
// added here to be shared where required.
|
||||
func newBufioReaderPoolWithSize(size int) *BufioReaderPool {
|
||||
return &BufioReaderPool{
|
||||
pool: sync.Pool{
|
||||
New: func() interface{} { return bufio.NewReaderSize(nil, size) },
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// Get returns a bufio.Reader which reads from r. The buffer size is that of the pool.
|
||||
func (bufPool *BufioReaderPool) Get(r io.Reader) *bufio.Reader {
|
||||
buf := bufPool.pool.Get().(*bufio.Reader)
|
||||
buf.Reset(r)
|
||||
return buf
|
||||
}
|
||||
|
||||
// Put puts the bufio.Reader back into the pool.
|
||||
func (bufPool *BufioReaderPool) Put(b *bufio.Reader) {
|
||||
b.Reset(nil)
|
||||
bufPool.pool.Put(b)
|
||||
}
|
||||
|
||||
// bufferPool recycles fixed-size byte slices via sync.Pool.
type bufferPool struct {
	pool sync.Pool
}

// newBufferPoolWithSize returns a pool whose slices are size bytes long.
func newBufferPoolWithSize(size int) *bufferPool {
	bp := &bufferPool{}
	bp.pool.New = func() interface{} { return make([]byte, size) }
	return bp
}

// Get fetches a slice from the pool (allocating one if empty).
func (bp *bufferPool) Get() []byte {
	return bp.pool.Get().([]byte)
}

// Put returns a slice to the pool for reuse.
func (bp *bufferPool) Put(b []byte) {
	bp.pool.Put(b)
}
|
||||
|
||||
// Copy is a convenience wrapper which uses a buffer to avoid allocation in io.Copy.
|
||||
func Copy(dst io.Writer, src io.Reader) (written int64, err error) {
|
||||
buf := buffer32KPool.Get()
|
||||
written, err = io.CopyBuffer(dst, src, buf)
|
||||
buffer32KPool.Put(buf)
|
||||
return
|
||||
}
|
||||
|
||||
// NewReadCloserWrapper returns a wrapper which puts the bufio.Reader back
|
||||
// into the pool and closes the reader if it's an io.ReadCloser.
|
||||
func (bufPool *BufioReaderPool) NewReadCloserWrapper(buf *bufio.Reader, r io.Reader) io.ReadCloser {
|
||||
return ioutils.NewReadCloserWrapper(r, func() error {
|
||||
if readCloser, ok := r.(io.ReadCloser); ok {
|
||||
readCloser.Close()
|
||||
}
|
||||
bufPool.Put(buf)
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
// BufioWriterPool is a bufio writer that uses sync.Pool.
|
||||
type BufioWriterPool struct {
|
||||
pool sync.Pool
|
||||
}
|
||||
|
||||
// newBufioWriterPoolWithSize is unexported because new pools should be
|
||||
// added here to be shared where required.
|
||||
func newBufioWriterPoolWithSize(size int) *BufioWriterPool {
|
||||
return &BufioWriterPool{
|
||||
pool: sync.Pool{
|
||||
New: func() interface{} { return bufio.NewWriterSize(nil, size) },
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// Get returns a bufio.Writer which writes to w. The buffer size is that of the pool.
|
||||
func (bufPool *BufioWriterPool) Get(w io.Writer) *bufio.Writer {
|
||||
buf := bufPool.pool.Get().(*bufio.Writer)
|
||||
buf.Reset(w)
|
||||
return buf
|
||||
}
|
||||
|
||||
// Put puts the bufio.Writer back into the pool.
|
||||
func (bufPool *BufioWriterPool) Put(b *bufio.Writer) {
|
||||
b.Reset(nil)
|
||||
bufPool.pool.Put(b)
|
||||
}
|
||||
|
||||
// NewWriteCloserWrapper returns a wrapper which puts the bufio.Writer back
|
||||
// into the pool and closes the writer if it's an io.Writecloser.
|
||||
func (bufPool *BufioWriterPool) NewWriteCloserWrapper(buf *bufio.Writer, w io.Writer) io.WriteCloser {
|
||||
return ioutils.NewWriteCloserWrapper(w, func() error {
|
||||
buf.Flush()
|
||||
if writeCloser, ok := w.(io.WriteCloser); ok {
|
||||
writeCloser.Close()
|
||||
}
|
||||
bufPool.Put(buf)
|
||||
return nil
|
||||
})
|
||||
}
|
|
@ -0,0 +1,5 @@
|
|||
# reexec
|
||||
|
||||
The `reexec` package facilitates the busybox-style re-exec of the docker binary that we require because
of the forking limitations of using Go. Handlers can be registered under a name, and `argv[0]` of
the re-executed binary is used to look up and run the matching custom init function.
|
|
@ -0,0 +1,28 @@
|
|||
package reexec // import "github.com/docker/docker/pkg/reexec"
|
||||
|
||||
import (
|
||||
"os/exec"
|
||||
"syscall"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
// Self returns the path to the current process's binary.
// Returns "/proc/self/exe", the kernel's handle to the in-memory binary
// image, so it stays valid even if the on-disk file is replaced.
func Self() string {
	return "/proc/self/exe"
}
|
||||
|
||||
// Command returns *exec.Cmd with Path set to the current binary and
// SysProcAttr.Pdeathsig set to SIGTERM.
// This will use the in-memory version (/proc/self/exe) of the current binary,
// it is thus safe to delete or replace the on-disk binary (os.Args[0]).
func Command(args ...string) *exec.Cmd {
	return &exec.Cmd{
		Path: Self(),
		Args: args,
		SysProcAttr: &syscall.SysProcAttr{
			// Deliver SIGTERM to the child if this process dies.
			Pdeathsig: unix.SIGTERM,
		},
	}
}
|
|
@ -0,0 +1,23 @@
|
|||
// +build freebsd darwin
|
||||
|
||||
package reexec // import "github.com/docker/docker/pkg/reexec"
|
||||
|
||||
import (
|
||||
"os/exec"
|
||||
)
|
||||
|
||||
// Self returns the path to the current process's binary.
// Uses os.Args[0] (via naiveSelf: PATH lookup, then absolutization).
func Self() string {
	return naiveSelf()
}
|
||||
|
||||
// Command returns *exec.Cmd which has Path as current binary.
// For example if current binary is "docker" at "/usr/bin/", then cmd.Path will
// be set to "/usr/bin/docker".
func Command(args ...string) *exec.Cmd {
	return &exec.Cmd{
		Path: Self(),
		Args: args,
	}
}
|
|
@ -0,0 +1,12 @@
|
|||
// +build !linux,!windows,!freebsd,!darwin
|
||||
|
||||
package reexec // import "github.com/docker/docker/pkg/reexec"
|
||||
|
||||
import (
|
||||
"os/exec"
|
||||
)
|
||||
|
||||
// Command is unsupported on operating systems apart from Linux, Windows,
// FreeBSD, and Darwin (see the build tags above); it returns nil so
// callers can detect the missing support.
func Command(args ...string) *exec.Cmd {
	return nil
}
|
|
@ -0,0 +1,21 @@
|
|||
package reexec // import "github.com/docker/docker/pkg/reexec"
|
||||
|
||||
import (
|
||||
"os/exec"
|
||||
)
|
||||
|
||||
// Self returns the path to the current process's binary.
// Uses os.Args[0] (via naiveSelf: PATH lookup, then absolutization).
func Self() string {
	return naiveSelf()
}
|
||||
|
||||
// Command returns *exec.Cmd which has Path as current binary.
// For example if current binary is "docker.exe" at "C:\", then cmd.Path will
// be set to "C:\docker.exe".
func Command(args ...string) *exec.Cmd {
	return &exec.Cmd{
		Path: Self(),
		Args: args,
	}
}
|
|
@ -0,0 +1,47 @@
|
|||
package reexec // import "github.com/docker/docker/pkg/reexec"
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
)
|
||||
|
||||
// registeredInitializers maps a handler name (matched against argv[0]
// on re-exec) to its initialization function.
var registeredInitializers = make(map[string]func())

// Register adds an initialization func under the specified name
func Register(name string, initializer func()) {
	if _, dup := registeredInitializers[name]; dup {
		panic(fmt.Sprintf("reexec func already registered under name %q", name))
	}
	registeredInitializers[name] = initializer
}
|
||||
|
||||
// Init is called as the first part of the exec process and returns true if an
|
||||
// initialization function was called.
|
||||
func Init() bool {
|
||||
initializer, exists := registeredInitializers[os.Args[0]]
|
||||
if exists {
|
||||
initializer()
|
||||
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// naiveSelf resolves the current binary's path from os.Args[0]: a bare
// command name is looked up on PATH; relative paths are made absolute;
// the original value is returned when neither step applies.
func naiveSelf() string {
	name := os.Args[0]
	// A bare name (no separators) must have come from a PATH lookup.
	if filepath.Base(name) == name {
		if lp, err := exec.LookPath(name); err == nil {
			return lp
		}
	}
	// handle conversion of relative paths to absolute
	abs, err := filepath.Abs(name)
	if err != nil {
		// if we couldn't get absolute name, return original
		// (NOTE: Go only errors on Abs() if os.Getwd fails)
		return name
	}
	return abs
}
|
|
@ -0,0 +1,423 @@
|
|||
package fs
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/containerd/continuity/fs"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// bufferPool recycles 32 KiB scratch buffers (as *[]byte) to avoid a
// fresh allocation per copy operation.
var bufferPool = &sync.Pool{
	New: func() interface{} {
		buffer := make([]byte, 32*1024)
		return &buffer
	},
}
|
||||
|
||||
func rootPath(root, p string, followLinks bool) (string, error) {
|
||||
p = filepath.Join("/", p)
|
||||
if p == "/" {
|
||||
return root, nil
|
||||
}
|
||||
if followLinks {
|
||||
return fs.RootPath(root, p)
|
||||
}
|
||||
d, f := filepath.Split(p)
|
||||
ppath, err := fs.RootPath(root, d)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return filepath.Join(ppath, f), nil
|
||||
}
|
||||
|
||||
func ResolveWildcards(root, src string, followLinks bool) ([]string, error) {
|
||||
d1, d2 := splitWildcards(src)
|
||||
if d2 != "" {
|
||||
p, err := rootPath(root, d1, followLinks)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
matches, err := resolveWildcards(p, d2)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for i, m := range matches {
|
||||
p, err := rel(root, m)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
matches[i] = p
|
||||
}
|
||||
return matches, nil
|
||||
}
|
||||
return []string{d1}, nil
|
||||
}
|
||||
|
||||
// Copy copies files using `cp -a` semantics.
// Copy is likely unsafe to be used in non-containerized environments.
//
// src is resolved inside srcRoot and dst inside dstRoot; opts configure
// ownership, timestamps, wildcard expansion, and link following.
func Copy(ctx context.Context, srcRoot, src, dstRoot, dst string, opts ...Opt) error {
	var ci CopyInfo
	for _, o := range opts {
		o(&ci)
	}
	// Determine the directory that must exist before copying: the dst
	// itself when it names a directory, otherwise its parent.
	ensureDstPath := dst
	if d, f := filepath.Split(dst); f != "" && f != "." {
		ensureDstPath = d
	}
	if ensureDstPath != "" {
		// Note: := deliberately shadows ensureDstPath with the
		// root-resolved path for this block only.
		ensureDstPath, err := fs.RootPath(dstRoot, ensureDstPath)
		if err != nil {
			return err
		}
		if err := MkdirAll(ensureDstPath, 0755, ci.Chown, ci.Utime); err != nil {
			return err
		}
	}

	dst, err := fs.RootPath(dstRoot, filepath.Clean(dst))
	if err != nil {
		return err
	}

	c := newCopier(ci.Chown, ci.Utime, ci.Mode, ci.XAttrErrorHandler)
	srcs := []string{src}

	if ci.AllowWildcards {
		matches, err := ResolveWildcards(srcRoot, src, ci.FollowLinks)
		if err != nil {
			return err
		}
		if len(matches) == 0 {
			return errors.Errorf("no matches found: %s", src)
		}
		srcs = matches
	}

	for _, src := range srcs {
		srcFollowed, err := rootPath(srcRoot, src, ci.FollowLinks)
		if err != nil {
			return err
		}
		// Per-source destination: may get the source's base name
		// appended, cp-style (see prepareTargetDir).
		dst, err := c.prepareTargetDir(srcFollowed, src, dst, ci.CopyDirContents)
		if err != nil {
			return err
		}
		if err := c.copy(ctx, srcFollowed, dst, false); err != nil {
			return err
		}
	}

	return nil
}
|
||||
|
||||
func (c *copier) prepareTargetDir(srcFollowed, src, destPath string, copyDirContents bool) (string, error) {
|
||||
fiSrc, err := os.Lstat(srcFollowed)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
fiDest, err := os.Stat(destPath)
|
||||
if err != nil {
|
||||
if !os.IsNotExist(err) {
|
||||
return "", errors.Wrap(err, "failed to lstat destination path")
|
||||
}
|
||||
}
|
||||
|
||||
if (!copyDirContents && fiSrc.IsDir() && fiDest != nil) || (!fiSrc.IsDir() && fiDest != nil && fiDest.IsDir()) {
|
||||
destPath = filepath.Join(destPath, filepath.Base(src))
|
||||
}
|
||||
|
||||
target := filepath.Dir(destPath)
|
||||
|
||||
if copyDirContents && fiSrc.IsDir() && fiDest == nil {
|
||||
target = destPath
|
||||
}
|
||||
if err := MkdirAll(target, 0755, c.chown, c.utime); err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return destPath, nil
|
||||
}
|
||||
|
||||
// ChownOpt is the owner (uid/gid) applied to copied files and created
// directories; a nil *ChownOpt means "preserve the source owner".
type ChownOpt struct {
	Uid, Gid int
}

// XAttrErrorHandler is consulted when copying an extended attribute
// fails; returning nil suppresses the error.
type XAttrErrorHandler func(dst, src, xattrKey string, err error) error

// CopyInfo is the full configuration assembled from Opt functions.
type CopyInfo struct {
	Chown *ChownOpt  // force owner of copied files (nil: keep source owner)
	Utime *time.Time // force timestamps (nil: keep source times)
	AllowWildcards bool // expand wildcards in the source path
	Mode *int // force permission bits (nil: keep source mode)
	XAttrErrorHandler XAttrErrorHandler // xattr failure policy (nil: fail)
	CopyDirContents bool // copy the contents of a source dir, not the dir itself
	FollowLinks bool // resolve the source path through symlinks
}

// Opt mutates a CopyInfo during Copy's option processing.
type Opt func(*CopyInfo)
|
||||
|
||||
func WithCopyInfo(ci CopyInfo) func(*CopyInfo) {
|
||||
return func(c *CopyInfo) {
|
||||
*c = ci
|
||||
}
|
||||
}
|
||||
|
||||
func WithChown(uid, gid int) Opt {
|
||||
return func(ci *CopyInfo) {
|
||||
ci.Chown = &ChownOpt{Uid: uid, Gid: gid}
|
||||
}
|
||||
}
|
||||
|
||||
func AllowWildcards(ci *CopyInfo) {
|
||||
ci.AllowWildcards = true
|
||||
}
|
||||
|
||||
func WithXAttrErrorHandler(h XAttrErrorHandler) Opt {
|
||||
return func(ci *CopyInfo) {
|
||||
ci.XAttrErrorHandler = h
|
||||
}
|
||||
}
|
||||
|
||||
func AllowXAttrErrors(ci *CopyInfo) {
|
||||
h := func(string, string, string, error) error {
|
||||
return nil
|
||||
}
|
||||
WithXAttrErrorHandler(h)(ci)
|
||||
}
|
||||
|
||||
// copier carries one Copy invocation's configuration plus the inode map
// used to reproduce hardlinks in the destination tree.
type copier struct {
	chown *ChownOpt  // nil: preserve source ownership
	utime *time.Time // nil: preserve source timestamps
	mode *int // nil: preserve source permission bits
	inodes map[uint64]string // inode -> first copied path (hardlink source)
	xattrErrorHandler XAttrErrorHandler // non-nil after newCopier
}
|
||||
|
||||
func newCopier(chown *ChownOpt, tm *time.Time, mode *int, xeh XAttrErrorHandler) *copier {
|
||||
if xeh == nil {
|
||||
xeh = func(dst, src, key string, err error) error {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return &copier{inodes: map[uint64]string{}, chown: chown, utime: tm, xattrErrorHandler: xeh, mode: mode}
|
||||
}
|
||||
|
||||
// dest is always clean
|
||||
func (c *copier) copy(ctx context.Context, src, target string, overwriteTargetMetadata bool) error {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
default:
|
||||
}
|
||||
fi, err := os.Lstat(src)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to stat %s", src)
|
||||
}
|
||||
|
||||
if !fi.IsDir() {
|
||||
if err := ensureEmptyFileTarget(target); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
copyFileInfo := true
|
||||
|
||||
switch {
|
||||
case fi.IsDir():
|
||||
if created, err := c.copyDirectory(ctx, src, target, fi, overwriteTargetMetadata); err != nil {
|
||||
return err
|
||||
} else if !overwriteTargetMetadata {
|
||||
copyFileInfo = created
|
||||
}
|
||||
case (fi.Mode() & os.ModeType) == 0:
|
||||
link, err := getLinkSource(target, fi, c.inodes)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to get hardlink")
|
||||
}
|
||||
if link != "" {
|
||||
if err := os.Link(link, target); err != nil {
|
||||
return errors.Wrap(err, "failed to create hard link")
|
||||
}
|
||||
} else if err := copyFile(src, target); err != nil {
|
||||
return errors.Wrap(err, "failed to copy files")
|
||||
}
|
||||
case (fi.Mode() & os.ModeSymlink) == os.ModeSymlink:
|
||||
link, err := os.Readlink(src)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to read link: %s", src)
|
||||
}
|
||||
if err := os.Symlink(link, target); err != nil {
|
||||
return errors.Wrapf(err, "failed to create symlink: %s", target)
|
||||
}
|
||||
case (fi.Mode() & os.ModeDevice) == os.ModeDevice:
|
||||
if err := copyDevice(target, fi); err != nil {
|
||||
return errors.Wrapf(err, "failed to create device")
|
||||
}
|
||||
default:
|
||||
// TODO: Support pipes and sockets
|
||||
return errors.Wrapf(err, "unsupported mode %s", fi.Mode())
|
||||
}
|
||||
|
||||
if copyFileInfo {
|
||||
if err := c.copyFileInfo(fi, target); err != nil {
|
||||
return errors.Wrap(err, "failed to copy file info")
|
||||
}
|
||||
|
||||
if err := copyXAttrs(target, src, c.xattrErrorHandler); err != nil {
|
||||
return errors.Wrap(err, "failed to copy xattrs")
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// copyDirectory copies directory src to dst, recursing into each entry.
// It returns true when it created dst itself (dst did not previously
// exist). With overwriteTargetMetadata, an existing dst gets its mode
// replaced by stat's mode.
func (c *copier) copyDirectory(ctx context.Context, src, dst string, stat os.FileInfo, overwriteTargetMetadata bool) (bool, error) {
	if !stat.IsDir() {
		return false, errors.Errorf("source is not directory")
	}

	created := false

	if st, err := os.Lstat(dst); err != nil {
		if !os.IsNotExist(err) {
			return false, err
		}
		created = true
		if err := os.Mkdir(dst, stat.Mode()); err != nil {
			return created, errors.Wrapf(err, "failed to mkdir %s", dst)
		}
	} else if !st.IsDir() {
		return false, errors.Errorf("cannot copy to non-directory: %s", dst)
	} else if overwriteTargetMetadata {
		if err := os.Chmod(dst, stat.Mode()); err != nil {
			return false, errors.Wrapf(err, "failed to chmod on %s", dst)
		}
	}

	fis, err := ioutil.ReadDir(src)
	if err != nil {
		return false, errors.Wrapf(err, "failed to read %s", src)
	}

	for _, fi := range fis {
		// Children are copied with overwriteTargetMetadata=true: inside a
		// copied tree the source's metadata always wins.
		if err := c.copy(ctx, filepath.Join(src, fi.Name()), filepath.Join(dst, fi.Name()), true); err != nil {
			return false, err
		}
	}

	return created, nil
}
|
||||
|
||||
func ensureEmptyFileTarget(dst string) error {
|
||||
fi, err := os.Lstat(dst)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return nil
|
||||
}
|
||||
return errors.Wrap(err, "failed to lstat file target")
|
||||
}
|
||||
if fi.IsDir() {
|
||||
return errors.Errorf("cannot replace to directory %s with file", dst)
|
||||
}
|
||||
return os.Remove(dst)
|
||||
}
|
||||
|
||||
func copyFile(source, target string) error {
|
||||
src, err := os.Open(source)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to open source %s", source)
|
||||
}
|
||||
defer src.Close()
|
||||
tgt, err := os.Create(target)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to open target %s", target)
|
||||
}
|
||||
defer tgt.Close()
|
||||
|
||||
return copyFileContent(tgt, src)
|
||||
}
|
||||
|
||||
// containsWildcards reports whether name holds an unescaped glob
// metacharacter (*, ?, [). On non-Windows platforms a backslash escapes
// the character that follows it.
func containsWildcards(name string) bool {
	escapable := runtime.GOOS != "windows"
	for i := 0; i < len(name); i++ {
		switch name[i] {
		case '\\':
			if escapable {
				i++ // skip the escaped character
			}
		case '*', '?', '[':
			return true
		}
	}
	return false
}
|
||||
|
||||
func splitWildcards(p string) (d1, d2 string) {
|
||||
parts := strings.Split(filepath.Join(p), string(filepath.Separator))
|
||||
var p1, p2 []string
|
||||
var found bool
|
||||
for _, p := range parts {
|
||||
if !found && containsWildcards(p) {
|
||||
found = true
|
||||
}
|
||||
if p == "" {
|
||||
p = "/"
|
||||
}
|
||||
if !found {
|
||||
p1 = append(p1, p)
|
||||
} else {
|
||||
p2 = append(p2, p)
|
||||
}
|
||||
}
|
||||
return filepath.Join(p1...), filepath.Join(p2...)
|
||||
}
|
||||
|
||||
// resolveWildcards walks basePath and collects the entries whose path
// relative to basePath matches the pattern comp (filepath.Match syntax).
// Matched directories are returned but not descended into.
func resolveWildcards(basePath, comp string) ([]string, error) {
	var out []string
	err := filepath.Walk(basePath, func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		rel, err := rel(basePath, path)
		if err != nil {
			return err
		}
		if rel == "." {
			// Skip basePath itself.
			return nil
		}
		// NOTE(review): filepath.Match's only error is a malformed
		// pattern; it is deliberately discarded here, so a bad pattern
		// simply never matches — confirm this is the intended behavior.
		if match, _ := filepath.Match(comp, rel); !match {
			return nil
		}
		out = append(out, path)
		if info.IsDir() {
			// Do not recurse into a directory that already matched.
			return filepath.SkipDir
		}
		return nil
	})
	if err != nil {
		return nil, err
	}
	return out, nil
}
|
||||
|
||||
// rel makes a path relative to base path. Same as `filepath.Rel` but can also
|
||||
// handle UUID paths in windows.
|
||||
func rel(basepath, targpath string) (string, error) {
|
||||
// filepath.Rel can't handle UUID paths in windows
|
||||
if runtime.GOOS == "windows" {
|
||||
pfx := basepath + `\`
|
||||
if strings.HasPrefix(targpath, pfx) {
|
||||
p := strings.TrimPrefix(targpath, pfx)
|
||||
if p == "" {
|
||||
p = "."
|
||||
}
|
||||
return p, nil
|
||||
}
|
||||
}
|
||||
return filepath.Rel(basepath, targpath)
|
||||
}
|
|
@ -0,0 +1,97 @@
|
|||
package fs
|
||||
|
||||
import (
|
||||
"io"
|
||||
"math"
|
||||
"os"
|
||||
"syscall"
|
||||
|
||||
"github.com/containerd/containerd/sys"
|
||||
"github.com/pkg/errors"
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
// getUidGid extracts the numeric owner uid/gid from a FileInfo backed by
// a *syscall.Stat_t.
func getUidGid(fi os.FileInfo) (uid, gid int) {
	st := fi.Sys().(*syscall.Stat_t)
	return int(st.Uid), int(st.Gid)
}
|
||||
|
||||
// copyFileInfo applies ownership, permission bits and timestamps from fi
// onto the file at name. Explicit copier overrides (chown/mode/utime)
// take precedence over the values carried by fi.
func (c *copier) copyFileInfo(fi os.FileInfo, name string) error {
	st := fi.Sys().(*syscall.Stat_t)

	chown := c.chown
	if chown == nil {
		// No override requested: preserve the source's uid/gid.
		uid, gid := getUidGid(fi)
		chown = &ChownOpt{Uid: uid, Gid: gid}
	}
	if err := Chown(name, chown); err != nil {
		return errors.Wrapf(err, "failed to chown %s", name)
	}

	m := fi.Mode()
	if c.mode != nil {
		// Replace only the 0777 permission bits; keep the file-type bits.
		m = (m & ^os.FileMode(0777)) | os.FileMode(*c.mode&0777)
	}
	if (fi.Mode() & os.ModeSymlink) != os.ModeSymlink {
		// os.Chmod follows symlinks, so it is skipped for links.
		if err := os.Chmod(name, m); err != nil {
			return errors.Wrapf(err, "failed to chmod %s", name)
		}
	}

	if c.utime != nil {
		if err := Utimes(name, c.utime); err != nil {
			return err
		}
	} else {
		// Preserve source atime/mtime without following symlinks.
		timespec := []unix.Timespec{unix.Timespec(sys.StatAtime(st)), unix.Timespec(sys.StatMtime(st))}
		if err := unix.UtimesNanoAt(unix.AT_FDCWD, name, timespec, unix.AT_SYMLINK_NOFOLLOW); err != nil {
			return errors.Wrapf(err, "failed to utime %s", name)
		}
	}

	return nil
}
|
||||
|
||||
// copyFileContent copies src's content into dst using copy_file_range(2)
// in chunks of at most MaxInt32 bytes. If the very first attempt fails
// with ENOSYS/EXDEV/EPERM (kernel or filesystem does not support it), it
// falls back to a userspace io.CopyBuffer with a pooled buffer.
func copyFileContent(dst, src *os.File) error {
	st, err := src.Stat()
	if err != nil {
		return errors.Wrap(err, "unable to stat source")
	}

	var written int64
	size := st.Size()
	first := true

	for written < size {
		// copy_file_range takes an int count; clamp each request.
		var desired int
		if size-written > math.MaxInt32 {
			desired = int(math.MaxInt32)
		} else {
			desired = int(size - written)
		}

		n, err := unix.CopyFileRange(int(src.Fd()), nil, int(dst.Fd()), nil, desired, 0)
		if err != nil {
			// Only the listed errnos on the first call trigger fallback;
			// any later failure is fatal.
			if (err != unix.ENOSYS && err != unix.EXDEV && err != unix.EPERM) || !first {
				return errors.Wrap(err, "copy file range failed")
			}

			buf := bufferPool.Get().(*[]byte)
			_, err = io.CopyBuffer(dst, src, *buf)
			bufferPool.Put(buf)
			// errors.Wrap(nil, ...) is nil, so this doubles as the
			// success return for the fallback path.
			return errors.Wrap(err, "userspace copy failed")
		}

		first = false
		written += int64(n)
	}
	return nil
}
|
||||
|
||||
// copyDevice recreates the device node described by fi at dst via
// mknod(2).
// NOTE(review): uint32(fi.Mode()) passes Go's os.FileMode bit layout to
// mknod, which differs from the raw st_mode layout — verify this is
// intended for the device types being copied.
func copyDevice(dst string, fi os.FileInfo) error {
	st, ok := fi.Sys().(*syscall.Stat_t)
	if !ok {
		return errors.New("unsupported stat type")
	}
	return unix.Mknod(dst, uint32(fi.Mode()), int(st.Rdev))
}
|
|
@ -0,0 +1,28 @@
|
|||
// +build !windows
|
||||
|
||||
package fs
|
||||
|
||||
import (
|
||||
"github.com/pkg/errors"
|
||||
|
||||
"github.com/containerd/continuity/sysx"
|
||||
)
|
||||
|
||||
// copyXAttrs requires xeh to be non-nil
|
||||
func copyXAttrs(dst, src string, xeh XAttrErrorHandler) error {
|
||||
xattrKeys, err := sysx.LListxattr(src)
|
||||
if err != nil {
|
||||
return xeh(dst, src, "", errors.Wrapf(err, "failed to list xattrs on %s", src))
|
||||
}
|
||||
for _, xattr := range xattrKeys {
|
||||
data, err := sysx.LGetxattr(src, xattr)
|
||||
if err != nil {
|
||||
return xeh(dst, src, xattr, errors.Wrapf(err, "failed to get xattr %q on %s", xattr, src))
|
||||
}
|
||||
if err := sysx.LSetxattr(dst, xattr, data, 0); err != nil {
|
||||
return xeh(dst, src, xattr, errors.Wrapf(err, "failed to set xattr %q on %s", xattr, dst))
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
|
@ -0,0 +1,68 @@
|
|||
// +build solaris darwin freebsd
|
||||
|
||||
package fs
|
||||
|
||||
import (
|
||||
"io"
|
||||
"os"
|
||||
"syscall"
|
||||
|
||||
"github.com/containerd/containerd/sys"
|
||||
"github.com/pkg/errors"
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
// getUidGid extracts the numeric owner uid/gid from a FileInfo backed by
// a *syscall.Stat_t.
func getUidGid(fi os.FileInfo) (uid, gid int) {
	st := fi.Sys().(*syscall.Stat_t)
	return int(st.Uid), int(st.Gid)
}
|
||||
|
||||
// copyFileInfo applies ownership, permission bits and timestamps from fi
// onto the file at name; explicit copier overrides win over fi's values.
func (c *copier) copyFileInfo(fi os.FileInfo, name string) error {
	st := fi.Sys().(*syscall.Stat_t)
	chown := c.chown
	if chown == nil {
		// No override requested: preserve the source's uid/gid.
		uid, gid := getUidGid(fi)
		chown = &ChownOpt{Uid: uid, Gid: gid}
	}
	if err := Chown(name, chown); err != nil {
		return errors.Wrapf(err, "failed to chown %s", name)
	}

	m := fi.Mode()
	if c.mode != nil {
		// Replace only the 0777 permission bits; keep the file-type bits.
		m = (m & ^os.FileMode(0777)) | os.FileMode(*c.mode&0777)
	}
	if (fi.Mode() & os.ModeSymlink) != os.ModeSymlink {
		// os.Chmod follows symlinks, so it is skipped for links.
		if err := os.Chmod(name, m); err != nil {
			return errors.Wrapf(err, "failed to chmod %s", name)
		}
	}

	if c.utime != nil {
		if err := Utimes(name, c.utime); err != nil {
			return err
		}
	} else {
		// Preserve source atime/mtime without following symlinks.
		timespec := []unix.Timespec{unix.Timespec(sys.StatAtime(st)), unix.Timespec(sys.StatMtime(st))}
		if err := unix.UtimesNanoAt(unix.AT_FDCWD, name, timespec, unix.AT_SYMLINK_NOFOLLOW); err != nil {
			return errors.Wrapf(err, "failed to utime %s", name)
		}
	}
	return nil
}
|
||||
|
||||
func copyFileContent(dst, src *os.File) error {
|
||||
buf := bufferPool.Get().(*[]byte)
|
||||
_, err := io.CopyBuffer(dst, src, *buf)
|
||||
bufferPool.Put(buf)
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// copyDevice recreates the device node described by fi at dst via
// mknod(2).
// NOTE(review): uint32(fi.Mode()) passes Go's os.FileMode bit layout to
// mknod rather than the raw st_mode — verify for the device types used.
func copyDevice(dst string, fi os.FileInfo) error {
	st, ok := fi.Sys().(*syscall.Stat_t)
	if !ok {
		return errors.New("unsupported stat type")
	}
	return unix.Mknod(dst, uint32(fi.Mode()), int(st.Rdev))
}
|
|
@ -0,0 +1,33 @@
|
|||
package fs
|
||||
|
||||
import (
|
||||
"io"
|
||||
"os"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
func (c *copier) copyFileInfo(fi os.FileInfo, name string) error {
|
||||
if err := os.Chmod(name, fi.Mode()); err != nil {
|
||||
return errors.Wrapf(err, "failed to chmod %s", name)
|
||||
}
|
||||
|
||||
// TODO: copy windows specific metadata
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func copyFileContent(dst, src *os.File) error {
|
||||
buf := bufferPool.Get().(*[]byte)
|
||||
_, err := io.CopyBuffer(dst, src, *buf)
|
||||
bufferPool.Put(buf)
|
||||
return err
|
||||
}
|
||||
|
||||
// copyXAttrs is a no-op on this platform: extended attributes are not
// copied, and xeh is never invoked.
func copyXAttrs(dst, src string, xeh XAttrErrorHandler) error {
	return nil
}
|
||||
|
||||
// copyDevice always fails on this platform: device nodes cannot be
// recreated here.
func copyDevice(dst string, fi os.FileInfo) error {
	return errors.New("device copy not supported")
}
|
|
@ -0,0 +1,27 @@
|
|||
package fs
|
||||
|
||||
import "os"
|
||||
|
||||
// GetLinkInfo returns an identifier representing the node a hardlink is pointing
// to. If the file is not hard linked then 0 will be returned.
// The boolean reports whether fi is a hardlinked file at all.
func GetLinkInfo(fi os.FileInfo) (uint64, bool) {
	return getLinkInfo(fi)
}
|
||||
|
||||
// getLinkSource returns a path for the given name and
|
||||
// file info to its link source in the provided inode
|
||||
// map. If the given file name is not in the map and
|
||||
// has other links, it is added to the inode map
|
||||
// to be a source for other link locations.
|
||||
func getLinkSource(name string, fi os.FileInfo, inodes map[uint64]string) (string, error) {
|
||||
inode, isHardlink := getLinkInfo(fi)
|
||||
if !isHardlink {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
path, ok := inodes[inode]
|
||||
if !ok {
|
||||
inodes[inode] = name
|
||||
}
|
||||
return path, nil
|
||||
}
|
|
@ -0,0 +1,17 @@
|
|||
// +build !windows
|
||||
|
||||
package fs
|
||||
|
||||
import (
|
||||
"os"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
// getLinkInfo returns fi's inode number and whether fi is a hardlinked
// non-directory (link count > 1). A FileInfo not backed by
// *syscall.Stat_t yields (0, false).
func getLinkInfo(fi os.FileInfo) (uint64, bool) {
	s, ok := fi.Sys().(*syscall.Stat_t)
	if !ok {
		return 0, false
	}

	return uint64(s.Ino), !fi.IsDir() && s.Nlink > 1
}
|
|
@ -0,0 +1,7 @@
|
|||
package fs
|
||||
|
||||
import "os"
|
||||
|
||||
// getLinkInfo is a stub for platforms without inode/link-count
// information; it reports that no file is hardlinked.
func getLinkInfo(fi os.FileInfo) (uint64, bool) {
	return 0, false
}
|
|
@ -0,0 +1,74 @@
|
|||
package fs
|
||||
|
||||
import (
|
||||
"os"
|
||||
"syscall"
|
||||
"time"
|
||||
)
|
||||
|
||||
func Chown(p string, user *ChownOpt) error {
|
||||
if user != nil {
|
||||
if err := os.Lchown(p, user.Uid, user.Gid); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// MkdirAll is forked os.MkdirAll
// It additionally applies the given ownership (user) and timestamps (tm)
// to each directory it creates — but not to directories that already
// existed.
func MkdirAll(path string, perm os.FileMode, user *ChownOpt, tm *time.Time) error {
	// Fast path: if we can tell whether path is a directory or file, stop with success or error.
	dir, err := os.Stat(path)
	if err == nil {
		if dir.IsDir() {
			return nil
		}
		return &os.PathError{Op: "mkdir", Path: path, Err: syscall.ENOTDIR}
	}

	// Slow path: make sure parent exists and then call Mkdir for path.
	i := len(path)
	for i > 0 && os.IsPathSeparator(path[i-1]) { // Skip trailing path separator.
		i--
	}

	j := i
	for j > 0 && !os.IsPathSeparator(path[j-1]) { // Scan backward over element.
		j--
	}

	if j > 1 {
		// Create parent.
		err = MkdirAll(fixRootDirectory(path[:j-1]), perm, user, tm)
		if err != nil {
			return err
		}
	}

	// Another goroutine/process may have created it since the Stat above.
	dir, err1 := os.Lstat(path)
	if err1 == nil && dir.IsDir() {
		return nil
	}

	// Parent now exists; invoke Mkdir and use its result.
	err = os.Mkdir(path, perm)
	if err != nil {
		// Handle arguments like "foo/." by
		// double-checking that directory doesn't exist.
		dir, err1 := os.Lstat(path)
		if err1 == nil && dir.IsDir() {
			return nil
		}
		return err
	}

	// Only freshly created directories get the requested owner/times.
	if err := Chown(path, user); err != nil {
		return err
	}

	if err := Utimes(path, tm); err != nil {
		return err
	}

	return nil
}
|
|
@ -0,0 +1,32 @@
|
|||
// +build !windows
|
||||
|
||||
package fs
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
// fixRootDirectory is a no-op on non-Windows platforms.
func fixRootDirectory(p string) string {
	return p
}
|
||||
|
||||
func Utimes(p string, tm *time.Time) error {
|
||||
if tm == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
ts, err := unix.TimeToTimespec(*tm)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
timespec := []unix.Timespec{ts, ts}
|
||||
if err := unix.UtimesNanoAt(unix.AT_FDCWD, p, timespec, unix.AT_SYMLINK_NOFOLLOW); err != nil {
|
||||
return errors.Wrapf(err, "failed to utime %s", p)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
|
@ -0,0 +1,21 @@
|
|||
// +build windows
|
||||
|
||||
package fs
|
||||
|
||||
import (
|
||||
"os"
|
||||
"time"
|
||||
)
|
||||
|
||||
// fixRootDirectory appends a trailing separator to bare `\\?\c:`-style
// volume roots so they are treated as directories.
func fixRootDirectory(p string) string {
	if len(p) == len(`\\?\c:`) &&
		os.IsPathSeparator(p[0]) &&
		os.IsPathSeparator(p[1]) &&
		p[2] == '?' &&
		os.IsPathSeparator(p[3]) &&
		p[5] == ':' {
		return p + `\`
	}
	return p
}
|
||||
|
||||
// Utimes is a no-op on Windows; timestamps are not applied.
func Utimes(p string, tm *time.Time) error {
	return nil
}
|
|
@ -118,17 +118,21 @@ github.com/docker/distribution/reference
|
|||
github.com/docker/distribution/digestset
|
||||
# github.com/docker/docker v0.7.3-0.20180531152204-71cd53e4a197
|
||||
github.com/docker/docker/pkg/locker
|
||||
github.com/docker/docker/pkg/reexec
|
||||
github.com/docker/docker/builder/dockerignore
|
||||
github.com/docker/docker/api/types/strslice
|
||||
github.com/docker/docker/pkg/signal
|
||||
github.com/docker/docker/api/types/container
|
||||
github.com/docker/docker/pkg/archive
|
||||
github.com/docker/docker/pkg/chrootarchive
|
||||
github.com/docker/docker/pkg/fileutils
|
||||
github.com/docker/docker/pkg/ioutils
|
||||
github.com/docker/docker/api/types/blkiodev
|
||||
github.com/docker/docker/api/types/mount
|
||||
github.com/docker/docker/pkg/homedir
|
||||
github.com/docker/docker/pkg/longpath
|
||||
github.com/docker/docker/pkg/idtools
|
||||
github.com/docker/docker/pkg/longpath
|
||||
github.com/docker/docker/pkg/pools
|
||||
github.com/docker/docker/pkg/system
|
||||
github.com/docker/docker/pkg/mount
|
||||
# github.com/docker/docker-credential-helpers v0.6.0
|
||||
|
@ -210,9 +214,10 @@ github.com/stretchr/testify/require
|
|||
github.com/stretchr/testify/assert
|
||||
# github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8
|
||||
github.com/syndtr/gocapability/capability
|
||||
# github.com/tonistiigi/fsutil v0.0.0-20190314220245-1ec1983587cd
|
||||
# github.com/tonistiigi/fsutil v0.0.0-20190316003333-2a10686c7e92
|
||||
github.com/tonistiigi/fsutil
|
||||
github.com/tonistiigi/fsutil/types
|
||||
github.com/tonistiigi/fsutil/copy
|
||||
# github.com/tonistiigi/units v0.0.0-20180711220420-6950e57a87ea
|
||||
github.com/tonistiigi/units
|
||||
# github.com/uber/jaeger-client-go v0.0.0-20180103221425-e02c85f9069e
|
||||
|
|
|
@ -198,8 +198,12 @@ func (w *Worker) ResolveOp(v solver.Vertex, s frontend.FrontendLLBBridge, sm *se
|
|||
return ops.NewSourceOp(v, op, baseOp.Platform, w.SourceManager, sm, w)
|
||||
case *pb.Op_Exec:
|
||||
return ops.NewExecOp(v, op, baseOp.Platform, w.CacheManager, sm, w.MetadataStore, w.Executor, w)
|
||||
case *pb.Op_File:
|
||||
return ops.NewFileOp(v, op, w.CacheManager, w.MetadataStore, w)
|
||||
case *pb.Op_Build:
|
||||
return ops.NewBuildOp(v, op, s, w)
|
||||
default:
|
||||
return nil, errors.Errorf("no support for %T", op)
|
||||
}
|
||||
}
|
||||
return nil, errors.Errorf("could not resolve %v", v)
|
||||
|
|
Loading…
Reference in New Issue