From 9ce5053a8b1fed326ed706dc105045139aad46bb Mon Sep 17 00:00:00 2001
From: Derek McGowan
Date: Tue, 18 Jul 2017 18:05:19 -0700
Subject: [PATCH] Update containerd version

Fix for logrus rename; use fork until fixed in moby.
Removed unused tar stream.

Signed-off-by: Derek McGowan
---
 cache/instructioncache/cache.go | 2 +-
 cache/manager.go | 2 +-
 cache/metadata/metadata.go | 2 +-
 client/solve.go | 2 +-
 cmd/buildctl/build.go | 2 +-
 cmd/buildctl/main.go | 2 +-
 cmd/buildd/debug.go | 2 +-
 cmd/buildd/main.go | 2 +-
 control/control.go | 2 +-
 control/control_standalone.go | 4 +-
 exporter/containerimage/export.go | 2 +-
 hack/dockerfiles/test.Dockerfile | 6 +-
 session/filesync/diffcopy.go | 2 +-
 session/filesync/filesync.go | 5 -
 session/filesync/tarstream.go | 94 --
 session/grpc.go | 2 +-
 session/testutil/testutil.go | 2 +-
 snapshot/blobmapping/snapshotter.go | 2 +-
 solver/jobs.go | 2 +-
 solver/solver.go | 2 +-
 source/git/gitsource.go | 2 +-
 source/local/local.go | 2 +-
 util/appcontext/appcontext.go | 2 +-
 vendor.conf | 11 +-
 vendor/github.com/Azure/go-ansiterm/parser.go | 2 +-
 .../go-ansiterm/winterm/win_event_handler.go | 2 +-
 .../Sirupsen/logrus/json_formatter.go | 41 -
 .../Sirupsen/logrus/terminal_solaris.go | 15 -
 .../Sirupsen/logrus/terminal_windows.go | 27 -
 .../containerd/containerd/README.md | 5 +-
 .../api/services/snapshot/v1/snapshots.pb.go | 156 ++--
 .../api/services/snapshot/v1/snapshots.proto | 8 +-
 .../containerd/containerd/archive/path.go | 107 +++
 .../containerd/containerd/archive/tar.go | 79 +-
 .../containerd/containerd/archive/tar_unix.go | 2 +-
 .../containerd/archive/tar_windows.go | 2 +-
 .../containerd/containerd/client.go | 12 +-
 .../containerd/containerd/client_unix.go | 3 -
 .../containerd/containerd/client_windows.go | 3 -
 .../containerd/containerd/container.go | 46 +-
 .../containerd/containerd/content/content.go | 9 -
 .../containerd/containerd/content/helpers.go | 14 +-
 .../containerd/content/{ => local}/locks.go | 2 +-
 .../content/{ => local}/readerat.go | 2 +-
 .../containerd/content/{ => local}/store.go | 53 +-
 .../content/{ => local}/store_linux.go | 2 +-
 .../content/{ => local}/store_unix.go | 2 +-
 .../content/{ => local}/store_windows.go | 2 +-
 .../containerd/content/{ => local}/writer.go | 20 +-
 .../containerd/containerd/errdefs/errors.go | 2 +-
 .../containerd/containerd/events/emitter.go | 2 +
 .../containerd/containerd/events/poster.go | 2 +-
 .../containerd/containerd/events/sink.go | 2 +-
 .../containerd/containerd/filters/parser.go | 4 +-
 .../containerd/containerd/fs/diff.go | 2 +-
 .../containerd/containerd/images/handlers.go | 7 +-
 vendor/github.com/containerd/containerd/io.go | 54 +-
 .../containerd/containerd/io_unix.go | 39 +-
 .../containerd/containerd/io_windows.go | 26 +-
 .../containerd/containerd/log/context.go | 2 +-
 .../containerd/containerd/metadata/content.go | 43 +-
 .../containerd/mount/mount_linux.go | 19 +
 .../containerd/containerd/mount/mount_unix.go | 4 +
 .../containerd/mount/mount_windows.go | 4 +
 .../containerd/containerd/process.go | 2 +
 .../containerd/reference/reference.go | 8 +-
 .../containerd/remotes/docker/fetcher.go | 2 +-
 .../containerd/remotes/docker/resolver.go | 2 +-
 .../remotes/docker/schema1/converter.go | 2 +-
 .../containerd/containerd/remotes/handlers.go | 2 +-
 .../containerd/containerd/runtime/errors.go | 10 -
 .../containerd/containerd/runtime/monitor.go | 52 --
 .../containerd/containerd/runtime/runtime.go | 51 --
 .../containerd/containerd/runtime/task.go | 82 --
 .../containerd/services/content/service.go | 2 +-
 .../containerd/services/snapshot/client.go | 9 +-
 .../containerd/services/snapshot/service.go | 7 +-
 .../containerd/snapshot/naive/naive.go | 36 +-
 .../containerd/snapshot/overlay/overlay.go | 43 +-
 .../containerd/snapshot/snapshotter.go | 27 +-
 .../containerd/snapshot/storage/bolt.go | 138 ++-
 .../containerd/snapshot/storage/metastore.go | 16 +-
 .../snapshot/storage/proto/record.pb.go | 108 +--
 .../snapshot/storage/proto/record.proto | 11 +-
 .../containerd/containerd/spec_windows.go | 9 +-
 .../containerd/containerd/sys/prctl.go | 46 +-
 .../containerd/containerd/sys/socket_unix.go | 17 +
 .../github.com/containerd/containerd/task.go | 24 +-
 .../containerd/containerd/typeurl/types.go | 19 +-
 .../containerd/containerd/vendor.conf | 13 +-
 vendor/github.com/dmcgowan/go-tar/LICENSE | 27 +
 vendor/github.com/dmcgowan/go-tar/common.go | 286 +++++++
 vendor/github.com/dmcgowan/go-tar/format.go | 197 +++++
 vendor/github.com/dmcgowan/go-tar/reader.go | 800 ++++++++++++++++++
 .../github.com/dmcgowan/go-tar/stat_atim.go | 20 +
 .../dmcgowan/go-tar/stat_atimespec.go | 20 +
 .../github.com/dmcgowan/go-tar/stat_unix.go | 32 +
 vendor/github.com/dmcgowan/go-tar/strconv.go | 252 ++++++
 vendor/github.com/dmcgowan/go-tar/writer.go | 364 ++++++++
 .../docker/docker/pkg/archive/archive.go | 41 +-
 .../docker/docker/pkg/archive/changes.go | 2 +-
 .../docker/docker/pkg/archive/copy.go | 2 +-
 .../docker/docker/pkg/archive/diff.go | 2 +-
 .../docker/pkg/archive/example_changes.go | 2 +-
 .../docker/pkg/chrootarchive/archive.go | 70 --
 .../docker/pkg/chrootarchive/archive_unix.go | 86 --
 .../pkg/chrootarchive/archive_windows.go | 22 -
 .../docker/pkg/chrootarchive/chroot_linux.go | 108 ---
 .../docker/pkg/chrootarchive/chroot_unix.go | 12 -
 .../docker/docker/pkg/chrootarchive/diff.go | 23 -
 .../docker/pkg/chrootarchive/diff_unix.go | 130 ---
 .../docker/pkg/chrootarchive/diff_windows.go | 45 -
 .../docker/pkg/chrootarchive/init_unix.go | 28 -
 .../docker/pkg/chrootarchive/init_windows.go | 4 -
 .../docker/docker/pkg/fileutils/fileutils.go | 2 +-
 .../docker/pkg/fileutils/fileutils_unix.go | 2 +-
 .../docker/docker/pkg/reexec/README.md | 5 -
 .../docker/docker/pkg/reexec/command_linux.go | 28 -
 .../docker/docker/pkg/reexec/command_unix.go | 23 -
 .../docker/pkg/reexec/command_unsupported.go | 12 -
 .../docker/pkg/reexec/command_windows.go | 23 -
 .../docker/docker/pkg/reexec/reexec.go | 47 -
 .../docker/pkg/system/syscall_windows.go | 2 +-
 vendor/github.com/docker/docker/vendor.conf | 2 +-
 .../github.com/docker/go-events/broadcast.go | 2 +-
 vendor/github.com/docker/go-events/queue.go | 2 +-
 vendor/github.com/docker/go-events/retry.go | 2 +-
 .../{Sirupsen => sirupsen}/logrus/LICENSE | 0
 .../{Sirupsen => sirupsen}/logrus/README.md | 178 ++--
 .../{Sirupsen => sirupsen}/logrus/alt_exit.go | 2 +-
 .../{Sirupsen => sirupsen}/logrus/doc.go | 4 +-
 .../{Sirupsen => sirupsen}/logrus/entry.go | 36 +-
 .../{Sirupsen => sirupsen}/logrus/exported.go | 4 +-
 .../logrus/formatter.go | 0
 .../{Sirupsen => sirupsen}/logrus/hooks.go | 0
 .../sirupsen/logrus/json_formatter.go | 74 ++
 .../{Sirupsen => sirupsen}/logrus/logger.go | 51 +-
 .../{Sirupsen => sirupsen}/logrus/logrus.go | 2 +-
 .../logrus/terminal_appengine.go | 4 +-
 .../logrus/terminal_bsd.go | 0
 .../logrus/terminal_linux.go | 0
 .../logrus/terminal_notwindows.go | 14 +-
 .../sirupsen/logrus/terminal_solaris.go | 21 +
 .../sirupsen/logrus/terminal_windows.go | 82 ++
 .../logrus/text_formatter.go | 55 +-
 .../{Sirupsen => sirupsen}/logrus/writer.go | 29 +-
 worker/runcworker/worker.go | 2 +-
 147 files changed, 3206 insertions(+), 1803 deletions(-)
 delete mode 100644 session/filesync/tarstream.go
 delete mode 100644 vendor/github.com/Sirupsen/logrus/json_formatter.go
 delete mode 100644 vendor/github.com/Sirupsen/logrus/terminal_solaris.go
 delete mode 100644 vendor/github.com/Sirupsen/logrus/terminal_windows.go
 create mode 100644 vendor/github.com/containerd/containerd/archive/path.go
 rename vendor/github.com/containerd/containerd/content/{ => local}/locks.go (97%)
 rename vendor/github.com/containerd/containerd/content/{ => local}/readerat.go (96%)
 rename vendor/github.com/containerd/containerd/content/{ => local}/store.go (86%)
 rename vendor/github.com/containerd/containerd/content/{ => local}/store_linux.go (93%)
 rename vendor/github.com/containerd/containerd/content/{ => local}/store_unix.go (94%)
 rename vendor/github.com/containerd/containerd/content/{ => local}/store_windows.go (85%)
 rename vendor/github.com/containerd/containerd/content/{ => local}/writer.go (87%)
 delete mode 100644 vendor/github.com/containerd/containerd/runtime/errors.go
 delete mode 100644 vendor/github.com/containerd/containerd/runtime/monitor.go
 delete mode 100644 vendor/github.com/containerd/containerd/runtime/runtime.go
 delete mode 100644 vendor/github.com/containerd/containerd/runtime/task.go
 create mode 100644 vendor/github.com/dmcgowan/go-tar/LICENSE
 create mode 100644 vendor/github.com/dmcgowan/go-tar/common.go
 create mode 100644 vendor/github.com/dmcgowan/go-tar/format.go
 create mode 100644 vendor/github.com/dmcgowan/go-tar/reader.go
 create mode 100644 vendor/github.com/dmcgowan/go-tar/stat_atim.go
 create mode 100644 vendor/github.com/dmcgowan/go-tar/stat_atimespec.go
 create mode 100644 vendor/github.com/dmcgowan/go-tar/stat_unix.go
 create mode 100644 vendor/github.com/dmcgowan/go-tar/strconv.go
 create mode 100644 vendor/github.com/dmcgowan/go-tar/writer.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/chrootarchive/archive.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/chrootarchive/archive_unix.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/chrootarchive/archive_windows.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/chrootarchive/chroot_linux.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/chrootarchive/chroot_unix.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/chrootarchive/diff.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/chrootarchive/diff_unix.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/chrootarchive/diff_windows.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/chrootarchive/init_unix.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/chrootarchive/init_windows.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/reexec/README.md
 delete mode 100644 vendor/github.com/docker/docker/pkg/reexec/command_linux.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/reexec/command_unix.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/reexec/command_unsupported.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/reexec/command_windows.go
 delete mode 100644 vendor/github.com/docker/docker/pkg/reexec/reexec.go
 rename vendor/github.com/{Sirupsen => sirupsen}/logrus/LICENSE (100%)
 rename vendor/github.com/{Sirupsen => sirupsen}/logrus/README.md (74%)
 rename vendor/github.com/{Sirupsen => sirupsen}/logrus/alt_exit.go (96%)
 rename vendor/github.com/{Sirupsen => sirupsen}/logrus/doc.go (83%)
 rename vendor/github.com/{Sirupsen => sirupsen}/logrus/entry.go (89%)
 rename vendor/github.com/{Sirupsen =>
sirupsen}/logrus/exported.go (99%) rename vendor/github.com/{Sirupsen => sirupsen}/logrus/formatter.go (100%) rename vendor/github.com/{Sirupsen => sirupsen}/logrus/hooks.go (100%) create mode 100644 vendor/github.com/sirupsen/logrus/json_formatter.go rename vendor/github.com/{Sirupsen => sirupsen}/logrus/logger.go (88%) rename vendor/github.com/{Sirupsen => sirupsen}/logrus/logrus.go (99%) rename vendor/github.com/{Sirupsen => sirupsen}/logrus/terminal_appengine.go (71%) rename vendor/github.com/{Sirupsen => sirupsen}/logrus/terminal_bsd.go (100%) rename vendor/github.com/{Sirupsen => sirupsen}/logrus/terminal_linux.go (100%) rename vendor/github.com/{Sirupsen => sirupsen}/logrus/terminal_notwindows.go (60%) create mode 100644 vendor/github.com/sirupsen/logrus/terminal_solaris.go create mode 100644 vendor/github.com/sirupsen/logrus/terminal_windows.go rename vendor/github.com/{Sirupsen => sirupsen}/logrus/text_formatter.go (73%) rename vendor/github.com/{Sirupsen => sirupsen}/logrus/writer.go (54%) diff --git a/cache/instructioncache/cache.go b/cache/instructioncache/cache.go index 74b619b7..22eb05ed 100644 --- a/cache/instructioncache/cache.go +++ b/cache/instructioncache/cache.go @@ -1,11 +1,11 @@ package instructioncache import ( - "github.com/Sirupsen/logrus" "github.com/boltdb/bolt" "github.com/moby/buildkit/cache" "github.com/moby/buildkit/cache/metadata" "github.com/pkg/errors" + "github.com/sirupsen/logrus" "golang.org/x/net/context" ) diff --git a/cache/manager.go b/cache/manager.go index 4d89b07f..8ac8712b 100644 --- a/cache/manager.go +++ b/cache/manager.go @@ -4,13 +4,13 @@ import ( "context" "sync" - "github.com/Sirupsen/logrus" cdsnapshot "github.com/containerd/containerd/snapshot" "github.com/moby/buildkit/cache/metadata" "github.com/moby/buildkit/client" "github.com/moby/buildkit/identity" "github.com/moby/buildkit/snapshot" "github.com/pkg/errors" + "github.com/sirupsen/logrus" "golang.org/x/sync/errgroup" ) diff --git a/cache/metadata/metadata.go b/cache/metadata/metadata.go index 7d4cc7ac..c375dbaf 100644 --- a/cache/metadata/metadata.go +++ b/cache/metadata/metadata.go @@ -4,9 +4,9 @@ import ( "encoding/json" "strings" - "github.com/Sirupsen/logrus" "github.com/boltdb/bolt" "github.com/pkg/errors" + "github.com/sirupsen/logrus" ) const ( diff --git a/client/solve.go b/client/solve.go index a2500e4d..8151cdf6 100644 --- a/client/solve.go +++ b/client/solve.go @@ -10,7 +10,6 @@ import ( "strings" "time" - "github.com/Sirupsen/logrus" controlapi "github.com/moby/buildkit/api/services/control" "github.com/moby/buildkit/client/llb" "github.com/moby/buildkit/session" @@ -18,6 +17,7 @@ import ( "github.com/moby/buildkit/session/grpchijack" "github.com/moby/buildkit/solver/pb" "github.com/pkg/errors" + "github.com/sirupsen/logrus" "golang.org/x/sync/errgroup" ) diff --git a/cmd/buildctl/build.go b/cmd/buildctl/build.go index f3483338..8d9db617 100644 --- a/cmd/buildctl/build.go +++ b/cmd/buildctl/build.go @@ -7,11 +7,11 @@ import ( "os" "strings" - "github.com/Sirupsen/logrus" "github.com/moby/buildkit/client" "github.com/moby/buildkit/util/appcontext" "github.com/moby/buildkit/util/progress/progressui" "github.com/pkg/errors" + "github.com/sirupsen/logrus" "github.com/urfave/cli" "golang.org/x/sync/errgroup" ) diff --git a/cmd/buildctl/main.go b/cmd/buildctl/main.go index 4eb1e89e..c3b78d86 100644 --- a/cmd/buildctl/main.go +++ b/cmd/buildctl/main.go @@ -4,10 +4,10 @@ import ( "fmt" "os" - "github.com/Sirupsen/logrus" "github.com/moby/buildkit/client" 
"github.com/moby/buildkit/util/appdefaults" "github.com/moby/buildkit/util/profiler" + "github.com/sirupsen/logrus" "github.com/urfave/cli" ) diff --git a/cmd/buildd/debug.go b/cmd/buildd/debug.go index 602a0628..94a30211 100644 --- a/cmd/buildd/debug.go +++ b/cmd/buildd/debug.go @@ -6,7 +6,7 @@ import ( "net/http" "net/http/pprof" - "github.com/Sirupsen/logrus" + "github.com/sirupsen/logrus" "golang.org/x/net/trace" ) diff --git a/cmd/buildd/main.go b/cmd/buildd/main.go index 739af9c2..bf4be467 100644 --- a/cmd/buildd/main.go +++ b/cmd/buildd/main.go @@ -5,12 +5,12 @@ import ( "os" "path/filepath" - "github.com/Sirupsen/logrus" "github.com/containerd/containerd/sys" "github.com/moby/buildkit/util/appcontext" "github.com/moby/buildkit/util/appdefaults" "github.com/moby/buildkit/util/profiler" "github.com/pkg/errors" + "github.com/sirupsen/logrus" "github.com/urfave/cli" "golang.org/x/net/context" "google.golang.org/grpc" diff --git a/control/control.go b/control/control.go index 95e214db..4ddb40e6 100644 --- a/control/control.go +++ b/control/control.go @@ -1,7 +1,6 @@ package control import ( - "github.com/Sirupsen/logrus" "github.com/containerd/containerd/snapshot" controlapi "github.com/moby/buildkit/api/services/control" "github.com/moby/buildkit/cache" @@ -13,6 +12,7 @@ import ( "github.com/moby/buildkit/source" "github.com/moby/buildkit/worker" "github.com/pkg/errors" + "github.com/sirupsen/logrus" "golang.org/x/net/context" "golang.org/x/sync/errgroup" "google.golang.org/grpc" diff --git a/control/control_standalone.go b/control/control_standalone.go index f47baae4..c6484acc 100644 --- a/control/control_standalone.go +++ b/control/control_standalone.go @@ -7,7 +7,7 @@ import ( "os" "path/filepath" - "github.com/containerd/containerd/content" + "github.com/containerd/containerd/content/local" "github.com/containerd/containerd/differ" "github.com/containerd/containerd/mount" "github.com/containerd/containerd/namespaces" @@ -50,7 +50,7 @@ func newStandalonePullDeps(root string) (*pullDeps, error) { return nil, err } - c, err := content.NewStore(filepath.Join(root, "content")) + c, err := local.NewStore(filepath.Join(root, "content")) if err != nil { return nil, err } diff --git a/exporter/containerimage/export.go b/exporter/containerimage/export.go index 3dfe6ab6..07eb2850 100644 --- a/exporter/containerimage/export.go +++ b/exporter/containerimage/export.go @@ -7,7 +7,6 @@ import ( "runtime" "time" - "github.com/Sirupsen/logrus" "github.com/containerd/containerd/content" "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/images" @@ -23,6 +22,7 @@ import ( digest "github.com/opencontainers/go-digest" ocispec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" + "github.com/sirupsen/logrus" "golang.org/x/net/context" "golang.org/x/sync/errgroup" ) diff --git a/hack/dockerfiles/test.Dockerfile b/hack/dockerfiles/test.Dockerfile index 48f7c130..576b548b 100644 --- a/hack/dockerfiles/test.Dockerfile +++ b/hack/dockerfiles/test.Dockerfile @@ -1,5 +1,5 @@ ARG RUNC_VERSION=429a5387123625040bacfbb60d96b1cbd02293ab -ARG CONTAINERD_VERSION=v1.0.0-alpha0 +ARG CONTAINERD_VERSION=036232856fb8f088a844b22f3330bcddb5d44c0a FROM golang:1.8-alpine AS gobuild-base RUN apk add --no-cache g++ linux-headers @@ -21,8 +21,8 @@ RUN git clone https://github.com/containerd/containerd.git "$GOPATH/src/github.c && make bin/containerd FROM gobuild-base AS unit-tests -COPY --from=runc /usr/bin/runc /usr/bin/runc -COPY --from=containerd 
/go/src/github.com/containerd/containerd/bin/containerd /usr/bin/containerd +COPY --from=runc /usr/bin/runc /usr/bin/runc +COPY --from=containerd /go/src/github.com/containerd/containerd/bin/containerd* /usr/bin/ WORKDIR /go/src/github.com/moby/buildkit COPY . . diff --git a/session/filesync/diffcopy.go b/session/filesync/diffcopy.go index 4b2d3793..5e1b7e65 100644 --- a/session/filesync/diffcopy.go +++ b/session/filesync/diffcopy.go @@ -3,7 +3,7 @@ package filesync import ( "time" - "github.com/Sirupsen/logrus" + "github.com/sirupsen/logrus" "github.com/tonistiigi/fsutil" "google.golang.org/grpc" ) diff --git a/session/filesync/filesync.go b/session/filesync/filesync.go index 58812b96..7f02dd15 100644 --- a/session/filesync/filesync.go +++ b/session/filesync/filesync.go @@ -131,11 +131,6 @@ var supportedProtocols = []protocol{ sendFn: sendDiffCopy, recvFn: recvDiffCopy, }, - { - name: "tarstream", - sendFn: sendTarStream, - recvFn: recvTarStream, - }, } // FSSendRequestOpt defines options for FSSend request diff --git a/session/filesync/tarstream.go b/session/filesync/tarstream.go deleted file mode 100644 index a9d55d0f..00000000 --- a/session/filesync/tarstream.go +++ /dev/null @@ -1,94 +0,0 @@ -package filesync - -import ( - "io" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/chrootarchive" - "github.com/pkg/errors" - "google.golang.org/grpc" -) - -func sendTarStream(stream grpc.Stream, dir string, includes, excludes []string, progress progressCb) error { - a, err := archive.TarWithOptions(dir, &archive.TarOptions{ - ExcludePatterns: excludes, - }) - if err != nil { - return err - } - - size := 0 - buf := make([]byte, 1<<15) - t := new(BytesMessage) - for { - n, err := a.Read(buf) - if err != nil { - if err == io.EOF { - break - } - return err - } - t.Data = buf[:n] - - if err := stream.SendMsg(t); err != nil { - return err - } - size += n - if progress != nil { - progress(size, false) - } - } - if progress != nil { - progress(size, true) - } - return nil -} - -func recvTarStream(ds grpc.Stream, dest string, cs CacheUpdater, progress progressCb) error { - - pr, pw := io.Pipe() - - size := 0 - defer func() { - if progress != nil { - progress(size, true) - } - }() - - go func() { - var ( - err error - t = new(BytesMessage) - ) - for { - if err = ds.RecvMsg(t); err != nil { - if err == io.EOF { - err = nil - } - break - } - _, err = pw.Write(t.Data) - if err != nil { - break - } - size += len(t.Data) - if progress != nil { - progress(size, false) - } - } - if err = pw.CloseWithError(err); err != nil { - logrus.Errorf("failed to close tar transfer pipe") - } - }() - - decompressedStream, err := archive.DecompressStream(pr) - if err != nil { - return errors.Wrap(err, "failed to decompress stream") - } - - if err := chrootarchive.Untar(decompressedStream, dest, nil); err != nil { - return errors.Wrap(err, "failed to untar context") - } - return nil -} diff --git a/session/grpc.go b/session/grpc.go index 0f20b150..71d77bf4 100644 --- a/session/grpc.go +++ b/session/grpc.go @@ -4,8 +4,8 @@ import ( "net" "time" - "github.com/Sirupsen/logrus" "github.com/pkg/errors" + "github.com/sirupsen/logrus" "golang.org/x/net/context" "golang.org/x/net/http2" "google.golang.org/grpc" diff --git a/session/testutil/testutil.go b/session/testutil/testutil.go index 2e145d90..5a5adcfd 100644 --- a/session/testutil/testutil.go +++ b/session/testutil/testutil.go @@ -5,7 +5,7 @@ import ( "net" "time" - "github.com/Sirupsen/logrus" + 
"github.com/sirupsen/logrus" "golang.org/x/net/context" ) diff --git a/snapshot/blobmapping/snapshotter.go b/snapshot/blobmapping/snapshotter.go index 05c3541a..4d606a97 100644 --- a/snapshot/blobmapping/snapshotter.go +++ b/snapshot/blobmapping/snapshotter.go @@ -3,12 +3,12 @@ package blobmapping import ( "context" - "github.com/Sirupsen/logrus" "github.com/boltdb/bolt" "github.com/containerd/containerd/content" "github.com/containerd/containerd/snapshot" "github.com/moby/buildkit/cache/metadata" digest "github.com/opencontainers/go-digest" + "github.com/sirupsen/logrus" ) const blobKey = "blobmapping.blob" diff --git a/solver/jobs.go b/solver/jobs.go index c3fec844..70e9ddc4 100644 --- a/solver/jobs.go +++ b/solver/jobs.go @@ -6,11 +6,11 @@ import ( "sync" "time" - "github.com/Sirupsen/logrus" "github.com/moby/buildkit/client" "github.com/moby/buildkit/util/progress" digest "github.com/opencontainers/go-digest" "github.com/pkg/errors" + "github.com/sirupsen/logrus" ) type jobList struct { diff --git a/solver/solver.go b/solver/solver.go index 9c0e3e59..93d51088 100644 --- a/solver/solver.go +++ b/solver/solver.go @@ -3,7 +3,6 @@ package solver import ( "fmt" - "github.com/Sirupsen/logrus" "github.com/moby/buildkit/cache" "github.com/moby/buildkit/client" "github.com/moby/buildkit/exporter" @@ -12,6 +11,7 @@ import ( "github.com/moby/buildkit/util/progress" "github.com/moby/buildkit/worker" "github.com/pkg/errors" + "github.com/sirupsen/logrus" "golang.org/x/net/context" "golang.org/x/sync/errgroup" ) diff --git a/source/git/gitsource.go b/source/git/gitsource.go index abcd686d..6bbd4360 100644 --- a/source/git/gitsource.go +++ b/source/git/gitsource.go @@ -10,7 +10,6 @@ import ( "strings" "github.com/BurntSushi/locker" - "github.com/Sirupsen/logrus" "github.com/boltdb/bolt" "github.com/moby/buildkit/cache" "github.com/moby/buildkit/cache/metadata" @@ -19,6 +18,7 @@ import ( "github.com/moby/buildkit/source" "github.com/moby/buildkit/util/progress/logs" "github.com/pkg/errors" + "github.com/sirupsen/logrus" "golang.org/x/net/context" ) diff --git a/source/local/local.go b/source/local/local.go index bc3cdbe9..05c442e4 100644 --- a/source/local/local.go +++ b/source/local/local.go @@ -3,7 +3,6 @@ package local import ( "time" - "github.com/Sirupsen/logrus" "github.com/boltdb/bolt" "github.com/moby/buildkit/cache" "github.com/moby/buildkit/cache/metadata" @@ -13,6 +12,7 @@ import ( "github.com/moby/buildkit/source" "github.com/moby/buildkit/util/progress" "github.com/pkg/errors" + "github.com/sirupsen/logrus" "golang.org/x/net/context" "golang.org/x/time/rate" ) diff --git a/util/appcontext/appcontext.go b/util/appcontext/appcontext.go index 09e4448e..e74da2cd 100644 --- a/util/appcontext/appcontext.go +++ b/util/appcontext/appcontext.go @@ -6,7 +6,7 @@ import ( "os/signal" "sync" - "github.com/Sirupsen/logrus" + "github.com/sirupsen/logrus" ) var appContextCache context.Context diff --git a/vendor.conf b/vendor.conf index 116de4bc..4e24e74f 100644 --- a/vendor.conf +++ b/vendor.conf @@ -6,9 +6,9 @@ github.com/davecgh/go-spew v1.1.0 github.com/pmezard/go-difflib v1.0.0 golang.org/x/sys 739734461d1c916b6c72a63d7efda2b27edb369f -github.com/containerd/containerd v1.0.0-alpha0 +github.com/containerd/containerd 036232856fb8f088a844b22f3330bcddb5d44c0a golang.org/x/sync 450f422ab23cf9881c94e2db30cac0eb1b7cf80c -github.com/Sirupsen/logrus v0.11.0 +github.com/sirupsen/logrus v1.0.0 google.golang.org/grpc v1.3.0 github.com/opencontainers/go-digest 21dfd564fd89c944783d00d069f33e3e7123c448 
golang.org/x/net 1f9224279e98554b6a6432d4dd998a739f8b2b7c @@ -22,10 +22,10 @@ github.com/containerd/fifo 69b99525e472735860a5269b75af1970142b3062 github.com/opencontainers/runtime-spec 96de01bbb42c7af89bff100e10a9f0fb62e75bfb github.com/containerd/go-runc 2774a2ea124a5c2d0aba13b5c2dd8a5a9a48775d github.com/containerd/console 7fed77e673ca4abcd0cbd6d4d0e0e22137cbd778 -github.com/Azure/go-ansiterm fa152c58bc15761d0200cb75fe958b89a9d4888e +github.com/Azure/go-ansiterm 19f72df4d05d31cbe1c56bfc8045c96babff6c7e google.golang.org/genproto d80a6e20e776b0b17a324d0ba1ab50a39c8e8944 golang.org/x/text 19e51611da83d6be54ddafce4a4af510cb3e9ea4 -github.com/docker/go-events aa2e3b613fbbfdddbe055a7b9e3ce271cfd83eca +github.com/docker/go-events 9461782956ad83b30282bf90e31fa6a70c255ba9 github.com/urfave/cli d70f47eeca3afd795160003bc6e28b001d60c67c github.com/docker/go-units 0dadbb0345b35ec7ef35e228dabb8de89a65bf52 @@ -33,8 +33,9 @@ github.com/google/shlex 6f45313302b9c56850fc17f99e40caebce98c716 golang.org/x/time 8be79e1e0910c292df4e79c241bb7e8f7e725959 github.com/BurntSushi/locker 392720b78f44e9d0249fcac6c43b111b47a370b8 -github.com/docker/docker 05c7c311390911daebcf5d9519dee813fc02a887 +github.com/docker/docker 6301ac0c27aef52a96820482a01869457ae416f7 https://github.com/mcandre/moby.git github.com/pkg/profile 5b67d428864e92711fcbd2f8629456121a56d91f github.com/tonistiigi/fsutil 923ce3f08a66a6677878bbee066406d13eecd139 github.com/stevvooe/continuity 86cec1535a968310e7532819f699ff2830ed7463 +github.com/dmcgowan/go-tar 2e2c51242e8993c50445dab7c03c8e7febddd0cf diff --git a/vendor/github.com/Azure/go-ansiterm/parser.go b/vendor/github.com/Azure/go-ansiterm/parser.go index 169f68db..3286a9cb 100644 --- a/vendor/github.com/Azure/go-ansiterm/parser.go +++ b/vendor/github.com/Azure/go-ansiterm/parser.go @@ -5,7 +5,7 @@ import ( "io/ioutil" "os" - "github.com/Sirupsen/logrus" + "github.com/sirupsen/logrus" ) var logger *logrus.Logger diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/win_event_handler.go b/vendor/github.com/Azure/go-ansiterm/winterm/win_event_handler.go index 4d858ed6..48998bb0 100644 --- a/vendor/github.com/Azure/go-ansiterm/winterm/win_event_handler.go +++ b/vendor/github.com/Azure/go-ansiterm/winterm/win_event_handler.go @@ -9,7 +9,7 @@ import ( "strconv" "github.com/Azure/go-ansiterm" - "github.com/Sirupsen/logrus" + "github.com/sirupsen/logrus" ) var logger *logrus.Logger diff --git a/vendor/github.com/Sirupsen/logrus/json_formatter.go b/vendor/github.com/Sirupsen/logrus/json_formatter.go deleted file mode 100644 index 2ad6dc5c..00000000 --- a/vendor/github.com/Sirupsen/logrus/json_formatter.go +++ /dev/null @@ -1,41 +0,0 @@ -package logrus - -import ( - "encoding/json" - "fmt" -) - -type JSONFormatter struct { - // TimestampFormat sets the format used for marshaling timestamps. 
- TimestampFormat string -} - -func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) { - data := make(Fields, len(entry.Data)+3) - for k, v := range entry.Data { - switch v := v.(type) { - case error: - // Otherwise errors are ignored by `encoding/json` - // https://github.com/Sirupsen/logrus/issues/137 - data[k] = v.Error() - default: - data[k] = v - } - } - prefixFieldClashes(data) - - timestampFormat := f.TimestampFormat - if timestampFormat == "" { - timestampFormat = DefaultTimestampFormat - } - - data["time"] = entry.Time.Format(timestampFormat) - data["msg"] = entry.Message - data["level"] = entry.Level.String() - - serialized, err := json.Marshal(data) - if err != nil { - return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err) - } - return append(serialized, '\n'), nil -} diff --git a/vendor/github.com/Sirupsen/logrus/terminal_solaris.go b/vendor/github.com/Sirupsen/logrus/terminal_solaris.go deleted file mode 100644 index a3c6f6e7..00000000 --- a/vendor/github.com/Sirupsen/logrus/terminal_solaris.go +++ /dev/null @@ -1,15 +0,0 @@ -// +build solaris,!appengine - -package logrus - -import ( - "os" - - "golang.org/x/sys/unix" -) - -// IsTerminal returns true if the given file descriptor is a terminal. -func IsTerminal() bool { - _, err := unix.IoctlGetTermios(int(os.Stdout.Fd()), unix.TCGETA) - return err == nil -} diff --git a/vendor/github.com/Sirupsen/logrus/terminal_windows.go b/vendor/github.com/Sirupsen/logrus/terminal_windows.go deleted file mode 100644 index 3727e8ad..00000000 --- a/vendor/github.com/Sirupsen/logrus/terminal_windows.go +++ /dev/null @@ -1,27 +0,0 @@ -// Based on ssh/terminal: -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build windows,!appengine - -package logrus - -import ( - "syscall" - "unsafe" -) - -var kernel32 = syscall.NewLazyDLL("kernel32.dll") - -var ( - procGetConsoleMode = kernel32.NewProc("GetConsoleMode") -) - -// IsTerminal returns true if stderr's file descriptor is a terminal. -func IsTerminal() bool { - fd := syscall.Stderr - var st uint32 - r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0) - return r != 0 && e == 0 -} diff --git a/vendor/github.com/containerd/containerd/README.md b/vendor/github.com/containerd/containerd/README.md index b8f9d64d..a57b25d4 100644 --- a/vendor/github.com/containerd/containerd/README.md +++ b/vendor/github.com/containerd/containerd/README.md @@ -3,6 +3,7 @@ [![GoDoc](https://godoc.org/github.com/containerd/containerd?status.svg)](https://godoc.org/github.com/containerd/containerd) [![Build Status](https://travis-ci.org/containerd/containerd.svg?branch=master)](https://travis-ci.org/containerd/containerd) [![FOSSA Status](https://app.fossa.io/api/projects/git%2Bhttps%3A%2F%2Fgithub.com%2Fcontainerd%2Fcontainerd.svg?type=shield)](https://app.fossa.io/projects/git%2Bhttps%3A%2F%2Fgithub.com%2Fcontainerd%2Fcontainerd?ref=badge_shield) +[![Go Report Card](https://goreportcard.com/badge/github.com/containerd/containerd)](https://goreportcard.com/report/github.com/containerd/containerd) containerd is an industry-standard container runtime with an emphasis on simplicity, robustness and portability. It is available as a daemon for Linux and Windows, which can manage the complete container lifecycle of its host system: image transfer and storage, container execution and supervision, low-level storage and network attachments, etc. 
@@ -85,7 +86,7 @@ image, err := client.Pull(context, "docker.io/library/redis:latest", containerd. // allocate a new RW root filesystem for a container based on the image redis, err := client.NewContainer(context, "redis-master", containerd.WithSpec(spec), - containerd.WithNewRootFS("redis-rootfs", image), + containerd.WithNewSnapshot("redis-rootfs", image), ) // use a readonly filesystem with multiple containers @@ -93,7 +94,7 @@ for i := 0; i < 10; i++ { id := fmt.Sprintf("id-%s", i) container, err := client.NewContainer(ctx, id, containerd.WithSpec(spec), - containerd.WithNewReadonlyRootFS(id, image), + containerd.WithNewSnapshotView(id, image), ) } ``` diff --git a/vendor/github.com/containerd/containerd/api/services/snapshot/v1/snapshots.pb.go b/vendor/github.com/containerd/containerd/api/services/snapshot/v1/snapshots.pb.go index 5f365ef2..5770792c 100644 --- a/vendor/github.com/containerd/containerd/api/services/snapshot/v1/snapshots.pb.go +++ b/vendor/github.com/containerd/containerd/api/services/snapshot/v1/snapshots.pb.go @@ -58,17 +58,23 @@ const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package type Kind int32 const ( - KindActive Kind = 0 - KindCommitted Kind = 1 + KindUnknown Kind = 0 + KindView Kind = 1 + KindActive Kind = 2 + KindCommitted Kind = 3 ) var Kind_name = map[int32]string{ - 0: "ACTIVE", - 1: "COMMITTED", + 0: "UNKNOWN", + 1: "VIEW", + 2: "ACTIVE", + 3: "COMMITTED", } var Kind_value = map[string]int32{ - "ACTIVE": 0, - "COMMITTED": 1, + "UNKNOWN": 0, + "VIEW": 1, + "ACTIVE": 2, + "COMMITTED": 3, } func (x Kind) String() string { @@ -158,10 +164,9 @@ func (*StatSnapshotRequest) ProtoMessage() {} func (*StatSnapshotRequest) Descriptor() ([]byte, []int) { return fileDescriptorSnapshots, []int{8} } type Info struct { - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Parent string `protobuf:"bytes,2,opt,name=parent,proto3" json:"parent,omitempty"` - Kind Kind `protobuf:"varint,3,opt,name=kind,proto3,enum=containerd.services.snapshots.v1.Kind" json:"kind,omitempty"` - Readonly bool `protobuf:"varint,4,opt,name=readonly,proto3" json:"readonly,omitempty"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Parent string `protobuf:"bytes,2,opt,name=parent,proto3" json:"parent,omitempty"` + Kind Kind `protobuf:"varint,3,opt,name=kind,proto3,enum=containerd.services.snapshots.v1.Kind" json:"kind,omitempty"` } func (m *Info) Reset() { *m = Info{} } @@ -880,16 +885,6 @@ func (m *Info) MarshalTo(dAtA []byte) (int, error) { i++ i = encodeVarintSnapshots(dAtA, i, uint64(m.Kind)) } - if m.Readonly { - dAtA[i] = 0x20 - i++ - if m.Readonly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } return i, nil } @@ -1204,9 +1199,6 @@ func (m *Info) Size() (n int) { if m.Kind != 0 { n += 1 + sovSnapshots(uint64(m.Kind)) } - if m.Readonly { - n += 2 - } return n } @@ -1386,7 +1378,6 @@ func (this *Info) String() string { `Name:` + fmt.Sprintf("%v", this.Name) + `,`, `Parent:` + fmt.Sprintf("%v", this.Parent) + `,`, `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, - `Readonly:` + fmt.Sprintf("%v", this.Readonly) + `,`, `}`, }, "") return s @@ -2535,26 +2526,6 @@ func (m *Info) Unmarshal(dAtA []byte) error { break } } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Readonly", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnapshots - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - 
iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Readonly = bool(v != 0) default: iNdEx = preIndex skippy, err := skipSnapshots(dAtA[iNdEx:]) @@ -3122,52 +3093,53 @@ func init() { } var fileDescriptorSnapshots = []byte{ - // 738 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x56, 0x5d, 0x4f, 0xd4, 0x4c, - 0x18, 0xdd, 0xd2, 0xbe, 0x0b, 0x3c, 0x7c, 0x84, 0x77, 0x58, 0x60, 0xd3, 0x37, 0xe9, 0xdb, 0xf4, - 0xc2, 0x10, 0x2f, 0x5a, 0xc0, 0x08, 0x28, 0x37, 0xb2, 0x2b, 0x31, 0x2b, 0x12, 0x4d, 0x41, 0x22, - 0xc6, 0xc4, 0x94, 0xed, 0xb0, 0x34, 0xd0, 0x4e, 0xed, 0xcc, 0xae, 0x59, 0x2f, 0x8c, 0xde, 0x19, - 0xfe, 0x03, 0x57, 0xfa, 0x2b, 0xfc, 0x05, 0x5c, 0x7a, 0xe9, 0x95, 0x91, 0xfd, 0x25, 0xa6, 0xd3, - 0xd9, 0x0f, 0xa4, 0x66, 0xcb, 0x82, 0x77, 0xcf, 0x7c, 0x9c, 0xf3, 0x9c, 0x9d, 0xd3, 0x39, 0xb3, - 0x50, 0xa9, 0x79, 0xec, 0xb0, 0xbe, 0x6f, 0x56, 0x89, 0x6f, 0x55, 0x49, 0xc0, 0x1c, 0x2f, 0xc0, - 0x91, 0xdb, 0x5b, 0x3a, 0xa1, 0x67, 0x51, 0x1c, 0x35, 0xbc, 0x2a, 0xa6, 0x16, 0x0d, 0x9c, 0x90, - 0x1e, 0x12, 0x66, 0x35, 0x16, 0x3b, 0x35, 0x35, 0xc3, 0x88, 0x30, 0x82, 0xf4, 0x2e, 0xc8, 0x6c, - 0x03, 0xcc, 0xee, 0xa6, 0xc6, 0xa2, 0x5a, 0xa8, 0x91, 0x1a, 0xe1, 0x9b, 0xad, 0xb8, 0x4a, 0x70, - 0xea, 0x7f, 0x35, 0x42, 0x6a, 0xc7, 0xd8, 0xe2, 0xa3, 0xfd, 0xfa, 0x81, 0x85, 0xfd, 0x90, 0x35, - 0xc5, 0xe2, 0x72, 0x26, 0x7d, 0xac, 0x19, 0x62, 0x6a, 0xf9, 0xa4, 0x1e, 0xb0, 0x04, 0x67, 0xb8, - 0x30, 0xfb, 0x2c, 0xc2, 0xa1, 0x13, 0xe1, 0x6d, 0xa1, 0xc0, 0xc6, 0x6f, 0xea, 0x98, 0x32, 0xa4, - 0xc3, 0x58, 0x5b, 0x14, 0xc3, 0x51, 0x51, 0xd2, 0xa5, 0xf9, 0x51, 0xbb, 0x77, 0x0a, 0x4d, 0x81, - 0x7c, 0x84, 0x9b, 0xc5, 0x21, 0xbe, 0x12, 0x97, 0x68, 0x16, 0xf2, 0x31, 0x55, 0xc0, 0x8a, 0x32, - 0x9f, 0x14, 0x23, 0xe3, 0x31, 0xcc, 0x5d, 0xea, 0x42, 0x43, 0x12, 0x50, 0x8c, 0x2c, 0xc8, 0x73, - 0x3d, 0xb4, 0x28, 0xe9, 0xf2, 0xfc, 0xd8, 0xd2, 0x9c, 0xd9, 0x73, 0x3c, 0x5c, 0xaf, 0xb9, 0x15, - 0xaf, 0xdb, 0x62, 0x9b, 0xe1, 0xc0, 0xf4, 0xae, 0x87, 0xdf, 0xfe, 0x4d, 0xb9, 0x8f, 0xa0, 0x70, - 0xb1, 0xc5, 0xa0, 0x5a, 0xcb, 0x30, 0xc1, 0x27, 0xe8, 0x35, 0x54, 0x1a, 0xeb, 0x30, 0xd9, 0x26, - 0x19, 0x54, 0xc7, 0x26, 0xcc, 0xd8, 0xd8, 0x27, 0x8d, 0x9b, 0x30, 0xd9, 0x78, 0x0d, 0x33, 0x65, - 0xe2, 0xfb, 0x1e, 0xbb, 0x3a, 0x19, 0x02, 0x25, 0x70, 0x7c, 0x2c, 0xd8, 0x78, 0xdd, 0x6e, 0x20, - 0x77, 0x1b, 0x54, 0x60, 0x7a, 0x9b, 0x39, 0xec, 0x26, 0xb4, 0x9e, 0x48, 0xa0, 0x54, 0x82, 0x03, - 0xd2, 0xe9, 0x2c, 0xf5, 0x74, 0xee, 0xda, 0x3f, 0xd4, 0x6b, 0x3f, 0xba, 0x0f, 0xca, 0x91, 0x17, - 0xb8, 0x5c, 0xd2, 0xe4, 0xd2, 0x2d, 0xb3, 0xdf, 0x7d, 0x35, 0x37, 0xbd, 0xc0, 0xb5, 0x39, 0x06, - 0xa9, 0x30, 0x12, 0x61, 0xc7, 0x25, 0xc1, 0x71, 0xb3, 0xa8, 0xe8, 0xd2, 0xfc, 0x88, 0xdd, 0x19, - 0x1b, 0x2f, 0xa0, 0x70, 0xf1, 0x77, 0x09, 0x3b, 0x1f, 0x80, 0xe2, 0x05, 0x07, 0x84, 0x6b, 0x1b, - 0xcb, 0xd2, 0x2f, 0xfe, 0x45, 0x25, 0xe5, 0xec, 0xc7, 0xff, 0x39, 0x9b, 0x23, 0x8d, 0x55, 0x28, - 0x3c, 0xf1, 0x68, 0x87, 0x39, 0xfb, 0xe7, 0x66, 0xec, 0xc1, 0xcc, 0x6f, 0xc8, 0x4b, 0xa2, 0xe4, - 0x01, 0x45, 0x95, 0x60, 0xfc, 0x39, 0x75, 0x6a, 0xf8, 0x3a, 0xfe, 0xad, 0xc1, 0x84, 0xe0, 0x10, - 0xb2, 0x10, 0x28, 0xd4, 0x7b, 0x97, 0xf8, 0x28, 0xdb, 0xbc, 0x8e, 0x7d, 0xf4, 0x02, 0xe2, 0x62, - 0xca, 0x91, 0xb2, 0x2d, 0x46, 0xb7, 0x6d, 0x50, 0x36, 0x13, 0x4f, 0xf2, 0xeb, 0xe5, 0x9d, 0xca, - 0xee, 0xc6, 0x54, 0x4e, 0x9d, 0x3c, 0x39, 0xd5, 0x21, 0x9e, 0x5d, 0xaf, 0x32, 0xaf, 0x81, 0x91, - 0x0e, 0xa3, 0xe5, 0xa7, 0x5b, 0x5b, 0x95, 0x9d, 0x9d, 0x8d, 0x87, 0x53, 0x92, 0xfa, 0xef, 0xc9, - 0xa9, 0x3e, 0x11, 0x2f, 
0x27, 0x5f, 0x38, 0xc3, 0xae, 0x3a, 0xfe, 0xe9, 0xb3, 0x96, 0xfb, 0xfa, - 0x45, 0xe3, 0x5c, 0x4b, 0x1f, 0x87, 0x61, 0xb4, 0x73, 0x58, 0xe8, 0x3d, 0x0c, 0x8b, 0x5c, 0x43, - 0xab, 0xfd, 0x4f, 0x28, 0x3d, 0x68, 0xd5, 0x7b, 0x03, 0x20, 0xc5, 0x69, 0xd4, 0x41, 0x89, 0x83, - 0x0a, 0xdd, 0xed, 0x4f, 0x91, 0x92, 0x99, 0xea, 0xf2, 0x55, 0x61, 0xa2, 0xed, 0x11, 0xe4, 0x93, - 0x44, 0x42, 0x56, 0x7f, 0x86, 0x0b, 0x01, 0xa8, 0x2e, 0x64, 0x07, 0x88, 0x66, 0x7b, 0x90, 0x4f, - 0xcc, 0x40, 0x2b, 0xfd, 0xb1, 0xa9, 0xc1, 0xa4, 0xce, 0x9a, 0xc9, 0xd3, 0x69, 0xb6, 0x9f, 0x4e, - 0x73, 0x23, 0x7e, 0x3a, 0x63, 0xea, 0x24, 0x16, 0xb3, 0x50, 0xa7, 0x06, 0xe8, 0x1f, 0xa9, 0xeb, - 0xa0, 0xc4, 0x77, 0x3d, 0x8b, 0x33, 0x29, 0x59, 0x97, 0xc5, 0x99, 0xd4, 0x28, 0x69, 0x82, 0x12, - 0x5f, 0x67, 0x94, 0x01, 0x9f, 0x16, 0x18, 0xea, 0xca, 0x95, 0x71, 0x49, 0xe3, 0x05, 0x09, 0x1d, - 0xc2, 0x3f, 0xfc, 0xaa, 0x22, 0xb3, 0x3f, 0x47, 0x6f, 0x2e, 0xa8, 0x56, 0xe6, 0xfd, 0x49, 0xaf, - 0xd2, 0xab, 0xb3, 0x73, 0x2d, 0xf7, 0xfd, 0x5c, 0xcb, 0x7d, 0x68, 0x69, 0xd2, 0x59, 0x4b, 0x93, - 0xbe, 0xb5, 0x34, 0xe9, 0x67, 0x4b, 0x93, 0x5e, 0x96, 0x06, 0xfe, 0x97, 0xb6, 0xd6, 0xae, 0xf7, - 0xf3, 0xdc, 0xc9, 0x3b, 0xbf, 0x02, 0x00, 0x00, 0xff, 0xff, 0x62, 0xfd, 0xaf, 0x9e, 0xf2, 0x09, - 0x00, 0x00, + // 766 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x56, 0xcd, 0x4e, 0xdb, 0x4a, + 0x18, 0x8d, 0xb1, 0x6f, 0xb8, 0x7c, 0x01, 0x6e, 0xee, 0x10, 0x42, 0xe4, 0x7b, 0xe5, 0x6b, 0x79, + 0x71, 0x85, 0xba, 0xb0, 0x81, 0xaa, 0x40, 0xcb, 0xa6, 0x24, 0x8d, 0xaa, 0x94, 0x02, 0x95, 0xf9, + 0x2b, 0x55, 0xa5, 0xca, 0x24, 0x43, 0xb0, 0xd2, 0xcc, 0xa4, 0xf1, 0x24, 0x28, 0x5d, 0x54, 0xed, + 0x0e, 0xe5, 0x1d, 0xb2, 0x6a, 0x9f, 0xa2, 0x4f, 0xc0, 0xb2, 0xcb, 0xae, 0xaa, 0x92, 0x27, 0xa9, + 0x66, 0xec, 0xfc, 0x50, 0x52, 0xc5, 0x04, 0xba, 0xfb, 0x3c, 0xdf, 0x9c, 0xf3, 0x9d, 0xcc, 0xb1, + 0xcf, 0x04, 0x72, 0x45, 0x97, 0x9d, 0xd4, 0x8e, 0xcc, 0x3c, 0x2d, 0x5b, 0x79, 0x4a, 0x98, 0xe3, + 0x12, 0x5c, 0x2d, 0xf4, 0x97, 0x4e, 0xc5, 0xb5, 0x3c, 0x5c, 0xad, 0xbb, 0x79, 0xec, 0x59, 0x1e, + 0x71, 0x2a, 0xde, 0x09, 0x65, 0x56, 0x7d, 0xb1, 0x5b, 0x7b, 0x66, 0xa5, 0x4a, 0x19, 0x45, 0x7a, + 0x0f, 0x64, 0x76, 0x00, 0x66, 0x6f, 0x53, 0x7d, 0x51, 0x4d, 0x14, 0x69, 0x91, 0x8a, 0xcd, 0x16, + 0xaf, 0x7c, 0x9c, 0xfa, 0x4f, 0x91, 0xd2, 0xe2, 0x6b, 0x6c, 0x89, 0xa7, 0xa3, 0xda, 0xb1, 0x85, + 0xcb, 0x15, 0xd6, 0x08, 0x9a, 0xcb, 0xa1, 0xf4, 0xb1, 0x46, 0x05, 0x7b, 0x56, 0x99, 0xd6, 0x08, + 0xf3, 0x71, 0x46, 0x01, 0x92, 0xcf, 0xaa, 0xb8, 0xe2, 0x54, 0xf1, 0x4e, 0xa0, 0xc0, 0xc6, 0x6f, + 0x6a, 0xd8, 0x63, 0x48, 0x87, 0x58, 0x47, 0x14, 0xc3, 0xd5, 0x94, 0xa4, 0x4b, 0xf3, 0x13, 0x76, + 0xff, 0x12, 0x8a, 0x83, 0x5c, 0xc2, 0x8d, 0xd4, 0x98, 0xe8, 0xf0, 0x12, 0x25, 0x21, 0xca, 0xa9, + 0x08, 0x4b, 0xc9, 0x62, 0x31, 0x78, 0x32, 0x9e, 0xc0, 0xdc, 0x95, 0x29, 0x5e, 0x85, 0x12, 0x0f, + 0x23, 0x0b, 0xa2, 0x42, 0x8f, 0x97, 0x92, 0x74, 0x79, 0x3e, 0xb6, 0x34, 0x67, 0xf6, 0x1d, 0x8f, + 0xd0, 0x6b, 0x6e, 0xf2, 0xbe, 0x1d, 0x6c, 0x33, 0x1c, 0x98, 0xd9, 0x77, 0xf1, 0xe9, 0xef, 0x94, + 0xfb, 0x18, 0x12, 0x97, 0x47, 0x8c, 0xaa, 0x35, 0x03, 0x53, 0x62, 0xc1, 0xbb, 0x81, 0x4a, 0x63, + 0x1d, 0xa6, 0x3b, 0x24, 0xa3, 0xea, 0xd8, 0x80, 0x59, 0x1b, 0x97, 0x69, 0xfd, 0x36, 0x4c, 0x36, + 0x5e, 0xc1, 0x6c, 0x86, 0x96, 0xcb, 0x2e, 0xbb, 0x3e, 0x19, 0x02, 0x85, 0x38, 0x65, 0x1c, 0xb0, + 0x89, 0xba, 0x33, 0x40, 0xee, 0x0d, 0xc8, 0xc1, 0xcc, 0x0e, 0x73, 0xd8, 0x6d, 0x68, 0x25, 0xa0, + 0xe4, 0xc8, 0x31, 0xed, 0x0e, 0x96, 0xfa, 0x06, 0xf7, 0xdc, 0x1f, 0xeb, 0x77, 0x1f, 
0x3d, 0x00, + 0xa5, 0xe4, 0x92, 0x82, 0x50, 0x34, 0xbd, 0xf4, 0xbf, 0x39, 0xec, 0x73, 0x35, 0x37, 0x5c, 0x52, + 0xb0, 0x05, 0xc6, 0x78, 0x0e, 0x89, 0xcb, 0xd2, 0x03, 0xc7, 0x1e, 0x82, 0xe2, 0x92, 0x63, 0x2a, + 0xe6, 0xc7, 0xc2, 0x70, 0x72, 0xd5, 0x69, 0xe5, 0xfc, 0xdb, 0x7f, 0x11, 0x5b, 0x20, 0x8d, 0x55, + 0x48, 0x3c, 0x75, 0xbd, 0x2e, 0x73, 0xf8, 0x37, 0xca, 0x38, 0x84, 0xd9, 0x9f, 0x90, 0x57, 0x44, + 0xc9, 0x23, 0x8a, 0x4a, 0xc3, 0xe4, 0x9e, 0xe7, 0x14, 0xf1, 0x4d, 0x2c, 0x5a, 0x83, 0xa9, 0x80, + 0x23, 0x90, 0x85, 0x40, 0xf1, 0xdc, 0xb7, 0xbe, 0x57, 0xb2, 0x2d, 0x6a, 0xee, 0x95, 0x4b, 0x68, + 0x01, 0x7b, 0x02, 0x29, 0xdb, 0xc1, 0xd3, 0x9d, 0x33, 0x09, 0x14, 0x7e, 0xfc, 0xe8, 0x5f, 0x18, + 0xdf, 0xdb, 0xda, 0xd8, 0xda, 0x3e, 0xd8, 0x8a, 0x47, 0xd4, 0xbf, 0x9a, 0x2d, 0x3d, 0xc6, 0x97, + 0xf7, 0x48, 0x89, 0xd0, 0x53, 0x82, 0x92, 0xa0, 0xec, 0xe7, 0xb2, 0x07, 0x71, 0x49, 0x9d, 0x6c, + 0xb6, 0xf4, 0x3f, 0x79, 0x8b, 0x7f, 0xe0, 0x48, 0x85, 0xe8, 0x7a, 0x66, 0x37, 0xb7, 0x9f, 0x8d, + 0x8f, 0xa9, 0xd3, 0xcd, 0x96, 0x0e, 0xbc, 0xb3, 0x9e, 0x67, 0x6e, 0x1d, 0x23, 0x1d, 0x26, 0x32, + 0xdb, 0x9b, 0x9b, 0xb9, 0xdd, 0xdd, 0xec, 0xa3, 0xb8, 0xac, 0xfe, 0xdd, 0x6c, 0xe9, 0x53, 0xbc, + 0xed, 0xbf, 0xfb, 0x0c, 0x17, 0xd4, 0xc9, 0xb3, 0x8f, 0x5a, 0xe4, 0xf3, 0x27, 0x4d, 0x28, 0x58, + 0xfa, 0x30, 0x0e, 0x13, 0xdd, 0x33, 0x46, 0xef, 0x60, 0x3c, 0x48, 0x3c, 0xb4, 0x3a, 0xfc, 0x60, + 0x07, 0x47, 0xb0, 0x7a, 0x7f, 0x04, 0x64, 0x70, 0x88, 0x35, 0x50, 0xc4, 0x2f, 0xbc, 0x37, 0x9c, + 0x62, 0x40, 0x9a, 0xaa, 0xcb, 0xd7, 0x85, 0x05, 0x63, 0x4b, 0x10, 0xf5, 0xb3, 0x0a, 0x59, 0xc3, + 0x19, 0x2e, 0x45, 0xa3, 0xba, 0x10, 0x1e, 0x10, 0x0c, 0x3b, 0x84, 0xa8, 0x6f, 0x06, 0x5a, 0x19, + 0x8e, 0x1d, 0x18, 0x59, 0x6a, 0xd2, 0xf4, 0x2f, 0x55, 0xb3, 0x73, 0xa9, 0x9a, 0x59, 0x7e, 0xa9, + 0x72, 0x6a, 0x3f, 0x30, 0xc3, 0x50, 0x0f, 0x8c, 0xd6, 0x5f, 0x52, 0xd7, 0x40, 0xe1, 0x11, 0x11, + 0xc6, 0x99, 0x01, 0x29, 0x18, 0xc6, 0x99, 0x81, 0x09, 0xd4, 0x00, 0x85, 0xa7, 0x00, 0x0a, 0x81, + 0x1f, 0x94, 0x33, 0xea, 0xca, 0xb5, 0x71, 0xfe, 0xe0, 0x05, 0x09, 0x9d, 0xc0, 0x1f, 0xe2, 0x0b, + 0x47, 0xe6, 0x70, 0x8e, 0xfe, 0x38, 0x51, 0xad, 0xd0, 0xfb, 0xfd, 0x59, 0xe9, 0x97, 0xe7, 0x17, + 0x5a, 0xe4, 0xeb, 0x85, 0x16, 0x79, 0xdf, 0xd6, 0xa4, 0xf3, 0xb6, 0x26, 0x7d, 0x69, 0x6b, 0xd2, + 0xf7, 0xb6, 0x26, 0xbd, 0x48, 0x8f, 0xfc, 0xff, 0x6d, 0xad, 0x53, 0x1f, 0x45, 0x85, 0x93, 0x77, + 0x7f, 0x04, 0x00, 0x00, 0xff, 0xff, 0x51, 0x4b, 0x27, 0x3b, 0x0c, 0x0a, 0x00, 0x00, } diff --git a/vendor/github.com/containerd/containerd/api/services/snapshot/v1/snapshots.proto b/vendor/github.com/containerd/containerd/api/services/snapshot/v1/snapshots.proto index db67ae4b..3f5180dc 100644 --- a/vendor/github.com/containerd/containerd/api/services/snapshot/v1/snapshots.proto +++ b/vendor/github.com/containerd/containerd/api/services/snapshot/v1/snapshots.proto @@ -69,16 +69,16 @@ enum Kind { option (gogoproto.goproto_enum_prefix) = false; option (gogoproto.enum_customname) = "Kind"; - ACTIVE = 0 [(gogoproto.enumvalue_customname) = "KindActive"]; - - COMMITTED = 1 [(gogoproto.enumvalue_customname) = "KindCommitted"]; + UNKNOWN = 0 [(gogoproto.enumvalue_customname) = "KindUnknown"]; + VIEW = 1 [(gogoproto.enumvalue_customname) = "KindView"]; + ACTIVE = 2 [(gogoproto.enumvalue_customname) = "KindActive"]; + COMMITTED = 3 [(gogoproto.enumvalue_customname) = "KindCommitted"]; } message Info { string name = 1; string parent = 2; Kind kind = 3; - bool readonly = 4; } message StatSnapshotResponse { diff --git a/vendor/github.com/containerd/containerd/archive/path.go 
b/vendor/github.com/containerd/containerd/archive/path.go new file mode 100644 index 00000000..32374459 --- /dev/null +++ b/vendor/github.com/containerd/containerd/archive/path.go @@ -0,0 +1,107 @@ +package archive + +import ( + "os" + "path/filepath" + "strings" + + "github.com/pkg/errors" +) + +var ( + errTooManyLinks = errors.New("too many links") +) + +// rootPath joins a path with a root, evaluating and bounding any +// symlink to the root directory. +// TODO(dmcgowan): Expose and move to fs package or continuity path driver +func rootPath(root, path string) (string, error) { + if path == "" { + return root, nil + } + var linksWalked int // to protect against cycles + for { + i := linksWalked + newpath, err := walkLinks(root, path, &linksWalked) + if err != nil { + return "", err + } + path = newpath + if i == linksWalked { + newpath = filepath.Join("/", newpath) + if path == newpath { + return filepath.Join(root, newpath), nil + } + path = newpath + } + } +} + +func walkLink(root, path string, linksWalked *int) (newpath string, islink bool, err error) { + if *linksWalked > 255 { + return "", false, errTooManyLinks + } + + path = filepath.Join("/", path) + if path == "/" { + return path, false, nil + } + realPath := filepath.Join(root, path) + + fi, err := os.Lstat(realPath) + if err != nil { + // If path does not yet exist, treat as non-symlink + if os.IsNotExist(err) { + return path, false, nil + } + return "", false, err + } + if fi.Mode()&os.ModeSymlink == 0 { + return path, false, nil + } + newpath, err = os.Readlink(realPath) + if err != nil { + return "", false, err + } + if filepath.IsAbs(newpath) && strings.HasPrefix(newpath, root) { + newpath = newpath[:len(root)] + if !strings.HasPrefix(newpath, "/") { + newpath = "/" + newpath + } + } + *linksWalked++ + return newpath, true, nil +} + +func walkLinks(root, path string, linksWalked *int) (string, error) { + switch dir, file := filepath.Split(path); { + case dir == "": + newpath, _, err := walkLink(root, file, linksWalked) + return newpath, err + case file == "": + if os.IsPathSeparator(dir[len(dir)-1]) { + if dir == "/" { + return dir, nil + } + return walkLinks(root, dir[:len(dir)-1], linksWalked) + } + newpath, _, err := walkLink(root, dir, linksWalked) + return newpath, err + default: + newdir, err := walkLinks(root, dir, linksWalked) + if err != nil { + return "", err + } + newpath, islink, err := walkLink(root, filepath.Join(newdir, file), linksWalked) + if err != nil { + return "", err + } + if !islink { + return newpath, nil + } + if filepath.IsAbs(newpath) { + return newpath, nil + } + return filepath.Join(newdir, newpath), nil + } +} diff --git a/vendor/github.com/containerd/containerd/archive/tar.go b/vendor/github.com/containerd/containerd/archive/tar.go index 7e970589..b29699e1 100644 --- a/vendor/github.com/containerd/containerd/archive/tar.go +++ b/vendor/github.com/containerd/containerd/archive/tar.go @@ -1,7 +1,6 @@ package archive import ( - "archive/tar" "context" "fmt" "io" @@ -16,6 +15,7 @@ import ( "github.com/containerd/containerd/fs" "github.com/containerd/containerd/log" + "github.com/dmcgowan/go-tar" "github.com/pkg/errors" ) @@ -25,8 +25,6 @@ var ( return make([]byte, 32*1024) }, } - - breakoutError = errors.New("file name outside of root") ) // Diff returns a tar stream of the computed filesystem @@ -128,16 +126,30 @@ func Apply(ctx context.Context, root string, r io.Reader) (int64, error) { continue } - // Note as these operations are platform specific, so must the slash be. 
- if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) { - // Not the root directory, ensure that the parent directory exists. - // This happened in some tests where an image had a tarfile without any - // parent directories. - parent := filepath.Dir(hdr.Name) - parentPath := filepath.Join(root, parent) + // Split name and resolve symlinks for root directory. + ppath, base := filepath.Split(hdr.Name) + ppath, err = rootPath(root, ppath) + if err != nil { + return 0, errors.Wrap(err, "failed to get root path") + } + // Join to root before joining to parent path to ensure relative links are + // already resolved based on the root before adding to parent. + path := filepath.Join(ppath, filepath.Join("/", base)) + if path == root { + log.G(ctx).Debugf("file %q ignored: resolved to root", hdr.Name) + continue + } + + // If file is not directly under root, ensure parent directory + // exists or is created. + if ppath != root { + parentPath := ppath + if base == "" { + parentPath = filepath.Dir(path) + } if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) { - err = mkdirAll(parentPath, 0600) + err = mkdirAll(parentPath, 0700) if err != nil { return 0, err } @@ -158,7 +170,11 @@ func Apply(ctx context.Context, root string, r io.Reader) (int64, error) { } defer os.RemoveAll(aufsTempdir) } - if err := createTarFile(ctx, filepath.Join(aufsTempdir, basename), root, hdr, tr); err != nil { + p, err := rootPath(aufsTempdir, basename) + if err != nil { + return 0, err + } + if err := createTarFile(ctx, p, root, hdr, tr); err != nil { return 0, err } } @@ -168,18 +184,6 @@ func Apply(ctx context.Context, root string, r io.Reader) (int64, error) { } } - path := filepath.Join(root, hdr.Name) - rel, err := filepath.Rel(root, path) - if err != nil { - return 0, err - } - - // Note as these operations are platform specific, so must the slash be. - if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) { - return 0, errors.Wrapf(breakoutError, "%q is outside of %q", hdr.Name, root) - } - base := filepath.Base(path) - if strings.HasPrefix(base, whiteoutPrefix) { dir := filepath.Dir(path) if base == whiteoutOpaqueDir { @@ -239,7 +243,11 @@ func Apply(ctx context.Context, root string, r io.Reader) (int64, error) { if srcHdr == nil { return 0, fmt.Errorf("Invalid aufs hardlink") } - tmpFile, err := os.Open(filepath.Join(aufsTempdir, linkBasename)) + p, err := rootPath(aufsTempdir, linkBasename) + if err != nil { + return 0, err + } + tmpFile, err := os.Open(p) if err != nil { return 0, err } @@ -260,7 +268,10 @@ func Apply(ctx context.Context, root string, r io.Reader) (int64, error) { } for _, hdr := range dirs { - path := filepath.Join(root, hdr.Name) + path, err := rootPath(root, hdr.Name) + if err != nil { + return 0, err + } if err := chtimes(path, boundTime(latestTime(hdr.AccessTime, hdr.ModTime)), boundTime(hdr.ModTime)); err != nil { return 0, err } @@ -467,25 +478,15 @@ func createTarFile(ctx context.Context, path, extractDir string, hdr *tar.Header } case tar.TypeLink: - targetPath := filepath.Join(extractDir, hdr.Linkname) - // check for hardlink breakout - if !strings.HasPrefix(targetPath, extractDir) { - return errors.Wrapf(breakoutError, "invalid hardlink %q -> %q", targetPath, hdr.Linkname) + targetPath, err := rootPath(extractDir, hdr.Linkname) + if err != nil { + return err } if err := os.Link(targetPath, path); err != nil { return err } case tar.TypeSymlink: - // path -> hdr.Linkname = targetPath - // e.g. 
/extractDir/path/to/symlink -> ../2/file = /extractDir/path/2/file - targetPath := filepath.Join(filepath.Dir(path), hdr.Linkname) - - // the reason we don't need to check symlinks in the path (with FollowSymlinkInScope) is because - // that symlink would first have to be created, which would be caught earlier, at this very check: - if !strings.HasPrefix(targetPath, extractDir) { - return errors.Wrapf(breakoutError, "invalid symlink %q -> %q", path, hdr.Linkname) - } if err := os.Symlink(hdr.Linkname, path); err != nil { return err } diff --git a/vendor/github.com/containerd/containerd/archive/tar_unix.go b/vendor/github.com/containerd/containerd/archive/tar_unix.go index 61951054..2fa59a41 100644 --- a/vendor/github.com/containerd/containerd/archive/tar_unix.go +++ b/vendor/github.com/containerd/containerd/archive/tar_unix.go @@ -3,12 +3,12 @@ package archive import ( - "archive/tar" "os" "sync" "syscall" "github.com/containerd/continuity/sysx" + "github.com/dmcgowan/go-tar" "github.com/opencontainers/runc/libcontainer/system" "github.com/pkg/errors" ) diff --git a/vendor/github.com/containerd/containerd/archive/tar_windows.go b/vendor/github.com/containerd/containerd/archive/tar_windows.go index fda77508..4ccf605e 100644 --- a/vendor/github.com/containerd/containerd/archive/tar_windows.go +++ b/vendor/github.com/containerd/containerd/archive/tar_windows.go @@ -1,13 +1,13 @@ package archive import ( - "archive/tar" "errors" "fmt" "os" "strings" "github.com/containerd/containerd/sys" + "github.com/dmcgowan/go-tar" ) // tarName returns platform-specific filepath diff --git a/vendor/github.com/containerd/containerd/client.go b/vendor/github.com/containerd/containerd/client.go index f8b4b904..92ff6709 100644 --- a/vendor/github.com/containerd/containerd/client.go +++ b/vendor/github.com/containerd/containerd/client.go @@ -161,8 +161,8 @@ func WithContainerLabels(labels map[string]string) NewContainerOpts { } } -// WithExistingRootFS uses an existing root filesystem for the container -func WithExistingRootFS(id string) NewContainerOpts { +// WithSnapshot uses an existing root filesystem for the container +func WithSnapshot(id string) NewContainerOpts { return func(ctx context.Context, client *Client, c *containers.Container) error { // check that the snapshot exists, if not, fail on creation if _, err := client.SnapshotService(c.Snapshotter).Mounts(ctx, id); err != nil { @@ -173,9 +173,9 @@ func WithExistingRootFS(id string) NewContainerOpts { } } -// WithNewRootFS allocates a new snapshot to be used by the container as the +// WithNewSnapshot allocates a new snapshot to be used by the container as the // root filesystem in read-write mode -func WithNewRootFS(id string, i Image) NewContainerOpts { +func WithNewSnapshot(id string, i Image) NewContainerOpts { return func(ctx context.Context, client *Client, c *containers.Container) error { diffIDs, err := i.(*image).i.RootFS(ctx, client.ContentStore()) if err != nil { @@ -190,9 +190,9 @@ func WithNewRootFS(id string, i Image) NewContainerOpts { } } -// WithNewReadonlyRootFS allocates a new snapshot to be used by the container as the +// WithNewSnapshotView allocates a new snapshot to be used by the container as the // root filesystem in read-only mode -func WithNewReadonlyRootFS(id string, i Image) NewContainerOpts { +func WithNewSnapshotView(id string, i Image) NewContainerOpts { return func(ctx context.Context, client *Client, c *containers.Container) error { diffIDs, err := i.(*image).i.RootFS(ctx, client.ContentStore()) if err != nil { diff 
--git a/vendor/github.com/containerd/containerd/client_unix.go b/vendor/github.com/containerd/containerd/client_unix.go index f352eb4d..d5a9f63e 100644 --- a/vendor/github.com/containerd/containerd/client_unix.go +++ b/vendor/github.com/containerd/containerd/client_unix.go @@ -9,9 +9,6 @@ import ( "time" ) -// DefaultAddress is the default unix socket address -const DefaultAddress = "/run/containerd/containerd.sock" - func dialer(address string, timeout time.Duration) (net.Conn, error) { address = strings.TrimPrefix(address, "unix://") return net.DialTimeout("unix", address, timeout) diff --git a/vendor/github.com/containerd/containerd/client_windows.go b/vendor/github.com/containerd/containerd/client_windows.go index 998c15c6..548024e5 100644 --- a/vendor/github.com/containerd/containerd/client_windows.go +++ b/vendor/github.com/containerd/containerd/client_windows.go @@ -7,9 +7,6 @@ import ( winio "github.com/Microsoft/go-winio" ) -// DefaultAddress is the default unix socket address -const DefaultAddress = `\\.\pipe\containerd-containerd` - func dialer(address string, timeout time.Duration) (net.Conn, error) { return winio.DialPipe(address, &timeout) } diff --git a/vendor/github.com/containerd/containerd/container.go b/vendor/github.com/containerd/containerd/container.go index fd7cfe2a..3da1eefa 100644 --- a/vendor/github.com/containerd/containerd/container.go +++ b/vendor/github.com/containerd/containerd/container.go @@ -7,25 +7,16 @@ import ( "strings" "sync" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "github.com/containerd/containerd/api/services/tasks/v1" "github.com/containerd/containerd/api/types" "github.com/containerd/containerd/containers" + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/mount" "github.com/containerd/containerd/typeurl" specs "github.com/opencontainers/runtime-spec/specs-go" "github.com/pkg/errors" ) -var ( - ErrNoImage = errors.New("container does not have an image") - ErrNoRunningTask = errors.New("no running task") - ErrDeleteRunningTask = errors.New("cannot delete container with running task") - ErrProcessExited = errors.New("process already exited") - ErrNoExecID = errors.New("exec id must be provided") -) - type DeleteOpts func(context.Context, *Client, containers.Container) error type Container interface { @@ -117,8 +108,8 @@ func (c *container) Spec() (*specs.Spec, error) { return &s, nil } -// WithRootFSDeletion deletes the rootfs allocated for the container -func WithRootFSDeletion(ctx context.Context, client *Client, c containers.Container) error { +// WithSnapshotCleanup deletes the rootfs allocated for the container +func WithSnapshotCleanup(ctx context.Context, client *Client, c containers.Container) error { if c.RootFS != "" { return client.SnapshotService(c.Snapshotter).Remove(ctx, c.RootFS) } @@ -129,7 +120,7 @@ func WithRootFSDeletion(ctx context.Context, client *Client, c containers.Contai // an error is returned if the container has running tasks func (c *container) Delete(ctx context.Context, opts ...DeleteOpts) (err error) { if _, err := c.Task(ctx, nil); err == nil { - return ErrDeleteRunningTask + return errors.Wrapf(errdefs.ErrFailedPrecondition, "cannot delete running task %v", c.ID()) } for _, o := range opts { if err := o(ctx, c.client, c.c); err != nil { @@ -150,11 +141,11 @@ func (c *container) Task(ctx context.Context, attach IOAttach) (Task, error) { // Image returns the image that the container is based on func (c *container) Image(ctx context.Context) (Image, error) { if 
c.c.Image == "" { - return nil, ErrNoImage + return nil, errors.Wrapf(errdefs.ErrNotFound, "container not created from an image") } i, err := c.client.ImageService().Get(ctx, c.c.Image) if err != nil { - return nil, err + return nil, errors.Wrapf(err, "failed to get image for container") } return &image{ client: c.client, @@ -164,10 +155,17 @@ func (c *container) Image(ctx context.Context) (Image, error) { type NewTaskOpts func(context.Context, *Client, *TaskInfo) error +func WithRootFS(mounts []mount.Mount) NewTaskOpts { + return func(ctx context.Context, c *Client, ti *TaskInfo) error { + ti.RootFS = mounts + return nil + } +} + func (c *container) NewTask(ctx context.Context, ioCreate IOCreation, opts ...NewTaskOpts) (Task, error) { c.mu.Lock() defer c.mu.Unlock() - i, err := ioCreate() + i, err := ioCreate(c.c.ID) if err != nil { return nil, err } @@ -198,6 +196,15 @@ func (c *container) NewTask(ctx context.Context, ioCreate IOCreation, opts ...Ne return nil, err } } + if info.RootFS != nil { + for _, m := range info.RootFS { + request.Rootfs = append(request.Rootfs, &types.Mount{ + Type: m.Type, + Source: m.Source, + Options: m.Options, + }) + } + } if info.Options != nil { any, err := typeurl.MarshalAny(info.Options) if err != nil { @@ -229,8 +236,9 @@ func (c *container) loadTask(ctx context.Context, ioAttach IOAttach) (Task, erro ContainerID: c.c.ID, }) if err != nil { - if grpc.Code(errors.Cause(err)) == codes.NotFound { - return nil, ErrNoRunningTask + err = errdefs.FromGRPC(err) + if errdefs.IsNotFound(err) { + return nil, errors.Wrapf(err, "no running task found") } return nil, err } diff --git a/vendor/github.com/containerd/containerd/content/content.go b/vendor/github.com/containerd/containerd/content/content.go index 43ff5b66..770a37b8 100644 --- a/vendor/github.com/containerd/containerd/content/content.go +++ b/vendor/github.com/containerd/containerd/content/content.go @@ -3,20 +3,11 @@ package content import ( "context" "io" - "sync" "time" "github.com/opencontainers/go-digest" ) -var ( - bufPool = sync.Pool{ - New: func() interface{} { - return make([]byte, 1<<20) - }, - } -) - type Provider interface { Reader(ctx context.Context, dgst digest.Digest) (io.ReadCloser, error) ReaderAt(ctx context.Context, dgst digest.Digest) (io.ReaderAt, error) diff --git a/vendor/github.com/containerd/containerd/content/helpers.go b/vendor/github.com/containerd/containerd/content/helpers.go index 3a241c7f..7b295be7 100644 --- a/vendor/github.com/containerd/containerd/content/helpers.go +++ b/vendor/github.com/containerd/containerd/content/helpers.go @@ -5,12 +5,21 @@ import ( "fmt" "io" "io/ioutil" + "sync" "github.com/containerd/containerd/errdefs" "github.com/opencontainers/go-digest" "github.com/pkg/errors" ) +var ( + bufPool = sync.Pool{ + New: func() interface{} { + return make([]byte, 1<<20) + }, + } +) + // ReadBlob retrieves the entire contents of the blob from the provider. // // Avoid using this for large blobs, such as layers. 
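Note on the helpers.go hunk above: the 1 MiB sync.Pool of copy buffers moves next to the content helpers so transfers can share buffers instead of allocating per copy. A minimal, self-contained sketch of that pooled-buffer pattern; the copyWithBuffer helper and the package main wrapper are illustrative, not part of containerd:

package main

import (
	"io"
	"os"
	"strings"
	"sync"
)

// bufPool hands out reusable 1 MiB buffers, the same shape as the pool added
// to content/helpers.go above, so concurrent copies avoid per-transfer allocations.
var bufPool = sync.Pool{
	New: func() interface{} {
		return make([]byte, 1<<20)
	},
}

// copyWithBuffer is a hypothetical helper: borrow a buffer from the pool,
// let io.CopyBuffer drive the transfer, and return the buffer when done.
func copyWithBuffer(dst io.Writer, src io.Reader) (int64, error) {
	buf := bufPool.Get().([]byte)
	defer bufPool.Put(buf)
	return io.CopyBuffer(dst, src, buf)
}

func main() {
	if _, err := copyWithBuffer(os.Stdout, strings.NewReader("pooled copy\n")); err != nil {
		panic(err)
	}
}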
@@ -121,8 +130,3 @@ func seekReader(r io.Reader, offset, size int64) (io.Reader, error) { return r, errors.Wrapf(errUnseekable, "seek to offset %v failed", offset) } - -func readFileString(path string) (string, error) { - p, err := ioutil.ReadFile(path) - return string(p), err -} diff --git a/vendor/github.com/containerd/containerd/content/locks.go b/vendor/github.com/containerd/containerd/content/local/locks.go similarity index 97% rename from vendor/github.com/containerd/containerd/content/locks.go rename to vendor/github.com/containerd/containerd/content/local/locks.go index c7b1e437..cf5d0c59 100644 --- a/vendor/github.com/containerd/containerd/content/locks.go +++ b/vendor/github.com/containerd/containerd/content/local/locks.go @@ -1,4 +1,4 @@ -package content +package local import ( "sync" diff --git a/vendor/github.com/containerd/containerd/content/readerat.go b/vendor/github.com/containerd/containerd/content/local/readerat.go similarity index 96% rename from vendor/github.com/containerd/containerd/content/readerat.go rename to vendor/github.com/containerd/containerd/content/local/readerat.go index 74c11bd1..47521bc1 100644 --- a/vendor/github.com/containerd/containerd/content/readerat.go +++ b/vendor/github.com/containerd/containerd/content/local/readerat.go @@ -1,4 +1,4 @@ -package content +package local import ( "io" diff --git a/vendor/github.com/containerd/containerd/content/store.go b/vendor/github.com/containerd/containerd/content/local/store.go similarity index 86% rename from vendor/github.com/containerd/containerd/content/store.go rename to vendor/github.com/containerd/containerd/content/local/store.go index 8accf166..170a84f9 100644 --- a/vendor/github.com/containerd/containerd/content/store.go +++ b/vendor/github.com/containerd/containerd/content/local/store.go @@ -1,4 +1,4 @@ -package content +package local import ( "context" @@ -8,8 +8,10 @@ import ( "os" "path/filepath" "strconv" + "sync" "time" + "github.com/containerd/containerd/content" "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/filters" "github.com/containerd/containerd/log" @@ -17,6 +19,14 @@ import ( "github.com/pkg/errors" ) +var ( + bufPool = sync.Pool{ + New: func() interface{} { + return make([]byte, 1<<20) + }, + } +) + // Store is digest-keyed store for content. All data written into the store is // stored under a verifiable digest. 
// @@ -26,7 +36,7 @@ type store struct { root string } -func NewStore(root string) (Store, error) { +func NewStore(root string) (content.Store, error) { if err := os.MkdirAll(filepath.Join(root, "ingest"), 0777); err != nil && !os.IsExist(err) { return nil, err } @@ -36,7 +46,7 @@ func NewStore(root string) (Store, error) { }, nil } -func (s *store) Info(ctx context.Context, dgst digest.Digest) (Info, error) { +func (s *store) Info(ctx context.Context, dgst digest.Digest) (content.Info, error) { p := s.blobPath(dgst) fi, err := os.Stat(p) if err != nil { @@ -44,14 +54,14 @@ func (s *store) Info(ctx context.Context, dgst digest.Digest) (Info, error) { err = errors.Wrapf(errdefs.ErrNotFound, "content %v", dgst) } - return Info{}, err + return content.Info{}, err } return s.info(dgst, fi), nil } -func (s *store) info(dgst digest.Digest, fi os.FileInfo) Info { - return Info{ +func (s *store) info(dgst digest.Digest, fi os.FileInfo) content.Info { + return content.Info{ Digest: dgst, Size: fi.Size(), CreatedAt: fi.ModTime(), @@ -93,12 +103,12 @@ func (cs *store) Delete(ctx context.Context, dgst digest.Digest) error { return nil } -func (cs *store) Update(ctx context.Context, info Info, fieldpaths ...string) (Info, error) { +func (cs *store) Update(ctx context.Context, info content.Info, fieldpaths ...string) (content.Info, error) { // TODO: Support persisting and updating mutable content data - return Info{}, errors.Wrapf(errdefs.ErrFailedPrecondition, "update not supported on immutable content store") + return content.Info{}, errors.Wrapf(errdefs.ErrFailedPrecondition, "update not supported on immutable content store") } -func (cs *store) Walk(ctx context.Context, fn WalkFunc, filters ...string) error { +func (cs *store) Walk(ctx context.Context, fn content.WalkFunc, filters ...string) error { // TODO: Support filters root := filepath.Join(cs.root, "blobs") var alg digest.Algorithm @@ -141,11 +151,11 @@ func (cs *store) Walk(ctx context.Context, fn WalkFunc, filters ...string) error }) } -func (s *store) Status(ctx context.Context, ref string) (Status, error) { +func (s *store) Status(ctx context.Context, ref string) (content.Status, error) { return s.status(s.ingestRoot(ref)) } -func (s *store) ListStatuses(ctx context.Context, fs ...string) ([]Status, error) { +func (s *store) ListStatuses(ctx context.Context, fs ...string) ([]content.Status, error) { fp, err := os.Open(filepath.Join(s.root, "ingest")) if err != nil { return nil, err @@ -163,7 +173,7 @@ func (s *store) ListStatuses(ctx context.Context, fs ...string) ([]Status, error return nil, err } - var active []Status + var active []content.Status for _, fi := range fis { p := filepath.Join(s.root, "ingest", fi.Name()) stat, err := s.status(p) @@ -192,19 +202,19 @@ func (s *store) ListStatuses(ctx context.Context, fs ...string) ([]Status, error } // status works like stat above except uses the path to the ingest. 
-func (s *store) status(ingestPath string) (Status, error) { +func (s *store) status(ingestPath string) (content.Status, error) { dp := filepath.Join(ingestPath, "data") fi, err := os.Stat(dp) if err != nil { - return Status{}, err + return content.Status{}, err } ref, err := readFileString(filepath.Join(ingestPath, "ref")) if err != nil { - return Status{}, err + return content.Status{}, err } - return Status{ + return content.Status{ Ref: ref, Offset: fi.Size(), Total: s.total(ingestPath), @@ -213,7 +223,7 @@ func (s *store) status(ingestPath string) (Status, error) { }, nil } -func adaptStatus(status Status) filters.Adaptor { +func adaptStatus(status content.Status) filters.Adaptor { return filters.AdapterFunc(func(fieldpath []string) (string, bool) { if len(fieldpath) == 0 { return "", false @@ -248,7 +258,7 @@ func (s *store) total(ingestPath string) int64 { // ref at a time. // // The argument `ref` is used to uniquely identify a long-lived writer transaction. -func (s *store) Writer(ctx context.Context, ref string, total int64, expected digest.Digest) (Writer, error) { +func (s *store) Writer(ctx context.Context, ref string, total int64, expected digest.Digest) (content.Writer, error) { // TODO(stevvooe): Need to actually store expected here. We have // code in the service that shouldn't be dealing with this. if expected != "" { @@ -261,7 +271,7 @@ func (s *store) Writer(ctx context.Context, ref string, total int64, expected di path, refp, data := s.ingestPaths(ref) if err := tryLock(ref); err != nil { - return nil, errors.Wrapf(err, "locking %v failed", ref) + return nil, errors.Wrapf(err, "locking ref %v failed", ref) } var ( @@ -384,3 +394,8 @@ func (s *store) ingestPaths(ref string) (string, string, string) { return fp, rp, dp } + +func readFileString(path string) (string, error) { + p, err := ioutil.ReadFile(path) + return string(p), err +} diff --git a/vendor/github.com/containerd/containerd/content/store_linux.go b/vendor/github.com/containerd/containerd/content/local/store_linux.go similarity index 93% rename from vendor/github.com/containerd/containerd/content/store_linux.go rename to vendor/github.com/containerd/containerd/content/local/store_linux.go index ed25f0c8..e1f89f31 100644 --- a/vendor/github.com/containerd/containerd/content/store_linux.go +++ b/vendor/github.com/containerd/containerd/content/local/store_linux.go @@ -1,4 +1,4 @@ -package content +package local import ( "os" diff --git a/vendor/github.com/containerd/containerd/content/store_unix.go b/vendor/github.com/containerd/containerd/content/local/store_unix.go similarity index 94% rename from vendor/github.com/containerd/containerd/content/store_unix.go rename to vendor/github.com/containerd/containerd/content/local/store_unix.go index a7c6b1b5..75b26997 100644 --- a/vendor/github.com/containerd/containerd/content/store_unix.go +++ b/vendor/github.com/containerd/containerd/content/local/store_unix.go @@ -1,6 +1,6 @@ // +build darwin freebsd -package content +package local import ( "os" diff --git a/vendor/github.com/containerd/containerd/content/store_windows.go b/vendor/github.com/containerd/containerd/content/local/store_windows.go similarity index 85% rename from vendor/github.com/containerd/containerd/content/store_windows.go rename to vendor/github.com/containerd/containerd/content/local/store_windows.go index 495e3424..7fb6ad43 100644 --- a/vendor/github.com/containerd/containerd/content/store_windows.go +++ b/vendor/github.com/containerd/containerd/content/local/store_windows.go @@ -1,4 +1,4 @@ 
-package content +package local import ( "os" diff --git a/vendor/github.com/containerd/containerd/content/writer.go b/vendor/github.com/containerd/containerd/content/local/writer.go similarity index 87% rename from vendor/github.com/containerd/containerd/content/writer.go rename to vendor/github.com/containerd/containerd/content/local/writer.go index 5fbaac2f..e7a74b29 100644 --- a/vendor/github.com/containerd/containerd/content/writer.go +++ b/vendor/github.com/containerd/containerd/content/local/writer.go @@ -1,10 +1,12 @@ -package content +package local import ( "os" "path/filepath" + "runtime" "time" + "github.com/containerd/containerd/content" "github.com/containerd/containerd/errdefs" "github.com/opencontainers/go-digest" "github.com/pkg/errors" @@ -23,8 +25,8 @@ type writer struct { updatedAt time.Time } -func (w *writer) Status() (Status, error) { - return Status{ +func (w *writer) Status() (content.Status, error) { + return content.Status{ Ref: w.ref, Offset: w.offset, Total: w.total, @@ -67,8 +69,12 @@ func (w *writer) Commit(size int64, expected digest.Digest) error { // only allowing reads honoring the umask on creation. // // This removes write and exec, only allowing read per the creation umask. - if err := w.fp.Chmod((fi.Mode() & os.ModePerm) &^ 0333); err != nil { - return errors.Wrap(err, "failed to change ingest file permissions") + // + // NOTE: Windows does not support this operation + if runtime.GOOS != "windows" { + if err := w.fp.Chmod((fi.Mode() & os.ModePerm) &^ 0333); err != nil { + return errors.Wrap(err, "failed to change ingest file permissions") + } } if size > 0 && size != fi.Size() { @@ -104,6 +110,10 @@ func (w *writer) Commit(size int64, expected digest.Digest) error { } return err } + commitTime := time.Now() + if err := os.Chtimes(target, commitTime, commitTime); err != nil { + return err + } unlock(w.ref) w.fp = nil diff --git a/vendor/github.com/containerd/containerd/errdefs/errors.go b/vendor/github.com/containerd/containerd/errdefs/errors.go index f84e0dae..60e300ab 100644 --- a/vendor/github.com/containerd/containerd/errdefs/errors.go +++ b/vendor/github.com/containerd/containerd/errdefs/errors.go @@ -21,7 +21,7 @@ import "github.com/pkg/errors" // map very well to those defined by grpc. var ( ErrUnknown = errors.New("unknown") // used internally to represent a missed mapping. 
- ErrInvalidArgument = errors.New("invalid") + ErrInvalidArgument = errors.New("invalid argument") ErrNotFound = errors.New("not found") ErrAlreadyExists = errors.New("already exists") ErrFailedPrecondition = errors.New("failed precondition") diff --git a/vendor/github.com/containerd/containerd/events/emitter.go b/vendor/github.com/containerd/containerd/events/emitter.go index ed390214..d6910564 100644 --- a/vendor/github.com/containerd/containerd/events/emitter.go +++ b/vendor/github.com/containerd/containerd/events/emitter.go @@ -47,7 +47,9 @@ func (e *Emitter) Events(ctx context.Context, clientID string) chan *events.Enve ns: ns, } e.sinks[clientID] = s + e.m.Unlock() e.broadcaster.Add(s) + return s.ch } ch := e.sinks[clientID].ch e.m.Unlock() diff --git a/vendor/github.com/containerd/containerd/events/poster.go b/vendor/github.com/containerd/containerd/events/poster.go index 520b5e8d..124f8b79 100644 --- a/vendor/github.com/containerd/containerd/events/poster.go +++ b/vendor/github.com/containerd/containerd/events/poster.go @@ -3,9 +3,9 @@ package events import ( "context" - "github.com/Sirupsen/logrus" "github.com/containerd/containerd/log" "github.com/containerd/containerd/namespaces" + "github.com/sirupsen/logrus" ) var ( diff --git a/vendor/github.com/containerd/containerd/events/sink.go b/vendor/github.com/containerd/containerd/events/sink.go index f72f86db..9246e40a 100644 --- a/vendor/github.com/containerd/containerd/events/sink.go +++ b/vendor/github.com/containerd/containerd/events/sink.go @@ -4,13 +4,13 @@ import ( "context" "time" - "github.com/Sirupsen/logrus" "github.com/containerd/containerd/api/services/events/v1" "github.com/containerd/containerd/log" "github.com/containerd/containerd/namespaces" "github.com/containerd/containerd/typeurl" goevents "github.com/docker/go-events" "github.com/pkg/errors" + "github.com/sirupsen/logrus" ) type sinkEvent struct { diff --git a/vendor/github.com/containerd/containerd/filters/parser.go b/vendor/github.com/containerd/containerd/filters/parser.go index acecc732..c9b09847 100644 --- a/vendor/github.com/containerd/containerd/filters/parser.go +++ b/vendor/github.com/containerd/containerd/filters/parser.go @@ -46,7 +46,7 @@ func Parse(s string) (Filter, error) { // ParseAll parses each filter in ss and returns a filter that will return true // if any filter matches the expression. // -// If no fitlers are provided, the filter will match anything. +// If no filters are provided, the filter will match anything. 
func ParseAll(ss ...string) (Filter, error) { if len(ss) == 0 { return Always, nil @@ -158,7 +158,7 @@ func (p *parser) fieldpath() ([]string, error) { fs := []string{f} loop: for { - tok := p.scanner.peek() // lookahead to consume field separtor + tok := p.scanner.peek() // lookahead to consume field separator switch tok { case '.': diff --git a/vendor/github.com/containerd/containerd/fs/diff.go b/vendor/github.com/containerd/containerd/fs/diff.go index 5fed7cf2..9073d0d9 100644 --- a/vendor/github.com/containerd/containerd/fs/diff.go +++ b/vendor/github.com/containerd/containerd/fs/diff.go @@ -8,7 +8,7 @@ import ( "golang.org/x/sync/errgroup" - "github.com/Sirupsen/logrus" + "github.com/sirupsen/logrus" ) // ChangeKind is the type of modification that diff --git a/vendor/github.com/containerd/containerd/images/handlers.go b/vendor/github.com/containerd/containerd/images/handlers.go index c8780463..452dd7cc 100644 --- a/vendor/github.com/containerd/containerd/images/handlers.go +++ b/vendor/github.com/containerd/containerd/images/handlers.go @@ -159,11 +159,12 @@ func ChildrenHandler(provider content.Provider) HandlerFunc { descs = append(descs, index.Manifests...) case MediaTypeDockerSchema2Layer, MediaTypeDockerSchema2LayerGzip, - MediaTypeDockerSchema2Config, ocispec.MediaTypeImageLayer, - ocispec.MediaTypeImageLayerGzip: // childless data types. + MediaTypeDockerSchema2Config, ocispec.MediaTypeImageConfig, + ocispec.MediaTypeImageLayer, ocispec.MediaTypeImageLayerGzip: + // childless data types. return nil, nil default: - log.G(ctx).Warnf("encounted unknown type %v; children may not be fetched", desc.MediaType) + log.G(ctx).Warnf("encountered unknown type %v; children may not be fetched", desc.MediaType) } return descs, nil diff --git a/vendor/github.com/containerd/containerd/io.go b/vendor/github.com/containerd/containerd/io.go index ea103c35..d144d287 100644 --- a/vendor/github.com/containerd/containerd/io.go +++ b/vendor/github.com/containerd/containerd/io.go @@ -1,11 +1,10 @@ package containerd import ( + "context" "fmt" "io" - "io/ioutil" "os" - "path/filepath" "sync" ) @@ -18,6 +17,13 @@ type IO struct { closer *wgCloser } +func (i *IO) Cancel() { + if i.closer == nil { + return + } + i.closer.Cancel() +} + func (i *IO) Wait() { if i.closer == nil { return @@ -32,7 +38,7 @@ func (i *IO) Close() error { return i.closer.Close() } -type IOCreation func() (*IO, error) +type IOCreation func(id string) (*IO, error) type IOAttach func(*FIFOSet) (*IO, error) @@ -41,8 +47,8 @@ func NewIO(stdin io.Reader, stdout, stderr io.Writer) IOCreation { } func NewIOWithTerminal(stdin io.Reader, stdout, stderr io.Writer, terminal bool) IOCreation { - return func() (*IO, error) { - paths, err := NewFifos() + return func(id string) (*IO, error) { + paths, err := NewFifos(id) if err != nil { return nil, err } @@ -64,7 +70,6 @@ func NewIOWithTerminal(stdin io.Reader, stdout, stderr io.Writer, terminal bool) i.closer = closer return i, nil } - } func WithAttach(stdin io.Reader, stdout, stderr io.Writer) IOAttach { @@ -94,31 +99,13 @@ func WithAttach(stdin io.Reader, stdout, stderr io.Writer) IOAttach { // Stdio returns an IO implementation to be used for a task // that outputs the container's IO as the current processes Stdio -func Stdio() (*IO, error) { - return NewIO(os.Stdin, os.Stdout, os.Stderr)() +func Stdio(id string) (*IO, error) { + return NewIO(os.Stdin, os.Stdout, os.Stderr)(id) } // StdioTerminal will setup the IO for the task to use a terminal -func StdioTerminal() (*IO, error) { - return 
NewIOWithTerminal(os.Stdin, os.Stdout, os.Stderr, true)() -} - -// NewFifos returns a new set of fifos for the task -func NewFifos() (*FIFOSet, error) { - root := filepath.Join(os.TempDir(), "containerd") - if err := os.MkdirAll(root, 0700); err != nil { - return nil, err - } - dir, err := ioutil.TempDir(root, "") - if err != nil { - return nil, err - } - return &FIFOSet{ - Dir: dir, - In: filepath.Join(dir, "stdin"), - Out: filepath.Join(dir, "stdout"), - Err: filepath.Join(dir, "stderr"), - }, nil +func StdioTerminal(id string) (*IO, error) { + return NewIOWithTerminal(os.Stdin, os.Stdout, os.Stderr, true)(id) } type FIFOSet struct { @@ -134,9 +121,10 @@ type ioSet struct { } type wgCloser struct { - wg *sync.WaitGroup - dir string - set []io.Closer + wg *sync.WaitGroup + dir string + set []io.Closer + cancel context.CancelFunc } func (g *wgCloser) Wait() { @@ -152,3 +140,7 @@ func (g *wgCloser) Close() error { } return nil } + +func (g *wgCloser) Cancel() { + g.cancel() +} diff --git a/vendor/github.com/containerd/containerd/io_unix.go b/vendor/github.com/containerd/containerd/io_unix.go index ed896058..cde30d45 100644 --- a/vendor/github.com/containerd/containerd/io_unix.go +++ b/vendor/github.com/containerd/containerd/io_unix.go @@ -5,24 +5,46 @@ package containerd import ( "context" "io" + "io/ioutil" + "os" + "path/filepath" "sync" "syscall" "github.com/containerd/fifo" ) +// NewFifos returns a new set of fifos for the task +func NewFifos(id string) (*FIFOSet, error) { + root := filepath.Join(os.TempDir(), "containerd") + if err := os.MkdirAll(root, 0700); err != nil { + return nil, err + } + dir, err := ioutil.TempDir(root, "") + if err != nil { + return nil, err + } + return &FIFOSet{ + Dir: dir, + In: filepath.Join(dir, id+"-stdin"), + Out: filepath.Join(dir, id+"-stdout"), + Err: filepath.Join(dir, id+"-stderr"), + }, nil +} + func copyIO(fifos *FIFOSet, ioset *ioSet, tty bool) (_ *wgCloser, err error) { var ( - f io.ReadWriteCloser - set []io.Closer - ctx = context.Background() - wg = &sync.WaitGroup{} + f io.ReadWriteCloser + set []io.Closer + ctx, cancel = context.WithCancel(context.Background()) + wg = &sync.WaitGroup{} ) defer func() { if err != nil { for _, f := range set { f.Close() } + cancel() } }() @@ -55,13 +77,14 @@ func copyIO(fifos *FIFOSet, ioset *ioSet, tty bool) (_ *wgCloser, err error) { wg.Add(1) go func(r io.ReadCloser) { io.Copy(ioset.err, r) - wg.Done() r.Close() + wg.Done() }(f) } return &wgCloser{ - wg: wg, - dir: fifos.Dir, - set: set, + wg: wg, + dir: fifos.Dir, + set: set, + cancel: cancel, }, nil } diff --git a/vendor/github.com/containerd/containerd/io_windows.go b/vendor/github.com/containerd/containerd/io_windows.go index 58c6a8be..e37568c2 100644 --- a/vendor/github.com/containerd/containerd/io_windows.go +++ b/vendor/github.com/containerd/containerd/io_windows.go @@ -1,6 +1,7 @@ package containerd import ( + "fmt" "io" "net" "sync" @@ -10,8 +11,22 @@ import ( "github.com/pkg/errors" ) +const pipeRoot = `\\.\pipe` + +// NewFifos returns a new set of fifos for the task +func NewFifos(id string) (*FIFOSet, error) { + return &FIFOSet{ + In: fmt.Sprintf(`%s\ctr-%s-stdin`, pipeRoot, id), + Out: fmt.Sprintf(`%s\ctr-%s-stdout`, pipeRoot, id), + Err: fmt.Sprintf(`%s\ctr-%s-stderr`, pipeRoot, id), + }, nil +} + func copyIO(fifos *FIFOSet, ioset *ioSet, tty bool) (_ *wgCloser, err error) { - var wg sync.WaitGroup + var ( + wg sync.WaitGroup + set []io.Closer + ) if fifos.In != "" { l, err := winio.ListenPipe(fifos.In, nil) @@ -23,6 +38,7 @@ func 
copyIO(fifos *FIFOSet, ioset *ioSet, tty bool) (_ *wgCloser, err error) { l.Close() } }(l) + set = append(set, l) go func() { c, err := l.Accept() @@ -46,6 +62,7 @@ func copyIO(fifos *FIFOSet, ioset *ioSet, tty bool) (_ *wgCloser, err error) { l.Close() } }(l) + set = append(set, l) wg.Add(1) go func() { @@ -71,6 +88,7 @@ func copyIO(fifos *FIFOSet, ioset *ioSet, tty bool) (_ *wgCloser, err error) { l.Close() } }(l) + set = append(set, l) wg.Add(1) go func() { @@ -89,5 +107,11 @@ func copyIO(fifos *FIFOSet, ioset *ioSet, tty bool) (_ *wgCloser, err error) { return &wgCloser{ wg: &wg, dir: fifos.Dir, + set: set, + cancel: func() { + for _, l := range set { + l.Close() + } + }, }, nil } diff --git a/vendor/github.com/containerd/containerd/log/context.go b/vendor/github.com/containerd/containerd/log/context.go index 10af06f0..1081719c 100644 --- a/vendor/github.com/containerd/containerd/log/context.go +++ b/vendor/github.com/containerd/containerd/log/context.go @@ -4,7 +4,7 @@ import ( "context" "path" - "github.com/Sirupsen/logrus" + "github.com/sirupsen/logrus" ) var ( diff --git a/vendor/github.com/containerd/containerd/metadata/content.go b/vendor/github.com/containerd/containerd/metadata/content.go index ceefef15..811a9191 100644 --- a/vendor/github.com/containerd/containerd/metadata/content.go +++ b/vendor/github.com/containerd/containerd/metadata/content.go @@ -251,7 +251,12 @@ func (cs *contentStore) Status(ctx context.Context, ref string) (content.Status, return content.Status{}, err } - return cs.Store.Status(ctx, bref) + st, err := cs.Store.Status(ctx, bref) + if err != nil { + return content.Status{}, err + } + st.Ref = ref + return st, nil } func (cs *contentStore) Abort(ctx context.Context, ref string) error { @@ -298,18 +303,23 @@ func (cs *contentStore) Writer(ctx context.Context, ref string, size int64, expe return err } - if len(bkt.Get([]byte(ref))) > 0 { - return errors.Wrapf(errdefs.ErrUnavailable, "ref %v is currently in use", ref) - } + var ( + bref string + brefb = bkt.Get([]byte(ref)) + ) - sid, err := bkt.NextSequence() - if err != nil { - return err - } + if brefb == nil { + sid, err := bkt.NextSequence() + if err != nil { + return err + } - bref := createKey(sid, ns, ref) - if err := bkt.Put([]byte(ref), []byte(bref)); err != nil { - return err + bref = createKey(sid, ns, ref) + if err := bkt.Put([]byte(ref), []byte(bref)); err != nil { + return err + } + } else { + bref = string(brefb) } // Do not use the passed in expected value here since it was @@ -385,13 +395,14 @@ func (nw *namespacedWriter) commit(tx *bolt.Tx, size int64, expected digest.Dige return err } - timeEncoded, err := status.UpdatedAt.MarshalBinary() + timeEncoded, err := time.Now().UTC().MarshalBinary() if err != nil { return err } for _, v := range [][2][]byte{ {bucketKeyCreatedAt, timeEncoded}, + {bucketKeyUpdatedAt, timeEncoded}, {bucketKeySize, sizeEncoded}, } { if err := bkt.Put(v[0], v[1]); err != nil { @@ -402,6 +413,14 @@ func (nw *namespacedWriter) commit(tx *bolt.Tx, size int64, expected digest.Dige return nil } +func (nw *namespacedWriter) Status() (content.Status, error) { + st, err := nw.Writer.Status() + if err == nil { + st.Ref = nw.ref + } + return st, err +} + func (cs *contentStore) Reader(ctx context.Context, dgst digest.Digest) (io.ReadCloser, error) { if err := cs.checkAccess(ctx, dgst); err != nil { return nil, err diff --git a/vendor/github.com/containerd/containerd/mount/mount_linux.go b/vendor/github.com/containerd/containerd/mount/mount_linux.go index 86df8bbc..5995051b 
100644 --- a/vendor/github.com/containerd/containerd/mount/mount_linux.go +++ b/vendor/github.com/containerd/containerd/mount/mount_linux.go @@ -15,6 +15,25 @@ func Unmount(mount string, flags int) error { return unix.Unmount(mount, flags) } +// UnmountAll repeatedly unmounts the given mount point until there +// are no mounts remaining (EINVAL is returned by mount), which is +// useful for undoing a stack of mounts on the same mount point. +func UnmountAll(mount string, flags int) error { + for { + if err := Unmount(mount, flags); err != nil { + // EINVAL is returned if the target is not a + // mount point, indicating that we are + // done. It can also indicate a few other + // things (such as invalid flags) which we + // unfortunately end up squelching here too. + if err == unix.EINVAL { + return nil + } + return err + } + } +} + // parseMountOptions takes fstab style mount options and parses them for // use with a standard mount() syscall func parseMountOptions(options []string) (int, string) { diff --git a/vendor/github.com/containerd/containerd/mount/mount_unix.go b/vendor/github.com/containerd/containerd/mount/mount_unix.go index 32ea2691..23467a8c 100644 --- a/vendor/github.com/containerd/containerd/mount/mount_unix.go +++ b/vendor/github.com/containerd/containerd/mount/mount_unix.go @@ -15,3 +15,7 @@ func (m *Mount) Mount(target string) error { func Unmount(mount string, flags int) error { return ErrNotImplementOnUnix } + +func UnmountAll(mount string, flags int) error { + return ErrNotImplementOnUnix +} diff --git a/vendor/github.com/containerd/containerd/mount/mount_windows.go b/vendor/github.com/containerd/containerd/mount/mount_windows.go index 35dead41..8eeca681 100644 --- a/vendor/github.com/containerd/containerd/mount/mount_windows.go +++ b/vendor/github.com/containerd/containerd/mount/mount_windows.go @@ -13,3 +13,7 @@ func (m *Mount) Mount(target string) error { func Unmount(mount string, flags int) error { return ErrNotImplementOnWindows } + +func UnmountAll(mount string, flags int) error { + return ErrNotImplementOnWindows +} diff --git a/vendor/github.com/containerd/containerd/process.go b/vendor/github.com/containerd/containerd/process.go index caa33201..a3b4b356 100644 --- a/vendor/github.com/containerd/containerd/process.go +++ b/vendor/github.com/containerd/containerd/process.go @@ -45,6 +45,8 @@ func (p *process) Start(ctx context.Context) error { } response, err := p.task.client.TaskService().Exec(ctx, request) if err != nil { + p.io.Cancel() + p.io.Wait() p.io.Close() return err } diff --git a/vendor/github.com/containerd/containerd/reference/reference.go b/vendor/github.com/containerd/containerd/reference/reference.go index 658ff1b2..d31dff52 100644 --- a/vendor/github.com/containerd/containerd/reference/reference.go +++ b/vendor/github.com/containerd/containerd/reference/reference.go @@ -47,8 +47,8 @@ type Spec struct { // Object contains the identifier for the remote resource. Classically, // this is a tag but can refer to anything in a remote. By convention, any - // portion that may be a partial or whole digest will be preceeded by an - // `@`. Anything preceeding the `@` will be referred to as the "tag". + // portion that may be a partial or whole digest will be preceded by an + // `@`. Anything preceding the `@` will be referred to as the "tag". 
// // In practice, we will see this broken down into the following formats: // @@ -82,7 +82,7 @@ func Parse(s string) (Spec, error) { var object string if idx := splitRe.FindStringIndex(u.Path); idx != nil { - // This allows us to retain the @ to signify digests or shortend digests in + // This allows us to retain the @ to signify digests or shortened digests in // the object. object = u.Path[idx[0]:] if object[:1] == ":" { @@ -129,7 +129,7 @@ func (r Spec) String() string { return fmt.Sprintf("%v:%v", r.Locator, r.Object) } -// SplitObject provides two parts of the object spec, delimiited by an `@` +// SplitObject provides two parts of the object spec, delimited by an `@` // symbol. // // Either may be empty and it is the callers job to validate them diff --git a/vendor/github.com/containerd/containerd/remotes/docker/fetcher.go b/vendor/github.com/containerd/containerd/remotes/docker/fetcher.go index 24c2a6a5..a48c8bdc 100644 --- a/vendor/github.com/containerd/containerd/remotes/docker/fetcher.go +++ b/vendor/github.com/containerd/containerd/remotes/docker/fetcher.go @@ -7,11 +7,11 @@ import ( "path" "strings" - "github.com/Sirupsen/logrus" "github.com/containerd/containerd/images" "github.com/containerd/containerd/log" ocispec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" + "github.com/sirupsen/logrus" ) type dockerFetcher struct { diff --git a/vendor/github.com/containerd/containerd/remotes/docker/resolver.go b/vendor/github.com/containerd/containerd/remotes/docker/resolver.go index fe962685..8eed9975 100644 --- a/vendor/github.com/containerd/containerd/remotes/docker/resolver.go +++ b/vendor/github.com/containerd/containerd/remotes/docker/resolver.go @@ -13,7 +13,6 @@ import ( "strings" "time" - "github.com/Sirupsen/logrus" "github.com/containerd/containerd/images" "github.com/containerd/containerd/log" "github.com/containerd/containerd/reference" @@ -21,6 +20,7 @@ import ( digest "github.com/opencontainers/go-digest" ocispec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" + "github.com/sirupsen/logrus" "golang.org/x/net/context/ctxhttp" ) diff --git a/vendor/github.com/containerd/containerd/remotes/docker/schema1/converter.go b/vendor/github.com/containerd/containerd/remotes/docker/schema1/converter.go index 2cf882f7..4459ce94 100644 --- a/vendor/github.com/containerd/containerd/remotes/docker/schema1/converter.go +++ b/vendor/github.com/containerd/containerd/remotes/docker/schema1/converter.go @@ -336,7 +336,7 @@ type v1History struct { } `json:"container_config,omitempty"` } -// isEmptyLayer returns whether the v1 compability history describes an +// isEmptyLayer returns whether the v1 compatibility history describes an // empty layer. A return value of true indicates the layer is empty, // however false does not indicate non-empty. 
func isEmptyLayer(compatHistory []byte) (bool, error) { diff --git a/vendor/github.com/containerd/containerd/remotes/handlers.go b/vendor/github.com/containerd/containerd/remotes/handlers.go index cdcf7146..9c5f9ca8 100644 --- a/vendor/github.com/containerd/containerd/remotes/handlers.go +++ b/vendor/github.com/containerd/containerd/remotes/handlers.go @@ -5,12 +5,12 @@ import ( "fmt" "time" - "github.com/Sirupsen/logrus" "github.com/containerd/containerd/content" "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/images" "github.com/containerd/containerd/log" ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/sirupsen/logrus" ) // MakeRef returns a unique reference for the descriptor. This reference can be diff --git a/vendor/github.com/containerd/containerd/runtime/errors.go b/vendor/github.com/containerd/containerd/runtime/errors.go deleted file mode 100644 index c55fbbb6..00000000 --- a/vendor/github.com/containerd/containerd/runtime/errors.go +++ /dev/null @@ -1,10 +0,0 @@ -package runtime - -import "errors" - -var ( - ErrContainerExists = errors.New("runtime: container with id already exists") - ErrContainerNotExist = errors.New("runtime: container does not exist") - ErrRuntimeNotExist = errors.New("runtime: runtime does not exist") - ErrProcessExited = errors.New("runtime: process already exited") -) diff --git a/vendor/github.com/containerd/containerd/runtime/monitor.go b/vendor/github.com/containerd/containerd/runtime/monitor.go deleted file mode 100644 index 4c61858b..00000000 --- a/vendor/github.com/containerd/containerd/runtime/monitor.go +++ /dev/null @@ -1,52 +0,0 @@ -package runtime - -// TaskMonitor provides an interface for monitoring of containers within containerd -type TaskMonitor interface { - // Monitor adds the provided container to the monitor - Monitor(Task) error - // Stop stops and removes the provided container from the monitor - Stop(Task) error -} - -func NewMultiTaskMonitor(monitors ...TaskMonitor) TaskMonitor { - return &multiTaskMonitor{ - monitors: monitors, - } -} - -func NewNoopMonitor() TaskMonitor { - return &noopTaskMonitor{} -} - -type noopTaskMonitor struct { -} - -func (mm *noopTaskMonitor) Monitor(c Task) error { - return nil -} - -func (mm *noopTaskMonitor) Stop(c Task) error { - return nil -} - -type multiTaskMonitor struct { - monitors []TaskMonitor -} - -func (mm *multiTaskMonitor) Monitor(c Task) error { - for _, m := range mm.monitors { - if err := m.Monitor(c); err != nil { - return err - } - } - return nil -} - -func (mm *multiTaskMonitor) Stop(c Task) error { - for _, m := range mm.monitors { - if err := m.Stop(c); err != nil { - return err - } - } - return nil -} diff --git a/vendor/github.com/containerd/containerd/runtime/runtime.go b/vendor/github.com/containerd/containerd/runtime/runtime.go deleted file mode 100644 index 09642a45..00000000 --- a/vendor/github.com/containerd/containerd/runtime/runtime.go +++ /dev/null @@ -1,51 +0,0 @@ -package runtime - -import ( - "context" - "time" - - "github.com/containerd/containerd/mount" - "github.com/gogo/protobuf/types" -) - -type IO struct { - Stdin string - Stdout string - Stderr string - Terminal bool -} - -type CreateOpts struct { - // Spec is the OCI runtime spec - Spec *types.Any - // Rootfs mounts to perform to gain access to the container's filesystem - Rootfs []mount.Mount - // IO for the container's main process - IO IO - // Checkpoint digest to restore container state - Checkpoint string - // Options for the runtime and container - 
Options *types.Any -} - -type Exit struct { - Pid uint32 - Status uint32 - Timestamp time.Time -} - -// Runtime is responsible for the creation of containers for a certain platform, -// arch, or custom usage. -type Runtime interface { - // ID of the runtime - ID() string - // Create creates a task with the provided id and options. - Create(ctx context.Context, id string, opts CreateOpts) (Task, error) - // Get returns a task. - Get(context.Context, string) (Task, error) - // Tasks returns all the current tasks for the runtime. - // Any container runs at most one task at a time. - Tasks(context.Context) ([]Task, error) - // Delete removes the task in the runtime. - Delete(context.Context, Task) (*Exit, error) -} diff --git a/vendor/github.com/containerd/containerd/runtime/task.go b/vendor/github.com/containerd/containerd/runtime/task.go deleted file mode 100644 index 4b3e47b9..00000000 --- a/vendor/github.com/containerd/containerd/runtime/task.go +++ /dev/null @@ -1,82 +0,0 @@ -package runtime - -import ( - "context" - - "github.com/gogo/protobuf/types" -) - -type TaskInfo struct { - ID string - Runtime string - Spec []byte - Namespace string -} - -type Process interface { - ID() string - // State returns the process state - State(context.Context) (State, error) - // Kill signals a container - Kill(context.Context, uint32, bool) error - // Pty resizes the processes pty/console - ResizePty(context.Context, ConsoleSize) error - // CloseStdin closes the processes stdin - CloseIO(context.Context) error -} - -type Task interface { - Process - - // Information of the container - Info() TaskInfo - // Start the container's user defined process - Start(context.Context) error - // Pause pauses the container process - Pause(context.Context) error - // Resume unpauses the container process - Resume(context.Context) error - // Exec adds a process into the container - Exec(context.Context, string, ExecOpts) (Process, error) - // Pids returns all pids - Pids(context.Context) ([]uint32, error) - // Checkpoint checkpoints a container to an image with live system data - Checkpoint(context.Context, string, *types.Any) error - // DeleteProcess deletes a specific exec process via the pid - DeleteProcess(context.Context, string) (*Exit, error) - // Update sets the provided resources to a running task - Update(context.Context, *types.Any) error - // Process returns a process within the task for the provided id - Process(context.Context, string) (Process, error) -} - -type ExecOpts struct { - Spec *types.Any - IO IO -} - -type ConsoleSize struct { - Width uint32 - Height uint32 -} - -type Status int - -const ( - CreatedStatus Status = iota + 1 - RunningStatus - StoppedStatus - DeletedStatus - PausedStatus -) - -type State struct { - // Status is the current status of the container - Status Status - // Pid is the main process id for the container - Pid uint32 - Stdin string - Stdout string - Stderr string - Terminal bool -} diff --git a/vendor/github.com/containerd/containerd/services/content/service.go b/vendor/github.com/containerd/containerd/services/content/service.go index ead3b9d6..c8698df8 100644 --- a/vendor/github.com/containerd/containerd/services/content/service.go +++ b/vendor/github.com/containerd/containerd/services/content/service.go @@ -4,7 +4,6 @@ import ( "io" "sync" - "github.com/Sirupsen/logrus" "github.com/boltdb/bolt" api "github.com/containerd/containerd/api/services/content/v1" eventsapi "github.com/containerd/containerd/api/services/events/v1" @@ -17,6 +16,7 @@ import ( 
"github.com/golang/protobuf/ptypes/empty" digest "github.com/opencontainers/go-digest" "github.com/pkg/errors" + "github.com/sirupsen/logrus" "golang.org/x/net/context" "google.golang.org/grpc" "google.golang.org/grpc/codes" diff --git a/vendor/github.com/containerd/containerd/services/snapshot/client.go b/vendor/github.com/containerd/containerd/services/snapshot/client.go index b8f6e0d7..c5b4ab53 100644 --- a/vendor/github.com/containerd/containerd/services/snapshot/client.go +++ b/vendor/github.com/containerd/containerd/services/snapshot/client.go @@ -105,7 +105,7 @@ func (r *remoteSnapshotter) Walk(ctx context.Context, fn func(context.Context, s Snapshotter: r.snapshotterName, }) if err != nil { - errdefs.FromGRPC(err) + return errdefs.FromGRPC(err) } for { resp, err := sc.Recv() @@ -135,10 +135,9 @@ func toKind(kind snapshotapi.Kind) snapshot.Kind { func toInfo(info snapshotapi.Info) snapshot.Info { return snapshot.Info{ - Name: info.Name, - Parent: info.Parent, - Kind: toKind(info.Kind), - Readonly: info.Readonly, + Name: info.Name, + Parent: info.Parent, + Kind: toKind(info.Kind), } } diff --git a/vendor/github.com/containerd/containerd/services/snapshot/service.go b/vendor/github.com/containerd/containerd/services/snapshot/service.go index 089812e0..3972f0f0 100644 --- a/vendor/github.com/containerd/containerd/services/snapshot/service.go +++ b/vendor/github.com/containerd/containerd/services/snapshot/service.go @@ -269,10 +269,9 @@ func fromKind(kind snapshot.Kind) snapshotapi.Kind { func fromInfo(info snapshot.Info) snapshotapi.Info { return snapshotapi.Info{ - Name: info.Name, - Parent: info.Parent, - Kind: fromKind(info.Kind), - Readonly: info.Readonly, + Name: info.Name, + Parent: info.Parent, + Kind: fromKind(info.Kind), } } diff --git a/vendor/github.com/containerd/containerd/snapshot/naive/naive.go b/vendor/github.com/containerd/containerd/snapshot/naive/naive.go index 88fc5539..a78a7614 100644 --- a/vendor/github.com/containerd/containerd/snapshot/naive/naive.go +++ b/vendor/github.com/containerd/containerd/snapshot/naive/naive.go @@ -94,11 +94,11 @@ func (o *snapshotter) Usage(ctx context.Context, key string) (snapshot.Usage, er } func (o *snapshotter) Prepare(ctx context.Context, key, parent string) ([]mount.Mount, error) { - return o.createActive(ctx, key, parent, false) + return o.createSnapshot(ctx, snapshot.KindActive, key, parent) } func (o *snapshotter) View(ctx context.Context, key, parent string) ([]mount.Mount, error) { - return o.createActive(ctx, key, parent, true) + return o.createSnapshot(ctx, snapshot.KindView, key, parent) } // Mounts returns the mounts for the transaction identified by key. 
Can be @@ -110,12 +110,12 @@ func (o *snapshotter) Mounts(ctx context.Context, key string) ([]mount.Mount, er if err != nil { return nil, err } - active, err := storage.GetActive(ctx, key) + s, err := storage.GetSnapshot(ctx, key) t.Rollback() if err != nil { - return nil, errors.Wrap(err, "failed to get active mount") + return nil, errors.Wrap(err, "failed to get snapshot mount") } - return o.mounts(active), nil + return o.mounts(s), nil } func (o *snapshotter) Commit(ctx context.Context, name, key string) error { @@ -203,13 +203,13 @@ func (o *snapshotter) Walk(ctx context.Context, fn func(context.Context, snapsho return storage.WalkInfo(ctx, fn) } -func (o *snapshotter) createActive(ctx context.Context, key, parent string, readonly bool) ([]mount.Mount, error) { +func (o *snapshotter) createSnapshot(ctx context.Context, kind snapshot.Kind, key, parent string) ([]mount.Mount, error) { var ( err error path, td string ) - if !readonly || parent == "" { + if kind == snapshot.KindActive || parent == "" { td, err = ioutil.TempDir(filepath.Join(o.root, "snapshots"), "new-") if err != nil { return nil, errors.Wrap(err, "failed to create temp dir") @@ -235,23 +235,23 @@ func (o *snapshotter) createActive(ctx context.Context, key, parent string, read return nil, err } - active, err := storage.CreateActive(ctx, key, parent, readonly) + s, err := storage.CreateSnapshot(ctx, kind, key, parent) if err != nil { if rerr := t.Rollback(); rerr != nil { log.G(ctx).WithError(rerr).Warn("Failure rolling back transaction") } - return nil, errors.Wrap(err, "failed to create active") + return nil, errors.Wrap(err, "failed to create snapshot") } if td != "" { - if len(active.ParentIDs) > 0 { - parent := o.getSnapshotDir(active.ParentIDs[0]) + if len(s.ParentIDs) > 0 { + parent := o.getSnapshotDir(s.ParentIDs[0]) if err := fs.CopyDir(td, parent); err != nil { return nil, errors.Wrap(err, "copying of parent failed") } } - path = o.getSnapshotDir(active.ID) + path = o.getSnapshotDir(s.ID) if err := os.Rename(td, path); err != nil { if rerr := t.Rollback(); rerr != nil { log.G(ctx).WithError(rerr).Warn("Failure rolling back transaction") @@ -265,29 +265,29 @@ func (o *snapshotter) createActive(ctx context.Context, key, parent string, read return nil, errors.Wrap(err, "commit failed") } - return o.mounts(active), nil + return o.mounts(s), nil } func (o *snapshotter) getSnapshotDir(id string) string { return filepath.Join(o.root, "snapshots", id) } -func (o *snapshotter) mounts(active storage.Active) []mount.Mount { +func (o *snapshotter) mounts(s storage.Snapshot) []mount.Mount { var ( roFlag string source string ) - if active.Readonly { + if s.Kind == snapshot.KindView { roFlag = "ro" } else { roFlag = "rw" } - if len(active.ParentIDs) == 0 || !active.Readonly { - source = o.getSnapshotDir(active.ID) + if len(s.ParentIDs) == 0 || s.Kind == snapshot.KindActive { + source = o.getSnapshotDir(s.ID) } else { - source = o.getSnapshotDir(active.ParentIDs[0]) + source = o.getSnapshotDir(s.ParentIDs[0]) } return []mount.Mount{ diff --git a/vendor/github.com/containerd/containerd/snapshot/overlay/overlay.go b/vendor/github.com/containerd/containerd/snapshot/overlay/overlay.go index a8775fa5..c759829d 100644 --- a/vendor/github.com/containerd/containerd/snapshot/overlay/overlay.go +++ b/vendor/github.com/containerd/containerd/snapshot/overlay/overlay.go @@ -122,11 +122,11 @@ func (o *snapshotter) Usage(ctx context.Context, key string) (snapshot.Usage, er } func (o *snapshotter) Prepare(ctx context.Context, key, parent string) 
([]mount.Mount, error) { - return o.createActive(ctx, key, parent, false) + return o.createSnapshot(ctx, snapshot.KindActive, key, parent) } func (o *snapshotter) View(ctx context.Context, key, parent string) ([]mount.Mount, error) { - return o.createActive(ctx, key, parent, true) + return o.createSnapshot(ctx, snapshot.KindView, key, parent) } // Mounts returns the mounts for the transaction identified by key. Can be @@ -138,12 +138,12 @@ func (o *snapshotter) Mounts(ctx context.Context, key string) ([]mount.Mount, er if err != nil { return nil, err } - active, err := storage.GetActive(ctx, key) + s, err := storage.GetSnapshot(ctx, key) t.Rollback() if err != nil { return nil, errors.Wrap(err, "failed to get active mount") } - return o.mounts(active), nil + return o.mounts(s), nil } func (o *snapshotter) Commit(ctx context.Context, name, key string) error { @@ -230,7 +230,7 @@ func (o *snapshotter) Walk(ctx context.Context, fn func(context.Context, snapsho return storage.WalkInfo(ctx, fn) } -func (o *snapshotter) createActive(ctx context.Context, key, parent string, readonly bool) ([]mount.Mount, error) { +func (o *snapshotter) createSnapshot(ctx context.Context, kind snapshot.Kind, key, parent string) ([]mount.Mount, error) { var ( path string snapshotDir = filepath.Join(o.root, "snapshots") @@ -258,7 +258,8 @@ func (o *snapshotter) createActive(ctx context.Context, key, parent string, read if err = os.MkdirAll(filepath.Join(td, "fs"), 0711); err != nil { return nil, err } - if !readonly { + + if kind == snapshot.KindActive { if err = os.MkdirAll(filepath.Join(td, "work"), 0700); err != nil { return nil, err } @@ -269,7 +270,7 @@ func (o *snapshotter) createActive(ctx context.Context, key, parent string, read return nil, err } - active, err := storage.CreateActive(ctx, key, parent, readonly) + s, err := storage.CreateSnapshot(ctx, kind, key, parent) if err != nil { if rerr := t.Rollback(); rerr != nil { log.G(ctx).WithError(rerr).Warn("Failure rolling back transaction") @@ -277,7 +278,7 @@ func (o *snapshotter) createActive(ctx context.Context, key, parent string, read return nil, errors.Wrap(err, "failed to create active") } - path = filepath.Join(snapshotDir, active.ID) + path = filepath.Join(snapshotDir, s.ID) if err = os.Rename(td, path); err != nil { if rerr := t.Rollback(); rerr != nil { log.G(ctx).WithError(rerr).Warn("Failure rolling back transaction") @@ -290,21 +291,21 @@ func (o *snapshotter) createActive(ctx context.Context, key, parent string, read return nil, errors.Wrap(err, "commit failed") } - return o.mounts(active), nil + return o.mounts(s), nil } -func (o *snapshotter) mounts(active storage.Active) []mount.Mount { - if len(active.ParentIDs) == 0 { +func (o *snapshotter) mounts(s storage.Snapshot) []mount.Mount { + if len(s.ParentIDs) == 0 { // if we only have one layer/no parents then just return a bind mount as overlay // will not work roFlag := "rw" - if active.Readonly { + if s.Kind == snapshot.KindView { roFlag = "ro" } return []mount.Mount{ { - Source: o.upperPath(active.ID), + Source: o.upperPath(s.ID), Type: "bind", Options: []string{ roFlag, @@ -315,15 +316,15 @@ func (o *snapshotter) mounts(active storage.Active) []mount.Mount { } var options []string - if !active.Readonly { + if s.Kind == snapshot.KindActive { options = append(options, - fmt.Sprintf("workdir=%s", o.workPath(active.ID)), - fmt.Sprintf("upperdir=%s", o.upperPath(active.ID)), + fmt.Sprintf("workdir=%s", o.workPath(s.ID)), + fmt.Sprintf("upperdir=%s", o.upperPath(s.ID)), ) - } else if 
len(active.ParentIDs) == 1 { + } else if len(s.ParentIDs) == 1 { return []mount.Mount{ { - Source: o.upperPath(active.ParentIDs[0]), + Source: o.upperPath(s.ParentIDs[0]), Type: "bind", Options: []string{ "ro", @@ -333,9 +334,9 @@ func (o *snapshotter) mounts(active storage.Active) []mount.Mount { } } - parentPaths := make([]string, len(active.ParentIDs)) - for i := range active.ParentIDs { - parentPaths[i] = o.upperPath(active.ParentIDs[i]) + parentPaths := make([]string, len(s.ParentIDs)) + for i := range s.ParentIDs { + parentPaths[i] = o.upperPath(s.ParentIDs[i]) } options = append(options, fmt.Sprintf("lowerdir=%s", strings.Join(parentPaths, ":"))) diff --git a/vendor/github.com/containerd/containerd/snapshot/snapshotter.go b/vendor/github.com/containerd/containerd/snapshot/snapshotter.go index a0e956d7..c2cd73fc 100644 --- a/vendor/github.com/containerd/containerd/snapshot/snapshotter.go +++ b/vendor/github.com/containerd/containerd/snapshot/snapshotter.go @@ -11,16 +11,29 @@ type Kind int // definitions of snapshot kinds const ( - KindActive Kind = iota + KindView Kind = iota + 1 + KindActive KindCommitted ) +func (k Kind) String() string { + switch k { + case KindView: + return "View" + case KindActive: + return "Active" + case KindCommitted: + return "Committed" + default: + return "Unknown" + } +} + // Info provides information about a particular snapshot. type Info struct { - Name string // name or key of snapshot - Parent string // name of parent snapshot - Kind Kind // active or committed snapshot - Readonly bool // true if readonly, only valid for active + Kind Kind // active or committed snapshot + Name string // name or key of snapshot + Parent string // name of parent snapshot } // Usage defines statistics for disk resources consumed by the snapshot. @@ -36,7 +49,7 @@ func (u *Usage) Add(other Usage) { u.Size += other.Size // TODO(stevvooe): assumes independent inodes, but provides and upper - // bound. This should be pretty close, assumming the inodes for a + // bound. This should be pretty close, assuming the inodes for a // snapshot are roughly unique to it. Don't trust this assumption. u.Inodes += other.Inodes } @@ -50,7 +63,7 @@ func (u *Usage) Add(other Usage) { // between a parent and its snapshot to generate a classic layer. // // An active snapshot is created by calling `Prepare`. After mounting, changes -// can be made to the snapshot. The act of commiting creates a committed +// can be made to the snapshot. The act of committing creates a committed // snapshot. The committed snapshot will get the parent of active snapshot. The // committed snapshot can then be used as a parent. Active snapshots can never // act as a parent. 
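The snapshotter doc comment above (Prepare creates an active snapshot, committing turns it into an immutable parent, views are read-only) maps directly onto the Prepare/Commit/View signatures shown in the naive and overlay hunks. A hedged sketch of that lifecycle, assuming the interface is snapshot.Snapshotter as defined in the vendored snapshot package; the keys, the committed name, and the mount target are illustrative values:

package example

import (
	"context"
	"fmt"

	"github.com/containerd/containerd/snapshot"
)

// prepareAndCommit walks the active -> committed lifecycle: Prepare yields
// mounts for a new active snapshot, the caller mounts and populates it,
// Commit freezes it under a stable name, and View hands back read-only
// mounts onto the committed parent.
func prepareAndCommit(ctx context.Context, sn snapshot.Snapshotter, target string) error {
	// Active snapshot keyed by an ephemeral transaction key, no parent.
	mounts, err := sn.Prepare(ctx, "extract-layer-0", "")
	if err != nil {
		return err
	}
	for _, m := range mounts {
		// mount.Mount exposes Mount(target string) error, per the mount package hunks in this patch.
		if err := m.Mount(target); err != nil {
			return err
		}
	}
	// ... unpack layer contents into target, then unmount ...

	// Committing converts the active snapshot into a reusable, immutable parent.
	if err := sn.Commit(ctx, "layer-0-committed", "extract-layer-0"); err != nil {
		return err
	}

	// A read-only view of the committed parent, the new KindView path.
	viewMounts, err := sn.View(ctx, "inspect-layer-0", "layer-0-committed")
	if err != nil {
		return err
	}
	fmt.Printf("read-only view exposes %d mount(s)\n", len(viewMounts))
	return nil
}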
diff --git a/vendor/github.com/containerd/containerd/snapshot/storage/bolt.go b/vendor/github.com/containerd/containerd/snapshot/storage/bolt.go index f6dc8016..abe997c2 100644 --- a/vendor/github.com/containerd/containerd/snapshot/storage/bolt.go +++ b/vendor/github.com/containerd/containerd/snapshot/storage/bolt.go @@ -79,10 +79,9 @@ func GetInfo(ctx context.Context, key string) (string, snapshot.Info, snapshot.U } return fmt.Sprint(ss.ID), snapshot.Info{ - Name: key, - Parent: ss.Parent, - Kind: fromProtoKind(ss.Kind), - Readonly: ss.Readonly, + Name: key, + Parent: ss.Parent, + Kind: snapshot.Kind(ss.Kind), }, usage, nil } @@ -102,22 +101,64 @@ func WalkInfo(ctx context.Context, fn func(context.Context, snapshot.Info) error } info := snapshot.Info{ - Name: string(k), - Parent: ss.Parent, - Kind: fromProtoKind(ss.Kind), - Readonly: ss.Readonly, + Name: string(k), + Parent: ss.Parent, + Kind: snapshot.Kind(ss.Kind), } return fn(ctx, info) }) }) } -// CreateActive creates a new active snapshot transaction referenced by -// the provided key. The new active snapshot will have the provided -// parent. If the readonly option is given, the active snapshot will be -// marked as readonly and can only be removed, and not committed. The -// provided context must contain a writable transaction. -func CreateActive(ctx context.Context, key, parent string, readonly bool) (a Active, err error) { +// GetSnapshot returns the metadata for the active or view snapshot transaction +// referenced by the given key. Requires a context with a storage transaction. +func GetSnapshot(ctx context.Context, key string) (s Snapshot, err error) { + err = withBucket(ctx, func(ctx context.Context, bkt, pbkt *bolt.Bucket) error { + b := bkt.Get([]byte(key)) + if len(b) == 0 { + return errors.Wrapf(errdefs.ErrNotFound, "snapshot %v", key) + } + + var ss db.Snapshot + if err := proto.Unmarshal(b, &ss); err != nil { + return errors.Wrap(err, "failed to unmarshal snapshot") + } + + if ss.Kind != db.KindActive && ss.Kind != db.KindView { + return errors.Wrapf(errdefs.ErrFailedPrecondition, "requested snapshot %v not active or view", key) + } + + s.ID = fmt.Sprintf("%d", ss.ID) + s.Kind = snapshot.Kind(ss.Kind) + + if ss.Parent != "" { + var parent db.Snapshot + if err := getSnapshot(bkt, ss.Parent, &parent); err != nil { + return errors.Wrap(err, "failed to get parent snapshot") + } + + s.ParentIDs, err = parents(bkt, &parent) + if err != nil { + return errors.Wrap(err, "failed to get parent chain") + } + } + return nil + }) + if err != nil { + return Snapshot{}, err + } + + return +} + +// CreateSnapshot inserts a record for an active or view snapshot with the provided parent. 
+func CreateSnapshot(ctx context.Context, kind snapshot.Kind, key, parent string) (s Snapshot, err error) { + switch kind { + case snapshot.KindActive, snapshot.KindView: + default: + return Snapshot{}, errors.Wrapf(errdefs.ErrInvalidArgument, "snapshot type %v invalid; only snapshots of type Active or View can be created", kind) + } + err = createBucketIfNotExists(ctx, func(ctx context.Context, bkt, pbkt *bolt.Bucket) error { var ( parentS *db.Snapshot @@ -143,10 +184,9 @@ func CreateActive(ctx context.Context, key, parent string, readonly bool) (a Act } ss := db.Snapshot{ - ID: id, - Parent: parent, - Kind: db.KindActive, - Readonly: readonly, + ID: id, + Parent: parent, + Kind: db.Kind(kind), } if err := putSnapshot(bkt, key, &ss); err != nil { return err @@ -159,59 +199,18 @@ func CreateActive(ctx context.Context, key, parent string, readonly bool) (a Act return errors.Wrap(err, "failed to write parent link") } - a.ParentIDs, err = parents(bkt, parentS) + s.ParentIDs, err = parents(bkt, parentS) if err != nil { return errors.Wrap(err, "failed to get parent chain") } } - a.ID = fmt.Sprintf("%d", id) - a.Readonly = readonly - + s.ID = fmt.Sprintf("%d", id) + s.Kind = kind return nil }) if err != nil { - return Active{}, err - } - - return -} - -// GetActive returns the metadata for the active snapshot transaction referenced -// by the given key. Requires a context with a storage transaction. -func GetActive(ctx context.Context, key string) (a Active, err error) { - err = withBucket(ctx, func(ctx context.Context, bkt, pbkt *bolt.Bucket) error { - b := bkt.Get([]byte(key)) - if len(b) == 0 { - return errors.Wrapf(errdefs.ErrNotFound, "snapshot %v", key) - } - - var ss db.Snapshot - if err := proto.Unmarshal(b, &ss); err != nil { - return errors.Wrap(err, "failed to unmarshal snapshot") - } - if ss.Kind != db.KindActive { - return errors.Wrapf(errdefs.ErrFailedPrecondition, "requested snapshot %v not active", key) - } - - a.ID = fmt.Sprintf("%d", ss.ID) - a.Readonly = ss.Readonly - - if ss.Parent != "" { - var parent db.Snapshot - if err := getSnapshot(bkt, ss.Parent, &parent); err != nil { - return errors.Wrap(err, "failed to get parent snapshot") - } - - a.ParentIDs, err = parents(bkt, &parent) - if err != nil { - return errors.Wrap(err, "failed to get parent chain") - } - } - return nil - }) - if err != nil { - return Active{}, err + return Snapshot{}, err } return @@ -255,7 +254,7 @@ func Remove(ctx context.Context, key string) (id string, k snapshot.Kind, err er } id = fmt.Sprintf("%d", ss.ID) - k = fromProtoKind(ss.Kind) + k = snapshot.Kind(ss.Kind) return nil }) @@ -283,12 +282,8 @@ func CommitActive(ctx context.Context, key, name string, usage snapshot.Usage) ( if ss.Kind != db.KindActive { return errors.Wrapf(errdefs.ErrFailedPrecondition, "snapshot %v is not active", name) } - if ss.Readonly { - return errors.Errorf("active snapshot is readonly") - } ss.Kind = db.KindCommitted - ss.Readonly = true ss.Inodes = usage.Inodes ss.Size_ = usage.Size @@ -354,13 +349,6 @@ func createBucketIfNotExists(ctx context.Context, fn func(context.Context, *bolt return fn(ctx, sbkt, pbkt) } -func fromProtoKind(k db.Kind) snapshot.Kind { - if k == db.KindActive { - return snapshot.KindActive - } - return snapshot.KindCommitted -} - func parents(bkt *bolt.Bucket, parent *db.Snapshot) (parents []string, err error) { for { parents = append(parents, fmt.Sprintf("%d", parent.ID)) diff --git a/vendor/github.com/containerd/containerd/snapshot/storage/metastore.go 
b/vendor/github.com/containerd/containerd/snapshot/storage/metastore.go index c223d40d..692931a3 100644 --- a/vendor/github.com/containerd/containerd/snapshot/storage/metastore.go +++ b/vendor/github.com/containerd/containerd/snapshot/storage/metastore.go @@ -9,6 +9,7 @@ import ( "context" "github.com/boltdb/bolt" + "github.com/containerd/containerd/snapshot" "github.com/pkg/errors" ) @@ -26,15 +27,16 @@ type Transactor interface { Rollback() error } -// Active hold the metadata for an active snapshot transaction. The ParentIDs -// hold the snapshot identifiers for the committed snapshots this active is -// based on. The ParentIDs are ordered from the lowest base to highest, meaning -// they should be applied in order from the first index to the last index. The -// last index should always be considered the active snapshots immediate parent. -type Active struct { +// Snapshot hold the metadata for an active or view snapshot transaction. The +// ParentIDs hold the snapshot identifiers for the committed snapshots this +// active or view is based on. The ParentIDs are ordered from the lowest base +// to highest, meaning they should be applied in order from the first index to +// the last index. The last index should always be considered the active +// snapshots immediate parent. +type Snapshot struct { + Kind snapshot.Kind ID string ParentIDs []string - Readonly bool } // MetaStore is used to store metadata related to a snapshot driver. The diff --git a/vendor/github.com/containerd/containerd/snapshot/storage/proto/record.pb.go b/vendor/github.com/containerd/containerd/snapshot/storage/proto/record.pb.go index b42b674f..bfaa67d0 100644 --- a/vendor/github.com/containerd/containerd/snapshot/storage/proto/record.pb.go +++ b/vendor/github.com/containerd/containerd/snapshot/storage/proto/record.pb.go @@ -34,23 +34,26 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto1.GoGoProtoPackageIsVersion2 // please upgrade the proto package -// Kind defines the kind of snapshot. type Kind int32 const ( - // KindActive represents an active snapshot - KindActive Kind = 0 - // KindCommitted represents a committed immutable snapshot - KindCommitted Kind = 1 + KindUnknown Kind = 0 + KindView Kind = 1 + KindActive Kind = 2 + KindCommitted Kind = 3 ) var Kind_name = map[int32]string{ - 0: "ACTIVE", - 1: "COMMITTED", + 0: "UNKNOWN", + 1: "VIEW", + 2: "ACTIVE", + 3: "COMMITTED", } var Kind_value = map[string]int32{ - "ACTIVE": 0, - "COMMITTED": 1, + "UNKNOWN": 0, + "VIEW": 1, + "ACTIVE": 2, + "COMMITTED": 3, } func (x Kind) String() string { @@ -61,10 +64,9 @@ func (Kind) EnumDescriptor() ([]byte, []int) { return fileDescriptorRecord, []in // Snapshot defines the storage type for a snapshot in the // metadata store. type Snapshot struct { - ID uint64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` - Parent string `protobuf:"bytes,2,opt,name=parent,proto3" json:"parent,omitempty"` - Kind Kind `protobuf:"varint,4,opt,name=kind,proto3,enum=containerd.snapshot.v1.Kind" json:"kind,omitempty"` - Readonly bool `protobuf:"varint,5,opt,name=readonly,proto3" json:"readonly,omitempty"` + ID uint64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` + Parent string `protobuf:"bytes,2,opt,name=parent,proto3" json:"parent,omitempty"` + Kind Kind `protobuf:"varint,4,opt,name=kind,proto3,enum=containerd.snapshot.v1.Kind" json:"kind,omitempty"` // inodes stores the number inodes in use for the snapshot. // // Only valid for committed snapshots. 
@@ -115,16 +117,6 @@ func (m *Snapshot) MarshalTo(dAtA []byte) (int, error) { i++ i = encodeVarintRecord(dAtA, i, uint64(m.Kind)) } - if m.Readonly { - dAtA[i] = 0x28 - i++ - if m.Readonly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } if m.Inodes != 0 { dAtA[i] = 0x30 i++ @@ -178,9 +170,6 @@ func (m *Snapshot) Size() (n int) { if m.Kind != 0 { n += 1 + sovRecord(uint64(m.Kind)) } - if m.Readonly { - n += 2 - } if m.Inodes != 0 { n += 1 + sovRecord(uint64(m.Inodes)) } @@ -211,7 +200,6 @@ func (this *Snapshot) String() string { `ID:` + fmt.Sprintf("%v", this.ID) + `,`, `Parent:` + fmt.Sprintf("%v", this.Parent) + `,`, `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, - `Readonly:` + fmt.Sprintf("%v", this.Readonly) + `,`, `Inodes:` + fmt.Sprintf("%v", this.Inodes) + `,`, `Size_:` + fmt.Sprintf("%v", this.Size_) + `,`, `}`, @@ -322,26 +310,6 @@ func (m *Snapshot) Unmarshal(dAtA []byte) error { break } } - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Readonly", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRecord - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Readonly = bool(v != 0) case 6: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Inodes", wireType) @@ -511,26 +479,28 @@ func init() { } var fileDescriptorRecord = []byte{ - // 334 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x90, 0xbb, 0x4e, 0xf3, 0x30, - 0x1c, 0xc5, 0xe3, 0x7c, 0xf9, 0x42, 0x6b, 0x41, 0x55, 0x2c, 0x54, 0x59, 0x11, 0x32, 0x16, 0x53, - 0xc4, 0x90, 0x70, 0x79, 0x82, 0xde, 0x86, 0xaa, 0xaa, 0x90, 0x42, 0xc5, 0x9e, 0xc6, 0x56, 0x6a, - 0x41, 0xed, 0xca, 0x31, 0x95, 0x60, 0x62, 0x44, 0x7d, 0x87, 0x4e, 0xf0, 0x08, 0x4c, 0x3c, 0x41, - 0x47, 0x46, 0x26, 0x44, 0xf3, 0x24, 0x28, 0x69, 0xb9, 0x0c, 0x6c, 0xe7, 0xf7, 0xf7, 0x4f, 0xe7, - 0x48, 0x86, 0x9d, 0x54, 0x98, 0xf1, 0xcd, 0x28, 0x48, 0xd4, 0x24, 0x4c, 0x94, 0x34, 0xb1, 0x90, - 0x5c, 0xb3, 0xdf, 0x31, 0x93, 0xf1, 0x34, 0x1b, 0x2b, 0x13, 0x66, 0x46, 0xe9, 0x38, 0xe5, 0xe1, - 0x54, 0x2b, 0xa3, 0x42, 0xcd, 0x13, 0xa5, 0x59, 0x50, 0x02, 0x6a, 0xfc, 0xf8, 0xc1, 0x97, 0x1f, - 0xcc, 0x4e, 0xbc, 0xbd, 0x54, 0xa5, 0x6a, 0xed, 0x17, 0x69, 0x6d, 0x1f, 0x3e, 0x03, 0x58, 0xb9, - 0xd8, 0x58, 0xa8, 0x01, 0x6d, 0xc1, 0x30, 0xa0, 0xc0, 0x77, 0x5a, 0x6e, 0xfe, 0x7e, 0x60, 0xf7, - 0x3a, 0x91, 0x2d, 0x18, 0x6a, 0x40, 0x77, 0x1a, 0x6b, 0x2e, 0x0d, 0xb6, 0x29, 0xf0, 0xab, 0xd1, - 0x86, 0xd0, 0x31, 0x74, 0xae, 0x84, 0x64, 0xd8, 0xa1, 0xc0, 0xaf, 0x9d, 0xee, 0x07, 0x7f, 0x2f, - 0x07, 0x7d, 0x21, 0x59, 0x54, 0x9a, 0xc8, 0x83, 0x15, 0xcd, 0x63, 0xa6, 0xe4, 0xf5, 0x2d, 0xfe, - 0x4f, 0x81, 0x5f, 0x89, 0xbe, 0xb9, 0x58, 0x11, 0x52, 0x31, 0x9e, 0x61, 0x97, 0x02, 0xff, 0x5f, - 0xb4, 0x21, 0x84, 0xa0, 0x93, 0x89, 0x3b, 0x8e, 0xb7, 0xca, 0x6b, 0x99, 0x8f, 0x22, 0xe8, 0xf4, - 0xd7, 0x7d, 0x6e, 0xb3, 0x3d, 0xec, 0x5d, 0x76, 0xeb, 0x96, 0x57, 0x9b, 0x2f, 0x28, 0x2c, 0xae, - 0xcd, 0xc4, 0x88, 0x19, 0x47, 0x14, 0x56, 0xdb, 0xe7, 0x83, 0x41, 0x6f, 0x38, 0xec, 0x76, 0xea, - 0xc0, 0xdb, 0x9d, 0x2f, 0xe8, 0x4e, 0xf1, 0xdc, 0x56, 0x93, 0x89, 0x30, 0x86, 0x33, 0x6f, 0xfb, - 0xe1, 0x91, 0x58, 0x2f, 0x4f, 0xa4, 0xec, 0x6a, 0xe1, 0xe5, 0x8a, 0x58, 0x6f, 0x2b, 0x62, 0xdd, - 0xe7, 0x04, 0x2c, 0x73, 0x02, 0x5e, 0x73, 0x02, 0x3e, 0x72, 0x02, 0x46, 0x6e, 0xf9, 0x57, 0x67, - 0x9f, 0x01, 0x00, 0x00, 0xff, 0xff, 0xae, 0x6e, 0x6e, 0xcd, 
0xa1, 0x01, 0x00, 0x00, + // 364 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x90, 0xbb, 0xae, 0xd3, 0x30, + 0x18, 0xc7, 0xe3, 0x9c, 0x28, 0xe7, 0xd4, 0x94, 0x12, 0x2c, 0x14, 0x45, 0x51, 0x65, 0x2c, 0xa6, + 0x88, 0x21, 0xe1, 0xf2, 0x04, 0xbd, 0x0d, 0x51, 0xd5, 0x56, 0x0a, 0xbd, 0xcc, 0x69, 0x6c, 0xa5, + 0x56, 0x55, 0xbb, 0x4a, 0x4c, 0x2b, 0x31, 0x31, 0x56, 0x79, 0x02, 0x96, 0x4c, 0xf0, 0x14, 0x3c, + 0x41, 0x47, 0x46, 0x26, 0x44, 0xf3, 0x24, 0x28, 0x69, 0x11, 0x0c, 0x67, 0xfb, 0x5f, 0x7e, 0xf6, + 0xf7, 0xe9, 0x83, 0xc3, 0x94, 0xab, 0xcd, 0xc7, 0xb5, 0x9f, 0xc8, 0x5d, 0x90, 0x48, 0xa1, 0x62, + 0x2e, 0x58, 0x46, 0xff, 0x97, 0xb9, 0x88, 0xf7, 0xf9, 0x46, 0xaa, 0x20, 0x57, 0x32, 0x8b, 0x53, + 0x16, 0xec, 0x33, 0xa9, 0x64, 0x90, 0xb1, 0x44, 0x66, 0xd4, 0x6f, 0x0c, 0xb2, 0xff, 0xf1, 0xfe, + 0x5f, 0xde, 0x3f, 0xbc, 0x75, 0x5f, 0xa4, 0x32, 0x95, 0x57, 0xbe, 0x56, 0x57, 0xfa, 0xd5, 0x17, + 0x00, 0x1f, 0x3e, 0xdc, 0x28, 0x64, 0x43, 0x9d, 0x53, 0x07, 0x10, 0xe0, 0x19, 0x7d, 0xb3, 0xfa, + 0xf5, 0x52, 0x0f, 0x87, 0x91, 0xce, 0x29, 0xb2, 0xa1, 0xb9, 0x8f, 0x33, 0x26, 0x94, 0xa3, 0x13, + 0xe0, 0xb5, 0xa2, 0x9b, 0x43, 0x6f, 0xa0, 0xb1, 0xe5, 0x82, 0x3a, 0x06, 0x01, 0x5e, 0xe7, 0x5d, + 0xd7, 0x7f, 0x7c, 0xb2, 0x3f, 0xe6, 0x82, 0x46, 0x0d, 0x59, 0xff, 0xc4, 0x85, 0xa4, 0x2c, 0x77, + 0x4c, 0x02, 0xbc, 0xbb, 0xe8, 0xe6, 0x10, 0x82, 0x46, 0xce, 0x3f, 0x31, 0xe7, 0xbe, 0x49, 0x1b, + 0xfd, 0xfa, 0x04, 0xa0, 0x51, 0x3f, 0x45, 0x5d, 0x78, 0xbf, 0x98, 0x8e, 0xa7, 0xb3, 0xd5, 0xd4, + 0xd2, 0xdc, 0x67, 0x45, 0x49, 0x9e, 0xd4, 0xf1, 0x42, 0x6c, 0x85, 0x3c, 0x0a, 0x64, 0x43, 0x63, + 0x19, 0x8e, 0x56, 0x16, 0x70, 0xdb, 0x45, 0x49, 0x1e, 0xea, 0x6a, 0xc9, 0xd9, 0x11, 0xb9, 0xd0, + 0xec, 0x0d, 0xe6, 0xe1, 0x72, 0x64, 0xe9, 0x6e, 0xa7, 0x28, 0x09, 0xac, 0x9b, 0x5e, 0xa2, 0xf8, + 0x81, 0x21, 0x02, 0x5b, 0x83, 0xd9, 0x64, 0x12, 0xce, 0xe7, 0xa3, 0xa1, 0x75, 0xe7, 0x3e, 0x2f, + 0x4a, 0xf2, 0xb4, 0xae, 0x07, 0x72, 0xb7, 0xe3, 0x4a, 0x31, 0xea, 0xb6, 0x4f, 0x5f, 0xb1, 0xf6, + 0xfd, 0x1b, 0x6e, 0x36, 0xe8, 0x3b, 0xe7, 0x0b, 0xd6, 0x7e, 0x5e, 0xb0, 0xf6, 0xb9, 0xc2, 0xe0, + 0x5c, 0x61, 0xf0, 0xa3, 0xc2, 0xe0, 0x77, 0x85, 0xc1, 0xda, 0x6c, 0xce, 0xf8, 0xfe, 0x4f, 0x00, + 0x00, 0x00, 0xff, 0xff, 0x61, 0xef, 0x92, 0x3d, 0xbc, 0x01, 0x00, 0x00, } diff --git a/vendor/github.com/containerd/containerd/snapshot/storage/proto/record.proto b/vendor/github.com/containerd/containerd/snapshot/storage/proto/record.proto index feb48601..50a462a0 100644 --- a/vendor/github.com/containerd/containerd/snapshot/storage/proto/record.proto +++ b/vendor/github.com/containerd/containerd/snapshot/storage/proto/record.proto @@ -4,16 +4,14 @@ package containerd.snapshot.v1; import "gogoproto/gogo.proto"; -// Kind defines the kind of snapshot. 
enum Kind { option (gogoproto.goproto_enum_prefix) = false; option (gogoproto.enum_customname) = "Kind"; - // KindActive represents an active snapshot - ACTIVE = 0 [(gogoproto.enumvalue_customname) = "KindActive"]; - - // KindCommitted represents a committed immutable snapshot - COMMITTED = 1 [(gogoproto.enumvalue_customname) = "KindCommitted"]; + UNKNOWN = 0 [(gogoproto.enumvalue_customname) = "KindUnknown"]; + VIEW = 1 [(gogoproto.enumvalue_customname) = "KindView"]; + ACTIVE = 2 [(gogoproto.enumvalue_customname) = "KindActive"]; + COMMITTED = 3 [(gogoproto.enumvalue_customname) = "KindCommitted"]; } // Snapshot defines the storage type for a snapshot in the @@ -22,7 +20,6 @@ message Snapshot { uint64 id = 1; string parent = 2; Kind kind = 4; - bool readonly = 5; // inodes stores the number inodes in use for the snapshot. // diff --git a/vendor/github.com/containerd/containerd/spec_windows.go b/vendor/github.com/containerd/containerd/spec_windows.go index 9341384b..7e0f5cfd 100644 --- a/vendor/github.com/containerd/containerd/spec_windows.go +++ b/vendor/github.com/containerd/containerd/spec_windows.go @@ -12,18 +12,23 @@ import ( specs "github.com/opencontainers/runtime-spec/specs-go" ) -const pipeRoot = `\\.\pipe` - func createDefaultSpec() (*specs.Spec, error) { return &specs.Spec{ Version: specs.Version, Root: &specs.Root{}, Process: &specs.Process{ + Cwd: `C:\`, ConsoleSize: &specs.Box{ Width: 80, Height: 20, }, }, + Windows: &specs.Windows{ + IgnoreFlushesDuringBoot: true, + Network: &specs.WindowsNetwork{ + AllowUnqualifiedDNSQuery: true, + }, + }, }, nil } diff --git a/vendor/github.com/containerd/containerd/sys/prctl.go b/vendor/github.com/containerd/containerd/sys/prctl.go index 63c418db..aa1a4ad3 100644 --- a/vendor/github.com/containerd/containerd/sys/prctl.go +++ b/vendor/github.com/containerd/containerd/sys/prctl.go @@ -1,7 +1,6 @@ // +build linux -// Package osutils provide access to the Get Child and Set Child prctl -// flags. +// Package sys provides access to the Get Child and Set Child prctl flags. // See http://man7.org/linux/man-pages/man2/prctl.2.html package sys @@ -11,30 +10,14 @@ import ( "golang.org/x/sys/unix" ) -// PR_SET_CHILD_SUBREAPER allows setting the child subreaper. -// If arg2 is nonzero, set the "child subreaper" attribute of the -// calling process; if arg2 is zero, unset the attribute. When a -// process is marked as a child subreaper, all of the children -// that it creates, and their descendants, will be marked as -// having a subreaper. In effect, a subreaper fulfills the role -// of init(1) for its descendant processes. Upon termination of -// a process that is orphaned (i.e., its immediate parent has -// already terminated) and marked as having a subreaper, the -// nearest still living ancestor subreaper will receive a SIGCHLD -// signal and be able to wait(2) on the process to discover its -// termination status. -const prSetChildSubreaper = 36 - -// PR_GET_CHILD_SUBREAPER allows retrieving the current child -// subreaper. -// Returns the "child subreaper" setting of the caller, in the -// location pointed to by (int *) arg2. -const prGetChildSubreaper = 37 - // GetSubreaper returns the subreaper setting for the calling process func GetSubreaper() (int, error) { var i uintptr - if _, _, err := unix.RawSyscall(unix.SYS_PRCTL, prGetChildSubreaper, uintptr(unsafe.Pointer(&i)), 0); err != 0 { + // PR_GET_CHILD_SUBREAPER allows retrieving the current child + // subreaper. 
+ // Returns the "child subreaper" setting of the caller, in the + // location pointed to by (int *) arg2. + if err := unix.Prctl(unix.PR_GET_CHILD_SUBREAPER, uintptr(unsafe.Pointer(&i)), 0, 0, 0); err != nil { return -1, err } return int(i), nil @@ -42,8 +25,17 @@ func GetSubreaper() (int, error) { // SetSubreaper sets the value i as the subreaper setting for the calling process func SetSubreaper(i int) error { - if _, _, err := unix.RawSyscall(unix.SYS_PRCTL, prSetChildSubreaper, uintptr(i), 0); err != 0 { - return err - } - return nil + // PR_SET_CHILD_SUBREAPER allows setting the child subreaper. + // If arg2 is nonzero, set the "child subreaper" attribute of the + // calling process; if arg2 is zero, unset the attribute. When a + // process is marked as a child subreaper, all of the children + // that it creates, and their descendants, will be marked as + // having a subreaper. In effect, a subreaper fulfills the role + // of init(1) for its descendant processes. Upon termination of + // a process that is orphaned (i.e., its immediate parent has + // already terminated) and marked as having a subreaper, the + // nearest still living ancestor subreaper will receive a SIGCHLD + // signal and be able to wait(2) on the process to discover its + // termination status. + return unix.Prctl(unix.PR_SET_CHILD_SUBREAPER, uintptr(i), 0, 0, 0) } diff --git a/vendor/github.com/containerd/containerd/sys/socket_unix.go b/vendor/github.com/containerd/containerd/sys/socket_unix.go index cd361c25..0d5f049a 100644 --- a/vendor/github.com/containerd/containerd/sys/socket_unix.go +++ b/vendor/github.com/containerd/containerd/sys/socket_unix.go @@ -23,6 +23,11 @@ func CreateUnixSocket(path string) (net.Listener, error) { // GetLocalListener returns a listerner out of a unix socket. 
func GetLocalListener(path string, uid, gid int) (net.Listener, error) { + // Ensure parent directory is created + if err := mkdirAs(filepath.Dir(path), uid, gid); err != nil { + return nil, err + } + l, err := CreateUnixSocket(path) if err != nil { return l, err @@ -40,3 +45,15 @@ func GetLocalListener(path string, uid, gid int) (net.Listener, error) { return l, nil } + +func mkdirAs(path string, uid, gid int) error { + if _, err := os.Stat(path); err == nil || !os.IsNotExist(err) { + return err + } + + if err := os.Mkdir(path, 0770); err != nil { + return err + } + + return os.Chown(path, uid, gid) +} diff --git a/vendor/github.com/containerd/containerd/task.go b/vendor/github.com/containerd/containerd/task.go index ab247dfb..015c8cc7 100644 --- a/vendor/github.com/containerd/containerd/task.go +++ b/vendor/github.com/containerd/containerd/task.go @@ -14,14 +14,15 @@ import ( "github.com/containerd/containerd/api/services/tasks/v1" "github.com/containerd/containerd/api/types" "github.com/containerd/containerd/content" + "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/linux/runcopts" + "github.com/containerd/containerd/mount" "github.com/containerd/containerd/rootfs" - "github.com/containerd/containerd/runtime" "github.com/containerd/containerd/typeurl" digest "github.com/opencontainers/go-digest" "github.com/opencontainers/image-spec/specs-go/v1" specs "github.com/opencontainers/runtime-spec/specs-go" - "google.golang.org/grpc" + "github.com/pkg/errors" ) const UnknownExitStatus = 255 @@ -55,6 +56,7 @@ type CheckpointTaskOpts func(*CheckpointTaskInfo) error type TaskInfo struct { Checkpoint *types.Descriptor + RootFS []mount.Mount Options interface{} } @@ -130,10 +132,7 @@ func (t *task) Kill(ctx context.Context, s syscall.Signal) error { ContainerID: t.id, }) if err != nil { - if strings.Contains(grpc.ErrorDesc(err), runtime.ErrProcessExited.Error()) { - return ErrProcessExited - } - return err + return errdefs.FromGRPC(err) } return nil } @@ -142,14 +141,14 @@ func (t *task) Pause(ctx context.Context) error { _, err := t.client.TaskService().Pause(ctx, &tasks.PauseTaskRequest{ ContainerID: t.id, }) - return err + return errdefs.FromGRPC(err) } func (t *task) Resume(ctx context.Context) error { _, err := t.client.TaskService().Resume(ctx, &tasks.ResumeTaskRequest{ ContainerID: t.id, }) - return err + return errdefs.FromGRPC(err) } func (t *task) Status(ctx context.Context) (TaskStatus, error) { @@ -157,7 +156,7 @@ func (t *task) Status(ctx context.Context) (TaskStatus, error) { ContainerID: t.id, }) if err != nil { - return "", err + return "", errdefs.FromGRPC(err) } return TaskStatus(strings.ToLower(r.Task.Status.String())), nil } @@ -166,7 +165,7 @@ func (t *task) Status(ctx context.Context) (TaskStatus, error) { func (t *task) Wait(ctx context.Context) (uint32, error) { eventstream, err := t.client.EventService().Stream(ctx, &eventsapi.StreamEventsRequest{}) if err != nil { - return UnknownExitStatus, err + return UnknownExitStatus, errdefs.FromGRPC(err) } for { evt, err := eventstream.Recv() @@ -191,6 +190,7 @@ func (t *task) Wait(ctx context.Context) (uint32, error) { // during cleanup func (t *task) Delete(ctx context.Context) (uint32, error) { if t.io != nil { + t.io.Cancel() t.io.Wait() t.io.Close() } @@ -205,9 +205,9 @@ func (t *task) Delete(ctx context.Context) (uint32, error) { func (t *task) Exec(ctx context.Context, id string, spec *specs.Process, ioCreate IOCreation) (Process, error) { if id == "" { - return nil, ErrNoExecID + return nil, 
errors.Wrapf(errdefs.ErrInvalidArgument, "exec id must not be empty") } - i, err := ioCreate() + i, err := ioCreate(id) if err != nil { return nil, err } diff --git a/vendor/github.com/containerd/containerd/typeurl/types.go b/vendor/github.com/containerd/containerd/typeurl/types.go index a93a5481..e12b8a3b 100644 --- a/vendor/github.com/containerd/containerd/typeurl/types.go +++ b/vendor/github.com/containerd/containerd/typeurl/types.go @@ -10,6 +10,7 @@ import ( "github.com/containerd/containerd/errdefs" "github.com/gogo/protobuf/proto" "github.com/gogo/protobuf/types" + "github.com/pkg/errors" ) const Prefix = "types.containerd.io" @@ -21,13 +22,19 @@ var ( // Register a type with the base url of the type func Register(v interface{}, args ...string) { - t := tryDereference(v) + var ( + t = tryDereference(v) + p = path.Join(append([]string{Prefix}, args...)...) + ) mu.Lock() defer mu.Unlock() - if _, ok := registry[t]; ok { - panic(errdefs.ErrAlreadyExists) + if et, ok := registry[t]; ok { + if et != p { + panic(errors.Errorf("type registred with alternate path %q != %q", et, p)) + } + return } - registry[t] = path.Join(append([]string{Prefix}, args...)...) + registry[t] = p } // TypeURL returns the type url for a registred type @@ -39,7 +46,7 @@ func TypeURL(v interface{}) (string, error) { // fallback to the proto registry if it is a proto message pb, ok := v.(proto.Message) if !ok { - return "", errdefs.ErrNotFound + return "", errors.Wrapf(errdefs.ErrNotFound, "type %s", reflect.TypeOf(v)) } return path.Join(Prefix, proto.MessageName(pb)), nil } @@ -116,7 +123,7 @@ func getTypeByUrl(url string) (urlType, error) { isProto: true, }, nil } - return urlType{}, errdefs.ErrNotFound + return urlType{}, errors.Wrapf(errdefs.ErrNotFound, "type with url %s", url) } func tryDereference(v interface{}) reflect.Type { diff --git a/vendor/github.com/containerd/containerd/vendor.conf b/vendor/github.com/containerd/containerd/vendor.conf index b4251bdf..8676d22d 100644 --- a/vendor/github.com/containerd/containerd/vendor.conf +++ b/vendor/github.com/containerd/containerd/vendor.conf @@ -3,7 +3,7 @@ github.com/containerd/go-runc 2774a2ea124a5c2d0aba13b5c2dd8a5a9a48775d github.com/containerd/console 7fed77e673ca4abcd0cbd6d4d0e0e22137cbd778 github.com/containerd/cgroups 4fd64a776f25b5540cddcb72eea6e35e58baca6e github.com/docker/go-metrics 8fd5772bf1584597834c6f7961a530f06cbfbb87 -github.com/docker/go-events aa2e3b613fbbfdddbe055a7b9e3ce271cfd83eca +github.com/docker/go-events 9461782956ad83b30282bf90e31fa6a70c255ba9 github.com/godbus/dbus c7fdd8b5cd55e87b4e1f4e372cdb1db61dd6c66f github.com/prometheus/client_golang v0.8.0 github.com/prometheus/client_model fa8ad6fec33561be4280a8f0514318c79d7f6cb6 @@ -16,12 +16,12 @@ github.com/gogo/protobuf d2e1ade2d719b78fe5b061b4c18a9f7111b5bdc8 github.com/golang/protobuf 5a0f697c9ed9d68fef0116532c6e05cfeae00e55 github.com/opencontainers/runtime-spec 96de01bbb42c7af89bff100e10a9f0fb62e75bfb github.com/opencontainers/runc 429a5387123625040bacfbb60d96b1cbd02293ab -github.com/Sirupsen/logrus v0.11.0 +github.com/sirupsen/logrus v1.0.0 github.com/containerd/btrfs e9c546f46bccffefe71a6bc137e4c21b5503cc18 github.com/stretchr/testify v1.1.4 github.com/davecgh/go-spew v1.1.0 github.com/pmezard/go-difflib v1.0.0 -github.com/containerd/fifo 69b99525e472735860a5269b75af1970142b3062 +github.com/containerd/fifo fbfb6a11ec671efbe94ad1c12c2e98773f19e1e6 github.com/urfave/cli 8ba6f23b6e36d03666a14bd9421f5e3efcb59aca golang.org/x/net 7dcfb8076726a3fdd9353b6b8a1f1b6be6811bd6 
google.golang.org/grpc v1.3.0 @@ -33,9 +33,10 @@ github.com/containerd/continuity 86cec1535a968310e7532819f699ff2830ed7463 golang.org/x/sync 450f422ab23cf9881c94e2db30cac0eb1b7cf80c github.com/BurntSushi/toml v0.2.0-21-g9906417 github.com/grpc-ecosystem/go-grpc-prometheus 6b7015e65d366bf3f19b2b2a000a831940f0f7e0 -github.com/Microsoft/go-winio v0.4.1 +github.com/Microsoft/go-winio v0.4.4 github.com/boltdb/bolt e9cf4fae01b5a8ff89d0ec6b32f0d9c9f79aefdd -github.com/Microsoft/hcsshim v0.5.15 -github.com/Azure/go-ansiterm fa152c58bc15761d0200cb75fe958b89a9d4888e +github.com/Microsoft/hcsshim v0.6.1 +github.com/Azure/go-ansiterm 19f72df4d05d31cbe1c56bfc8045c96babff6c7e google.golang.org/genproto d80a6e20e776b0b17a324d0ba1ab50a39c8e8944 golang.org/x/text 19e51611da83d6be54ddafce4a4af510cb3e9ea4 +github.com/dmcgowan/go-tar 2e2c51242e8993c50445dab7c03c8e7febddd0cf diff --git a/vendor/github.com/dmcgowan/go-tar/LICENSE b/vendor/github.com/dmcgowan/go-tar/LICENSE new file mode 100644 index 00000000..6a66aea5 --- /dev/null +++ b/vendor/github.com/dmcgowan/go-tar/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/dmcgowan/go-tar/common.go b/vendor/github.com/dmcgowan/go-tar/common.go new file mode 100644 index 00000000..d2ae66d5 --- /dev/null +++ b/vendor/github.com/dmcgowan/go-tar/common.go @@ -0,0 +1,286 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package tar implements access to tar archives. +// It aims to cover most of the variations, including those produced +// by GNU and BSD tars. +// +// References: +// http://www.freebsd.org/cgi/man.cgi?query=tar&sektion=5 +// http://www.gnu.org/software/tar/manual/html_node/Standard.html +// http://pubs.opengroup.org/onlinepubs/9699919799/utilities/pax.html +package tar + +import ( + "errors" + "fmt" + "os" + "path" + "time" +) + +// BUG: Use of the Uid and Gid fields in Header could overflow on 32-bit +// architectures. 
If a large value is encountered when decoding, the result +// stored in Header will be the truncated version. + +// Header type flags. +const ( + TypeReg = '0' // regular file + TypeRegA = '\x00' // regular file + TypeLink = '1' // hard link + TypeSymlink = '2' // symbolic link + TypeChar = '3' // character device node + TypeBlock = '4' // block device node + TypeDir = '5' // directory + TypeFifo = '6' // fifo node + TypeCont = '7' // reserved + TypeXHeader = 'x' // extended header + TypeXGlobalHeader = 'g' // global extended header + TypeGNULongName = 'L' // Next file has a long name + TypeGNULongLink = 'K' // Next file symlinks to a file w/ a long name + TypeGNUSparse = 'S' // sparse file +) + +// A Header represents a single header in a tar archive. +// Some fields may not be populated. +type Header struct { + Name string // name of header file entry + Mode int64 // permission and mode bits + Uid int // user id of owner + Gid int // group id of owner + Size int64 // length in bytes + ModTime time.Time // modified time + Typeflag byte // type of header entry + Linkname string // target name of link + Uname string // user name of owner + Gname string // group name of owner + Devmajor int64 // major number of character or block device + Devminor int64 // minor number of character or block device + AccessTime time.Time // access time + ChangeTime time.Time // status change time + Xattrs map[string]string +} + +// FileInfo returns an os.FileInfo for the Header. +func (h *Header) FileInfo() os.FileInfo { + return headerFileInfo{h} +} + +// headerFileInfo implements os.FileInfo. +type headerFileInfo struct { + h *Header +} + +func (fi headerFileInfo) Size() int64 { return fi.h.Size } +func (fi headerFileInfo) IsDir() bool { return fi.Mode().IsDir() } +func (fi headerFileInfo) ModTime() time.Time { return fi.h.ModTime } +func (fi headerFileInfo) Sys() interface{} { return fi.h } + +// Name returns the base name of the file. +func (fi headerFileInfo) Name() string { + if fi.IsDir() { + return path.Base(path.Clean(fi.h.Name)) + } + return path.Base(fi.h.Name) +} + +// Mode returns the permission and mode bits for the headerFileInfo. +func (fi headerFileInfo) Mode() (mode os.FileMode) { + // Set file permission bits. + mode = os.FileMode(fi.h.Mode).Perm() + + // Set setuid, setgid and sticky bits. + if fi.h.Mode&c_ISUID != 0 { + // setuid + mode |= os.ModeSetuid + } + if fi.h.Mode&c_ISGID != 0 { + // setgid + mode |= os.ModeSetgid + } + if fi.h.Mode&c_ISVTX != 0 { + // sticky + mode |= os.ModeSticky + } + + // Set file mode bits. + // clear perm, setuid, setgid and sticky bits. 
+ m := os.FileMode(fi.h.Mode) &^ 07777 + if m == c_ISDIR { + // directory + mode |= os.ModeDir + } + if m == c_ISFIFO { + // named pipe (FIFO) + mode |= os.ModeNamedPipe + } + if m == c_ISLNK { + // symbolic link + mode |= os.ModeSymlink + } + if m == c_ISBLK { + // device file + mode |= os.ModeDevice + } + if m == c_ISCHR { + // Unix character device + mode |= os.ModeDevice + mode |= os.ModeCharDevice + } + if m == c_ISSOCK { + // Unix domain socket + mode |= os.ModeSocket + } + + switch fi.h.Typeflag { + case TypeSymlink: + // symbolic link + mode |= os.ModeSymlink + case TypeChar: + // character device node + mode |= os.ModeDevice + mode |= os.ModeCharDevice + case TypeBlock: + // block device node + mode |= os.ModeDevice + case TypeDir: + // directory + mode |= os.ModeDir + case TypeFifo: + // fifo node + mode |= os.ModeNamedPipe + } + + return mode +} + +// sysStat, if non-nil, populates h from system-dependent fields of fi. +var sysStat func(fi os.FileInfo, h *Header) error + +// Mode constants from the tar spec. +const ( + c_ISUID = 04000 // Set uid + c_ISGID = 02000 // Set gid + c_ISVTX = 01000 // Save text (sticky bit) + c_ISDIR = 040000 // Directory + c_ISFIFO = 010000 // FIFO + c_ISREG = 0100000 // Regular file + c_ISLNK = 0120000 // Symbolic link + c_ISBLK = 060000 // Block special file + c_ISCHR = 020000 // Character special file + c_ISSOCK = 0140000 // Socket +) + +// Keywords for the PAX Extended Header +const ( + paxAtime = "atime" + paxCharset = "charset" + paxComment = "comment" + paxCtime = "ctime" // please note that ctime is not a valid pax header. + paxGid = "gid" + paxGname = "gname" + paxLinkpath = "linkpath" + paxMtime = "mtime" + paxPath = "path" + paxSize = "size" + paxUid = "uid" + paxUname = "uname" + paxXattr = "SCHILY.xattr." + paxNone = "" +) + +// FileInfoHeader creates a partially-populated Header from fi. +// If fi describes a symlink, FileInfoHeader records link as the link target. +// If fi describes a directory, a slash is appended to the name. +// Because os.FileInfo's Name method returns only the base name of +// the file it describes, it may be necessary to modify the Name field +// of the returned header to provide the full path name of the file. +func FileInfoHeader(fi os.FileInfo, link string) (*Header, error) { + if fi == nil { + return nil, errors.New("tar: FileInfo is nil") + } + fm := fi.Mode() + h := &Header{ + Name: fi.Name(), + ModTime: fi.ModTime(), + Mode: int64(fm.Perm()), // or'd with c_IS* constants later + } + switch { + case fm.IsRegular(): + h.Mode |= c_ISREG + h.Typeflag = TypeReg + h.Size = fi.Size() + case fi.IsDir(): + h.Typeflag = TypeDir + h.Mode |= c_ISDIR + h.Name += "/" + case fm&os.ModeSymlink != 0: + h.Typeflag = TypeSymlink + h.Mode |= c_ISLNK + h.Linkname = link + case fm&os.ModeDevice != 0: + if fm&os.ModeCharDevice != 0 { + h.Mode |= c_ISCHR + h.Typeflag = TypeChar + } else { + h.Mode |= c_ISBLK + h.Typeflag = TypeBlock + } + case fm&os.ModeNamedPipe != 0: + h.Typeflag = TypeFifo + h.Mode |= c_ISFIFO + case fm&os.ModeSocket != 0: + h.Mode |= c_ISSOCK + default: + return nil, fmt.Errorf("archive/tar: unknown file mode %v", fm) + } + if fm&os.ModeSetuid != 0 { + h.Mode |= c_ISUID + } + if fm&os.ModeSetgid != 0 { + h.Mode |= c_ISGID + } + if fm&os.ModeSticky != 0 { + h.Mode |= c_ISVTX + } + // If possible, populate additional fields from OS-specific + // FileInfo fields. + if sys, ok := fi.Sys().(*Header); ok { + // This FileInfo came from a Header (not the OS). 
Use the + // original Header to populate all remaining fields. + h.Uid = sys.Uid + h.Gid = sys.Gid + h.Uname = sys.Uname + h.Gname = sys.Gname + h.AccessTime = sys.AccessTime + h.ChangeTime = sys.ChangeTime + if sys.Xattrs != nil { + h.Xattrs = make(map[string]string) + for k, v := range sys.Xattrs { + h.Xattrs[k] = v + } + } + if sys.Typeflag == TypeLink { + // hard link + h.Typeflag = TypeLink + h.Size = 0 + h.Linkname = sys.Linkname + } + } + if sysStat != nil { + return h, sysStat(fi, h) + } + return h, nil +} + +// isHeaderOnlyType checks if the given type flag is of the type that has no +// data section even if a size is specified. +func isHeaderOnlyType(flag byte) bool { + switch flag { + case TypeLink, TypeSymlink, TypeChar, TypeBlock, TypeDir, TypeFifo: + return true + default: + return false + } +} diff --git a/vendor/github.com/dmcgowan/go-tar/format.go b/vendor/github.com/dmcgowan/go-tar/format.go new file mode 100644 index 00000000..c2c9910d --- /dev/null +++ b/vendor/github.com/dmcgowan/go-tar/format.go @@ -0,0 +1,197 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package tar + +// Constants to identify various tar formats. +const ( + // The format is unknown. + formatUnknown = (1 << iota) / 2 // Sequence of 0, 1, 2, 4, 8, etc... + + // The format of the original Unix V7 tar tool prior to standardization. + formatV7 + + // The old and new GNU formats, which are incompatible with USTAR. + // This does cover the old GNU sparse extension. + // This does not cover the GNU sparse extensions using PAX headers, + // versions 0.0, 0.1, and 1.0; these fall under the PAX format. + formatGNU + + // Schily's tar format, which is incompatible with USTAR. + // This does not cover STAR extensions to the PAX format; these fall under + // the PAX format. + formatSTAR + + // USTAR is the former standardization of tar defined in POSIX.1-1988. + // This is incompatible with the GNU and STAR formats. + formatUSTAR + + // PAX is the latest standardization of tar defined in POSIX.1-2001. + // This is an extension of USTAR and is "backwards compatible" with it. + // + // Some newer formats add their own extensions to PAX, such as GNU sparse + // files and SCHILY extended attributes. Since they are backwards compatible + // with PAX, they will be labelled as "PAX". + formatPAX +) + +// Magics used to identify various formats. +const ( + magicGNU, versionGNU = "ustar ", " \x00" + magicUSTAR, versionUSTAR = "ustar\x00", "00" + trailerSTAR = "tar\x00" +) + +// Size constants from various tar specifications. +const ( + blockSize = 512 // Size of each block in a tar stream + nameSize = 100 // Max length of the name field in USTAR format + prefixSize = 155 // Max length of the prefix field in USTAR format +) + +var zeroBlock block + +type block [blockSize]byte + +// Convert block to any number of formats. +func (b *block) V7() *headerV7 { return (*headerV7)(b) } +func (b *block) GNU() *headerGNU { return (*headerGNU)(b) } +func (b *block) STAR() *headerSTAR { return (*headerSTAR)(b) } +func (b *block) USTAR() *headerUSTAR { return (*headerUSTAR)(b) } +func (b *block) Sparse() sparseArray { return (sparseArray)(b[:]) } + +// GetFormat checks that the block is a valid tar header based on the checksum. +// It then attempts to guess the specific format based on magic values. +// If the checksum fails, then formatUnknown is returned. 
+func (b *block) GetFormat() (format int) { + // Verify checksum. + var p parser + value := p.parseOctal(b.V7().Chksum()) + chksum1, chksum2 := b.ComputeChecksum() + if p.err != nil || (value != chksum1 && value != chksum2) { + return formatUnknown + } + + // Guess the magic values. + magic := string(b.USTAR().Magic()) + version := string(b.USTAR().Version()) + trailer := string(b.STAR().Trailer()) + switch { + case magic == magicUSTAR && trailer == trailerSTAR: + return formatSTAR + case magic == magicUSTAR: + return formatUSTAR + case magic == magicGNU && version == versionGNU: + return formatGNU + default: + return formatV7 + } +} + +// SetFormat writes the magic values necessary for specified format +// and then updates the checksum accordingly. +func (b *block) SetFormat(format int) { + // Set the magic values. + switch format { + case formatV7: + // Do nothing. + case formatGNU: + copy(b.GNU().Magic(), magicGNU) + copy(b.GNU().Version(), versionGNU) + case formatSTAR: + copy(b.STAR().Magic(), magicUSTAR) + copy(b.STAR().Version(), versionUSTAR) + copy(b.STAR().Trailer(), trailerSTAR) + case formatUSTAR, formatPAX: + copy(b.USTAR().Magic(), magicUSTAR) + copy(b.USTAR().Version(), versionUSTAR) + default: + panic("invalid format") + } + + // Update checksum. + // This field is special in that it is terminated by a NULL then space. + var f formatter + field := b.V7().Chksum() + chksum, _ := b.ComputeChecksum() // Possible values are 256..128776 + f.formatOctal(field[:7], chksum) // Never fails since 128776 < 262143 + field[7] = ' ' +} + +// ComputeChecksum computes the checksum for the header block. +// POSIX specifies a sum of the unsigned byte values, but the Sun tar used +// signed byte values. +// We compute and return both. +func (b *block) ComputeChecksum() (unsigned, signed int64) { + for i, c := range b { + if 148 <= i && i < 156 { + c = ' ' // Treat the checksum field itself as all spaces. 
+ } + unsigned += int64(uint8(c)) + signed += int64(int8(c)) + } + return unsigned, signed +} + +type headerV7 [blockSize]byte + +func (h *headerV7) Name() []byte { return h[000:][:100] } +func (h *headerV7) Mode() []byte { return h[100:][:8] } +func (h *headerV7) UID() []byte { return h[108:][:8] } +func (h *headerV7) GID() []byte { return h[116:][:8] } +func (h *headerV7) Size() []byte { return h[124:][:12] } +func (h *headerV7) ModTime() []byte { return h[136:][:12] } +func (h *headerV7) Chksum() []byte { return h[148:][:8] } +func (h *headerV7) TypeFlag() []byte { return h[156:][:1] } +func (h *headerV7) LinkName() []byte { return h[157:][:100] } + +type headerGNU [blockSize]byte + +func (h *headerGNU) V7() *headerV7 { return (*headerV7)(h) } +func (h *headerGNU) Magic() []byte { return h[257:][:6] } +func (h *headerGNU) Version() []byte { return h[263:][:2] } +func (h *headerGNU) UserName() []byte { return h[265:][:32] } +func (h *headerGNU) GroupName() []byte { return h[297:][:32] } +func (h *headerGNU) DevMajor() []byte { return h[329:][:8] } +func (h *headerGNU) DevMinor() []byte { return h[337:][:8] } +func (h *headerGNU) AccessTime() []byte { return h[345:][:12] } +func (h *headerGNU) ChangeTime() []byte { return h[357:][:12] } +func (h *headerGNU) Sparse() sparseArray { return (sparseArray)(h[386:][:24*4+1]) } +func (h *headerGNU) RealSize() []byte { return h[483:][:12] } + +type headerSTAR [blockSize]byte + +func (h *headerSTAR) V7() *headerV7 { return (*headerV7)(h) } +func (h *headerSTAR) Magic() []byte { return h[257:][:6] } +func (h *headerSTAR) Version() []byte { return h[263:][:2] } +func (h *headerSTAR) UserName() []byte { return h[265:][:32] } +func (h *headerSTAR) GroupName() []byte { return h[297:][:32] } +func (h *headerSTAR) DevMajor() []byte { return h[329:][:8] } +func (h *headerSTAR) DevMinor() []byte { return h[337:][:8] } +func (h *headerSTAR) Prefix() []byte { return h[345:][:131] } +func (h *headerSTAR) AccessTime() []byte { return h[476:][:12] } +func (h *headerSTAR) ChangeTime() []byte { return h[488:][:12] } +func (h *headerSTAR) Trailer() []byte { return h[508:][:4] } + +type headerUSTAR [blockSize]byte + +func (h *headerUSTAR) V7() *headerV7 { return (*headerV7)(h) } +func (h *headerUSTAR) Magic() []byte { return h[257:][:6] } +func (h *headerUSTAR) Version() []byte { return h[263:][:2] } +func (h *headerUSTAR) UserName() []byte { return h[265:][:32] } +func (h *headerUSTAR) GroupName() []byte { return h[297:][:32] } +func (h *headerUSTAR) DevMajor() []byte { return h[329:][:8] } +func (h *headerUSTAR) DevMinor() []byte { return h[337:][:8] } +func (h *headerUSTAR) Prefix() []byte { return h[345:][:155] } + +type sparseArray []byte + +func (s sparseArray) Entry(i int) sparseNode { return (sparseNode)(s[i*24:]) } +func (s sparseArray) IsExtended() []byte { return s[24*s.MaxEntries():][:1] } +func (s sparseArray) MaxEntries() int { return len(s) / 24 } + +type sparseNode []byte + +func (s sparseNode) Offset() []byte { return s[00:][:12] } +func (s sparseNode) NumBytes() []byte { return s[12:][:12] } diff --git a/vendor/github.com/dmcgowan/go-tar/reader.go b/vendor/github.com/dmcgowan/go-tar/reader.go new file mode 100644 index 00000000..a6142c6b --- /dev/null +++ b/vendor/github.com/dmcgowan/go-tar/reader.go @@ -0,0 +1,800 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package tar + +// TODO(dsymonds): +// - pax extensions + +import ( + "bytes" + "errors" + "io" + "io/ioutil" + "math" + "strconv" + "strings" + "time" +) + +var ( + ErrHeader = errors.New("archive/tar: invalid tar header") +) + +// A Reader provides sequential access to the contents of a tar archive. +// A tar archive consists of a sequence of files. +// The Next method advances to the next file in the archive (including the first), +// and then it can be treated as an io.Reader to access the file's data. +type Reader struct { + r io.Reader + pad int64 // amount of padding (ignored) after current file entry + curr numBytesReader // reader for current file entry + blk block // buffer to use as temporary local storage + + // err is a persistent error. + // It is only the responsibility of every exported method of Reader to + // ensure that this error is sticky. + err error +} + +// A numBytesReader is an io.Reader with a numBytes method, returning the number +// of bytes remaining in the underlying encoded data. +type numBytesReader interface { + io.Reader + numBytes() int64 +} + +// A regFileReader is a numBytesReader for reading file data from a tar archive. +type regFileReader struct { + r io.Reader // underlying reader + nb int64 // number of unread bytes for current file entry +} + +// A sparseFileReader is a numBytesReader for reading sparse file data from a +// tar archive. +type sparseFileReader struct { + rfr numBytesReader // Reads the sparse-encoded file data + sp []sparseEntry // The sparse map for the file + pos int64 // Keeps track of file position + total int64 // Total size of the file +} + +// A sparseEntry holds a single entry in a sparse file's sparse map. +// +// Sparse files are represented using a series of sparseEntrys. +// Despite the name, a sparseEntry represents an actual data fragment that +// references data found in the underlying archive stream. All regions not +// covered by a sparseEntry are logically filled with zeros. +// +// For example, if the underlying raw file contains the 10-byte data: +// var compactData = "abcdefgh" +// +// And the sparse map has the following entries: +// var sp = []sparseEntry{ +// {offset: 2, numBytes: 5} // Data fragment for [2..7] +// {offset: 18, numBytes: 3} // Data fragment for [18..21] +// } +// +// Then the content of the resulting sparse file with a "real" size of 25 is: +// var sparseData = "\x00"*2 + "abcde" + "\x00"*11 + "fgh" + "\x00"*4 +type sparseEntry struct { + offset int64 // Starting position of the fragment + numBytes int64 // Length of the fragment +} + +// Keywords for GNU sparse files in a PAX extended header +const ( + paxGNUSparseNumBlocks = "GNU.sparse.numblocks" + paxGNUSparseOffset = "GNU.sparse.offset" + paxGNUSparseNumBytes = "GNU.sparse.numbytes" + paxGNUSparseMap = "GNU.sparse.map" + paxGNUSparseName = "GNU.sparse.name" + paxGNUSparseMajor = "GNU.sparse.major" + paxGNUSparseMinor = "GNU.sparse.minor" + paxGNUSparseSize = "GNU.sparse.size" + paxGNUSparseRealSize = "GNU.sparse.realsize" +) + +// NewReader creates a new Reader reading from r. +func NewReader(r io.Reader) *Reader { return &Reader{r: r} } + +// Next advances to the next entry in the tar archive. +// +// io.EOF is returned at the end of the input. 
+func (tr *Reader) Next() (*Header, error) { + if tr.err != nil { + return nil, tr.err + } + hdr, err := tr.next() + tr.err = err + return hdr, err +} + +func (tr *Reader) next() (*Header, error) { + var extHdrs map[string]string + + // Externally, Next iterates through the tar archive as if it is a series of + // files. Internally, the tar format often uses fake "files" to add meta + // data that describes the next file. These meta data "files" should not + // normally be visible to the outside. As such, this loop iterates through + // one or more "header files" until it finds a "normal file". +loop: + for { + if err := tr.skipUnread(); err != nil { + return nil, err + } + hdr, rawHdr, err := tr.readHeader() + if err != nil { + return nil, err + } + if err := tr.handleRegularFile(hdr); err != nil { + return nil, err + } + + // Check for PAX/GNU special headers and files. + switch hdr.Typeflag { + case TypeXHeader: + extHdrs, err = parsePAX(tr) + if err != nil { + return nil, err + } + continue loop // This is a meta header affecting the next header + case TypeGNULongName, TypeGNULongLink: + realname, err := ioutil.ReadAll(tr) + if err != nil { + return nil, err + } + + // Convert GNU extensions to use PAX headers. + if extHdrs == nil { + extHdrs = make(map[string]string) + } + var p parser + switch hdr.Typeflag { + case TypeGNULongName: + extHdrs[paxPath] = p.parseString(realname) + case TypeGNULongLink: + extHdrs[paxLinkpath] = p.parseString(realname) + } + if p.err != nil { + return nil, p.err + } + continue loop // This is a meta header affecting the next header + default: + // The old GNU sparse format is handled here since it is technically + // just a regular file with additional attributes. + + if err := mergePAX(hdr, extHdrs); err != nil { + return nil, err + } + + // The extended headers may have updated the size. + // Thus, setup the regFileReader again after merging PAX headers. + if err := tr.handleRegularFile(hdr); err != nil { + return nil, err + } + + // Sparse formats rely on being able to read from the logical data + // section; there must be a preceding call to handleRegularFile. + if err := tr.handleSparseFile(hdr, rawHdr, extHdrs); err != nil { + return nil, err + } + return hdr, nil // This is a file, so stop + } + } +} + +// handleRegularFile sets up the current file reader and padding such that it +// can only read the following logical data section. It will properly handle +// special headers that contain no data section. +func (tr *Reader) handleRegularFile(hdr *Header) error { + nb := hdr.Size + if isHeaderOnlyType(hdr.Typeflag) { + nb = 0 + } + if nb < 0 { + return ErrHeader + } + + tr.pad = -nb & (blockSize - 1) // blockSize is a power of two + tr.curr = ®FileReader{r: tr.r, nb: nb} + return nil +} + +// handleSparseFile checks if the current file is a sparse format of any type +// and sets the curr reader appropriately. +func (tr *Reader) handleSparseFile(hdr *Header, rawHdr *block, extHdrs map[string]string) error { + var sp []sparseEntry + var err error + if hdr.Typeflag == TypeGNUSparse { + sp, err = tr.readOldGNUSparseMap(hdr, rawHdr) + if err != nil { + return err + } + } else { + sp, err = tr.checkForGNUSparsePAXHeaders(hdr, extHdrs) + if err != nil { + return err + } + } + + // If sp is non-nil, then this is a sparse file. + // Note that it is possible for len(sp) to be zero. + if sp != nil { + tr.curr, err = newSparseFileReader(tr.curr, sp, hdr.Size) + } + return err +} + +// checkForGNUSparsePAXHeaders checks the PAX headers for GNU sparse headers. 
If they are found, then +// this function reads the sparse map and returns it. Unknown sparse formats are ignored, causing the file to +// be treated as a regular file. +func (tr *Reader) checkForGNUSparsePAXHeaders(hdr *Header, headers map[string]string) ([]sparseEntry, error) { + var sparseFormat string + + // Check for sparse format indicators + major, majorOk := headers[paxGNUSparseMajor] + minor, minorOk := headers[paxGNUSparseMinor] + sparseName, sparseNameOk := headers[paxGNUSparseName] + _, sparseMapOk := headers[paxGNUSparseMap] + sparseSize, sparseSizeOk := headers[paxGNUSparseSize] + sparseRealSize, sparseRealSizeOk := headers[paxGNUSparseRealSize] + + // Identify which, if any, sparse format applies from which PAX headers are set + if majorOk && minorOk { + sparseFormat = major + "." + minor + } else if sparseNameOk && sparseMapOk { + sparseFormat = "0.1" + } else if sparseSizeOk { + sparseFormat = "0.0" + } else { + // Not a PAX format GNU sparse file. + return nil, nil + } + + // Check for unknown sparse format + if sparseFormat != "0.0" && sparseFormat != "0.1" && sparseFormat != "1.0" { + return nil, nil + } + + // Update hdr from GNU sparse PAX headers + if sparseNameOk { + hdr.Name = sparseName + } + if sparseSizeOk { + realSize, err := strconv.ParseInt(sparseSize, 10, 64) + if err != nil { + return nil, ErrHeader + } + hdr.Size = realSize + } else if sparseRealSizeOk { + realSize, err := strconv.ParseInt(sparseRealSize, 10, 64) + if err != nil { + return nil, ErrHeader + } + hdr.Size = realSize + } + + // Set up the sparse map, according to the particular sparse format in use + var sp []sparseEntry + var err error + switch sparseFormat { + case "0.0", "0.1": + sp, err = readGNUSparseMap0x1(headers) + case "1.0": + sp, err = readGNUSparseMap1x0(tr.curr) + } + return sp, err +} + +// mergePAX merges well known headers according to PAX standard. +// In general headers with the same name as those found +// in the header struct overwrite those found in the header +// struct with higher precision or longer values. Esp. useful +// for name and linkname fields. +func mergePAX(hdr *Header, headers map[string]string) (err error) { + var id64 int64 + for k, v := range headers { + switch k { + case paxPath: + hdr.Name = v + case paxLinkpath: + hdr.Linkname = v + case paxUname: + hdr.Uname = v + case paxGname: + hdr.Gname = v + case paxUid: + id64, err = strconv.ParseInt(v, 10, 64) + hdr.Uid = int(id64) // Integer overflow possible + case paxGid: + id64, err = strconv.ParseInt(v, 10, 64) + hdr.Gid = int(id64) // Integer overflow possible + case paxAtime: + hdr.AccessTime, err = parsePAXTime(v) + case paxMtime: + hdr.ModTime, err = parsePAXTime(v) + case paxCtime: + hdr.ChangeTime, err = parsePAXTime(v) + case paxSize: + hdr.Size, err = strconv.ParseInt(v, 10, 64) + default: + if strings.HasPrefix(k, paxXattr) { + if hdr.Xattrs == nil { + hdr.Xattrs = make(map[string]string) + } + hdr.Xattrs[k[len(paxXattr):]] = v + } + } + if err != nil { + return ErrHeader + } + } + return nil +} + +// parsePAX parses PAX headers. +// If an extended header (type 'x') is invalid, ErrHeader is returned +func parsePAX(r io.Reader) (map[string]string, error) { + buf, err := ioutil.ReadAll(r) + if err != nil { + return nil, err + } + sbuf := string(buf) + + // For GNU PAX sparse format 0.0 support. + // This function transforms the sparse format 0.0 headers into format 0.1 + // headers since 0.0 headers were not PAX compliant. 
+ var sparseMap []string + + extHdrs := make(map[string]string) + for len(sbuf) > 0 { + key, value, residual, err := parsePAXRecord(sbuf) + if err != nil { + return nil, ErrHeader + } + sbuf = residual + + switch key { + case paxGNUSparseOffset, paxGNUSparseNumBytes: + // Validate sparse header order and value. + if (len(sparseMap)%2 == 0 && key != paxGNUSparseOffset) || + (len(sparseMap)%2 == 1 && key != paxGNUSparseNumBytes) || + strings.Contains(value, ",") { + return nil, ErrHeader + } + sparseMap = append(sparseMap, value) + default: + // According to PAX specification, a value is stored only if it is + // non-empty. Otherwise, the key is deleted. + if len(value) > 0 { + extHdrs[key] = value + } else { + delete(extHdrs, key) + } + } + } + if len(sparseMap) > 0 { + extHdrs[paxGNUSparseMap] = strings.Join(sparseMap, ",") + } + return extHdrs, nil +} + +// skipUnread skips any unread bytes in the existing file entry, as well as any +// alignment padding. It returns io.ErrUnexpectedEOF if any io.EOF is +// encountered in the data portion; it is okay to hit io.EOF in the padding. +// +// Note that this function still works properly even when sparse files are being +// used since numBytes returns the bytes remaining in the underlying io.Reader. +func (tr *Reader) skipUnread() error { + dataSkip := tr.numBytes() // Number of data bytes to skip + totalSkip := dataSkip + tr.pad // Total number of bytes to skip + tr.curr, tr.pad = nil, 0 + + // If possible, Seek to the last byte before the end of the data section. + // Do this because Seek is often lazy about reporting errors; this will mask + // the fact that the tar stream may be truncated. We can rely on the + // io.CopyN done shortly afterwards to trigger any IO errors. + var seekSkipped int64 // Number of bytes skipped via Seek + if sr, ok := tr.r.(io.Seeker); ok && dataSkip > 1 { + // Not all io.Seeker can actually Seek. For example, os.Stdin implements + // io.Seeker, but calling Seek always returns an error and performs + // no action. Thus, we try an innocent seek to the current position + // to see if Seek is really supported. + pos1, err := sr.Seek(0, io.SeekCurrent) + if err == nil { + // Seek seems supported, so perform the real Seek. + pos2, err := sr.Seek(dataSkip-1, io.SeekCurrent) + if err != nil { + return err + } + seekSkipped = pos2 - pos1 + } + } + + copySkipped, err := io.CopyN(ioutil.Discard, tr.r, totalSkip-seekSkipped) + if err == io.EOF && seekSkipped+copySkipped < dataSkip { + err = io.ErrUnexpectedEOF + } + return err +} + +// readHeader reads the next block header and assumes that the underlying reader +// is already aligned to a block boundary. It returns the raw block of the +// header in case further processing is required. +// +// The err will be set to io.EOF only when one of the following occurs: +// * Exactly 0 bytes are read and EOF is hit. +// * Exactly 1 block of zeros is read and EOF is hit. +// * At least 2 blocks of zeros are read. +func (tr *Reader) readHeader() (*Header, *block, error) { + // Two blocks of zero bytes marks the end of the archive. 
+ if _, err := io.ReadFull(tr.r, tr.blk[:]); err != nil { + return nil, nil, err // EOF is okay here; exactly 0 bytes read + } + if bytes.Equal(tr.blk[:], zeroBlock[:]) { + if _, err := io.ReadFull(tr.r, tr.blk[:]); err != nil { + return nil, nil, err // EOF is okay here; exactly 1 block of zeros read + } + if bytes.Equal(tr.blk[:], zeroBlock[:]) { + return nil, nil, io.EOF // normal EOF; exactly 2 block of zeros read + } + return nil, nil, ErrHeader // Zero block and then non-zero block + } + + // Verify the header matches a known format. + format := tr.blk.GetFormat() + if format == formatUnknown { + return nil, nil, ErrHeader + } + + var p parser + hdr := new(Header) + + // Unpack the V7 header. + v7 := tr.blk.V7() + hdr.Name = p.parseString(v7.Name()) + hdr.Mode = p.parseNumeric(v7.Mode()) + hdr.Uid = int(p.parseNumeric(v7.UID())) + hdr.Gid = int(p.parseNumeric(v7.GID())) + hdr.Size = p.parseNumeric(v7.Size()) + hdr.ModTime = time.Unix(p.parseNumeric(v7.ModTime()), 0) + hdr.Typeflag = v7.TypeFlag()[0] + hdr.Linkname = p.parseString(v7.LinkName()) + + // Unpack format specific fields. + if format > formatV7 { + ustar := tr.blk.USTAR() + hdr.Uname = p.parseString(ustar.UserName()) + hdr.Gname = p.parseString(ustar.GroupName()) + if hdr.Typeflag == TypeChar || hdr.Typeflag == TypeBlock { + hdr.Devmajor = p.parseNumeric(ustar.DevMajor()) + hdr.Devminor = p.parseNumeric(ustar.DevMinor()) + } + + var prefix string + switch format { + case formatUSTAR, formatGNU: + // TODO(dsnet): Do not use the prefix field for the GNU format! + // See golang.org/issues/12594 + ustar := tr.blk.USTAR() + prefix = p.parseString(ustar.Prefix()) + case formatSTAR: + star := tr.blk.STAR() + prefix = p.parseString(star.Prefix()) + hdr.AccessTime = time.Unix(p.parseNumeric(star.AccessTime()), 0) + hdr.ChangeTime = time.Unix(p.parseNumeric(star.ChangeTime()), 0) + } + if len(prefix) > 0 { + hdr.Name = prefix + "/" + hdr.Name + } + } + return hdr, &tr.blk, p.err +} + +// readOldGNUSparseMap reads the sparse map from the old GNU sparse format. +// The sparse map is stored in the tar header if it's small enough. +// If it's larger than four entries, then one or more extension headers are used +// to store the rest of the sparse map. +// +// The Header.Size does not reflect the size of any extended headers used. +// Thus, this function will read from the raw io.Reader to fetch extra headers. +// This method mutates blk in the process. +func (tr *Reader) readOldGNUSparseMap(hdr *Header, blk *block) ([]sparseEntry, error) { + // Make sure that the input format is GNU. + // Unfortunately, the STAR format also has a sparse header format that uses + // the same type flag but has a completely different layout. + if blk.GetFormat() != formatGNU { + return nil, ErrHeader + } + + var p parser + hdr.Size = p.parseNumeric(blk.GNU().RealSize()) + if p.err != nil { + return nil, p.err + } + var s sparseArray = blk.GNU().Sparse() + var sp = make([]sparseEntry, 0, s.MaxEntries()) + for { + for i := 0; i < s.MaxEntries(); i++ { + // This termination condition is identical to GNU and BSD tar. + if s.Entry(i).Offset()[0] == 0x00 { + break // Don't return, need to process extended headers (even if empty) + } + offset := p.parseNumeric(s.Entry(i).Offset()) + numBytes := p.parseNumeric(s.Entry(i).NumBytes()) + if p.err != nil { + return nil, p.err + } + sp = append(sp, sparseEntry{offset: offset, numBytes: numBytes}) + } + + if s.IsExtended()[0] > 0 { + // There are more entries. Read an extension header and parse its entries. 
+ if _, err := io.ReadFull(tr.r, blk[:]); err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return nil, err + } + s = blk.Sparse() + continue + } + return sp, nil // Done + } +} + +// readGNUSparseMap1x0 reads the sparse map as stored in GNU's PAX sparse format +// version 1.0. The format of the sparse map consists of a series of +// newline-terminated numeric fields. The first field is the number of entries +// and is always present. Following this are the entries, consisting of two +// fields (offset, numBytes). This function must stop reading at the end +// boundary of the block containing the last newline. +// +// Note that the GNU manual says that numeric values should be encoded in octal +// format. However, the GNU tar utility itself outputs these values in decimal. +// As such, this library treats values as being encoded in decimal. +func readGNUSparseMap1x0(r io.Reader) ([]sparseEntry, error) { + var cntNewline int64 + var buf bytes.Buffer + var blk = make([]byte, blockSize) + + // feedTokens copies data in numBlock chunks from r into buf until there are + // at least cnt newlines in buf. It will not read more blocks than needed. + var feedTokens = func(cnt int64) error { + for cntNewline < cnt { + if _, err := io.ReadFull(r, blk); err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return err + } + buf.Write(blk) + for _, c := range blk { + if c == '\n' { + cntNewline++ + } + } + } + return nil + } + + // nextToken gets the next token delimited by a newline. This assumes that + // at least one newline exists in the buffer. + var nextToken = func() string { + cntNewline-- + tok, _ := buf.ReadString('\n') + return tok[:len(tok)-1] // Cut off newline + } + + // Parse for the number of entries. + // Use integer overflow resistant math to check this. + if err := feedTokens(1); err != nil { + return nil, err + } + numEntries, err := strconv.ParseInt(nextToken(), 10, 0) // Intentionally parse as native int + if err != nil || numEntries < 0 || int(2*numEntries) < int(numEntries) { + return nil, ErrHeader + } + + // Parse for all member entries. + // numEntries is trusted after this since a potential attacker must have + // committed resources proportional to what this library used. + if err := feedTokens(2 * numEntries); err != nil { + return nil, err + } + sp := make([]sparseEntry, 0, numEntries) + for i := int64(0); i < numEntries; i++ { + offset, err := strconv.ParseInt(nextToken(), 10, 64) + if err != nil { + return nil, ErrHeader + } + numBytes, err := strconv.ParseInt(nextToken(), 10, 64) + if err != nil { + return nil, ErrHeader + } + sp = append(sp, sparseEntry{offset: offset, numBytes: numBytes}) + } + return sp, nil +} + +// readGNUSparseMap0x1 reads the sparse map as stored in GNU's PAX sparse format +// version 0.1. The sparse map is stored in the PAX headers. +func readGNUSparseMap0x1(extHdrs map[string]string) ([]sparseEntry, error) { + // Get number of entries. + // Use integer overflow resistant math to check this. + numEntriesStr := extHdrs[paxGNUSparseNumBlocks] + numEntries, err := strconv.ParseInt(numEntriesStr, 10, 0) // Intentionally parse as native int + if err != nil || numEntries < 0 || int(2*numEntries) < int(numEntries) { + return nil, ErrHeader + } + + // There should be two numbers in sparseMap for each entry. + sparseMap := strings.Split(extHdrs[paxGNUSparseMap], ",") + if int64(len(sparseMap)) != 2*numEntries { + return nil, ErrHeader + } + + // Loop through the entries in the sparse map. + // numEntries is trusted now. 
+ sp := make([]sparseEntry, 0, numEntries) + for i := int64(0); i < numEntries; i++ { + offset, err := strconv.ParseInt(sparseMap[2*i], 10, 64) + if err != nil { + return nil, ErrHeader + } + numBytes, err := strconv.ParseInt(sparseMap[2*i+1], 10, 64) + if err != nil { + return nil, ErrHeader + } + sp = append(sp, sparseEntry{offset: offset, numBytes: numBytes}) + } + return sp, nil +} + +// numBytes returns the number of bytes left to read in the current file's entry +// in the tar archive, or 0 if there is no current file. +func (tr *Reader) numBytes() int64 { + if tr.curr == nil { + // No current file, so no bytes + return 0 + } + return tr.curr.numBytes() +} + +// Read reads from the current entry in the tar archive. +// It returns 0, io.EOF when it reaches the end of that entry, +// until Next is called to advance to the next entry. +// +// Calling Read on special types like TypeLink, TypeSymLink, TypeChar, +// TypeBlock, TypeDir, and TypeFifo returns 0, io.EOF regardless of what +// the Header.Size claims. +func (tr *Reader) Read(b []byte) (int, error) { + if tr.err != nil { + return 0, tr.err + } + if tr.curr == nil { + return 0, io.EOF + } + + n, err := tr.curr.Read(b) + if err != nil && err != io.EOF { + tr.err = err + } + return n, err +} + +func (rfr *regFileReader) Read(b []byte) (n int, err error) { + if rfr.nb == 0 { + // file consumed + return 0, io.EOF + } + if int64(len(b)) > rfr.nb { + b = b[0:rfr.nb] + } + n, err = rfr.r.Read(b) + rfr.nb -= int64(n) + + if err == io.EOF && rfr.nb > 0 { + err = io.ErrUnexpectedEOF + } + return +} + +// numBytes returns the number of bytes left to read in the file's data in the tar archive. +func (rfr *regFileReader) numBytes() int64 { + return rfr.nb +} + +// newSparseFileReader creates a new sparseFileReader, but validates all of the +// sparse entries before doing so. +func newSparseFileReader(rfr numBytesReader, sp []sparseEntry, total int64) (*sparseFileReader, error) { + if total < 0 { + return nil, ErrHeader // Total size cannot be negative + } + + // Validate all sparse entries. These are the same checks as performed by + // the BSD tar utility. + for i, s := range sp { + switch { + case s.offset < 0 || s.numBytes < 0: + return nil, ErrHeader // Negative values are never okay + case s.offset > math.MaxInt64-s.numBytes: + return nil, ErrHeader // Integer overflow with large length + case s.offset+s.numBytes > total: + return nil, ErrHeader // Region extends beyond the "real" size + case i > 0 && sp[i-1].offset+sp[i-1].numBytes > s.offset: + return nil, ErrHeader // Regions can't overlap and must be in order + } + } + return &sparseFileReader{rfr: rfr, sp: sp, total: total}, nil +} + +// readHole reads a sparse hole ending at endOffset. +func (sfr *sparseFileReader) readHole(b []byte, endOffset int64) int { + n64 := endOffset - sfr.pos + if n64 > int64(len(b)) { + n64 = int64(len(b)) + } + n := int(n64) + for i := 0; i < n; i++ { + b[i] = 0 + } + sfr.pos += n64 + return n +} + +// Read reads the sparse file data in expanded form. +func (sfr *sparseFileReader) Read(b []byte) (n int, err error) { + // Skip past all empty fragments. + for len(sfr.sp) > 0 && sfr.sp[0].numBytes == 0 { + sfr.sp = sfr.sp[1:] + } + + // If there are no more fragments, then it is possible that there + // is one last sparse hole. + if len(sfr.sp) == 0 { + // This behavior matches the BSD tar utility. + // However, GNU tar stops returning data even if sfr.total is unmet. 
+ if sfr.pos < sfr.total { + return sfr.readHole(b, sfr.total), nil + } + return 0, io.EOF + } + + // In front of a data fragment, so read a hole. + if sfr.pos < sfr.sp[0].offset { + return sfr.readHole(b, sfr.sp[0].offset), nil + } + + // In a data fragment, so read from it. + // This math is overflow free since we verify that offset and numBytes can + // be safely added when creating the sparseFileReader. + endPos := sfr.sp[0].offset + sfr.sp[0].numBytes // End offset of fragment + bytesLeft := endPos - sfr.pos // Bytes left in fragment + if int64(len(b)) > bytesLeft { + b = b[:bytesLeft] + } + + n, err = sfr.rfr.Read(b) + sfr.pos += int64(n) + if err == io.EOF { + if sfr.pos < endPos { + err = io.ErrUnexpectedEOF // There was supposed to be more data + } else if sfr.pos < sfr.total { + err = nil // There is still an implicit sparse hole at the end + } + } + + if sfr.pos == endPos { + sfr.sp = sfr.sp[1:] // We are done with this fragment, so pop it + } + return n, err +} + +// numBytes returns the number of bytes left to read in the sparse file's +// sparse-encoded data in the tar archive. +func (sfr *sparseFileReader) numBytes() int64 { + return sfr.rfr.numBytes() +} diff --git a/vendor/github.com/dmcgowan/go-tar/stat_atim.go b/vendor/github.com/dmcgowan/go-tar/stat_atim.go new file mode 100644 index 00000000..cf9cc79c --- /dev/null +++ b/vendor/github.com/dmcgowan/go-tar/stat_atim.go @@ -0,0 +1,20 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux dragonfly openbsd solaris + +package tar + +import ( + "syscall" + "time" +) + +func statAtime(st *syscall.Stat_t) time.Time { + return time.Unix(st.Atim.Unix()) +} + +func statCtime(st *syscall.Stat_t) time.Time { + return time.Unix(st.Ctim.Unix()) +} diff --git a/vendor/github.com/dmcgowan/go-tar/stat_atimespec.go b/vendor/github.com/dmcgowan/go-tar/stat_atimespec.go new file mode 100644 index 00000000..6f17dbe3 --- /dev/null +++ b/vendor/github.com/dmcgowan/go-tar/stat_atimespec.go @@ -0,0 +1,20 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin freebsd netbsd + +package tar + +import ( + "syscall" + "time" +) + +func statAtime(st *syscall.Stat_t) time.Time { + return time.Unix(st.Atimespec.Unix()) +} + +func statCtime(st *syscall.Stat_t) time.Time { + return time.Unix(st.Ctimespec.Unix()) +} diff --git a/vendor/github.com/dmcgowan/go-tar/stat_unix.go b/vendor/github.com/dmcgowan/go-tar/stat_unix.go new file mode 100644 index 00000000..cb843db4 --- /dev/null +++ b/vendor/github.com/dmcgowan/go-tar/stat_unix.go @@ -0,0 +1,32 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux darwin dragonfly freebsd openbsd netbsd solaris + +package tar + +import ( + "os" + "syscall" +) + +func init() { + sysStat = statUnix +} + +func statUnix(fi os.FileInfo, h *Header) error { + sys, ok := fi.Sys().(*syscall.Stat_t) + if !ok { + return nil + } + h.Uid = int(sys.Uid) + h.Gid = int(sys.Gid) + // TODO(bradfitz): populate username & group. os/user + // doesn't cache LookupId lookups, and lacks group + // lookup functions. + h.AccessTime = statAtime(sys) + h.ChangeTime = statCtime(sys) + // TODO(bradfitz): major/minor device numbers? 
+ return nil +} diff --git a/vendor/github.com/dmcgowan/go-tar/strconv.go b/vendor/github.com/dmcgowan/go-tar/strconv.go new file mode 100644 index 00000000..bb5b51c0 --- /dev/null +++ b/vendor/github.com/dmcgowan/go-tar/strconv.go @@ -0,0 +1,252 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package tar + +import ( + "bytes" + "fmt" + "strconv" + "strings" + "time" +) + +func isASCII(s string) bool { + for _, c := range s { + if c >= 0x80 { + return false + } + } + return true +} + +func toASCII(s string) string { + if isASCII(s) { + return s + } + var buf bytes.Buffer + for _, c := range s { + if c < 0x80 { + buf.WriteByte(byte(c)) + } + } + return buf.String() +} + +type parser struct { + err error // Last error seen +} + +type formatter struct { + err error // Last error seen +} + +// parseString parses bytes as a NUL-terminated C-style string. +// If a NUL byte is not found then the whole slice is returned as a string. +func (*parser) parseString(b []byte) string { + n := 0 + for n < len(b) && b[n] != 0 { + n++ + } + return string(b[0:n]) +} + +// Write s into b, terminating it with a NUL if there is room. +func (f *formatter) formatString(b []byte, s string) { + if len(s) > len(b) { + f.err = ErrFieldTooLong + return + } + ascii := toASCII(s) + copy(b, ascii) + if len(ascii) < len(b) { + b[len(ascii)] = 0 + } +} + +// fitsInBase256 reports whether x can be encoded into n bytes using base-256 +// encoding. Unlike octal encoding, base-256 encoding does not require that the +// string ends with a NUL character. Thus, all n bytes are available for output. +// +// If operating in binary mode, this assumes strict GNU binary mode; which means +// that the first byte can only be either 0x80 or 0xff. Thus, the first byte is +// equivalent to the sign bit in two's complement form. +func fitsInBase256(n int, x int64) bool { + var binBits = uint(n-1) * 8 + return n >= 9 || (x >= -1<<binBits && x < 1<<binBits) +} + +// parseNumeric parses the input as being encoded in either base-256 or octal. +// This function may return negative numbers. +// If parsing fails or an integer overflow occurs, err will be set. +func (p *parser) parseNumeric(b []byte) int64 { + // Check for base-256 (binary) format first. + // If the first bit is set, then all following bits constitute a two's + // complement encoded number in big-endian byte order. + if len(b) > 0 && b[0]&0x80 != 0 { + // Handling negative numbers relies on the following identity: + // -a-1 == ^a + // + // If the number is negative, we use an inversion mask to invert the + // data bytes and treat the value as an unsigned number. + var inv byte // 0x00 if positive or zero, 0xff if negative + if b[0]&0x40 != 0 { + inv = 0xff + } + + var x uint64 + for i, c := range b { + c ^= inv // Inverts c only if inv is 0xff, otherwise does nothing + if i == 0 { + c &= 0x7f // Ignore signal bit in first byte + } + if (x >> 56) > 0 { + p.err = ErrHeader // Integer overflow + return 0 + } + x = x<<8 | uint64(c) + } + if (x >> 63) > 0 { + p.err = ErrHeader // Integer overflow + return 0 + } + if inv == 0xff { + return ^int64(x) + } + return int64(x) + } + + // Normal case is base-8 (octal) format. + return p.parseOctal(b) +} + +// Write x into b, as binary (GNUtar/star extension). +func (f *formatter) formatNumeric(b []byte, x int64) { + if fitsInBase256(len(b), x) { + for i := len(b) - 1; i >= 0; i-- { + b[i] = byte(x) + x >>= 8 + } + b[0] |= 0x80 // Highest bit indicates binary format + return + } + + f.formatOctal(b, 0) // Last resort, just write zero + f.err = ErrFieldTooLong +} + +func (p *parser) parseOctal(b []byte) int64 { + // Because unused fields are filled with NULs, we need + // to skip leading NULs. Fields may also be padded with + // spaces or NULs. + // So we remove leading and trailing NULs and spaces to + // be sure.
+ b = bytes.Trim(b, " \x00") + + if len(b) == 0 { + return 0 + } + x, perr := strconv.ParseUint(p.parseString(b), 8, 64) + if perr != nil { + p.err = ErrHeader + } + return int64(x) +} + +func (f *formatter) formatOctal(b []byte, x int64) { + s := strconv.FormatInt(x, 8) + // Add leading zeros, but leave room for a NUL. + if n := len(b) - len(s) - 1; n > 0 { + s = strings.Repeat("0", n) + s + } + f.formatString(b, s) +} + +// parsePAXTime takes a string of the form %d.%d as described in the PAX +// specification. Note that this implementation allows for negative timestamps, +// which is allowed for by the PAX specification, but not always portable. +func parsePAXTime(s string) (time.Time, error) { + const maxNanoSecondDigits = 9 + + // Split string into seconds and sub-seconds parts. + ss, sn := s, "" + if pos := strings.IndexByte(s, '.'); pos >= 0 { + ss, sn = s[:pos], s[pos+1:] + } + + // Parse the seconds. + secs, err := strconv.ParseInt(ss, 10, 64) + if err != nil { + return time.Time{}, ErrHeader + } + if len(sn) == 0 { + return time.Unix(secs, 0), nil // No sub-second values + } + + // Parse the nanoseconds. + if strings.Trim(sn, "0123456789") != "" { + return time.Time{}, ErrHeader + } + if len(sn) < maxNanoSecondDigits { + sn += strings.Repeat("0", maxNanoSecondDigits-len(sn)) // Right pad + } else { + sn = sn[:maxNanoSecondDigits] // Right truncate + } + nsecs, _ := strconv.ParseInt(sn, 10, 64) // Must succeed + if len(ss) > 0 && ss[0] == '-' { + return time.Unix(secs, -1*int64(nsecs)), nil // Negative correction + } + return time.Unix(secs, int64(nsecs)), nil +} + +// TODO(dsnet): Implement formatPAXTime. + +// parsePAXRecord parses the input PAX record string into a key-value pair. +// If parsing is successful, it will slice off the currently read record and +// return the remainder as r. +// +// A PAX record is of the following form: +// "%d %s=%s\n" % (size, key, value) +func parsePAXRecord(s string) (k, v, r string, err error) { + // The size field ends at the first space. + sp := strings.IndexByte(s, ' ') + if sp == -1 { + return "", "", s, ErrHeader + } + + // Parse the first token as a decimal integer. + n, perr := strconv.ParseInt(s[:sp], 10, 0) // Intentionally parse as native int + if perr != nil || n < 5 || int64(len(s)) < n { + return "", "", s, ErrHeader + } + + // Extract everything between the space and the final newline. + rec, nl, rem := s[sp+1:n-1], s[n-1:n], s[n:] + if nl != "\n" { + return "", "", s, ErrHeader + } + + // The first equals separates the key from the value. + eq := strings.IndexByte(rec, '=') + if eq == -1 { + return "", "", s, ErrHeader + } + return rec[:eq], rec[eq+1:], rem, nil +} + +// formatPAXRecord formats a single PAX record, prefixing it with the +// appropriate length. +func formatPAXRecord(k, v string) string { + const padding = 3 // Extra padding for ' ', '=', and '\n' + size := len(k) + len(v) + padding + size += len(strconv.Itoa(size)) + record := fmt.Sprintf("%d %s=%s\n", size, k, v) + + // Final adjustment if adding size field increased the record size. + if len(record) != size { + size = len(record) + record = fmt.Sprintf("%d %s=%s\n", size, k, v) + } + return record +} diff --git a/vendor/github.com/dmcgowan/go-tar/writer.go b/vendor/github.com/dmcgowan/go-tar/writer.go new file mode 100644 index 00000000..596fb8b9 --- /dev/null +++ b/vendor/github.com/dmcgowan/go-tar/writer.go @@ -0,0 +1,364 @@ +// Copyright 2009 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package tar + +// TODO(dsymonds): +// - catch more errors (no first header, etc.) + +import ( + "bytes" + "errors" + "fmt" + "io" + "path" + "sort" + "strconv" + "strings" + "time" +) + +var ( + ErrWriteTooLong = errors.New("archive/tar: write too long") + ErrFieldTooLong = errors.New("archive/tar: header field too long") + ErrWriteAfterClose = errors.New("archive/tar: write after close") + errInvalidHeader = errors.New("archive/tar: header field too long or contains invalid values") +) + +// A Writer provides sequential writing of a tar archive in POSIX.1 format. +// A tar archive consists of a sequence of files. +// Call WriteHeader to begin a new file, and then call Write to supply that file's data, +// writing at most hdr.Size bytes in total. +type Writer struct { + w io.Writer + err error + nb int64 // number of unwritten bytes for current file entry + pad int64 // amount of padding to write after current file entry + closed bool + usedBinary bool // whether the binary numeric field extension was used + preferPax bool // use PAX header instead of binary numeric header + hdrBuff block // buffer to use in writeHeader when writing a regular header + paxHdrBuff block // buffer to use in writeHeader when writing a PAX header +} + +// NewWriter creates a new Writer writing to w. +func NewWriter(w io.Writer) *Writer { return &Writer{w: w} } + +// Flush finishes writing the current file (optional). +func (tw *Writer) Flush() error { + if tw.nb > 0 { + tw.err = fmt.Errorf("archive/tar: missed writing %d bytes", tw.nb) + return tw.err + } + + n := tw.nb + tw.pad + for n > 0 && tw.err == nil { + nr := n + if nr > blockSize { + nr = blockSize + } + var nw int + nw, tw.err = tw.w.Write(zeroBlock[0:nr]) + n -= int64(nw) + } + tw.nb = 0 + tw.pad = 0 + return tw.err +} + +var ( + minTime = time.Unix(0, 0) + // There is room for 11 octal digits (33 bits) of mtime. + maxTime = minTime.Add((1<<33 - 1) * time.Second) +) + +// WriteHeader writes hdr and prepares to accept the file's contents. +// WriteHeader calls Flush if it is not the first header. +// Calling after a Close will return ErrWriteAfterClose. +func (tw *Writer) WriteHeader(hdr *Header) error { + return tw.writeHeader(hdr, true) +} + +// WriteHeader writes hdr and prepares to accept the file's contents. +// WriteHeader calls Flush if it is not the first header. +// Calling after a Close will return ErrWriteAfterClose. +// As this method is called internally by writePax header to allow it to +// suppress writing the pax header. +func (tw *Writer) writeHeader(hdr *Header, allowPax bool) error { + if tw.closed { + return ErrWriteAfterClose + } + if tw.err == nil { + tw.Flush() + } + if tw.err != nil { + return tw.err + } + + // a map to hold pax header records, if any are needed + paxHeaders := make(map[string]string) + + // TODO(dsnet): we might want to use PAX headers for + // subsecond time resolution, but for now let's just capture + // too long fields or non ascii characters + + // We need to select which scratch buffer to use carefully, + // since this method is called recursively to write PAX headers. + // If allowPax is true, this is the non-recursive call, and we will use hdrBuff. + // If allowPax is false, we are being called by writePAXHeader, and hdrBuff is + // already being used by the non-recursive call, so we must use paxHdrBuff. 
+ header := &tw.hdrBuff + if !allowPax { + header = &tw.paxHdrBuff + } + copy(header[:], zeroBlock[:]) + + // Wrappers around formatter that automatically sets paxHeaders if the + // argument extends beyond the capacity of the input byte slice. + var f formatter + var formatString = func(b []byte, s string, paxKeyword string) { + needsPaxHeader := paxKeyword != paxNone && len(s) > len(b) || !isASCII(s) + if needsPaxHeader { + paxHeaders[paxKeyword] = s + return + } + f.formatString(b, s) + } + var formatNumeric = func(b []byte, x int64, paxKeyword string) { + // Try octal first. + s := strconv.FormatInt(x, 8) + if len(s) < len(b) { + f.formatOctal(b, x) + return + } + + // If it is too long for octal, and PAX is preferred, use a PAX header. + if paxKeyword != paxNone && tw.preferPax { + f.formatOctal(b, 0) + s := strconv.FormatInt(x, 10) + paxHeaders[paxKeyword] = s + return + } + + tw.usedBinary = true + f.formatNumeric(b, x) + } + + // Handle out of range ModTime carefully. + var modTime int64 + if !hdr.ModTime.Before(minTime) && !hdr.ModTime.After(maxTime) { + modTime = hdr.ModTime.Unix() + } + + v7 := header.V7() + formatString(v7.Name(), hdr.Name, paxPath) + // TODO(dsnet): The GNU format permits the mode field to be encoded in + // base-256 format. Thus, we can use formatNumeric instead of formatOctal. + f.formatOctal(v7.Mode(), hdr.Mode) + formatNumeric(v7.UID(), int64(hdr.Uid), paxUid) + formatNumeric(v7.GID(), int64(hdr.Gid), paxGid) + formatNumeric(v7.Size(), hdr.Size, paxSize) + // TODO(dsnet): Consider using PAX for finer time granularity. + formatNumeric(v7.ModTime(), modTime, paxNone) + v7.TypeFlag()[0] = hdr.Typeflag + formatString(v7.LinkName(), hdr.Linkname, paxLinkpath) + + ustar := header.USTAR() + formatString(ustar.UserName(), hdr.Uname, paxUname) + formatString(ustar.GroupName(), hdr.Gname, paxGname) + formatNumeric(ustar.DevMajor(), hdr.Devmajor, paxNone) + formatNumeric(ustar.DevMinor(), hdr.Devminor, paxNone) + + // TODO(dsnet): The logic surrounding the prefix field is broken when trying + // to encode the header as GNU format. The challenge with the current logic + // is that we are unsure what format we are using at any given moment until + // we have processed *all* of the fields. The problem is that by the time + // all fields have been processed, some work has already been done to handle + // each field under the assumption that it is for one given format or + // another. In some situations, this causes the Writer to be confused and + // encode a prefix field when the format being used is GNU. Thus, producing + // an invalid tar file. + // + // As a short-term fix, we disable the logic to use the prefix field, which + // will force the badly generated GNU files to become encoded as being + // the PAX format. + // + // As an alternative fix, we could hard-code preferPax to be true. However, + // this is problematic for the following reasons: + // * The preferPax functionality is not tested at all. + // * This can result in headers that try to use both the GNU and PAX + // features at the same time, which is also wrong. + // + // The proper fix for this is to use a two-pass method: + // * The first pass simply determines what set of formats can possibly + // encode the given header. + // * The second pass actually encodes the header as that given format + // without worrying about violating the format. 
+ // + // See the following: + // https://golang.org/issue/12594 + // https://golang.org/issue/17630 + // https://golang.org/issue/9683 + const usePrefix = false + + // try to use a ustar header when only the name is too long + _, paxPathUsed := paxHeaders[paxPath] + if usePrefix && !tw.preferPax && len(paxHeaders) == 1 && paxPathUsed { + prefix, suffix, ok := splitUSTARPath(hdr.Name) + if ok { + // Since we can encode in USTAR format, disable PAX header. + delete(paxHeaders, paxPath) + + // Update the path fields + formatString(v7.Name(), suffix, paxNone) + formatString(ustar.Prefix(), prefix, paxNone) + } + } + + if tw.usedBinary { + header.SetFormat(formatGNU) + } else { + header.SetFormat(formatUSTAR) + } + + // Check if there were any formatting errors. + if f.err != nil { + tw.err = f.err + return tw.err + } + + if allowPax { + for k, v := range hdr.Xattrs { + paxHeaders[paxXattr+k] = v + } + } + + if len(paxHeaders) > 0 { + if !allowPax { + return errInvalidHeader + } + if err := tw.writePAXHeader(hdr, paxHeaders); err != nil { + return err + } + } + tw.nb = hdr.Size + tw.pad = (blockSize - (tw.nb % blockSize)) % blockSize + + _, tw.err = tw.w.Write(header[:]) + return tw.err +} + +// splitUSTARPath splits a path according to USTAR prefix and suffix rules. +// If the path is not splittable, then it will return ("", "", false). +func splitUSTARPath(name string) (prefix, suffix string, ok bool) { + length := len(name) + if length <= nameSize || !isASCII(name) { + return "", "", false + } else if length > prefixSize+1 { + length = prefixSize + 1 + } else if name[length-1] == '/' { + length-- + } + + i := strings.LastIndex(name[:length], "/") + nlen := len(name) - i - 1 // nlen is length of suffix + plen := i // plen is length of prefix + if i <= 0 || nlen > nameSize || nlen == 0 || plen > prefixSize { + return "", "", false + } + return name[:i], name[i+1:], true +} + +// writePaxHeader writes an extended pax header to the +// archive. +func (tw *Writer) writePAXHeader(hdr *Header, paxHeaders map[string]string) error { + // Prepare extended header + ext := new(Header) + ext.Typeflag = TypeXHeader + // Setting ModTime is required for reader parsing to + // succeed, and seems harmless enough. + ext.ModTime = hdr.ModTime + // The spec asks that we namespace our pseudo files + // with the current pid. However, this results in differing outputs + // for identical inputs. As such, the constant 0 is now used instead. + // golang.org/issue/12358 + dir, file := path.Split(hdr.Name) + fullName := path.Join(dir, "PaxHeaders.0", file) + + ascii := toASCII(fullName) + if len(ascii) > nameSize { + ascii = ascii[:nameSize] + } + ext.Name = ascii + // Construct the body + var buf bytes.Buffer + + // Keys are sorted before writing to body to allow deterministic output. + keys := make([]string, 0, len(paxHeaders)) + for k := range paxHeaders { + keys = append(keys, k) + } + sort.Strings(keys) + + for _, k := range keys { + fmt.Fprint(&buf, formatPAXRecord(k, paxHeaders[k])) + } + + ext.Size = int64(len(buf.Bytes())) + if err := tw.writeHeader(ext, false); err != nil { + return err + } + if _, err := tw.Write(buf.Bytes()); err != nil { + return err + } + if err := tw.Flush(); err != nil { + return err + } + return nil +} + +// Write writes to the current entry in the tar archive. +// Write returns the error ErrWriteTooLong if more than +// hdr.Size bytes are written after WriteHeader. 
+func (tw *Writer) Write(b []byte) (n int, err error) { + if tw.closed { + err = ErrWriteAfterClose + return + } + overwrite := false + if int64(len(b)) > tw.nb { + b = b[0:tw.nb] + overwrite = true + } + n, err = tw.w.Write(b) + tw.nb -= int64(n) + if err == nil && overwrite { + err = ErrWriteTooLong + return + } + tw.err = err + return +} + +// Close closes the tar archive, flushing any unwritten +// data to the underlying writer. +func (tw *Writer) Close() error { + if tw.err != nil || tw.closed { + return tw.err + } + tw.Flush() + tw.closed = true + if tw.err != nil { + return tw.err + } + + // trailer: two zero blocks + for i := 0; i < 2; i++ { + _, tw.err = tw.w.Write(zeroBlock[:]) + if tw.err != nil { + break + } + } + return tw.err +} diff --git a/vendor/github.com/docker/docker/pkg/archive/archive.go b/vendor/github.com/docker/docker/pkg/archive/archive.go index 6cbc2e2b..58a06a0d 100644 --- a/vendor/github.com/docker/docker/pkg/archive/archive.go +++ b/vendor/github.com/docker/docker/pkg/archive/archive.go @@ -16,13 +16,13 @@ import ( "strings" "syscall" - "github.com/Sirupsen/logrus" "github.com/docker/docker/pkg/fileutils" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/ioutils" "github.com/docker/docker/pkg/pools" "github.com/docker/docker/pkg/promise" "github.com/docker/docker/pkg/system" + "github.com/sirupsen/logrus" ) type ( @@ -93,16 +93,6 @@ const ( OverlayWhiteoutFormat ) -const ( - modeISDIR = 040000 // Directory - modeISFIFO = 010000 // FIFO - modeISREG = 0100000 // Regular file - modeISLNK = 0120000 // Symbolic link - modeISBLK = 060000 // Block special file - modeISCHR = 020000 // Character special file - modeISSOCK = 0140000 // Socket -) - // IsArchivePath checks if the (possibly compressed) file at the given path // starts with a tar file header. func IsArchivePath(path string) bool { @@ -315,14 +305,12 @@ func (compression *Compression) Extension() string { // FileInfoHeader creates a populated Header from fi. // Compared to archive pkg this function fills in more information. -// Also, regardless of Go version, this function fills file type bits (e.g. hdr.Mode |= modeISDIR), -// which have been deleted since Go 1.9 archive/tar. 
func FileInfoHeader(name string, fi os.FileInfo, link string) (*tar.Header, error) { hdr, err := tar.FileInfoHeader(fi, link) if err != nil { return nil, err } - hdr.Mode = fillGo18FileTypeBits(int64(chmodTarEntry(os.FileMode(hdr.Mode))), fi) + hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode))) name, err = canonicalTarName(name, fi.IsDir()) if err != nil { return nil, fmt.Errorf("tar: cannot canonicalize path: %v", err) @@ -334,31 +322,6 @@ func FileInfoHeader(name string, fi os.FileInfo, link string) (*tar.Header, erro return hdr, nil } -// fillGo18FileTypeBits fills type bits which have been removed on Go 1.9 archive/tar -// https://github.com/golang/go/commit/66b5a2f -func fillGo18FileTypeBits(mode int64, fi os.FileInfo) int64 { - fm := fi.Mode() - switch { - case fm.IsRegular(): - mode |= modeISREG - case fi.IsDir(): - mode |= modeISDIR - case fm&os.ModeSymlink != 0: - mode |= modeISLNK - case fm&os.ModeDevice != 0: - if fm&os.ModeCharDevice != 0 { - mode |= modeISCHR - } else { - mode |= modeISBLK - } - case fm&os.ModeNamedPipe != 0: - mode |= modeISFIFO - case fm&os.ModeSocket != 0: - mode |= modeISSOCK - } - return mode -} - // ReadSecurityXattrToTarHeader reads security.capability xattr from filesystem // to a tar header func ReadSecurityXattrToTarHeader(path string, hdr *tar.Header) error { diff --git a/vendor/github.com/docker/docker/pkg/archive/changes.go b/vendor/github.com/docker/docker/pkg/archive/changes.go index 5ca39b72..341c66de 100644 --- a/vendor/github.com/docker/docker/pkg/archive/changes.go +++ b/vendor/github.com/docker/docker/pkg/archive/changes.go @@ -13,10 +13,10 @@ import ( "syscall" "time" - "github.com/Sirupsen/logrus" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/pools" "github.com/docker/docker/pkg/system" + "github.com/sirupsen/logrus" ) // ChangeType represents the change type. diff --git a/vendor/github.com/docker/docker/pkg/archive/copy.go b/vendor/github.com/docker/docker/pkg/archive/copy.go index 0614c67c..cc1eb30e 100644 --- a/vendor/github.com/docker/docker/pkg/archive/copy.go +++ b/vendor/github.com/docker/docker/pkg/archive/copy.go @@ -9,8 +9,8 @@ import ( "path/filepath" "strings" - "github.com/Sirupsen/logrus" "github.com/docker/docker/pkg/system" + "github.com/sirupsen/logrus" ) // Errors used or returned by this file. diff --git a/vendor/github.com/docker/docker/pkg/archive/diff.go b/vendor/github.com/docker/docker/pkg/archive/diff.go index a2766b59..019facd3 100644 --- a/vendor/github.com/docker/docker/pkg/archive/diff.go +++ b/vendor/github.com/docker/docker/pkg/archive/diff.go @@ -10,10 +10,10 @@ import ( "runtime" "strings" - "github.com/Sirupsen/logrus" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/pools" "github.com/docker/docker/pkg/system" + "github.com/sirupsen/logrus" ) // UnpackLayer unpack `layer` to a `dest`. 
The stream `layer` can be diff --git a/vendor/github.com/docker/docker/pkg/archive/example_changes.go b/vendor/github.com/docker/docker/pkg/archive/example_changes.go index cedd46a4..495db809 100644 --- a/vendor/github.com/docker/docker/pkg/archive/example_changes.go +++ b/vendor/github.com/docker/docker/pkg/archive/example_changes.go @@ -13,8 +13,8 @@ import ( "os" "path" - "github.com/Sirupsen/logrus" "github.com/docker/docker/pkg/archive" + "github.com/sirupsen/logrus" ) var ( diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/archive.go b/vendor/github.com/docker/docker/pkg/chrootarchive/archive.go deleted file mode 100644 index 76044187..00000000 --- a/vendor/github.com/docker/docker/pkg/chrootarchive/archive.go +++ /dev/null @@ -1,70 +0,0 @@ -package chrootarchive - -import ( - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/idtools" -) - -// NewArchiver returns a new Archiver which uses chrootarchive.Untar -func NewArchiver(idMappings *idtools.IDMappings) *archive.Archiver { - if idMappings == nil { - idMappings = &idtools.IDMappings{} - } - return &archive.Archiver{Untar: Untar, IDMappings: idMappings} -} - -// Untar reads a stream of bytes from `archive`, parses it as a tar archive, -// and unpacks it into the directory at `dest`. -// The archive may be compressed with one of the following algorithms: -// identity (uncompressed), gzip, bzip2, xz. -func Untar(tarArchive io.Reader, dest string, options *archive.TarOptions) error { - return untarHandler(tarArchive, dest, options, true) -} - -// UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive, -// and unpacks it into the directory at `dest`. -// The archive must be an uncompressed stream. -func UntarUncompressed(tarArchive io.Reader, dest string, options *archive.TarOptions) error { - return untarHandler(tarArchive, dest, options, false) -} - -// Handler for teasing out the automatic decompression -func untarHandler(tarArchive io.Reader, dest string, options *archive.TarOptions, decompress bool) error { - if tarArchive == nil { - return fmt.Errorf("Empty archive") - } - if options == nil { - options = &archive.TarOptions{} - } - if options.ExcludePatterns == nil { - options.ExcludePatterns = []string{} - } - - idMappings := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps) - rootIDs := idMappings.RootPair() - - dest = filepath.Clean(dest) - if _, err := os.Stat(dest); os.IsNotExist(err) { - if err := idtools.MkdirAllAndChownNew(dest, 0755, rootIDs); err != nil { - return err - } - } - - r := ioutil.NopCloser(tarArchive) - if decompress { - decompressedArchive, err := archive.DecompressStream(tarArchive) - if err != nil { - return err - } - defer decompressedArchive.Close() - r = decompressedArchive - } - - return invokeUnpack(r, dest, options) -} diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/archive_unix.go b/vendor/github.com/docker/docker/pkg/chrootarchive/archive_unix.go deleted file mode 100644 index f2325abd..00000000 --- a/vendor/github.com/docker/docker/pkg/chrootarchive/archive_unix.go +++ /dev/null @@ -1,86 +0,0 @@ -// +build !windows - -package chrootarchive - -import ( - "bytes" - "encoding/json" - "flag" - "fmt" - "io" - "io/ioutil" - "os" - "runtime" - - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/reexec" -) - -// untar is the entry-point for docker-untar on re-exec. 
This is not used on -// Windows as it does not support chroot, hence no point sandboxing through -// chroot and rexec. -func untar() { - runtime.LockOSThread() - flag.Parse() - - var options *archive.TarOptions - - //read the options from the pipe "ExtraFiles" - if err := json.NewDecoder(os.NewFile(3, "options")).Decode(&options); err != nil { - fatal(err) - } - - if err := chroot(flag.Arg(0)); err != nil { - fatal(err) - } - - if err := archive.Unpack(os.Stdin, "/", options); err != nil { - fatal(err) - } - // fully consume stdin in case it is zero padded - if _, err := flush(os.Stdin); err != nil { - fatal(err) - } - - os.Exit(0) -} - -func invokeUnpack(decompressedArchive io.Reader, dest string, options *archive.TarOptions) error { - - // We can't pass a potentially large exclude list directly via cmd line - // because we easily overrun the kernel's max argument/environment size - // when the full image list is passed (e.g. when this is used by - // `docker load`). We will marshall the options via a pipe to the - // child - r, w, err := os.Pipe() - if err != nil { - return fmt.Errorf("Untar pipe failure: %v", err) - } - - cmd := reexec.Command("docker-untar", dest) - cmd.Stdin = decompressedArchive - - cmd.ExtraFiles = append(cmd.ExtraFiles, r) - output := bytes.NewBuffer(nil) - cmd.Stdout = output - cmd.Stderr = output - - if err := cmd.Start(); err != nil { - return fmt.Errorf("Untar error on re-exec cmd: %v", err) - } - //write the options to the pipe for the untar exec to read - if err := json.NewEncoder(w).Encode(options); err != nil { - return fmt.Errorf("Untar json encode to pipe failed: %v", err) - } - w.Close() - - if err := cmd.Wait(); err != nil { - // when `xz -d -c -q | docker-untar ...` failed on docker-untar side, - // we need to exhaust `xz`'s output, otherwise the `xz` side will be - // pending on write pipe forever - io.Copy(ioutil.Discard, decompressedArchive) - - return fmt.Errorf("Error processing tar file(%v): %s", err, output) - } - return nil -} diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/archive_windows.go b/vendor/github.com/docker/docker/pkg/chrootarchive/archive_windows.go deleted file mode 100644 index 0a500ed5..00000000 --- a/vendor/github.com/docker/docker/pkg/chrootarchive/archive_windows.go +++ /dev/null @@ -1,22 +0,0 @@ -package chrootarchive - -import ( - "io" - - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/longpath" -) - -// chroot is not supported by Windows -func chroot(path string) error { - return nil -} - -func invokeUnpack(decompressedArchive io.ReadCloser, - dest string, - options *archive.TarOptions) error { - // Windows is different to Linux here because Windows does not support - // chroot. Hence there is no point sandboxing a chrooted process to - // do the unpack. We call inline instead within the daemon process. - return archive.Unpack(decompressedArchive, longpath.AddPrefix(dest), options) -} diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/chroot_linux.go b/vendor/github.com/docker/docker/pkg/chrootarchive/chroot_linux.go deleted file mode 100644 index f9d7fed6..00000000 --- a/vendor/github.com/docker/docker/pkg/chrootarchive/chroot_linux.go +++ /dev/null @@ -1,108 +0,0 @@ -package chrootarchive - -import ( - "fmt" - "io/ioutil" - "os" - "path/filepath" - "syscall" - - "github.com/docker/docker/pkg/mount" - rsystem "github.com/opencontainers/runc/libcontainer/system" -) - -// chroot on linux uses pivot_root instead of chroot -// pivot_root takes a new root and an old root. 
-// Old root must be a sub-dir of new root, it is where the current rootfs will reside after the call to pivot_root. -// New root is where the new rootfs is set to. -// Old root is removed after the call to pivot_root so it is no longer available under the new root. -// This is similar to how libcontainer sets up a container's rootfs -func chroot(path string) (err error) { - // if the engine is running in a user namespace we need to use actual chroot - if rsystem.RunningInUserNS() { - return realChroot(path) - } - if err := syscall.Unshare(syscall.CLONE_NEWNS); err != nil { - return fmt.Errorf("Error creating mount namespace before pivot: %v", err) - } - - // make everything in new ns private - if err := mount.MakeRPrivate("/"); err != nil { - return err - } - - if mounted, _ := mount.Mounted(path); !mounted { - if err := mount.Mount(path, path, "bind", "rbind,rw"); err != nil { - return realChroot(path) - } - } - - // setup oldRoot for pivot_root - pivotDir, err := ioutil.TempDir(path, ".pivot_root") - if err != nil { - return fmt.Errorf("Error setting up pivot dir: %v", err) - } - - var mounted bool - defer func() { - if mounted { - // make sure pivotDir is not mounted before we try to remove it - if errCleanup := syscall.Unmount(pivotDir, syscall.MNT_DETACH); errCleanup != nil { - if err == nil { - err = errCleanup - } - return - } - } - - errCleanup := os.Remove(pivotDir) - // pivotDir doesn't exist if pivot_root failed and chroot+chdir was successful - // because we already cleaned it up on failed pivot_root - if errCleanup != nil && !os.IsNotExist(errCleanup) { - errCleanup = fmt.Errorf("Error cleaning up after pivot: %v", errCleanup) - if err == nil { - err = errCleanup - } - } - }() - - if err := syscall.PivotRoot(path, pivotDir); err != nil { - // If pivot fails, fall back to the normal chroot after cleaning up temp dir - if err := os.Remove(pivotDir); err != nil { - return fmt.Errorf("Error cleaning up after failed pivot: %v", err) - } - return realChroot(path) - } - mounted = true - - // This is the new path for where the old root (prior to the pivot) has been moved to - // This dir contains the rootfs of the caller, which we need to remove so it is not visible during extraction - pivotDir = filepath.Join("/", filepath.Base(pivotDir)) - - if err := syscall.Chdir("/"); err != nil { - return fmt.Errorf("Error changing to new root: %v", err) - } - - // Make the pivotDir (where the old root lives) private so it can be unmounted without propagating to the host - if err := syscall.Mount("", pivotDir, "", syscall.MS_PRIVATE|syscall.MS_REC, ""); err != nil { - return fmt.Errorf("Error making old root private after pivot: %v", err) - } - - // Now unmount the old root so it's no longer visible from the new root - if err := syscall.Unmount(pivotDir, syscall.MNT_DETACH); err != nil { - return fmt.Errorf("Error while unmounting old root after pivot: %v", err) - } - mounted = false - - return nil -} - -func realChroot(path string) error { - if err := syscall.Chroot(path); err != nil { - return fmt.Errorf("Error after fallback to chroot: %v", err) - } - if err := syscall.Chdir("/"); err != nil { - return fmt.Errorf("Error changing to new root after chroot: %v", err) - } - return nil -} diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/chroot_unix.go b/vendor/github.com/docker/docker/pkg/chrootarchive/chroot_unix.go deleted file mode 100644 index 16354bf6..00000000 --- a/vendor/github.com/docker/docker/pkg/chrootarchive/chroot_unix.go +++ /dev/null @@ -1,12 +0,0 @@ -// +build 
!windows,!linux - -package chrootarchive - -import "syscall" - -func chroot(path string) error { - if err := syscall.Chroot(path); err != nil { - return err - } - return syscall.Chdir("/") -} diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/diff.go b/vendor/github.com/docker/docker/pkg/chrootarchive/diff.go deleted file mode 100644 index 49acad79..00000000 --- a/vendor/github.com/docker/docker/pkg/chrootarchive/diff.go +++ /dev/null @@ -1,23 +0,0 @@ -package chrootarchive - -import ( - "io" - - "github.com/docker/docker/pkg/archive" -) - -// ApplyLayer parses a diff in the standard layer format from `layer`, -// and applies it to the directory `dest`. The stream `layer` can only be -// uncompressed. -// Returns the size in bytes of the contents of the layer. -func ApplyLayer(dest string, layer io.Reader) (size int64, err error) { - return applyLayerHandler(dest, layer, &archive.TarOptions{}, true) -} - -// ApplyUncompressedLayer parses a diff in the standard layer format from -// `layer`, and applies it to the directory `dest`. The stream `layer` -// can only be uncompressed. -// Returns the size in bytes of the contents of the layer. -func ApplyUncompressedLayer(dest string, layer io.Reader, options *archive.TarOptions) (int64, error) { - return applyLayerHandler(dest, layer, options, false) -} diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/diff_unix.go b/vendor/github.com/docker/docker/pkg/chrootarchive/diff_unix.go deleted file mode 100644 index 33098b33..00000000 --- a/vendor/github.com/docker/docker/pkg/chrootarchive/diff_unix.go +++ /dev/null @@ -1,130 +0,0 @@ -//+build !windows - -package chrootarchive - -import ( - "bytes" - "encoding/json" - "flag" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "runtime" - - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/reexec" - "github.com/docker/docker/pkg/system" - rsystem "github.com/opencontainers/runc/libcontainer/system" -) - -type applyLayerResponse struct { - LayerSize int64 `json:"layerSize"` -} - -// applyLayer is the entry-point for docker-applylayer on re-exec. This is not -// used on Windows as it does not support chroot, hence no point sandboxing -// through chroot and rexec. -func applyLayer() { - - var ( - tmpDir string - err error - options *archive.TarOptions - ) - runtime.LockOSThread() - flag.Parse() - - inUserns := rsystem.RunningInUserNS() - if err := chroot(flag.Arg(0)); err != nil { - fatal(err) - } - - // We need to be able to set any perms - oldmask, err := system.Umask(0) - defer system.Umask(oldmask) - if err != nil { - fatal(err) - } - - if err := json.Unmarshal([]byte(os.Getenv("OPT")), &options); err != nil { - fatal(err) - } - - if inUserns { - options.InUserNS = true - } - - if tmpDir, err = ioutil.TempDir("/", "temp-docker-extract"); err != nil { - fatal(err) - } - - os.Setenv("TMPDIR", tmpDir) - size, err := archive.UnpackLayer("/", os.Stdin, options) - os.RemoveAll(tmpDir) - if err != nil { - fatal(err) - } - - encoder := json.NewEncoder(os.Stdout) - if err := encoder.Encode(applyLayerResponse{size}); err != nil { - fatal(fmt.Errorf("unable to encode layerSize JSON: %s", err)) - } - - if _, err := flush(os.Stdin); err != nil { - fatal(err) - } - - os.Exit(0) -} - -// applyLayerHandler parses a diff in the standard layer format from `layer`, and -// applies it to the directory `dest`. Returns the size in bytes of the -// contents of the layer. 
-func applyLayerHandler(dest string, layer io.Reader, options *archive.TarOptions, decompress bool) (size int64, err error) { - dest = filepath.Clean(dest) - if decompress { - decompressed, err := archive.DecompressStream(layer) - if err != nil { - return 0, err - } - defer decompressed.Close() - - layer = decompressed - } - if options == nil { - options = &archive.TarOptions{} - if rsystem.RunningInUserNS() { - options.InUserNS = true - } - } - if options.ExcludePatterns == nil { - options.ExcludePatterns = []string{} - } - - data, err := json.Marshal(options) - if err != nil { - return 0, fmt.Errorf("ApplyLayer json encode: %v", err) - } - - cmd := reexec.Command("docker-applyLayer", dest) - cmd.Stdin = layer - cmd.Env = append(cmd.Env, fmt.Sprintf("OPT=%s", data)) - - outBuf, errBuf := new(bytes.Buffer), new(bytes.Buffer) - cmd.Stdout, cmd.Stderr = outBuf, errBuf - - if err = cmd.Run(); err != nil { - return 0, fmt.Errorf("ApplyLayer %s stdout: %s stderr: %s", err, outBuf, errBuf) - } - - // Stdout should be a valid JSON struct representing an applyLayerResponse. - response := applyLayerResponse{} - decoder := json.NewDecoder(outBuf) - if err = decoder.Decode(&response); err != nil { - return 0, fmt.Errorf("unable to decode ApplyLayer JSON response: %s", err) - } - - return response.LayerSize, nil -} diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/diff_windows.go b/vendor/github.com/docker/docker/pkg/chrootarchive/diff_windows.go deleted file mode 100644 index dc07eb68..00000000 --- a/vendor/github.com/docker/docker/pkg/chrootarchive/diff_windows.go +++ /dev/null @@ -1,45 +0,0 @@ -package chrootarchive - -import ( - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/longpath" -) - -// applyLayerHandler parses a diff in the standard layer format from `layer`, and -// applies it to the directory `dest`. Returns the size in bytes of the -// contents of the layer. -func applyLayerHandler(dest string, layer io.Reader, options *archive.TarOptions, decompress bool) (size int64, err error) { - dest = filepath.Clean(dest) - - // Ensure it is a Windows-style volume path - dest = longpath.AddPrefix(dest) - - if decompress { - decompressed, err := archive.DecompressStream(layer) - if err != nil { - return 0, err - } - defer decompressed.Close() - - layer = decompressed - } - - tmpDir, err := ioutil.TempDir(os.Getenv("temp"), "temp-docker-extract") - if err != nil { - return 0, fmt.Errorf("ApplyLayer failed to create temp-docker-extract under %s. 
%s", dest, err) - } - - s, err := archive.UnpackLayer(dest, layer, nil) - os.RemoveAll(tmpDir) - if err != nil { - return 0, fmt.Errorf("ApplyLayer %s failed UnpackLayer to %s: %s", layer, dest, err) - } - - return s, nil -} diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/init_unix.go b/vendor/github.com/docker/docker/pkg/chrootarchive/init_unix.go deleted file mode 100644 index 4f637f17..00000000 --- a/vendor/github.com/docker/docker/pkg/chrootarchive/init_unix.go +++ /dev/null @@ -1,28 +0,0 @@ -// +build !windows - -package chrootarchive - -import ( - "fmt" - "io" - "io/ioutil" - "os" - - "github.com/docker/docker/pkg/reexec" -) - -func init() { - reexec.Register("docker-applyLayer", applyLayer) - reexec.Register("docker-untar", untar) -} - -func fatal(err error) { - fmt.Fprint(os.Stderr, err) - os.Exit(1) -} - -// flush consumes all the bytes from the reader discarding -// any errors -func flush(r io.Reader) (bytes int64, err error) { - return io.Copy(ioutil.Discard, r) -} diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/init_windows.go b/vendor/github.com/docker/docker/pkg/chrootarchive/init_windows.go deleted file mode 100644 index fa17c9bf..00000000 --- a/vendor/github.com/docker/docker/pkg/chrootarchive/init_windows.go +++ /dev/null @@ -1,4 +0,0 @@ -package chrootarchive - -func init() { -} diff --git a/vendor/github.com/docker/docker/pkg/fileutils/fileutils.go b/vendor/github.com/docker/docker/pkg/fileutils/fileutils.go index 57cc0873..a129e654 100644 --- a/vendor/github.com/docker/docker/pkg/fileutils/fileutils.go +++ b/vendor/github.com/docker/docker/pkg/fileutils/fileutils.go @@ -10,7 +10,7 @@ import ( "strings" "text/scanner" - "github.com/Sirupsen/logrus" + "github.com/sirupsen/logrus" ) // PatternMatcher allows checking paths agaist a list of patterns diff --git a/vendor/github.com/docker/docker/pkg/fileutils/fileutils_unix.go b/vendor/github.com/docker/docker/pkg/fileutils/fileutils_unix.go index d5c3abf5..9e0e97bd 100644 --- a/vendor/github.com/docker/docker/pkg/fileutils/fileutils_unix.go +++ b/vendor/github.com/docker/docker/pkg/fileutils/fileutils_unix.go @@ -7,7 +7,7 @@ import ( "io/ioutil" "os" - "github.com/Sirupsen/logrus" + "github.com/sirupsen/logrus" ) // GetTotalUsedFds Returns the number of used File Descriptors by diff --git a/vendor/github.com/docker/docker/pkg/reexec/README.md b/vendor/github.com/docker/docker/pkg/reexec/README.md deleted file mode 100644 index 6658f69b..00000000 --- a/vendor/github.com/docker/docker/pkg/reexec/README.md +++ /dev/null @@ -1,5 +0,0 @@ -# reexec - -The `reexec` package facilitates the busybox style reexec of the docker binary that we require because -of the forking limitations of using Go. Handlers can be registered with a name and the argv 0 of -the exec of the binary will be used to find and execute custom init paths. diff --git a/vendor/github.com/docker/docker/pkg/reexec/command_linux.go b/vendor/github.com/docker/docker/pkg/reexec/command_linux.go deleted file mode 100644 index 34ae2a9d..00000000 --- a/vendor/github.com/docker/docker/pkg/reexec/command_linux.go +++ /dev/null @@ -1,28 +0,0 @@ -// +build linux - -package reexec - -import ( - "os/exec" - "syscall" -) - -// Self returns the path to the current process's binary. -// Returns "/proc/self/exe". -func Self() string { - return "/proc/self/exe" -} - -// Command returns *exec.Cmd which has Path as current binary. Also it setting -// SysProcAttr.Pdeathsig to SIGTERM. 
-// This will use the in-memory version (/proc/self/exe) of the current binary, -// it is thus safe to delete or replace the on-disk binary (os.Args[0]). -func Command(args ...string) *exec.Cmd { - return &exec.Cmd{ - Path: Self(), - Args: args, - SysProcAttr: &syscall.SysProcAttr{ - Pdeathsig: syscall.SIGTERM, - }, - } -} diff --git a/vendor/github.com/docker/docker/pkg/reexec/command_unix.go b/vendor/github.com/docker/docker/pkg/reexec/command_unix.go deleted file mode 100644 index 778a720e..00000000 --- a/vendor/github.com/docker/docker/pkg/reexec/command_unix.go +++ /dev/null @@ -1,23 +0,0 @@ -// +build freebsd solaris darwin - -package reexec - -import ( - "os/exec" -) - -// Self returns the path to the current process's binary. -// Uses os.Args[0]. -func Self() string { - return naiveSelf() -} - -// Command returns *exec.Cmd which has Path as current binary. -// For example if current binary is "docker" at "/usr/bin/", then cmd.Path will -// be set to "/usr/bin/docker". -func Command(args ...string) *exec.Cmd { - return &exec.Cmd{ - Path: Self(), - Args: args, - } -} diff --git a/vendor/github.com/docker/docker/pkg/reexec/command_unsupported.go b/vendor/github.com/docker/docker/pkg/reexec/command_unsupported.go deleted file mode 100644 index 76edd824..00000000 --- a/vendor/github.com/docker/docker/pkg/reexec/command_unsupported.go +++ /dev/null @@ -1,12 +0,0 @@ -// +build !linux,!windows,!freebsd,!solaris,!darwin - -package reexec - -import ( - "os/exec" -) - -// Command is unsupported on operating systems apart from Linux, Windows, Solaris and Darwin. -func Command(args ...string) *exec.Cmd { - return nil -} diff --git a/vendor/github.com/docker/docker/pkg/reexec/command_windows.go b/vendor/github.com/docker/docker/pkg/reexec/command_windows.go deleted file mode 100644 index ca871c42..00000000 --- a/vendor/github.com/docker/docker/pkg/reexec/command_windows.go +++ /dev/null @@ -1,23 +0,0 @@ -// +build windows - -package reexec - -import ( - "os/exec" -) - -// Self returns the path to the current process's binary. -// Uses os.Args[0]. -func Self() string { - return naiveSelf() -} - -// Command returns *exec.Cmd which has Path as current binary. -// For example if current binary is "docker.exe" at "C:\", then cmd.Path will -// be set to "C:\docker.exe". -func Command(args ...string) *exec.Cmd { - return &exec.Cmd{ - Path: Self(), - Args: args, - } -} diff --git a/vendor/github.com/docker/docker/pkg/reexec/reexec.go b/vendor/github.com/docker/docker/pkg/reexec/reexec.go deleted file mode 100644 index c56671d9..00000000 --- a/vendor/github.com/docker/docker/pkg/reexec/reexec.go +++ /dev/null @@ -1,47 +0,0 @@ -package reexec - -import ( - "fmt" - "os" - "os/exec" - "path/filepath" -) - -var registeredInitializers = make(map[string]func()) - -// Register adds an initialization func under the specified name -func Register(name string, initializer func()) { - if _, exists := registeredInitializers[name]; exists { - panic(fmt.Sprintf("reexec func already registered under name %q", name)) - } - - registeredInitializers[name] = initializer -} - -// Init is called as the first part of the exec process and returns true if an -// initialization function was called. 
-func Init() bool { - initializer, exists := registeredInitializers[os.Args[0]] - if exists { - initializer() - - return true - } - return false -} - -func naiveSelf() string { - name := os.Args[0] - if filepath.Base(name) == name { - if lp, err := exec.LookPath(name); err == nil { - return lp - } - } - // handle conversion of relative paths to absolute - if absName, err := filepath.Abs(name); err == nil { - return absName - } - // if we couldn't get absolute name, return original - // (NOTE: Go only errors on Abs() if os.Getwd fails) - return name -} diff --git a/vendor/github.com/docker/docker/pkg/system/syscall_windows.go b/vendor/github.com/docker/docker/pkg/system/syscall_windows.go index c328a6fb..f6c81292 100644 --- a/vendor/github.com/docker/docker/pkg/system/syscall_windows.go +++ b/vendor/github.com/docker/docker/pkg/system/syscall_windows.go @@ -4,7 +4,7 @@ import ( "syscall" "unsafe" - "github.com/Sirupsen/logrus" + "github.com/sirupsen/logrus" ) var ( diff --git a/vendor/github.com/docker/docker/vendor.conf b/vendor/github.com/docker/docker/vendor.conf index 20460d5c..fb90cf0b 100644 --- a/vendor/github.com/docker/docker/vendor.conf +++ b/vendor/github.com/docker/docker/vendor.conf @@ -2,7 +2,7 @@ github.com/Azure/go-ansiterm 388960b655244e76e24c75f48631564eaefade62 github.com/Microsoft/hcsshim v0.5.25 github.com/Microsoft/go-winio v0.4.2 -github.com/Sirupsen/logrus v0.11.0 +github.com/sirupsen/logrus v1.0.0 github.com/davecgh/go-spew 346938d642f2ec3594ed81d874461961cd0faa76 github.com/docker/libtrust 9cbd2a1374f46905c68a4eb3694a130610adc62a github.com/go-check/check 4ed411733c5785b40214c70bce814c3a3a689609 https://github.com/cpuguy83/check.git diff --git a/vendor/github.com/docker/go-events/broadcast.go b/vendor/github.com/docker/go-events/broadcast.go index 567c1a1e..5120078d 100644 --- a/vendor/github.com/docker/go-events/broadcast.go +++ b/vendor/github.com/docker/go-events/broadcast.go @@ -4,7 +4,7 @@ import ( "fmt" "sync" - "github.com/Sirupsen/logrus" + "github.com/sirupsen/logrus" ) // Broadcaster sends events to multiple, reliable Sinks. 
The goal of this diff --git a/vendor/github.com/docker/go-events/queue.go b/vendor/github.com/docker/go-events/queue.go index a46091d1..4bb770af 100644 --- a/vendor/github.com/docker/go-events/queue.go +++ b/vendor/github.com/docker/go-events/queue.go @@ -4,7 +4,7 @@ import ( "container/list" "sync" - "github.com/Sirupsen/logrus" + "github.com/sirupsen/logrus" ) // Queue accepts all messages into a queue for asynchronous consumption diff --git a/vendor/github.com/docker/go-events/retry.go b/vendor/github.com/docker/go-events/retry.go index 8f6c37c5..2df55d21 100644 --- a/vendor/github.com/docker/go-events/retry.go +++ b/vendor/github.com/docker/go-events/retry.go @@ -7,7 +7,7 @@ import ( "sync/atomic" "time" - "github.com/Sirupsen/logrus" + "github.com/sirupsen/logrus" ) // RetryingSink retries the write until success or an ErrSinkClosed is diff --git a/vendor/github.com/Sirupsen/logrus/LICENSE b/vendor/github.com/sirupsen/logrus/LICENSE similarity index 100% rename from vendor/github.com/Sirupsen/logrus/LICENSE rename to vendor/github.com/sirupsen/logrus/LICENSE diff --git a/vendor/github.com/Sirupsen/logrus/README.md b/vendor/github.com/sirupsen/logrus/README.md similarity index 74% rename from vendor/github.com/Sirupsen/logrus/README.md rename to vendor/github.com/sirupsen/logrus/README.md index 126cd1fc..cbe8b696 100644 --- a/vendor/github.com/Sirupsen/logrus/README.md +++ b/vendor/github.com/sirupsen/logrus/README.md @@ -1,4 +1,4 @@ -# Logrus :walrus: [![Build Status](https://travis-ci.org/Sirupsen/logrus.svg?branch=master)](https://travis-ci.org/Sirupsen/logrus) [![GoDoc](https://godoc.org/github.com/Sirupsen/logrus?status.svg)](https://godoc.org/github.com/Sirupsen/logrus) +# Logrus :walrus: [![Build Status](https://travis-ci.org/sirupsen/logrus.svg?branch=master)](https://travis-ci.org/sirupsen/logrus) [![GoDoc](https://godoc.org/github.com/sirupsen/logrus?status.svg)](https://godoc.org/github.com/sirupsen/logrus) Logrus is a structured logger for Go (golang), completely API compatible with the standard library logger. [Godoc][godoc]. **Please note the Logrus API is not @@ -7,6 +7,17 @@ many large deployments. The core API is unlikely to change much but please version control your Logrus to make sure you aren't fetching latest `master` on every build.** +**Seeing weird case-sensitive problems?** Unfortunately, the author failed to +realize the consequences of renaming to lower-case. Due to the Go package +environment, this caused issues. Regretfully, there's no turning back now. +Everything using `logrus` will need to use the lower-case: +`github.com/sirupsen/logrus`. Any package that isn't, should be changed. + +I am terribly sorry for this inconvenience. Logrus strives hard for backwards +compatibility, and the author failed to realize the cascading consequences of +such a name-change. To fix Glide, see [these +comments](https://github.com/sirupsen/logrus/issues/553#issuecomment-306591437). + Nicely color-coded in development (when a TTY is attached, otherwise just plain text): @@ -46,6 +57,12 @@ time="2015-03-26T01:27:38-04:00" level=fatal msg="The ice breaks!" err=&{0x20822 exit status 1 ``` +#### Case-sensitivity + +The organization's name was changed to lower-case--and this will not be changed +back. If you are getting import conflicts due to case sensitivity, please use +the lower-case import: `github.com/sirupsen/logrus`. 
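The hunks above only flip the casing of the logrus import path, but every importer has to move in the same change: a Go build that mixes `github.com/Sirupsen/logrus` and `github.com/sirupsen/logrus` fails with a case-insensitive import collision. A minimal sketch of a consumer after the switch, using only the stock logrus API; the `main` package below is illustrative and not part of this patch:

```go
package main

import (
	// Lower-case path everywhere after this patch; mixing it with the old
	// "github.com/Sirupsen/logrus" spelling in one build does not compile.
	log "github.com/sirupsen/logrus"
)

func main() {
	log.SetLevel(log.WarnLevel)
	log.WithField("component", "buildkit").Warn("only warnings and above are emitted")
}
```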
+ #### Example The simplest way to use Logrus is simply the package-level exported logger: @@ -54,7 +71,7 @@ The simplest way to use Logrus is simply the package-level exported logger: package main import ( - log "github.com/Sirupsen/logrus" + log "github.com/sirupsen/logrus" ) func main() { @@ -65,7 +82,7 @@ func main() { ``` Note that it's completely api-compatible with the stdlib logger, so you can -replace your `log` imports everywhere with `log "github.com/Sirupsen/logrus"` +replace your `log` imports everywhere with `log "github.com/sirupsen/logrus"` and you'll now have the flexibility of Logrus. You can customize it all you want: @@ -74,15 +91,16 @@ package main import ( "os" - log "github.com/Sirupsen/logrus" + log "github.com/sirupsen/logrus" ) func init() { // Log as JSON instead of the default ASCII formatter. log.SetFormatter(&log.JSONFormatter{}) - // Output to stderr instead of stdout, could also be a file. - log.SetOutput(os.Stderr) + // Output to stdout instead of the default stderr + // Can be any io.Writer, see below for File example + log.SetOutput(os.Stdout) // Only log the warning severity or above. log.SetLevel(log.WarnLevel) @@ -123,7 +141,8 @@ application, you can also create an instance of the `logrus` Logger: package main import ( - "github.com/Sirupsen/logrus" + "os" + "github.com/sirupsen/logrus" ) // Create a new instance of the logger. You can have any number of instances. @@ -132,7 +151,15 @@ var log = logrus.New() func main() { // The API for setting attributes is a little different than the package level // exported logger. See Godoc. - log.Out = os.Stderr + log.Out = os.Stdout + + // You could set this to any `io.Writer` such as a file + // file, err := os.OpenFile("logrus.log", os.O_CREATE|os.O_WRONLY, 0666) + // if err == nil { + // log.Out = file + // } else { + // log.Info("Failed to log to file, using default stderr") + // } log.WithFields(logrus.Fields{ "animal": "walrus", @@ -143,7 +170,7 @@ func main() { #### Fields -Logrus encourages careful, structured logging though logging fields instead of +Logrus encourages careful, structured logging through logging fields instead of long, unparseable error messages. For example, instead of: `log.Fatalf("Failed to send event %s to topic %s with key %d")`, you should log the much more discoverable: @@ -165,6 +192,20 @@ In general, with Logrus using any of the `printf`-family functions should be seen as a hint you should add a field, however, you can still use the `printf`-family functions with Logrus. +#### Default Fields + +Often it's helpful to have fields _always_ attached to log statements in an +application or parts of one. For example, you may want to always log the +`request_id` and `user_ip` in the context of a request. Instead of writing +`log.WithFields(log.Fields{"request_id": request_id, "user_ip": user_ip})` on +every line, you can create a `logrus.Entry` to pass around instead: + +```go +requestLogger := log.WithFields(log.Fields{"request_id": request_id, "user_ip": user_ip}) +requestLogger.Info("something happened on that request") # will log request_id and user_ip +requestLogger.Warn("something not great happened") +``` + #### Hooks You can add hooks for logging levels. For example to send errors to an exception @@ -176,9 +217,9 @@ Logrus comes with [built-in hooks](hooks/). 
Add those, or your custom hook, in ```go import ( - log "github.com/Sirupsen/logrus" + log "github.com/sirupsen/logrus" "gopkg.in/gemnasium/logrus-airbrake-hook.v2" // the package is named "aibrake" - logrus_syslog "github.com/Sirupsen/logrus/hooks/syslog" + logrus_syslog "github.com/sirupsen/logrus/hooks/syslog" "log/syslog" ) @@ -200,40 +241,51 @@ Note: Syslog hook also support connecting to local syslog (Ex. "/dev/log" or "/v | Hook | Description | | ----- | ----------- | -| [Airbrake](https://github.com/gemnasium/logrus-airbrake-hook) | Send errors to the Airbrake API V3. Uses the official [`gobrake`](https://github.com/airbrake/gobrake) behind the scenes. | | [Airbrake "legacy"](https://github.com/gemnasium/logrus-airbrake-legacy-hook) | Send errors to an exception tracking service compatible with the Airbrake API V2. Uses [`airbrake-go`](https://github.com/tobi/airbrake-go) behind the scenes. | -| [Papertrail](https://github.com/polds/logrus-papertrail-hook) | Send errors to the [Papertrail](https://papertrailapp.com) hosted logging service via UDP. | -| [Syslog](https://github.com/Sirupsen/logrus/blob/master/hooks/syslog/syslog.go) | Send errors to remote syslog server. Uses standard library `log/syslog` behind the scenes. | -| [Bugsnag](https://github.com/Shopify/logrus-bugsnag/blob/master/bugsnag.go) | Send errors to the Bugsnag exception tracking service. | -| [Sentry](https://github.com/evalphobia/logrus_sentry) | Send errors to the Sentry error logging and aggregation service. | -| [Hiprus](https://github.com/nubo/hiprus) | Send errors to a channel in hipchat. | -| [Logrusly](https://github.com/sebest/logrusly) | Send logs to [Loggly](https://www.loggly.com/) | -| [Slackrus](https://github.com/johntdyer/slackrus) | Hook for Slack chat. | -| [Journalhook](https://github.com/wercker/journalhook) | Hook for logging to `systemd-journald` | -| [Graylog](https://github.com/gemnasium/logrus-graylog-hook) | Hook for logging to [Graylog](http://graylog2.org/) | -| [Raygun](https://github.com/squirkle/logrus-raygun-hook) | Hook for logging to [Raygun.io](http://raygun.io/) | -| [LFShook](https://github.com/rifflock/lfshook) | Hook for logging to the local filesystem | -| [Honeybadger](https://github.com/agonzalezro/logrus_honeybadger) | Hook for sending exceptions to Honeybadger | -| [Mail](https://github.com/zbindenren/logrus_mail) | Hook for sending exceptions via mail | -| [Rollrus](https://github.com/heroku/rollrus) | Hook for sending errors to rollbar | -| [Fluentd](https://github.com/evalphobia/logrus_fluent) | Hook for logging to fluentd | -| [Mongodb](https://github.com/weekface/mgorus) | Hook for logging to mongodb | -| [Influxus] (http://github.com/vlad-doru/influxus) | Hook for concurrently logging to [InfluxDB] (http://influxdata.com/) | -| [InfluxDB](https://github.com/Abramovic/logrus_influxdb) | Hook for logging to influxdb | -| [Octokit](https://github.com/dorajistyle/logrus-octokit-hook) | Hook for logging to github via octokit | -| [DeferPanic](https://github.com/deferpanic/dp-logrus) | Hook for logging to DeferPanic | -| [Redis-Hook](https://github.com/rogierlommers/logrus-redis-hook) | Hook for logging to a ELK stack (through Redis) | +| [Airbrake](https://github.com/gemnasium/logrus-airbrake-hook) | Send errors to the Airbrake API V3. Uses the official [`gobrake`](https://github.com/airbrake/gobrake) behind the scenes. 
| +| [Amazon Kinesis](https://github.com/evalphobia/logrus_kinesis) | Hook for logging to [Amazon Kinesis](https://aws.amazon.com/kinesis/) | | [Amqp-Hook](https://github.com/vladoatanasov/logrus_amqp) | Hook for logging to Amqp broker (Like RabbitMQ) | -| [KafkaLogrus](https://github.com/goibibo/KafkaLogrus) | Hook for logging to kafka | -| [Typetalk](https://github.com/dragon3/logrus-typetalk-hook) | Hook for logging to [Typetalk](https://www.typetalk.in/) | +| [Bugsnag](https://github.com/Shopify/logrus-bugsnag/blob/master/bugsnag.go) | Send errors to the Bugsnag exception tracking service. | +| [DeferPanic](https://github.com/deferpanic/dp-logrus) | Hook for logging to DeferPanic | +| [Discordrus](https://github.com/kz/discordrus) | Hook for logging to [Discord](https://discordapp.com/) | | [ElasticSearch](https://github.com/sohlich/elogrus) | Hook for logging to ElasticSearch| -| [Sumorus](https://github.com/doublefree/sumorus) | Hook for logging to [SumoLogic](https://www.sumologic.com/)| -| [Scribe](https://github.com/sagar8192/logrus-scribe-hook) | Hook for logging to [Scribe](https://github.com/facebookarchive/scribe)| -| [Logstash](https://github.com/bshuster-repo/logrus-logstash-hook) | Hook for logging to [Logstash](https://www.elastic.co/products/logstash) | -| [logz.io](https://github.com/ripcurld00d/logrus-logzio-hook) | Hook for logging to [logz.io](https://logz.io), a Log as a Service using Logstash | +| [Firehose](https://github.com/beaubrewer/logrus_firehose) | Hook for logging to [Amazon Firehose](https://aws.amazon.com/kinesis/firehose/) +| [Fluentd](https://github.com/evalphobia/logrus_fluent) | Hook for logging to fluentd | +| [Go-Slack](https://github.com/multiplay/go-slack) | Hook for logging to [Slack](https://slack.com) | +| [Graylog](https://github.com/gemnasium/logrus-graylog-hook) | Hook for logging to [Graylog](http://graylog2.org/) | +| [Hiprus](https://github.com/nubo/hiprus) | Send errors to a channel in hipchat. 
| +| [Honeybadger](https://github.com/agonzalezro/logrus_honeybadger) | Hook for sending exceptions to Honeybadger | +| [InfluxDB](https://github.com/Abramovic/logrus_influxdb) | Hook for logging to influxdb | +| [Influxus](http://github.com/vlad-doru/influxus) | Hook for concurrently logging to [InfluxDB](http://influxdata.com/) | +| [Journalhook](https://github.com/wercker/journalhook) | Hook for logging to `systemd-journald` | +| [KafkaLogrus](https://github.com/goibibo/KafkaLogrus) | Hook for logging to kafka | +| [LFShook](https://github.com/rifflock/lfshook) | Hook for logging to the local filesystem | +| [Logentries](https://github.com/jcftang/logentriesrus) | Hook for logging to [Logentries](https://logentries.com/) | +| [Logentrus](https://github.com/puddingfactory/logentrus) | Hook for logging to [Logentries](https://logentries.com/) | | [Logmatic.io](https://github.com/logmatic/logmatic-go) | Hook for logging to [Logmatic.io](http://logmatic.io/) | +| [Logrusly](https://github.com/sebest/logrusly) | Send logs to [Loggly](https://www.loggly.com/) | +| [Logstash](https://github.com/bshuster-repo/logrus-logstash-hook) | Hook for logging to [Logstash](https://www.elastic.co/products/logstash) | +| [Mail](https://github.com/zbindenren/logrus_mail) | Hook for sending exceptions via mail | +| [Mongodb](https://github.com/weekface/mgorus) | Hook for logging to mongodb | +| [NATS-Hook](https://github.com/rybit/nats_logrus_hook) | Hook for logging to [NATS](https://nats.io) | +| [Octokit](https://github.com/dorajistyle/logrus-octokit-hook) | Hook for logging to github via octokit | +| [Papertrail](https://github.com/polds/logrus-papertrail-hook) | Send errors to the [Papertrail](https://papertrailapp.com) hosted logging service via UDP. | +| [PostgreSQL](https://github.com/gemnasium/logrus-postgresql-hook) | Send logs to [PostgreSQL](http://postgresql.org) | | [Pushover](https://github.com/toorop/logrus_pushover) | Send error via [Pushover](https://pushover.net) | - +| [Raygun](https://github.com/squirkle/logrus-raygun-hook) | Hook for logging to [Raygun.io](http://raygun.io/) | +| [Redis-Hook](https://github.com/rogierlommers/logrus-redis-hook) | Hook for logging to a ELK stack (through Redis) | +| [Rollrus](https://github.com/heroku/rollrus) | Hook for sending errors to rollbar | +| [Scribe](https://github.com/sagar8192/logrus-scribe-hook) | Hook for logging to [Scribe](https://github.com/facebookarchive/scribe)| +| [Sentry](https://github.com/evalphobia/logrus_sentry) | Send errors to the Sentry error logging and aggregation service. | +| [Slackrus](https://github.com/johntdyer/slackrus) | Hook for Slack chat. | +| [Stackdriver](https://github.com/knq/sdhook) | Hook for logging to [Google Stackdriver](https://cloud.google.com/logging/) | +| [Sumorus](https://github.com/doublefree/sumorus) | Hook for logging to [SumoLogic](https://www.sumologic.com/)| +| [Syslog](https://github.com/Sirupsen/logrus/blob/master/hooks/syslog/syslog.go) | Send errors to remote syslog server. Uses standard library `log/syslog` behind the scenes. | +| [Syslog TLS](https://github.com/shinji62/logrus-syslog-ng) | Send errors to remote syslog server with TLS support. 
| +| [TraceView](https://github.com/evalphobia/logrus_appneta) | Hook for logging to [AppNeta TraceView](https://www.appneta.com/products/traceview/) | +| [Typetalk](https://github.com/dragon3/logrus-typetalk-hook) | Hook for logging to [Typetalk](https://www.typetalk.in/) | +| [logz.io](https://github.com/ripcurld00d/logrus-logzio-hook) | Hook for logging to [logz.io](https://logz.io), a Log as a Service using Logstash | +| [SQS-Hook](https://github.com/tsarpaul/logrus_sqs) | Hook for logging to [Amazon Simple Queue Service (SQS)](https://aws.amazon.com/sqs/) | #### Level logging @@ -282,7 +334,7 @@ could do: ```go import ( - log "github.com/Sirupsen/logrus" + log "github.com/sirupsen/logrus" ) init() { @@ -309,8 +361,11 @@ The built-in logging formatters are: without colors. * *Note:* to force colored output when there is no TTY, set the `ForceColors` field to `true`. To force no colored output even if there is a TTY set the - `DisableColors` field to `true` + `DisableColors` field to `true`. For Windows, see + [github.com/mattn/go-colorable](https://github.com/mattn/go-colorable). + * All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#TextFormatter). * `logrus.JSONFormatter`. Logs fields as JSON. + * All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#JSONFormatter). Third party logging formatters: @@ -359,6 +414,18 @@ srv := http.Server{ Each line written to that writer will be printed the usual way, using formatters and hooks. The level for those entries is `info`. +This means that we can override the standard library logger easily: + +```go +logger := logrus.New() +logger.Formatter = &logrus.JSONFormatter{} + +// Use logrus for standard log output +// Note that `log` here references stdlib's log +// Not logrus imported under the name `log`. +log.SetOutput(logger.Writer()) +``` + #### Rotation Log rotation is not provided with Logrus. Log rotation should be done by an @@ -370,7 +437,7 @@ entries. It should not be a feature of the application-level logger. | Tool | Description | | ---- | ----------- | |[Logrus Mate](https://github.com/gogap/logrus_mate)|Logrus mate is a tool for Logrus to manage loggers, you can initial logger's level, hook and formatter by config file, the logger will generated with different config at different environment.| -|[Logrus Viper Helper](https://github.com/heirko/go-contrib/tree/master/logrusHelper)|An Helper arround Logrus to wrap with spf13/Viper to load configuration with fangs! And to simplify Logrus configuration use some behavior of [Logrus Mate](https://github.com/gogap/logrus_mate). [sample](https://github.com/heirko/iris-contrib/blob/master/middleware/logrus-logger/example) | +|[Logrus Viper Helper](https://github.com/heirko/go-contrib/tree/master/logrusHelper)|An Helper around Logrus to wrap with spf13/Viper to load configuration with fangs! And to simplify Logrus configuration use some behavior of [Logrus Mate](https://github.com/gogap/logrus_mate). [sample](https://github.com/heirko/iris-contrib/blob/master/middleware/logrus-logger/example) | #### Testing @@ -380,15 +447,24 @@ Logrus has a built in facility for asserting the presence of log messages. 
This * a test logger (`test.NewNullLogger`) that just records log messages (and does not output any): ```go -logger, hook := NewNullLogger() -logger.Error("Hello error") +import( + "github.com/sirupsen/logrus" + "github.com/sirupsen/logrus/hooks/null" + "github.com/stretchr/testify/assert" + "testing" +) -assert.Equal(1, len(hook.Entries)) -assert.Equal(logrus.ErrorLevel, hook.LastEntry().Level) -assert.Equal("Hello error", hook.LastEntry().Message) +func TestSomething(t*testing.T){ + logger, hook := null.NewNullLogger() + logger.Error("Helloerror") -hook.Reset() -assert.Nil(hook.LastEntry()) + assert.Equal(t, 1, len(hook.Entries)) + assert.Equal(t, logrus.ErrorLevel, hook.LastEntry().Level) + assert.Equal(t, "Helloerror", hook.LastEntry().Message) + + hook.Reset() + assert.Nil(t, hook.LastEntry()) +} ``` #### Fatal handlers @@ -407,7 +483,7 @@ logrus.RegisterExitHandler(handler) ... ``` -#### Thread safty +#### Thread safety By default Logger is protected by mutex for concurrent writes, this mutex is invoked when calling hooks and writing logs. If you are sure such locking is not needed, you can call logger.SetNoLock() to disable the locking. diff --git a/vendor/github.com/Sirupsen/logrus/alt_exit.go b/vendor/github.com/sirupsen/logrus/alt_exit.go similarity index 96% rename from vendor/github.com/Sirupsen/logrus/alt_exit.go rename to vendor/github.com/sirupsen/logrus/alt_exit.go index b4c9e847..8af90637 100644 --- a/vendor/github.com/Sirupsen/logrus/alt_exit.go +++ b/vendor/github.com/sirupsen/logrus/alt_exit.go @@ -1,7 +1,7 @@ package logrus // The following code was sourced and modified from the -// https://bitbucket.org/tebeka/atexit package governed by the following license: +// https://github.com/tebeka/atexit package governed by the following license: // // Copyright (c) 2012 Miki Tebeka . 
// diff --git a/vendor/github.com/Sirupsen/logrus/doc.go b/vendor/github.com/sirupsen/logrus/doc.go similarity index 83% rename from vendor/github.com/Sirupsen/logrus/doc.go rename to vendor/github.com/sirupsen/logrus/doc.go index dddd5f87..da67aba0 100644 --- a/vendor/github.com/Sirupsen/logrus/doc.go +++ b/vendor/github.com/sirupsen/logrus/doc.go @@ -7,7 +7,7 @@ The simplest way to use Logrus is simply the package-level exported logger: package main import ( - log "github.com/Sirupsen/logrus" + log "github.com/sirupsen/logrus" ) func main() { @@ -21,6 +21,6 @@ The simplest way to use Logrus is simply the package-level exported logger: Output: time="2015-09-07T08:48:33Z" level=info msg="A walrus appears" animal=walrus number=1 size=10 -For a full guide visit https://github.com/Sirupsen/logrus +For a full guide visit https://github.com/sirupsen/logrus */ package logrus diff --git a/vendor/github.com/Sirupsen/logrus/entry.go b/vendor/github.com/sirupsen/logrus/entry.go similarity index 89% rename from vendor/github.com/Sirupsen/logrus/entry.go rename to vendor/github.com/sirupsen/logrus/entry.go index 4edbe7a2..320e5d5b 100644 --- a/vendor/github.com/Sirupsen/logrus/entry.go +++ b/vendor/github.com/sirupsen/logrus/entry.go @@ -126,7 +126,7 @@ func (entry Entry) log(level Level, msg string) { } func (entry *Entry) Debug(args ...interface{}) { - if entry.Logger.Level >= DebugLevel { + if entry.Logger.level() >= DebugLevel { entry.log(DebugLevel, fmt.Sprint(args...)) } } @@ -136,13 +136,13 @@ func (entry *Entry) Print(args ...interface{}) { } func (entry *Entry) Info(args ...interface{}) { - if entry.Logger.Level >= InfoLevel { + if entry.Logger.level() >= InfoLevel { entry.log(InfoLevel, fmt.Sprint(args...)) } } func (entry *Entry) Warn(args ...interface{}) { - if entry.Logger.Level >= WarnLevel { + if entry.Logger.level() >= WarnLevel { entry.log(WarnLevel, fmt.Sprint(args...)) } } @@ -152,20 +152,20 @@ func (entry *Entry) Warning(args ...interface{}) { } func (entry *Entry) Error(args ...interface{}) { - if entry.Logger.Level >= ErrorLevel { + if entry.Logger.level() >= ErrorLevel { entry.log(ErrorLevel, fmt.Sprint(args...)) } } func (entry *Entry) Fatal(args ...interface{}) { - if entry.Logger.Level >= FatalLevel { + if entry.Logger.level() >= FatalLevel { entry.log(FatalLevel, fmt.Sprint(args...)) } Exit(1) } func (entry *Entry) Panic(args ...interface{}) { - if entry.Logger.Level >= PanicLevel { + if entry.Logger.level() >= PanicLevel { entry.log(PanicLevel, fmt.Sprint(args...)) } panic(fmt.Sprint(args...)) @@ -174,13 +174,13 @@ func (entry *Entry) Panic(args ...interface{}) { // Entry Printf family functions func (entry *Entry) Debugf(format string, args ...interface{}) { - if entry.Logger.Level >= DebugLevel { + if entry.Logger.level() >= DebugLevel { entry.Debug(fmt.Sprintf(format, args...)) } } func (entry *Entry) Infof(format string, args ...interface{}) { - if entry.Logger.Level >= InfoLevel { + if entry.Logger.level() >= InfoLevel { entry.Info(fmt.Sprintf(format, args...)) } } @@ -190,7 +190,7 @@ func (entry *Entry) Printf(format string, args ...interface{}) { } func (entry *Entry) Warnf(format string, args ...interface{}) { - if entry.Logger.Level >= WarnLevel { + if entry.Logger.level() >= WarnLevel { entry.Warn(fmt.Sprintf(format, args...)) } } @@ -200,20 +200,20 @@ func (entry *Entry) Warningf(format string, args ...interface{}) { } func (entry *Entry) Errorf(format string, args ...interface{}) { - if entry.Logger.Level >= ErrorLevel { + if entry.Logger.level() >= ErrorLevel { 
entry.Error(fmt.Sprintf(format, args...)) } } func (entry *Entry) Fatalf(format string, args ...interface{}) { - if entry.Logger.Level >= FatalLevel { + if entry.Logger.level() >= FatalLevel { entry.Fatal(fmt.Sprintf(format, args...)) } Exit(1) } func (entry *Entry) Panicf(format string, args ...interface{}) { - if entry.Logger.Level >= PanicLevel { + if entry.Logger.level() >= PanicLevel { entry.Panic(fmt.Sprintf(format, args...)) } } @@ -221,13 +221,13 @@ func (entry *Entry) Panicf(format string, args ...interface{}) { // Entry Println family functions func (entry *Entry) Debugln(args ...interface{}) { - if entry.Logger.Level >= DebugLevel { + if entry.Logger.level() >= DebugLevel { entry.Debug(entry.sprintlnn(args...)) } } func (entry *Entry) Infoln(args ...interface{}) { - if entry.Logger.Level >= InfoLevel { + if entry.Logger.level() >= InfoLevel { entry.Info(entry.sprintlnn(args...)) } } @@ -237,7 +237,7 @@ func (entry *Entry) Println(args ...interface{}) { } func (entry *Entry) Warnln(args ...interface{}) { - if entry.Logger.Level >= WarnLevel { + if entry.Logger.level() >= WarnLevel { entry.Warn(entry.sprintlnn(args...)) } } @@ -247,20 +247,20 @@ func (entry *Entry) Warningln(args ...interface{}) { } func (entry *Entry) Errorln(args ...interface{}) { - if entry.Logger.Level >= ErrorLevel { + if entry.Logger.level() >= ErrorLevel { entry.Error(entry.sprintlnn(args...)) } } func (entry *Entry) Fatalln(args ...interface{}) { - if entry.Logger.Level >= FatalLevel { + if entry.Logger.level() >= FatalLevel { entry.Fatal(entry.sprintlnn(args...)) } Exit(1) } func (entry *Entry) Panicln(args ...interface{}) { - if entry.Logger.Level >= PanicLevel { + if entry.Logger.level() >= PanicLevel { entry.Panic(entry.sprintlnn(args...)) } } diff --git a/vendor/github.com/Sirupsen/logrus/exported.go b/vendor/github.com/sirupsen/logrus/exported.go similarity index 99% rename from vendor/github.com/Sirupsen/logrus/exported.go rename to vendor/github.com/sirupsen/logrus/exported.go index 9a0120ac..1aeaa90b 100644 --- a/vendor/github.com/Sirupsen/logrus/exported.go +++ b/vendor/github.com/sirupsen/logrus/exported.go @@ -31,14 +31,14 @@ func SetFormatter(formatter Formatter) { func SetLevel(level Level) { std.mu.Lock() defer std.mu.Unlock() - std.Level = level + std.setLevel(level) } // GetLevel returns the standard logger level. func GetLevel() Level { std.mu.Lock() defer std.mu.Unlock() - return std.Level + return std.level() } // AddHook adds a hook to the standard logger hooks. 
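The entry.go and exported.go hunks above route every level check through a new `level()` accessor (and `SetLevel` through `setLevel`), which the logger.go hunks below implement with `sync/atomic` once `Level` becomes a `uint32`. A small sketch of what that buys, assuming only the public `SetLevel`/`WithField` API; the worker loop is hypothetical:

```go
package main

import (
	"sync"

	log "github.com/sirupsen/logrus"
)

func main() {
	log.SetLevel(log.InfoLevel)

	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func(n int) {
			defer wg.Done()
			// Each call reads the level through the atomic accessor,
			// so the SetLevel below does not race with these checks.
			log.WithField("worker", n).Debug("noisy detail")
			log.WithField("worker", n).Info("progress")
		}(i)
	}

	// Safe to flip while the goroutines above are still logging.
	log.SetLevel(log.DebugLevel)
	wg.Wait()
}
```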
diff --git a/vendor/github.com/Sirupsen/logrus/formatter.go b/vendor/github.com/sirupsen/logrus/formatter.go similarity index 100% rename from vendor/github.com/Sirupsen/logrus/formatter.go rename to vendor/github.com/sirupsen/logrus/formatter.go diff --git a/vendor/github.com/Sirupsen/logrus/hooks.go b/vendor/github.com/sirupsen/logrus/hooks.go similarity index 100% rename from vendor/github.com/Sirupsen/logrus/hooks.go rename to vendor/github.com/sirupsen/logrus/hooks.go diff --git a/vendor/github.com/sirupsen/logrus/json_formatter.go b/vendor/github.com/sirupsen/logrus/json_formatter.go new file mode 100644 index 00000000..e787ea17 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/json_formatter.go @@ -0,0 +1,74 @@ +package logrus + +import ( + "encoding/json" + "fmt" +) + +type fieldKey string +type FieldMap map[fieldKey]string + +const ( + FieldKeyMsg = "msg" + FieldKeyLevel = "level" + FieldKeyTime = "time" +) + +func (f FieldMap) resolve(key fieldKey) string { + if k, ok := f[key]; ok { + return k + } + + return string(key) +} + +type JSONFormatter struct { + // TimestampFormat sets the format used for marshaling timestamps. + TimestampFormat string + + // DisableTimestamp allows disabling automatic timestamps in output + DisableTimestamp bool + + // FieldMap allows users to customize the names of keys for various fields. + // As an example: + // formatter := &JSONFormatter{ + // FieldMap: FieldMap{ + // FieldKeyTime: "@timestamp", + // FieldKeyLevel: "@level", + // FieldKeyMsg: "@message", + // }, + // } + FieldMap FieldMap +} + +func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) { + data := make(Fields, len(entry.Data)+3) + for k, v := range entry.Data { + switch v := v.(type) { + case error: + // Otherwise errors are ignored by `encoding/json` + // https://github.com/sirupsen/logrus/issues/137 + data[k] = v.Error() + default: + data[k] = v + } + } + prefixFieldClashes(data) + + timestampFormat := f.TimestampFormat + if timestampFormat == "" { + timestampFormat = DefaultTimestampFormat + } + + if !f.DisableTimestamp { + data[f.FieldMap.resolve(FieldKeyTime)] = entry.Time.Format(timestampFormat) + } + data[f.FieldMap.resolve(FieldKeyMsg)] = entry.Message + data[f.FieldMap.resolve(FieldKeyLevel)] = entry.Level.String() + + serialized, err := json.Marshal(data) + if err != nil { + return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err) + } + return append(serialized, '\n'), nil +} diff --git a/vendor/github.com/Sirupsen/logrus/logger.go b/vendor/github.com/sirupsen/logrus/logger.go similarity index 88% rename from vendor/github.com/Sirupsen/logrus/logger.go rename to vendor/github.com/sirupsen/logrus/logger.go index b769f3d3..370fff5d 100644 --- a/vendor/github.com/Sirupsen/logrus/logger.go +++ b/vendor/github.com/sirupsen/logrus/logger.go @@ -4,6 +4,7 @@ import ( "io" "os" "sync" + "sync/atomic" ) type Logger struct { @@ -112,7 +113,7 @@ func (logger *Logger) WithError(err error) *Entry { } func (logger *Logger) Debugf(format string, args ...interface{}) { - if logger.Level >= DebugLevel { + if logger.level() >= DebugLevel { entry := logger.newEntry() entry.Debugf(format, args...) logger.releaseEntry(entry) @@ -120,7 +121,7 @@ func (logger *Logger) Debugf(format string, args ...interface{}) { } func (logger *Logger) Infof(format string, args ...interface{}) { - if logger.Level >= InfoLevel { + if logger.level() >= InfoLevel { entry := logger.newEntry() entry.Infof(format, args...) 
logger.releaseEntry(entry) @@ -134,7 +135,7 @@ func (logger *Logger) Printf(format string, args ...interface{}) { } func (logger *Logger) Warnf(format string, args ...interface{}) { - if logger.Level >= WarnLevel { + if logger.level() >= WarnLevel { entry := logger.newEntry() entry.Warnf(format, args...) logger.releaseEntry(entry) @@ -142,7 +143,7 @@ func (logger *Logger) Warnf(format string, args ...interface{}) { } func (logger *Logger) Warningf(format string, args ...interface{}) { - if logger.Level >= WarnLevel { + if logger.level() >= WarnLevel { entry := logger.newEntry() entry.Warnf(format, args...) logger.releaseEntry(entry) @@ -150,7 +151,7 @@ func (logger *Logger) Warningf(format string, args ...interface{}) { } func (logger *Logger) Errorf(format string, args ...interface{}) { - if logger.Level >= ErrorLevel { + if logger.level() >= ErrorLevel { entry := logger.newEntry() entry.Errorf(format, args...) logger.releaseEntry(entry) @@ -158,7 +159,7 @@ func (logger *Logger) Errorf(format string, args ...interface{}) { } func (logger *Logger) Fatalf(format string, args ...interface{}) { - if logger.Level >= FatalLevel { + if logger.level() >= FatalLevel { entry := logger.newEntry() entry.Fatalf(format, args...) logger.releaseEntry(entry) @@ -167,7 +168,7 @@ func (logger *Logger) Fatalf(format string, args ...interface{}) { } func (logger *Logger) Panicf(format string, args ...interface{}) { - if logger.Level >= PanicLevel { + if logger.level() >= PanicLevel { entry := logger.newEntry() entry.Panicf(format, args...) logger.releaseEntry(entry) @@ -175,7 +176,7 @@ func (logger *Logger) Panicf(format string, args ...interface{}) { } func (logger *Logger) Debug(args ...interface{}) { - if logger.Level >= DebugLevel { + if logger.level() >= DebugLevel { entry := logger.newEntry() entry.Debug(args...) logger.releaseEntry(entry) @@ -183,7 +184,7 @@ func (logger *Logger) Debug(args ...interface{}) { } func (logger *Logger) Info(args ...interface{}) { - if logger.Level >= InfoLevel { + if logger.level() >= InfoLevel { entry := logger.newEntry() entry.Info(args...) logger.releaseEntry(entry) @@ -197,7 +198,7 @@ func (logger *Logger) Print(args ...interface{}) { } func (logger *Logger) Warn(args ...interface{}) { - if logger.Level >= WarnLevel { + if logger.level() >= WarnLevel { entry := logger.newEntry() entry.Warn(args...) logger.releaseEntry(entry) @@ -205,7 +206,7 @@ func (logger *Logger) Warn(args ...interface{}) { } func (logger *Logger) Warning(args ...interface{}) { - if logger.Level >= WarnLevel { + if logger.level() >= WarnLevel { entry := logger.newEntry() entry.Warn(args...) logger.releaseEntry(entry) @@ -213,7 +214,7 @@ func (logger *Logger) Warning(args ...interface{}) { } func (logger *Logger) Error(args ...interface{}) { - if logger.Level >= ErrorLevel { + if logger.level() >= ErrorLevel { entry := logger.newEntry() entry.Error(args...) logger.releaseEntry(entry) @@ -221,7 +222,7 @@ func (logger *Logger) Error(args ...interface{}) { } func (logger *Logger) Fatal(args ...interface{}) { - if logger.Level >= FatalLevel { + if logger.level() >= FatalLevel { entry := logger.newEntry() entry.Fatal(args...) logger.releaseEntry(entry) @@ -230,7 +231,7 @@ func (logger *Logger) Fatal(args ...interface{}) { } func (logger *Logger) Panic(args ...interface{}) { - if logger.Level >= PanicLevel { + if logger.level() >= PanicLevel { entry := logger.newEntry() entry.Panic(args...) 
logger.releaseEntry(entry) @@ -238,7 +239,7 @@ func (logger *Logger) Panic(args ...interface{}) { } func (logger *Logger) Debugln(args ...interface{}) { - if logger.Level >= DebugLevel { + if logger.level() >= DebugLevel { entry := logger.newEntry() entry.Debugln(args...) logger.releaseEntry(entry) @@ -246,7 +247,7 @@ func (logger *Logger) Debugln(args ...interface{}) { } func (logger *Logger) Infoln(args ...interface{}) { - if logger.Level >= InfoLevel { + if logger.level() >= InfoLevel { entry := logger.newEntry() entry.Infoln(args...) logger.releaseEntry(entry) @@ -260,7 +261,7 @@ func (logger *Logger) Println(args ...interface{}) { } func (logger *Logger) Warnln(args ...interface{}) { - if logger.Level >= WarnLevel { + if logger.level() >= WarnLevel { entry := logger.newEntry() entry.Warnln(args...) logger.releaseEntry(entry) @@ -268,7 +269,7 @@ func (logger *Logger) Warnln(args ...interface{}) { } func (logger *Logger) Warningln(args ...interface{}) { - if logger.Level >= WarnLevel { + if logger.level() >= WarnLevel { entry := logger.newEntry() entry.Warnln(args...) logger.releaseEntry(entry) @@ -276,7 +277,7 @@ func (logger *Logger) Warningln(args ...interface{}) { } func (logger *Logger) Errorln(args ...interface{}) { - if logger.Level >= ErrorLevel { + if logger.level() >= ErrorLevel { entry := logger.newEntry() entry.Errorln(args...) logger.releaseEntry(entry) @@ -284,7 +285,7 @@ func (logger *Logger) Errorln(args ...interface{}) { } func (logger *Logger) Fatalln(args ...interface{}) { - if logger.Level >= FatalLevel { + if logger.level() >= FatalLevel { entry := logger.newEntry() entry.Fatalln(args...) logger.releaseEntry(entry) @@ -293,7 +294,7 @@ func (logger *Logger) Fatalln(args ...interface{}) { } func (logger *Logger) Panicln(args ...interface{}) { - if logger.Level >= PanicLevel { + if logger.level() >= PanicLevel { entry := logger.newEntry() entry.Panicln(args...) logger.releaseEntry(entry) @@ -306,3 +307,11 @@ func (logger *Logger) Panicln(args ...interface{}) { func (logger *Logger) SetNoLock() { logger.mu.Disable() } + +func (logger *Logger) level() Level { + return Level(atomic.LoadUint32((*uint32)(&logger.Level))) +} + +func (logger *Logger) setLevel(level Level) { + atomic.StoreUint32((*uint32)(&logger.Level), uint32(level)) +} diff --git a/vendor/github.com/Sirupsen/logrus/logrus.go b/vendor/github.com/sirupsen/logrus/logrus.go similarity index 99% rename from vendor/github.com/Sirupsen/logrus/logrus.go rename to vendor/github.com/sirupsen/logrus/logrus.go index e5966911..dd389997 100644 --- a/vendor/github.com/Sirupsen/logrus/logrus.go +++ b/vendor/github.com/sirupsen/logrus/logrus.go @@ -10,7 +10,7 @@ import ( type Fields map[string]interface{} // Level type -type Level uint8 +type Level uint32 // Convert the Level to a string. E.g. PanicLevel becomes "panic". func (level Level) String() string { diff --git a/vendor/github.com/Sirupsen/logrus/terminal_appengine.go b/vendor/github.com/sirupsen/logrus/terminal_appengine.go similarity index 71% rename from vendor/github.com/Sirupsen/logrus/terminal_appengine.go rename to vendor/github.com/sirupsen/logrus/terminal_appengine.go index 1960169e..e011a869 100644 --- a/vendor/github.com/Sirupsen/logrus/terminal_appengine.go +++ b/vendor/github.com/sirupsen/logrus/terminal_appengine.go @@ -2,7 +2,9 @@ package logrus +import "io" + // IsTerminal returns true if stderr's file descriptor is a terminal. 
-func IsTerminal() bool { +func IsTerminal(f io.Writer) bool { return true } diff --git a/vendor/github.com/Sirupsen/logrus/terminal_bsd.go b/vendor/github.com/sirupsen/logrus/terminal_bsd.go similarity index 100% rename from vendor/github.com/Sirupsen/logrus/terminal_bsd.go rename to vendor/github.com/sirupsen/logrus/terminal_bsd.go diff --git a/vendor/github.com/Sirupsen/logrus/terminal_linux.go b/vendor/github.com/sirupsen/logrus/terminal_linux.go similarity index 100% rename from vendor/github.com/Sirupsen/logrus/terminal_linux.go rename to vendor/github.com/sirupsen/logrus/terminal_linux.go diff --git a/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go b/vendor/github.com/sirupsen/logrus/terminal_notwindows.go similarity index 60% rename from vendor/github.com/Sirupsen/logrus/terminal_notwindows.go rename to vendor/github.com/sirupsen/logrus/terminal_notwindows.go index 329038f6..190297ab 100644 --- a/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go +++ b/vendor/github.com/sirupsen/logrus/terminal_notwindows.go @@ -9,14 +9,20 @@ package logrus import ( + "io" + "os" "syscall" "unsafe" ) // IsTerminal returns true if stderr's file descriptor is a terminal. -func IsTerminal() bool { - fd := syscall.Stderr +func IsTerminal(f io.Writer) bool { var termios Termios - _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0) - return err == 0 + switch v := f.(type) { + case *os.File: + _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(v.Fd()), ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0) + return err == 0 + default: + return false + } } diff --git a/vendor/github.com/sirupsen/logrus/terminal_solaris.go b/vendor/github.com/sirupsen/logrus/terminal_solaris.go new file mode 100644 index 00000000..3c86b1ab --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/terminal_solaris.go @@ -0,0 +1,21 @@ +// +build solaris,!appengine + +package logrus + +import ( + "io" + "os" + + "golang.org/x/sys/unix" +) + +// IsTerminal returns true if the given file descriptor is a terminal. +func IsTerminal(f io.Writer) bool { + switch v := f.(type) { + case *os.File: + _, err := unix.IoctlGetTermios(int(v.Fd()), unix.TCGETA) + return err == nil + default: + return false + } +} diff --git a/vendor/github.com/sirupsen/logrus/terminal_windows.go b/vendor/github.com/sirupsen/logrus/terminal_windows.go new file mode 100644 index 00000000..7a336307 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/terminal_windows.go @@ -0,0 +1,82 @@ +// Based on ssh/terminal: +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build windows,!appengine + +package logrus + +import ( + "bytes" + "errors" + "io" + "os" + "os/exec" + "strconv" + "strings" + "syscall" + "unsafe" +) + +var kernel32 = syscall.NewLazyDLL("kernel32.dll") + +var ( + procGetConsoleMode = kernel32.NewProc("GetConsoleMode") + procSetConsoleMode = kernel32.NewProc("SetConsoleMode") +) + +const ( + enableProcessedOutput = 0x0001 + enableWrapAtEolOutput = 0x0002 + enableVirtualTerminalProcessing = 0x0004 +) + +func getVersion() (float64, error) { + stdout, stderr := &bytes.Buffer{}, &bytes.Buffer{} + cmd := exec.Command("cmd", "ver") + cmd.Stdout = stdout + cmd.Stderr = stderr + err := cmd.Run() + if err != nil { + return -1, err + } + + // The output should be like "Microsoft Windows [Version XX.X.XXXXXX]" + version := strings.Replace(stdout.String(), "\n", "", -1) + version = strings.Replace(version, "\r\n", "", -1) + + x1 := strings.Index(version, "[Version") + + if x1 == -1 || strings.Index(version, "]") == -1 { + return -1, errors.New("Can't determine Windows version") + } + + return strconv.ParseFloat(version[x1+9:x1+13], 64) +} + +func init() { + ver, err := getVersion() + if err != nil { + return + } + + // Activate Virtual Processing for Windows CMD + // Info: https://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx + if ver >= 10 { + handle := syscall.Handle(os.Stderr.Fd()) + procSetConsoleMode.Call(uintptr(handle), enableProcessedOutput|enableWrapAtEolOutput|enableVirtualTerminalProcessing) + } +} + +// IsTerminal returns true if stderr's file descriptor is a terminal. +func IsTerminal(f io.Writer) bool { + switch v := f.(type) { + case *os.File: + var st uint32 + r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(v.Fd()), uintptr(unsafe.Pointer(&st)), 0) + return r != 0 && e == 0 + default: + return false + } +} diff --git a/vendor/github.com/Sirupsen/logrus/text_formatter.go b/vendor/github.com/sirupsen/logrus/text_formatter.go similarity index 73% rename from vendor/github.com/Sirupsen/logrus/text_formatter.go rename to vendor/github.com/sirupsen/logrus/text_formatter.go index 9114b3ca..ba888540 100644 --- a/vendor/github.com/Sirupsen/logrus/text_formatter.go +++ b/vendor/github.com/sirupsen/logrus/text_formatter.go @@ -3,9 +3,9 @@ package logrus import ( "bytes" "fmt" - "runtime" "sort" "strings" + "sync" "time" ) @@ -20,16 +20,10 @@ const ( var ( baseTimestamp time.Time - isTerminal bool ) func init() { baseTimestamp = time.Now() - isTerminal = IsTerminal() -} - -func miniTS() int { - return int(time.Since(baseTimestamp) / time.Second) } type TextFormatter struct { @@ -54,11 +48,32 @@ type TextFormatter struct { // that log extremely frequently and don't use the JSON formatter this may not // be desired. DisableSorting bool + + // QuoteEmptyFields will wrap empty fields in quotes if true + QuoteEmptyFields bool + + // QuoteCharacter can be set to the override the default quoting character " + // with something else. For example: ', or `. 
+ QuoteCharacter string + + // Whether the logger's out is to a terminal + isTerminal bool + + sync.Once +} + +func (f *TextFormatter) init(entry *Entry) { + if len(f.QuoteCharacter) == 0 { + f.QuoteCharacter = "\"" + } + if entry.Logger != nil { + f.isTerminal = IsTerminal(entry.Logger.Out) + } } func (f *TextFormatter) Format(entry *Entry) ([]byte, error) { var b *bytes.Buffer - var keys []string = make([]string, 0, len(entry.Data)) + keys := make([]string, 0, len(entry.Data)) for k := range entry.Data { keys = append(keys, k) } @@ -74,8 +89,9 @@ func (f *TextFormatter) Format(entry *Entry) ([]byte, error) { prefixFieldClashes(entry.Data) - isColorTerminal := isTerminal && (runtime.GOOS != "windows") - isColored := (f.ForceColors || isColorTerminal) && !f.DisableColors + f.Do(func() { f.init(entry) }) + + isColored := (f.ForceColors || f.isTerminal) && !f.DisableColors timestampFormat := f.TimestampFormat if timestampFormat == "" { @@ -115,8 +131,10 @@ func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []strin levelText := strings.ToUpper(entry.Level.String())[0:4] - if !f.FullTimestamp { - fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, miniTS(), entry.Message) + if f.DisableTimestamp { + fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m %-44s ", levelColor, levelText, entry.Message) + } else if !f.FullTimestamp { + fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, int(entry.Time.Sub(baseTimestamp)/time.Second), entry.Message) } else { fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s] %-44s ", levelColor, levelText, entry.Time.Format(timestampFormat), entry.Message) } @@ -127,7 +145,10 @@ func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []strin } } -func needsQuoting(text string) bool { +func (f *TextFormatter) needsQuoting(text string) bool { + if f.QuoteEmptyFields && len(text) == 0 { + return true + } for _, ch := range text { if !((ch >= 'a' && ch <= 'z') || (ch >= 'A' && ch <= 'Z') || @@ -150,17 +171,17 @@ func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key string, value interf func (f *TextFormatter) appendValue(b *bytes.Buffer, value interface{}) { switch value := value.(type) { case string: - if !needsQuoting(value) { + if !f.needsQuoting(value) { b.WriteString(value) } else { - fmt.Fprintf(b, "%q", value) + fmt.Fprintf(b, "%s%v%s", f.QuoteCharacter, value, f.QuoteCharacter) } case error: errmsg := value.Error() - if !needsQuoting(errmsg) { + if !f.needsQuoting(errmsg) { b.WriteString(errmsg) } else { - fmt.Fprintf(b, "%q", errmsg) + fmt.Fprintf(b, "%s%v%s", f.QuoteCharacter, errmsg, f.QuoteCharacter) } default: fmt.Fprint(b, value) diff --git a/vendor/github.com/Sirupsen/logrus/writer.go b/vendor/github.com/sirupsen/logrus/writer.go similarity index 54% rename from vendor/github.com/Sirupsen/logrus/writer.go rename to vendor/github.com/sirupsen/logrus/writer.go index f74d2aa5..7bdebedc 100644 --- a/vendor/github.com/Sirupsen/logrus/writer.go +++ b/vendor/github.com/sirupsen/logrus/writer.go @@ -11,39 +11,48 @@ func (logger *Logger) Writer() *io.PipeWriter { } func (logger *Logger) WriterLevel(level Level) *io.PipeWriter { + return NewEntry(logger).WriterLevel(level) +} + +func (entry *Entry) Writer() *io.PipeWriter { + return entry.WriterLevel(InfoLevel) +} + +func (entry *Entry) WriterLevel(level Level) *io.PipeWriter { reader, writer := io.Pipe() var printFunc func(args ...interface{}) + switch level { case DebugLevel: - printFunc = logger.Debug + printFunc = entry.Debug case InfoLevel: - printFunc = 
logger.Info + printFunc = entry.Info case WarnLevel: - printFunc = logger.Warn + printFunc = entry.Warn case ErrorLevel: - printFunc = logger.Error + printFunc = entry.Error case FatalLevel: - printFunc = logger.Fatal + printFunc = entry.Fatal case PanicLevel: - printFunc = logger.Panic + printFunc = entry.Panic default: - printFunc = logger.Print + printFunc = entry.Print } - go logger.writerScanner(reader, printFunc) + go entry.writerScanner(reader, printFunc) runtime.SetFinalizer(writer, writerFinalizer) return writer } -func (logger *Logger) writerScanner(reader *io.PipeReader, printFunc func(args ...interface{})) { +func (entry *Entry) writerScanner(reader *io.PipeReader, printFunc func(args ...interface{})) { scanner := bufio.NewScanner(reader) for scanner.Scan() { printFunc(scanner.Text()) } if err := scanner.Err(); err != nil { - logger.Errorf("Error while reading from Writer: %s", err) + entry.Errorf("Error while reading from Writer: %s", err) } reader.Close() } diff --git a/worker/runcworker/worker.go b/worker/runcworker/worker.go index b61443c7..7ae4f030 100644 --- a/worker/runcworker/worker.go +++ b/worker/runcworker/worker.go @@ -10,7 +10,6 @@ import ( "path/filepath" "syscall" - "github.com/Sirupsen/logrus" "github.com/containerd/containerd/mount" runc "github.com/containerd/go-runc" "github.com/docker/docker/pkg/symlink" @@ -18,6 +17,7 @@ import ( "github.com/moby/buildkit/worker" "github.com/moby/buildkit/worker/oci" "github.com/pkg/errors" + "github.com/sirupsen/logrus" "golang.org/x/net/context" )
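The writer.go hunk above moves the pipe-backed writer from `Logger` onto `Entry`, so `Logger.WriterLevel` now simply delegates to `NewEntry(logger).WriterLevel` and fields travel with whatever is written through the pipe. A short sketch of the pattern, in the spirit of the `http.Server` example in the vendored README; the `component` field and the server itself are assumptions, not something this patch ships:

```go
package main

import (
	stdlog "log"
	"net/http"

	"github.com/sirupsen/logrus"
)

func main() {
	logger := logrus.New()

	// *io.PipeWriter whose lines surface as WARN entries that all
	// carry the "component" field.
	w := logger.WithField("component", "httpd").WriterLevel(logrus.WarnLevel)
	defer w.Close()

	srv := &http.Server{
		Addr: ":8080",
		// net/http writes its internal errors through the pipe, so they
		// come out as structured logrus warnings.
		ErrorLog: stdlog.New(w, "", 0),
	}
	_ = srv // srv.ListenAndServe() would block; omitted in this sketch.
}
```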