Merge pull request #1666 from ktock/sgz-oci
Enable to use stargz snapshotter without spawning plugin processv0.8
commit
6361c6bad2
|
@ -23,7 +23,7 @@ jobs:
|
|||
- stage: testing
|
||||
name: "Client integration tests"
|
||||
script:
|
||||
- TESTPKGS=./client ./hack/test integration
|
||||
- TESTPKGS=./client TESTFLAGS='-v --timeout=20m' ./hack/test integration
|
||||
- TESTPKGS=./cmd/buildctl ./hack/test integration
|
||||
- TESTPKGS=./worker/containerd ./hack/test integration
|
||||
- script:
|
||||
|
@ -35,7 +35,7 @@ jobs:
|
|||
- TESTPKGS=./frontend ./hack/test
|
||||
name: "Unit Tests & Lint & Vendor & Proto"
|
||||
- script:
|
||||
- TESTPKGS=./frontend/dockerfile TESTFLAGS='-v --parallel=6' ./hack/test
|
||||
- TESTPKGS=./frontend/dockerfile TESTFLAGS='-v --parallel=6 --timeout=20m' ./hack/test
|
||||
name: "Dockerfile integration tests"
|
||||
- script: TESTPKGS=./frontend/dockerfile ./hack/test dockerfile
|
||||
name: "External Dockerfile tests"
|
||||
|
|
|
@ -11,7 +11,7 @@ ARG ROOTLESSKIT_VERSION=v0.9.5
|
|||
ARG CNI_VERSION=v0.8.6
|
||||
ARG SHADOW_VERSION=4.8.1
|
||||
ARG FUSEOVERLAYFS_VERSION=v1.1.2
|
||||
ARG STARGZ_SNAPSHOTTER_VERSION=5aca593bd474015005b8832cf9763685d9d4db61
|
||||
ARG STARGZ_SNAPSHOTTER_VERSION=2ee75e91f8f98f3d324290a2503269812e019fc3
|
||||
|
||||
# git stage is used for checking out remote repository sources
|
||||
FROM --platform=$BUILDPLATFORM alpine AS git
|
||||
|
@ -175,9 +175,9 @@ RUN --mount=target=/root/.cache,type=cache \
|
|||
file /rootlesskit | grep "statically linked"
|
||||
|
||||
FROM gobuild-base AS stargz-snapshotter
|
||||
ARG STARGZ_SNAPSHOTTER_VERSION
|
||||
RUN git clone https://github.com/containerd/stargz-snapshotter.git /go/src/github.com/containerd/stargz-snapshotter
|
||||
WORKDIR /go/src/github.com/containerd/stargz-snapshotter
|
||||
ARG STARGZ_SNAPSHOTTER_VERSION
|
||||
RUN git checkout -q "$STARGZ_SNAPSHOTTER_VERSION" && \
|
||||
mkdir /out && CGO_ENABLED=0 PREFIX=/out/ make && \
|
||||
file /out/containerd-stargz-grpc | grep "statically linked" && \
|
||||
|
@ -243,7 +243,7 @@ RUN apt-get --no-install-recommends install -y uidmap sudo vim iptables fuse \
|
|||
&& update-alternatives --set iptables /usr/sbin/iptables-legacy
|
||||
# musl is needed to directly use the registry binary that is built on alpine
|
||||
ENV BUILDKIT_INTEGRATION_CONTAINERD_EXTRA="containerd-1.3=/opt/containerd-alt/bin"
|
||||
ENV BUILDKIT_INTEGRATION_CONTAINERD_STARGZ=1
|
||||
ENV BUILDKIT_INTEGRATION_SNAPSHOTTER=stargz
|
||||
COPY --from=stargz-snapshotter /out/* /usr/bin/
|
||||
COPY --from=rootlesskit /rootlesskit /usr/bin/
|
||||
COPY --from=containerd-alt /out/containerd* /opt/containerd-alt/bin/
|
||||
|
|
|
@ -2056,7 +2056,7 @@ func testStargzLazyPull(t *testing.T, sb integration.Sandbox) {
|
|||
requiresLinux(t)
|
||||
|
||||
cdAddress := sb.ContainerdAddress()
|
||||
if cdAddress == "" || !sb.Stargz() {
|
||||
if cdAddress == "" || sb.Snapshotter() != "stargz" {
|
||||
t.Skip("test requires containerd worker with stargz snapshotter")
|
||||
}
|
||||
|
||||
|
|
|
@ -103,8 +103,8 @@ func testBuildContainerdExporter(t *testing.T, sb integration.Sandbox) {
|
|||
|
||||
// NOTE: by default, it is overlayfs
|
||||
snapshotter := "overlayfs"
|
||||
if sb.Stargz() {
|
||||
snapshotter = "stargz"
|
||||
if sn := sb.Snapshotter(); sn != "" {
|
||||
snapshotter = sn
|
||||
}
|
||||
ok, err := img.IsUnpacked(ctx, snapshotter)
|
||||
require.NoError(t, err)
|
||||
|
|
|
@ -1,5 +1,7 @@
|
|||
package config
|
||||
|
||||
import sgzconf "github.com/containerd/stargz-snapshotter/stargz/config"
|
||||
|
||||
// Config provides containerd configuration data for the server
|
||||
type Config struct {
|
||||
Debug bool `toml:"debug"`
|
||||
|
@ -78,8 +80,9 @@ type OCIConfig struct {
|
|||
// incomplete and the intention is to make it default without config.
|
||||
UserRemapUnsupported string `toml:"userRemapUnsupported"`
|
||||
// For use in storing the OCI worker binary name that will replace buildkit-runc
|
||||
Binary string `toml:"binary"`
|
||||
ProxySnapshotterPath string `toml:"proxySnapshotterPath"`
|
||||
Binary string `toml:"binary"`
|
||||
ProxySnapshotterPath string `toml:"proxySnapshotterPath"`
|
||||
StargzSnapshotterConfig sgzconf.Config `toml:"stargzSnapshotter"`
|
||||
}
|
||||
|
||||
type ContainerdConfig struct {
|
||||
|
|
|
@ -5,6 +5,7 @@ package main
|
|||
import (
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
|
@ -17,6 +18,8 @@ import (
|
|||
"github.com/containerd/containerd/snapshots/overlay"
|
||||
snproxy "github.com/containerd/containerd/snapshots/proxy"
|
||||
"github.com/containerd/containerd/sys"
|
||||
remotesn "github.com/containerd/stargz-snapshotter/snapshot"
|
||||
"github.com/containerd/stargz-snapshotter/stargz"
|
||||
"github.com/moby/buildkit/cmd/buildkitd/config"
|
||||
"github.com/moby/buildkit/executor/oci"
|
||||
"github.com/moby/buildkit/util/network/cniprovider"
|
||||
|
@ -227,7 +230,7 @@ func ociWorkerInitializer(c *cli.Context, common workerInitializerOpt) ([]worker
|
|||
return nil, err
|
||||
}
|
||||
|
||||
snFactory, err := snapshotterFactory(common.config.Root, cfg.Snapshotter, cfg.ProxySnapshotterPath)
|
||||
snFactory, err := snapshotterFactory(common.config.Root, cfg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -280,7 +283,11 @@ func ociWorkerInitializer(c *cli.Context, common workerInitializerOpt) ([]worker
|
|||
return []worker.Worker{w}, nil
|
||||
}
|
||||
|
||||
func snapshotterFactory(commonRoot, name, address string) (runc.SnapshotterFactory, error) {
|
||||
func snapshotterFactory(commonRoot string, cfg config.OCIConfig) (runc.SnapshotterFactory, error) {
|
||||
var (
|
||||
name = cfg.Snapshotter
|
||||
address = cfg.ProxySnapshotterPath
|
||||
)
|
||||
if address != "" {
|
||||
snFactory := runc.SnapshotterFactory{
|
||||
Name: name,
|
||||
|
@ -340,6 +347,16 @@ func snapshotterFactory(commonRoot, name, address string) (runc.SnapshotterFacto
|
|||
// no Opt (AsynchronousRemove is untested for fuse-overlayfs)
|
||||
return fuseoverlayfs.NewSnapshotter(root)
|
||||
}
|
||||
case "stargz":
|
||||
snFactory.New = func(root string) (ctdsnapshot.Snapshotter, error) {
|
||||
fs, err := stargz.NewFilesystem(filepath.Join(root, "stargz"),
|
||||
cfg.StargzSnapshotterConfig)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return remotesn.NewSnapshotter(filepath.Join(root, "snapshotter"),
|
||||
fs, remotesn.AsynchronousRemove)
|
||||
}
|
||||
default:
|
||||
return snFactory, errors.Errorf("unknown snapshotter name: %q", name)
|
||||
}
|
||||
|
|
32
go.mod
32
go.mod
|
@ -4,27 +4,22 @@ go 1.13
|
|||
|
||||
require (
|
||||
github.com/AkihiroSuda/containerd-fuse-overlayfs v0.10.0
|
||||
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 // indirect
|
||||
github.com/BurntSushi/toml v0.3.1
|
||||
github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5
|
||||
github.com/Microsoft/hcsshim v0.8.9
|
||||
github.com/Microsoft/hcsshim/test v0.0.0-20200815005529-6735787a4e85 // indirect
|
||||
github.com/apache/thrift v0.0.0-20161221203622-b2a4d4ae21c7 // indirect
|
||||
github.com/codahale/hdrhistogram v0.0.0-20160425231609-f8ad88b59a58 // indirect
|
||||
github.com/containerd/cgroups v0.0.0-20200710171044-318312a37340 // indirect
|
||||
github.com/containerd/console v1.0.0
|
||||
github.com/containerd/containerd v1.4.1-0.20200903181227-d4e78200d6da
|
||||
github.com/containerd/continuity v0.0.0-20200710164510-efbc4488d8fe
|
||||
github.com/containerd/fifo v0.0.0-20200410184934-f15a3290365b // indirect
|
||||
github.com/containerd/go-cni v1.0.1
|
||||
github.com/containerd/go-runc v0.0.0-20200220073739-7016d3ce2328
|
||||
github.com/containerd/stargz-snapshotter v0.0.0-20200903042824-2ee75e91f8f9
|
||||
github.com/containerd/typeurl v1.0.1
|
||||
github.com/coreos/go-systemd/v22 v22.1.0
|
||||
github.com/docker/cli v0.0.0-20200227165822-2298e6a3fe24
|
||||
github.com/docker/distribution v2.7.1+incompatible
|
||||
github.com/docker/docker v0.0.0
|
||||
github.com/docker/docker-credential-helpers v0.6.0 // indirect
|
||||
github.com/docker/go-connections v0.3.0
|
||||
github.com/docker/docker v17.12.0-ce-rc1.0.20200730172259-9f28837c1d93+incompatible
|
||||
github.com/docker/go-connections v0.4.0
|
||||
github.com/docker/libnetwork v0.8.0-dev.2.0.20200226230617-d8334ccdb9be
|
||||
github.com/gofrs/flock v0.7.3
|
||||
github.com/gogo/googleapis v1.3.2
|
||||
|
@ -33,18 +28,16 @@ require (
|
|||
github.com/golang/protobuf v1.4.2
|
||||
github.com/google/go-cmp v0.4.0
|
||||
github.com/google/shlex v0.0.0-20150127133951-6f45313302b9
|
||||
github.com/google/uuid v1.1.1 // indirect
|
||||
github.com/gorilla/mux v1.7.4 // indirect
|
||||
github.com/grpc-ecosystem/go-grpc-middleware v1.2.0
|
||||
github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645
|
||||
github.com/hashicorp/go-immutable-radix v1.0.0
|
||||
github.com/hashicorp/golang-lru v0.5.1
|
||||
github.com/hashicorp/golang-lru v0.5.3
|
||||
github.com/hashicorp/uuid v0.0.0-20160311170451-ebb0a03e909c // indirect
|
||||
github.com/imdario/mergo v0.3.9 // indirect
|
||||
github.com/ishidawataru/sctp v0.0.0-20191218070446-00ab2ac2db07 // indirect
|
||||
github.com/jaguilar/vt100 v0.0.0-20150826170717-2703a27b14ea
|
||||
github.com/mitchellh/hashstructure v1.0.0
|
||||
github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c
|
||||
github.com/morikuni/aec v1.0.0
|
||||
github.com/opencontainers/go-digest v1.0.0
|
||||
github.com/opencontainers/image-spec v1.0.1
|
||||
github.com/opencontainers/runc v1.0.0-rc92
|
||||
|
@ -58,17 +51,18 @@ require (
|
|||
github.com/stretchr/testify v1.5.1
|
||||
github.com/tonistiigi/fsutil v0.0.0-20200724193237-c3ed55f3b481
|
||||
github.com/tonistiigi/units v0.0.0-20180711220420-6950e57a87ea
|
||||
github.com/uber/jaeger-client-go v2.11.2+incompatible
|
||||
github.com/uber/jaeger-lib v1.2.1 // indirect
|
||||
github.com/uber/jaeger-client-go v2.25.0+incompatible
|
||||
github.com/uber/jaeger-lib v2.2.0+incompatible // indirect
|
||||
github.com/urfave/cli v1.22.2
|
||||
go.etcd.io/bbolt v1.3.5
|
||||
golang.org/x/crypto v0.0.0-20200221231518-2aa609cf4a9d
|
||||
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9
|
||||
golang.org/x/net v0.0.0-20200707034311-ab3426394381
|
||||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e
|
||||
golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1
|
||||
golang.org/x/time v0.0.0-20191024005414-555d28b269f0
|
||||
google.golang.org/genproto v0.0.0-20200227132054-3f1135a288c9
|
||||
google.golang.org/grpc v1.27.1
|
||||
// genproto: the actual version is replaced in replace()
|
||||
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013
|
||||
google.golang.org/grpc v1.28.1
|
||||
)
|
||||
|
||||
replace (
|
||||
|
@ -77,4 +71,6 @@ replace (
|
|||
github.com/golang/protobuf => github.com/golang/protobuf v1.3.5
|
||||
github.com/hashicorp/go-immutable-radix => github.com/tonistiigi/go-immutable-radix v0.0.0-20170803185627-826af9ccf0fe
|
||||
github.com/jaguilar/vt100 => github.com/tonistiigi/vt100 v0.0.0-20190402012908-ad4c4a574305
|
||||
// genproto: corresponds to containerd
|
||||
google.golang.org/genproto => google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63
|
||||
)
|
||||
|
|
544
go.sum
544
go.sum
|
@ -1,38 +1,103 @@
|
|||
bazil.org/fuse v0.0.0-20160811212531-371fbbdaa898/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8=
|
||||
bazil.org/fuse v0.0.0-20180421153158-65cc252bf669/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8=
|
||||
cloud.google.com/go v0.25.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
cloud.google.com/go v0.31.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
cloud.google.com/go v0.37.2/go.mod h1:H8IAquKe2L30IxoupDgqTaQvKSwF/c8prYHynGIWQbA=
|
||||
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
|
||||
cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
|
||||
cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
|
||||
cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
|
||||
cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
|
||||
cloud.google.com/go v0.51.0/go.mod h1:hWtGJ6gnXH+KgDv+V0zFGDvpi07n3z8ZNj3T1RW0Gcw=
|
||||
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
|
||||
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
|
||||
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
|
||||
cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
|
||||
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
|
||||
git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg=
|
||||
git.apache.org/thrift.git v0.12.0/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg=
|
||||
github.com/AkihiroSuda/containerd-fuse-overlayfs v0.10.0 h1:WUnAvVmHctWgTblykFsbRk5KwMv5z2db8bRxyKcJTKc=
|
||||
github.com/AkihiroSuda/containerd-fuse-overlayfs v0.10.0/go.mod h1:0mMDvQFeLbbn1Wy8P2j3hwFhqBq+FKn8OZPno8WLmp8=
|
||||
github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
|
||||
github.com/Azure/azure-sdk-for-go v19.1.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
|
||||
github.com/Azure/azure-sdk-for-go v35.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
|
||||
github.com/Azure/azure-sdk-for-go v38.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
|
||||
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8=
|
||||
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
|
||||
github.com/Azure/go-autorest v10.8.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
|
||||
github.com/Azure/go-autorest v10.15.5+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
|
||||
github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
|
||||
github.com/Azure/go-autorest/autorest v0.9.3/go.mod h1:GsRuLYvwzLjjjRoWEIyMUaYq8GNUx2nRB378IPt/1p0=
|
||||
github.com/Azure/go-autorest/autorest v0.9.6/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630=
|
||||
github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0=
|
||||
github.com/Azure/go-autorest/autorest/adal v0.8.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc=
|
||||
github.com/Azure/go-autorest/autorest/adal v0.8.1/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q=
|
||||
github.com/Azure/go-autorest/autorest/adal v0.8.2/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q=
|
||||
github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA=
|
||||
github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g=
|
||||
github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
|
||||
github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
|
||||
github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM=
|
||||
github.com/Azure/go-autorest/autorest/to v0.2.0/go.mod h1:GunWKJp1AEqgMaGLV+iocmRAJWqST1wQYhyyjXJ3SJc=
|
||||
github.com/Azure/go-autorest/autorest/to v0.3.0/go.mod h1:MgwOyqaIuKdG4TL/2ywSsIWKAfJfgHDo8ObuUk3t5sA=
|
||||
github.com/Azure/go-autorest/autorest/validation v0.1.0/go.mod h1:Ha3z/SqBeaalWQvokg3NZAlQTalVMtOIAs1aGK7G6u8=
|
||||
github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc=
|
||||
github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk=
|
||||
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
|
||||
github.com/GoogleCloudPlatform/k8s-cloud-provider v0.0.0-20190822182118-27a4ced34534/go.mod h1:iroGtC8B3tQiqtds1l+mgk/BBOrxbqjH+eUfFQYRc14=
|
||||
github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA=
|
||||
github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5 h1:ygIc8M6trr62pF5DucadTWGdEB4mEyvzi0e2nbcmcyA=
|
||||
github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw=
|
||||
github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ=
|
||||
github.com/Microsoft/hcsshim v0.8.9 h1:VrfodqvztU8YSOvygU+DN1BGaSGxmrNfqOv5oOuX2Bk=
|
||||
github.com/Microsoft/hcsshim v0.8.9/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg38RRsjT5y8=
|
||||
github.com/Microsoft/hcsshim/test v0.0.0-20200815005529-6735787a4e85 h1:eZbRggdK/z6AA84NSYUnePpCLJ4nHB/dllWNDfJhLRY=
|
||||
github.com/Microsoft/hcsshim/test v0.0.0-20200815005529-6735787a4e85/go.mod h1:30A5igQ91GEmhYJF8TaRP79pMBOYynRsyOByfVV0dU4=
|
||||
github.com/Microsoft/hcsshim/test v0.0.0-20200826032352-301c83a30e7c h1:c8XLHKgdaCIhlht1ns4ya1ypf408bYWac2XMVijauYw=
|
||||
github.com/Microsoft/hcsshim/test v0.0.0-20200826032352-301c83a30e7c/go.mod h1:30A5igQ91GEmhYJF8TaRP79pMBOYynRsyOByfVV0dU4=
|
||||
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
|
||||
github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
|
||||
github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
|
||||
github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
|
||||
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
|
||||
github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ=
|
||||
github.com/apache/thrift v0.0.0-20161221203622-b2a4d4ae21c7 h1:Fv9bK1Q+ly/ROk4aJsVMeuIwPel4bEnD8EPiI91nZMg=
|
||||
github.com/apache/thrift v0.0.0-20161221203622-b2a4d4ae21c7/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
|
||||
github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
|
||||
github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
|
||||
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||
github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
|
||||
github.com/apache/thrift v0.12.0 h1:pODnxUFNcjP9UTLZGTdeh+j16A8lJbRvD3rOtrk/7bs=
|
||||
github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
|
||||
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
|
||||
github.com/aws/aws-sdk-go v1.15.11/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0=
|
||||
github.com/aws/aws-sdk-go v1.15.90/go.mod h1:es1KtYUFs7le0xQ3rOihkuoVD90z7D0fR2Qm4S00/gU=
|
||||
github.com/aws/aws-sdk-go v1.16.26/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
|
||||
github.com/aws/aws-sdk-go v1.27.1/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
|
||||
github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
||||
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
||||
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
|
||||
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
|
||||
github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA=
|
||||
github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
|
||||
github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
|
||||
github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4=
|
||||
github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g=
|
||||
github.com/bshuster-repo/logrus-logstash-hook v0.4.1/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk=
|
||||
github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8=
|
||||
github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50=
|
||||
github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE=
|
||||
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
||||
github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw=
|
||||
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
|
||||
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
|
||||
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
|
||||
github.com/cilium/ebpf v0.0.0-20200702112145-1c8d4c9ef775 h1:cHzBGGVew0ezFsq2grfy2RsB8hO/eNyBgOLHBCqfR1U=
|
||||
github.com/cilium/ebpf v0.0.0-20200702112145-1c8d4c9ef775/go.mod h1:7cR51M8ViRLIdUjrmSXlK9pkrsDlLHbO8jiB8X8JnOc=
|
||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
|
||||
github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
|
||||
github.com/codahale/hdrhistogram v0.0.0-20160425231609-f8ad88b59a58 h1:hHWif/4GirK3P5uvCyyj941XSVIQDzuJhbEguCICdPE=
|
||||
github.com/codahale/hdrhistogram v0.0.0-20160425231609-f8ad88b59a58/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI=
|
||||
github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko=
|
||||
|
@ -43,6 +108,7 @@ github.com/containerd/console v0.0.0-20191206165004-02ecf6a7291e/go.mod h1:8Pf4g
|
|||
github.com/containerd/console v1.0.0 h1:fU3UuQapBs+zLJu82NhR11Rif1ny2zfMMAyPJzSN5tQ=
|
||||
github.com/containerd/console v1.0.0/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE=
|
||||
github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
|
||||
github.com/containerd/containerd v1.3.0/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
|
||||
github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
|
||||
github.com/containerd/containerd v1.4.0 h1:aWJB3lbDEaOxg1mkTBAINY2a+NsoKbAeRYefJPPRY+o=
|
||||
github.com/containerd/containerd v1.4.0/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
|
||||
|
@ -59,6 +125,8 @@ github.com/containerd/go-cni v1.0.1/go.mod h1:+vUpYxKvAF72G9i1WoDOiPGRtQpqsNW/ZH
|
|||
github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0=
|
||||
github.com/containerd/go-runc v0.0.0-20200220073739-7016d3ce2328 h1:PRTagVMbJcCezLcHXe8UJvR1oBzp2lG3CEumeFOLOds=
|
||||
github.com/containerd/go-runc v0.0.0-20200220073739-7016d3ce2328/go.mod h1:PpyHrqVs8FTi9vpyHwPwiNEGaACDxT/N/pLcvMSRA9g=
|
||||
github.com/containerd/stargz-snapshotter v0.0.0-20200903042824-2ee75e91f8f9 h1:JEcj3rNCg0Ho5t9kiOa4LV/vaNXbRQ9l2PIRzz2LWCQ=
|
||||
github.com/containerd/stargz-snapshotter v0.0.0-20200903042824-2ee75e91f8f9/go.mod h1:f+gZLtYcuNQWxucWyfVEQXSBoGbXNpQ76+XWVMgp+34=
|
||||
github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
|
||||
github.com/containerd/ttrpc v1.0.1 h1:IfVOxKbjyBn9maoye2JN95pgGYOmPkQVqxtOu7rtNIc=
|
||||
github.com/containerd/ttrpc v1.0.1/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y=
|
||||
|
@ -67,50 +135,106 @@ github.com/containerd/typeurl v1.0.1 h1:PvuK4E3D5S5q6IqsPDCy928FhP0LUIGcmZ/Yhgp5
|
|||
github.com/containerd/typeurl v1.0.1/go.mod h1:TB1hUtrpaiO88KEK56ijojHS1+NeF0izUACaJW2mdXg=
|
||||
github.com/containernetworking/cni v0.8.0 h1:BT9lpgGoH4jw3lFC7Odz2prU5ruiYKcgAjMCbgybcKI=
|
||||
github.com/containernetworking/cni v0.8.0/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
|
||||
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
|
||||
github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk=
|
||||
github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc=
|
||||
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
|
||||
github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
|
||||
github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
|
||||
github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
|
||||
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e h1:Wf6HqHfScWJN9/ZjdUKyjop4mf3Qdd+1TvvltAvM3m8=
|
||||
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
|
||||
github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
|
||||
github.com/coreos/go-systemd/v22 v22.1.0 h1:kq/SbG2BCKLkDKkjQf5OWwKWUKj1lgs3lFI4PxnR5lg=
|
||||
github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
|
||||
github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
|
||||
github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
|
||||
github.com/cpuguy83/go-md2man v1.0.10 h1:BSKMNlYxDvnunlTymqtgONjNnaRV1sTpcovwwjF22jk=
|
||||
github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.0 h1:EoUDS0afbrsXAZ9YQ9jdu/mZ2sXgT1/2yyNng4PGlyM=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
|
||||
github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
|
||||
github.com/cyphar/filepath-securejoin v0.2.2 h1:jCwT2GTP+PY5nBz3c/YL5PAIbusElVrPujOBSCj8xRg=
|
||||
github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4=
|
||||
github.com/davecgh/go-spew v0.0.0-20151105211317-5215b55f46b2/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0=
|
||||
github.com/dgrijalva/jwt-go v0.0.0-20170104182250-a601269ab70c/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
|
||||
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
|
||||
github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E=
|
||||
github.com/docker/cli v0.0.0-20190925022749-754388324470/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
|
||||
github.com/docker/cli v0.0.0-20191017083524-a8ff7f821017/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
|
||||
github.com/docker/cli v0.0.0-20200227165822-2298e6a3fe24 h1:bjsfAvm8BVtvQFxV7TYznmKa35J8+fmgrRJWvcS3yJo=
|
||||
github.com/docker/cli v0.0.0-20200227165822-2298e6a3fe24/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
|
||||
github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY=
|
||||
github.com/docker/distribution v2.6.0-rc.1.0.20180327202408-83389a148052+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
|
||||
github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug=
|
||||
github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
|
||||
github.com/docker/docker v17.12.0-ce-rc1.0.20200310163718-4634ce647cf2+incompatible h1:ax4NateCD5bjRTqLvQBlFrSUPOoZRgEXWpJ6Bmu6OO0=
|
||||
github.com/docker/docker v17.12.0-ce-rc1.0.20200310163718-4634ce647cf2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/docker/docker-credential-helpers v0.6.0 h1:5bhDRLn1roGiNjz8IezRngHxMfoeaXGyr0BeMHq4rD8=
|
||||
github.com/docker/docker-credential-helpers v0.6.0/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y=
|
||||
github.com/docker/go-connections v0.3.0 h1:3lOnM9cSzgGwx8VfK/NGOW5fLQ0GjIlCkaktF+n1M6o=
|
||||
github.com/docker/go-connections v0.3.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
|
||||
github.com/docker/docker-credential-helpers v0.6.3 h1:zI2p9+1NQYdnG6sMU26EX4aVGlqbInSQxQXLvzJ4RPQ=
|
||||
github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y=
|
||||
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
|
||||
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
|
||||
github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c h1:+pKlWGMw7gf6bQ+oDZB4KHQFypsfjYlq/C4rfL7D3g8=
|
||||
github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA=
|
||||
github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916/go.mod h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI=
|
||||
github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
|
||||
github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw=
|
||||
github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
|
||||
github.com/docker/libnetwork v0.8.0-dev.2.0.20200226230617-d8334ccdb9be h1:GJzljYRqZapOwyfeRyExF2/5qfv8f1feNDMiz0hmRPY=
|
||||
github.com/docker/libnetwork v0.8.0-dev.2.0.20200226230617-d8334ccdb9be/go.mod h1:93m0aTqz6z+g32wla4l4WxTrdtvBRmVzYRkYvasA5Z8=
|
||||
github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE=
|
||||
github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
|
||||
github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
|
||||
github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
|
||||
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
|
||||
github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
|
||||
github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
|
||||
github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
|
||||
github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
|
||||
github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
|
||||
github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
|
||||
github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
|
||||
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
|
||||
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
|
||||
github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
|
||||
github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
|
||||
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
|
||||
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
|
||||
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
|
||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||
github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
|
||||
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
|
||||
github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY=
|
||||
github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
|
||||
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
|
||||
github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
|
||||
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
|
||||
github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
|
||||
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
|
||||
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
|
||||
github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
|
||||
github.com/go-logr/logr v0.2.0 h1:QvGt2nLcHH0WK9orKa+ppBPAxREcH364nPUedEpK0TY=
|
||||
github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
|
||||
github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0=
|
||||
github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg=
|
||||
github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
|
||||
github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg=
|
||||
github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc=
|
||||
github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8=
|
||||
github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc=
|
||||
github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo=
|
||||
github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I=
|
||||
github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
|
||||
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
|
||||
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
|
||||
github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e h1:BWhy2j3IXJhjCbC68FptL43tDKIq8FladmaTs3Xs7Z8=
|
||||
github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4=
|
||||
|
@ -121,49 +245,124 @@ github.com/gofrs/flock v0.7.3/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14j
|
|||
github.com/gogo/googleapis v1.2.0/go.mod h1:Njal3psf3qN6dwBtQfUmBZh2ybovJ0tlu3o/AC7HYjU=
|
||||
github.com/gogo/googleapis v1.3.2 h1:kX1es4djPJrsDhY7aZKJy7aZasdcB5oSOEphMjSB53c=
|
||||
github.com/gogo/googleapis v1.3.2/go.mod h1:5YRNX2z1oM5gXdAkurHa942MDgEJyk02w4OecKY87+c=
|
||||
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
||||
github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
||||
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
|
||||
github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
|
||||
github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls=
|
||||
github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||
github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7 h1:5ZkaAPbicIKTF2I64qf5Fh8Aa83Q/dnOafMYV0OMwjA=
|
||||
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
|
||||
github.com/golang/protobuf v1.3.5 h1:F768QJ1E9tib+q5Sc8MkdJi1RxLTbRcTf8LJV56aRls=
|
||||
github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
|
||||
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
github.com/google/btree v0.0.0-20180124185431-e89373fe6b4a/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||
github.com/google/crfs v0.0.0-20191108021818-71d77da419c9 h1:eiW5yEpL5gwvM2XH3EV06vRs7AVCtgL1LOs71VeW7uQ=
|
||||
github.com/google/crfs v0.0.0-20191108021818-71d77da419c9/go.mod h1:etGhoOqfwPkooV6aqoX3eBGQOJblqdoc9XvWOeuxpPw=
|
||||
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
||||
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4=
|
||||
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-containerregistry v0.0.0-20191010200024-a3d713f9b7f8/go.mod h1:KyKXa9ciM8+lgMXwOVsXi7UxGrsf9mM61Mzs+xKUrKE=
|
||||
github.com/google/go-containerregistry v0.0.0-20200425101607-48f605c3b60a h1:ov0+Bhfvpg4POSTOvH0Uw9wzaswlFvVGhiCmIyXNkkQ=
|
||||
github.com/google/go-containerregistry v0.0.0-20200425101607-48f605c3b60a/go.mod h1:pD1UFYs7MCAx+ZLShBdttcaOSbyc8F9Na/9IZLNwJeA=
|
||||
github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ=
|
||||
github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
|
||||
github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI=
|
||||
github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI=
|
||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g=
|
||||
github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
|
||||
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
|
||||
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
|
||||
github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
||||
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
|
||||
github.com/google/shlex v0.0.0-20150127133951-6f45313302b9 h1:JM174NTeGNJ2m/oLH3UOWOvWQQKd+BoL3hcSCUWFLt0=
|
||||
github.com/google/shlex v0.0.0-20150127133951-6f45313302b9/go.mod h1:RpwtwJQFrIEPstU94h88MWPXP2ektJZ8cZ0YntAmXiE=
|
||||
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=
|
||||
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY=
|
||||
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
|
||||
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
|
||||
github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
|
||||
github.com/googleapis/gnostic v0.2.2/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
|
||||
github.com/googleapis/gnostic v0.4.1 h1:DLJCy1n/vrD4HPjOvYcT8aYQXpPIzoRZONaYwyycI+I=
|
||||
github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg=
|
||||
github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8=
|
||||
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
|
||||
github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
|
||||
github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ=
|
||||
github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
|
||||
github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
|
||||
github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
|
||||
github.com/gorilla/mux v1.7.4 h1:VuZ8uybHlWmqV03+zRzdwKL4tUnIp1MAQtp1mIFE1bc=
|
||||
github.com/gorilla/mux v1.7.4/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
|
||||
github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
|
||||
github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
|
||||
github.com/gotestyourself/gotestyourself v2.2.0+incompatible/go.mod h1:zZKM6oeNM8k+FRljX1mnzVYeS8wiGgQyvST1/GafPbY=
|
||||
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
|
||||
github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
|
||||
github.com/grpc-ecosystem/go-grpc-middleware v1.2.0 h1:0IKlLyQ3Hs9nDaiK5cSHAGmcQEIC8l2Ts1u6x5Dfrqg=
|
||||
github.com/grpc-ecosystem/go-grpc-middleware v1.2.0/go.mod h1:mJzapYve32yjrKlk9GbyCZHuPgZsrbyIbyKhSzOpg6s=
|
||||
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw=
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.6.2/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw=
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
|
||||
github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645 h1:MJG/KsmcqMwFAkh8mTnAwhyKoB+sTAnY4CACC110tbU=
|
||||
github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645/go.mod h1:6iZfnjpejD4L/4DwD7NryNaJyCQdzwWwH2MWhCA90Kw=
|
||||
github.com/hanwen/go-fuse v1.0.0 h1:GxS9Zrn6c35/BnfiVsZVWmsG803xwE7eVRDvcf/BEVc=
|
||||
github.com/hanwen/go-fuse v1.0.0/go.mod h1:unqXarDXqzAk0rt98O2tVndEPIpUgLD9+rwFisZH3Ok=
|
||||
github.com/hanwen/go-fuse/v2 v2.0.3 h1:kpV28BKeSyVgZREItBLnaVBvOEwv2PuhNdKetwnvNHo=
|
||||
github.com/hanwen/go-fuse/v2 v2.0.3/go.mod h1:0EQM6aH2ctVpvZ6a+onrQ/vaykxh2GH7hy3e13vzTUY=
|
||||
github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
|
||||
github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I=
|
||||
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||
github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU=
|
||||
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||
github.com/hashicorp/golang-lru v0.5.3 h1:YPkqC67at8FYaadspW/6uE0COsBxS2656RLEr8Bppgk=
|
||||
github.com/hashicorp/golang-lru v0.5.3/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
|
||||
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
|
||||
github.com/hashicorp/uuid v0.0.0-20160311170451-ebb0a03e909c h1:nQcv325vxv2fFHJsOt53eSRf1eINt6vOdYUFfXs4rgk=
|
||||
github.com/hashicorp/uuid v0.0.0-20160311170451-ebb0a03e909c/go.mod h1:fHzc09UnyJyqyW+bFuq864eh+wC7dj65aXmXLRe5to0=
|
||||
github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
|
||||
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
|
||||
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
|
||||
github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
|
||||
github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
|
||||
github.com/imdario/mergo v0.3.9 h1:UauaLniWCFHWd+Jp9oCEkTBj8VO/9DKg3PV3VCNMDIg=
|
||||
github.com/imdario/mergo v0.3.9/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
|
||||
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
|
||||
github.com/ishidawataru/sctp v0.0.0-20191218070446-00ab2ac2db07 h1:rw3IAne6CDuVFlZbPOkA7bhxlqawFh7RJJ+CejfMaxE=
|
||||
github.com/ishidawataru/sctp v0.0.0-20191218070446-00ab2ac2db07/go.mod h1:co9pwDoBCm1kGxawmb4sPq0cSIOOWNPT4KnHotMP1Zg=
|
||||
github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU=
|
||||
github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
|
||||
github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
|
||||
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
|
||||
github.com/joefitzgerald/rainbow-reporter v0.1.0/go.mod h1:481CNgqmVHQZzdIbN52CupLJyoVwB10FQ/IQlF1pdL8=
|
||||
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
|
||||
github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
|
||||
github.com/json-iterator/go v0.0.0-20180701071628-ab8a2e0c74be/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
|
||||
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
|
||||
github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
||||
github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
||||
github.com/json-iterator/go v1.1.10 h1:Kz6Cvnvv2wGdaG/V8yMvfkmNiXq9Ya2KUv4rouJJr68=
|
||||
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
||||
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
|
||||
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
|
||||
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
|
||||
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
|
||||
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
|
||||
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
|
||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||
|
@ -174,11 +373,27 @@ github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxv
|
|||
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
|
||||
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
|
||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||
github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs=
|
||||
github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
|
||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=
|
||||
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
|
||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348 h1:MtvEpTB6LX3vkb4ax0b5D2DHbNAUsen0Gx5wZoq3lV4=
|
||||
github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k=
|
||||
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
|
||||
github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
|
||||
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
|
||||
github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
|
||||
github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs=
|
||||
github.com/marstr/guid v1.1.0/go.mod h1:74gB1z2wpxxInTG6yaqA7KrtM0NZ+RbrcqDvYHefzho=
|
||||
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
|
||||
github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
|
||||
github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
|
||||
github.com/maxbrunsfeld/counterfeiter/v6 v6.2.2/go.mod h1:eD9eIE7cdwcMi9rYluz88Jz2VyhSmden33/aXg4oVIY=
|
||||
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
|
||||
github.com/mitchellh/hashstructure v1.0.0 h1:ZkRJX1CyOoTkar7p/mLS5TZU4nJ1Rn/F8u9dGS02Q3Y=
|
||||
github.com/mitchellh/hashstructure v1.0.0/go.mod h1:QjSHrPWS+BGUVBYkbTZWEnOh3G1DutKwClXU/ABz6AQ=
|
||||
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
|
||||
|
@ -186,14 +401,34 @@ github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQ
|
|||
github.com/moby/sys/mount v0.1.0/go.mod h1:FVQFLDRWwyBjDTBNQXDlWnSFREqOo3OKX9aqhmeoo74=
|
||||
github.com/moby/sys/mountinfo v0.1.0/go.mod h1:w2t2Avltqx8vE7gX5l+QiBKxODu2TX0+Syr3h52Tw4o=
|
||||
github.com/moby/sys/mountinfo v0.1.3/go.mod h1:w2t2Avltqx8vE7gX5l+QiBKxODu2TX0+Syr3h52Tw4o=
|
||||
github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c h1:nXxl5PrvVm2L/wCy8dQu6DMTwH4oIuGN8GJDAlqDdVE=
|
||||
github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
|
||||
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/reflect2 v0.0.0-20180320133207-05fbef0ca5da/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||
github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=
|
||||
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||
github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
|
||||
github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
|
||||
github.com/mrunalp/fileutils v0.0.0-20200520151820-abd8a0e76976/go.mod h1:x8F1gnqOkIEiO4rqoeEEEqQbo7HjGMTvyoq3gej4iT0=
|
||||
github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
|
||||
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
|
||||
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
|
||||
github.com/ncw/swift v1.0.47/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM=
|
||||
github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
|
||||
github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/ginkgo v1.10.3 h1:OoxbjfXVZyod1fmWYhI7SEyaD8B00ynP3T+D5GiyHOY=
|
||||
github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/ginkgo v1.11.0 h1:JAKSXpt1YjtLA7YpPiqO9ss6sNXEsPfSGdwN0UHqzrw=
|
||||
github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
|
||||
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
|
||||
github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
|
||||
github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
|
||||
github.com/onsi/gomega v1.7.1 h1:K0jcRCwNQM3vFGh1ppMtDh/+7ApJrjldlX8fA0jDTLQ=
|
||||
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
|
||||
|
@ -222,46 +457,93 @@ github.com/opentracing-contrib/go-stdlib v1.0.0/go.mod h1:qtI1ogk+2JhVPIXVc6q+NH
|
|||
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
|
||||
github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs=
|
||||
github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc=
|
||||
github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8=
|
||||
github.com/openzipkin/zipkin-go v0.1.3/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8=
|
||||
github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw=
|
||||
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
|
||||
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
|
||||
github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
|
||||
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
||||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/profile v1.5.0 h1:042Buzk+NhDI+DeSAA62RwJL8VAuZUMQZUjCsRz1Mug=
|
||||
github.com/pkg/profile v1.5.0/go.mod h1:qBsxPvzyUincmltOk6iyRVxHYg4adc0OFOv72ZdLa18=
|
||||
github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA=
|
||||
github.com/prometheus/client_golang v0.0.0-20180209125602-c332b6f63c06/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
|
||||
github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
|
||||
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
|
||||
github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs=
|
||||
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
|
||||
github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||
github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/common v0.0.0-20180110214958-89604d197083/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
|
||||
github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
|
||||
github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
|
||||
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
|
||||
github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/prometheus/procfs v0.0.0-20190522114515-bc1a522cf7b1 h1:Lo6mRUjdS99f3zxYOUalftWHUoOGaDRqFk1+j0Q57/I=
|
||||
github.com/prometheus/procfs v0.0.0-20190522114515-bc1a522cf7b1/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
|
||||
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
|
||||
github.com/prometheus/procfs v0.0.5 h1:3+auTFlqw+ZaQYJARz6ArODtkaIwtvBTx3N2NehQlL8=
|
||||
github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
|
||||
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
|
||||
github.com/remyoudompheng/bigfft v0.0.0-20170806203942-52369c62f446/go.mod h1:uYEyJGbgTkfkS4+E/PavXkNJcbFIpEtjt2B0KDQ5+9M=
|
||||
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
|
||||
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
||||
github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ=
|
||||
github.com/rubiojr/go-vhd v0.0.0-20160810183302-0bfd3b39853c/go.mod h1:DM5xW0nvfNNm2uytzsvhI3OnX8uzaRAg8UX/CnDqbto=
|
||||
github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo=
|
||||
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
|
||||
github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q=
|
||||
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
|
||||
github.com/sclevine/spec v1.2.0/go.mod h1:W4J29eT/Kzv7/b9IWLB055Z+qvVC9vt0Arko24q7p+U=
|
||||
github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo=
|
||||
github.com/serialx/hashring v0.0.0-20190422032157-8b2912629002 h1:ka9QPuQg2u4LGipiZGsgkg3rJCo4iIUCy75FddM0GRQ=
|
||||
github.com/serialx/hashring v0.0.0-20190422032157-8b2912629002/go.mod h1:/yeG0My1xr/u+HZrFQ1tOQQQQrOawfyMUH13ai5brBc=
|
||||
github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo=
|
||||
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
|
||||
github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
|
||||
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
|
||||
github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
|
||||
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
|
||||
github.com/sirupsen/logrus v1.6.0 h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I=
|
||||
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
|
||||
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
|
||||
github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
|
||||
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
|
||||
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
|
||||
github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
|
||||
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
|
||||
github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
|
||||
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
|
||||
github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
|
||||
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
|
||||
github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
||||
github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
||||
github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
||||
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
||||
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
|
||||
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s=
|
||||
github.com/stretchr/objx v0.0.0-20180129172003-8a3f7159479f/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A=
|
||||
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48=
|
||||
github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
|
||||
github.com/stretchr/testify v0.0.0-20151208002404-e3a8ff8ce365/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/stretchr/testify v0.0.0-20180303142811-b89eecf5ca5d/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
|
@ -271,6 +553,8 @@ github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5
|
|||
github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
|
||||
github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2 h1:b6uOv7YOFK0TYG7HtkIgExQo+2RdLuwRft63jn2HWj8=
|
||||
github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
|
||||
github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA=
|
||||
github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
|
||||
github.com/tonistiigi/fsutil v0.0.0-20200724193237-c3ed55f3b481 h1:D4cbMpyxP3RhRI+s5RBK7x73nT9cmWFcP6esk951JNw=
|
||||
github.com/tonistiigi/fsutil v0.0.0-20200724193237-c3ed55f3b481/go.mod h1:PlsE3+FgE0r5iesnFMYbo1w7A90DGWwKB9h/BUDaKuM=
|
||||
github.com/tonistiigi/go-immutable-radix v0.0.0-20170803185627-826af9ccf0fe h1:pd7hrFSqUPxYS9IB+UMG1AB/8EXGXo17ssx0bSQ5L6Y=
|
||||
|
@ -279,56 +563,120 @@ github.com/tonistiigi/units v0.0.0-20180711220420-6950e57a87ea h1:SXhTLE6pb6eld/
|
|||
github.com/tonistiigi/units v0.0.0-20180711220420-6950e57a87ea/go.mod h1:WPnis/6cRcDZSUvVmezrxJPkiO87ThFYsoUiMwWNDJk=
|
||||
github.com/tonistiigi/vt100 v0.0.0-20190402012908-ad4c4a574305 h1:y/1cL5AL2oRcfzz8CAHHhR6kDDfIOT0WEyH5k40sccM=
|
||||
github.com/tonistiigi/vt100 v0.0.0-20190402012908-ad4c4a574305/go.mod h1:gXOLibKqQTRAVuVZ9gX7G9Ykky8ll8yb4slxsEMoY0c=
|
||||
github.com/uber/jaeger-client-go v2.11.2+incompatible h1:D2idO5gYBl+40qnsowJaqtwCV6z1rxYy2yhYBh3mVvI=
|
||||
github.com/uber/jaeger-client-go v2.11.2+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk=
|
||||
github.com/uber/jaeger-lib v1.2.1 h1:1MUo9UEukgEwIa8pIHpx5+Y+suhM36KCEatADCK/8CI=
|
||||
github.com/uber/jaeger-lib v1.2.1/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U=
|
||||
github.com/uber/jaeger-client-go v2.25.0+incompatible h1:IxcNZ7WRY1Y3G4poYlx24szfsn/3LvK9QHCq9oQw8+U=
|
||||
github.com/uber/jaeger-client-go v2.25.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk=
|
||||
github.com/uber/jaeger-lib v2.2.0+incompatible h1:MxZXOiR2JuoANZ3J6DE/U0kSFv/eJ/GfSYVCjK7dyaw=
|
||||
github.com/uber/jaeger-lib v2.2.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U=
|
||||
github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
|
||||
github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
|
||||
github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
|
||||
github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
|
||||
github.com/urfave/cli v1.22.2 h1:gsqYFH8bb9ekPA12kRo0hfjngWQjkJPlN9R0N78BoUo=
|
||||
github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
|
||||
github.com/vdemeester/k8s-pkg-credentialprovider v1.17.4/go.mod h1:inCTmtUdr5KJbreVojo06krnTgaeAz/Z7lynpPk/Q2c=
|
||||
github.com/vishvananda/netlink v1.1.0 h1:1iyaYNBLmP6L0220aDnYQpo1QEV4t4hJ+xEEhhJH8j0=
|
||||
github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE=
|
||||
github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df h1:OviZH7qLw/7ZovXvuNyL3XQl8UFofeikI1NW1Gypu7k=
|
||||
github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU=
|
||||
github.com/vmware/govmomi v0.20.3/go.mod h1:URlwyTFZX72RmxtxuaFL2Uj3fD1JTvZdx59bHWk6aFU=
|
||||
github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243 h1:R43TdZy32XXSXjJn7M/HhALJ9imq6ztLnChfYJpVDnM=
|
||||
github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
|
||||
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
|
||||
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
|
||||
github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs=
|
||||
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
|
||||
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
|
||||
github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs=
|
||||
github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA=
|
||||
github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg=
|
||||
go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
|
||||
go.etcd.io/bbolt v1.3.5 h1:XAzx9gjCb0Rxj7EoqcClPD1d5ZBxZJk0jbuoPHenBt0=
|
||||
go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
|
||||
go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg=
|
||||
go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA=
|
||||
go.opencensus.io v0.19.1/go.mod h1:gug0GbSHa8Pafr0d2urOSgoXHZ6x/RUlaiT0d9pqb4A=
|
||||
go.opencensus.io v0.19.2/go.mod h1:NO/8qkisMZLZ1FCsKNqtJPwc8/TaclWyY0B6wcYNg9M=
|
||||
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
|
||||
go.opencensus.io v0.22.0 h1:C9hSCOW830chIVkdja34wa6Ky+IzWllkUinR+BtRZd4=
|
||||
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
|
||||
go.opencensus.io v0.22.2 h1:75k/FF0Q2YM8QYo07VPddOLBslDt1MZOdEslOHvmzAs=
|
||||
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
|
||||
go.uber.org/atomic v1.4.0 h1:cxzIVoETapQEqDhQu3QfnvXAV4AlzcvUCxkVUFw3+EU=
|
||||
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
|
||||
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
|
||||
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
|
||||
go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE=
|
||||
golang.org/x/build v0.0.0-20190314133821-5284462c4bec/go.mod h1:atTaCNAy0f16Ah5aV1gMSwgiKVHwu/JncqDpuRr7lS4=
|
||||
golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20200221231518-2aa609cf4a9d h1:1ZiEyfaQIg3Qh0EoqpwAakHVhecoE5wlSg5GjnafJGw=
|
||||
golang.org/x/crypto v0.0.0-20200221231518-2aa609cf4a9d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190312203227-4b39c73a6495/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
|
||||
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
|
||||
golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
|
||||
golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
|
||||
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
|
||||
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
|
||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
|
||||
golang.org/x/lint v0.0.0-20181217174547-8f45f776aaf1/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
|
||||
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
|
||||
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
|
||||
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
|
||||
golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
|
||||
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
|
||||
golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20181029044818-c44066c5c816/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
|
||||
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190619014844-b5b0513f8c1b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b h1:0mm1VjtFUOIlE1SbDlwjYaDxZVDP2S5ou6y0gSgXHu8=
|
||||
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200707034311-ab3426394381 h1:VXak5I6aEWmAXeQjA+QSZzlgNrpq9mjcfDemuexIKsU=
|
||||
golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
golang.org/x/oauth2 v0.0.0-20180724155351-3d292e4d0cdc/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6 h1:pE8b58s1HRDMi8RDc79m0HISf9D4TzseP40cEA6IGfs=
|
||||
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
|
@ -336,73 +684,158 @@ golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJ
|
|||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e h1:vcxGaoTs7kV8m5Np9uUNQin4BrLOthgV7252N8V+FwY=
|
||||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181029174526-d69651ed3497/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181218192612-074acd46bca6/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190514135907-3a4b5fb9f71f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190602015325-4c4f7f33c9ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191010194322-b09406accb47/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200120151820-655fe14d7479/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1 h1:sIky/MyNRSHTrdxfsiUSS4WIAMvInbeXljJz+jDjeYE=
|
||||
golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
|
||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||
golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20191024005414-555d28b269f0 h1:/5xXl8Y5W96D+TtHSlonuFqGHIWVuyCkGJLwGh9JJFs=
|
||||
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20181219222714-6e267b5cc78e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
|
||||
golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||
golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||
golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||
golang.org/x/tools v0.0.0-20190706070813-72ffa07ba3db/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI=
|
||||
golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20190920225731-5eefd052ad72/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200210192313-1ace956b0e17/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
gonum.org/v1/gonum v0.0.0-20190331200053-3d26580ed485/go.mod h1:2ltnJ7xHfj0zHS40VVPYEAAMTa3ZGguvHGBSJeRWqE0=
|
||||
gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw=
|
||||
gonum.org/v1/netlib v0.0.0-20190331212654-76723241ea4e/go.mod h1:kS+toOQn6AQKjmKJ7gzohV1XkqsFehRA2FbsbkopSuQ=
|
||||
google.golang.org/api v0.0.0-20160322025152-9bf6e6e569ff/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
|
||||
google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
|
||||
google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
|
||||
google.golang.org/api v0.0.0-20181220000619-583d854617af/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
|
||||
google.golang.org/api v0.2.0/go.mod h1:IfRCZScioGtypHNTlz3gFk67J8uePVW7uDTBzXuIkhU=
|
||||
google.golang.org/api v0.3.0/go.mod h1:IuvZyQh8jgscv8qWfQ4ABd8m7hEudgBFM/EdhA3BnXw=
|
||||
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
|
||||
google.golang.org/api v0.6.1-0.20190607001116-5213b8090861/go.mod h1:btoxGiFvQNVUZQ8W08zLtrVS08CNpINPEfxXxgJL1Q4=
|
||||
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
|
||||
google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
|
||||
google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
|
||||
google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
|
||||
google.golang.org/appengine v1.6.5 h1:tycE03LOZYQNhDpS27tcQdAzLCVMaj7QT2SXxebnpCM=
|
||||
google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
|
||||
google.golang.org/cloud v0.0.0-20151119220103-975617b05ea8/go.mod h1:0H1ncTHf11KCFhTc/+EFRbzSCOZx+VUbRMk55Yv5MYk=
|
||||
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
||||
google.golang.org/genproto v0.0.0-20200117163144-32f20d992d24/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||
google.golang.org/genproto v0.0.0-20200227132054-3f1135a288c9 h1:Koy0f8zyrEVfIHetH7wjP5mQLUXiqDpubSg8V1fAxqc=
|
||||
google.golang.org/genproto v0.0.0-20200227132054-3f1135a288c9/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63 h1:YzfoEYWbODU5Fbt37+h7X16BWQbad7Q4S6gclTKFXM8=
|
||||
google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
|
||||
google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
|
||||
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
|
||||
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
|
||||
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
|
||||
google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
|
||||
google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA=
|
||||
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
|
||||
google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||
google.golang.org/grpc v1.27.1 h1:zvIju4sqAGvwKspUQOhwnpcqSbzi7/H6QomNNjTL4sk=
|
||||
google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||
google.golang.org/grpc v1.28.1 h1:C1QC6KzgSiLyBabDi87BbjaGreoRgGUF5nOyvfrAZ1k=
|
||||
google.golang.org/grpc v1.28.1/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=
|
||||
google.golang.org/protobuf v1.24.0 h1:UhZDfRO8JRQru4/+LlLE0BRKGF8L+PICnvYZmx/fEGA=
|
||||
google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
|
||||
gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U=
|
||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20141024133853-64131543e789/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
|
||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
|
||||
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
|
||||
gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
|
||||
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
|
||||
gopkg.in/gcfg.v1 v1.2.0/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o=
|
||||
gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo=
|
||||
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
|
||||
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
|
||||
gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
|
||||
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
|
||||
gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
|
||||
gopkg.in/warnings.v0 v0.1.1/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI=
|
||||
gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
|
||||
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
|
@ -412,7 +845,62 @@ gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
|
|||
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
|
||||
gotest.tools/v3 v3.0.2 h1:kG1BFyqVHuQoVQiR1bWGnfz/fmHvvuiSPIV7rvl360E=
|
||||
gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk=
|
||||
grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o=
|
||||
honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20180920025451-e3ad64cb4ed3/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
|
||||
k8s.io/api v0.0.0-20180904230853-4e7be11eab3f/go.mod h1:iuAfoD4hCxJ8Onx9kaTIt30j7jUFS00AXQi6QMi99vA=
|
||||
k8s.io/api v0.17.4/go.mod h1:5qxx6vjmwUVG2nHQTKGlLts8Tbok8PzHl4vHtVFuZCA=
|
||||
k8s.io/api v0.19.0 h1:XyrFIJqTYZJ2DU7FBE/bSPz7b1HvbVBuBf07oeo6eTc=
|
||||
k8s.io/api v0.19.0/go.mod h1:I1K45XlvTrDjmj5LoM5LuP/KYrhWbjUKT/SoPG0qTjw=
|
||||
k8s.io/apimachinery v0.0.0-20180904193909-def12e63c512/go.mod h1:ccL7Eh7zubPUSh9A3USN90/OzHNSVN6zxzde07TDCL0=
|
||||
k8s.io/apimachinery v0.17.4/go.mod h1:gxLnyZcGNdZTCLnq3fgzyg2A5BVCHTNDFrw8AmuJ+0g=
|
||||
k8s.io/apimachinery v0.19.0 h1:gjKnAda/HZp5k4xQYjL0K/Yb66IvNqjthCb03QlKpaQ=
|
||||
k8s.io/apimachinery v0.19.0/go.mod h1:DnPGDnARWFvYa3pMHgSxtbZb7gpzzAZ1pTfaUNDVlmA=
|
||||
k8s.io/apiserver v0.17.4/go.mod h1:5ZDQ6Xr5MNBxyi3iUZXS84QOhZl+W7Oq2us/29c0j9I=
|
||||
k8s.io/client-go v0.0.0-20180910083459-2cefa64ff137/go.mod h1:7vJpHMYJwNQCWgzmNV+VYUl1zCObLyodBc8nIyt8L5s=
|
||||
k8s.io/client-go v0.17.4/go.mod h1:ouF6o5pz3is8qU0/qYL2RnoxOPqgfuidYLowytyLJmc=
|
||||
k8s.io/client-go v0.19.0 h1:1+0E0zfWFIWeyRhQYWzimJOyAk2UT7TiARaLNwJCf7k=
|
||||
k8s.io/client-go v0.19.0/go.mod h1:H9E/VT95blcFQnlyShFgnFT9ZnJOAceiUHM3MlRC+mU=
|
||||
k8s.io/cloud-provider v0.17.4/go.mod h1:XEjKDzfD+b9MTLXQFlDGkk6Ho8SGMpaU8Uugx/KNK9U=
|
||||
k8s.io/code-generator v0.17.2/go.mod h1:DVmfPQgxQENqDIzVR2ddLXMH34qeszkKSdH/N+s+38s=
|
||||
k8s.io/component-base v0.17.4/go.mod h1:5BRqHMbbQPm2kKu35v3G+CpVq4K0RJKC7TRioF0I9lE=
|
||||
k8s.io/cri-api v0.17.3/go.mod h1:X1sbHmuXhwaHs9xxYffLqJogVsnI+f6cPRcgPel7ywM=
|
||||
k8s.io/csi-translation-lib v0.17.4/go.mod h1:CsxmjwxEI0tTNMzffIAcgR9lX4wOh6AKHdxQrT7L0oo=
|
||||
k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
|
||||
k8s.io/gengo v0.0.0-20190822140433-26a664648505/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
|
||||
k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
|
||||
k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
|
||||
k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
|
||||
k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8=
|
||||
k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I=
|
||||
k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
|
||||
k8s.io/klog/v2 v2.2.0 h1:XRvcwJozkgZ1UQJmfMGpvRthQHOvihEhYtDfAaxMz/A=
|
||||
k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
|
||||
k8s.io/kube-openapi v0.0.0-20180731170545-e3762e86a74c/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc=
|
||||
k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E=
|
||||
k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o=
|
||||
k8s.io/kubernetes v1.11.10/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk=
|
||||
k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk=
|
||||
k8s.io/legacy-cloud-providers v0.17.4/go.mod h1:FikRNoD64ECjkxO36gkDgJeiQWwyZTuBkhu+yxOc1Js=
|
||||
k8s.io/utils v0.0.0-20191114184206-e782cd3c129f/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
|
||||
k8s.io/utils v0.0.0-20200729134348-d5654de09c73 h1:uJmqzgNWG7XyClnU/mLPBWwfKKF1K8Hf8whTseBgJcg=
|
||||
k8s.io/utils v0.0.0-20200729134348-d5654de09c73/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
|
||||
modernc.org/cc v1.0.0/go.mod h1:1Sk4//wdnYJiUIxnW8ddKpaOJCF37yAdqYnkxUpaYxw=
|
||||
modernc.org/golex v1.0.0/go.mod h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk=
|
||||
modernc.org/mathutil v1.0.0/go.mod h1:wU0vUrJsVWBZ4P6e7xtFJEhFSNsfRLJ8H458uRjg03k=
|
||||
modernc.org/strutil v1.0.0/go.mod h1:lstksw84oURvj9y3tn8lGvRxyRC1S2+g5uuIzNfIOBs=
|
||||
modernc.org/xc v1.0.0/go.mod h1:mRNCo0bvLjGhHO9WsyuKVU4q0ceiDDDoEeWDJHrNx8I=
|
||||
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
|
||||
sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI=
|
||||
sigs.k8s.io/structured-merge-diff v1.0.1-0.20191108220359-b1b620dd3f06 h1:zD2IemQ4LmOcAumeiyDWXKUI2SO0NYDe3H6QGvPOVgU=
|
||||
sigs.k8s.io/structured-merge-diff v1.0.1-0.20191108220359-b1b620dd3f06/go.mod h1:/ULNhyfzRopfcjskuui0cTITekDduZ7ycKN3oUT9R18=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.0.1 h1:YXTMot5Qz/X1iBRJhAt+vI+HVttY0WkSqqhKxQ0xVbA=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
|
||||
sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
|
||||
sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q=
|
||||
sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
|
||||
|
|
|
@ -38,21 +38,21 @@ func InitContainerdWorker() {
|
|||
}
|
||||
}
|
||||
|
||||
if s := os.Getenv("BUILDKIT_INTEGRATION_CONTAINERD_STARGZ"); s == "1" {
|
||||
if s := os.Getenv("BUILDKIT_INTEGRATION_SNAPSHOTTER"); s != "" {
|
||||
Register(&containerd{
|
||||
name: "containerd-stargz",
|
||||
containerd: "containerd",
|
||||
containerdShim: "containerd-shim-runc-v2",
|
||||
stargzSnapshotter: "containerd-stargz-grpc",
|
||||
name: fmt.Sprintf("containerd-snapshotter-%s", s),
|
||||
containerd: "containerd",
|
||||
containerdShim: "containerd-shim-runc-v2",
|
||||
snapshotter: s,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
type containerd struct {
|
||||
name string
|
||||
containerd string
|
||||
containerdShim string
|
||||
stargzSnapshotter string
|
||||
name string
|
||||
containerd string
|
||||
containerdShim string
|
||||
snapshotter string
|
||||
}
|
||||
|
||||
func (c *containerd) Name() string {
|
||||
|
@ -110,22 +110,23 @@ disabled_plugins = ["cri"]
|
|||
`, filepath.Join(tmpdir, "root"), filepath.Join(tmpdir, "state"), address, filepath.Join(tmpdir, "debug.sock"), c.containerdShim)
|
||||
|
||||
var snBuildkitdArgs []string
|
||||
if c.stargzSnapshotter != "" {
|
||||
snPath, snCl, err := runStargzSnapshotter(cfg, c.stargzSnapshotter)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
deferF.append(snCl)
|
||||
|
||||
config = fmt.Sprintf(`%s
|
||||
if c.snapshotter != "" {
|
||||
snBuildkitdArgs = append(snBuildkitdArgs,
|
||||
fmt.Sprintf("--containerd-worker-snapshotter=%s", c.snapshotter))
|
||||
if c.snapshotter == "stargz" {
|
||||
snPath, snCl, err := runStargzSnapshotter(cfg)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
deferF.append(snCl)
|
||||
config = fmt.Sprintf(`%s
|
||||
|
||||
[proxy_plugins]
|
||||
[proxy_plugins.stargz]
|
||||
type = "snapshot"
|
||||
address = %q
|
||||
`, config, snPath)
|
||||
|
||||
snBuildkitdArgs = append(snBuildkitdArgs, "--containerd-worker-snapshotter=stargz")
|
||||
}
|
||||
}
|
||||
|
||||
configFile := filepath.Join(tmpdir, "config.toml")
|
||||
|
@ -164,7 +165,7 @@ disabled_plugins = ["cri"]
|
|||
address: buildkitdSock,
|
||||
containerdAddress: address,
|
||||
rootless: false,
|
||||
stargz: c.stargzSnapshotter != "",
|
||||
snapshotter: c.snapshotter,
|
||||
}, cl, nil
|
||||
}
|
||||
|
||||
|
|
|
@ -23,17 +23,24 @@ func InitOCIWorker() {
|
|||
}
|
||||
}
|
||||
|
||||
if s := os.Getenv("BUILDKIT_INTEGRATION_SNAPSHOTTER"); s != "" {
|
||||
Register(&oci{snapshotter: s})
|
||||
}
|
||||
}
|
||||
|
||||
type oci struct {
|
||||
uid int
|
||||
gid int
|
||||
uid int
|
||||
gid int
|
||||
snapshotter string
|
||||
}
|
||||
|
||||
func (s *oci) Name() string {
|
||||
if s.uid != 0 {
|
||||
return "oci-rootless"
|
||||
}
|
||||
if s.snapshotter != "" {
|
||||
return fmt.Sprintf("oci-snapshotter-%s", s.snapshotter)
|
||||
}
|
||||
return "oci"
|
||||
}
|
||||
|
||||
|
@ -47,6 +54,11 @@ func (s *oci) New(cfg *BackendConfig) (Backend, func() error, error) {
|
|||
// Include use of --oci-worker-labels to trigger https://github.com/moby/buildkit/pull/603
|
||||
buildkitdArgs := []string{"buildkitd", "--oci-worker=true", "--containerd-worker=false", "--oci-worker-gc=false", "--oci-worker-labels=org.mobyproject.buildkit.worker.sandbox=true"}
|
||||
|
||||
if s.snapshotter != "" {
|
||||
buildkitdArgs = append(buildkitdArgs,
|
||||
fmt.Sprintf("--oci-worker-snapshotter=%s", s.snapshotter))
|
||||
}
|
||||
|
||||
if s.uid != 0 {
|
||||
if s.gid == 0 {
|
||||
return nil, nil, errors.Errorf("unsupported id pair: uid=%d, gid=%d", s.uid, s.gid)
|
||||
|
@ -62,7 +74,8 @@ func (s *oci) New(cfg *BackendConfig) (Backend, func() error, error) {
|
|||
}
|
||||
|
||||
return backend{
|
||||
address: buildkitdSock,
|
||||
rootless: s.uid != 0,
|
||||
address: buildkitdSock,
|
||||
rootless: s.uid != 0,
|
||||
snapshotter: s.snapshotter,
|
||||
}, stop, nil
|
||||
}
|
||||
|
|
|
@ -38,7 +38,7 @@ type Backend interface {
|
|||
Address() string
|
||||
ContainerdAddress() string
|
||||
Rootless() bool
|
||||
Stargz() bool
|
||||
Snapshotter() string
|
||||
}
|
||||
|
||||
type Sandbox interface {
|
||||
|
@ -374,7 +374,8 @@ func prepareValueMatrix(tc testConf) []matrixValue {
|
|||
return m
|
||||
}
|
||||
|
||||
func runStargzSnapshotter(cfg *BackendConfig, binary string) (address string, cl func() error, err error) {
|
||||
func runStargzSnapshotter(cfg *BackendConfig) (address string, cl func() error, err error) {
|
||||
binary := "containerd-stargz-grpc"
|
||||
if err := lookupBinary(binary); err != nil {
|
||||
return "", nil, err
|
||||
}
|
||||
|
@ -395,19 +396,12 @@ func runStargzSnapshotter(cfg *BackendConfig, binary string) (address string, cl
|
|||
}
|
||||
deferF.append(func() error { return os.RemoveAll(tmpStargzDir) })
|
||||
|
||||
config := `insecure = ["127.0.0.1", "localhost"]`
|
||||
configFile := filepath.Join(tmpStargzDir, "config.toml")
|
||||
if err = ioutil.WriteFile(configFile, []byte(config), 0644); err != nil {
|
||||
return "", nil, err
|
||||
}
|
||||
|
||||
address = filepath.Join(tmpStargzDir, "containerd-stargz-grpc.sock")
|
||||
stargzRootDir := filepath.Join(tmpStargzDir, "root")
|
||||
cmd := exec.Command(binary,
|
||||
"--log-level", "debug",
|
||||
"--address", address,
|
||||
"--root", stargzRootDir,
|
||||
"--config", configFile)
|
||||
"--root", stargzRootDir)
|
||||
snStop, err := startCmd(cmd, cfg.Logs)
|
||||
if err != nil {
|
||||
return "", nil, err
|
||||
|
|
|
@ -24,7 +24,7 @@ type backend struct {
|
|||
address string
|
||||
containerdAddress string
|
||||
rootless bool
|
||||
stargz bool
|
||||
snapshotter string
|
||||
}
|
||||
|
||||
func (b backend) Address() string {
|
||||
|
@ -39,8 +39,8 @@ func (b backend) Rootless() bool {
|
|||
return b.rootless
|
||||
}
|
||||
|
||||
func (b backend) Stargz() bool {
|
||||
return b.stargz
|
||||
func (b backend) Snapshotter() string {
|
||||
return b.snapshotter
|
||||
}
|
||||
|
||||
type sandbox struct {
|
||||
|
|
|
@ -1,5 +0,0 @@
|
|||
Apache Thrift
|
||||
Copyright 2006-2010 The Apache Software Foundation.
|
||||
|
||||
This product includes software developed at
|
||||
The Apache Software Foundation (http://www.apache.org/).
|
|
@ -1,91 +0,0 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package thrift
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
)
|
||||
|
||||
type TBufferedTransportFactory struct {
|
||||
size int
|
||||
}
|
||||
|
||||
type TBufferedTransport struct {
|
||||
bufio.ReadWriter
|
||||
tp TTransport
|
||||
}
|
||||
|
||||
func (p *TBufferedTransportFactory) GetTransport(trans TTransport) TTransport {
|
||||
return NewTBufferedTransport(trans, p.size)
|
||||
}
|
||||
|
||||
func NewTBufferedTransportFactory(bufferSize int) *TBufferedTransportFactory {
|
||||
return &TBufferedTransportFactory{size: bufferSize}
|
||||
}
|
||||
|
||||
func NewTBufferedTransport(trans TTransport, bufferSize int) *TBufferedTransport {
|
||||
return &TBufferedTransport{
|
||||
ReadWriter: bufio.ReadWriter{
|
||||
Reader: bufio.NewReaderSize(trans, bufferSize),
|
||||
Writer: bufio.NewWriterSize(trans, bufferSize),
|
||||
},
|
||||
tp: trans,
|
||||
}
|
||||
}
|
||||
|
||||
func (p *TBufferedTransport) IsOpen() bool {
|
||||
return p.tp.IsOpen()
|
||||
}
|
||||
|
||||
func (p *TBufferedTransport) Open() (err error) {
|
||||
return p.tp.Open()
|
||||
}
|
||||
|
||||
func (p *TBufferedTransport) Close() (err error) {
|
||||
return p.tp.Close()
|
||||
}
|
||||
|
||||
func (p *TBufferedTransport) Read(b []byte) (int, error) {
|
||||
n, err := p.ReadWriter.Read(b)
|
||||
if err != nil {
|
||||
p.ReadWriter.Reader.Reset(p.tp)
|
||||
}
|
||||
return n, err
|
||||
}
|
||||
|
||||
func (p *TBufferedTransport) Write(b []byte) (int, error) {
|
||||
n, err := p.ReadWriter.Write(b)
|
||||
if err != nil {
|
||||
p.ReadWriter.Writer.Reset(p.tp)
|
||||
}
|
||||
return n, err
|
||||
}
|
||||
|
||||
func (p *TBufferedTransport) Flush() error {
|
||||
if err := p.ReadWriter.Flush(); err != nil {
|
||||
p.ReadWriter.Writer.Reset(p.tp)
|
||||
return err
|
||||
}
|
||||
return p.tp.Flush()
|
||||
}
|
||||
|
||||
func (p *TBufferedTransport) RemainingBytes() (num_bytes uint64) {
|
||||
return p.tp.RemainingBytes()
|
||||
}
|
|
@ -1,269 +0,0 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package thrift
|
||||
|
||||
import (
|
||||
"log"
|
||||
)
|
||||
|
||||
type TDebugProtocol struct {
|
||||
Delegate TProtocol
|
||||
LogPrefix string
|
||||
}
|
||||
|
||||
type TDebugProtocolFactory struct {
|
||||
Underlying TProtocolFactory
|
||||
LogPrefix string
|
||||
}
|
||||
|
||||
func NewTDebugProtocolFactory(underlying TProtocolFactory, logPrefix string) *TDebugProtocolFactory {
|
||||
return &TDebugProtocolFactory{
|
||||
Underlying: underlying,
|
||||
LogPrefix: logPrefix,
|
||||
}
|
||||
}
|
||||
|
||||
func (t *TDebugProtocolFactory) GetProtocol(trans TTransport) TProtocol {
|
||||
return &TDebugProtocol{
|
||||
Delegate: t.Underlying.GetProtocol(trans),
|
||||
LogPrefix: t.LogPrefix,
|
||||
}
|
||||
}
|
||||
|
||||
func (tdp *TDebugProtocol) WriteMessageBegin(name string, typeId TMessageType, seqid int32) error {
|
||||
err := tdp.Delegate.WriteMessageBegin(name, typeId, seqid)
|
||||
log.Printf("%sWriteMessageBegin(name=%#v, typeId=%#v, seqid=%#v) => %#v", tdp.LogPrefix, name, typeId, seqid, err)
|
||||
return err
|
||||
}
|
||||
func (tdp *TDebugProtocol) WriteMessageEnd() error {
|
||||
err := tdp.Delegate.WriteMessageEnd()
|
||||
log.Printf("%sWriteMessageEnd() => %#v", tdp.LogPrefix, err)
|
||||
return err
|
||||
}
|
||||
func (tdp *TDebugProtocol) WriteStructBegin(name string) error {
|
||||
err := tdp.Delegate.WriteStructBegin(name)
|
||||
log.Printf("%sWriteStructBegin(name=%#v) => %#v", tdp.LogPrefix, name, err)
|
||||
return err
|
||||
}
|
||||
func (tdp *TDebugProtocol) WriteStructEnd() error {
|
||||
err := tdp.Delegate.WriteStructEnd()
|
||||
log.Printf("%sWriteStructEnd() => %#v", tdp.LogPrefix, err)
|
||||
return err
|
||||
}
|
||||
func (tdp *TDebugProtocol) WriteFieldBegin(name string, typeId TType, id int16) error {
|
||||
err := tdp.Delegate.WriteFieldBegin(name, typeId, id)
|
||||
log.Printf("%sWriteFieldBegin(name=%#v, typeId=%#v, id%#v) => %#v", tdp.LogPrefix, name, typeId, id, err)
|
||||
return err
|
||||
}
|
||||
func (tdp *TDebugProtocol) WriteFieldEnd() error {
|
||||
err := tdp.Delegate.WriteFieldEnd()
|
||||
log.Printf("%sWriteFieldEnd() => %#v", tdp.LogPrefix, err)
|
||||
return err
|
||||
}
|
||||
func (tdp *TDebugProtocol) WriteFieldStop() error {
|
||||
err := tdp.Delegate.WriteFieldStop()
|
||||
log.Printf("%sWriteFieldStop() => %#v", tdp.LogPrefix, err)
|
||||
return err
|
||||
}
|
||||
func (tdp *TDebugProtocol) WriteMapBegin(keyType TType, valueType TType, size int) error {
|
||||
err := tdp.Delegate.WriteMapBegin(keyType, valueType, size)
|
||||
log.Printf("%sWriteMapBegin(keyType=%#v, valueType=%#v, size=%#v) => %#v", tdp.LogPrefix, keyType, valueType, size, err)
|
||||
return err
|
||||
}
|
||||
func (tdp *TDebugProtocol) WriteMapEnd() error {
|
||||
err := tdp.Delegate.WriteMapEnd()
|
||||
log.Printf("%sWriteMapEnd() => %#v", tdp.LogPrefix, err)
|
||||
return err
|
||||
}
|
||||
func (tdp *TDebugProtocol) WriteListBegin(elemType TType, size int) error {
|
||||
err := tdp.Delegate.WriteListBegin(elemType, size)
|
||||
log.Printf("%sWriteListBegin(elemType=%#v, size=%#v) => %#v", tdp.LogPrefix, elemType, size, err)
|
||||
return err
|
||||
}
|
||||
func (tdp *TDebugProtocol) WriteListEnd() error {
|
||||
err := tdp.Delegate.WriteListEnd()
|
||||
log.Printf("%sWriteListEnd() => %#v", tdp.LogPrefix, err)
|
||||
return err
|
||||
}
|
||||
func (tdp *TDebugProtocol) WriteSetBegin(elemType TType, size int) error {
|
||||
err := tdp.Delegate.WriteSetBegin(elemType, size)
|
||||
log.Printf("%sWriteSetBegin(elemType=%#v, size=%#v) => %#v", tdp.LogPrefix, elemType, size, err)
|
||||
return err
|
||||
}
|
||||
func (tdp *TDebugProtocol) WriteSetEnd() error {
|
||||
err := tdp.Delegate.WriteSetEnd()
|
||||
log.Printf("%sWriteSetEnd() => %#v", tdp.LogPrefix, err)
|
||||
return err
|
||||
}
|
||||
func (tdp *TDebugProtocol) WriteBool(value bool) error {
|
||||
err := tdp.Delegate.WriteBool(value)
|
||||
log.Printf("%sWriteBool(value=%#v) => %#v", tdp.LogPrefix, value, err)
|
||||
return err
|
||||
}
|
||||
func (tdp *TDebugProtocol) WriteByte(value int8) error {
|
||||
err := tdp.Delegate.WriteByte(value)
|
||||
log.Printf("%sWriteByte(value=%#v) => %#v", tdp.LogPrefix, value, err)
|
||||
return err
|
||||
}
|
||||
func (tdp *TDebugProtocol) WriteI16(value int16) error {
|
||||
err := tdp.Delegate.WriteI16(value)
|
||||
log.Printf("%sWriteI16(value=%#v) => %#v", tdp.LogPrefix, value, err)
|
||||
return err
|
||||
}
|
||||
func (tdp *TDebugProtocol) WriteI32(value int32) error {
|
||||
err := tdp.Delegate.WriteI32(value)
|
||||
log.Printf("%sWriteI32(value=%#v) => %#v", tdp.LogPrefix, value, err)
|
||||
return err
|
||||
}
|
||||
func (tdp *TDebugProtocol) WriteI64(value int64) error {
|
||||
err := tdp.Delegate.WriteI64(value)
|
||||
log.Printf("%sWriteI64(value=%#v) => %#v", tdp.LogPrefix, value, err)
|
||||
return err
|
||||
}
|
||||
func (tdp *TDebugProtocol) WriteDouble(value float64) error {
|
||||
err := tdp.Delegate.WriteDouble(value)
|
||||
log.Printf("%sWriteDouble(value=%#v) => %#v", tdp.LogPrefix, value, err)
|
||||
return err
|
||||
}
|
||||
func (tdp *TDebugProtocol) WriteString(value string) error {
|
||||
err := tdp.Delegate.WriteString(value)
|
||||
log.Printf("%sWriteString(value=%#v) => %#v", tdp.LogPrefix, value, err)
|
||||
return err
|
||||
}
|
||||
func (tdp *TDebugProtocol) WriteBinary(value []byte) error {
|
||||
err := tdp.Delegate.WriteBinary(value)
|
||||
log.Printf("%sWriteBinary(value=%#v) => %#v", tdp.LogPrefix, value, err)
|
||||
return err
|
||||
}
|
||||
|
||||
func (tdp *TDebugProtocol) ReadMessageBegin() (name string, typeId TMessageType, seqid int32, err error) {
|
||||
name, typeId, seqid, err = tdp.Delegate.ReadMessageBegin()
|
||||
log.Printf("%sReadMessageBegin() (name=%#v, typeId=%#v, seqid=%#v, err=%#v)", tdp.LogPrefix, name, typeId, seqid, err)
|
||||
return
|
||||
}
|
||||
func (tdp *TDebugProtocol) ReadMessageEnd() (err error) {
|
||||
err = tdp.Delegate.ReadMessageEnd()
|
||||
log.Printf("%sReadMessageEnd() err=%#v", tdp.LogPrefix, err)
|
||||
return
|
||||
}
|
||||
func (tdp *TDebugProtocol) ReadStructBegin() (name string, err error) {
|
||||
name, err = tdp.Delegate.ReadStructBegin()
|
||||
log.Printf("%sReadStructBegin() (name%#v, err=%#v)", tdp.LogPrefix, name, err)
|
||||
return
|
||||
}
|
||||
func (tdp *TDebugProtocol) ReadStructEnd() (err error) {
|
||||
err = tdp.Delegate.ReadStructEnd()
|
||||
log.Printf("%sReadStructEnd() err=%#v", tdp.LogPrefix, err)
|
||||
return
|
||||
}
|
||||
func (tdp *TDebugProtocol) ReadFieldBegin() (name string, typeId TType, id int16, err error) {
|
||||
name, typeId, id, err = tdp.Delegate.ReadFieldBegin()
|
||||
log.Printf("%sReadFieldBegin() (name=%#v, typeId=%#v, id=%#v, err=%#v)", tdp.LogPrefix, name, typeId, id, err)
|
||||
return
|
||||
}
|
||||
func (tdp *TDebugProtocol) ReadFieldEnd() (err error) {
|
||||
err = tdp.Delegate.ReadFieldEnd()
|
||||
log.Printf("%sReadFieldEnd() err=%#v", tdp.LogPrefix, err)
|
||||
return
|
||||
}
|
||||
func (tdp *TDebugProtocol) ReadMapBegin() (keyType TType, valueType TType, size int, err error) {
|
||||
keyType, valueType, size, err = tdp.Delegate.ReadMapBegin()
|
||||
log.Printf("%sReadMapBegin() (keyType=%#v, valueType=%#v, size=%#v, err=%#v)", tdp.LogPrefix, keyType, valueType, size, err)
|
||||
return
|
||||
}
|
||||
func (tdp *TDebugProtocol) ReadMapEnd() (err error) {
|
||||
err = tdp.Delegate.ReadMapEnd()
|
||||
log.Printf("%sReadMapEnd() err=%#v", tdp.LogPrefix, err)
|
||||
return
|
||||
}
|
||||
func (tdp *TDebugProtocol) ReadListBegin() (elemType TType, size int, err error) {
|
||||
elemType, size, err = tdp.Delegate.ReadListBegin()
|
||||
log.Printf("%sReadListBegin() (elemType=%#v, size=%#v, err=%#v)", tdp.LogPrefix, elemType, size, err)
|
||||
return
|
||||
}
|
||||
func (tdp *TDebugProtocol) ReadListEnd() (err error) {
|
||||
err = tdp.Delegate.ReadListEnd()
|
||||
log.Printf("%sReadListEnd() err=%#v", tdp.LogPrefix, err)
|
||||
return
|
||||
}
|
||||
func (tdp *TDebugProtocol) ReadSetBegin() (elemType TType, size int, err error) {
|
||||
elemType, size, err = tdp.Delegate.ReadSetBegin()
|
||||
log.Printf("%sReadSetBegin() (elemType=%#v, size=%#v, err=%#v)", tdp.LogPrefix, elemType, size, err)
|
||||
return
|
||||
}
|
||||
func (tdp *TDebugProtocol) ReadSetEnd() (err error) {
|
||||
err = tdp.Delegate.ReadSetEnd()
|
||||
log.Printf("%sReadSetEnd() err=%#v", tdp.LogPrefix, err)
|
||||
return
|
||||
}
|
||||
func (tdp *TDebugProtocol) ReadBool() (value bool, err error) {
|
||||
value, err = tdp.Delegate.ReadBool()
|
||||
log.Printf("%sReadBool() (value=%#v, err=%#v)", tdp.LogPrefix, value, err)
|
||||
return
|
||||
}
|
||||
func (tdp *TDebugProtocol) ReadByte() (value int8, err error) {
|
||||
value, err = tdp.Delegate.ReadByte()
|
||||
log.Printf("%sReadByte() (value=%#v, err=%#v)", tdp.LogPrefix, value, err)
|
||||
return
|
||||
}
|
||||
func (tdp *TDebugProtocol) ReadI16() (value int16, err error) {
|
||||
value, err = tdp.Delegate.ReadI16()
|
||||
log.Printf("%sReadI16() (value=%#v, err=%#v)", tdp.LogPrefix, value, err)
|
||||
return
|
||||
}
|
||||
func (tdp *TDebugProtocol) ReadI32() (value int32, err error) {
|
||||
value, err = tdp.Delegate.ReadI32()
|
||||
log.Printf("%sReadI32() (value=%#v, err=%#v)", tdp.LogPrefix, value, err)
|
||||
return
|
||||
}
|
||||
func (tdp *TDebugProtocol) ReadI64() (value int64, err error) {
|
||||
value, err = tdp.Delegate.ReadI64()
|
||||
log.Printf("%sReadI64() (value=%#v, err=%#v)", tdp.LogPrefix, value, err)
|
||||
return
|
||||
}
|
||||
func (tdp *TDebugProtocol) ReadDouble() (value float64, err error) {
|
||||
value, err = tdp.Delegate.ReadDouble()
|
||||
log.Printf("%sReadDouble() (value=%#v, err=%#v)", tdp.LogPrefix, value, err)
|
||||
return
|
||||
}
|
||||
func (tdp *TDebugProtocol) ReadString() (value string, err error) {
|
||||
value, err = tdp.Delegate.ReadString()
|
||||
log.Printf("%sReadString() (value=%#v, err=%#v)", tdp.LogPrefix, value, err)
|
||||
return
|
||||
}
|
||||
func (tdp *TDebugProtocol) ReadBinary() (value []byte, err error) {
|
||||
value, err = tdp.Delegate.ReadBinary()
|
||||
log.Printf("%sReadBinary() (value=%#v, err=%#v)", tdp.LogPrefix, value, err)
|
||||
return
|
||||
}
|
||||
func (tdp *TDebugProtocol) Skip(fieldType TType) (err error) {
|
||||
err = tdp.Delegate.Skip(fieldType)
|
||||
log.Printf("%sSkip(fieldType=%#v) (err=%#v)", tdp.LogPrefix, fieldType, err)
|
||||
return
|
||||
}
|
||||
func (tdp *TDebugProtocol) Flush() (err error) {
|
||||
err = tdp.Delegate.Flush()
|
||||
log.Printf("%sFlush() (err=%#v)", tdp.LogPrefix, err)
|
||||
return
|
||||
}
|
||||
|
||||
func (tdp *TDebugProtocol) Transport() TTransport {
|
||||
return tdp.Delegate.Transport()
|
||||
}
|
|
@ -1,58 +0,0 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package thrift
|
||||
|
||||
type TDeserializer struct {
|
||||
Transport TTransport
|
||||
Protocol TProtocol
|
||||
}
|
||||
|
||||
func NewTDeserializer() *TDeserializer {
|
||||
var transport TTransport
|
||||
transport = NewTMemoryBufferLen(1024)
|
||||
|
||||
protocol := NewTBinaryProtocolFactoryDefault().GetProtocol(transport)
|
||||
|
||||
return &TDeserializer{
|
||||
transport,
|
||||
protocol}
|
||||
}
|
||||
|
||||
func (t *TDeserializer) ReadString(msg TStruct, s string) (err error) {
|
||||
err = nil
|
||||
if _, err = t.Transport.Write([]byte(s)); err != nil {
|
||||
return
|
||||
}
|
||||
if err = msg.Read(t.Protocol); err != nil {
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (t *TDeserializer) Read(msg TStruct, b []byte) (err error) {
|
||||
err = nil
|
||||
if _, err = t.Transport.Write(b); err != nil {
|
||||
return
|
||||
}
|
||||
if err = msg.Read(t.Protocol); err != nil {
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
|
@ -1,79 +0,0 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package thrift
|
||||
|
||||
// Helper class that encapsulates field metadata.
|
||||
type field struct {
|
||||
name string
|
||||
typeId TType
|
||||
id int
|
||||
}
|
||||
|
||||
func newField(n string, t TType, i int) *field {
|
||||
return &field{name: n, typeId: t, id: i}
|
||||
}
|
||||
|
||||
func (p *field) Name() string {
|
||||
if p == nil {
|
||||
return ""
|
||||
}
|
||||
return p.name
|
||||
}
|
||||
|
||||
func (p *field) TypeId() TType {
|
||||
if p == nil {
|
||||
return TType(VOID)
|
||||
}
|
||||
return p.typeId
|
||||
}
|
||||
|
||||
func (p *field) Id() int {
|
||||
if p == nil {
|
||||
return -1
|
||||
}
|
||||
return p.id
|
||||
}
|
||||
|
||||
func (p *field) String() string {
|
||||
if p == nil {
|
||||
return "<nil>"
|
||||
}
|
||||
return "<TField name:'" + p.name + "' type:" + string(p.typeId) + " field-id:" + string(p.id) + ">"
|
||||
}
|
||||
|
||||
var ANONYMOUS_FIELD *field
|
||||
|
||||
type fieldSlice []field
|
||||
|
||||
func (p fieldSlice) Len() int {
|
||||
return len(p)
|
||||
}
|
||||
|
||||
func (p fieldSlice) Less(i, j int) bool {
|
||||
return p[i].Id() < p[j].Id()
|
||||
}
|
||||
|
||||
func (p fieldSlice) Swap(i, j int) {
|
||||
p[i], p[j] = p[j], p[i]
|
||||
}
|
||||
|
||||
func init() {
|
||||
ANONYMOUS_FIELD = newField("", STOP, 0)
|
||||
}
|
|
@ -1,167 +0,0 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package thrift
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"io"
|
||||
)
|
||||
|
||||
const DEFAULT_MAX_LENGTH = 16384000
|
||||
|
||||
type TFramedTransport struct {
|
||||
transport TTransport
|
||||
buf bytes.Buffer
|
||||
reader *bufio.Reader
|
||||
frameSize uint32 //Current remaining size of the frame. if ==0 read next frame header
|
||||
buffer [4]byte
|
||||
maxLength uint32
|
||||
}
|
||||
|
||||
type tFramedTransportFactory struct {
|
||||
factory TTransportFactory
|
||||
maxLength uint32
|
||||
}
|
||||
|
||||
func NewTFramedTransportFactory(factory TTransportFactory) TTransportFactory {
|
||||
return &tFramedTransportFactory{factory: factory, maxLength: DEFAULT_MAX_LENGTH}
|
||||
}
|
||||
|
||||
func NewTFramedTransportFactoryMaxLength(factory TTransportFactory, maxLength uint32) TTransportFactory {
|
||||
return &tFramedTransportFactory{factory: factory, maxLength: maxLength}
|
||||
}
|
||||
|
||||
func (p *tFramedTransportFactory) GetTransport(base TTransport) TTransport {
|
||||
return NewTFramedTransportMaxLength(p.factory.GetTransport(base), p.maxLength)
|
||||
}
|
||||
|
||||
func NewTFramedTransport(transport TTransport) *TFramedTransport {
|
||||
return &TFramedTransport{transport: transport, reader: bufio.NewReader(transport), maxLength: DEFAULT_MAX_LENGTH}
|
||||
}
|
||||
|
||||
func NewTFramedTransportMaxLength(transport TTransport, maxLength uint32) *TFramedTransport {
|
||||
return &TFramedTransport{transport: transport, reader: bufio.NewReader(transport), maxLength: maxLength}
|
||||
}
|
||||
|
||||
func (p *TFramedTransport) Open() error {
|
||||
return p.transport.Open()
|
||||
}
|
||||
|
||||
func (p *TFramedTransport) IsOpen() bool {
|
||||
return p.transport.IsOpen()
|
||||
}
|
||||
|
||||
func (p *TFramedTransport) Close() error {
|
||||
return p.transport.Close()
|
||||
}
|
||||
|
||||
func (p *TFramedTransport) Read(buf []byte) (l int, err error) {
|
||||
if p.frameSize == 0 {
|
||||
p.frameSize, err = p.readFrameHeader()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
if p.frameSize < uint32(len(buf)) {
|
||||
frameSize := p.frameSize
|
||||
tmp := make([]byte, p.frameSize)
|
||||
l, err = p.Read(tmp)
|
||||
copy(buf, tmp)
|
||||
if err == nil {
|
||||
err = NewTTransportExceptionFromError(fmt.Errorf("Not enough frame size %d to read %d bytes", frameSize, len(buf)))
|
||||
return
|
||||
}
|
||||
}
|
||||
got, err := p.reader.Read(buf)
|
||||
p.frameSize = p.frameSize - uint32(got)
|
||||
//sanity check
|
||||
if p.frameSize < 0 {
|
||||
return 0, NewTTransportException(UNKNOWN_TRANSPORT_EXCEPTION, "Negative frame size")
|
||||
}
|
||||
return got, NewTTransportExceptionFromError(err)
|
||||
}
|
||||
|
||||
func (p *TFramedTransport) ReadByte() (c byte, err error) {
|
||||
if p.frameSize == 0 {
|
||||
p.frameSize, err = p.readFrameHeader()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
if p.frameSize < 1 {
|
||||
return 0, NewTTransportExceptionFromError(fmt.Errorf("Not enough frame size %d to read %d bytes", p.frameSize, 1))
|
||||
}
|
||||
c, err = p.reader.ReadByte()
|
||||
if err == nil {
|
||||
p.frameSize--
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (p *TFramedTransport) Write(buf []byte) (int, error) {
|
||||
n, err := p.buf.Write(buf)
|
||||
return n, NewTTransportExceptionFromError(err)
|
||||
}
|
||||
|
||||
func (p *TFramedTransport) WriteByte(c byte) error {
|
||||
return p.buf.WriteByte(c)
|
||||
}
|
||||
|
||||
func (p *TFramedTransport) WriteString(s string) (n int, err error) {
|
||||
return p.buf.WriteString(s)
|
||||
}
|
||||
|
||||
func (p *TFramedTransport) Flush() error {
|
||||
size := p.buf.Len()
|
||||
buf := p.buffer[:4]
|
||||
binary.BigEndian.PutUint32(buf, uint32(size))
|
||||
_, err := p.transport.Write(buf)
|
||||
if err != nil {
|
||||
return NewTTransportExceptionFromError(err)
|
||||
}
|
||||
if size > 0 {
|
||||
if n, err := p.buf.WriteTo(p.transport); err != nil {
|
||||
print("Error while flushing write buffer of size ", size, " to transport, only wrote ", n, " bytes: ", err.Error(), "\n")
|
||||
return NewTTransportExceptionFromError(err)
|
||||
}
|
||||
}
|
||||
err = p.transport.Flush()
|
||||
return NewTTransportExceptionFromError(err)
|
||||
}
|
||||
|
||||
func (p *TFramedTransport) readFrameHeader() (uint32, error) {
|
||||
buf := p.buffer[:4]
|
||||
if _, err := io.ReadFull(p.reader, buf); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
size := binary.BigEndian.Uint32(buf)
|
||||
if size < 0 || size > p.maxLength {
|
||||
return 0, NewTTransportException(UNKNOWN_TRANSPORT_EXCEPTION, fmt.Sprintf("Incorrect frame size (%d)", size))
|
||||
}
|
||||
return size, nil
|
||||
}
|
||||
|
||||
func (p *TFramedTransport) RemainingBytes() (num_bytes uint64) {
|
||||
return uint64(p.frameSize)
|
||||
}
|
||||
|
|
@ -1,258 +0,0 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package thrift
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
// Default to using the shared http client. Library users are
|
||||
// free to change this global client or specify one through
|
||||
// THttpClientOptions.
|
||||
var DefaultHttpClient *http.Client = http.DefaultClient
|
||||
|
||||
type THttpClient struct {
|
||||
client *http.Client
|
||||
response *http.Response
|
||||
url *url.URL
|
||||
requestBuffer *bytes.Buffer
|
||||
header http.Header
|
||||
nsecConnectTimeout int64
|
||||
nsecReadTimeout int64
|
||||
}
|
||||
|
||||
type THttpClientTransportFactory struct {
|
||||
options THttpClientOptions
|
||||
url string
|
||||
isPost bool
|
||||
}
|
||||
|
||||
func (p *THttpClientTransportFactory) GetTransport(trans TTransport) TTransport {
|
||||
if trans != nil {
|
||||
t, ok := trans.(*THttpClient)
|
||||
if ok && t.url != nil {
|
||||
if t.requestBuffer != nil {
|
||||
t2, _ := NewTHttpPostClientWithOptions(t.url.String(), p.options)
|
||||
return t2
|
||||
}
|
||||
t2, _ := NewTHttpClientWithOptions(t.url.String(), p.options)
|
||||
return t2
|
||||
}
|
||||
}
|
||||
if p.isPost {
|
||||
s, _ := NewTHttpPostClientWithOptions(p.url, p.options)
|
||||
return s
|
||||
}
|
||||
s, _ := NewTHttpClientWithOptions(p.url, p.options)
|
||||
return s
|
||||
}
|
||||
|
||||
type THttpClientOptions struct {
|
||||
// If nil, DefaultHttpClient is used
|
||||
Client *http.Client
|
||||
}
|
||||
|
||||
func NewTHttpClientTransportFactory(url string) *THttpClientTransportFactory {
|
||||
return NewTHttpClientTransportFactoryWithOptions(url, THttpClientOptions{})
|
||||
}
|
||||
|
||||
func NewTHttpClientTransportFactoryWithOptions(url string, options THttpClientOptions) *THttpClientTransportFactory {
|
||||
return &THttpClientTransportFactory{url: url, isPost: false, options: options}
|
||||
}
|
||||
|
||||
func NewTHttpPostClientTransportFactory(url string) *THttpClientTransportFactory {
|
||||
return NewTHttpPostClientTransportFactoryWithOptions(url, THttpClientOptions{})
|
||||
}
|
||||
|
||||
func NewTHttpPostClientTransportFactoryWithOptions(url string, options THttpClientOptions) *THttpClientTransportFactory {
|
||||
return &THttpClientTransportFactory{url: url, isPost: true, options: options}
|
||||
}
|
||||
|
||||
func NewTHttpClientWithOptions(urlstr string, options THttpClientOptions) (TTransport, error) {
|
||||
parsedURL, err := url.Parse(urlstr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
response, err := http.Get(urlstr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
client := options.Client
|
||||
if client == nil {
|
||||
client = DefaultHttpClient
|
||||
}
|
||||
httpHeader := map[string][]string{"Content-Type": []string{"application/x-thrift"}}
|
||||
return &THttpClient{client: client, response: response, url: parsedURL, header: httpHeader}, nil
|
||||
}
|
||||
|
||||
func NewTHttpClient(urlstr string) (TTransport, error) {
|
||||
return NewTHttpClientWithOptions(urlstr, THttpClientOptions{})
|
||||
}
|
||||
|
||||
func NewTHttpPostClientWithOptions(urlstr string, options THttpClientOptions) (TTransport, error) {
|
||||
parsedURL, err := url.Parse(urlstr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
buf := make([]byte, 0, 1024)
|
||||
client := options.Client
|
||||
if client == nil {
|
||||
client = DefaultHttpClient
|
||||
}
|
||||
httpHeader := map[string][]string{"Content-Type": []string{"application/x-thrift"}}
|
||||
return &THttpClient{client: client, url: parsedURL, requestBuffer: bytes.NewBuffer(buf), header: httpHeader}, nil
|
||||
}
|
||||
|
||||
func NewTHttpPostClient(urlstr string) (TTransport, error) {
|
||||
return NewTHttpPostClientWithOptions(urlstr, THttpClientOptions{})
|
||||
}
|
||||
|
||||
// Set the HTTP Header for this specific Thrift Transport
|
||||
// It is important that you first assert the TTransport as a THttpClient type
|
||||
// like so:
|
||||
//
|
||||
// httpTrans := trans.(THttpClient)
|
||||
// httpTrans.SetHeader("User-Agent","Thrift Client 1.0")
|
||||
func (p *THttpClient) SetHeader(key string, value string) {
|
||||
p.header.Add(key, value)
|
||||
}
|
||||
|
||||
// Get the HTTP Header represented by the supplied Header Key for this specific Thrift Transport
|
||||
// It is important that you first assert the TTransport as a THttpClient type
|
||||
// like so:
|
||||
//
|
||||
// httpTrans := trans.(THttpClient)
|
||||
// hdrValue := httpTrans.GetHeader("User-Agent")
|
||||
func (p *THttpClient) GetHeader(key string) string {
|
||||
return p.header.Get(key)
|
||||
}
|
||||
|
||||
// Deletes the HTTP Header given a Header Key for this specific Thrift Transport
|
||||
// It is important that you first assert the TTransport as a THttpClient type
|
||||
// like so:
|
||||
//
|
||||
// httpTrans := trans.(THttpClient)
|
||||
// httpTrans.DelHeader("User-Agent")
|
||||
func (p *THttpClient) DelHeader(key string) {
|
||||
p.header.Del(key)
|
||||
}
|
||||
|
||||
func (p *THttpClient) Open() error {
|
||||
// do nothing
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *THttpClient) IsOpen() bool {
|
||||
return p.response != nil || p.requestBuffer != nil
|
||||
}
|
||||
|
||||
func (p *THttpClient) closeResponse() error {
|
||||
var err error
|
||||
if p.response != nil && p.response.Body != nil {
|
||||
// The docs specify that if keepalive is enabled and the response body is not
|
||||
// read to completion the connection will never be returned to the pool and
|
||||
// reused. Errors are being ignored here because if the connection is invalid
|
||||
// and this fails for some reason, the Close() method will do any remaining
|
||||
// cleanup.
|
||||
io.Copy(ioutil.Discard, p.response.Body)
|
||||
|
||||
err = p.response.Body.Close()
|
||||
}
|
||||
|
||||
p.response = nil
|
||||
return err
|
||||
}
|
||||
|
||||
func (p *THttpClient) Close() error {
|
||||
if p.requestBuffer != nil {
|
||||
p.requestBuffer.Reset()
|
||||
p.requestBuffer = nil
|
||||
}
|
||||
return p.closeResponse()
|
||||
}
|
||||
|
||||
func (p *THttpClient) Read(buf []byte) (int, error) {
|
||||
if p.response == nil {
|
||||
return 0, NewTTransportException(NOT_OPEN, "Response buffer is empty, no request.")
|
||||
}
|
||||
n, err := p.response.Body.Read(buf)
|
||||
if n > 0 && (err == nil || err == io.EOF) {
|
||||
return n, nil
|
||||
}
|
||||
return n, NewTTransportExceptionFromError(err)
|
||||
}
|
||||
|
||||
func (p *THttpClient) ReadByte() (c byte, err error) {
|
||||
return readByte(p.response.Body)
|
||||
}
|
||||
|
||||
func (p *THttpClient) Write(buf []byte) (int, error) {
|
||||
n, err := p.requestBuffer.Write(buf)
|
||||
return n, err
|
||||
}
|
||||
|
||||
func (p *THttpClient) WriteByte(c byte) error {
|
||||
return p.requestBuffer.WriteByte(c)
|
||||
}
|
||||
|
||||
func (p *THttpClient) WriteString(s string) (n int, err error) {
|
||||
return p.requestBuffer.WriteString(s)
|
||||
}
|
||||
|
||||
func (p *THttpClient) Flush() error {
|
||||
// Close any previous response body to avoid leaking connections.
|
||||
p.closeResponse()
|
||||
|
||||
req, err := http.NewRequest("POST", p.url.String(), p.requestBuffer)
|
||||
if err != nil {
|
||||
return NewTTransportExceptionFromError(err)
|
||||
}
|
||||
req.Header = p.header
|
||||
response, err := p.client.Do(req)
|
||||
if err != nil {
|
||||
return NewTTransportExceptionFromError(err)
|
||||
}
|
||||
if response.StatusCode != http.StatusOK {
|
||||
// Close the response to avoid leaking file descriptors. closeResponse does
|
||||
// more than just call Close(), so temporarily assign it and reuse the logic.
|
||||
p.response = response
|
||||
p.closeResponse()
|
||||
|
||||
// TODO(pomack) log bad response
|
||||
return NewTTransportException(UNKNOWN_TRANSPORT_EXCEPTION, "HTTP Response code: "+strconv.Itoa(response.StatusCode))
|
||||
}
|
||||
p.response = response
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *THttpClient) RemainingBytes() (num_bytes uint64) {
|
||||
len := p.response.ContentLength
|
||||
if len >= 0 {
|
||||
return uint64(len)
|
||||
}
|
||||
|
||||
const maxSize = ^uint64(0)
|
||||
return maxSize // the thruth is, we just don't know unless framed is used
|
||||
}
|
|
@ -1,34 +0,0 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package thrift
|
||||
|
||||
import "net/http"
|
||||
|
||||
// NewThriftHandlerFunc is a function that create a ready to use Apache Thrift Handler function
|
||||
func NewThriftHandlerFunc(processor TProcessor,
|
||||
inPfactory, outPfactory TProtocolFactory) func(w http.ResponseWriter, r *http.Request) {
|
||||
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
w.Header().Add("Content-Type", "application/x-thrift")
|
||||
|
||||
transport := NewStreamTransport(r.Body, w)
|
||||
processor.Process(inPfactory.GetProtocol(transport), outPfactory.GetProtocol(transport))
|
||||
}
|
||||
}
|
|
@ -1,214 +0,0 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package thrift
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"io"
|
||||
)
|
||||
|
||||
// StreamTransport is a Transport made of an io.Reader and/or an io.Writer
|
||||
type StreamTransport struct {
|
||||
io.Reader
|
||||
io.Writer
|
||||
isReadWriter bool
|
||||
closed bool
|
||||
}
|
||||
|
||||
type StreamTransportFactory struct {
|
||||
Reader io.Reader
|
||||
Writer io.Writer
|
||||
isReadWriter bool
|
||||
}
|
||||
|
||||
func (p *StreamTransportFactory) GetTransport(trans TTransport) TTransport {
|
||||
if trans != nil {
|
||||
t, ok := trans.(*StreamTransport)
|
||||
if ok {
|
||||
if t.isReadWriter {
|
||||
return NewStreamTransportRW(t.Reader.(io.ReadWriter))
|
||||
}
|
||||
if t.Reader != nil && t.Writer != nil {
|
||||
return NewStreamTransport(t.Reader, t.Writer)
|
||||
}
|
||||
if t.Reader != nil && t.Writer == nil {
|
||||
return NewStreamTransportR(t.Reader)
|
||||
}
|
||||
if t.Reader == nil && t.Writer != nil {
|
||||
return NewStreamTransportW(t.Writer)
|
||||
}
|
||||
return &StreamTransport{}
|
||||
}
|
||||
}
|
||||
if p.isReadWriter {
|
||||
return NewStreamTransportRW(p.Reader.(io.ReadWriter))
|
||||
}
|
||||
if p.Reader != nil && p.Writer != nil {
|
||||
return NewStreamTransport(p.Reader, p.Writer)
|
||||
}
|
||||
if p.Reader != nil && p.Writer == nil {
|
||||
return NewStreamTransportR(p.Reader)
|
||||
}
|
||||
if p.Reader == nil && p.Writer != nil {
|
||||
return NewStreamTransportW(p.Writer)
|
||||
}
|
||||
return &StreamTransport{}
|
||||
}
|
||||
|
||||
func NewStreamTransportFactory(reader io.Reader, writer io.Writer, isReadWriter bool) *StreamTransportFactory {
|
||||
return &StreamTransportFactory{Reader: reader, Writer: writer, isReadWriter: isReadWriter}
|
||||
}
|
||||
|
||||
func NewStreamTransport(r io.Reader, w io.Writer) *StreamTransport {
|
||||
return &StreamTransport{Reader: bufio.NewReader(r), Writer: bufio.NewWriter(w)}
|
||||
}
|
||||
|
||||
func NewStreamTransportR(r io.Reader) *StreamTransport {
|
||||
return &StreamTransport{Reader: bufio.NewReader(r)}
|
||||
}
|
||||
|
||||
func NewStreamTransportW(w io.Writer) *StreamTransport {
|
||||
return &StreamTransport{Writer: bufio.NewWriter(w)}
|
||||
}
|
||||
|
||||
func NewStreamTransportRW(rw io.ReadWriter) *StreamTransport {
|
||||
bufrw := bufio.NewReadWriter(bufio.NewReader(rw), bufio.NewWriter(rw))
|
||||
return &StreamTransport{Reader: bufrw, Writer: bufrw, isReadWriter: true}
|
||||
}
|
||||
|
||||
func (p *StreamTransport) IsOpen() bool {
|
||||
return !p.closed
|
||||
}
|
||||
|
||||
// implicitly opened on creation, can't be reopened once closed
|
||||
func (p *StreamTransport) Open() error {
|
||||
if !p.closed {
|
||||
return NewTTransportException(ALREADY_OPEN, "StreamTransport already open.")
|
||||
} else {
|
||||
return NewTTransportException(NOT_OPEN, "cannot reopen StreamTransport.")
|
||||
}
|
||||
}
|
||||
|
||||
// Closes both the input and output streams.
|
||||
func (p *StreamTransport) Close() error {
|
||||
if p.closed {
|
||||
return NewTTransportException(NOT_OPEN, "StreamTransport already closed.")
|
||||
}
|
||||
p.closed = true
|
||||
closedReader := false
|
||||
if p.Reader != nil {
|
||||
c, ok := p.Reader.(io.Closer)
|
||||
if ok {
|
||||
e := c.Close()
|
||||
closedReader = true
|
||||
if e != nil {
|
||||
return e
|
||||
}
|
||||
}
|
||||
p.Reader = nil
|
||||
}
|
||||
if p.Writer != nil && (!closedReader || !p.isReadWriter) {
|
||||
c, ok := p.Writer.(io.Closer)
|
||||
if ok {
|
||||
e := c.Close()
|
||||
if e != nil {
|
||||
return e
|
||||
}
|
||||
}
|
||||
p.Writer = nil
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Flushes the underlying output stream if not null.
|
||||
func (p *StreamTransport) Flush() error {
|
||||
if p.Writer == nil {
|
||||
return NewTTransportException(NOT_OPEN, "Cannot flush null outputStream")
|
||||
}
|
||||
f, ok := p.Writer.(Flusher)
|
||||
if ok {
|
||||
err := f.Flush()
|
||||
if err != nil {
|
||||
return NewTTransportExceptionFromError(err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *StreamTransport) Read(c []byte) (n int, err error) {
|
||||
n, err = p.Reader.Read(c)
|
||||
if err != nil {
|
||||
err = NewTTransportExceptionFromError(err)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (p *StreamTransport) ReadByte() (c byte, err error) {
|
||||
f, ok := p.Reader.(io.ByteReader)
|
||||
if ok {
|
||||
c, err = f.ReadByte()
|
||||
} else {
|
||||
c, err = readByte(p.Reader)
|
||||
}
|
||||
if err != nil {
|
||||
err = NewTTransportExceptionFromError(err)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (p *StreamTransport) Write(c []byte) (n int, err error) {
|
||||
n, err = p.Writer.Write(c)
|
||||
if err != nil {
|
||||
err = NewTTransportExceptionFromError(err)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (p *StreamTransport) WriteByte(c byte) (err error) {
|
||||
f, ok := p.Writer.(io.ByteWriter)
|
||||
if ok {
|
||||
err = f.WriteByte(c)
|
||||
} else {
|
||||
err = writeByte(p.Writer, c)
|
||||
}
|
||||
if err != nil {
|
||||
err = NewTTransportExceptionFromError(err)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (p *StreamTransport) WriteString(s string) (n int, err error) {
|
||||
f, ok := p.Writer.(stringWriter)
|
||||
if ok {
|
||||
n, err = f.WriteString(s)
|
||||
} else {
|
||||
n, err = p.Writer.Write([]byte(s))
|
||||
}
|
||||
if err != nil {
|
||||
err = NewTTransportExceptionFromError(err)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (p *StreamTransport) RemainingBytes() (num_bytes uint64) {
|
||||
const maxSize = ^uint64(0)
|
||||
return maxSize // the thruth is, we just don't know unless framed is used
|
||||
}
|
||||
|
|
@ -1,583 +0,0 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package thrift
|
||||
|
||||
import (
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
const (
|
||||
THRIFT_JSON_PROTOCOL_VERSION = 1
|
||||
)
|
||||
|
||||
// for references to _ParseContext see tsimplejson_protocol.go
|
||||
|
||||
// JSON protocol implementation for thrift.
|
||||
//
|
||||
// This protocol produces/consumes a simple output format
|
||||
// suitable for parsing by scripting languages. It should not be
|
||||
// confused with the full-featured TJSONProtocol.
|
||||
//
|
||||
type TJSONProtocol struct {
|
||||
*TSimpleJSONProtocol
|
||||
}
|
||||
|
||||
// Constructor
|
||||
func NewTJSONProtocol(t TTransport) *TJSONProtocol {
|
||||
v := &TJSONProtocol{TSimpleJSONProtocol: NewTSimpleJSONProtocol(t)}
|
||||
v.parseContextStack = append(v.parseContextStack, int(_CONTEXT_IN_TOPLEVEL))
|
||||
v.dumpContext = append(v.dumpContext, int(_CONTEXT_IN_TOPLEVEL))
|
||||
return v
|
||||
}
|
||||
|
||||
// Factory
|
||||
type TJSONProtocolFactory struct{}
|
||||
|
||||
func (p *TJSONProtocolFactory) GetProtocol(trans TTransport) TProtocol {
|
||||
return NewTJSONProtocol(trans)
|
||||
}
|
||||
|
||||
func NewTJSONProtocolFactory() *TJSONProtocolFactory {
|
||||
return &TJSONProtocolFactory{}
|
||||
}
|
||||
|
||||
func (p *TJSONProtocol) WriteMessageBegin(name string, typeId TMessageType, seqId int32) error {
|
||||
p.resetContextStack() // THRIFT-3735
|
||||
if e := p.OutputListBegin(); e != nil {
|
||||
return e
|
||||
}
|
||||
if e := p.WriteI32(THRIFT_JSON_PROTOCOL_VERSION); e != nil {
|
||||
return e
|
||||
}
|
||||
if e := p.WriteString(name); e != nil {
|
||||
return e
|
||||
}
|
||||
if e := p.WriteByte(int8(typeId)); e != nil {
|
||||
return e
|
||||
}
|
||||
if e := p.WriteI32(seqId); e != nil {
|
||||
return e
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *TJSONProtocol) WriteMessageEnd() error {
|
||||
return p.OutputListEnd()
|
||||
}
|
||||
|
||||
func (p *TJSONProtocol) WriteStructBegin(name string) error {
|
||||
if e := p.OutputObjectBegin(); e != nil {
|
||||
return e
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *TJSONProtocol) WriteStructEnd() error {
|
||||
return p.OutputObjectEnd()
|
||||
}
|
||||
|
||||
func (p *TJSONProtocol) WriteFieldBegin(name string, typeId TType, id int16) error {
|
||||
if e := p.WriteI16(id); e != nil {
|
||||
return e
|
||||
}
|
||||
if e := p.OutputObjectBegin(); e != nil {
|
||||
return e
|
||||
}
|
||||
s, e1 := p.TypeIdToString(typeId)
|
||||
if e1 != nil {
|
||||
return e1
|
||||
}
|
||||
if e := p.WriteString(s); e != nil {
|
||||
return e
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *TJSONProtocol) WriteFieldEnd() error {
|
||||
return p.OutputObjectEnd()
|
||||
}
|
||||
|
||||
func (p *TJSONProtocol) WriteFieldStop() error { return nil }
|
||||
|
||||
func (p *TJSONProtocol) WriteMapBegin(keyType TType, valueType TType, size int) error {
|
||||
if e := p.OutputListBegin(); e != nil {
|
||||
return e
|
||||
}
|
||||
s, e1 := p.TypeIdToString(keyType)
|
||||
if e1 != nil {
|
||||
return e1
|
||||
}
|
||||
if e := p.WriteString(s); e != nil {
|
||||
return e
|
||||
}
|
||||
s, e1 = p.TypeIdToString(valueType)
|
||||
if e1 != nil {
|
||||
return e1
|
||||
}
|
||||
if e := p.WriteString(s); e != nil {
|
||||
return e
|
||||
}
|
||||
if e := p.WriteI64(int64(size)); e != nil {
|
||||
return e
|
||||
}
|
||||
return p.OutputObjectBegin()
|
||||
}
|
||||
|
||||
func (p *TJSONProtocol) WriteMapEnd() error {
|
||||
if e := p.OutputObjectEnd(); e != nil {
|
||||
return e
|
||||
}
|
||||
return p.OutputListEnd()
|
||||
}
|
||||
|
||||
func (p *TJSONProtocol) WriteListBegin(elemType TType, size int) error {
|
||||
return p.OutputElemListBegin(elemType, size)
|
||||
}
|
||||
|
||||
func (p *TJSONProtocol) WriteListEnd() error {
|
||||
return p.OutputListEnd()
|
||||
}
|
||||
|
||||
func (p *TJSONProtocol) WriteSetBegin(elemType TType, size int) error {
|
||||
return p.OutputElemListBegin(elemType, size)
|
||||
}
|
||||
|
||||
func (p *TJSONProtocol) WriteSetEnd() error {
|
||||
return p.OutputListEnd()
|
||||
}
|
||||
|
||||
func (p *TJSONProtocol) WriteBool(b bool) error {
|
||||
if b {
|
||||
return p.WriteI32(1)
|
||||
}
|
||||
return p.WriteI32(0)
|
||||
}
|
||||
|
||||
func (p *TJSONProtocol) WriteByte(b int8) error {
|
||||
return p.WriteI32(int32(b))
|
||||
}
|
||||
|
||||
func (p *TJSONProtocol) WriteI16(v int16) error {
|
||||
return p.WriteI32(int32(v))
|
||||
}
|
||||
|
||||
func (p *TJSONProtocol) WriteI32(v int32) error {
|
||||
return p.OutputI64(int64(v))
|
||||
}
|
||||
|
||||
func (p *TJSONProtocol) WriteI64(v int64) error {
|
||||
return p.OutputI64(int64(v))
|
||||
}
|
||||
|
||||
func (p *TJSONProtocol) WriteDouble(v float64) error {
|
||||
return p.OutputF64(v)
|
||||
}
|
||||
|
||||
func (p *TJSONProtocol) WriteString(v string) error {
|
||||
return p.OutputString(v)
|
||||
}
|
||||
|
||||
func (p *TJSONProtocol) WriteBinary(v []byte) error {
|
||||
// JSON library only takes in a string,
|
||||
// not an arbitrary byte array, to ensure bytes are transmitted
|
||||
// efficiently we must convert this into a valid JSON string
|
||||
// therefore we use base64 encoding to avoid excessive escaping/quoting
|
||||
if e := p.OutputPreValue(); e != nil {
|
||||
return e
|
||||
}
|
||||
if _, e := p.write(JSON_QUOTE_BYTES); e != nil {
|
||||
return NewTProtocolException(e)
|
||||
}
|
||||
writer := base64.NewEncoder(base64.StdEncoding, p.writer)
|
||||
if _, e := writer.Write(v); e != nil {
|
||||
p.writer.Reset(p.trans) // THRIFT-3735
|
||||
return NewTProtocolException(e)
|
||||
}
|
||||
if e := writer.Close(); e != nil {
|
||||
return NewTProtocolException(e)
|
||||
}
|
||||
if _, e := p.write(JSON_QUOTE_BYTES); e != nil {
|
||||
return NewTProtocolException(e)
|
||||
}
|
||||
return p.OutputPostValue()
|
||||
}
|
||||
|
||||
// Reading methods.
|
||||
func (p *TJSONProtocol) ReadMessageBegin() (name string, typeId TMessageType, seqId int32, err error) {
|
||||
p.resetContextStack() // THRIFT-3735
|
||||
if isNull, err := p.ParseListBegin(); isNull || err != nil {
|
||||
return name, typeId, seqId, err
|
||||
}
|
||||
version, err := p.ReadI32()
|
||||
if err != nil {
|
||||
return name, typeId, seqId, err
|
||||
}
|
||||
if version != THRIFT_JSON_PROTOCOL_VERSION {
|
||||
e := fmt.Errorf("Unknown Protocol version %d, expected version %d", version, THRIFT_JSON_PROTOCOL_VERSION)
|
||||
return name, typeId, seqId, NewTProtocolExceptionWithType(INVALID_DATA, e)
|
||||
|
||||
}
|
||||
if name, err = p.ReadString(); err != nil {
|
||||
return name, typeId, seqId, err
|
||||
}
|
||||
bTypeId, err := p.ReadByte()
|
||||
typeId = TMessageType(bTypeId)
|
||||
if err != nil {
|
||||
return name, typeId, seqId, err
|
||||
}
|
||||
if seqId, err = p.ReadI32(); err != nil {
|
||||
return name, typeId, seqId, err
|
||||
}
|
||||
return name, typeId, seqId, nil
|
||||
}
|
||||
|
||||
func (p *TJSONProtocol) ReadMessageEnd() error {
|
||||
err := p.ParseListEnd()
|
||||
return err
|
||||
}
|
||||
|
||||
func (p *TJSONProtocol) ReadStructBegin() (name string, err error) {
|
||||
_, err = p.ParseObjectStart()
|
||||
return "", err
|
||||
}
|
||||
|
||||
func (p *TJSONProtocol) ReadStructEnd() error {
|
||||
return p.ParseObjectEnd()
|
||||
}
|
||||
|
||||
func (p *TJSONProtocol) ReadFieldBegin() (string, TType, int16, error) {
|
||||
b, _ := p.reader.Peek(1)
|
||||
if len(b) < 1 || b[0] == JSON_RBRACE[0] || b[0] == JSON_RBRACKET[0] {
|
||||
return "", STOP, -1, nil
|
||||
}
|
||||
fieldId, err := p.ReadI16()
|
||||
if err != nil {
|
||||
return "", STOP, fieldId, err
|
||||
}
|
||||
if _, err = p.ParseObjectStart(); err != nil {
|
||||
return "", STOP, fieldId, err
|
||||
}
|
||||
sType, err := p.ReadString()
|
||||
if err != nil {
|
||||
return "", STOP, fieldId, err
|
||||
}
|
||||
fType, err := p.StringToTypeId(sType)
|
||||
return "", fType, fieldId, err
|
||||
}
|
||||
|
||||
func (p *TJSONProtocol) ReadFieldEnd() error {
|
||||
return p.ParseObjectEnd()
|
||||
}
|
||||
|
||||
// ReadMapBegin reads a map header, encoded as a JSON list of the form
// ["keyType","valueType",size,{ ... }]. A null in place of the list
// yields an empty VOID/VOID map with a nil error.
//
// Note the first if-statement shadows e on purpose: both the isNull and
// the error case return that inner e (nil when isNull), which is the
// intended result.
func (p *TJSONProtocol) ReadMapBegin() (keyType TType, valueType TType, size int, e error) {
	if isNull, e := p.ParseListBegin(); isNull || e != nil {
		return VOID, VOID, 0, e
	}

	// read keyType tag string and map it to a TType
	sKeyType, e := p.ReadString()
	if e != nil {
		return keyType, valueType, size, e
	}
	keyType, e = p.StringToTypeId(sKeyType)
	if e != nil {
		return keyType, valueType, size, e
	}

	// read valueType tag string and map it to a TType
	sValueType, e := p.ReadString()
	if e != nil {
		return keyType, valueType, size, e
	}
	valueType, e = p.StringToTypeId(sValueType)
	if e != nil {
		return keyType, valueType, size, e
	}

	// read the element count
	iSize, e := p.ReadI64()
	if e != nil {
		return keyType, valueType, size, e
	}
	size = int(iSize)

	// the key/value pairs themselves live in a nested JSON object
	_, e = p.ParseObjectStart()
	return keyType, valueType, size, e
}

// ReadMapEnd consumes the closing '}' of the key/value object and then
// the closing ']' of the wrapping JSON list.
func (p *TJSONProtocol) ReadMapEnd() error {
	e := p.ParseObjectEnd()
	if e != nil {
		return e
	}
	return p.ParseListEnd()
}
|
||||
|
||||
// ReadListBegin reads a list header: a JSON list whose first two entries
// are the element-type tag and the element count.
func (p *TJSONProtocol) ReadListBegin() (elemType TType, size int, e error) {
	return p.ParseElemListBegin()
}

// ReadListEnd consumes the closing bracket of the list's JSON array.
func (p *TJSONProtocol) ReadListEnd() error {
	return p.ParseListEnd()
}

// ReadSetBegin reads a set header; sets share the list wire format.
func (p *TJSONProtocol) ReadSetBegin() (elemType TType, size int, e error) {
	return p.ParseElemListBegin()
}

// ReadSetEnd consumes the closing bracket of the set's JSON array.
func (p *TJSONProtocol) ReadSetEnd() error {
	return p.ParseListEnd()
}
|
||||
|
||||
func (p *TJSONProtocol) ReadBool() (bool, error) {
|
||||
value, err := p.ReadI32()
|
||||
return (value != 0), err
|
||||
}
|
||||
|
||||
func (p *TJSONProtocol) ReadByte() (int8, error) {
|
||||
v, err := p.ReadI64()
|
||||
return int8(v), err
|
||||
}
|
||||
|
||||
func (p *TJSONProtocol) ReadI16() (int16, error) {
|
||||
v, err := p.ReadI64()
|
||||
return int16(v), err
|
||||
}
|
||||
|
||||
func (p *TJSONProtocol) ReadI32() (int32, error) {
|
||||
v, err := p.ReadI64()
|
||||
return int32(v), err
|
||||
}
|
||||
|
||||
func (p *TJSONProtocol) ReadI64() (int64, error) {
|
||||
v, _, err := p.ParseI64()
|
||||
return v, err
|
||||
}
|
||||
|
||||
func (p *TJSONProtocol) ReadDouble() (float64, error) {
|
||||
v, _, err := p.ParseF64()
|
||||
return v, err
|
||||
}
|
||||
|
||||
// ReadString decodes a JSON string value. Three wire shapes are
// accepted: a quoted string (the normal case), the literal null (decoded
// as the empty string), and anything else, which is an INVALID_DATA
// protocol error. Pre/post value hooks handle surrounding separators.
func (p *TJSONProtocol) ReadString() (string, error) {
	var v string
	if err := p.ParsePreValue(); err != nil {
		return v, err
	}
	// One-byte peek decides which of the three shapes follows; the peek
	// error is ignored since an empty peek falls into the error branch.
	f, _ := p.reader.Peek(1)
	if len(f) > 0 && f[0] == JSON_QUOTE {
		p.reader.ReadByte() // consume the opening quote
		value, err := p.ParseStringBody()
		v = value
		if err != nil {
			return v, err
		}
	} else if len(f) > 0 && f[0] == JSON_NULL[0] {
		// Starts like "null": read exactly that many bytes and verify.
		b := make([]byte, len(JSON_NULL))
		_, err := p.reader.Read(b)
		if err != nil {
			return v, NewTProtocolException(err)
		}
		if string(b) != string(JSON_NULL) {
			e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(b))
			return v, NewTProtocolExceptionWithType(INVALID_DATA, e)
		}
	} else {
		e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(f))
		return v, NewTProtocolExceptionWithType(INVALID_DATA, e)
	}
	return v, p.ParsePostValue()
}

// ReadBinary decodes a binary value, carried on the wire as a
// base64-encoded JSON string. As with ReadString, a literal null is
// accepted (yielding a nil slice) and anything unquoted is an
// INVALID_DATA protocol error.
func (p *TJSONProtocol) ReadBinary() ([]byte, error) {
	var v []byte
	if err := p.ParsePreValue(); err != nil {
		return nil, err
	}
	f, _ := p.reader.Peek(1)
	if len(f) > 0 && f[0] == JSON_QUOTE {
		p.reader.ReadByte() // consume the opening quote
		value, err := p.ParseBase64EncodedBody()
		v = value
		if err != nil {
			return v, err
		}
	} else if len(f) > 0 && f[0] == JSON_NULL[0] {
		b := make([]byte, len(JSON_NULL))
		_, err := p.reader.Read(b)
		if err != nil {
			return v, NewTProtocolException(err)
		}
		if string(b) != string(JSON_NULL) {
			e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(b))
			return v, NewTProtocolExceptionWithType(INVALID_DATA, e)
		}
	} else {
		e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(f))
		return v, NewTProtocolExceptionWithType(INVALID_DATA, e)
	}

	return v, p.ParsePostValue()
}
|
||||
|
||||
func (p *TJSONProtocol) Flush() (err error) {
|
||||
err = p.writer.Flush()
|
||||
if err == nil {
|
||||
err = p.trans.Flush()
|
||||
}
|
||||
return NewTProtocolException(err)
|
||||
}
|
||||
|
||||
func (p *TJSONProtocol) Skip(fieldType TType) (err error) {
|
||||
return SkipDefaultDepth(p, fieldType)
|
||||
}
|
||||
|
||||
func (p *TJSONProtocol) Transport() TTransport {
|
||||
return p.trans
|
||||
}
|
||||
|
||||
// OutputElemListBegin writes a list/set header: the opening JSON list,
// the element-type tag string, and the element count, using the public
// Write* value methods.
func (p *TJSONProtocol) OutputElemListBegin(elemType TType, size int) error {
	if e := p.OutputListBegin(); e != nil {
		return e
	}
	s, e1 := p.TypeIdToString(elemType)
	if e1 != nil {
		return e1
	}
	if e := p.WriteString(s); e != nil {
		return e
	}
	if e := p.WriteI64(int64(size)); e != nil {
		return e
	}
	return nil
}

// ParseElemListBegin reads a list/set header: opening JSON list, the
// element-type tag string, and the element count. A null list yields a
// zero-sized VOID result (via the shadowed e, nil in that case).
func (p *TJSONProtocol) ParseElemListBegin() (elemType TType, size int, e error) {
	if isNull, e := p.ParseListBegin(); isNull || e != nil {
		return VOID, 0, e
	}
	sElemType, err := p.ReadString()
	if err != nil {
		return VOID, size, err
	}
	elemType, err = p.StringToTypeId(sElemType)
	if err != nil {
		return elemType, size, err
	}
	nSize, err2 := p.ReadI64()
	size = int(nSize)
	return elemType, size, err2
}

// readElemListBegin is byte-for-byte the same logic as
// ParseElemListBegin. NOTE(review): apparent dead duplicate of the
// exported method — no caller is visible in this chunk; kept unchanged.
func (p *TJSONProtocol) readElemListBegin() (elemType TType, size int, e error) {
	if isNull, e := p.ParseListBegin(); isNull || e != nil {
		return VOID, 0, e
	}
	sElemType, err := p.ReadString()
	if err != nil {
		return VOID, size, err
	}
	elemType, err = p.StringToTypeId(sElemType)
	if err != nil {
		return elemType, size, err
	}
	nSize, err2 := p.ReadI64()
	size = int(nSize)
	return elemType, size, err2
}

// writeElemListBegin mirrors OutputElemListBegin but emits through the
// lower-level Output* primitives instead of the public Write* methods.
func (p *TJSONProtocol) writeElemListBegin(elemType TType, size int) error {
	if e := p.OutputListBegin(); e != nil {
		return e
	}
	s, e1 := p.TypeIdToString(elemType)
	if e1 != nil {
		return e1
	}
	if e := p.OutputString(s); e != nil {
		return e
	}
	if e := p.OutputI64(int64(size)); e != nil {
		return e
	}
	return nil
}
|
||||
|
||||
// TypeIdToString maps a TType to its JSON-protocol wire tag (e.g.
// STRING -> "str"). Unknown types produce an INVALID_DATA protocol
// exception. The switch compares on byte(fieldType), matching the
// byte-typed protocol constants.
func (p *TJSONProtocol) TypeIdToString(fieldType TType) (string, error) {
	switch byte(fieldType) {
	case BOOL:
		return "tf", nil
	case BYTE:
		return "i8", nil
	case I16:
		return "i16", nil
	case I32:
		return "i32", nil
	case I64:
		return "i64", nil
	case DOUBLE:
		return "dbl", nil
	case STRING:
		return "str", nil
	case STRUCT:
		return "rec", nil
	case MAP:
		return "map", nil
	case SET:
		return "set", nil
	case LIST:
		return "lst", nil
	}

	e := fmt.Errorf("Unknown fieldType: %d", int(fieldType))
	return "", NewTProtocolExceptionWithType(INVALID_DATA, e)
}

// StringToTypeId is the inverse of TypeIdToString: it maps a wire tag
// back to its TType. Unknown tags produce an INVALID_DATA protocol
// exception with a STOP sentinel type.
func (p *TJSONProtocol) StringToTypeId(fieldType string) (TType, error) {
	switch fieldType {
	case "tf":
		return TType(BOOL), nil
	case "i8":
		return TType(BYTE), nil
	case "i16":
		return TType(I16), nil
	case "i32":
		return TType(I32), nil
	case "i64":
		return TType(I64), nil
	case "dbl":
		return TType(DOUBLE), nil
	case "str":
		return TType(STRING), nil
	case "rec":
		return TType(STRUCT), nil
	case "map":
		return TType(MAP), nil
	case "set":
		return TType(SET), nil
	case "lst":
		return TType(LIST), nil
	}

	e := fmt.Errorf("Unknown type identifier: %s", fieldType)
	return TType(STOP), NewTProtocolExceptionWithType(INVALID_DATA, e)
}
|
|
@ -1,169 +0,0 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package thrift
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
/*
|
||||
TMultiplexedProtocol is a protocol-independent concrete decorator
|
||||
that allows a Thrift client to communicate with a multiplexing Thrift server,
|
||||
by prepending the service name to the function name during function calls.
|
||||
|
||||
NOTE: THIS IS NOT USED BY SERVERS. On the server, use TMultiplexedProcessor to handle request
|
||||
from a multiplexing client.
|
||||
|
||||
This example uses a single socket transport to invoke two services:
|
||||
|
||||
socket := thrift.NewTSocketFromAddrTimeout(addr, TIMEOUT)
|
||||
transport := thrift.NewTFramedTransport(socket)
|
||||
protocol := thrift.NewTBinaryProtocolTransport(transport)
|
||||
|
||||
mp := thrift.NewTMultiplexedProtocol(protocol, "Calculator")
|
||||
service := Calculator.NewCalculatorClient(mp)
|
||||
|
||||
mp2 := thrift.NewTMultiplexedProtocol(protocol, "WeatherReport")
|
||||
service2 := WeatherReport.NewWeatherReportClient(mp2)
|
||||
|
||||
err := transport.Open()
|
||||
if err != nil {
|
||||
t.Fatal("Unable to open client socket", err)
|
||||
}
|
||||
|
||||
fmt.Println(service.Add(2,2))
|
||||
fmt.Println(service2.GetTemperature())
|
||||
*/
|
||||
|
||||
type TMultiplexedProtocol struct {
|
||||
TProtocol
|
||||
serviceName string
|
||||
}
|
||||
|
||||
const MULTIPLEXED_SEPARATOR = ":"
|
||||
|
||||
func NewTMultiplexedProtocol(protocol TProtocol, serviceName string) *TMultiplexedProtocol {
|
||||
return &TMultiplexedProtocol{
|
||||
TProtocol: protocol,
|
||||
serviceName: serviceName,
|
||||
}
|
||||
}
|
||||
|
||||
func (t *TMultiplexedProtocol) WriteMessageBegin(name string, typeId TMessageType, seqid int32) error {
|
||||
if typeId == CALL || typeId == ONEWAY {
|
||||
return t.TProtocol.WriteMessageBegin(t.serviceName+MULTIPLEXED_SEPARATOR+name, typeId, seqid)
|
||||
} else {
|
||||
return t.TProtocol.WriteMessageBegin(name, typeId, seqid)
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
TMultiplexedProcessor is a TProcessor allowing
|
||||
a single TServer to provide multiple services.
|
||||
|
||||
To do so, you instantiate the processor and then register additional
|
||||
processors with it, as shown in the following example:
|
||||
|
||||
var processor = thrift.NewTMultiplexedProcessor()
|
||||
|
||||
  firstProcessor := FirstService.NewFirstServiceProcessor(&FirstServiceHandler{})
  processor.RegisterProcessor("FirstService", firstProcessor)
|
||||
|
||||
processor.registerProcessor(
|
||||
"Calculator",
|
||||
Calculator.NewCalculatorProcessor(&CalculatorHandler{}),
|
||||
)
|
||||
|
||||
processor.registerProcessor(
|
||||
"WeatherReport",
|
||||
WeatherReport.NewWeatherReportProcessor(&WeatherReportHandler{}),
|
||||
)
|
||||
|
||||
serverTransport, err := thrift.NewTServerSocketTimeout(addr, TIMEOUT)
|
||||
if err != nil {
|
||||
t.Fatal("Unable to create server socket", err)
|
||||
}
|
||||
server := thrift.NewTSimpleServer2(processor, serverTransport)
|
||||
server.Serve();
|
||||
*/
|
||||
|
||||
// TMultiplexedProcessor dispatches incoming messages to one of several
// registered TProcessors, keyed by the "service:" prefix the client's
// TMultiplexedProtocol prepends to method names. Unprefixed messages go
// to DefaultProcessor when one is set.
type TMultiplexedProcessor struct {
	serviceProcessorMap map[string]TProcessor
	DefaultProcessor    TProcessor
}

// NewTMultiplexedProcessor returns an empty multiplexed processor with
// no default; register services with RegisterProcessor.
func NewTMultiplexedProcessor() *TMultiplexedProcessor {
	return &TMultiplexedProcessor{
		serviceProcessorMap: make(map[string]TProcessor),
	}
}

// RegisterDefault installs the processor used for messages that carry no
// service prefix. Not synchronized; register before serving.
func (t *TMultiplexedProcessor) RegisterDefault(processor TProcessor) {
	t.DefaultProcessor = processor
}

// RegisterProcessor registers a processor under a service name,
// lazily creating the map so a zero-valued struct also works.
// Not synchronized; register before serving.
func (t *TMultiplexedProcessor) RegisterProcessor(name string, processor TProcessor) {
	if t.serviceProcessorMap == nil {
		t.serviceProcessorMap = make(map[string]TProcessor)
	}
	t.serviceProcessorMap[name] = processor
}

// Process reads one message header, resolves the target processor from
// the "service:method" name, and delegates with a protocol wrapper that
// replays the already-consumed header (the stored-message protocol).
// The fmt.Errorf results satisfy the TException return type here —
// presumably TException is a plain error-shaped interface in this
// version; confirm against its declaration.
func (t *TMultiplexedProcessor) Process(in, out TProtocol) (bool, TException) {
	name, typeId, seqid, err := in.ReadMessageBegin()
	if err != nil {
		return false, err
	}
	if typeId != CALL && typeId != ONEWAY {
		return false, fmt.Errorf("Unexpected message type %v", typeId)
	}
	// Split off the service prefix; at most one split so method names
	// containing ':' survive intact.
	v := strings.SplitN(name, MULTIPLEXED_SEPARATOR, 2)
	if len(v) != 2 {
		// No prefix: fall back to the default processor if present.
		if t.DefaultProcessor != nil {
			smb := NewStoredMessageProtocol(in, name, typeId, seqid)
			return t.DefaultProcessor.Process(smb, out)
		}
		return false, fmt.Errorf("Service name not found in message name: %s. Did you forget to use a TMultiplexProtocol in your client?", name)
	}
	actualProcessor, ok := t.serviceProcessorMap[v[0]]
	if !ok {
		return false, fmt.Errorf("Service name not found: %s. Did you forget to call registerProcessor()?", v[0])
	}
	// Hand the processor the bare method name with the header replayed.
	smb := NewStoredMessageProtocol(in, v[1], typeId, seqid)
	return actualProcessor.Process(smb, out)
}
|
||||
|
||||
// storedMessageProtocol wraps a protocol whose message header has
// already been consumed; ReadMessageBegin replays the stored header so
// a downstream processor can re-read it.
type storedMessageProtocol struct {
	TProtocol
	name   string
	typeId TMessageType
	seqid  int32
}

// NewStoredMessageProtocol captures an already-read message header
// (name, type, sequence id) alongside the underlying protocol.
func NewStoredMessageProtocol(protocol TProtocol, name string, typeId TMessageType, seqid int32) *storedMessageProtocol {
	return &storedMessageProtocol{protocol, name, typeId, seqid}
}

// ReadMessageBegin returns the stored header instead of reading from the
// wire; every other protocol method passes through to the embedded one.
func (s *storedMessageProtocol) ReadMessageBegin() (name string, typeId TMessageType, seqid int32, err error) {
	return s.name, s.typeId, s.seqid, nil
}
|
|
@ -1,50 +0,0 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package thrift
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
// This file is home to helpers that convert from various base types to
|
||||
// respective pointer types. This is necessary because Go does not permit
|
||||
// references to constants, nor can a pointer type to base type be allocated
|
||||
// and initialized in a single expression.
|
||||
//
|
||||
// E.g., this is not allowed:
|
||||
//
|
||||
// var ip *int = &5
|
||||
//
|
||||
// But this *is* allowed:
|
||||
//
|
||||
// func IntPtr(i int) *int { return &i }
|
||||
// var ip *int = IntPtr(5)
|
||||
//
|
||||
// Since pointers to base types are commonplace as [optional] fields in
|
||||
// exported thrift structs, we factor such helpers here.
|
||||
///////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
// These helpers return a pointer to a copy of their argument. Go forbids
// taking the address of a constant or literal directly, so generated
// thrift structs use them to populate optional (pointer-typed) fields.

// Float32Ptr returns a pointer to a copy of x.
func Float32Ptr(x float32) *float32 {
	return &x
}

// Float64Ptr returns a pointer to a copy of x.
func Float64Ptr(x float64) *float64 {
	return &x
}

// IntPtr returns a pointer to a copy of x.
func IntPtr(x int) *int {
	return &x
}

// Int32Ptr returns a pointer to a copy of x.
func Int32Ptr(x int32) *int32 {
	return &x
}

// Int64Ptr returns a pointer to a copy of x.
func Int64Ptr(x int64) *int64 {
	return &x
}

// StringPtr returns a pointer to a copy of x.
func StringPtr(x string) *string {
	return &x
}

// Uint32Ptr returns a pointer to a copy of x.
func Uint32Ptr(x uint32) *uint32 {
	return &x
}

// Uint64Ptr returns a pointer to a copy of x.
func Uint64Ptr(x uint64) *uint64 {
	return &x
}

// BoolPtr returns a pointer to a copy of x.
func BoolPtr(x bool) *bool {
	return &x
}

// ByteSlicePtr returns a pointer to x (the slice header is copied; the
// backing array is shared).
func ByteSlicePtr(x []byte) *[]byte {
	return &x
}
|
|
@ -1,58 +0,0 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package thrift
|
||||
|
||||
// TProcessorFactory produces a processor per accepted client transport.
// The default implementation below ignores the transport and hands back
// a single shared instance.
type TProcessorFactory interface {
	GetProcessor(trans TTransport) TProcessor
}

// tProcessorFactory is the singleton-style implementation: one processor
// shared by every connection.
type tProcessorFactory struct {
	processor TProcessor
}

// NewTProcessorFactory wraps p in a factory that always returns p.
func NewTProcessorFactory(p TProcessor) TProcessorFactory {
	return &tProcessorFactory{processor: p}
}

// GetProcessor returns the shared processor; trans is ignored.
func (p *tProcessorFactory) GetProcessor(trans TTransport) TProcessor {
	return p.processor
}

// TProcessorFunctionFactory is the per-function analogue of
// TProcessorFactory; the default implementation likewise returns a
// single shared instance.
type TProcessorFunctionFactory interface {
	GetProcessorFunction(trans TTransport) TProcessorFunction
}

// tProcessorFunctionFactory is the singleton-style implementation.
type tProcessorFunctionFactory struct {
	processor TProcessorFunction
}

// NewTProcessorFunctionFactory wraps p in a factory that always
// returns p.
func NewTProcessorFunctionFactory(p TProcessorFunction) TProcessorFunctionFactory {
	return &tProcessorFunctionFactory{processor: p}
}

// GetProcessorFunction returns the shared function; trans is ignored.
func (p *tProcessorFunctionFactory) GetProcessorFunction(trans TTransport) TProcessorFunction {
	return p.processor
}
|
|
@ -1,35 +0,0 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package thrift
|
||||
|
||||
// TServer is the common surface of thrift servers: accessors for the
// pluggable pieces (processor, transports, protocols) plus lifecycle
// control.
type TServer interface {
	ProcessorFactory() TProcessorFactory
	ServerTransport() TServerTransport
	InputTransportFactory() TTransportFactory
	OutputTransportFactory() TTransportFactory
	InputProtocolFactory() TProtocolFactory
	OutputProtocolFactory() TProtocolFactory

	// Starts the server
	Serve() error
	// Stops the server. This is optional on a per-implementation basis. Not
	// all servers are required to be cleanly stoppable.
	Stop() error
}
|
|
@ -1,122 +0,0 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package thrift
|
||||
|
||||
import (
|
||||
"net"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// TServerSocket is a TCP server transport. clientTimeout is applied to
// each accepted connection's TSocket. The mutex guards only the
// interrupted flag; the listener field itself is accessed without it.
type TServerSocket struct {
	listener      net.Listener
	addr          net.Addr
	clientTimeout time.Duration

	// Protects the interrupted value to make it thread safe.
	mu          sync.RWMutex
	interrupted bool
}

// NewTServerSocket creates a server socket for listenAddr with no
// per-client timeout.
func NewTServerSocket(listenAddr string) (*TServerSocket, error) {
	return NewTServerSocketTimeout(listenAddr, 0)
}

// NewTServerSocketTimeout creates a server socket for listenAddr,
// resolving the TCP address eagerly; clientTimeout is applied to each
// accepted connection. No listener is opened until Listen/Open.
func NewTServerSocketTimeout(listenAddr string, clientTimeout time.Duration) (*TServerSocket, error) {
	addr, err := net.ResolveTCPAddr("tcp", listenAddr)
	if err != nil {
		return nil, err
	}
	return &TServerSocket{addr: addr, clientTimeout: clientTimeout}, nil
}
|
||||
|
||||
// Listen opens the listener if it is not already open; a second call is
// a no-op. NOTE(review): p.listener is read and written without holding
// p.mu, so concurrent Listen/Close calls race — confirm callers
// serialize lifecycle operations.
func (p *TServerSocket) Listen() error {
	if p.IsListening() {
		return nil
	}
	l, err := net.Listen(p.addr.Network(), p.addr.String())
	if err != nil {
		return err
	}
	p.listener = l
	return nil
}

// Accept blocks for the next client connection and wraps it in a TSocket
// carrying the configured client timeout. Once Interrupt has been
// called, Accept fails fast with errTransportInterrupted. Accept errors
// from the listener are wrapped as transport exceptions.
func (p *TServerSocket) Accept() (TTransport, error) {
	// Snapshot the interrupted flag under the read lock; the listener
	// itself is used outside the lock (see Listen's note).
	p.mu.RLock()
	interrupted := p.interrupted
	p.mu.RUnlock()

	if interrupted {
		return nil, errTransportInterrupted
	}
	if p.listener == nil {
		return nil, NewTTransportException(NOT_OPEN, "No underlying server socket")
	}
	conn, err := p.listener.Accept()
	if err != nil {
		return nil, NewTTransportExceptionFromError(err)
	}
	return NewTSocketFromConnTimeout(conn, p.clientTimeout), nil
}
|
||||
|
||||
// IsListening reports whether a listener is currently open.
func (p *TServerSocket) IsListening() bool {
	return p.listener != nil
}

// Open opens the listener, erroring if it is already open. Unlike
// Listen, a repeat call is ALREADY_OPEN rather than a no-op.
func (p *TServerSocket) Open() error {
	if p.IsListening() {
		return NewTTransportException(ALREADY_OPEN, "Server socket already open")
	}
	if l, err := net.Listen(p.addr.Network(), p.addr.String()); err != nil {
		return err
	} else {
		p.listener = l
	}
	return nil
}

// Addr returns the listener's actual address when open (useful when the
// configured port was 0), otherwise the configured address.
func (p *TServerSocket) Addr() net.Addr {
	if p.listener != nil {
		return p.listener.Addr()
	}
	return p.addr
}

// Close shuts the listener down if open and always clears the field, so
// the socket can be reopened later.
func (p *TServerSocket) Close() error {
	defer func() {
		p.listener = nil
	}()
	if p.IsListening() {
		return p.listener.Close()
	}
	return nil
}

// Interrupt marks the socket interrupted (making future Accept calls
// fail fast) and closes the listener to unblock any Accept currently
// parked in it. The Close error is intentionally discarded.
func (p *TServerSocket) Interrupt() error {
	p.mu.Lock()
	p.interrupted = true
	p.Close()
	p.mu.Unlock()

	return nil
}
|
|
@ -1,34 +0,0 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package thrift
|
||||
|
||||
// TServerTransport provides client transports to a server: it listens,
// accepts connections, and can be closed or interrupted.
type TServerTransport interface {
	Listen() error
	Accept() (TTransport, error)
	Close() error

	// Optional method implementation. This signals to the server transport
	// that it should break out of any accept() or listen() that it is currently
	// blocked on. This method, if implemented, MUST be thread safe, as it may
	// be called from a different thread context than the other TServerTransport
	// methods.
	Interrupt() error
}
|
|
@ -1,196 +0,0 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package thrift
|
||||
|
||||
import (
|
||||
"log"
|
||||
"runtime/debug"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// TSimpleServer is a basic server: one goroutine per accepted
// connection, configured by factory objects for processor, transports,
// and protocols. The quit channel (buffer 1) signals AcceptLoop to exit
// after Stop.
type TSimpleServer struct {
	quit chan struct{}

	processorFactory       TProcessorFactory
	serverTransport        TServerTransport
	inputTransportFactory  TTransportFactory
	outputTransportFactory TTransportFactory
	inputProtocolFactory   TProtocolFactory
	outputProtocolFactory  TProtocolFactory
}

// NewTSimpleServer2 builds a server from a processor and a server
// transport, defaulting the transport/protocol factories (see
// NewTSimpleServerFactory2).
func NewTSimpleServer2(processor TProcessor, serverTransport TServerTransport) *TSimpleServer {
	return NewTSimpleServerFactory2(NewTProcessorFactory(processor), serverTransport)
}

// NewTSimpleServer4 additionally takes one transport factory and one
// protocol factory, used for both input and output.
func NewTSimpleServer4(processor TProcessor, serverTransport TServerTransport, transportFactory TTransportFactory, protocolFactory TProtocolFactory) *TSimpleServer {
	return NewTSimpleServerFactory4(NewTProcessorFactory(processor),
		serverTransport,
		transportFactory,
		protocolFactory,
	)
}

// NewTSimpleServer6 is the fully explicit variant: separate input and
// output transport/protocol factories.
func NewTSimpleServer6(processor TProcessor, serverTransport TServerTransport, inputTransportFactory TTransportFactory, outputTransportFactory TTransportFactory, inputProtocolFactory TProtocolFactory, outputProtocolFactory TProtocolFactory) *TSimpleServer {
	return NewTSimpleServerFactory6(NewTProcessorFactory(processor),
		serverTransport,
		inputTransportFactory,
		outputTransportFactory,
		inputProtocolFactory,
		outputProtocolFactory,
	)
}

// NewTSimpleServerFactory2 defaults to pass-through transports and the
// binary protocol on both sides.
func NewTSimpleServerFactory2(processorFactory TProcessorFactory, serverTransport TServerTransport) *TSimpleServer {
	return NewTSimpleServerFactory6(processorFactory,
		serverTransport,
		NewTTransportFactory(),
		NewTTransportFactory(),
		NewTBinaryProtocolFactoryDefault(),
		NewTBinaryProtocolFactoryDefault(),
	)
}

// NewTSimpleServerFactory4 uses the same transport and protocol factory
// for both directions.
func NewTSimpleServerFactory4(processorFactory TProcessorFactory, serverTransport TServerTransport, transportFactory TTransportFactory, protocolFactory TProtocolFactory) *TSimpleServer {
	return NewTSimpleServerFactory6(processorFactory,
		serverTransport,
		transportFactory,
		transportFactory,
		protocolFactory,
		protocolFactory,
	)
}

// NewTSimpleServerFactory6 is the root constructor every other variant
// funnels into; it also allocates the quit channel.
func NewTSimpleServerFactory6(processorFactory TProcessorFactory, serverTransport TServerTransport, inputTransportFactory TTransportFactory, outputTransportFactory TTransportFactory, inputProtocolFactory TProtocolFactory, outputProtocolFactory TProtocolFactory) *TSimpleServer {
	return &TSimpleServer{
		processorFactory:       processorFactory,
		serverTransport:        serverTransport,
		inputTransportFactory:  inputTransportFactory,
		outputTransportFactory: outputTransportFactory,
		inputProtocolFactory:   inputProtocolFactory,
		outputProtocolFactory:  outputProtocolFactory,
		quit:                   make(chan struct{}, 1),
	}
}
|
||||
|
||||
// ProcessorFactory returns the configured processor factory.
func (p *TSimpleServer) ProcessorFactory() TProcessorFactory {
	return p.processorFactory
}

// ServerTransport returns the listening transport.
func (p *TSimpleServer) ServerTransport() TServerTransport {
	return p.serverTransport
}

// InputTransportFactory returns the factory wrapping inbound transports.
func (p *TSimpleServer) InputTransportFactory() TTransportFactory {
	return p.inputTransportFactory
}

// OutputTransportFactory returns the factory wrapping outbound
// transports.
func (p *TSimpleServer) OutputTransportFactory() TTransportFactory {
	return p.outputTransportFactory
}

// InputProtocolFactory returns the factory for inbound protocols.
func (p *TSimpleServer) InputProtocolFactory() TProtocolFactory {
	return p.inputProtocolFactory
}

// OutputProtocolFactory returns the factory for outbound protocols.
func (p *TSimpleServer) OutputProtocolFactory() TProtocolFactory {
	return p.outputProtocolFactory
}

// Listen opens the server transport's listener.
func (p *TSimpleServer) Listen() error {
	return p.serverTransport.Listen()
}
|
||||
|
||||
// AcceptLoop accepts connections forever, handling each on its own
// goroutine via processRequests. An accept error after Stop (signalled
// through the quit channel) is treated as a clean shutdown and returns
// nil; any other accept error aborts the loop. Per-connection goroutines
// are not waited on; their errors are only logged.
func (p *TSimpleServer) AcceptLoop() error {
	for {
		client, err := p.serverTransport.Accept()
		if err != nil {
			// Non-blocking poll: distinguish "we were stopped" from a
			// genuine accept failure.
			select {
			case <-p.quit:
				return nil
			default:
			}
			return err
		}
		if client != nil {
			go func() {
				if err := p.processRequests(client); err != nil {
					log.Println("error processing request:", err)
				}
			}()
		}
	}
}
|
||||
|
||||
// once guards Stop. NOTE(review): this is PACKAGE-level, so only the
// first TSimpleServer.Stop in the whole process ever runs — stopping a
// second server instance is a silent no-op. Fixing it requires moving
// the Once (or a flag) into the struct; left unchanged here since the
// struct is defined separately.
var once sync.Once

// Stop signals the accept loop via the buffered quit channel and
// interrupts the server transport to unblock a pending Accept. Runs at
// most once per process (see the note on once above); always returns
// nil.
func (p *TSimpleServer) Stop() error {
	q := func() {
		p.quit <- struct{}{}
		p.serverTransport.Interrupt()
	}
	once.Do(q)
	return nil
}
|
||||
|
||||
// processRequests serves one client connection: it builds the
// per-connection processor, transports, and protocols, then processes
// messages until EOF, an error, or the processor reports completion.
// A panic in the processor is recovered and logged so one bad request
// cannot kill the server. Transports are closed on exit via defer.
func (p *TSimpleServer) processRequests(client TTransport) error {
	processor := p.processorFactory.GetProcessor(client)
	inputTransport := p.inputTransportFactory.GetTransport(client)
	outputTransport := p.outputTransportFactory.GetTransport(client)
	inputProtocol := p.inputProtocolFactory.GetProtocol(inputTransport)
	outputProtocol := p.outputProtocolFactory.GetProtocol(outputTransport)
	defer func() {
		if e := recover(); e != nil {
			log.Printf("panic in processor: %s: %s", e, debug.Stack())
		}
	}()
	if inputTransport != nil {
		defer inputTransport.Close()
	}
	if outputTransport != nil {
		defer outputTransport.Close()
	}
	for {
		ok, err := processor.Process(inputProtocol, outputProtocol)
		// The if-initializers below deliberately shadow err (and, in the
		// first one, ok) with the type-assertion results.
		if err, ok := err.(TTransportException); ok && err.TypeId() == END_OF_FILE {
			// Client closed the connection: normal end of session.
			return nil
		} else if err != nil {
			log.Printf("error processing request: %s", err)
			return err
		}
		// NOTE(review): unreachable — any non-nil err already returned
		// above, so err is nil here and the assertion can never succeed.
		// Kept unchanged to preserve upstream behavior.
		if err, ok := err.(TApplicationException); ok && err.TypeId() == UNKNOWN_METHOD {
			continue
		}
		if !ok {
			break
		}
	}
	return nil
}
|
|
@ -1,166 +0,0 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package thrift
|
||||
|
||||
import (
|
||||
"net"
|
||||
"time"
|
||||
)
|
||||
|
||||
type TSocket struct {
|
||||
conn net.Conn
|
||||
addr net.Addr
|
||||
timeout time.Duration
|
||||
}
|
||||
|
||||
// NewTSocket creates a net.Conn-backed TTransport, given a host and port
|
||||
//
|
||||
// Example:
|
||||
// trans, err := thrift.NewTSocket("localhost:9090")
|
||||
func NewTSocket(hostPort string) (*TSocket, error) {
|
||||
return NewTSocketTimeout(hostPort, 0)
|
||||
}
|
||||
|
||||
// NewTSocketTimeout creates a net.Conn-backed TTransport, given a host and port
|
||||
// it also accepts a timeout as a time.Duration
|
||||
func NewTSocketTimeout(hostPort string, timeout time.Duration) (*TSocket, error) {
|
||||
//conn, err := net.DialTimeout(network, address, timeout)
|
||||
addr, err := net.ResolveTCPAddr("tcp", hostPort)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return NewTSocketFromAddrTimeout(addr, timeout), nil
|
||||
}
|
||||
|
||||
// Creates a TSocket from a net.Addr
|
||||
func NewTSocketFromAddrTimeout(addr net.Addr, timeout time.Duration) *TSocket {
|
||||
return &TSocket{addr: addr, timeout: timeout}
|
||||
}
|
||||
|
||||
// Creates a TSocket from an existing net.Conn
|
||||
func NewTSocketFromConnTimeout(conn net.Conn, timeout time.Duration) *TSocket {
|
||||
return &TSocket{conn: conn, addr: conn.RemoteAddr(), timeout: timeout}
|
||||
}
|
||||
|
||||
// Sets the socket timeout
|
||||
func (p *TSocket) SetTimeout(timeout time.Duration) error {
|
||||
p.timeout = timeout
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *TSocket) pushDeadline(read, write bool) {
|
||||
var t time.Time
|
||||
if p.timeout > 0 {
|
||||
t = time.Now().Add(time.Duration(p.timeout))
|
||||
}
|
||||
if read && write {
|
||||
p.conn.SetDeadline(t)
|
||||
} else if read {
|
||||
p.conn.SetReadDeadline(t)
|
||||
} else if write {
|
||||
p.conn.SetWriteDeadline(t)
|
||||
}
|
||||
}
|
||||
|
||||
// Connects the socket, creating a new socket object if necessary.
|
||||
func (p *TSocket) Open() error {
|
||||
if p.IsOpen() {
|
||||
return NewTTransportException(ALREADY_OPEN, "Socket already connected.")
|
||||
}
|
||||
if p.addr == nil {
|
||||
return NewTTransportException(NOT_OPEN, "Cannot open nil address.")
|
||||
}
|
||||
if len(p.addr.Network()) == 0 {
|
||||
return NewTTransportException(NOT_OPEN, "Cannot open bad network name.")
|
||||
}
|
||||
if len(p.addr.String()) == 0 {
|
||||
return NewTTransportException(NOT_OPEN, "Cannot open bad address.")
|
||||
}
|
||||
var err error
|
||||
if p.conn, err = net.DialTimeout(p.addr.Network(), p.addr.String(), p.timeout); err != nil {
|
||||
return NewTTransportException(NOT_OPEN, err.Error())
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Retrieve the underlying net.Conn
|
||||
func (p *TSocket) Conn() net.Conn {
|
||||
return p.conn
|
||||
}
|
||||
|
||||
// Returns true if the connection is open
|
||||
func (p *TSocket) IsOpen() bool {
|
||||
if p.conn == nil {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Closes the socket.
|
||||
func (p *TSocket) Close() error {
|
||||
// Close the socket
|
||||
if p.conn != nil {
|
||||
err := p.conn.Close()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
p.conn = nil
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
//Returns the remote address of the socket.
|
||||
func (p *TSocket) Addr() net.Addr {
|
||||
return p.addr
|
||||
}
|
||||
|
||||
func (p *TSocket) Read(buf []byte) (int, error) {
|
||||
if !p.IsOpen() {
|
||||
return 0, NewTTransportException(NOT_OPEN, "Connection not open")
|
||||
}
|
||||
p.pushDeadline(true, false)
|
||||
n, err := p.conn.Read(buf)
|
||||
return n, NewTTransportExceptionFromError(err)
|
||||
}
|
||||
|
||||
func (p *TSocket) Write(buf []byte) (int, error) {
|
||||
if !p.IsOpen() {
|
||||
return 0, NewTTransportException(NOT_OPEN, "Connection not open")
|
||||
}
|
||||
p.pushDeadline(false, true)
|
||||
return p.conn.Write(buf)
|
||||
}
|
||||
|
||||
func (p *TSocket) Flush() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *TSocket) Interrupt() error {
|
||||
if !p.IsOpen() {
|
||||
return nil
|
||||
}
|
||||
return p.conn.Close()
|
||||
}
|
||||
|
||||
func (p *TSocket) RemainingBytes() (num_bytes uint64) {
|
||||
const maxSize = ^uint64(0)
|
||||
return maxSize // the thruth is, we just don't know unless framed is used
|
||||
}
|
||||
|
|
@ -1,109 +0,0 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package thrift
|
||||
|
||||
import (
|
||||
"net"
|
||||
"time"
|
||||
"crypto/tls"
|
||||
)
|
||||
|
||||
type TSSLServerSocket struct {
|
||||
listener net.Listener
|
||||
addr net.Addr
|
||||
clientTimeout time.Duration
|
||||
interrupted bool
|
||||
cfg *tls.Config
|
||||
}
|
||||
|
||||
func NewTSSLServerSocket(listenAddr string, cfg *tls.Config) (*TSSLServerSocket, error) {
|
||||
return NewTSSLServerSocketTimeout(listenAddr, cfg, 0)
|
||||
}
|
||||
|
||||
func NewTSSLServerSocketTimeout(listenAddr string, cfg *tls.Config, clientTimeout time.Duration) (*TSSLServerSocket, error) {
|
||||
addr, err := net.ResolveTCPAddr("tcp", listenAddr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &TSSLServerSocket{addr: addr, clientTimeout: clientTimeout, cfg: cfg}, nil
|
||||
}
|
||||
|
||||
func (p *TSSLServerSocket) Listen() error {
|
||||
if p.IsListening() {
|
||||
return nil
|
||||
}
|
||||
l, err := tls.Listen(p.addr.Network(), p.addr.String(), p.cfg)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
p.listener = l
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *TSSLServerSocket) Accept() (TTransport, error) {
|
||||
if p.interrupted {
|
||||
return nil, errTransportInterrupted
|
||||
}
|
||||
if p.listener == nil {
|
||||
return nil, NewTTransportException(NOT_OPEN, "No underlying server socket")
|
||||
}
|
||||
conn, err := p.listener.Accept()
|
||||
if err != nil {
|
||||
return nil, NewTTransportExceptionFromError(err)
|
||||
}
|
||||
return NewTSSLSocketFromConnTimeout(conn, p.cfg, p.clientTimeout), nil
|
||||
}
|
||||
|
||||
// Checks whether the socket is listening.
|
||||
func (p *TSSLServerSocket) IsListening() bool {
|
||||
return p.listener != nil
|
||||
}
|
||||
|
||||
// Connects the socket, creating a new socket object if necessary.
|
||||
func (p *TSSLServerSocket) Open() error {
|
||||
if p.IsListening() {
|
||||
return NewTTransportException(ALREADY_OPEN, "Server socket already open")
|
||||
}
|
||||
if l, err := tls.Listen(p.addr.Network(), p.addr.String(), p.cfg); err != nil {
|
||||
return err
|
||||
} else {
|
||||
p.listener = l
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *TSSLServerSocket) Addr() net.Addr {
|
||||
return p.addr
|
||||
}
|
||||
|
||||
func (p *TSSLServerSocket) Close() error {
|
||||
defer func() {
|
||||
p.listener = nil
|
||||
}()
|
||||
if p.IsListening() {
|
||||
return p.listener.Close()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *TSSLServerSocket) Interrupt() error {
|
||||
p.interrupted = true
|
||||
return nil
|
||||
}
|
|
@ -1,171 +0,0 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package thrift
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
"net"
|
||||
"time"
|
||||
)
|
||||
|
||||
type TSSLSocket struct {
|
||||
conn net.Conn
|
||||
// hostPort contains host:port (e.g. "asdf.com:12345"). The field is
|
||||
// only valid if addr is nil.
|
||||
hostPort string
|
||||
// addr is nil when hostPort is not "", and is only used when the
|
||||
// TSSLSocket is constructed from a net.Addr.
|
||||
addr net.Addr
|
||||
timeout time.Duration
|
||||
cfg *tls.Config
|
||||
}
|
||||
|
||||
// NewTSSLSocket creates a net.Conn-backed TTransport, given a host and port and tls Configuration
|
||||
//
|
||||
// Example:
|
||||
// trans, err := thrift.NewTSSLSocket("localhost:9090", nil)
|
||||
func NewTSSLSocket(hostPort string, cfg *tls.Config) (*TSSLSocket, error) {
|
||||
return NewTSSLSocketTimeout(hostPort, cfg, 0)
|
||||
}
|
||||
|
||||
// NewTSSLSocketTimeout creates a net.Conn-backed TTransport, given a host and port
|
||||
// it also accepts a tls Configuration and a timeout as a time.Duration
|
||||
func NewTSSLSocketTimeout(hostPort string, cfg *tls.Config, timeout time.Duration) (*TSSLSocket, error) {
|
||||
return &TSSLSocket{hostPort: hostPort, timeout: timeout, cfg: cfg}, nil
|
||||
}
|
||||
|
||||
// Creates a TSSLSocket from a net.Addr
|
||||
func NewTSSLSocketFromAddrTimeout(addr net.Addr, cfg *tls.Config, timeout time.Duration) *TSSLSocket {
|
||||
return &TSSLSocket{addr: addr, timeout: timeout, cfg: cfg}
|
||||
}
|
||||
|
||||
// Creates a TSSLSocket from an existing net.Conn
|
||||
func NewTSSLSocketFromConnTimeout(conn net.Conn, cfg *tls.Config, timeout time.Duration) *TSSLSocket {
|
||||
return &TSSLSocket{conn: conn, addr: conn.RemoteAddr(), timeout: timeout, cfg: cfg}
|
||||
}
|
||||
|
||||
// Sets the socket timeout
|
||||
func (p *TSSLSocket) SetTimeout(timeout time.Duration) error {
|
||||
p.timeout = timeout
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *TSSLSocket) pushDeadline(read, write bool) {
|
||||
var t time.Time
|
||||
if p.timeout > 0 {
|
||||
t = time.Now().Add(time.Duration(p.timeout))
|
||||
}
|
||||
if read && write {
|
||||
p.conn.SetDeadline(t)
|
||||
} else if read {
|
||||
p.conn.SetReadDeadline(t)
|
||||
} else if write {
|
||||
p.conn.SetWriteDeadline(t)
|
||||
}
|
||||
}
|
||||
|
||||
// Connects the socket, creating a new socket object if necessary.
|
||||
func (p *TSSLSocket) Open() error {
|
||||
var err error
|
||||
// If we have a hostname, we need to pass the hostname to tls.Dial for
|
||||
// certificate hostname checks.
|
||||
if p.hostPort != "" {
|
||||
if p.conn, err = tls.Dial("tcp", p.hostPort, p.cfg); err != nil {
|
||||
return NewTTransportException(NOT_OPEN, err.Error())
|
||||
}
|
||||
} else {
|
||||
if p.IsOpen() {
|
||||
return NewTTransportException(ALREADY_OPEN, "Socket already connected.")
|
||||
}
|
||||
if p.addr == nil {
|
||||
return NewTTransportException(NOT_OPEN, "Cannot open nil address.")
|
||||
}
|
||||
if len(p.addr.Network()) == 0 {
|
||||
return NewTTransportException(NOT_OPEN, "Cannot open bad network name.")
|
||||
}
|
||||
if len(p.addr.String()) == 0 {
|
||||
return NewTTransportException(NOT_OPEN, "Cannot open bad address.")
|
||||
}
|
||||
if p.conn, err = tls.Dial(p.addr.Network(), p.addr.String(), p.cfg); err != nil {
|
||||
return NewTTransportException(NOT_OPEN, err.Error())
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Retrieve the underlying net.Conn
|
||||
func (p *TSSLSocket) Conn() net.Conn {
|
||||
return p.conn
|
||||
}
|
||||
|
||||
// Returns true if the connection is open
|
||||
func (p *TSSLSocket) IsOpen() bool {
|
||||
if p.conn == nil {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Closes the socket.
|
||||
func (p *TSSLSocket) Close() error {
|
||||
// Close the socket
|
||||
if p.conn != nil {
|
||||
err := p.conn.Close()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
p.conn = nil
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *TSSLSocket) Read(buf []byte) (int, error) {
|
||||
if !p.IsOpen() {
|
||||
return 0, NewTTransportException(NOT_OPEN, "Connection not open")
|
||||
}
|
||||
p.pushDeadline(true, false)
|
||||
n, err := p.conn.Read(buf)
|
||||
return n, NewTTransportExceptionFromError(err)
|
||||
}
|
||||
|
||||
func (p *TSSLSocket) Write(buf []byte) (int, error) {
|
||||
if !p.IsOpen() {
|
||||
return 0, NewTTransportException(NOT_OPEN, "Connection not open")
|
||||
}
|
||||
p.pushDeadline(false, true)
|
||||
return p.conn.Write(buf)
|
||||
}
|
||||
|
||||
func (p *TSSLSocket) Flush() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *TSSLSocket) Interrupt() error {
|
||||
if !p.IsOpen() {
|
||||
return nil
|
||||
}
|
||||
return p.conn.Close()
|
||||
}
|
||||
|
||||
func (p *TSSLSocket) RemainingBytes() (num_bytes uint64) {
|
||||
const maxSize = ^uint64(0)
|
||||
return maxSize // the thruth is, we just don't know unless framed is used
|
||||
}
|
||||
|
|
@ -1,117 +0,0 @@
|
|||
/*
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package thrift
|
||||
|
||||
import (
|
||||
"compress/zlib"
|
||||
"io"
|
||||
"log"
|
||||
)
|
||||
|
||||
// TZlibTransportFactory is a factory for TZlibTransport instances
|
||||
type TZlibTransportFactory struct {
|
||||
level int
|
||||
}
|
||||
|
||||
// TZlibTransport is a TTransport implementation that makes use of zlib compression.
|
||||
type TZlibTransport struct {
|
||||
reader io.ReadCloser
|
||||
transport TTransport
|
||||
writer *zlib.Writer
|
||||
}
|
||||
|
||||
// GetTransport constructs a new instance of NewTZlibTransport
|
||||
func (p *TZlibTransportFactory) GetTransport(trans TTransport) TTransport {
|
||||
t, _ := NewTZlibTransport(trans, p.level)
|
||||
return t
|
||||
}
|
||||
|
||||
// NewTZlibTransportFactory constructs a new instance of NewTZlibTransportFactory
|
||||
func NewTZlibTransportFactory(level int) *TZlibTransportFactory {
|
||||
return &TZlibTransportFactory{level: level}
|
||||
}
|
||||
|
||||
// NewTZlibTransport constructs a new instance of TZlibTransport
|
||||
func NewTZlibTransport(trans TTransport, level int) (*TZlibTransport, error) {
|
||||
w, err := zlib.NewWriterLevel(trans, level)
|
||||
if err != nil {
|
||||
log.Println(err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &TZlibTransport{
|
||||
writer: w,
|
||||
transport: trans,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Close closes the reader and writer (flushing any unwritten data) and closes
|
||||
// the underlying transport.
|
||||
func (z *TZlibTransport) Close() error {
|
||||
if z.reader != nil {
|
||||
if err := z.reader.Close(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if err := z.writer.Close(); err != nil {
|
||||
return err
|
||||
}
|
||||
return z.transport.Close()
|
||||
}
|
||||
|
||||
// Flush flushes the writer and its underlying transport.
|
||||
func (z *TZlibTransport) Flush() error {
|
||||
if err := z.writer.Flush(); err != nil {
|
||||
return err
|
||||
}
|
||||
return z.transport.Flush()
|
||||
}
|
||||
|
||||
// IsOpen returns true if the transport is open
|
||||
func (z *TZlibTransport) IsOpen() bool {
|
||||
return z.transport.IsOpen()
|
||||
}
|
||||
|
||||
// Open opens the transport for communication
|
||||
func (z *TZlibTransport) Open() error {
|
||||
return z.transport.Open()
|
||||
}
|
||||
|
||||
func (z *TZlibTransport) Read(p []byte) (int, error) {
|
||||
if z.reader == nil {
|
||||
r, err := zlib.NewReader(z.transport)
|
||||
if err != nil {
|
||||
return 0, NewTTransportExceptionFromError(err)
|
||||
}
|
||||
z.reader = r
|
||||
}
|
||||
|
||||
return z.reader.Read(p)
|
||||
}
|
||||
|
||||
// RemainingBytes returns the size in bytes of the data that is still to be
|
||||
// read.
|
||||
func (z *TZlibTransport) RemainingBytes() uint64 {
|
||||
return z.transport.RemainingBytes()
|
||||
}
|
||||
|
||||
func (z *TZlibTransport) Write(p []byte) (int, error) {
|
||||
return z.writer.Write(p)
|
||||
}
|
|
@ -1,5 +0,0 @@
|
|||
language: go
|
||||
go:
|
||||
- 1.5
|
||||
- 1.6
|
||||
- tip
|
|
@ -1,15 +0,0 @@
|
|||
hdrhistogram
|
||||
============
|
||||
|
||||
[![Build Status](https://travis-ci.org/codahale/hdrhistogram.png?branch=master)](https://travis-ci.org/codahale/hdrhistogram)
|
||||
|
||||
A pure Go implementation of the [HDR Histogram](https://github.com/HdrHistogram/HdrHistogram).
|
||||
|
||||
> A Histogram that supports recording and analyzing sampled data value counts
|
||||
> across a configurable integer value range with configurable value precision
|
||||
> within the range. Value precision is expressed as the number of significant
|
||||
> digits in the value recording, and provides control over value quantization
|
||||
> behavior across the value range and the subsequent value resolution at any
|
||||
> given level.
|
||||
|
||||
For documentation, check [godoc](http://godoc.org/github.com/codahale/hdrhistogram).
|
|
@ -1,563 +0,0 @@
|
|||
// Package hdrhistogram provides an implementation of Gil Tene's HDR Histogram
|
||||
// data structure. The HDR Histogram allows for fast and accurate analysis of
|
||||
// the extreme ranges of data with non-normal distributions, like latency.
|
||||
package hdrhistogram
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
)
|
||||
|
||||
// A Bracket is a part of a cumulative distribution.
|
||||
type Bracket struct {
|
||||
Quantile float64
|
||||
Count, ValueAt int64
|
||||
}
|
||||
|
||||
// A Snapshot is an exported view of a Histogram, useful for serializing them.
|
||||
// A Histogram can be constructed from it by passing it to Import.
|
||||
type Snapshot struct {
|
||||
LowestTrackableValue int64
|
||||
HighestTrackableValue int64
|
||||
SignificantFigures int64
|
||||
Counts []int64
|
||||
}
|
||||
|
||||
// A Histogram is a lossy data structure used to record the distribution of
|
||||
// non-normally distributed data (like latency) with a high degree of accuracy
|
||||
// and a bounded degree of precision.
|
||||
type Histogram struct {
|
||||
lowestTrackableValue int64
|
||||
highestTrackableValue int64
|
||||
unitMagnitude int64
|
||||
significantFigures int64
|
||||
subBucketHalfCountMagnitude int32
|
||||
subBucketHalfCount int32
|
||||
subBucketMask int64
|
||||
subBucketCount int32
|
||||
bucketCount int32
|
||||
countsLen int32
|
||||
totalCount int64
|
||||
counts []int64
|
||||
}
|
||||
|
||||
// New returns a new Histogram instance capable of tracking values in the given
|
||||
// range and with the given amount of precision.
|
||||
func New(minValue, maxValue int64, sigfigs int) *Histogram {
|
||||
if sigfigs < 1 || 5 < sigfigs {
|
||||
panic(fmt.Errorf("sigfigs must be [1,5] (was %d)", sigfigs))
|
||||
}
|
||||
|
||||
largestValueWithSingleUnitResolution := 2 * math.Pow10(sigfigs)
|
||||
subBucketCountMagnitude := int32(math.Ceil(math.Log2(float64(largestValueWithSingleUnitResolution))))
|
||||
|
||||
subBucketHalfCountMagnitude := subBucketCountMagnitude
|
||||
if subBucketHalfCountMagnitude < 1 {
|
||||
subBucketHalfCountMagnitude = 1
|
||||
}
|
||||
subBucketHalfCountMagnitude--
|
||||
|
||||
unitMagnitude := int32(math.Floor(math.Log2(float64(minValue))))
|
||||
if unitMagnitude < 0 {
|
||||
unitMagnitude = 0
|
||||
}
|
||||
|
||||
subBucketCount := int32(math.Pow(2, float64(subBucketHalfCountMagnitude)+1))
|
||||
|
||||
subBucketHalfCount := subBucketCount / 2
|
||||
subBucketMask := int64(subBucketCount-1) << uint(unitMagnitude)
|
||||
|
||||
// determine exponent range needed to support the trackable value with no
|
||||
// overflow:
|
||||
smallestUntrackableValue := int64(subBucketCount) << uint(unitMagnitude)
|
||||
bucketsNeeded := int32(1)
|
||||
for smallestUntrackableValue < maxValue {
|
||||
smallestUntrackableValue <<= 1
|
||||
bucketsNeeded++
|
||||
}
|
||||
|
||||
bucketCount := bucketsNeeded
|
||||
countsLen := (bucketCount + 1) * (subBucketCount / 2)
|
||||
|
||||
return &Histogram{
|
||||
lowestTrackableValue: minValue,
|
||||
highestTrackableValue: maxValue,
|
||||
unitMagnitude: int64(unitMagnitude),
|
||||
significantFigures: int64(sigfigs),
|
||||
subBucketHalfCountMagnitude: subBucketHalfCountMagnitude,
|
||||
subBucketHalfCount: subBucketHalfCount,
|
||||
subBucketMask: subBucketMask,
|
||||
subBucketCount: subBucketCount,
|
||||
bucketCount: bucketCount,
|
||||
countsLen: countsLen,
|
||||
totalCount: 0,
|
||||
counts: make([]int64, countsLen),
|
||||
}
|
||||
}
|
||||
|
||||
// ByteSize returns an estimate of the amount of memory allocated to the
|
||||
// histogram in bytes.
|
||||
//
|
||||
// N.B.: This does not take into account the overhead for slices, which are
|
||||
// small, constant, and specific to the compiler version.
|
||||
func (h *Histogram) ByteSize() int {
|
||||
return 6*8 + 5*4 + len(h.counts)*8
|
||||
}
|
||||
|
||||
// Merge merges the data stored in the given histogram with the receiver,
|
||||
// returning the number of recorded values which had to be dropped.
|
||||
func (h *Histogram) Merge(from *Histogram) (dropped int64) {
|
||||
i := from.rIterator()
|
||||
for i.next() {
|
||||
v := i.valueFromIdx
|
||||
c := i.countAtIdx
|
||||
|
||||
if h.RecordValues(v, c) != nil {
|
||||
dropped += c
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// TotalCount returns total number of values recorded.
|
||||
func (h *Histogram) TotalCount() int64 {
|
||||
return h.totalCount
|
||||
}
|
||||
|
||||
// Max returns the approximate maximum recorded value.
|
||||
func (h *Histogram) Max() int64 {
|
||||
var max int64
|
||||
i := h.iterator()
|
||||
for i.next() {
|
||||
if i.countAtIdx != 0 {
|
||||
max = i.highestEquivalentValue
|
||||
}
|
||||
}
|
||||
return h.highestEquivalentValue(max)
|
||||
}
|
||||
|
||||
// Min returns the approximate minimum recorded value.
|
||||
func (h *Histogram) Min() int64 {
|
||||
var min int64
|
||||
i := h.iterator()
|
||||
for i.next() {
|
||||
if i.countAtIdx != 0 && min == 0 {
|
||||
min = i.highestEquivalentValue
|
||||
break
|
||||
}
|
||||
}
|
||||
return h.lowestEquivalentValue(min)
|
||||
}
|
||||
|
||||
// Mean returns the approximate arithmetic mean of the recorded values.
|
||||
func (h *Histogram) Mean() float64 {
|
||||
if h.totalCount == 0 {
|
||||
return 0
|
||||
}
|
||||
var total int64
|
||||
i := h.iterator()
|
||||
for i.next() {
|
||||
if i.countAtIdx != 0 {
|
||||
total += i.countAtIdx * h.medianEquivalentValue(i.valueFromIdx)
|
||||
}
|
||||
}
|
||||
return float64(total) / float64(h.totalCount)
|
||||
}
|
||||
|
||||
// StdDev returns the approximate standard deviation of the recorded values.
|
||||
func (h *Histogram) StdDev() float64 {
|
||||
if h.totalCount == 0 {
|
||||
return 0
|
||||
}
|
||||
|
||||
mean := h.Mean()
|
||||
geometricDevTotal := 0.0
|
||||
|
||||
i := h.iterator()
|
||||
for i.next() {
|
||||
if i.countAtIdx != 0 {
|
||||
dev := float64(h.medianEquivalentValue(i.valueFromIdx)) - mean
|
||||
geometricDevTotal += (dev * dev) * float64(i.countAtIdx)
|
||||
}
|
||||
}
|
||||
|
||||
return math.Sqrt(geometricDevTotal / float64(h.totalCount))
|
||||
}
|
||||
|
||||
// Reset deletes all recorded values and restores the histogram to its original
|
||||
// state.
|
||||
func (h *Histogram) Reset() {
|
||||
h.totalCount = 0
|
||||
for i := range h.counts {
|
||||
h.counts[i] = 0
|
||||
}
|
||||
}
|
||||
|
||||
// RecordValue records the given value, returning an error if the value is out
|
||||
// of range.
|
||||
func (h *Histogram) RecordValue(v int64) error {
|
||||
return h.RecordValues(v, 1)
|
||||
}
|
||||
|
||||
// RecordCorrectedValue records the given value, correcting for stalls in the
|
||||
// recording process. This only works for processes which are recording values
|
||||
// at an expected interval (e.g., doing jitter analysis). Processes which are
|
||||
// recording ad-hoc values (e.g., latency for incoming requests) can't take
|
||||
// advantage of this.
|
||||
func (h *Histogram) RecordCorrectedValue(v, expectedInterval int64) error {
|
||||
if err := h.RecordValue(v); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if expectedInterval <= 0 || v <= expectedInterval {
|
||||
return nil
|
||||
}
|
||||
|
||||
missingValue := v - expectedInterval
|
||||
for missingValue >= expectedInterval {
|
||||
if err := h.RecordValue(missingValue); err != nil {
|
||||
return err
|
||||
}
|
||||
missingValue -= expectedInterval
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// RecordValues records n occurrences of the given value, returning an error if
|
||||
// the value is out of range.
|
||||
func (h *Histogram) RecordValues(v, n int64) error {
|
||||
idx := h.countsIndexFor(v)
|
||||
if idx < 0 || int(h.countsLen) <= idx {
|
||||
return fmt.Errorf("value %d is too large to be recorded", v)
|
||||
}
|
||||
h.counts[idx] += n
|
||||
h.totalCount += n
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// ValueAtQuantile returns the recorded value at the given quantile (0..100).
|
||||
func (h *Histogram) ValueAtQuantile(q float64) int64 {
|
||||
if q > 100 {
|
||||
q = 100
|
||||
}
|
||||
|
||||
total := int64(0)
|
||||
countAtPercentile := int64(((q / 100) * float64(h.totalCount)) + 0.5)
|
||||
|
||||
i := h.iterator()
|
||||
for i.next() {
|
||||
total += i.countAtIdx
|
||||
if total >= countAtPercentile {
|
||||
return h.highestEquivalentValue(i.valueFromIdx)
|
||||
}
|
||||
}
|
||||
|
||||
return 0
|
||||
}
|
||||
|
||||
// CumulativeDistribution returns an ordered list of brackets of the
|
||||
// distribution of recorded values.
|
||||
func (h *Histogram) CumulativeDistribution() []Bracket {
|
||||
var result []Bracket
|
||||
|
||||
i := h.pIterator(1)
|
||||
for i.next() {
|
||||
result = append(result, Bracket{
|
||||
Quantile: i.percentile,
|
||||
Count: i.countToIdx,
|
||||
ValueAt: i.highestEquivalentValue,
|
||||
})
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
// SignificantFigures returns the significant figures used to create the
|
||||
// histogram
|
||||
func (h *Histogram) SignificantFigures() int64 {
|
||||
return h.significantFigures
|
||||
}
|
||||
|
||||
// LowestTrackableValue returns the lower bound on values that will be added
|
||||
// to the histogram
|
||||
func (h *Histogram) LowestTrackableValue() int64 {
|
||||
return h.lowestTrackableValue
|
||||
}
|
||||
|
||||
// HighestTrackableValue returns the upper bound on values that will be added
|
||||
// to the histogram
|
||||
func (h *Histogram) HighestTrackableValue() int64 {
|
||||
return h.highestTrackableValue
|
||||
}
|
||||
|
||||
// Histogram bar for plotting
|
||||
type Bar struct {
|
||||
From, To, Count int64
|
||||
}
|
||||
|
||||
// Pretty print as csv for easy plotting
|
||||
func (b Bar) String() string {
|
||||
return fmt.Sprintf("%v, %v, %v\n", b.From, b.To, b.Count)
|
||||
}
|
||||
|
||||
// Distribution returns an ordered list of bars of the
|
||||
// distribution of recorded values, counts can be normalized to a probability
|
||||
func (h *Histogram) Distribution() (result []Bar) {
|
||||
i := h.iterator()
|
||||
for i.next() {
|
||||
result = append(result, Bar{
|
||||
Count: i.countAtIdx,
|
||||
From: h.lowestEquivalentValue(i.valueFromIdx),
|
||||
To: i.highestEquivalentValue,
|
||||
})
|
||||
}
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
// Equals returns true if the two Histograms are equivalent, false if not.
|
||||
func (h *Histogram) Equals(other *Histogram) bool {
|
||||
switch {
|
||||
case
|
||||
h.lowestTrackableValue != other.lowestTrackableValue,
|
||||
h.highestTrackableValue != other.highestTrackableValue,
|
||||
h.unitMagnitude != other.unitMagnitude,
|
||||
h.significantFigures != other.significantFigures,
|
||||
h.subBucketHalfCountMagnitude != other.subBucketHalfCountMagnitude,
|
||||
h.subBucketHalfCount != other.subBucketHalfCount,
|
||||
h.subBucketMask != other.subBucketMask,
|
||||
h.subBucketCount != other.subBucketCount,
|
||||
h.bucketCount != other.bucketCount,
|
||||
h.countsLen != other.countsLen,
|
||||
h.totalCount != other.totalCount:
|
||||
return false
|
||||
default:
|
||||
for i, c := range h.counts {
|
||||
if c != other.counts[i] {
|
||||
return false
|
||||
}
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Export returns a snapshot view of the Histogram. This can be later passed to
|
||||
// Import to construct a new Histogram with the same state.
|
||||
func (h *Histogram) Export() *Snapshot {
|
||||
return &Snapshot{
|
||||
LowestTrackableValue: h.lowestTrackableValue,
|
||||
HighestTrackableValue: h.highestTrackableValue,
|
||||
SignificantFigures: h.significantFigures,
|
||||
Counts: h.counts,
|
||||
}
|
||||
}
|
||||
|
||||
// Import returns a new Histogram populated from the Snapshot data.
|
||||
func Import(s *Snapshot) *Histogram {
|
||||
h := New(s.LowestTrackableValue, s.HighestTrackableValue, int(s.SignificantFigures))
|
||||
h.counts = s.Counts
|
||||
totalCount := int64(0)
|
||||
for i := int32(0); i < h.countsLen; i++ {
|
||||
countAtIndex := h.counts[i]
|
||||
if countAtIndex > 0 {
|
||||
totalCount += countAtIndex
|
||||
}
|
||||
}
|
||||
h.totalCount = totalCount
|
||||
return h
|
||||
}
|
||||
|
||||
func (h *Histogram) iterator() *iterator {
|
||||
return &iterator{
|
||||
h: h,
|
||||
subBucketIdx: -1,
|
||||
}
|
||||
}
|
||||
|
||||
func (h *Histogram) rIterator() *rIterator {
|
||||
return &rIterator{
|
||||
iterator: iterator{
|
||||
h: h,
|
||||
subBucketIdx: -1,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func (h *Histogram) pIterator(ticksPerHalfDistance int32) *pIterator {
|
||||
return &pIterator{
|
||||
iterator: iterator{
|
||||
h: h,
|
||||
subBucketIdx: -1,
|
||||
},
|
||||
ticksPerHalfDistance: ticksPerHalfDistance,
|
||||
}
|
||||
}
|
||||
|
||||
func (h *Histogram) sizeOfEquivalentValueRange(v int64) int64 {
|
||||
bucketIdx := h.getBucketIndex(v)
|
||||
subBucketIdx := h.getSubBucketIdx(v, bucketIdx)
|
||||
adjustedBucket := bucketIdx
|
||||
if subBucketIdx >= h.subBucketCount {
|
||||
adjustedBucket++
|
||||
}
|
||||
return int64(1) << uint(h.unitMagnitude+int64(adjustedBucket))
|
||||
}
|
||||
|
||||
func (h *Histogram) valueFromIndex(bucketIdx, subBucketIdx int32) int64 {
|
||||
return int64(subBucketIdx) << uint(int64(bucketIdx)+h.unitMagnitude)
|
||||
}
|
||||
|
||||
func (h *Histogram) lowestEquivalentValue(v int64) int64 {
|
||||
bucketIdx := h.getBucketIndex(v)
|
||||
subBucketIdx := h.getSubBucketIdx(v, bucketIdx)
|
||||
return h.valueFromIndex(bucketIdx, subBucketIdx)
|
||||
}
|
||||
|
||||
func (h *Histogram) nextNonEquivalentValue(v int64) int64 {
|
||||
return h.lowestEquivalentValue(v) + h.sizeOfEquivalentValueRange(v)
|
||||
}
|
||||
|
||||
func (h *Histogram) highestEquivalentValue(v int64) int64 {
|
||||
return h.nextNonEquivalentValue(v) - 1
|
||||
}
|
||||
|
||||
func (h *Histogram) medianEquivalentValue(v int64) int64 {
|
||||
return h.lowestEquivalentValue(v) + (h.sizeOfEquivalentValueRange(v) >> 1)
|
||||
}
|
||||
|
||||
func (h *Histogram) getCountAtIndex(bucketIdx, subBucketIdx int32) int64 {
|
||||
return h.counts[h.countsIndex(bucketIdx, subBucketIdx)]
|
||||
}
|
||||
|
||||
func (h *Histogram) countsIndex(bucketIdx, subBucketIdx int32) int32 {
|
||||
bucketBaseIdx := (bucketIdx + 1) << uint(h.subBucketHalfCountMagnitude)
|
||||
offsetInBucket := subBucketIdx - h.subBucketHalfCount
|
||||
return bucketBaseIdx + offsetInBucket
|
||||
}
|
||||
|
||||
func (h *Histogram) getBucketIndex(v int64) int32 {
|
||||
pow2Ceiling := bitLen(v | h.subBucketMask)
|
||||
return int32(pow2Ceiling - int64(h.unitMagnitude) -
|
||||
int64(h.subBucketHalfCountMagnitude+1))
|
||||
}
|
||||
|
||||
func (h *Histogram) getSubBucketIdx(v int64, idx int32) int32 {
|
||||
return int32(v >> uint(int64(idx)+int64(h.unitMagnitude)))
|
||||
}
|
||||
|
||||
func (h *Histogram) countsIndexFor(v int64) int {
|
||||
bucketIdx := h.getBucketIndex(v)
|
||||
subBucketIdx := h.getSubBucketIdx(v, bucketIdx)
|
||||
return int(h.countsIndex(bucketIdx, subBucketIdx))
|
||||
}
|
||||
|
||||
// iterator walks a Histogram's buckets in value order, tracking the running
// count seen so far.
type iterator struct {
	h *Histogram

	// Current position: bucket and sub-bucket indices (subBucketIdx == -1
	// before the first call to next).
	bucketIdx, subBucketIdx int32

	// countAtIdx is the count at the current slot, countToIdx the cumulative
	// count up to and including it, valueFromIdx the slot's lowest value.
	countAtIdx, countToIdx, valueFromIdx int64

	// highestEquivalentValue is the largest value equivalent to valueFromIdx.
	highestEquivalentValue int64
}
|
||||
|
||||
func (i *iterator) next() bool {
|
||||
if i.countToIdx >= i.h.totalCount {
|
||||
return false
|
||||
}
|
||||
|
||||
// increment bucket
|
||||
i.subBucketIdx++
|
||||
if i.subBucketIdx >= i.h.subBucketCount {
|
||||
i.subBucketIdx = i.h.subBucketHalfCount
|
||||
i.bucketIdx++
|
||||
}
|
||||
|
||||
if i.bucketIdx >= i.h.bucketCount {
|
||||
return false
|
||||
}
|
||||
|
||||
i.countAtIdx = i.h.getCountAtIndex(i.bucketIdx, i.subBucketIdx)
|
||||
i.countToIdx += i.countAtIdx
|
||||
i.valueFromIdx = i.h.valueFromIndex(i.bucketIdx, i.subBucketIdx)
|
||||
i.highestEquivalentValue = i.h.highestEquivalentValue(i.valueFromIdx)
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// rIterator iterates only over slots with a non-zero recorded count.
type rIterator struct {
	iterator
	// countAddedThisStep is the count contributed by the current step.
	countAddedThisStep int64
}
|
||||
|
||||
func (r *rIterator) next() bool {
|
||||
for r.iterator.next() {
|
||||
if r.countAtIdx != 0 {
|
||||
r.countAddedThisStep = r.countAtIdx
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// pIterator iterates by percentile ticks rather than by raw bucket.
type pIterator struct {
	iterator
	// seenLastValue records whether the final 100th-percentile step has
	// already been emitted.
	seenLastValue bool
	// ticksPerHalfDistance controls the percentile tick resolution.
	ticksPerHalfDistance int32
	// percentileToIteratorTo is the next percentile target to advance to.
	percentileToIteratorTo float64
	// percentile is the percentile of the current step.
	percentile float64
}
|
||||
|
||||
// next advances to the next percentile tick. After all counts have been
// consumed it emits one final step at percentile 100, then reports false.
func (p *pIterator) next() bool {
	// All counts consumed: emit the terminal 100th-percentile step exactly once.
	if !(p.countToIdx < p.h.totalCount) {
		if p.seenLastValue {
			return false
		}

		p.seenLastValue = true
		p.percentile = 100

		return true
	}

	// Lazily take the first underlying step (subBucketIdx == -1 means the
	// iterator has not started yet).
	if p.subBucketIdx == -1 && !p.iterator.next() {
		return false
	}

	var done = false
	for !done {
		// Percentile represented by the cumulative count at the current slot.
		currentPercentile := (100.0 * float64(p.countToIdx)) / float64(p.h.totalCount)
		if p.countAtIdx != 0 && p.percentileToIteratorTo <= currentPercentile {
			p.percentile = p.percentileToIteratorTo
			// Tick spacing halves (i.e. resolution doubles) each time the
			// remaining distance to 100% halves.
			halfDistance := math.Trunc(math.Pow(2, math.Trunc(math.Log2(100.0/(100.0-p.percentileToIteratorTo)))+1))
			percentileReportingTicks := float64(p.ticksPerHalfDistance) * halfDistance
			p.percentileToIteratorTo += 100.0 / percentileReportingTicks
			return true
		}
		done = !p.iterator.next()
	}

	return true
}
|
||||
|
||||
// bitLen returns the number of bits needed to represent x, i.e. the one-based
// position of x's highest set bit. bitLen(0) == 0. Negative inputs are not
// meaningful (the first loop never terminates on its shift for them), matching
// the original's behavior for non-negative callers.
func bitLen(x int64) int64 {
	var n int64
	// Peel off 16 bits at a time while at least 16 bits remain.
	for x >= 0x8000 {
		x >>= 16
		n += 16
	}
	// Binary-search the remaining <16 bits.
	if x >= 0x80 {
		x >>= 8
		n += 8
	}
	if x >= 0x8 {
		x >>= 4
		n += 4
	}
	if x >= 0x2 {
		x >>= 2
		n += 2
	}
	if x >= 0x1 {
		n++
	}
	return n
}
|
|
@ -1,45 +0,0 @@
|
|||
package hdrhistogram
|
||||
|
||||
// A WindowedHistogram combines histograms to provide windowed statistics.
type WindowedHistogram struct {
	// idx is the rotation counter; Current points at h[idx%len(h)].
	idx int
	// h holds the window's sections.
	h []Histogram
	// m is a scratch histogram reset and reused by Merge.
	m *Histogram

	// Current is the histogram currently being recorded into.
	Current *Histogram
}
|
||||
|
||||
// NewWindowed creates a new WindowedHistogram with N underlying histograms with
|
||||
// the given parameters.
|
||||
func NewWindowed(n int, minValue, maxValue int64, sigfigs int) *WindowedHistogram {
|
||||
w := WindowedHistogram{
|
||||
idx: -1,
|
||||
h: make([]Histogram, n),
|
||||
m: New(minValue, maxValue, sigfigs),
|
||||
}
|
||||
|
||||
for i := range w.h {
|
||||
w.h[i] = *New(minValue, maxValue, sigfigs)
|
||||
}
|
||||
w.Rotate()
|
||||
|
||||
return &w
|
||||
}
|
||||
|
||||
// Merge returns a histogram which includes the recorded values from all the
|
||||
// sections of the window.
|
||||
func (w *WindowedHistogram) Merge() *Histogram {
|
||||
w.m.Reset()
|
||||
for _, h := range w.h {
|
||||
w.m.Merge(&h)
|
||||
}
|
||||
return w.m
|
||||
}
|
||||
|
||||
// Rotate resets the oldest histogram and rotates it to be used as the current
|
||||
// histogram.
|
||||
func (w *WindowedHistogram) Rotate() {
|
||||
w.idx++
|
||||
w.Current = &w.h[w.idx%len(w.h)]
|
||||
w.Current.Reset()
|
||||
}
|
|
@ -200,40 +200,3 @@
|
|||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
|
||||
--------------------------------------------------
|
||||
SOFTWARE DISTRIBUTED WITH THRIFT:
|
||||
|
||||
The Apache Thrift software includes a number of subcomponents with
|
||||
separate copyright notices and license terms. Your use of the source
|
||||
code for the these subcomponents is subject to the terms and
|
||||
conditions of the following licenses.
|
||||
|
||||
--------------------------------------------------
|
||||
Portions of the following files are licensed under the MIT License:
|
||||
|
||||
lib/erl/src/Makefile.am
|
||||
|
||||
Please see doc/otp-base-license.txt for the full terms of this license.
|
||||
|
||||
--------------------------------------------------
|
||||
For the aclocal/ax_boost_base.m4 and contrib/fb303/aclocal/ax_boost_base.m4 components:
|
||||
|
||||
# Copyright (c) 2007 Thomas Porschberg <thomas@randspringer.de>
|
||||
#
|
||||
# Copying and distribution of this file, with or without
|
||||
# modification, are permitted in any medium without royalty provided
|
||||
# the copyright notice and this notice are preserved.
|
||||
|
||||
--------------------------------------------------
|
||||
For the lib/nodejs/lib/thrift/json_parse.js:
|
||||
|
||||
/*
|
||||
json_parse.js
|
||||
2015-05-02
|
||||
Public Domain.
|
||||
NO WARRANTY EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
|
||||
|
||||
*/
|
||||
(By Douglas Crockford <douglas@crockford.com>)
|
||||
--------------------------------------------------
|
|
@ -0,0 +1,67 @@
|
|||
The source code developed under the Stargz Snapshotter Project is licensed under Apache License 2.0.
|
||||
|
||||
However, the Stargz Snapshotter project contains modified subcomponents from Container Registry Filesystem Project with separate copyright notices and license terms. Your use of the source code for the subcomponent is subject to the terms and conditions as defined by the source project. Files in these subcomponents contain following file header.
|
||||
|
||||
```
|
||||
Copyright 2019 The Go Authors. All rights reserved.
|
||||
Use of this source code is governed by a BSD-style
|
||||
license that can be found in the NOTICE.md file.
|
||||
```
|
||||
|
||||
This source code is governed by a 3-Clause BSD license. The copyright notice, list of conditions, and disclaimer are the following.
|
||||
|
||||
```
|
||||
Copyright (c) 2019 Google LLC. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google Inc. nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
```
|
||||
|
||||
The Stargz Snapshotter project also contains modified benchmarking code from the HelloBench Project with separate copyright notices and license terms. Your use of the source code for the benchmarking code is subject to the terms and conditions as defined by the source project. This source code is governed by an MIT license. The copyright notice, condition, and disclaimer are the following. The file in the benchmarking code contains it as the file header.
|
||||
|
||||
```
|
||||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2015 Tintri
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
```
|
|
@ -0,0 +1,394 @@
|
|||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package cache
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
|
||||
"github.com/golang/groupcache/lru"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
const (
	// defaultMaxLRUCacheEntry bounds the on-memory chunk cache when
	// DirectoryCacheConfig.MaxLRUCacheEntry is zero.
	defaultMaxLRUCacheEntry = 10
	// defaultMaxCacheFds bounds the cached open file descriptors when
	// DirectoryCacheConfig.MaxCacheFds is zero.
	defaultMaxCacheFds = 10
)
|
||||
|
||||
// DirectoryCacheConfig configures the cache returned by NewDirectoryCache.
type DirectoryCacheConfig struct {
	// MaxLRUCacheEntry is the max number of on-memory chunk entries
	// (0 means defaultMaxLRUCacheEntry).
	MaxLRUCacheEntry int
	// MaxCacheFds is the max number of cached open file descriptors
	// (0 means defaultMaxCacheFds).
	MaxCacheFds int
	// SyncAdd makes Add persist to disk before returning instead of in a
	// background goroutine.
	SyncAdd bool
}
|
||||
|
||||
// TODO: contents validation.

// BlobCache is a string-keyed content cache. Add stores p under key;
// FetchAt reads the cached content at offset into p, returning the number of
// bytes copied.
type BlobCache interface {
	Add(key string, p []byte, opts ...Option)
	FetchAt(key string, offset int64, p []byte, opts ...Option) (n int, err error)
}
|
||||
|
||||
// cacheOpt carries per-call options for BlobCache operations.
type cacheOpt struct {
	// direct skips the on-memory caches (see Direct).
	direct bool
}

// Option mutates a cacheOpt and returns it so options can be applied in sequence.
type Option func(o *cacheOpt) *cacheOpt
|
||||
|
||||
// When Direct option is specified for FetchAt and Add methods, these operation
|
||||
// won't use on-memory caches. When you know that the targeting value won't be
|
||||
// used immediately, you can prevent the limited space of on-memory caches from
|
||||
// being polluted by these unimportant values.
|
||||
func Direct() Option {
|
||||
return func(o *cacheOpt) *cacheOpt {
|
||||
o.direct = true
|
||||
return o
|
||||
}
|
||||
}
|
||||
|
||||
func NewDirectoryCache(directory string, config DirectoryCacheConfig) (BlobCache, error) {
|
||||
maxEntry := config.MaxLRUCacheEntry
|
||||
if maxEntry == 0 {
|
||||
maxEntry = defaultMaxLRUCacheEntry
|
||||
}
|
||||
maxFds := config.MaxCacheFds
|
||||
if maxFds == 0 {
|
||||
maxFds = defaultMaxCacheFds
|
||||
}
|
||||
if err := os.MkdirAll(directory, os.ModePerm); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
dc := &directoryCache{
|
||||
cache: newObjectCache(maxEntry),
|
||||
fileCache: newObjectCache(maxFds),
|
||||
wipLock: &namedLock{},
|
||||
directory: directory,
|
||||
bufPool: sync.Pool{
|
||||
New: func() interface{} {
|
||||
return new(bytes.Buffer)
|
||||
},
|
||||
},
|
||||
}
|
||||
dc.cache.finalize = func(value interface{}) {
|
||||
dc.bufPool.Put(value)
|
||||
}
|
||||
dc.fileCache.finalize = func(value interface{}) {
|
||||
value.(*os.File).Close()
|
||||
}
|
||||
dc.syncAdd = config.SyncAdd
|
||||
return dc, nil
|
||||
}
|
||||
|
||||
// directoryCache is a cache implementation which backend is a directory.
type directoryCache struct {
	// cache holds chunk data on memory (*bytes.Buffer values).
	cache *objectCache
	// fileCache holds opened cache files (*os.File values).
	fileCache *objectCache
	// directory is the root directory holding the cached blobs.
	directory string
	// wipLock serializes per-key write-in-progress handling in Add.
	wipLock *namedLock

	// bufPool supplies *bytes.Buffer scratch buffers.
	bufPool sync.Pool

	// syncAdd makes Add persist synchronously instead of via a goroutine.
	syncAdd bool
}
|
||||
|
||||
// FetchAt copies the cached content for key starting at offset into p. Lookup
// order: on-memory chunk cache, cached open file, then the backing file on
// disk (the memory tiers are skipped when the Direct option is given).
func (dc *directoryCache) FetchAt(key string, offset int64, p []byte, opts ...Option) (n int, err error) {
	opt := &cacheOpt{}
	for _, o := range opts {
		opt = o(opt)
	}

	if !opt.direct {
		// Get data from memory
		if b, done, ok := dc.cache.get(key); ok {
			defer done()
			data := b.(*bytes.Buffer).Bytes()
			if int64(len(data)) < offset {
				return 0, fmt.Errorf("invalid offset %d exceeds chunk size %d",
					offset, len(data))
			}
			return copy(p, data[offset:]), nil
		}

		// Get data from disk. If the file is already opened, use it.
		if f, done, ok := dc.fileCache.get(key); ok {
			defer done()
			return f.(*os.File).ReadAt(p, offset)
		}
	}

	// Open the cache file and read the target region
	// TODO: If the target cache is write-in-progress, should we wait for the completion
	// or simply report the cache miss?
	file, err := os.Open(dc.cachePath(key))
	if err != nil {
		return 0, errors.Wrapf(err, "failed to open blob file for %q", key)
	}
	// A short read at end-of-content is not an error for ReadAt semantics here.
	if n, err = file.ReadAt(p, offset); err == io.EOF {
		err = nil
	}

	// Cache the opened file for future use. If "direct" option is specified, this
	// won't be done. This option is useful for preventing file cache from being
	// polluted by data that won't be accessed immediately.
	if opt.direct || !dc.fileCache.add(key, file) {
		file.Close()
	}

	// TODO: should we cache the entire file data on memory?
	// but making I/O (possibly huge) on every fetching
	// might be costly.

	return n, err
}
|
||||
|
||||
func (dc *directoryCache) Add(key string, p []byte, opts ...Option) {
|
||||
opt := &cacheOpt{}
|
||||
for _, o := range opts {
|
||||
opt = o(opt)
|
||||
}
|
||||
|
||||
if !opt.direct {
|
||||
// Cache the passed data on memory. This enables to serve this data even
|
||||
// during writing it to the disk. If "direct" option is specified, this
|
||||
// won't be done. This option is useful for preventing memory cache from being
|
||||
// polluted by data that won't be accessed immediately.
|
||||
b := dc.bufPool.Get().(*bytes.Buffer)
|
||||
b.Reset()
|
||||
b.Write(p)
|
||||
if !dc.cache.add(key, b) {
|
||||
dc.bufPool.Put(b) // Already exists. No need to cache.
|
||||
}
|
||||
}
|
||||
|
||||
// Cache the passed data to disk.
|
||||
b2 := dc.bufPool.Get().(*bytes.Buffer)
|
||||
b2.Reset()
|
||||
b2.Write(p)
|
||||
addFunc := func() {
|
||||
defer dc.bufPool.Put(b2)
|
||||
|
||||
var (
|
||||
c = dc.cachePath(key)
|
||||
wip = dc.wipPath(key)
|
||||
)
|
||||
|
||||
dc.wipLock.lock(key)
|
||||
if _, err := os.Stat(wip); err == nil {
|
||||
dc.wipLock.unlock(key)
|
||||
return // Write in progress
|
||||
}
|
||||
if _, err := os.Stat(c); err == nil {
|
||||
dc.wipLock.unlock(key)
|
||||
return // Already exists.
|
||||
}
|
||||
|
||||
// Write the contents to a temporary file
|
||||
if err := os.MkdirAll(filepath.Dir(wip), os.ModePerm); err != nil {
|
||||
fmt.Printf("Warning: Failed to Create blob cache directory %q: %v\n", c, err)
|
||||
dc.wipLock.unlock(key)
|
||||
return
|
||||
}
|
||||
wipfile, err := os.Create(wip)
|
||||
if err != nil {
|
||||
fmt.Printf("Warning: failed to prepare temp file for storing cache %q", key)
|
||||
dc.wipLock.unlock(key)
|
||||
return
|
||||
}
|
||||
dc.wipLock.unlock(key)
|
||||
|
||||
defer func() {
|
||||
wipfile.Close()
|
||||
os.Remove(wipfile.Name())
|
||||
}()
|
||||
want := b2.Len()
|
||||
if _, err := io.CopyN(wipfile, b2, int64(want)); err != nil {
|
||||
fmt.Printf("Warning: failed to write cache: %v\n", err)
|
||||
return
|
||||
}
|
||||
|
||||
// Commit the cache contents
|
||||
if err := os.MkdirAll(filepath.Dir(c), os.ModePerm); err != nil {
|
||||
fmt.Printf("Warning: Failed to Create blob cache directory %q: %v\n", c, err)
|
||||
return
|
||||
}
|
||||
if err := os.Rename(wipfile.Name(), c); err != nil {
|
||||
fmt.Printf("Warning: failed to commit cache to %q: %v\n", c, err)
|
||||
return
|
||||
}
|
||||
file, err := os.Open(c)
|
||||
if err != nil {
|
||||
fmt.Printf("Warning: failed to open cache on %q: %v\n", c, err)
|
||||
return
|
||||
}
|
||||
|
||||
// Cache the opened file for future use. If "direct" option is specified, this
|
||||
// won't be done. This option is useful for preventing file cache from being
|
||||
// polluted by data that won't be accessed immediately.
|
||||
if opt.direct || !dc.fileCache.add(key, file) {
|
||||
file.Close()
|
||||
}
|
||||
}
|
||||
|
||||
if dc.syncAdd {
|
||||
addFunc()
|
||||
} else {
|
||||
go addFunc()
|
||||
}
|
||||
}
|
||||
|
||||
func (dc *directoryCache) cachePath(key string) string {
|
||||
return filepath.Join(dc.directory, key[:2], key)
|
||||
}
|
||||
|
||||
func (dc *directoryCache) wipPath(key string) string {
|
||||
return filepath.Join(dc.directory, key[:2], "w", key)
|
||||
}
|
||||
|
||||
// namedLock provides per-name mutual exclusion. Mutexes are created lazily on
// first lock and removed once no goroutine holds or waits on them.
type namedLock struct {
	// muMap maps each active name to its mutex.
	muMap map[string]*sync.Mutex
	// refMap counts holders plus waiters per name.
	refMap map[string]int

	// mu guards muMap and refMap.
	mu sync.Mutex
}
|
||||
|
||||
func (nl *namedLock) lock(name string) {
|
||||
nl.mu.Lock()
|
||||
if nl.muMap == nil {
|
||||
nl.muMap = make(map[string]*sync.Mutex)
|
||||
}
|
||||
if nl.refMap == nil {
|
||||
nl.refMap = make(map[string]int)
|
||||
}
|
||||
if _, ok := nl.muMap[name]; !ok {
|
||||
nl.muMap[name] = &sync.Mutex{}
|
||||
}
|
||||
mu := nl.muMap[name]
|
||||
nl.refMap[name]++
|
||||
nl.mu.Unlock()
|
||||
mu.Lock()
|
||||
}
|
||||
|
||||
func (nl *namedLock) unlock(name string) {
|
||||
nl.mu.Lock()
|
||||
mu := nl.muMap[name]
|
||||
nl.refMap[name]--
|
||||
if nl.refMap[name] <= 0 {
|
||||
delete(nl.muMap, name)
|
||||
delete(nl.refMap, name)
|
||||
}
|
||||
nl.mu.Unlock()
|
||||
mu.Unlock()
|
||||
}
|
||||
|
||||
func newObjectCache(maxEntries int) *objectCache {
|
||||
oc := &objectCache{
|
||||
cache: lru.New(maxEntries),
|
||||
}
|
||||
oc.cache.OnEvicted = func(key lru.Key, value interface{}) {
|
||||
value.(*object).release() // Decrease ref count incremented in add operation.
|
||||
}
|
||||
return oc
|
||||
}
|
||||
|
||||
// objectCache is a mutex-guarded LRU of reference-counted objects.
type objectCache struct {
	// cache holds *object values keyed by string.
	cache *lru.Cache
	// cacheMu guards cache.
	cacheMu sync.Mutex
	// finalize, if set, runs when an object's ref count drops to zero.
	finalize func(interface{})
}
|
||||
|
||||
func (oc *objectCache) get(key string) (value interface{}, done func(), ok bool) {
|
||||
oc.cacheMu.Lock()
|
||||
defer oc.cacheMu.Unlock()
|
||||
o, ok := oc.cache.Get(key)
|
||||
if !ok {
|
||||
return nil, nil, false
|
||||
}
|
||||
o.(*object).use()
|
||||
return o.(*object).v, func() { o.(*object).release() }, true
|
||||
}
|
||||
|
||||
func (oc *objectCache) add(key string, value interface{}) bool {
|
||||
oc.cacheMu.Lock()
|
||||
defer oc.cacheMu.Unlock()
|
||||
if _, ok := oc.cache.Get(key); ok {
|
||||
return false // TODO: should we swap the object?
|
||||
}
|
||||
o := &object{
|
||||
v: value,
|
||||
finalize: oc.finalize,
|
||||
}
|
||||
o.use() // Keep this object having at least 1 ref count (will be decreased on eviction)
|
||||
oc.cache.Add(key, o)
|
||||
return true
|
||||
}
|
||||
|
||||
// object pairs a cached value with a reference count; finalize runs once the
// count drops to zero (no cache entry and no outstanding users).
type object struct {
	// v is the cached value.
	v interface{}

	// refCounts is the number of outstanding references.
	refCounts int64
	// finalize, if non-nil, is invoked with v when refCounts reaches zero.
	finalize func(interface{})

	// mu guards refCounts.
	mu sync.Mutex
}
|
||||
|
||||
func (o *object) use() {
|
||||
o.mu.Lock()
|
||||
defer o.mu.Unlock()
|
||||
o.refCounts++
|
||||
}
|
||||
|
||||
func (o *object) release() {
|
||||
o.mu.Lock()
|
||||
defer o.mu.Unlock()
|
||||
o.refCounts--
|
||||
if o.refCounts <= 0 && o.finalize != nil {
|
||||
// nobody will refer this object
|
||||
o.finalize(o.v)
|
||||
}
|
||||
}
|
||||
|
||||
func NewMemoryCache() BlobCache {
|
||||
return &memoryCache{
|
||||
membuf: map[string]string{},
|
||||
}
|
||||
}
|
||||
|
||||
// memoryCache is a cache implementation which backend is a memory.
type memoryCache struct {
	// membuf stores blobs as strings (immutable snapshots of the added bytes).
	// read-only []byte map is more ideal but we don't have it in golang...
	membuf map[string]string
	// mu guards membuf.
	mu sync.Mutex
}
|
||||
|
||||
func (mc *memoryCache) FetchAt(key string, offset int64, p []byte, opts ...Option) (n int, err error) {
|
||||
mc.mu.Lock()
|
||||
defer mc.mu.Unlock()
|
||||
|
||||
cache, ok := mc.membuf[key]
|
||||
if !ok {
|
||||
return 0, fmt.Errorf("Missed cache: %q", key)
|
||||
}
|
||||
return copy(p, cache[offset:]), nil
|
||||
}
|
||||
|
||||
func (mc *memoryCache) Add(key string, p []byte, opts ...Option) {
|
||||
mc.mu.Lock()
|
||||
defer mc.mu.Unlock()
|
||||
mc.membuf[key] = string(p)
|
||||
}
|
679
vendor/github.com/containerd/stargz-snapshotter/snapshot/snapshot.go
generated
vendored
Normal file
679
vendor/github.com/containerd/stargz-snapshotter/snapshot/snapshot.go
generated
vendored
Normal file
|
@ -0,0 +1,679 @@
|
|||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package snapshot
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"syscall"
|
||||
|
||||
"github.com/containerd/containerd/errdefs"
|
||||
"github.com/containerd/containerd/log"
|
||||
"github.com/containerd/containerd/mount"
|
||||
"github.com/containerd/containerd/snapshots"
|
||||
"github.com/containerd/containerd/snapshots/storage"
|
||||
"github.com/containerd/continuity/fs"
|
||||
"github.com/pkg/errors"
|
||||
"golang.org/x/sync/errgroup"
|
||||
)
|
||||
|
||||
const (
	// targetSnapshotLabel names the snapshot that Prepare should try to
	// fulfill as a remote snapshot.
	targetSnapshotLabel = "containerd.io/snapshot.ref"
	// remoteLabel marks a snapshot that Prepare successfully prepared as a
	// remote snapshot.
	remoteLabel = "containerd.io/snapshot/remote"

	// remoteSnapshotLogKey is a key for log line, which indicates whether
	// `Prepare` method successfully prepared targeting remote snapshot or not, as
	// defined in the following:
	// - "true"  : indicates the snapshot has been successfully prepared as a
	//             remote snapshot
	// - "false" : indicates the snapshot failed to be prepared as a remote
	//             snapshot
	// - null    : undetermined
	remoteSnapshotLogKey = "remote-snapshot-prepared"
	prepareSucceeded     = "true"
	prepareFailed        = "false"
)
|
||||
|
||||
// FileSystem is a backing filesystem abstraction.
//
// Mount() tries to mount a remote snapshot to the specified mount point
// directory. If succeed, the mountpoint directory will be treated as a layer
// snapshot.
// Check() is called to check the connectivity of the existing layer snapshot
// every time the layer is used by containerd.
// Unmount() is called to unmount a remote snapshot from the specified mount point
// directory.
type FileSystem interface {
	Mount(ctx context.Context, mountpoint string, labels map[string]string) error
	Check(ctx context.Context, mountpoint string) error
	Unmount(ctx context.Context, mountpoint string) error
}
|
||||
|
||||
// SnapshotterConfig is used to configure the remote snapshotter instance
type SnapshotterConfig struct {
	// asyncRemove defers content removal until Cleanup (see AsynchronousRemove).
	asyncRemove bool
}

// Opt is an option to configure the remote snapshotter
type Opt func(config *SnapshotterConfig) error
|
||||
|
||||
// AsynchronousRemove defers removal of filesystem content until
|
||||
// the Cleanup method is called. Removals will make the snapshot
|
||||
// referred to by the key unavailable and make the key immediately
|
||||
// available for re-use.
|
||||
func AsynchronousRemove(config *SnapshotterConfig) error {
|
||||
config.asyncRemove = true
|
||||
return nil
|
||||
}
|
||||
|
||||
// snapshotter implements snapshots.Snapshotter on top of a metadata store and
// a backing FileSystem capable of mounting remote layers.
type snapshotter struct {
	// root is the snapshotter's state directory.
	root string
	// ms is the bolt-backed snapshot metadata store.
	ms *storage.MetaStore
	// asyncRemove defers content removal until Cleanup.
	asyncRemove bool

	// fs is a filesystem that this snapshotter recognizes.
	fs FileSystem
}
|
||||
|
||||
// NewSnapshotter returns a Snapshotter which can use unpacked remote layers
// as snapshots. This is implemented based on the overlayfs snapshotter, so
// diffs are stored under the provided root and a metadata file is stored under
// the root as same as overlayfs snapshotter.
func NewSnapshotter(root string, targetFs FileSystem, opts ...Opt) (snapshots.Snapshotter, error) {
	// The backing filesystem is mandatory.
	if targetFs == nil {
		return nil, fmt.Errorf("Specify filesystem to use")
	}

	var config SnapshotterConfig
	for _, opt := range opts {
		if err := opt(&config); err != nil {
			return nil, err
		}
	}

	if err := os.MkdirAll(root, 0700); err != nil {
		return nil, err
	}
	// d_type support is required by overlayfs-style layering.
	supportsDType, err := fs.SupportsDType(root)
	if err != nil {
		return nil, err
	}
	if !supportsDType {
		return nil, fmt.Errorf("%s does not support d_type. If the backing filesystem is xfs, please reformat with ftype=1 to enable d_type support", root)
	}
	ms, err := storage.NewMetaStore(filepath.Join(root, "metadata.db"))
	if err != nil {
		return nil, err
	}

	// Snapshot diff directories live under root/snapshots.
	if err := os.Mkdir(filepath.Join(root, "snapshots"), 0700); err != nil && !os.IsExist(err) {
		return nil, err
	}

	return &snapshotter{
		root:        root,
		ms:          ms,
		asyncRemove: config.asyncRemove,
		fs:          targetFs,
	}, nil
}
|
||||
|
||||
// Stat returns the info for an active or committed snapshot by name or
// key.
//
// Should be used for parent resolution, existence checks and to discern
// the kind of snapshot.
func (o *snapshotter) Stat(ctx context.Context, key string) (snapshots.Info, error) {
	// Read-only transaction: always rolled back.
	ctx, t, err := o.ms.TransactionContext(ctx, false)
	if err != nil {
		return snapshots.Info{}, err
	}
	defer t.Rollback()
	_, info, _, err := storage.GetInfo(ctx, key)
	if err != nil {
		return snapshots.Info{}, err
	}

	return info, nil
}
|
||||
|
||||
// Update applies the given field paths of info to the stored snapshot metadata
// inside a writable transaction and returns the updated info.
func (o *snapshotter) Update(ctx context.Context, info snapshots.Info, fieldpaths ...string) (snapshots.Info, error) {
	ctx, t, err := o.ms.TransactionContext(ctx, true)
	if err != nil {
		return snapshots.Info{}, err
	}

	info, err = storage.UpdateInfo(ctx, info, fieldpaths...)
	if err != nil {
		// Abandon the transaction on failure.
		t.Rollback()
		return snapshots.Info{}, err
	}

	if err := t.Commit(); err != nil {
		return snapshots.Info{}, err
	}

	return info, nil
}
|
||||
|
||||
// Usage returns the resources taken by the snapshot identified by key.
//
// For active snapshots, this will scan the usage of the overlay "diff" (aka
// "upper") directory and may take some time.
// for remote snapshots, no scan will be held and recognise the number of inodes
// and these sizes as "zero".
//
// For committed snapshots, the value is returned from the metadata database.
func (o *snapshotter) Usage(ctx context.Context, key string) (snapshots.Usage, error) {
	ctx, t, err := o.ms.TransactionContext(ctx, false)
	if err != nil {
		return snapshots.Usage{}, err
	}
	id, info, usage, err := storage.GetInfo(ctx, key)
	t.Rollback() // transaction no longer needed at this point.

	if err != nil {
		return snapshots.Usage{}, err
	}

	upperPath := o.upperPath(id)

	if info.Kind == snapshots.KindActive {
		// Active snapshots: measure the upper directory on disk.
		du, err := fs.DiskUsage(ctx, upperPath)
		if err != nil {
			// TODO(stevvooe): Consider not reporting an error in this case.
			return snapshots.Usage{}, err
		}

		usage = snapshots.Usage(du)
	}

	return usage, nil
}
|
||||
|
||||
func (o *snapshotter) Prepare(ctx context.Context, key, parent string, opts ...snapshots.Opt) ([]mount.Mount, error) {
|
||||
s, err := o.createSnapshot(ctx, snapshots.KindActive, key, parent, opts)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Try to prepare the remote snapshot. If succeeded, we commit the snapshot now
|
||||
// and return ErrAlreadyExists.
|
||||
var base snapshots.Info
|
||||
for _, opt := range opts {
|
||||
if err := opt(&base); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if target, ok := base.Labels[targetSnapshotLabel]; ok {
|
||||
// NOTE: If passed labels include a target of the remote snapshot, `Prepare`
|
||||
// must log whether this method succeeded to prepare that remote snapshot
|
||||
// or not, using the key `remoteSnapshotLogKey` defined in the above. This
|
||||
// log is used by tests in this project.
|
||||
lCtx := log.WithLogger(ctx, log.G(ctx).WithField("key", key).WithField("parent", parent))
|
||||
if err := o.prepareRemoteSnapshot(ctx, key, base.Labels); err == nil {
|
||||
base.Labels[remoteLabel] = fmt.Sprintf("remote snapshot") // Mark this snapshot as remote
|
||||
err := o.Commit(ctx, target, key, append(opts, snapshots.WithLabels(base.Labels))...)
|
||||
if err == nil {
|
||||
log.G(lCtx).WithField(remoteSnapshotLogKey, prepareSucceeded).Debug("prepared remote snapshot")
|
||||
return nil, errors.Wrapf(errdefs.ErrAlreadyExists, "target snapshot %q", target)
|
||||
}
|
||||
log.G(lCtx).WithField(remoteSnapshotLogKey, prepareFailed).
|
||||
WithError(err).Debug("failed to internally commit remote snapshot")
|
||||
} else {
|
||||
log.G(lCtx).WithField(remoteSnapshotLogKey, prepareFailed).
|
||||
WithError(err).Debug("failed to prepare remote snapshot")
|
||||
}
|
||||
}
|
||||
|
||||
return o.mounts(ctx, s, parent)
|
||||
}
|
||||
|
||||
func (o *snapshotter) View(ctx context.Context, key, parent string, opts ...snapshots.Opt) ([]mount.Mount, error) {
|
||||
s, err := o.createSnapshot(ctx, snapshots.KindView, key, parent, opts)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return o.mounts(ctx, s, parent)
|
||||
}
|
||||
|
||||
// Mounts returns the mounts for the transaction identified by key. Can be
|
||||
// called on an read-write or readonly transaction.
|
||||
//
|
||||
// This can be used to recover mounts after calling View or Prepare.
|
||||
func (o *snapshotter) Mounts(ctx context.Context, key string) ([]mount.Mount, error) {
|
||||
ctx, t, err := o.ms.TransactionContext(ctx, false)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
s, err := storage.GetSnapshot(ctx, key)
|
||||
t.Rollback()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to get active mount")
|
||||
}
|
||||
return o.mounts(ctx, s, key)
|
||||
}
|
||||
|
||||
// Commit turns the active snapshot identified by key into a committed
// snapshot named name, recording the disk usage of its upper directory.
func (o *snapshotter) Commit(ctx context.Context, name, key string, opts ...snapshots.Opt) error {
	ctx, t, err := o.ms.TransactionContext(ctx, true)
	if err != nil {
		return err
	}

	// Every failure path below leaves err non-nil, which triggers this
	// deferred rollback; the success path commits and leaves err nil.
	defer func() {
		if err != nil {
			if rerr := t.Rollback(); rerr != nil {
				log.G(ctx).WithError(rerr).Warn("failed to rollback transaction")
			}
		}
	}()

	// grab the existing id
	id, _, _, err := storage.GetInfo(ctx, key)
	if err != nil {
		return err
	}

	usage, err := fs.DiskUsage(ctx, o.upperPath(id))
	if err != nil {
		return err
	}

	if _, err = storage.CommitActive(ctx, key, name, snapshots.Usage(usage), opts...); err != nil {
		return errors.Wrap(err, "failed to commit snapshot")
	}

	return t.Commit()
}
|
||||
|
||||
// Remove abandons the snapshot identified by key. The snapshot will
// immediately become unavailable and unrecoverable. Disk space will
// be freed up on the next call to `Cleanup`.
func (o *snapshotter) Remove(ctx context.Context, key string) (err error) {
	ctx, t, err := o.ms.TransactionContext(ctx, true)
	if err != nil {
		return err
	}
	// Rolls back when any step below leaves the named return err non-nil.
	defer func() {
		if err != nil {
			if rerr := t.Rollback(); rerr != nil {
				log.G(ctx).WithError(rerr).Warn("failed to rollback transaction")
			}
		}
	}()

	_, _, err = storage.Remove(ctx, key)
	if err != nil {
		return errors.Wrap(err, "failed to remove")
	}

	if !o.asyncRemove {
		var removals []string
		const cleanupCommitted = false
		removals, err = o.getCleanupDirectories(ctx, t, cleanupCommitted)
		if err != nil {
			return errors.Wrap(err, "unable to get directories for removal")
		}

		// Remove directories after the transaction is closed, failures must not
		// return error since the transaction is committed with the removal
		// key no longer available.
		defer func() {
			if err == nil {
				for _, dir := range removals {
					if err := o.cleanupSnapshotDirectory(ctx, dir); err != nil {
						log.G(ctx).WithError(err).WithField("path", dir).Warn("failed to remove directory")
					}
				}
			}
		}()

	}

	return t.Commit()
}
|
||||
|
||||
// Walk the snapshots.
// NOTE(review): the parameter fs (filter strings for WalkInfo) shadows the
// fs package used elsewhere in this file.
func (o *snapshotter) Walk(ctx context.Context, fn snapshots.WalkFunc, fs ...string) error {
	ctx, t, err := o.ms.TransactionContext(ctx, false)
	if err != nil {
		return err
	}
	defer t.Rollback()
	return storage.WalkInfo(ctx, fn, fs...)
}
|
||||
|
||||
// Cleanup cleans up disk resources from removed or abandoned snapshots
func (o *snapshotter) Cleanup(ctx context.Context) error {
	// Only remove directories no longer referenced in the metadata store;
	// committed snapshots are kept (contrast with Close).
	const cleanupCommitted = false
	return o.cleanup(ctx, cleanupCommitted)
}
|
||||
|
||||
func (o *snapshotter) cleanup(ctx context.Context, cleanupCommitted bool) error {
|
||||
cleanup, err := o.cleanupDirectories(ctx, cleanupCommitted)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
log.G(ctx).Debugf("cleanup: dirs=%v", cleanup)
|
||||
for _, dir := range cleanup {
|
||||
if err := o.cleanupSnapshotDirectory(ctx, dir); err != nil {
|
||||
log.G(ctx).WithError(err).WithField("path", dir).Warn("failed to remove directory")
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (o *snapshotter) cleanupDirectories(ctx context.Context, cleanupCommitted bool) ([]string, error) {
|
||||
// Get a write transaction to ensure no other write transaction can be entered
|
||||
// while the cleanup is scanning.
|
||||
ctx, t, err := o.ms.TransactionContext(ctx, true)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
defer t.Rollback()
|
||||
return o.getCleanupDirectories(ctx, t, cleanupCommitted)
|
||||
}
|
||||
|
||||
func (o *snapshotter) getCleanupDirectories(ctx context.Context, t storage.Transactor, cleanupCommitted bool) ([]string, error) {
|
||||
ids, err := storage.IDMap(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
snapshotDir := filepath.Join(o.root, "snapshots")
|
||||
fd, err := os.Open(snapshotDir)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer fd.Close()
|
||||
|
||||
dirs, err := fd.Readdirnames(0)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
cleanup := []string{}
|
||||
for _, d := range dirs {
|
||||
if !cleanupCommitted {
|
||||
if _, ok := ids[d]; ok {
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
cleanup = append(cleanup, filepath.Join(snapshotDir, d))
|
||||
}
|
||||
|
||||
return cleanup, nil
|
||||
}
|
||||
|
||||
func (o *snapshotter) cleanupSnapshotDirectory(ctx context.Context, dir string) error {
|
||||
|
||||
// On a remote snapshot, the layer is mounted on the "fs" directory.
|
||||
// We use Filesystem's Unmount API so that it can do necessary finalization
|
||||
// before/after the unmount.
|
||||
mp := filepath.Join(dir, "fs")
|
||||
if err := o.fs.Unmount(ctx, mp); err != nil {
|
||||
log.G(ctx).WithError(err).WithField("dir", mp).Debug("failed to unmount")
|
||||
}
|
||||
if err := os.RemoveAll(dir); err != nil {
|
||||
return errors.Wrapf(err, "failed to remove directory %q", dir)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// createSnapshot allocates metadata and an on-disk directory for a new
// snapshot of the given kind. The directory is created under a temporary
// name and renamed into place only once the metadata is ready; the deferred
// handlers remove partial state on any failure.
func (o *snapshotter) createSnapshot(ctx context.Context, kind snapshots.Kind, key, parent string, opts []snapshots.Opt) (_ storage.Snapshot, err error) {
	ctx, t, err := o.ms.TransactionContext(ctx, true)
	if err != nil {
		return storage.Snapshot{}, err
	}

	var td, path string
	// On failure, remove whichever of the temp dir (td) or the renamed
	// final dir (path) exists at that point.
	defer func() {
		if err != nil {
			if td != "" {
				if err1 := o.cleanupSnapshotDirectory(ctx, td); err1 != nil {
					log.G(ctx).WithError(err1).Warn("failed to cleanup temp snapshot directory")
				}
			}
			if path != "" {
				if err1 := o.cleanupSnapshotDirectory(ctx, path); err1 != nil {
					log.G(ctx).WithError(err1).WithField("path", path).Error("failed to reclaim snapshot directory, directory may need removal")
					err = errors.Wrapf(err, "failed to remove path: %v", err1)
				}
			}
		}
	}()

	snapshotDir := filepath.Join(o.root, "snapshots")
	td, err = o.prepareDirectory(ctx, snapshotDir, kind)
	if err != nil {
		if rerr := t.Rollback(); rerr != nil {
			log.G(ctx).WithError(rerr).Warn("failed to rollback transaction")
		}
		return storage.Snapshot{}, errors.Wrap(err, "failed to create prepare snapshot dir")
	}
	// Roll back automatically on any error path below until commit.
	rollback := true
	defer func() {
		if rollback {
			if rerr := t.Rollback(); rerr != nil {
				log.G(ctx).WithError(rerr).Warn("failed to rollback transaction")
			}
		}
	}()

	s, err := storage.CreateSnapshot(ctx, kind, key, parent, opts...)
	if err != nil {
		return storage.Snapshot{}, errors.Wrap(err, "failed to create snapshot")
	}

	if len(s.ParentIDs) > 0 {
		// Match the new upper dir's ownership to the parent's upper dir.
		st, err := os.Stat(o.upperPath(s.ParentIDs[0]))
		if err != nil {
			return storage.Snapshot{}, errors.Wrap(err, "failed to stat parent")
		}

		stat := st.Sys().(*syscall.Stat_t)

		if err := os.Lchown(filepath.Join(td, "fs"), int(stat.Uid), int(stat.Gid)); err != nil {
			// NOTE(review): this explicit rollback runs in addition to the
			// deferred one above (rollback is still true here), so a second
			// Rollback will be attempted and its failure logged — confirm
			// whether that double rollback is intentional.
			if rerr := t.Rollback(); rerr != nil {
				log.G(ctx).WithError(rerr).Warn("failed to rollback transaction")
			}
			return storage.Snapshot{}, errors.Wrap(err, "failed to chown")
		}
	}

	path = filepath.Join(snapshotDir, s.ID)
	if err = os.Rename(td, path); err != nil {
		return storage.Snapshot{}, errors.Wrap(err, "failed to rename")
	}
	// The temp dir no longer exists under its old name; clear td so the
	// deferred cleanup does not try to remove it.
	td = ""

	rollback = false
	if err = t.Commit(); err != nil {
		return storage.Snapshot{}, errors.Wrap(err, "commit failed")
	}

	return s, nil
}
|
||||
|
||||
func (o *snapshotter) prepareDirectory(ctx context.Context, snapshotDir string, kind snapshots.Kind) (string, error) {
|
||||
td, err := ioutil.TempDir(snapshotDir, "new-")
|
||||
if err != nil {
|
||||
return "", errors.Wrap(err, "failed to create temp dir")
|
||||
}
|
||||
|
||||
if err := os.Mkdir(filepath.Join(td, "fs"), 0755); err != nil {
|
||||
return td, err
|
||||
}
|
||||
|
||||
if kind == snapshots.KindActive {
|
||||
if err := os.Mkdir(filepath.Join(td, "work"), 0711); err != nil {
|
||||
return td, err
|
||||
}
|
||||
}
|
||||
|
||||
return td, nil
|
||||
}
|
||||
|
||||
// mounts builds the mount specification for snapshot s: a bind mount when
// there is a single layer, otherwise an overlay mount. If checkKey is
// non-empty, availability of the layer chain is verified first.
func (o *snapshotter) mounts(ctx context.Context, s storage.Snapshot, checkKey string) ([]mount.Mount, error) {
	// Make sure that all layers lower than the target layer are available
	if checkKey != "" && !o.checkAvailability(ctx, checkKey) {
		return nil, errors.Wrapf(errdefs.ErrUnavailable, "layer %q unavailable", s.ID)
	}

	if len(s.ParentIDs) == 0 {
		// if we only have one layer/no parents then just return a bind mount as overlay
		// will not work
		roFlag := "rw"
		if s.Kind == snapshots.KindView {
			roFlag = "ro"
		}

		return []mount.Mount{
			{
				Source: o.upperPath(s.ID),
				Type:   "bind",
				Options: []string{
					roFlag,
					"rbind",
				},
			},
		}, nil
	}
	var options []string

	if s.Kind == snapshots.KindActive {
		options = append(options,
			fmt.Sprintf("workdir=%s", o.workPath(s.ID)),
			fmt.Sprintf("upperdir=%s", o.upperPath(s.ID)),
		)
	} else if len(s.ParentIDs) == 1 {
		// A view over a single committed layer needs no overlay at all.
		return []mount.Mount{
			{
				Source: o.upperPath(s.ParentIDs[0]),
				Type:   "bind",
				Options: []string{
					"ro",
					"rbind",
				},
			},
		}, nil
	}

	// Overlay lowerdir entries go from the topmost parent down, matching
	// the order of s.ParentIDs.
	parentPaths := make([]string, len(s.ParentIDs))
	for i := range s.ParentIDs {
		parentPaths[i] = o.upperPath(s.ParentIDs[i])
	}

	options = append(options, fmt.Sprintf("lowerdir=%s", strings.Join(parentPaths, ":")))
	return []mount.Mount{
		{
			Type:    "overlay",
			Source:  "overlay",
			Options: options,
		},
	}, nil

}
|
||||
|
||||
// upperPath returns the writable "fs" (overlay upper) directory of the
// snapshot with the given internal id.
func (o *snapshotter) upperPath(id string) string {
	return filepath.Join(o.root, "snapshots", id, "fs")
}
|
||||
|
||||
// workPath returns the overlayfs "work" directory of the snapshot with the
// given internal id.
func (o *snapshotter) workPath(id string) string {
	return filepath.Join(o.root, "snapshots", id, "work")
}
|
||||
|
||||
// Close closes the snapshotter
|
||||
func (o *snapshotter) Close() error {
|
||||
// unmount all mounts including Committed
|
||||
const cleanupCommitted = true
|
||||
ctx := context.Background()
|
||||
if err := o.cleanup(ctx, cleanupCommitted); err != nil {
|
||||
log.G(ctx).WithError(err).Warn("failed to cleanup")
|
||||
}
|
||||
return o.ms.Close()
|
||||
}
|
||||
|
||||
// prepareRemoteSnapshot tries to prepare the snapshot as a remote snapshot
|
||||
// using filesystems registered in this snapshotter.
|
||||
func (o *snapshotter) prepareRemoteSnapshot(ctx context.Context, key string, labels map[string]string) error {
|
||||
ctx, t, err := o.ms.TransactionContext(ctx, false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer t.Rollback()
|
||||
id, _, _, err := storage.GetInfo(ctx, key)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := o.fs.Mount(ctx, o.upperPath(id), labels); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// checkAvailability checks availability of the specified layer and all lower
// layers using filesystem's checking functionality.
func (o *snapshotter) checkAvailability(ctx context.Context, key string) bool {
	ctx = log.WithLogger(ctx, log.G(ctx).WithField("key", key))
	log.G(ctx).Debug("checking layer availability")

	ctx, t, err := o.ms.TransactionContext(ctx, false)
	if err != nil {
		log.G(ctx).WithError(err).Warn("failed to get transaction")
		return false
	}
	defer t.Rollback()

	// Walk the parent chain; check each remote layer concurrently. Layers
	// without the remote label are normal overlayfs snapshots and need no
	// check.
	eg, egCtx := errgroup.WithContext(ctx)
	for cKey := key; cKey != ""; {
		id, info, _, err := storage.GetInfo(ctx, cKey)
		if err != nil {
			log.G(ctx).WithError(err).Warnf("failed to get info of %q", cKey)
			return false
		}
		mp := o.upperPath(id)
		lCtx := log.WithLogger(ctx, log.G(ctx).WithField("mount-point", mp))
		if _, ok := info.Labels[remoteLabel]; ok {
			eg.Go(func() error {
				log.G(lCtx).Debug("checking mount point")
				if err := o.fs.Check(egCtx, mp); err != nil {
					log.G(lCtx).WithError(err).Warn("layer is unavailable")
					return err
				}
				return nil
			})
		} else {
			log.G(lCtx).Debug("layer is normal snapshot(overlayfs)")
		}
		cKey = info.Parent
	}
	// Any single failing layer makes the whole chain unavailable.
	if err := eg.Wait(); err != nil {
		return false
	}
	return true
}
|
69
vendor/github.com/containerd/stargz-snapshotter/stargz/config/config.go
generated
vendored
Normal file
69
vendor/github.com/containerd/stargz-snapshotter/stargz/config/config.go
generated
vendored
Normal file
|
@ -0,0 +1,69 @@
|
|||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
/*
|
||||
Copyright 2019 The Go Authors. All rights reserved.
|
||||
Use of this source code is governed by a BSD-style
|
||||
license that can be found in the NOTICE.md file.
|
||||
*/
|
||||
|
||||
package config
|
||||
|
||||
// Config is the top-level configuration of the stargz filesystem, decoded
// from a TOML document (see the toml field tags for the key names).
type Config struct {
	HTTPCacheType       string `toml:"http_cache_type"`
	FSCacheType         string `toml:"filesystem_cache_type"`
	ResolveResultEntry  int    `toml:"resolve_result_entry"`
	PrefetchSize        int64  `toml:"prefetch_size"`
	PrefetchTimeoutSec  int64  `toml:"prefetch_timeout_sec"`
	NoPrefetch          bool   `toml:"noprefetch"`
	Debug               bool   `toml:"debug"`
	AllowNoVerification bool   `toml:"allow_no_verification"`

	// ResolverConfig is config for resolving registries.
	ResolverConfig `toml:"resolver"`

	// BlobConfig is config for layer blob management.
	BlobConfig `toml:"blob"`

	// DirectoryCacheConfig is config for directory-based cache.
	DirectoryCacheConfig `toml:"directory_cache"`
}

// ResolverConfig holds per-host registry resolution settings.
type ResolverConfig struct {
	Host                map[string]HostConfig `toml:"host"`
	ConnectionPoolEntry int                   `toml:"connection_pool_entry"`
}

// HostConfig lists the mirrors configured for a registry host.
type HostConfig struct {
	Mirrors []MirrorConfig `toml:"mirrors"`
}

// MirrorConfig describes a single registry mirror endpoint.
type MirrorConfig struct {
	Host     string `toml:"host"`
	Insecure bool   `toml:"insecure"`
}

// BlobConfig holds settings for layer blob management.
type BlobConfig struct {
	ValidInterval int64 `toml:"valid_interval"`
	CheckAlways   bool  `toml:"check_always"`
	ChunkSize     int64 `toml:"chunk_size"`
}

// DirectoryCacheConfig holds settings for the directory-based cache.
type DirectoryCacheConfig struct {
	MaxLRUCacheEntry int  `toml:"max_lru_cache_entry"`
	MaxCacheFds      int  `toml:"max_cache_fds"`
	SyncAdd          bool `toml:"sync_add"`
}
|
File diff suppressed because it is too large
Load Diff
72
vendor/github.com/containerd/stargz-snapshotter/stargz/handler/handler.go
generated
vendored
Normal file
72
vendor/github.com/containerd/stargz-snapshotter/stargz/handler/handler.go
generated
vendored
Normal file
|
@ -0,0 +1,72 @@
|
|||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package handler
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/containerd/containerd/images"
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
)
|
||||
|
||||
// Annotation keys appended to layer descriptors by AppendInfoHandlerWrapper;
// they are passed to the remote snapshotter as labels.
const (
	TargetRefLabel          = "containerd.io/snapshot/remote/stargz.reference"
	TargetDigestLabel       = "containerd.io/snapshot/remote/stargz.digest"
	TargetImageLayersLabel  = "containerd.io/snapshot/remote/stargz.layers"
	TargetPrefetchSizeLabel = "containerd.io/snapshot/remote/stargz.prefetch" // optional
)
|
||||
|
||||
// AppendInfoHandlerWrapper makes a handler which appends image's basic
|
||||
// information to each layer descriptor as annotations during unpack. These
|
||||
// annotations will be passed to this remote snapshotter as labels and used by
|
||||
// this filesystem for searching/preparing layers.
|
||||
func AppendInfoHandlerWrapper(ref string, prefetchSize int64) func(f images.Handler) images.Handler {
|
||||
return func(f images.Handler) images.Handler {
|
||||
return images.HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
|
||||
children, err := f.Handle(ctx, desc)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
switch desc.MediaType {
|
||||
case ocispec.MediaTypeImageManifest, images.MediaTypeDockerSchema2Manifest:
|
||||
var layers string
|
||||
for _, c := range children {
|
||||
if images.IsLayerType(c.MediaType) {
|
||||
layers += fmt.Sprintf("%s,", c.Digest.String())
|
||||
}
|
||||
}
|
||||
if len(layers) >= 1 {
|
||||
layers = layers[:len(layers)-1]
|
||||
}
|
||||
for i := range children {
|
||||
c := &children[i]
|
||||
if images.IsLayerType(c.MediaType) {
|
||||
if c.Annotations == nil {
|
||||
c.Annotations = make(map[string]string)
|
||||
}
|
||||
c.Annotations[TargetRefLabel] = ref
|
||||
c.Annotations[TargetDigestLabel] = c.Digest.String()
|
||||
c.Annotations[TargetImageLayersLabel] = layers
|
||||
c.Annotations[TargetPrefetchSizeLabel] = fmt.Sprintf("%d", prefetchSize)
|
||||
}
|
||||
}
|
||||
}
|
||||
return children, nil
|
||||
})
|
||||
}
|
||||
}
|
365
vendor/github.com/containerd/stargz-snapshotter/stargz/reader/reader.go
generated
vendored
Normal file
365
vendor/github.com/containerd/stargz-snapshotter/stargz/reader/reader.go
generated
vendored
Normal file
|
@ -0,0 +1,365 @@
|
|||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
/*
|
||||
Copyright 2019 The Go Authors. All rights reserved.
|
||||
Use of this source code is governed by a BSD-style
|
||||
license that can be found in the NOTICE.md file.
|
||||
*/
|
||||
|
||||
package reader
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/sha256"
|
||||
"fmt"
|
||||
"io"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
|
||||
"github.com/containerd/stargz-snapshotter/cache"
|
||||
"github.com/containerd/stargz-snapshotter/stargz/verify"
|
||||
"github.com/google/crfs/stargz"
|
||||
"github.com/pkg/errors"
|
||||
"golang.org/x/sync/errgroup"
|
||||
)
|
||||
|
||||
// Reader provides cached, verified access to the contents of a stargz blob.
type Reader interface {
	// OpenFile returns an io.ReaderAt for the named file in the blob.
	OpenFile(name string) (io.ReaderAt, error)
	// Lookup returns the TOC entry for the named file, if present.
	Lookup(name string) (*stargz.TOCEntry, bool)
	// Cache pre-populates the chunk cache; see CacheOption.
	Cache(opts ...CacheOption) error
}

// VerifiableReader is a function that produces a Reader with a given verifier.
type VerifiableReader func(verify.TOCEntryVerifier) Reader

// NewReader creates a Reader based on the given stargz blob and cache implementation.
// It returns VerifiableReader so the caller must provide a verify.TOCEntryVerifier
// to use for verifying file or chunk contained in this stargz blob.
func NewReader(sr *io.SectionReader, cache cache.BlobCache) (VerifiableReader, *stargz.TOCEntry, error) {
	r, err := stargz.Open(sr)
	if err != nil {
		return nil, nil, errors.Wrap(err, "failed to parse stargz")
	}

	// The empty name resolves to the root directory entry of the TOC.
	root, ok := r.Lookup("")
	if !ok {
		return nil, nil, fmt.Errorf("failed to get a TOCEntry of the root")
	}

	vr := &reader{
		r:     r,
		sr:    sr,
		cache: cache,
		bufPool: sync.Pool{
			New: func() interface{} {
				return new(bytes.Buffer)
			},
		},
	}

	// The returned closure installs the caller's verifier and exposes the
	// otherwise fully constructed reader.
	return func(verifier verify.TOCEntryVerifier) Reader {
		vr.verifier = verifier
		return vr
	}, root, nil
}
|
||||
|
||||
// reader implements Reader on top of a parsed stargz blob, with a
// chunk-level blob cache and a pool of scratch buffers.
type reader struct {
	r        *stargz.Reader
	sr       *io.SectionReader
	cache    cache.BlobCache
	bufPool  sync.Pool // scratch *bytes.Buffer values for chunk staging
	verifier verify.TOCEntryVerifier
}
|
||||
|
||||
// OpenFile returns an io.ReaderAt for the named file whose reads are served
// from the chunk cache when possible, falling back to the stargz blob.
func (gr *reader) OpenFile(name string) (io.ReaderAt, error) {
	sr, err := gr.r.OpenFile(name)
	if err != nil {
		return nil, err
	}
	e, ok := gr.r.Lookup(name)
	if !ok {
		return nil, fmt.Errorf("failed to get TOCEntry %q", name)
	}
	return &file{
		name:   name,
		digest: e.Digest,
		r:      gr.r,
		cache:  gr.cache,
		ra:     sr,
		gr:     gr,
	}, nil
}
|
||||
|
||||
// Lookup returns the TOC entry for name, delegating to the stargz reader.
func (gr *reader) Lookup(name string) (*stargz.TOCEntry, bool) {
	return gr.r.Lookup(name)
}
|
||||
|
||||
// Cache walks the stargz TOC and pre-populates the chunk cache. Options may
// substitute an alternative blob reader and filter which entries are cached.
func (gr *reader) Cache(opts ...CacheOption) (err error) {
	var cacheOpts cacheOptions
	for _, o := range opts {
		o(&cacheOpts)
	}

	// If the caller supplied its own reader, re-open the TOC against it;
	// note that r shadows gr.r only locally.
	r := gr.r
	if cacheOpts.reader != nil {
		if r, err = stargz.Open(cacheOpts.reader); err != nil {
			return errors.Wrap(err, "failed to parse stargz")
		}
	}
	root, ok := r.Lookup("")
	if !ok {
		return fmt.Errorf("failed to get a TOCEntry of the root")
	}

	// By default, cache everything.
	filter := func(*stargz.TOCEntry) bool {
		return true
	}
	if cacheOpts.filter != nil {
		filter = cacheOpts.filter
	}

	return gr.cacheWithReader(root, r, filter, cacheOpts.cacheOpts...)
}
|
||||
|
||||
// cacheWithReader recursively walks the directory entry dir and caches the
// chunks of every regular file accepted by filter, fetching and verifying
// chunks concurrently via an errgroup.
func (gr *reader) cacheWithReader(dir *stargz.TOCEntry, r *stargz.Reader, filter func(*stargz.TOCEntry) bool, opts ...cache.Option) (rErr error) {
	eg, _ := errgroup.WithContext(context.Background())
	dir.ForeachChild(func(_ string, e *stargz.TOCEntry) bool {
		if e.Type == "dir" {
			// Walk through all files on this stargz file.
			eg.Go(func() error {
				// Make sure the entry is the immediate child for avoiding loop.
				if filepath.Dir(filepath.Clean(e.Name)) != filepath.Clean(dir.Name) {
					return fmt.Errorf("invalid child path %q; must be child of %q",
						e.Name, dir.Name)
				}
				return gr.cacheWithReader(e, r, filter, opts...)
			})
			return true
		} else if e.Type != "reg" {
			// Only cache regular files
			return true
		} else if !filter(e) {
			// This entry need to be filtered out
			return true
		} else if e.Name == stargz.TOCTarName {
			// We don't need to cache TOC json file
			return true
		}

		sr, err := r.OpenFile(e.Name)
		if err != nil {
			// Record the error and stop the walk; eg.Wait() below surfaces it.
			rErr = err
			return false
		}

		// Iterate over the file's chunks; each chunk is fetched/verified in
		// its own goroutine. ce is declared per iteration, so the closure
		// below captures the right entry.
		var nr int64
		for nr < e.Size {
			ce, ok := r.ChunkEntryForOffset(e.Name, nr)
			if !ok {
				break
			}
			nr += ce.ChunkSize

			eg.Go(func() error {
				// Check if the target chunks exists in the cache
				id := genID(e.Digest, ce.ChunkOffset, ce.ChunkSize)
				if _, err := gr.cache.FetchAt(id, 0, nil, opts...); err == nil {
					return nil
				}

				// missed cache, needs to fetch and add it to the cache
				cr := io.NewSectionReader(sr, ce.ChunkOffset, ce.ChunkSize)
				b := gr.bufPool.Get().(*bytes.Buffer)
				defer gr.bufPool.Put(b)
				b.Reset()
				b.Grow(int(ce.ChunkSize))
				v, err := gr.verifier.Verifier(ce)
				if err != nil {
					return errors.Wrapf(err, "verifier not found %q(off:%d,size:%d)",
						e.Name, ce.ChunkOffset, ce.ChunkSize)
				}
				// TeeReader feeds the verifier while the chunk is copied
				// into the staging buffer.
				if _, err := io.CopyN(b, io.TeeReader(cr, v), ce.ChunkSize); err != nil {
					return errors.Wrapf(err,
						"failed to read file payload of %q (offset:%d,size:%d)",
						e.Name, ce.ChunkOffset, ce.ChunkSize)
				}
				if int64(b.Len()) != ce.ChunkSize {
					return fmt.Errorf("unexpected copied data size %d; want %d",
						b.Len(), ce.ChunkSize)
				}
				if !v.Verified() {
					return fmt.Errorf("invalid chunk %q (offset:%d,size:%d)",
						e.Name, ce.ChunkOffset, ce.ChunkSize)
				}
				gr.cache.Add(id, b.Bytes()[:ce.ChunkSize], opts...)

				return nil
			})
		}

		return true
	})

	if err := eg.Wait(); err != nil {
		// Prefer the walk error (rErr) as the primary cause when both exist.
		if rErr != nil {
			return errors.Wrapf(rErr, "failed to pre-cache some files: %v", err)
		}
		return err
	}

	return
}
|
||||
|
||||
// file is an io.ReaderAt over a single regular file in the stargz blob,
// backed by the chunk cache.
type file struct {
	name   string
	digest string // the TOC entry's digest for this file; used in cache keys
	ra     io.ReaderAt
	r      *stargz.Reader
	cache  cache.BlobCache
	gr     *reader
}
|
||||
|
||||
// ReadAt reads chunks from the stargz file with trying to fetch as many chunks
// as possible from the cache.
func (sf *file) ReadAt(p []byte, offset int64) (int, error) {
	nr := 0
	for nr < len(p) {
		ce, ok := sf.r.ChunkEntryForOffset(sf.name, offset+int64(nr))
		if !ok {
			break
		}
		var (
			id = genID(sf.digest, ce.ChunkOffset, ce.ChunkSize)
			// lowerDiscard/upperDiscard: bytes of the chunk that fall
			// outside the requested [offset, offset+len(p)) window.
			lowerDiscard = positive(offset - ce.ChunkOffset)
			upperDiscard = positive(ce.ChunkOffset + ce.ChunkSize - (offset + int64(len(p))))
			expectedSize = ce.ChunkSize - upperDiscard - lowerDiscard
		)

		// Check if the content exists in the cache
		n, err := sf.cache.FetchAt(id, lowerDiscard, p[nr:int64(nr)+expectedSize])
		if err == nil && int64(n) == expectedSize {
			nr += n
			continue
		}

		// We missed cache. Take it from underlying reader.
		// We read the whole chunk here and add it to the cache so that following
		// reads against neighboring chunks can take the data without decompression.
		if lowerDiscard == 0 && upperDiscard == 0 {
			// We can directly store the result to the given buffer
			ip := p[nr : int64(nr)+ce.ChunkSize]
			n, err := sf.ra.ReadAt(ip, ce.ChunkOffset)
			if err != nil && err != io.EOF {
				return 0, errors.Wrap(err, "failed to read data")
			}

			// Verify this chunk
			if err := sf.verify(ip, ce); err != nil {
				return 0, errors.Wrap(err, "invalid chunk")
			}

			// Cache this chunk
			sf.cache.Add(id, ip)
			nr += n
			continue
		}

		// Use a temporary buffer for aligning this chunk
		b := sf.gr.bufPool.Get().(*bytes.Buffer)
		b.Reset()
		b.Grow(int(ce.ChunkSize))
		ip := b.Bytes()[:ce.ChunkSize]
		if _, err := sf.ra.ReadAt(ip, ce.ChunkOffset); err != nil && err != io.EOF {
			sf.gr.bufPool.Put(b)
			return 0, errors.Wrap(err, "failed to read data")
		}

		// Verify this chunk
		if err := sf.verify(ip, ce); err != nil {
			sf.gr.bufPool.Put(b)
			return 0, errors.Wrap(err, "invalid chunk")
		}

		// Cache this chunk
		sf.cache.Add(id, ip)
		n = copy(p[nr:], ip[lowerDiscard:ce.ChunkSize-upperDiscard])
		sf.gr.bufPool.Put(b)
		if int64(n) != expectedSize {
			return 0, fmt.Errorf("unexpected final data size %d; want %d", n, expectedSize)
		}
		nr += n
	}

	return nr, nil
}
|
||||
|
||||
func (sf *file) verify(p []byte, ce *stargz.TOCEntry) error {
|
||||
v, err := sf.gr.verifier.Verifier(ce)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "verifier not found %q (offset:%d,size:%d)",
|
||||
ce.Name, ce.ChunkOffset, ce.ChunkSize)
|
||||
}
|
||||
if _, err := io.Copy(v, bytes.NewReader(p)); err != nil {
|
||||
return errors.Wrapf(err, "failed to verify %q (offset:%d,size:%d)",
|
||||
ce.Name, ce.ChunkOffset, ce.ChunkSize)
|
||||
}
|
||||
if !v.Verified() {
|
||||
return fmt.Errorf("invalid chunk %q (offset:%d,size:%d)",
|
||||
ce.Name, ce.ChunkOffset, ce.ChunkSize)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// genID derives a deterministic cache key for the chunk identified by the
// layer digest and the chunk's offset and size.
func genID(digest string, offset, size int64) string {
	raw := fmt.Sprintf("%s-%d-%d", digest, offset, size)
	return fmt.Sprintf("%x", sha256.Sum256([]byte(raw)))
}
|
||||
|
||||
// positive clamps negative values to zero.
func positive(n int64) int64 {
	if n >= 0 {
		return n
	}
	return 0
}
|
||||
|
||||
// CacheOption configures how layer contents are cached.
type CacheOption func(*cacheOptions)

// cacheOptions collects the settings applied by CacheOption values.
type cacheOptions struct {
	cacheOpts []cache.Option              // options forwarded to the underlying blob cache
	filter    func(*stargz.TOCEntry) bool // selects which TOC entries to cache
	reader    *io.SectionReader           // alternative source to read layer data from
}

// WithCacheOpts sets options forwarded to the underlying cache on each access.
func WithCacheOpts(cacheOpts ...cache.Option) CacheOption {
	return func(opts *cacheOptions) {
		opts.cacheOpts = cacheOpts
	}
}

// WithFilter restricts caching to TOC entries for which filter returns true.
func WithFilter(filter func(*stargz.TOCEntry) bool) CacheOption {
	return func(opts *cacheOptions) {
		opts.filter = filter
	}
}

// WithReader sets the section reader used as the data source while caching.
func WithReader(sr *io.SectionReader) CacheOption {
	return func(opts *cacheOptions) {
		opts.reader = sr
	}
}
|
359
vendor/github.com/containerd/stargz-snapshotter/stargz/remote/blob.go
generated
vendored
Normal file
359
vendor/github.com/containerd/stargz-snapshotter/stargz/remote/blob.go
generated
vendored
Normal file
|
@ -0,0 +1,359 @@
|
|||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
/*
|
||||
Copyright 2019 The Go Authors. All rights reserved.
|
||||
Use of this source code is governed by a BSD-style
|
||||
license that can be found in the NOTICE.md file.
|
||||
*/
|
||||
|
||||
package remote
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"regexp"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/containerd/stargz-snapshotter/cache"
|
||||
"github.com/google/go-containerregistry/pkg/authn"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// contentRangeRegexp parses HTTP Content-Range headers of the form
// "bytes <first>-<last>/<complete-length>", where the complete length may be
// "*" when unknown. In a backquoted raw string `\\*` matches a run of
// backslashes, not a literal '*'; `\*` is the correct escape.
var contentRangeRegexp = regexp.MustCompile(`bytes ([0-9]+)-([0-9]+)/([0-9]+|\*)`)
|
||||
|
||||
// Blob provides read-only random access to a remote registry blob, backed by
// a local chunk cache and a refreshable authenticated transport.
type Blob interface {
	// Authn returns a round tripper authenticated for this blob's reference.
	Authn(tr http.RoundTripper) (http.RoundTripper, error)
	// Check verifies the blob is still accessible; the probe is rate-limited
	// by the blob's check interval.
	Check() error
	// Size returns the total size of the blob in bytes.
	Size() int64
	// FetchedSize returns how many bytes have been downloaded so far.
	FetchedSize() int64
	// ReadAt reads len(p) bytes at offset, preferring cached chunks.
	ReadAt(p []byte, offset int64, opts ...Option) (int, error)
	// Cache prefetches and caches the chunks covering [offset, offset+size).
	Cache(offset int64, size int64, opts ...Option) error
}
|
||||
|
||||
// blob is the default Blob implementation backed by an HTTP range fetcher and
// a chunk-granularity cache.
type blob struct {
	fetcher   *fetcher   // current fetcher; may be swapped by Resolver.Refresh
	fetcherMu sync.Mutex // guards fetcher

	size          int64           // total blob size in bytes
	keychain      authn.Keychain  // credentials used when re-authenticating
	chunkSize     int64           // fetch/cache granularity in bytes
	cache         cache.BlobCache // local chunk cache
	lastCheck     time.Time       // time of the last successful access/check
	checkInterval time.Duration   // minimum interval between availability checks

	fetchedRegionSet   regionSet  // byte regions downloaded so far
	fetchedRegionSetMu sync.Mutex // guards fetchedRegionSet

	resolver *Resolver // provides scratch buffers and refresh
}
|
||||
|
||||
func (b *blob) Authn(tr http.RoundTripper) (http.RoundTripper, error) {
|
||||
b.fetcherMu.Lock()
|
||||
fr := b.fetcher
|
||||
b.fetcherMu.Unlock()
|
||||
return fr.authn(tr, b.keychain)
|
||||
}
|
||||
|
||||
func (b *blob) Check() error {
|
||||
now := time.Now()
|
||||
if now.Sub(b.lastCheck) < b.checkInterval {
|
||||
// do nothing if not expired
|
||||
return nil
|
||||
}
|
||||
b.lastCheck = now
|
||||
b.fetcherMu.Lock()
|
||||
fr := b.fetcher
|
||||
b.fetcherMu.Unlock()
|
||||
return fr.check()
|
||||
}
|
||||
|
||||
// Size returns the total size of the blob in bytes.
func (b *blob) Size() int64 {
	return b.size
}
|
||||
|
||||
func (b *blob) FetchedSize() int64 {
|
||||
b.fetchedRegionSetMu.Lock()
|
||||
sz := b.fetchedRegionSet.totalSize()
|
||||
b.fetchedRegionSetMu.Unlock()
|
||||
return sz
|
||||
}
|
||||
|
||||
// Cache prefetches the chunks covering [offset, offset+size) into the local
// cache without copying them to the caller. Chunks already cached are skipped.
func (b *blob) Cache(offset int64, size int64, opts ...Option) error {
	var cacheOpts options
	for _, o := range opts {
		o(&cacheOpts)
	}

	// Snapshot the fetcher; it may be swapped concurrently by Resolver.Refresh.
	b.fetcherMu.Lock()
	fr := b.fetcher
	b.fetcherMu.Unlock()

	// Align the requested byte range to chunk boundaries.
	fetchReg := region{floor(offset, b.chunkSize), ceil(offset+size-1, b.chunkSize) - 1}
	discard := make(map[region]io.Writer)
	// Collect only the chunks missing from the cache. Their contents are
	// discarded here because fetchRange itself adds every fetched chunk to the
	// cache. (The walk error is ignored; this walkFn never fails.)
	b.walkChunks(fetchReg, func(reg region) error {
		if _, err := b.cache.FetchAt(fr.genID(reg), 0, nil, cacheOpts.cacheOpts...); err != nil {
			discard[reg] = ioutil.Discard
		}
		return nil
	})
	if err := b.fetchRange(discard, &cacheOpts); err != nil {
		return err
	}

	return nil
}
|
||||
|
||||
// ReadAt reads remote chunks from specified offset for the buffer size.
// It tries to fetch as many chunks as possible from local cache.
// We can configure this function with options.
func (b *blob) ReadAt(p []byte, offset int64, opts ...Option) (int, error) {
	if len(p) == 0 || offset > b.size {
		return 0, nil
	}

	// Make the buffer chunk aligned
	allRegion := region{floor(offset, b.chunkSize), ceil(offset+int64(len(p))-1, b.chunkSize) - 1}
	allData := make(map[region]io.Writer)
	var putBufs []*bytes.Buffer
	defer func() {
		// Return every scratch buffer to the pool once its contents have been
		// committed into p.
		for _, bf := range putBufs {
			b.resolver.bufPool.Put(bf)
		}
	}()

	var readAtOpts options
	for _, o := range opts {
		o(&readAtOpts)
	}

	// Fetcher can be suddenly updated so we take and use the snapshot of it for
	// consistency.
	b.fetcherMu.Lock()
	fr := b.fetcher
	b.fetcherMu.Unlock()

	var commits []func() error
	b.walkChunks(allRegion, func(chunk region) error {
		var (
			// base: where this chunk's wanted bytes land in p.
			base = positive(chunk.b - offset)
			// lowerUnread/upperUnread: leading/trailing bytes of the chunk the
			// caller did not request (alignment padding).
			lowerUnread  = positive(offset - chunk.b)
			upperUnread  = positive(chunk.e + 1 - (offset + int64(len(p))))
			expectedSize = chunk.size() - upperUnread - lowerUnread
		)

		// Check if the content exists in the cache
		n, err := b.cache.FetchAt(fr.genID(chunk), lowerUnread, p[base:base+expectedSize], readAtOpts.cacheOpts...)
		if err == nil && n == int(expectedSize) {
			return nil
		}

		// We missed cache. Take it from remote registry.
		// We get the whole chunk here and add it to the cache so that following
		// reads against neighboring chunks can take the data without making HTTP requests.
		if lowerUnread == 0 && upperUnread == 0 {
			// We can directly store the result in the given buffer
			allData[chunk] = &byteWriter{
				p: p[base : base+chunk.size()],
			}
		} else {
			// Use temporally buffer for aligning this chunk
			bf := b.resolver.bufPool.Get().(*bytes.Buffer)
			putBufs = append(putBufs, bf)
			bf.Reset()
			bf.Grow(int(chunk.size()))
			allData[chunk] = bf

			// Function for committing the buffered chunk into the result slice.
			commits = append(commits, func() error {
				if int64(bf.Len()) != chunk.size() {
					return fmt.Errorf("unexpected data size %d; want %d",
						bf.Len(), chunk.size())
				}
				bb := bf.Bytes()[:chunk.size()]
				n := copy(p[base:], bb[lowerUnread:chunk.size()-upperUnread])
				if int64(n) != expectedSize {
					return fmt.Errorf("invalid copied data size %d; want %d",
						n, expectedSize)
				}
				return nil
			})
		}
		return nil
	})

	// Read required data
	if err := b.fetchRange(allData, &readAtOpts); err != nil {
		return 0, err
	}

	// Write all data to the result buffer
	for _, c := range commits {
		if err := c(); err != nil {
			return 0, err
		}
	}

	// Adjust the buffer size according to the blob size
	if remain := b.size - offset; int64(len(p)) >= remain {
		if remain < 0 {
			remain = 0
		}
		p = p[:remain]
	}

	return len(p), nil
}
|
||||
|
||||
// fetchRange fetches all specified chunks from local cache and remote blob.
// allData maps each chunk-aligned region to the writer that should receive
// its bytes; every fetched chunk is also added to the local cache.
func (b *blob) fetchRange(allData map[region]io.Writer, opts *options) error {
	if len(allData) == 0 {
		return nil
	}

	// Fetcher can be suddenly updated so we take and use the snapshot of it for
	// consistency.
	b.fetcherMu.Lock()
	fr := b.fetcher
	b.fetcherMu.Unlock()

	// request missed regions
	var req []region
	fetched := make(map[region]bool)
	for reg := range allData {
		req = append(req, reg)
		fetched[reg] = false
	}
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()
	mr, err := fr.fetch(ctx, req, opts)
	if err != nil {
		return err
	}
	defer mr.Close()

	// Update the check timer because we succeeded to access the blob
	b.lastCheck = time.Now()

	// chunk and cache responsed data. Regions must be aligned by chunk size.
	// TODO: Reorganize remoteData to make it be aligned by chunk size
	for {
		reg, p, err := mr.Next()
		if err == io.EOF {
			break
		} else if err != nil {
			return errors.Wrapf(err, "failed to read multipart resp")
		}
		// A response part may cover several chunks (the request squashes
		// adjacent regions), so split it back into chunk-sized pieces.
		if err := b.walkChunks(reg, func(chunk region) error {

			// Prepare the temporary buffer
			bf := b.resolver.bufPool.Get().(*bytes.Buffer)
			defer b.resolver.bufPool.Put(bf)
			bf.Reset()
			bf.Grow(int(chunk.size()))
			w := io.Writer(bf)

			// If this chunk is one of the targets, write the content to the
			// passed reader too.
			if _, ok := fetched[chunk]; ok {
				w = io.MultiWriter(bf, allData[chunk])
			}

			// Copy the target chunk
			if _, err := io.CopyN(w, p, chunk.size()); err != nil {
				return err
			} else if int64(bf.Len()) != chunk.size() {
				return fmt.Errorf("unexpected fetched data size %d; want %d",
					bf.Len(), chunk.size())
			}

			// Add the target chunk to the cache
			b.cache.Add(fr.genID(chunk), bf.Bytes()[:chunk.size()], opts.cacheOpts...)
			b.fetchedRegionSetMu.Lock()
			b.fetchedRegionSet.add(chunk)
			b.fetchedRegionSetMu.Unlock()
			fetched[chunk] = true
			return nil
		}); err != nil {
			return errors.Wrapf(err, "failed to get chunks")
		}
	}

	// Check all chunks are fetched
	var unfetched []region
	// NOTE(review): the loop value "b" shadows the receiver here; harmless but
	// worth renaming upstream.
	for c, b := range fetched {
		if !b {
			unfetched = append(unfetched, c)
		}
	}
	if unfetched != nil {
		return fmt.Errorf("failed to fetch region %v", unfetched)
	}

	return nil
}
|
||||
|
||||
type walkFunc func(reg region) error
|
||||
|
||||
// walkChunks walks chunks from begin to end in order in the specified region.
|
||||
// specified region must be aligned by chunk size.
|
||||
func (b *blob) walkChunks(allRegion region, walkFn walkFunc) error {
|
||||
if allRegion.b%b.chunkSize != 0 {
|
||||
return fmt.Errorf("region (%d, %d) must be aligned by chunk size",
|
||||
allRegion.b, allRegion.e)
|
||||
}
|
||||
for i := allRegion.b; i <= allRegion.e && i < b.size; i += b.chunkSize {
|
||||
reg := region{i, i + b.chunkSize - 1}
|
||||
if reg.e >= b.size {
|
||||
reg.e = b.size - 1
|
||||
}
|
||||
if err := walkFn(reg); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// byteWriter is an io.Writer that fills a fixed byte slice in place,
// tracking the write position without ever growing the buffer.
type byteWriter struct {
	p []byte // destination buffer
	n int    // number of bytes written so far
}

// Write copies p into the remaining space of the destination buffer. It never
// returns an error; bytes beyond the buffer's end are silently dropped.
func (w *byteWriter) Write(p []byte) (int, error) {
	written := copy(w.p[w.n:], p)
	w.n += written
	return written, nil
}
|
||||
|
||||
// floor rounds n down to the nearest multiple of unit.
func floor(n int64, unit int64) int64 {
	return n - n%unit
}

// ceil returns the next multiple of unit after n. Note that an already
// aligned n is still advanced by a full unit (callers rely on this and pass
// n-1 when they want a true ceiling).
func ceil(n int64, unit int64) int64 {
	return floor(n, unit) + unit
}

// positive clamps negative values to zero.
func positive(n int64) int64 {
	if n >= 0 {
		return n
	}
	return 0
}
|
554
vendor/github.com/containerd/stargz-snapshotter/stargz/remote/resolver.go
generated
vendored
Normal file
554
vendor/github.com/containerd/stargz-snapshotter/stargz/remote/resolver.go
generated
vendored
Normal file
|
@ -0,0 +1,554 @@
|
|||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
/*
|
||||
Copyright 2019 The Go Authors. All rights reserved.
|
||||
Use of this source code is governed by a BSD-style
|
||||
license that can be found in the NOTICE.md file.
|
||||
*/
|
||||
|
||||
package remote
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/sha256"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"mime"
|
||||
"mime/multipart"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/containerd/containerd/reference/docker"
|
||||
"github.com/containerd/stargz-snapshotter/cache"
|
||||
"github.com/containerd/stargz-snapshotter/stargz/config"
|
||||
"github.com/golang/groupcache/lru"
|
||||
"github.com/google/go-containerregistry/pkg/authn"
|
||||
"github.com/google/go-containerregistry/pkg/name"
|
||||
"github.com/google/go-containerregistry/pkg/v1/remote/transport"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
const (
	// defaultChunkSize is the fetch/cache granularity in bytes used when the
	// blob config doesn't specify one.
	defaultChunkSize = 50000
	// defaultValidInterval is how long a successful availability check
	// remains valid before the blob is re-probed.
	defaultValidInterval = 60 * time.Second
	// defaultPoolEntry is the capacity of the LRU pool of authenticated
	// transports.
	defaultPoolEntry = 3000
)
|
||||
|
||||
// NewResolver creates a Resolver that authenticates with keychain and applies
// the per-host settings in cfg (mirrors, insecure registries, pool size).
func NewResolver(keychain authn.Keychain, cfg config.ResolverConfig) *Resolver {
	if cfg.Host == nil {
		cfg.Host = make(map[string]config.HostConfig)
	}
	// Size of the LRU cache of authenticated transports (keyed by reference).
	poolEntry := cfg.ConnectionPoolEntry
	if poolEntry == 0 {
		poolEntry = defaultPoolEntry
	}
	return &Resolver{
		transport: http.DefaultTransport,
		trPool:    lru.New(poolEntry),
		keychain:  keychain,
		config:    cfg,
		// Scratch buffers shared by all blobs resolved through this Resolver.
		bufPool: sync.Pool{
			New: func() interface{} {
				return new(bytes.Buffer)
			},
		},
	}
}
|
||||
|
||||
// Resolver turns (reference, digest) pairs into remotely-backed Blobs,
// reusing authenticated transports across calls.
type Resolver struct {
	transport http.RoundTripper     // base transport wrapped with auth per registry
	trPool    *lru.Cache            // authenticated transports keyed by reference name
	trPoolMu  sync.Mutex            // guards trPool
	keychain  authn.Keychain        // registry credentials
	config    config.ResolverConfig // per-host mirror/insecure settings
	bufPool   sync.Pool             // scratch *bytes.Buffer pool shared by blobs
}
|
||||
|
||||
// Resolve returns a Blob for the given image reference and layer digest,
// backed by cache and configured by cfg. Zero config values fall back to the
// package defaults.
func (r *Resolver) Resolve(ref, digest string, cache cache.BlobCache, cfg config.BlobConfig) (Blob, error) {
	fetcher, size, err := r.resolve(ref, digest)
	if err != nil {
		return nil, err
	}
	var (
		chunkSize     int64
		checkInterval time.Duration
	)
	chunkSize = cfg.ChunkSize
	if chunkSize == 0 { // zero means "use default chunk size"
		chunkSize = defaultChunkSize
	}
	if cfg.ValidInterval == 0 { // zero means "use default interval"
		checkInterval = defaultValidInterval
	} else {
		checkInterval = time.Duration(cfg.ValidInterval) * time.Second
	}
	if cfg.CheckAlways {
		// A zero interval makes every Check() probe the registry.
		checkInterval = 0
	}
	return &blob{
		fetcher:       fetcher,
		size:          size,
		keychain:      r.keychain,
		chunkSize:     chunkSize,
		cache:         cache,
		lastCheck:     time.Now(),
		checkInterval: checkInterval,
		resolver:      r,
	}, nil
}
|
||||
|
||||
func (r *Resolver) Refresh(target Blob) error {
|
||||
b, ok := target.(*blob)
|
||||
if !ok {
|
||||
return fmt.Errorf("invalid type of blob. must be *blob")
|
||||
}
|
||||
|
||||
// refresh the fetcher
|
||||
b.fetcherMu.Lock()
|
||||
defer b.fetcherMu.Unlock()
|
||||
new, newSize, err := b.fetcher.refresh()
|
||||
if err != nil {
|
||||
return err
|
||||
} else if newSize != b.size {
|
||||
return fmt.Errorf("Invalid size of new blob %d; want %d", newSize, b.size)
|
||||
}
|
||||
|
||||
// update the blob's fetcher with new one
|
||||
b.fetcher = new
|
||||
b.lastCheck = time.Now()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// resolve locates an accessible source for the blob, trying each configured
// mirror before the original registry, and returns a fetcher bound to the
// redirect-resolved blob URL together with the blob size.
func (r *Resolver) resolve(ref, digest string) (*fetcher, int64, error) {
	var (
		nref name.Reference
		url  string
		tr   http.RoundTripper
		size int64
	)
	named, err := docker.ParseDockerRef(ref)
	if err != nil {
		return nil, 0, err
	}
	// Candidate hosts: configured mirrors first, then the original domain.
	hosts := append(r.config.Host[docker.Domain(named)].Mirrors, config.MirrorConfig{
		Host: docker.Domain(named),
	})
	// rErr accumulates the per-host failures; nil once any host succeeds.
	rErr := fmt.Errorf("failed to resolve")
	for _, h := range hosts {
		// Parse reference
		if h.Host == "" || strings.Contains(h.Host, "/") {
			rErr = errors.Wrapf(rErr, "host %q: mirror must be a domain name", h.Host)
			continue // try another host
		}
		var opts []name.Option
		if h.Insecure {
			opts = append(opts, name.Insecure)
		}
		sref := fmt.Sprintf("%s/%s", h.Host, docker.Path(named))
		nref, err = name.ParseReference(sref, opts...)
		if err != nil {
			rErr = errors.Wrapf(rErr, "host %q: failed to parse ref %q (%q): %v",
				h.Host, sref, digest, err)
			continue // try another host
		}

		// Resolve redirection and get blob URL
		url, tr, err = r.resolveReference(nref, digest)
		if err != nil {
			rErr = errors.Wrapf(rErr, "host %q: failed to resolve ref %q (%q): %v",
				h.Host, nref.String(), digest, err)
			continue // try another host
		}

		// Get size information
		size, err = getSize(url, tr)
		if err != nil {
			rErr = errors.Wrapf(rErr, "host %q: failed to get size: %v", h.Host, err)
			continue // try another host
		}

		rErr = nil // Hit one accessible mirror
		break
	}
	if rErr != nil {
		return nil, 0, errors.Wrapf(rErr, "cannot resolve ref %q (%q)", ref, digest)
	}

	return &fetcher{
		resolver: r,
		ref:      ref,
		digest:   digest,
		nref:     nref,
		url:      url,
		tr:       tr,
	}, size, nil
}
|
||||
|
||||
// resolveReference returns the (redirect-resolved) blob URL for digest under
// ref, together with an authenticated transport, reusing pooled transports
// when they are still valid.
func (r *Resolver) resolveReference(ref name.Reference, digest string) (string, http.RoundTripper, error) {
	r.trPoolMu.Lock()
	defer r.trPoolMu.Unlock()

	// Construct endpoint URL from given ref
	endpointURL := fmt.Sprintf("%s://%s/v2/%s/blobs/%s",
		ref.Context().Registry.Scheme(),
		ref.Context().RegistryStr(),
		ref.Context().RepositoryStr(),
		digest)

	// Try to use cached transport (cached per reference name)
	if tr, ok := r.trPool.Get(ref.Name()); ok {
		if url, err := redirect(endpointURL, tr.(http.RoundTripper)); err == nil {
			return url, tr.(http.RoundTripper), nil
		}
	}

	// Remove the stale transport from cache
	r.trPool.Remove(ref.Name())

	// transport is unavailable/expired so refresh the transport and try again
	tr, err := authnTransport(ref, r.transport, r.keychain)
	if err != nil {
		return "", nil, err
	}
	url, err := redirect(endpointURL, tr)
	if err != nil {
		return "", nil, err
	}

	// Update transports cache
	r.trPool.Add(ref.Name(), tr)

	return url, tr, nil
}
|
||||
|
||||
// redirect resolves the final blob URL behind endpointURL: the endpoint
// itself on a 2xx response, or the single Location it redirects to on a 3xx.
func redirect(endpointURL string, tr http.RoundTripper) (url string, err error) {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	// We use GET request for GCR.
	req, err := http.NewRequestWithContext(ctx, "GET", endpointURL, nil)
	if err != nil {
		return "", errors.Wrapf(err, "failed to make request to the registry")
	}
	req.Close = false
	// Request only the first two bytes; we care about the redirect, not data.
	req.Header.Set("Range", "bytes=0-1")
	res, err := tr.RoundTrip(req)
	if err != nil {
		return "", errors.Wrapf(err, "failed to request")
	}
	// Drain and close the body so the connection can be reused.
	defer func() {
		io.Copy(ioutil.Discard, res.Body)
		res.Body.Close()
	}()

	if res.StatusCode/100 == 2 {
		url = endpointURL
	} else if redir := res.Header.Get("Location"); redir != "" && res.StatusCode/100 == 3 {
		// TODO: Support nested redirection
		url = redir
	} else {
		return "", fmt.Errorf("failed to access to the registry with code %v", res.StatusCode)
	}

	return
}
|
||||
|
||||
// getSize issues a HEAD request against url and returns the blob size parsed
// from the Content-Length header.
func getSize(url string, tr http.RoundTripper) (int64, error) {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	req, err := http.NewRequestWithContext(ctx, "HEAD", url, nil)
	if err != nil {
		return 0, err
	}
	req.Close = false
	resp, err := tr.RoundTrip(req)
	if err != nil {
		return 0, err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return 0, fmt.Errorf("failed HEAD request with code %v", resp.StatusCode)
	}
	return strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64)
}
|
||||
|
||||
func authnTransport(ref name.Reference, tr http.RoundTripper, keychain authn.Keychain) (http.RoundTripper, error) {
|
||||
if keychain == nil {
|
||||
keychain = authn.DefaultKeychain
|
||||
}
|
||||
authn, err := keychain.Resolve(ref.Context())
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to resolve the reference %q", ref)
|
||||
}
|
||||
errCh := make(chan error)
|
||||
var rTr http.RoundTripper
|
||||
go func() {
|
||||
rTr, err = transport.New(
|
||||
ref.Context().Registry,
|
||||
authn,
|
||||
tr,
|
||||
[]string{ref.Scope(transport.PullScope)},
|
||||
)
|
||||
errCh <- err
|
||||
}()
|
||||
select {
|
||||
case err = <-errCh:
|
||||
case <-time.After(10 * time.Second):
|
||||
return nil, fmt.Errorf("authentication timeout")
|
||||
}
|
||||
return rTr, err
|
||||
}
|
||||
|
||||
// fetcher performs HTTP range requests against a resolved blob URL.
type fetcher struct {
	resolver      *Resolver
	ref           string         // image reference the blob belongs to
	digest        string         // digest of the blob
	nref          name.Reference // parsed reference actually used (may be a mirror)
	url           string         // direct (redirect-resolved) blob URL
	tr            http.RoundTripper
	singleRange   bool       // true once the server has rejected multi-range requests
	singleRangeMu sync.Mutex // guards singleRange
}

// refresh re-resolves this fetcher's reference, returning a fresh fetcher and
// the (re-checked) blob size.
func (f *fetcher) refresh() (*fetcher, int64, error) {
	return f.resolver.resolve(f.ref, f.digest)
}

// multipartReadCloser iterates over the parts of a (possibly multipart) range
// response; Next returns io.EOF after the last part.
type multipartReadCloser interface {
	Next() (region, io.Reader, error)
	Close() error
}
|
||||
|
||||
func (f *fetcher) fetch(ctx context.Context, rs []region, opts *options) (multipartReadCloser, error) {
|
||||
if len(rs) == 0 {
|
||||
return nil, fmt.Errorf("no request queried")
|
||||
}
|
||||
|
||||
var (
|
||||
tr = f.tr
|
||||
singleRangeMode = f.isSingleRangeMode()
|
||||
)
|
||||
|
||||
if opts.ctx != nil {
|
||||
ctx = opts.ctx
|
||||
}
|
||||
if opts.tr != nil {
|
||||
tr = opts.tr
|
||||
}
|
||||
|
||||
// squash requesting chunks for reducing the total size of request header
|
||||
// (servers generally have limits for the size of headers)
|
||||
// TODO: when our request has too many ranges, we need to divide it into
|
||||
// multiple requests to avoid huge header.
|
||||
var s regionSet
|
||||
for _, reg := range rs {
|
||||
s.add(reg)
|
||||
}
|
||||
requests := s.rs
|
||||
if singleRangeMode {
|
||||
// Squash requests if the layer doesn't support multi range.
|
||||
requests = []region{superRegion(requests)}
|
||||
}
|
||||
|
||||
// Request to the registry
|
||||
req, err := http.NewRequestWithContext(ctx, "GET", f.url, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var ranges string
|
||||
for _, reg := range requests {
|
||||
ranges += fmt.Sprintf("%d-%d,", reg.b, reg.e)
|
||||
}
|
||||
req.Header.Add("Range", fmt.Sprintf("bytes=%s", ranges[:len(ranges)-1]))
|
||||
req.Header.Add("Accept-Encoding", "identity")
|
||||
req.Close = false
|
||||
res, err := tr.RoundTrip(req) // NOT DefaultClient; don't want redirects
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if res.StatusCode == http.StatusOK {
|
||||
// We are getting the whole blob in one part (= status 200)
|
||||
size, err := strconv.ParseInt(res.Header.Get("Content-Length"), 10, 64)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to parse Content-Length")
|
||||
}
|
||||
return singlePartReader(region{0, size - 1}, res.Body), nil
|
||||
} else if res.StatusCode == http.StatusPartialContent {
|
||||
mediaType, params, err := mime.ParseMediaType(res.Header.Get("Content-Type"))
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "invalid media type %q", mediaType)
|
||||
}
|
||||
if strings.HasPrefix(mediaType, "multipart/") {
|
||||
// We are getting a set of chunks as a multipart body.
|
||||
return multiPartReader(res.Body, params["boundary"]), nil
|
||||
}
|
||||
|
||||
// We are getting single range
|
||||
reg, err := parseRange(res.Header.Get("Content-Range"))
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to parse Content-Range")
|
||||
}
|
||||
return singlePartReader(reg, res.Body), nil
|
||||
} else if !singleRangeMode {
|
||||
f.singleRangeMode() // fallbacks to singe range request mode
|
||||
return f.fetch(ctx, rs, opts) // retries with the single range mode
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("unexpected status code: %v", res.Status)
|
||||
}
|
||||
|
||||
func (f *fetcher) check() error {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||
defer cancel()
|
||||
req, err := http.NewRequestWithContext(ctx, "GET", f.url, nil)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "check failed: failed to make request")
|
||||
}
|
||||
req.Close = false
|
||||
req.Header.Set("Range", "bytes=0-1")
|
||||
res, err := f.tr.RoundTrip(req)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "check failed: failed to request to registry")
|
||||
}
|
||||
defer func() {
|
||||
io.Copy(ioutil.Discard, res.Body)
|
||||
res.Body.Close()
|
||||
}()
|
||||
if res.StatusCode != http.StatusOK && res.StatusCode != http.StatusPartialContent {
|
||||
return fmt.Errorf("unexpected status code %v", res.StatusCode)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// authn returns tr wrapped with authentication for this fetcher's resolved
// reference.
func (f *fetcher) authn(tr http.RoundTripper, keychain authn.Keychain) (http.RoundTripper, error) {
	return authnTransport(f.nref, tr, keychain)
}
|
||||
|
||||
func (f *fetcher) genID(reg region) string {
|
||||
sum := sha256.Sum256([]byte(fmt.Sprintf("%s-%d-%d", f.url, reg.b, reg.e)))
|
||||
return fmt.Sprintf("%x", sum)
|
||||
}
|
||||
|
||||
func (f *fetcher) singleRangeMode() {
|
||||
f.singleRangeMu.Lock()
|
||||
f.singleRange = true
|
||||
f.singleRangeMu.Unlock()
|
||||
}
|
||||
|
||||
func (f *fetcher) isSingleRangeMode() bool {
|
||||
f.singleRangeMu.Lock()
|
||||
r := f.singleRange
|
||||
f.singleRangeMu.Unlock()
|
||||
return r
|
||||
}
|
||||
|
||||
func singlePartReader(reg region, rc io.ReadCloser) multipartReadCloser {
|
||||
return &singlepartReader{
|
||||
r: rc,
|
||||
Closer: rc,
|
||||
reg: reg,
|
||||
}
|
||||
}
|
||||
|
||||
type singlepartReader struct {
|
||||
io.Closer
|
||||
r io.Reader
|
||||
reg region
|
||||
called bool
|
||||
}
|
||||
|
||||
func (sr *singlepartReader) Next() (region, io.Reader, error) {
|
||||
if !sr.called {
|
||||
sr.called = true
|
||||
return sr.reg, sr.r, nil
|
||||
}
|
||||
return region{}, nil, io.EOF
|
||||
}
|
||||
|
||||
func multiPartReader(rc io.ReadCloser, boundary string) multipartReadCloser {
|
||||
return &multipartReader{
|
||||
m: multipart.NewReader(rc, boundary),
|
||||
Closer: rc,
|
||||
}
|
||||
}
|
||||
|
||||
type multipartReader struct {
|
||||
io.Closer
|
||||
m *multipart.Reader
|
||||
}
|
||||
|
||||
func (sr *multipartReader) Next() (region, io.Reader, error) {
|
||||
p, err := sr.m.NextPart()
|
||||
if err != nil {
|
||||
return region{}, nil, err
|
||||
}
|
||||
reg, err := parseRange(p.Header.Get("Content-Range"))
|
||||
if err != nil {
|
||||
return region{}, nil, errors.Wrapf(err, "failed to parse Content-Range")
|
||||
}
|
||||
return reg, p, nil
|
||||
}
|
||||
|
||||
func parseRange(header string) (region, error) {
|
||||
submatches := contentRangeRegexp.FindStringSubmatch(header)
|
||||
if len(submatches) < 4 {
|
||||
return region{}, fmt.Errorf("Content-Range %q doesn't have enough information", header)
|
||||
}
|
||||
begin, err := strconv.ParseInt(submatches[1], 10, 64)
|
||||
if err != nil {
|
||||
return region{}, errors.Wrapf(err, "failed to parse beginning offset %q", submatches[1])
|
||||
}
|
||||
end, err := strconv.ParseInt(submatches[2], 10, 64)
|
||||
if err != nil {
|
||||
return region{}, errors.Wrapf(err, "failed to parse end offset %q", submatches[2])
|
||||
}
|
||||
|
||||
return region{begin, end}, nil
|
||||
}
|
||||
|
||||
// Option configures per-call behavior of Blob operations.
type Option func(*options)

// options collects the settings applied by Option values.
type options struct {
	ctx       context.Context   // overrides the default request context/timeout
	tr        http.RoundTripper // overrides the fetcher's transport
	cacheOpts []cache.Option    // forwarded to the chunk cache
}

// WithContext sets the context used for the remote request.
func WithContext(ctx context.Context) Option {
	return func(opts *options) {
		opts.ctx = ctx
	}
}

// WithRoundTripper sets the transport used for the remote request.
func WithRoundTripper(tr http.RoundTripper) Option {
	return func(opts *options) {
		opts.tr = tr
	}
}

// WithCacheOpts sets options forwarded to the chunk cache.
func WithCacheOpts(cacheOpts ...cache.Option) Option {
	return func(opts *options) {
		opts.cacheOpts = cacheOpts
	}
}
|
109
vendor/github.com/containerd/stargz-snapshotter/stargz/remote/util.go
generated
vendored
Normal file
109
vendor/github.com/containerd/stargz-snapshotter/stargz/remote/util.go
generated
vendored
Normal file
|
@ -0,0 +1,109 @@
|
|||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
/*
|
||||
Copyright 2019 The Go Authors. All rights reserved.
|
||||
Use of this source code is governed by a BSD-style
|
||||
license that can be found in the NOTICE.md file.
|
||||
*/
|
||||
|
||||
package remote
|
||||
|
||||
// region is an HTTP-range-request-compliant byte range.
// "b" is the beginning byte of the range and "e" is the end;
// "e" is inclusive, matching HTTP's range expression.
type region struct{ b, e int64 }

// size returns the number of bytes in the (inclusive) region.
func (c region) size() int64 {
	return c.e - c.b + 1
}

// superRegion returns the smallest region covering every region in regs.
// regs must be non-empty.
func superRegion(regs []region) region {
	super := regs[0]
	for _, reg := range regs[1:] {
		if reg.b < super.b {
			super.b = reg.b
		}
		if reg.e > super.e {
			super.e = reg.e
		}
	}
	return super
}

// regionSet is a set of regions
type regionSet struct {
	rs []region // must be kept sorted
}
|
||||
|
||||
// add attempts to merge r to rs.rs with squashing the regions as
|
||||
// small as possible. This operation takes O(n).
|
||||
// TODO: more efficient way to do it.
|
||||
func (rs *regionSet) add(r region) {
|
||||
// Iterate over the sorted region slice from the tail.
|
||||
// a) When an overwrap occurs, adjust `r` to fully contain the looking region
|
||||
// `l` and remove `l` from region slice.
|
||||
// b) Once l.e become less than r.b, no overwrap will occur again. So immediately
|
||||
// insert `r` which fully contains all overwrapped regions, to the region slice.
|
||||
// Here, `r` is inserted to the region slice with keeping it sorted, without
|
||||
// overwrapping to any regions.
|
||||
// *) If any `l` contains `r`, we don't need to do anything so return immediately.
|
||||
for i := len(rs.rs) - 1; i >= 0; i-- {
|
||||
l := &rs.rs[i]
|
||||
|
||||
// *) l contains r
|
||||
if l.b <= r.b && r.e <= l.e {
|
||||
return
|
||||
}
|
||||
|
||||
// a) r overwraps to l so adjust r to fully contain l and reomve l
|
||||
// from region slice.
|
||||
if l.b <= r.b && r.b <= l.e+1 && l.e <= r.e {
|
||||
r.b = l.b
|
||||
rs.rs = append(rs.rs[:i], rs.rs[i+1:]...)
|
||||
continue
|
||||
}
|
||||
if r.b <= l.b && l.b <= r.e+1 && r.e <= l.e {
|
||||
r.e = l.e
|
||||
rs.rs = append(rs.rs[:i], rs.rs[i+1:]...)
|
||||
continue
|
||||
}
|
||||
if r.b <= l.b && l.e <= r.e {
|
||||
rs.rs = append(rs.rs[:i], rs.rs[i+1:]...)
|
||||
continue
|
||||
}
|
||||
|
||||
// b) No overwrap will occur after this iteration. Instert r to the
|
||||
// region slice immediately.
|
||||
if l.e < r.b {
|
||||
rs.rs = append(rs.rs[:i+1], append([]region{r}, rs.rs[i+1:]...)...)
|
||||
return
|
||||
}
|
||||
|
||||
// No overwrap occurs yet. See the next region.
|
||||
}
|
||||
|
||||
// r is the topmost region among regions in the slice.
|
||||
rs.rs = append([]region{r}, rs.rs...)
|
||||
}
|
||||
|
||||
func (rs *regionSet) totalSize() int64 {
|
||||
var sz int64
|
||||
for _, f := range rs.rs {
|
||||
sz += f.size()
|
||||
}
|
||||
return sz
|
||||
}
|
320
vendor/github.com/containerd/stargz-snapshotter/stargz/verify/verify.go
generated
vendored
Normal file
320
vendor/github.com/containerd/stargz-snapshotter/stargz/verify/verify.go
generated
vendored
Normal file
|
@ -0,0 +1,320 @@
|
|||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package verify
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"strconv"
|
||||
"sync"
|
||||
|
||||
"github.com/google/crfs/stargz"
|
||||
digest "github.com/opencontainers/go-digest"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
const (
|
||||
// TOCJSONDigestAnnotation is an annotation for image manifest. This stores the
|
||||
// digest of the TOC JSON
|
||||
TOCJSONDigestAnnotation = "containerd.io/snapshot/stargz/toc.digest"
|
||||
)
|
||||
|
||||
// jtoc is a TOC JSON schema which contains additional fields for chunk-level
|
||||
// content verification. This is still backward compatible to the official
|
||||
// definition:
|
||||
// https://github.com/google/crfs/blob/71d77da419c90be7b05d12e59945ac7a8c94a543/stargz/stargz.go
|
||||
type jtoc struct {
|
||||
|
||||
// Version is a field to store the version information of TOC JSON. This field
|
||||
// is unused in this package but is for the backwards-compatibility to stargz.
|
||||
Version int `json:"version"`
|
||||
|
||||
// Entries is a field to store TOCEntries in this archive. These TOCEntries
|
||||
// are extended in this package for content verification, with keeping the
|
||||
// backward-compatibility with stargz.
|
||||
Entries []*TOCEntry `json:"entries"`
|
||||
}
|
||||
|
||||
// TOCEntry extends stargz.TOCEntry with an additional field for storing chunks
|
||||
// digests.
|
||||
type TOCEntry struct {
|
||||
stargz.TOCEntry
|
||||
|
||||
// ChunkDigest stores an OCI digest of the chunk. This must be formed
|
||||
// as "sha256:0123abcd...".
|
||||
ChunkDigest string `json:"chunkDigest,omitempty"`
|
||||
}
|
||||
|
||||
// NewVerifiableStagz takes stargz archive and returns modified one which contains
|
||||
// contents digests in the TOC JSON. This also returns the digest of TOC JSON
|
||||
// itself which can be used for verifying the TOC JSON.
|
||||
func NewVerifiableStagz(sgz *io.SectionReader) (newSgz io.Reader, jtocDigest digest.Digest, err error) {
|
||||
// Extract TOC JSON and some related parts
|
||||
toc, err := extractTOCJSON(sgz)
|
||||
if err != nil {
|
||||
return nil, "", errors.Wrap(err, "failed to extract TOC JSON")
|
||||
}
|
||||
|
||||
// Rewrite the original TOC JSON with chunks digests
|
||||
if err := addDigests(toc.jtoc, sgz); err != nil {
|
||||
return nil, "", errors.Wrap(err, "failed to digest stargz")
|
||||
}
|
||||
tocJSON, err := json.Marshal(toc.jtoc)
|
||||
if err != nil {
|
||||
return nil, "", errors.Wrap(err, "failed to marshal TOC JSON")
|
||||
}
|
||||
dgstr := digest.Canonical.Digester()
|
||||
if _, err := io.CopyN(dgstr.Hash(), bytes.NewReader(tocJSON), int64(len(tocJSON))); err != nil {
|
||||
return nil, "", errors.Wrap(err, "failed to calculate digest of TOC JSON")
|
||||
}
|
||||
pr, pw := io.Pipe()
|
||||
go func() {
|
||||
zw, err := gzip.NewWriterLevel(pw, gzip.BestCompression)
|
||||
if err != nil {
|
||||
pw.CloseWithError(err)
|
||||
return
|
||||
}
|
||||
zw.Extra = []byte("stargz.toc") // this magic string might not be necessary but let's follow the official behaviour: https://github.com/google/crfs/blob/71d77da419c90be7b05d12e59945ac7a8c94a543/stargz/stargz.go#L596
|
||||
tw := tar.NewWriter(zw)
|
||||
if err := tw.WriteHeader(&tar.Header{
|
||||
Typeflag: tar.TypeReg,
|
||||
Name: stargz.TOCTarName,
|
||||
Size: int64(len(tocJSON)),
|
||||
}); err != nil {
|
||||
pw.CloseWithError(err)
|
||||
return
|
||||
}
|
||||
if _, err := tw.Write(tocJSON); err != nil {
|
||||
pw.CloseWithError(err)
|
||||
return
|
||||
}
|
||||
if err := tw.Close(); err != nil {
|
||||
pw.CloseWithError(err)
|
||||
return
|
||||
}
|
||||
if err := zw.Close(); err != nil {
|
||||
pw.CloseWithError(err)
|
||||
return
|
||||
}
|
||||
pw.Close()
|
||||
}()
|
||||
|
||||
// Reconstruct stargz file with the modified TOC JSON
|
||||
if _, err := sgz.Seek(0, io.SeekStart); err != nil {
|
||||
return nil, "", errors.Wrap(err, "failed to reset the seek position of stargz")
|
||||
}
|
||||
if _, err := toc.footer.Seek(0, io.SeekStart); err != nil {
|
||||
return nil, "", errors.Wrap(err, "failed to reset the seek position of footer")
|
||||
}
|
||||
return io.MultiReader(
|
||||
io.LimitReader(sgz, toc.offset), // Original stargz (before TOC JSON)
|
||||
pr, // Rewritten TOC JSON
|
||||
toc.footer, // Unmodified footer (because tocOffset is unchanged)
|
||||
), dgstr.Digest(), nil
|
||||
}
|
||||
|
||||
// StargzTOC checks that the TOC JSON in the passed stargz blob matches the
|
||||
// passed digests and that the TOC JSON contains digests for all chunks
|
||||
// contained in the stargz blob. If the verification succceeds, this function
|
||||
// returns TOCEntryVerifier which holds all chunk digests in the stargz blob.
|
||||
func StargzTOC(sgz *io.SectionReader, tocDigest digest.Digest) (TOCEntryVerifier, error) {
|
||||
toc, err := extractTOCJSON(sgz)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to extract TOC JSON")
|
||||
}
|
||||
|
||||
// Verify the digest of TOC JSON
|
||||
if toc.digest != tocDigest {
|
||||
return nil, fmt.Errorf("invalid TOC JSON %q; want %q", toc.digest, tocDigest)
|
||||
}
|
||||
|
||||
// Collect all chunks digests in this layer
|
||||
digestMap := make(map[int64]digest.Digest) // map from chunk offset to the digest
|
||||
for _, e := range toc.jtoc.Entries {
|
||||
if e.Type == "reg" || e.Type == "chunk" {
|
||||
if e.Type == "reg" && e.Size == 0 {
|
||||
continue // ignores empty file
|
||||
}
|
||||
|
||||
// offset must be unique in stargz blob
|
||||
if _, ok := digestMap[e.Offset]; ok {
|
||||
return nil, fmt.Errorf("offset %d found twice", e.Offset)
|
||||
}
|
||||
|
||||
// all chunk entries must contain digest
|
||||
if e.ChunkDigest == "" {
|
||||
return nil, fmt.Errorf("ChunkDigest of %q(off=%d) not found in TOC JSON",
|
||||
e.Name, e.Offset)
|
||||
}
|
||||
|
||||
d, err := digest.Parse(e.ChunkDigest)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to parse digest %q", e.ChunkDigest)
|
||||
}
|
||||
digestMap[e.Offset] = d
|
||||
}
|
||||
}
|
||||
|
||||
return &verifier{digestMap: digestMap}, nil
|
||||
}
|
||||
|
||||
// TOCEntryVerifier holds verifiers that are usable for verifying chunks contained
|
||||
// in a stargz blob.
|
||||
type TOCEntryVerifier interface {
|
||||
|
||||
// Verifier provides a content verifier that can be used for verifying the
|
||||
// contents of the specified TOCEntry.
|
||||
Verifier(ce *stargz.TOCEntry) (digest.Verifier, error)
|
||||
}
|
||||
|
||||
// verifier is an implementation of TOCEntryVerifier which holds verifiers keyed by
|
||||
// offset of the chunk.
|
||||
type verifier struct {
|
||||
digestMap map[int64]digest.Digest
|
||||
digestMapMu sync.Mutex
|
||||
}
|
||||
|
||||
// Verifier returns a content verifier specified by TOCEntry.
|
||||
func (v *verifier) Verifier(ce *stargz.TOCEntry) (digest.Verifier, error) {
|
||||
v.digestMapMu.Lock()
|
||||
defer v.digestMapMu.Unlock()
|
||||
d, ok := v.digestMap[ce.Offset]
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("verifier for offset=%d,size=%d hasn't been registered",
|
||||
ce.Offset, ce.ChunkSize)
|
||||
}
|
||||
return d.Verifier(), nil
|
||||
}
|
||||
|
||||
// tocInfo is a set of information related to footer and TOC JSON in a stargz
|
||||
type tocInfo struct {
|
||||
jtoc *jtoc // decoded TOC JSON
|
||||
digest digest.Digest // OCI digest of TOC JSON. formed as "sha256:abcd1234..."
|
||||
offset int64 // offset of TOC JSON in the stargz blob
|
||||
footer io.ReadSeeker // stargz footer
|
||||
}
|
||||
|
||||
// extractTOCJSON extracts TOC JSON from the given stargz reader, with avoiding
|
||||
// scanning the entire blob leveraging its footer. The parsed TOC JSON contains
|
||||
// additional fields usable for chunk-level content verification.
|
||||
func extractTOCJSON(sgz *io.SectionReader) (toc *tocInfo, err error) {
|
||||
// Parse stargz footer and get the offset of TOC JSON
|
||||
if sgz.Size() < stargz.FooterSize {
|
||||
return nil, errors.New("stargz data is too small")
|
||||
}
|
||||
footerReader := io.NewSectionReader(sgz, sgz.Size()-stargz.FooterSize, stargz.FooterSize)
|
||||
zr, err := gzip.NewReader(footerReader)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to uncompress footer")
|
||||
}
|
||||
defer zr.Close()
|
||||
if len(zr.Header.Extra) != 22 {
|
||||
return nil, errors.Wrap(err, "invalid extra size; must be 22 bytes")
|
||||
} else if string(zr.Header.Extra[16:]) != "STARGZ" {
|
||||
return nil, errors.New("invalid footer; extra must contain magic string \"STARGZ\"")
|
||||
}
|
||||
tocOffset, err := strconv.ParseInt(string(zr.Header.Extra[:16]), 16, 64)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "invalid footer; failed to get the offset of TOC JSON")
|
||||
} else if tocOffset > sgz.Size() {
|
||||
return nil, fmt.Errorf("invalid footer; offset of TOC JSON is too large (%d > %d)",
|
||||
tocOffset, sgz.Size())
|
||||
}
|
||||
|
||||
// Decode the TOC JSON
|
||||
tocReader := io.NewSectionReader(sgz, tocOffset, sgz.Size()-tocOffset-stargz.FooterSize)
|
||||
if err := zr.Reset(tocReader); err != nil {
|
||||
return nil, errors.Wrap(err, "failed to uncompress TOC JSON targz entry")
|
||||
}
|
||||
tr := tar.NewReader(zr)
|
||||
h, err := tr.Next()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to get TOC JSON tar entry")
|
||||
} else if h.Name != stargz.TOCTarName {
|
||||
return nil, fmt.Errorf("invalid TOC JSON tar entry name %q; must be %q",
|
||||
h.Name, stargz.TOCTarName)
|
||||
}
|
||||
dgstr := digest.Canonical.Digester()
|
||||
decodedJTOC := new(jtoc)
|
||||
if err := json.NewDecoder(io.TeeReader(tr, dgstr.Hash())).Decode(&decodedJTOC); err != nil {
|
||||
return nil, errors.Wrap(err, "failed to decode TOC JSON")
|
||||
}
|
||||
if _, err := tr.Next(); err != io.EOF {
|
||||
// We only accept stargz file that its TOC JSON resides at the end of that
|
||||
// file to avoid changing the offsets of the following file entries by
|
||||
// rewriting TOC JSON (The official stargz lib also puts TOC JSON at the end
|
||||
// of the stargz file at this mement).
|
||||
// TODO: in the future, we should relax this restriction.
|
||||
return nil, errors.New("TOC JSON must reside at the end of targz")
|
||||
}
|
||||
|
||||
return &tocInfo{
|
||||
jtoc: decodedJTOC,
|
||||
digest: dgstr.Digest(),
|
||||
offset: tocOffset,
|
||||
footer: footerReader,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// addDigests calculates chunk digests and adds them to the TOC JSON
|
||||
func addDigests(toc *jtoc, sgz *io.SectionReader) error {
|
||||
var (
|
||||
sizeMap = make(map[string]int64)
|
||||
zr *gzip.Reader
|
||||
err error
|
||||
)
|
||||
for _, e := range toc.Entries {
|
||||
if e.Type == "reg" || e.Type == "chunk" {
|
||||
if e.Type == "reg" {
|
||||
sizeMap[e.Name] = e.Size // saves the file size for futural use
|
||||
}
|
||||
size := e.ChunkSize
|
||||
if size == 0 {
|
||||
// In the seriarized form, ChunkSize field of the last chunk in a file
|
||||
// is zero so we need to calculate it.
|
||||
size = sizeMap[e.Name] - e.ChunkOffset
|
||||
}
|
||||
if size > 0 {
|
||||
if _, err := sgz.Seek(e.Offset, io.SeekStart); err != nil {
|
||||
return errors.Wrapf(err, "failed to seek to %q", e.Name)
|
||||
}
|
||||
if zr == nil {
|
||||
if zr, err = gzip.NewReader(sgz); err != nil {
|
||||
return errors.Wrapf(err, "failed to decomp %q", e.Name)
|
||||
}
|
||||
} else {
|
||||
if err := zr.Reset(sgz); err != nil {
|
||||
return errors.Wrapf(err, "failed to decomp %q", e.Name)
|
||||
}
|
||||
}
|
||||
dgstr := digest.Canonical.Digester()
|
||||
if _, err := io.CopyN(dgstr.Hash(), zr, size); err != nil {
|
||||
return errors.Wrapf(err, "failed to read %q; (file size: %d, e.Offset: %d, e.ChunkOffset: %d, chunk size: %d, end offset: %d, size of stargz file: %d)",
|
||||
e.Name, sizeMap[e.Name], e.Offset, e.ChunkOffset, size,
|
||||
e.Offset+size, sgz.Size())
|
||||
}
|
||||
e.ChunkDigest = dgstr.Digest().String()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
|
@ -0,0 +1,153 @@
|
|||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package task
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"golang.org/x/sync/semaphore"
|
||||
)
|
||||
|
||||
// NewBackgroundTaskManager provides a task manager. You can specify the
|
||||
// concurrency of background tasks. When running a background task, this will be
|
||||
// forced to wait until no prioritized task is running for some period. You can
|
||||
// specify the period through the argument of this function, too.
|
||||
func NewBackgroundTaskManager(concurrency int64, period time.Duration) *BackgroundTaskManager {
|
||||
return &BackgroundTaskManager{
|
||||
backgroundSem: semaphore.NewWeighted(concurrency),
|
||||
prioritizedTaskSilencePeriod: period,
|
||||
prioritizedTaskStartNotify: make(chan struct{}),
|
||||
prioritizedTaskDoneCond: sync.NewCond(&sync.Mutex{}),
|
||||
}
|
||||
}
|
||||
|
||||
// BackgroundTaskManager is a task manager which manages prioritized tasks and
|
||||
// background tasks execution. Background tasks are less important than
|
||||
// prioritized tasks. You can let these background tasks not to use compute
|
||||
// resources (CPU, NW, etc...) during more important tasks(=prioritized tasks)
|
||||
// running.
|
||||
//
|
||||
// When you run a prioritised task and don't want background tasks to use
|
||||
// resources you can tell it this manager by calling DoPrioritizedTask method.
|
||||
// DonePrioritizedTask method must be called at the end of the prioritised task
|
||||
// execution.
|
||||
//
|
||||
// For running a background task, you can use InvokeBackgroundTask method. The
|
||||
// background task must be able to be cancelled via context.Context argument.
|
||||
// The task is forced to wait until no prioritized task is running for some
|
||||
// period. You can specify the period when making this manager instance. The
|
||||
// limited number of background tasks run simultaneously and you can specify the
|
||||
// concurrency when making this manager instance too. If a prioritized task
|
||||
// starts during the execution of background tasks, all background tasks running
|
||||
// will be cancelled via context. These cancelled tasks will be executed again
|
||||
// later, same as other background tasks (when no prioritized task is running
|
||||
// for some period).
|
||||
type BackgroundTaskManager struct {
|
||||
prioritizedTasks int64
|
||||
backgroundSem *semaphore.Weighted
|
||||
prioritizedTaskSilencePeriod time.Duration
|
||||
prioritizedTaskStartNotify chan struct{}
|
||||
prioritizedTaskStartNotifyMu sync.Mutex
|
||||
prioritizedTaskDoneCond *sync.Cond
|
||||
}
|
||||
|
||||
// DoPrioritizedTask tells the manager that we are running a prioritized task
|
||||
// and don't want background tasks to disturb resources(CPU, NW, etc...)
|
||||
func (ts *BackgroundTaskManager) DoPrioritizedTask() {
|
||||
// Notify the prioritized task execution to background tasks.
|
||||
ts.prioritizedTaskStartNotifyMu.Lock()
|
||||
atomic.AddInt64(&ts.prioritizedTasks, 1)
|
||||
close(ts.prioritizedTaskStartNotify)
|
||||
ts.prioritizedTaskStartNotify = make(chan struct{})
|
||||
ts.prioritizedTaskStartNotifyMu.Unlock()
|
||||
}
|
||||
|
||||
// DonePrioritizedTask tells the manager that we've done a prioritized task
|
||||
// and don't want background tasks to disturb resources(CPU, NW, etc...)
|
||||
func (ts *BackgroundTaskManager) DonePrioritizedTask() {
|
||||
go func() {
|
||||
// Notify the task completion after `ts.prioritizedTaskSilencePeriod`
|
||||
// so that background tasks aren't invoked immediately.
|
||||
time.Sleep(ts.prioritizedTaskSilencePeriod)
|
||||
atomic.AddInt64(&ts.prioritizedTasks, -1)
|
||||
ts.prioritizedTaskDoneCond.Broadcast()
|
||||
}()
|
||||
}
|
||||
|
||||
// InvokeBackgroundTask invokes a background task. The task is started only when
|
||||
// no prioritized tasks are running. Prioritized task's execution stops the
|
||||
// execution of all background tasks. Background task must be able to be
|
||||
// cancelled via context.Context argument and be able to be restarted again.
|
||||
func (ts *BackgroundTaskManager) InvokeBackgroundTask(do func(context.Context), timeout time.Duration) {
|
||||
for {
|
||||
// Wait until all prioritized tasks are done
|
||||
for {
|
||||
if atomic.LoadInt64(&ts.prioritizedTasks) <= 0 {
|
||||
break
|
||||
}
|
||||
|
||||
// waits until a prioritized task is done
|
||||
ts.prioritizedTaskDoneCond.L.Lock()
|
||||
if atomic.LoadInt64(&ts.prioritizedTasks) > 0 {
|
||||
ts.prioritizedTaskDoneCond.Wait()
|
||||
}
|
||||
ts.prioritizedTaskDoneCond.L.Unlock()
|
||||
}
|
||||
|
||||
// limited number of background tasks can run at once.
|
||||
// if prioritized tasks are running, cancel this task.
|
||||
if func() bool {
|
||||
ts.backgroundSem.Acquire(context.Background(), 1)
|
||||
defer ts.backgroundSem.Release(1)
|
||||
|
||||
// Get notify the prioritized tasks execution.
|
||||
ts.prioritizedTaskStartNotifyMu.Lock()
|
||||
ch := ts.prioritizedTaskStartNotify
|
||||
tasks := atomic.LoadInt64(&ts.prioritizedTasks)
|
||||
ts.prioritizedTaskStartNotifyMu.Unlock()
|
||||
if tasks > 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
// Invoke the background task. if some prioritized tasks added during
|
||||
// execution, cancel it and try it later.
|
||||
var (
|
||||
done = make(chan struct{})
|
||||
ctx, cancel = context.WithTimeout(context.Background(), timeout)
|
||||
)
|
||||
defer cancel()
|
||||
go func() {
|
||||
do(ctx)
|
||||
close(done)
|
||||
}()
|
||||
|
||||
// Wait until the background task is done or canceled.
|
||||
select {
|
||||
case <-ch: // some prioritized tasks started; retry it later
|
||||
cancel()
|
||||
return false
|
||||
case <-done: // All tasks completed
|
||||
}
|
||||
return true
|
||||
}() {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
|
@ -1,4 +1,4 @@
|
|||
package credentials
|
||||
|
||||
// Version holds a string describing the current version
|
||||
const Version = "0.6.0"
|
||||
const Version = "0.6.3"
|
||||
|
|
|
@ -113,7 +113,7 @@ func SplitProtoPort(rawPort string) (string, string) {
|
|||
}
|
||||
|
||||
func validateProto(proto string) bool {
|
||||
for _, availableProto := range []string{"tcp", "udp"} {
|
||||
for _, availableProto := range []string{"tcp", "udp", "sctp"} {
|
||||
if availableProto == proto {
|
||||
return true
|
||||
}
|
||||
|
|
|
@ -4,7 +4,6 @@ package tlsconfig
|
|||
|
||||
import (
|
||||
"crypto/x509"
|
||||
|
||||
)
|
||||
|
||||
// SystemCertPool returns an new empty cert pool,
|
||||
|
|
|
@ -46,8 +46,6 @@ var acceptedCBCCiphers = []uint16{
|
|||
tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
|
||||
tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
|
||||
tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
|
||||
tls.TLS_RSA_WITH_AES_256_CBC_SHA,
|
||||
tls.TLS_RSA_WITH_AES_128_CBC_SHA,
|
||||
}
|
||||
|
||||
// DefaultServerAcceptedCiphers should be uses by code which already has a crypto/tls
|
||||
|
@ -65,22 +63,34 @@ var allTLSVersions = map[uint16]struct{}{
|
|||
}
|
||||
|
||||
// ServerDefault returns a secure-enough TLS configuration for the server TLS configuration.
|
||||
func ServerDefault() *tls.Config {
|
||||
return &tls.Config{
|
||||
// Avoid fallback to SSL protocols < TLS1.0
|
||||
MinVersion: tls.VersionTLS10,
|
||||
func ServerDefault(ops ...func(*tls.Config)) *tls.Config {
|
||||
tlsconfig := &tls.Config{
|
||||
// Avoid fallback by default to SSL protocols < TLS1.2
|
||||
MinVersion: tls.VersionTLS12,
|
||||
PreferServerCipherSuites: true,
|
||||
CipherSuites: DefaultServerAcceptedCiphers,
|
||||
}
|
||||
|
||||
for _, op := range ops {
|
||||
op(tlsconfig)
|
||||
}
|
||||
|
||||
return tlsconfig
|
||||
}
|
||||
|
||||
// ClientDefault returns a secure-enough TLS configuration for the client TLS configuration.
|
||||
func ClientDefault() *tls.Config {
|
||||
return &tls.Config{
|
||||
func ClientDefault(ops ...func(*tls.Config)) *tls.Config {
|
||||
tlsconfig := &tls.Config{
|
||||
// Prefer TLS1.2 as the client minimum
|
||||
MinVersion: tls.VersionTLS12,
|
||||
CipherSuites: clientCipherSuites,
|
||||
}
|
||||
|
||||
for _, op := range ops {
|
||||
op(tlsconfig)
|
||||
}
|
||||
|
||||
return tlsconfig
|
||||
}
|
||||
|
||||
// certPool returns an X.509 certificate pool from `caFile`, the certificate file.
|
||||
|
|
|
@ -0,0 +1,191 @@
|
|||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction, and
|
||||
distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by the copyright
|
||||
owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all other entities
|
||||
that control, are controlled by, or are under common control with that entity.
|
||||
For the purposes of this definition, "control" means (i) the power, direct or
|
||||
indirect, to cause the direction or management of such entity, whether by
|
||||
contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity exercising
|
||||
permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications, including
|
||||
but not limited to software source code, documentation source, and configuration
|
||||
files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical transformation or
|
||||
translation of a Source form, including but not limited to compiled object code,
|
||||
generated documentation, and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or Object form, made
|
||||
available under the License, as indicated by a copyright notice that is included
|
||||
in or attached to the work (an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object form, that
|
||||
is based on (or derived from) the Work and for which the editorial revisions,
|
||||
annotations, elaborations, or other modifications represent, as a whole, an
|
||||
original work of authorship. For the purposes of this License, Derivative Works
|
||||
shall not include works that remain separable from, or merely link (or bind by
|
||||
name) to the interfaces of, the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including the original version
|
||||
of the Work and any modifications or additions to that Work or Derivative Works
|
||||
thereof, that is intentionally submitted to Licensor for inclusion in the Work
|
||||
by the copyright owner or by an individual or Legal Entity authorized to submit
|
||||
on behalf of the copyright owner. For the purposes of this definition,
|
||||
"submitted" means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems, and
|
||||
issue tracking systems that are managed by, or on behalf of, the Licensor for
|
||||
the purpose of discussing and improving the Work, but excluding communication
|
||||
that is conspicuously marked or otherwise designated in writing by the copyright
|
||||
owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
|
||||
of whom a Contribution has been received by Licensor and subsequently
|
||||
incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License.
|
||||
|
||||
Subject to the terms and conditions of this License, each Contributor hereby
|
||||
grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
|
||||
irrevocable copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the Work and such
|
||||
Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License.
|
||||
|
||||
Subject to the terms and conditions of this License, each Contributor hereby
|
||||
grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
|
||||
irrevocable (except as stated in this section) patent license to make, have
|
||||
made, use, offer to sell, sell, import, and otherwise transfer the Work, where
|
||||
such license applies only to those patent claims licensable by such Contributor
|
||||
that are necessarily infringed by their Contribution(s) alone or by combination
|
||||
of their Contribution(s) with the Work to which such Contribution(s) was
|
||||
submitted. If You institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work or a
|
||||
Contribution incorporated within the Work constitutes direct or contributory
|
||||
patent infringement, then any patent licenses granted to You under this License
|
||||
for that Work shall terminate as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution.
|
||||
|
||||
You may reproduce and distribute copies of the Work or Derivative Works thereof
|
||||
in any medium, with or without modifications, and in Source or Object form,
|
||||
provided that You meet the following conditions:
|
||||
|
||||
You must give any other recipients of the Work or Derivative Works a copy of
|
||||
this License; and
|
||||
You must cause any modified files to carry prominent notices stating that You
|
||||
changed the files; and
|
||||
You must retain, in the Source form of any Derivative Works that You distribute,
|
||||
all copyright, patent, trademark, and attribution notices from the Source form
|
||||
of the Work, excluding those notices that do not pertain to any part of the
|
||||
Derivative Works; and
|
||||
If the Work includes a "NOTICE" text file as part of its distribution, then any
|
||||
Derivative Works that You distribute must include a readable copy of the
|
||||
attribution notices contained within such NOTICE file, excluding those notices
|
||||
that do not pertain to any part of the Derivative Works, in at least one of the
|
||||
following places: within a NOTICE text file distributed as part of the
|
||||
Derivative Works; within the Source form or documentation, if provided along
|
||||
with the Derivative Works; or, within a display generated by the Derivative
|
||||
Works, if and wherever such third-party notices normally appear. The contents of
|
||||
the NOTICE file are for informational purposes only and do not modify the
|
||||
License. You may add Your own attribution notices within Derivative Works that
|
||||
You distribute, alongside or as an addendum to the NOTICE text from the Work,
|
||||
provided that such additional attribution notices cannot be construed as
|
||||
modifying the License.
|
||||
You may add Your own copyright statement to Your modifications and may provide
|
||||
additional or different license terms and conditions for use, reproduction, or
|
||||
distribution of Your modifications, or for any such Derivative Works as a whole,
|
||||
provided Your use, reproduction, and distribution of the Work otherwise complies
|
||||
with the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions.
|
||||
|
||||
Unless You explicitly state otherwise, any Contribution intentionally submitted
|
||||
for inclusion in the Work by You to the Licensor shall be under the terms and
|
||||
conditions of this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify the terms of
|
||||
any separate license agreement you may have executed with Licensor regarding
|
||||
such Contributions.
|
||||
|
||||
6. Trademarks.
|
||||
|
||||
This License does not grant permission to use the trade names, trademarks,
|
||||
service marks, or product names of the Licensor, except as required for
|
||||
reasonable and customary use in describing the origin of the Work and
|
||||
reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty.
|
||||
|
||||
Unless required by applicable law or agreed to in writing, Licensor provides the
|
||||
Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
|
||||
including, without limitation, any warranties or conditions of TITLE,
|
||||
NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
|
||||
solely responsible for determining the appropriateness of using or
|
||||
redistributing the Work and assume any risks associated with Your exercise of
|
||||
permissions under this License.
|
||||
|
||||
8. Limitation of Liability.
|
||||
|
||||
In no event and under no legal theory, whether in tort (including negligence),
|
||||
contract, or otherwise, unless required by applicable law (such as deliberate
|
||||
and grossly negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special, incidental,
|
||||
or consequential damages of any character arising as a result of this License or
|
||||
out of the use or inability to use the Work (including but not limited to
|
||||
damages for loss of goodwill, work stoppage, computer failure or malfunction, or
|
||||
any and all other commercial damages or losses), even if such Contributor has
|
||||
been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability.
|
||||
|
||||
While redistributing the Work or Derivative Works thereof, You may choose to
|
||||
offer, and charge a fee for, acceptance of support, warranty, indemnity, or
|
||||
other liability obligations and/or rights consistent with this License. However,
|
||||
in accepting such obligations, You may act only on Your own behalf and on Your
|
||||
sole responsibility, not on behalf of any other Contributor, and only if You
|
||||
agree to indemnify, defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason of your
|
||||
accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work
|
||||
|
||||
To apply the Apache License to your work, attach the following boilerplate
|
||||
notice, with the fields enclosed by brackets "[]" replaced with your own
|
||||
identifying information. (Don't include the brackets!) The text should be
|
||||
enclosed in the appropriate comment syntax for the file format. We also
|
||||
recommend that a file or class name and description of purpose be included on
|
||||
the same "printed page" as the copyright notice for easier identification within
|
||||
third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
|
@ -0,0 +1,133 @@
|
|||
/*
|
||||
Copyright 2013 Google Inc.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Package lru implements an LRU cache.
|
||||
package lru
|
||||
|
||||
import "container/list"
|
||||
|
||||
// Cache is an LRU cache. It is not safe for concurrent access.
type Cache struct {
	// MaxEntries is the maximum number of cache entries before
	// an item is evicted. Zero means no limit.
	MaxEntries int

	// OnEvicted optionally specifies a callback function to be
	// executed when an entry is purged from the cache.
	OnEvicted func(key Key, value interface{})

	ll    *list.List                    // recency list; front is most recently used
	cache map[interface{}]*list.Element // key -> element in ll
}

// A Key may be any value that is comparable. See http://golang.org/ref/spec#Comparison_operators
type Key interface{}

// entry is the payload stored in each list element.
type entry struct {
	key   Key
	value interface{}
}

// New creates a new Cache.
// If maxEntries is zero, the cache has no limit and it's assumed
// that eviction is done by the caller.
func New(maxEntries int) *Cache {
	return &Cache{
		MaxEntries: maxEntries,
		ll:         list.New(),
		cache:      make(map[interface{}]*list.Element),
	}
}

// Add adds a value to the cache, evicting the oldest entry if the
// configured capacity is exceeded.
func (c *Cache) Add(key Key, value interface{}) {
	if c.cache == nil {
		// Lazily initialize so the zero-value Cache is usable.
		c.cache = make(map[interface{}]*list.Element)
		c.ll = list.New()
	}
	if el, hit := c.cache[key]; hit {
		// Existing key: refresh recency and overwrite the value.
		c.ll.MoveToFront(el)
		el.Value.(*entry).value = value
		return
	}
	c.cache[key] = c.ll.PushFront(&entry{key, value})
	if c.MaxEntries != 0 && c.ll.Len() > c.MaxEntries {
		c.RemoveOldest()
	}
}

// Get looks up a key's value from the cache, marking it most
// recently used on a hit.
func (c *Cache) Get(key Key) (value interface{}, ok bool) {
	if c.cache == nil {
		return nil, false
	}
	el, hit := c.cache[key]
	if !hit {
		return nil, false
	}
	c.ll.MoveToFront(el)
	return el.Value.(*entry).value, true
}

// Remove removes the provided key from the cache.
func (c *Cache) Remove(key Key) {
	if c.cache == nil {
		return
	}
	if el, hit := c.cache[key]; hit {
		c.removeElement(el)
	}
}

// RemoveOldest removes the oldest item from the cache.
func (c *Cache) RemoveOldest() {
	if c.cache == nil {
		return
	}
	if oldest := c.ll.Back(); oldest != nil {
		c.removeElement(oldest)
	}
}

// removeElement unlinks e from both the list and the index map,
// invoking the eviction callback if one is registered.
func (c *Cache) removeElement(e *list.Element) {
	c.ll.Remove(e)
	kv := e.Value.(*entry)
	delete(c.cache, kv.key)
	if c.OnEvicted != nil {
		c.OnEvicted(kv.key, kv.value)
	}
}

// Len returns the number of items in the cache.
func (c *Cache) Len() int {
	if c.cache == nil {
		return 0
	}
	return c.ll.Len()
}

// Clear purges all stored items from the cache, running OnEvicted
// (if set) for each entry, then drops the backing storage.
func (c *Cache) Clear() {
	if c.OnEvicted != nil {
		for _, el := range c.cache {
			kv := el.Value.(*entry)
			c.OnEvicted(kv.key, kv.value)
		}
	}
	c.ll = nil
	c.cache = nil
}
|
|
@ -0,0 +1,27 @@
|
|||
Copyright (c) 2019 Google LLC. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google Inc. nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
@ -0,0 +1,865 @@
|
|||
// Copyright 2019 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// The stargz package reads & writes tar.gz ("tarball") files in a
|
||||
// seekable, indexed format called "stargz". A stargz file is still a
|
||||
// valid tarball, but it's slightly bigger with new gzip streams for
|
||||
// each new file & throughout large files, and has an index in a magic
|
||||
// file at the end.
|
||||
package stargz
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"bufio"
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"crypto/sha256"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"hash"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// TOCTarName is the name of the JSON file in the tar archive in the
// table of contents gzip stream.
const TOCTarName = "stargz.index.json"

// FooterSize is the number of bytes in the stargz footer.
//
// The footer is an empty gzip stream with no compression and an Extra
// header of the form "%016xSTARGZ", where the 64 bit hex-encoded
// number is the offset to the gzip stream of JSON TOC.
//
// 47 comes from:
//
//   10 byte gzip header +
//   2 byte (LE16) length of extra, encoding 22 (16 hex digits + len("STARGZ")) == "\x16\x00" +
//   22 bytes of extra (fmt.Sprintf("%016xSTARGZ", tocGzipOffset))
//   5 byte flate header
//   8 byte gzip footer (two little endian uint32s: digest, size)
const FooterSize = 47
|
||||
|
||||
// A Reader permits random access reads from a stargz file.
type Reader struct {
	sr  *io.SectionReader // the underlying stargz blob
	toc *jtoc             // decoded table of contents

	// m stores all non-chunk entries, keyed by name.
	m map[string]*TOCEntry

	// chunks stores all TOCEntry values for regular files that
	// are split up. For a file with a single chunk, it's only
	// stored in m.
	chunks map[string][]*TOCEntry
}
|
||||
|
||||
// Open opens a stargz file for reading.
|
||||
func Open(sr *io.SectionReader) (*Reader, error) {
|
||||
if sr.Size() < FooterSize {
|
||||
return nil, fmt.Errorf("stargz size %d is smaller than the stargz footer size", sr.Size())
|
||||
}
|
||||
// TODO: read a bigger chunk (1MB?) at once here to hopefully
|
||||
// get the TOC + footer in one go.
|
||||
var footer [FooterSize]byte
|
||||
if _, err := sr.ReadAt(footer[:], sr.Size()-FooterSize); err != nil {
|
||||
return nil, fmt.Errorf("error reading footer: %v", err)
|
||||
}
|
||||
tocOff, ok := parseFooter(footer[:])
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("error parsing footer")
|
||||
}
|
||||
tocTargz := make([]byte, sr.Size()-tocOff-FooterSize)
|
||||
if _, err := sr.ReadAt(tocTargz, tocOff); err != nil {
|
||||
return nil, fmt.Errorf("error reading %d byte TOC targz: %v", len(tocTargz), err)
|
||||
}
|
||||
zr, err := gzip.NewReader(bytes.NewReader(tocTargz))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("malformed TOC gzip header: %v", err)
|
||||
}
|
||||
zr.Multistream(false)
|
||||
tr := tar.NewReader(zr)
|
||||
h, err := tr.Next()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to find tar header in TOC gzip stream: %v", err)
|
||||
}
|
||||
if h.Name != TOCTarName {
|
||||
return nil, fmt.Errorf("TOC tar entry had name %q; expected %q", h.Name, TOCTarName)
|
||||
}
|
||||
toc := new(jtoc)
|
||||
if err := json.NewDecoder(tr).Decode(&toc); err != nil {
|
||||
return nil, fmt.Errorf("error decoding TOC JSON: %v", err)
|
||||
}
|
||||
r := &Reader{sr: sr, toc: toc}
|
||||
if err := r.initFields(); err != nil {
|
||||
return nil, fmt.Errorf("failed to initialize fields of entries: %v", err)
|
||||
}
|
||||
return r, nil
|
||||
}
|
||||
|
||||
// TOCEntry is an entry in the stargz file's TOC (Table of Contents).
type TOCEntry struct {
	// Name is the tar entry's name. It is the complete path
	// stored in the tar file, not just the base name.
	Name string `json:"name"`

	// Type is one of "dir", "reg", "symlink", "hardlink", "char",
	// "block", "fifo", or "chunk".
	// The "chunk" type is used for regular file data chunks past the first
	// TOCEntry; the 2nd chunk and on have only Type ("chunk"), Offset,
	// ChunkOffset, and ChunkSize populated.
	Type string `json:"type"`

	// Size, for regular files, is the logical size of the file.
	Size int64 `json:"size,omitempty"`

	// ModTime3339 is the modification time of the tar entry. Empty
	// means zero or unknown. Otherwise it's in UTC RFC3339
	// format. Use the ModTime method to access the time.Time value.
	ModTime3339 string `json:"modtime,omitempty"`
	modTime     time.Time // parsed form of ModTime3339; set by initFields

	// LinkName, for symlinks and hardlinks, is the link target.
	LinkName string `json:"linkName,omitempty"`

	// Mode is the permission and mode bits.
	Mode int64 `json:"mode,omitempty"`

	// Uid is the user ID of the owner.
	Uid int `json:"uid,omitempty"`

	// Gid is the group ID of the owner.
	Gid int `json:"gid,omitempty"`

	// Uname is the username of the owner.
	//
	// In the serialized JSON, this field may only be present for
	// the first entry with the same Uid.
	Uname string `json:"userName,omitempty"`

	// Gname is the group name of the owner.
	//
	// In the serialized JSON, this field may only be present for
	// the first entry with the same Gid.
	Gname string `json:"groupName,omitempty"`

	// Offset, for regular files, provides the offset in the
	// stargz file to the file's data bytes. See ChunkOffset and
	// ChunkSize.
	Offset int64 `json:"offset,omitempty"`

	// nextOffset is the Offset of the next entry with a non-zero Offset;
	// computed by initFields.
	nextOffset int64

	// DevMajor is the major device number for "char" and "block" types.
	DevMajor int `json:"devMajor,omitempty"`

	// DevMinor is the minor device number for "char" and "block" types.
	DevMinor int `json:"devMinor,omitempty"`

	// NumLink is the number of entry names pointing to this entry.
	// Zero means one name references this entry.
	NumLink int

	// Xattrs are the extended attribute for the entry.
	Xattrs map[string][]byte `json:"xattrs,omitempty"`

	// Digest stores the OCI checksum for regular files payload.
	// It has the form "sha256:abcdef01234....".
	Digest string `json:"digest,omitempty"`

	// ChunkOffset is non-zero if this is a chunk of a large,
	// regular file. If so, the Offset is where the gzip header of
	// ChunkSize bytes at ChunkOffset in Name begin.
	//
	// In serialized form, a "chunkSize" JSON field of zero means
	// that the chunk goes to the end of the file. After reading
	// from the stargz TOC, though, the ChunkSize is initialized
	// to a non-zero value for when Type is either "reg" or
	// "chunk".
	ChunkOffset int64 `json:"chunkOffset,omitempty"`
	ChunkSize   int64 `json:"chunkSize,omitempty"`

	// children maps a directory entry's child base names to their
	// entries; populated by initFields/addChild.
	children map[string]*TOCEntry
}
|
||||
|
||||
// ModTime returns the entry's modification time.
|
||||
func (e *TOCEntry) ModTime() time.Time { return e.modTime }
|
||||
|
||||
// NextOffset returns the position (relative to the start of the
|
||||
// stargz file) of the next gzip boundary after e.Offset.
|
||||
func (e *TOCEntry) NextOffset() int64 { return e.nextOffset }
|
||||
|
||||
func (e *TOCEntry) addChild(baseName string, child *TOCEntry) {
|
||||
if e.children == nil {
|
||||
e.children = make(map[string]*TOCEntry)
|
||||
}
|
||||
if child.Type == "dir" {
|
||||
e.NumLink++ // Entry ".." in the subdirectory links to this directory
|
||||
}
|
||||
e.children[baseName] = child
|
||||
}
|
||||
|
||||
// isDataType reports whether TOCEntry is a regular file or chunk (something that
|
||||
// contains regular file data).
|
||||
func (e *TOCEntry) isDataType() bool { return e.Type == "reg" || e.Type == "chunk" }
|
||||
|
||||
// jtoc is the JSON-serialized table of contents index of the files in the stargz file.
type jtoc struct {
	Version int         `json:"version"` // TOC format version; currently always 1
	Entries []*TOCEntry `json:"entries"` // one entry per tar item (plus extra "chunk" entries)
}
|
||||
|
||||
// Stat returns a FileInfo value representing e.
|
||||
func (e *TOCEntry) Stat() os.FileInfo { return fileInfo{e} }
|
||||
|
||||
// ForeachChild calls f for each child item. If f returns false, iteration ends.
|
||||
// If e is not a directory, f is not called.
|
||||
func (e *TOCEntry) ForeachChild(f func(baseName string, ent *TOCEntry) bool) {
|
||||
for name, ent := range e.children {
|
||||
if !f(name, ent) {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// LookupChild returns the directory e's child by its base name.
|
||||
func (e *TOCEntry) LookupChild(baseName string) (child *TOCEntry, ok bool) {
|
||||
child, ok = e.children[baseName]
|
||||
return
|
||||
}
|
||||
|
||||
// fileInfo implements os.FileInfo using the wrapped *TOCEntry.
|
||||
type fileInfo struct{ e *TOCEntry }
|
||||
|
||||
var _ os.FileInfo = fileInfo{}
|
||||
|
||||
func (fi fileInfo) Name() string { return path.Base(fi.e.Name) }
|
||||
func (fi fileInfo) IsDir() bool { return fi.e.Type == "dir" }
|
||||
func (fi fileInfo) Size() int64 { return fi.e.Size }
|
||||
func (fi fileInfo) ModTime() time.Time { return fi.e.ModTime() }
|
||||
func (fi fileInfo) Sys() interface{} { return fi.e }
|
||||
func (fi fileInfo) Mode() (m os.FileMode) {
|
||||
m = os.FileMode(fi.e.Mode) & os.ModePerm
|
||||
switch fi.e.Type {
|
||||
case "dir":
|
||||
m |= os.ModeDir
|
||||
case "symlink":
|
||||
m |= os.ModeSymlink
|
||||
case "char":
|
||||
m |= os.ModeDevice | os.ModeCharDevice
|
||||
case "block":
|
||||
m |= os.ModeDevice
|
||||
case "fifo":
|
||||
m |= os.ModeNamedPipe
|
||||
}
|
||||
// TODO: ModeSetuid, ModeSetgid, if/as needed.
|
||||
return m
|
||||
}
|
||||
|
||||
// initFields populates the Reader from r.toc after decoding it from
|
||||
// JSON.
|
||||
//
|
||||
// Unexported fields are populated and TOCEntry fields that were
|
||||
// implicit in the JSON are populated.
|
||||
func (r *Reader) initFields() error {
|
||||
r.m = make(map[string]*TOCEntry, len(r.toc.Entries))
|
||||
r.chunks = make(map[string][]*TOCEntry)
|
||||
var lastPath string
|
||||
uname := map[int]string{}
|
||||
gname := map[int]string{}
|
||||
var lastRegEnt *TOCEntry
|
||||
for _, ent := range r.toc.Entries {
|
||||
ent.Name = strings.TrimPrefix(ent.Name, "./")
|
||||
if ent.Type == "reg" {
|
||||
lastRegEnt = ent
|
||||
}
|
||||
if ent.Type == "chunk" {
|
||||
ent.Name = lastPath
|
||||
r.chunks[ent.Name] = append(r.chunks[ent.Name], ent)
|
||||
if ent.ChunkSize == 0 && lastRegEnt != nil {
|
||||
ent.ChunkSize = lastRegEnt.Size - ent.ChunkOffset
|
||||
}
|
||||
} else {
|
||||
lastPath = ent.Name
|
||||
|
||||
if ent.Uname != "" {
|
||||
uname[ent.Uid] = ent.Uname
|
||||
} else {
|
||||
ent.Uname = uname[ent.Uid]
|
||||
}
|
||||
if ent.Gname != "" {
|
||||
gname[ent.Gid] = ent.Gname
|
||||
} else {
|
||||
ent.Gname = uname[ent.Gid]
|
||||
}
|
||||
|
||||
ent.modTime, _ = time.Parse(time.RFC3339, ent.ModTime3339)
|
||||
|
||||
if ent.Type == "dir" {
|
||||
ent.NumLink++ // Parent dir links to this directory
|
||||
r.m[strings.TrimSuffix(ent.Name, "/")] = ent
|
||||
} else {
|
||||
r.m[ent.Name] = ent
|
||||
}
|
||||
}
|
||||
if ent.Type == "reg" && ent.ChunkSize > 0 && ent.ChunkSize < ent.Size {
|
||||
r.chunks[ent.Name] = make([]*TOCEntry, 0, ent.Size/ent.ChunkSize+1)
|
||||
r.chunks[ent.Name] = append(r.chunks[ent.Name], ent)
|
||||
}
|
||||
if ent.ChunkSize == 0 && ent.Size != 0 {
|
||||
ent.ChunkSize = ent.Size
|
||||
}
|
||||
}
|
||||
|
||||
// Populate children, add implicit directories:
|
||||
for _, ent := range r.toc.Entries {
|
||||
if ent.Type == "chunk" {
|
||||
continue
|
||||
}
|
||||
// add "foo/":
|
||||
// add "foo" child to "" (creating "" if necessary)
|
||||
//
|
||||
// add "foo/bar/":
|
||||
// add "bar" child to "foo" (creating "foo" if necessary)
|
||||
//
|
||||
// add "foo/bar.txt":
|
||||
// add "bar.txt" child to "foo" (creating "foo" if necessary)
|
||||
//
|
||||
// add "a/b/c/d/e/f.txt":
|
||||
// create "a/b/c/d/e" node
|
||||
// add "f.txt" child to "e"
|
||||
|
||||
name := ent.Name
|
||||
if ent.Type == "dir" {
|
||||
name = strings.TrimSuffix(name, "/")
|
||||
}
|
||||
pdir := r.getOrCreateDir(parentDir(name))
|
||||
ent.NumLink++ // at least one name(ent.Name) references this entry.
|
||||
if ent.Type == "hardlink" {
|
||||
if org, ok := r.m[ent.LinkName]; ok {
|
||||
org.NumLink++ // original entry is referenced by this ent.Name.
|
||||
ent = org
|
||||
} else {
|
||||
return fmt.Errorf("%q is a hardlink but the linkname %q isn't found", ent.Name, ent.LinkName)
|
||||
}
|
||||
}
|
||||
pdir.addChild(path.Base(name), ent)
|
||||
}
|
||||
|
||||
lastOffset := r.sr.Size()
|
||||
for i := len(r.toc.Entries) - 1; i >= 0; i-- {
|
||||
e := r.toc.Entries[i]
|
||||
if e.isDataType() {
|
||||
e.nextOffset = lastOffset
|
||||
}
|
||||
if e.Offset != 0 {
|
||||
lastOffset = e.Offset
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// parentDir returns the parent directory of p without a trailing
// slash ("" for a top-level name).
func parentDir(p string) string {
	dir, _ := path.Split(p)
	dir = strings.TrimSuffix(dir, "/")
	return dir
}
|
||||
|
||||
func (r *Reader) getOrCreateDir(d string) *TOCEntry {
|
||||
e, ok := r.m[d]
|
||||
if !ok {
|
||||
e = &TOCEntry{
|
||||
Name: d,
|
||||
Type: "dir",
|
||||
Mode: 0755,
|
||||
NumLink: 2, // The directory itself(.) and the parent link to this directory.
|
||||
}
|
||||
r.m[d] = e
|
||||
if d != "" {
|
||||
pdir := r.getOrCreateDir(parentDir(d))
|
||||
pdir.addChild(path.Base(d), e)
|
||||
}
|
||||
}
|
||||
return e
|
||||
}
|
||||
|
||||
// ChunkEntryForOffset returns the TOCEntry containing the byte of the
|
||||
// named file at the given offset within the file.
|
||||
func (r *Reader) ChunkEntryForOffset(name string, offset int64) (e *TOCEntry, ok bool) {
|
||||
e, ok = r.Lookup(name)
|
||||
if !ok || !e.isDataType() {
|
||||
return nil, false
|
||||
}
|
||||
ents := r.chunks[name]
|
||||
if len(ents) < 2 {
|
||||
if offset >= e.ChunkSize {
|
||||
return nil, false
|
||||
}
|
||||
return e, true
|
||||
}
|
||||
i := sort.Search(len(ents), func(i int) bool {
|
||||
e := ents[i]
|
||||
return e.ChunkOffset >= offset || (offset > e.ChunkOffset && offset < e.ChunkOffset+e.ChunkSize)
|
||||
})
|
||||
if i == len(ents) {
|
||||
return nil, false
|
||||
}
|
||||
return ents[i], true
|
||||
}
|
||||
|
||||
// Lookup returns the Table of Contents entry for the given path.
|
||||
//
|
||||
// To get the root directory, use the empty string.
|
||||
func (r *Reader) Lookup(path string) (e *TOCEntry, ok bool) {
|
||||
if r == nil {
|
||||
return
|
||||
}
|
||||
e, ok = r.m[path]
|
||||
if ok && e.Type == "hardlink" {
|
||||
e, ok = r.m[e.LinkName]
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (r *Reader) OpenFile(name string) (*io.SectionReader, error) {
|
||||
ent, ok := r.Lookup(name)
|
||||
if !ok {
|
||||
// TODO: come up with some error plan. This is lazy:
|
||||
return nil, &os.PathError{
|
||||
Path: name,
|
||||
Op: "OpenFile",
|
||||
Err: os.ErrNotExist,
|
||||
}
|
||||
}
|
||||
if ent.Type != "reg" {
|
||||
return nil, &os.PathError{
|
||||
Path: name,
|
||||
Op: "OpenFile",
|
||||
Err: errors.New("not a regular file"),
|
||||
}
|
||||
}
|
||||
fr := &fileReader{
|
||||
r: r,
|
||||
size: ent.Size,
|
||||
ents: r.getChunks(ent),
|
||||
}
|
||||
return io.NewSectionReader(fr, 0, fr.size), nil
|
||||
}
|
||||
|
||||
func (r *Reader) getChunks(ent *TOCEntry) []*TOCEntry {
|
||||
if ents, ok := r.chunks[ent.Name]; ok {
|
||||
return ents
|
||||
}
|
||||
return []*TOCEntry{ent}
|
||||
}
|
||||
|
||||
// fileReader adapts one regular file inside the stargz blob to
// io.ReaderAt, decompressing the relevant gzip chunk(s) on demand.
type fileReader struct {
	r    *Reader
	size int64       // logical (uncompressed) file size
	ents []*TOCEntry // 1 or more reg/chunk entries
}
|
||||
|
||||
// ReadAt implements io.ReaderAt over the file's uncompressed bytes.
//
// It locates the chunk containing off, opens a gzip stream at that
// chunk's compressed offset, discards bytes up to off within the
// chunk, and then fills p. Reads may span several gzip chunks
// because the section reader is bounded by the last chunk's
// NextOffset.
func (fr *fileReader) ReadAt(p []byte, off int64) (n int, err error) {
	if off >= fr.size {
		return 0, io.EOF
	}
	if off < 0 {
		return 0, errors.New("invalid offset")
	}
	// Find the index of the chunk whose ChunkOffset is >= off;
	// i stays 0 for single-chunk files.
	var i int
	if len(fr.ents) > 1 {
		i = sort.Search(len(fr.ents), func(i int) bool {
			return fr.ents[i].ChunkOffset >= off
		})
		if i == len(fr.ents) {
			i = len(fr.ents) - 1
		}
	}
	ent := fr.ents[i]
	if ent.ChunkOffset > off {
		// sort.Search found the chunk *after* off; step back one.
		if i == 0 {
			return 0, errors.New("internal error; first chunk offset is non-zero")
		}
		ent = fr.ents[i-1]
	}

	// If ent is a chunk of a large file, adjust the ReadAt
	// offset by the chunk's offset.
	off -= ent.ChunkOffset

	finalEnt := fr.ents[len(fr.ents)-1]
	gzOff := ent.Offset
	// gzBytesRemain is the number of compressed gzip bytes in this
	// file remaining, over 1+ gzip chunks.
	gzBytesRemain := finalEnt.NextOffset() - gzOff

	sr := io.NewSectionReader(fr.r.sr, gzOff, gzBytesRemain)

	// Cap the read-ahead buffer at 2 MiB.
	const maxGZread = 2 << 20
	var bufSize int = maxGZread
	if gzBytesRemain < maxGZread {
		bufSize = int(gzBytesRemain)
	}

	br := bufio.NewReaderSize(sr, bufSize)
	// Pre-fill the buffer so the gzip reader doesn't do tiny reads
	// against the underlying blob.
	if _, err := br.Peek(bufSize); err != nil {
		return 0, fmt.Errorf("fileReader.ReadAt.peek: %v", err)
	}

	gz, err := gzip.NewReader(br)
	if err != nil {
		return 0, fmt.Errorf("fileReader.ReadAt.gzipNewReader: %v", err)
	}
	// Skip decompressed bytes inside the chunk until we reach off.
	if n, err := io.CopyN(ioutil.Discard, gz, off); n != off || err != nil {
		return 0, fmt.Errorf("discard of %d bytes = %v, %v", off, n, err)
	}
	return io.ReadFull(gz, p)
}
|
||||
|
||||
// A Writer writes stargz files.
//
// Use NewWriter to create a new Writer.
type Writer struct {
	bw       *bufio.Writer // buffered output
	cw       *countWriter  // counts compressed bytes written, for TOC offsets
	toc      *jtoc         // accumulated table of contents
	diffHash hash.Hash     // SHA-256 of uncompressed tar

	closed        bool           // set by Close
	gz            *gzip.Writer   // current gzip stream, nil between chunks
	lastUsername  map[int]string // last username written per uid (dedup)
	lastGroupname map[int]string // last group name written per gid (dedup)

	// ChunkSize optionally controls the maximum number of bytes
	// of data of a regular file that can be written in one gzip
	// stream before a new gzip stream is started.
	// Zero means to use a default, currently 4 MiB.
	ChunkSize int
}
|
||||
|
||||
// currentGzipWriter writes to the current w.gz field, which can
|
||||
// change throughout writing a tar entry.
|
||||
//
|
||||
// Additionally, it updates w's SHA-256 of the uncompressed bytes
|
||||
// of the tar file.
|
||||
type currentGzipWriter struct{ w *Writer }
|
||||
|
||||
func (cgw currentGzipWriter) Write(p []byte) (int, error) {
|
||||
cgw.w.diffHash.Write(p)
|
||||
return cgw.w.gz.Write(p)
|
||||
}
|
||||
|
||||
func (w *Writer) chunkSize() int {
|
||||
if w.ChunkSize <= 0 {
|
||||
return 4 << 20
|
||||
}
|
||||
return w.ChunkSize
|
||||
}
|
||||
|
||||
// NewWriter returns a new stargz writer writing to w.
|
||||
//
|
||||
// The writer must be closed to write its trailing table of contents.
|
||||
func NewWriter(w io.Writer) *Writer {
|
||||
bw := bufio.NewWriter(w)
|
||||
cw := &countWriter{w: bw}
|
||||
return &Writer{
|
||||
bw: bw,
|
||||
cw: cw,
|
||||
toc: &jtoc{Version: 1},
|
||||
diffHash: sha256.New(),
|
||||
}
|
||||
}
|
||||
|
||||
// Close writes the stargz's table of contents and flushes all the
// buffers, returning any error.
//
// Layout written here: [data gzip streams][TOC gzip stream][footer].
// Close is idempotent; subsequent calls return nil.
func (w *Writer) Close() error {
	if w.closed {
		return nil
	}
	defer func() { w.closed = true }()

	// Finish the in-progress data gzip stream (if any) so cw.n is
	// the exact offset where the TOC stream begins.
	if err := w.closeGz(); err != nil {
		return err
	}

	// Write the TOC index.
	tocOff := w.cw.n
	// BestCompression level never fails; error intentionally ignored.
	w.gz, _ = gzip.NewWriterLevel(w.cw, gzip.BestCompression)
	w.gz.Extra = []byte("stargz.toc")
	tw := tar.NewWriter(currentGzipWriter{w})
	tocJSON, err := json.MarshalIndent(w.toc, "", "\t")
	if err != nil {
		return err
	}
	if err := tw.WriteHeader(&tar.Header{
		Typeflag: tar.TypeReg,
		Name:     TOCTarName,
		Size:     int64(len(tocJSON)),
	}); err != nil {
		return err
	}
	if _, err := tw.Write(tocJSON); err != nil {
		return err
	}

	if err := tw.Close(); err != nil {
		return err
	}
	if err := w.closeGz(); err != nil {
		return err
	}

	// And a little footer with pointer to the TOC gzip stream.
	if _, err := w.bw.Write(footerBytes(tocOff)); err != nil {
		return err
	}

	if err := w.bw.Flush(); err != nil {
		return err
	}

	return nil
}
|
||||
|
||||
func (w *Writer) closeGz() error {
|
||||
if w.closed {
|
||||
return errors.New("write on closed Writer")
|
||||
}
|
||||
if w.gz != nil {
|
||||
if err := w.gz.Close(); err != nil {
|
||||
return err
|
||||
}
|
||||
w.gz = nil
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// nameIfChanged returns name, unless it was the already the value of (*mp)[id],
|
||||
// in which case it returns the empty string.
|
||||
func (w *Writer) nameIfChanged(mp *map[int]string, id int, name string) string {
|
||||
if name == "" {
|
||||
return ""
|
||||
}
|
||||
if *mp == nil {
|
||||
*mp = make(map[int]string)
|
||||
}
|
||||
if (*mp)[id] == name {
|
||||
return ""
|
||||
}
|
||||
(*mp)[id] = name
|
||||
return name
|
||||
}
|
||||
|
||||
func (w *Writer) condOpenGz() {
|
||||
if w.gz == nil {
|
||||
w.gz, _ = gzip.NewWriterLevel(w.cw, gzip.BestCompression)
|
||||
}
|
||||
}
|
||||
|
||||
// AppendTar reads the tar or tar.gz file from r and appends
// each of its contents to w.
//
// The input r can optionally be gzip compressed but the output will
// always be gzip compressed.
func (w *Writer) AppendTar(r io.Reader) error {
	// Sniff the stream: transparently unwrap a gzip layer if present.
	br := bufio.NewReader(r)
	var tr *tar.Reader
	if isGzip(br) {
		// NewReader can't fail if isGzip returned true.
		zr, _ := gzip.NewReader(br)
		tr = tar.NewReader(zr)
	} else {
		tr = tar.NewReader(br)
	}
	// Re-write every entry of the source tar into the output stream,
	// recording a TOC entry for each.
	for {
		h, err := tr.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			return fmt.Errorf("error reading from source tar: tar.Reader.Next: %v", err)
		}
		if h.Name == TOCTarName {
			// It is possible for a layer to be "stargzified" twice during the
			// distribution lifecycle. So we reserve "TOCTarName" here to avoid
			// duplicated entries in the resulting layer.
			continue
		}

		// Copy extended attributes into the TOC entry's map form.
		var xattrs map[string][]byte
		if h.Xattrs != nil {
			xattrs = make(map[string][]byte)
			for k, v := range h.Xattrs {
				xattrs[k] = []byte(v)
			}
		}
		ent := &TOCEntry{
			Name:        h.Name,
			Mode:        h.Mode,
			Uid:         h.Uid,
			Gid:         h.Gid,
			Uname:       w.nameIfChanged(&w.lastUsername, h.Uid, h.Uname),
			Gname:       w.nameIfChanged(&w.lastGroupname, h.Gid, h.Gname),
			ModTime3339: formatModtime(h.ModTime),
			Xattrs:      xattrs,
		}
		w.condOpenGz()
		// tw writes the entry through currentGzipWriter so its bytes land in
		// whichever gzip stream w currently has open.
		tw := tar.NewWriter(currentGzipWriter{w})
		if err := tw.WriteHeader(h); err != nil {
			return err
		}
		switch h.Typeflag {
		case tar.TypeLink:
			ent.Type = "hardlink"
			ent.LinkName = h.Linkname
		case tar.TypeSymlink:
			ent.Type = "symlink"
			ent.LinkName = h.Linkname
		case tar.TypeDir:
			ent.Type = "dir"
		case tar.TypeReg:
			ent.Type = "reg"
			ent.Size = h.Size
		case tar.TypeChar:
			ent.Type = "char"
			ent.DevMajor = int(h.Devmajor)
			ent.DevMinor = int(h.Devminor)
		case tar.TypeBlock:
			ent.Type = "block"
			ent.DevMajor = int(h.Devmajor)
			ent.DevMinor = int(h.Devminor)
		case tar.TypeFifo:
			ent.Type = "fifo"
		default:
			return fmt.Errorf("unsupported input tar entry %q", h.Typeflag)
		}

		// We need to keep a reference to the TOC entry for regular files, so that we
		// can fill the digest later.
		var regFileEntry *TOCEntry
		var payloadDigest hash.Hash
		if h.Typeflag == tar.TypeReg {
			regFileEntry = ent
			payloadDigest = sha256.New()
		}

		if h.Typeflag == tar.TypeReg && ent.Size > 0 {
			// Split the file payload into chunks of w.chunkSize() bytes,
			// emitting one TOC entry per chunk.
			var written int64
			totalSize := ent.Size // save it before we destroy ent
			// tee feeds every payload byte into payloadDigest as it is copied.
			tee := io.TeeReader(tr, payloadDigest)
			for written < totalSize {
				// Close the current gzip stream so this chunk starts at a new
				// compressed offset; that offset (w.cw.n) is recorded in
				// ent.Offset below.
				if err := w.closeGz(); err != nil {
					return err
				}

				chunkSize := int64(w.chunkSize())
				remain := totalSize - written
				if remain < chunkSize {
					// Final short chunk: ChunkSize stays zero; full-size
					// chunks record it explicitly.
					chunkSize = remain
				} else {
					ent.ChunkSize = chunkSize
				}
				ent.Offset = w.cw.n
				ent.ChunkOffset = written

				w.condOpenGz()

				if _, err := io.CopyN(tw, tee, chunkSize); err != nil {
					return fmt.Errorf("error copying %q: %v", h.Name, err)
				}
				w.toc.Entries = append(w.toc.Entries, ent)
				written += chunkSize
				// Subsequent chunks of the same file are recorded as "chunk"
				// entries carrying only the name.
				ent = &TOCEntry{
					Name: h.Name,
					Type: "chunk",
				}
			}
		} else {
			w.toc.Entries = append(w.toc.Entries, ent)
		}
		if payloadDigest != nil {
			// Fill in the digest accumulated through the TeeReader above.
			regFileEntry.Digest = fmt.Sprintf("sha256:%x", payloadDigest.Sum(nil))
		}
		if err := tw.Flush(); err != nil {
			return err
		}
	}
	return nil
}
|
||||
|
||||
// DiffID returns the SHA-256 of the uncompressed tar bytes.
|
||||
// It is only valid to call DiffID after Close.
|
||||
func (w *Writer) DiffID() string {
|
||||
return fmt.Sprintf("sha256:%x", w.diffHash.Sum(nil))
|
||||
}
|
||||
|
||||
// footerBytes the 47 byte footer.
|
||||
func footerBytes(tocOff int64) []byte {
|
||||
buf := bytes.NewBuffer(make([]byte, 0, FooterSize))
|
||||
gz, _ := gzip.NewWriterLevel(buf, gzip.NoCompression)
|
||||
gz.Header.Extra = []byte(fmt.Sprintf("%016xSTARGZ", tocOff))
|
||||
gz.Close()
|
||||
if buf.Len() != FooterSize {
|
||||
panic(fmt.Sprintf("footer buffer = %d, not %d", buf.Len(), FooterSize))
|
||||
}
|
||||
return buf.Bytes()
|
||||
}
|
||||
|
||||
func parseFooter(p []byte) (tocOffset int64, ok bool) {
|
||||
if len(p) != FooterSize {
|
||||
return 0, false
|
||||
}
|
||||
zr, err := gzip.NewReader(bytes.NewReader(p))
|
||||
if err != nil {
|
||||
return 0, false
|
||||
}
|
||||
extra := zr.Header.Extra
|
||||
if len(extra) != 16+len("STARGZ") {
|
||||
return 0, false
|
||||
}
|
||||
if string(extra[16:]) != "STARGZ" {
|
||||
return 0, false
|
||||
}
|
||||
tocOffset, err = strconv.ParseInt(string(extra[:16]), 16, 64)
|
||||
return tocOffset, err == nil
|
||||
}
|
||||
|
||||
func formatModtime(t time.Time) string {
|
||||
if t.IsZero() || t.Unix() == 0 {
|
||||
return ""
|
||||
}
|
||||
return t.UTC().Round(time.Second).Format(time.RFC3339)
|
||||
}
|
||||
|
||||
// countWriter counts how many bytes have been written to its wrapped
|
||||
// io.Writer.
|
||||
type countWriter struct {
|
||||
w io.Writer
|
||||
n int64
|
||||
}
|
||||
|
||||
func (cw *countWriter) Write(p []byte) (n int, err error) {
|
||||
n, err = cw.w.Write(p)
|
||||
cw.n += int64(n)
|
||||
return
|
||||
}
|
||||
|
||||
// isGzip reports whether br is positioned right before an upcoming gzip stream.
|
||||
// It does not consume any bytes from br.
|
||||
func isGzip(br *bufio.Reader) bool {
|
||||
const (
|
||||
gzipID1 = 0x1f
|
||||
gzipID2 = 0x8b
|
||||
gzipDeflate = 8
|
||||
)
|
||||
peek, _ := br.Peek(3)
|
||||
return len(peek) >= 3 && peek[0] == gzipID1 && peek[1] == gzipID2 && peek[2] == gzipDeflate
|
||||
}
|
|
@ -0,0 +1,202 @@
|
|||
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
242
vendor/github.com/google/go-containerregistry/pkg/authn/README.md
generated
vendored
Normal file
242
vendor/github.com/google/go-containerregistry/pkg/authn/README.md
generated
vendored
Normal file
|
@ -0,0 +1,242 @@
|
|||
# `authn`
|
||||
|
||||
[![GoDoc](https://godoc.org/github.com/google/go-containerregistry/pkg/authn?status.svg)](https://godoc.org/github.com/google/go-containerregistry/pkg/authn)
|
||||
|
||||
This README outlines how we acquire and use credentials when interacting with a registry.
|
||||
|
||||
As much as possible, we attempt to emulate docker's authentication behavior and configuration so that this library "just works" if you've already configured credentials that work with docker; however, when things don't work, a basic understanding of what's going on can help with debugging.
|
||||
|
||||
The official documentation for how docker authentication works is (reasonably) scattered across several different sites and GitHub repositories, so we've tried to summarize the relevant bits here.
|
||||
|
||||
## tl;dr for consumers of this package
|
||||
|
||||
By default, [`pkg/v1/remote`](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/remote) uses [`Anonymous`](https://godoc.org/github.com/google/go-containerregistry/pkg/authn#Anonymous) credentials (i.e. _none_), which for most registries will only allow read access to public images.
|
||||
|
||||
To use the credentials found in your docker config file, you can use the [`DefaultKeychain`](https://godoc.org/github.com/google/go-containerregistry/pkg/authn#DefaultKeychain), e.g.:
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/google/go-containerregistry/pkg/authn"
|
||||
"github.com/google/go-containerregistry/pkg/name"
|
||||
"github.com/google/go-containerregistry/pkg/v1/remote"
|
||||
)
|
||||
|
||||
func main() {
|
||||
ref, err := name.ParseReference("registry.example.com/private/repo")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
// Fetch the manifest using default credentials.
|
||||
img, err := remote.Get(ref, remote.WithAuthFromKeychain(authn.DefaultKeychain))
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
// Prints the digest of registry.example.com/private/repo
|
||||
fmt.Println(img.Digest)
|
||||
}
|
||||
```
|
||||
|
||||
(If you're only using [gcr.io](https://gcr.io), see the [`pkg/v1/google.Keychain`](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/google#Keychain), which emulates [`docker-credential-gcr`](https://github.com/GoogleCloudPlatform/docker-credential-gcr).)
|
||||
|
||||
## The Config File
|
||||
|
||||
This file contains various configuration options for docker and is (by default) located at:
|
||||
* `$HOME/.docker/config.json` (on linux and darwin), or
|
||||
* `%USERPROFILE%\.docker\config.json` (on windows).
|
||||
|
||||
You can override this location with the `DOCKER_CONFIG` environment variable.
|
||||
|
||||
### Plaintext
|
||||
|
||||
The config file is where your credentials are stored when you invoke `docker login`, e.g. the contents may look something like this:
|
||||
|
||||
```json
|
||||
{
|
||||
"auths": {
|
||||
"registry.example.com": {
|
||||
"auth": "QXp1cmVEaWFtb25kOmh1bnRlcjI="
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
The `auths` map has an entry per registry, and the `auth` field contains your username and password encoded as [HTTP 'Basic' Auth](https://tools.ietf.org/html/rfc7617).
|
||||
|
||||
**NOTE**: This means that your credentials are stored _in plaintext_:
|
||||
|
||||
```bash
|
||||
$ echo "QXp1cmVEaWFtb25kOmh1bnRlcjI=" | base64 -d
|
||||
AzureDiamond:hunter2
|
||||
```
|
||||
|
||||
For what it's worth, this config file is equivalent to:
|
||||
|
||||
```json
|
||||
{
|
||||
"auths": {
|
||||
"registry.example.com": {
|
||||
"username": "AzureDiamond",
|
||||
"password": "hunter2"
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
... which is useful to know if e.g. your CI system provides you a registry username and password via environment variables and you want to populate this file manually without invoking `docker login`.
|
||||
|
||||
### Helpers
|
||||
|
||||
If you log in like this, docker will warn you that you should use a [credential helper](https://docs.docker.com/engine/reference/commandline/login/#credentials-store), and you should!
|
||||
|
||||
To configure a global credential helper:
|
||||
```json
|
||||
{
|
||||
"credsStore": "osxkeychain"
|
||||
}
|
||||
```
|
||||
|
||||
To configure a per-registry credential helper:
|
||||
```json
|
||||
{
|
||||
"credHelpers": {
|
||||
"gcr.io": "gcr"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
We use [`github.com/docker/cli/cli/config.Load`](https://godoc.org/github.com/docker/cli/cli/config#Load) to parse the config file and invoke any necessary credential helpers. This handles the logic of taking a [`ConfigFile`](https://github.com/docker/cli/blob/ba63a92655c0bea4857b8d6cc4991498858b3c60/cli/config/configfile/file.go#L25-L54) + registry domain and producing an [`AuthConfig`](https://github.com/docker/cli/blob/ba63a92655c0bea4857b8d6cc4991498858b3c60/cli/config/types/authconfig.go#L3-L22), which determines how we authenticate to the registry.
|
||||
|
||||
## Credential Helpers
|
||||
|
||||
The [credential helper protocol](https://github.com/docker/docker-credential-helpers) allows you to configure a binary that supplies credentials for the registry, rather than hard-coding them in the config file.
|
||||
|
||||
The protocol has several verbs, but the one we most care about is `get`.
|
||||
|
||||
For example, using the following config file:
|
||||
```json
|
||||
{
|
||||
"credHelpers": {
|
||||
"gcr.io": "gcr",
|
||||
"eu.gcr.io": "gcr"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
To acquire credentials for `gcr.io`, we look in the `credHelpers` map to find
|
||||
the credential helper for `gcr.io` is `gcr`. By appending that value to
|
||||
`docker-credential-`, we can get the name of the binary we need to use.
|
||||
|
||||
For this example, that's `docker-credential-gcr`, which must be on our `$PATH`.
|
||||
We'll then invoke that binary to get credentials:
|
||||
|
||||
```bash
|
||||
$ echo "gcr.io" | docker-credential-gcr get
|
||||
{"Username":"_token","Secret":"<long access token>"}
|
||||
```
|
||||
|
||||
You can configure the same credential helper for multiple registries, which is
|
||||
why we need to pass the domain in via STDIN, e.g. if we were trying to access
|
||||
`eu.gcr.io`, we'd do this instead:
|
||||
|
||||
```bash
|
||||
$ echo "eu.gcr.io" | docker-credential-gcr get
|
||||
{"Username":"_token","Secret":"<long access token>"}
|
||||
```
|
||||
|
||||
### Debugging credential helpers
|
||||
|
||||
If a credential helper is configured but doesn't seem to be working, it can be
|
||||
challenging to debug. Implementing a fake credential helper lets you poke around
|
||||
to make it easier to see where the failure is happening.
|
||||
|
||||
This "implements" a credential helper with hard-coded values:
|
||||
```
|
||||
#!/usr/bin/env bash
|
||||
echo '{"Username":"<token>","Secret":"hunter2"}'
|
||||
```
|
||||
|
||||
|
||||
This implements a credential helper that prints the output of
|
||||
`docker-credential-gcr` to both stderr and whatever called it, which allows you
|
||||
to snoop on another credential helper:
|
||||
```
|
||||
#!/usr/bin/env bash
|
||||
docker-credential-gcr $@ | tee >(cat 1>&2)
|
||||
```
|
||||
|
||||
Put those files somewhere on your path, naming them e.g.
|
||||
`docker-credential-hardcoded` and `docker-credential-tee`, then modify the
|
||||
config file to use them:
|
||||
|
||||
```json
|
||||
{
|
||||
"credHelpers": {
|
||||
"gcr.io": "tee",
|
||||
"eu.gcr.io": "hardcoded"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
The `docker-credential-tee` trick works with both `crane` and `docker`:
|
||||
|
||||
```bash
|
||||
$ crane manifest gcr.io/google-containers/pause > /dev/null
|
||||
{"ServerURL":"","Username":"_dcgcr_1_5_0_token","Secret":"<redacted>"}
|
||||
|
||||
$ docker pull gcr.io/google-containers/pause
|
||||
Using default tag: latest
|
||||
{"ServerURL":"","Username":"_dcgcr_1_5_0_token","Secret":"<redacted>"}
|
||||
latest: Pulling from google-containers/pause
|
||||
a3ed95caeb02: Pull complete
|
||||
4964c72cd024: Pull complete
|
||||
Digest: sha256:a78c2d6208eff9b672de43f880093100050983047b7b0afe0217d3656e1b0d5f
|
||||
Status: Downloaded newer image for gcr.io/google-containers/pause:latest
|
||||
gcr.io/google-containers/pause:latest
|
||||
```
|
||||
|
||||
## The Registry
|
||||
|
||||
There are two methods for authenticating against a registry:
|
||||
[token](https://docs.docker.com/registry/spec/auth/token/) and
|
||||
[oauth2](https://docs.docker.com/registry/spec/auth/oauth/).
|
||||
|
||||
Both methods are used to acquire an opaque `Bearer` token (or
|
||||
[RegistryToken](https://github.com/docker/cli/blob/ba63a92655c0bea4857b8d6cc4991498858b3c60/cli/config/types/authconfig.go#L21))
|
||||
to use in the `Authorization` header. The registry will return a `401
|
||||
Unauthorized` during the [version
|
||||
check](https://github.com/opencontainers/distribution-spec/blob/2c3975d1f03b67c9a0203199038adea0413f0573/spec.md#api-version-check)
|
||||
(or during normal operations) with a
[Www-Authenticate](https://tools.ietf.org/html/rfc7235#section-4.1) challenge
|
||||
indicating how to proceed.
|
||||
|
||||
### Token
|
||||
|
||||
If we get back an `AuthConfig` containing a [`Username/Password`](https://github.com/docker/cli/blob/ba63a92655c0bea4857b8d6cc4991498858b3c60/cli/config/types/authconfig.go#L5-L6)
|
||||
or
|
||||
[`Auth`](https://github.com/docker/cli/blob/ba63a92655c0bea4857b8d6cc4991498858b3c60/cli/config/types/authconfig.go#L7),
|
||||
we'll use the token method for authentication:
|
||||
|
||||
![basic](../../images/credhelper-basic.svg)
|
||||
|
||||
### OAuth 2
|
||||
|
||||
If we get back an `AuthConfig` containing an [`IdentityToken`](https://github.com/docker/cli/blob/ba63a92655c0bea4857b8d6cc4991498858b3c60/cli/config/types/authconfig.go#L18)
|
||||
we'll use the oauth2 method for authentication:
|
||||
|
||||
![oauth](../../images/credhelper-oauth.svg)
|
||||
|
||||
This happens when a credential helper returns a response with the
|
||||
[`Username`](https://github.com/docker/docker-credential-helpers/blob/f78081d1f7fef6ad74ad6b79368de6348386e591/credentials/credentials.go#L16)
|
||||
set to `<token>` (no, that's not a placeholder, the literal string `"<token>"`).
|
||||
It is unclear why: [moby/moby#36926](https://github.com/moby/moby/issues/36926).
|
||||
|
||||
We only support the oauth2 `grant_type` for `refresh_token` ([#629](https://github.com/google/go-containerregistry/issues/629)),
|
||||
since it's impossible to determine from the registry response whether we should
|
||||
use oauth, and the token method for authentication is widely implemented by
|
||||
registries.
|
|
@ -0,0 +1,26 @@
|
|||
// Copyright 2018 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package authn
|
||||
|
||||
// anonymous implements Authenticator for anonymous authentication.
|
||||
type anonymous struct{}
|
||||
|
||||
// Authorization implements Authenticator.
|
||||
func (a *anonymous) Authorization() (*AuthConfig, error) {
|
||||
return &AuthConfig{}, nil
|
||||
}
|
||||
|
||||
// Anonymous is a singleton Authenticator for providing anonymous auth.
|
||||
var Anonymous Authenticator = &anonymous{}
|
|
@ -0,0 +1,30 @@
|
|||
// Copyright 2018 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package authn
|
||||
|
||||
// auth is an Authenticator that simply returns the wrapped AuthConfig.
|
||||
type auth struct {
|
||||
config AuthConfig
|
||||
}
|
||||
|
||||
// FromConfig returns an Authenticator that just returns the given AuthConfig.
|
||||
func FromConfig(cfg AuthConfig) Authenticator {
|
||||
return &auth{cfg}
|
||||
}
|
||||
|
||||
// Authorization implements Authenticator.
|
||||
func (a *auth) Authorization() (*AuthConfig, error) {
|
||||
return &a.config, nil
|
||||
}
|
|
@ -0,0 +1,36 @@
|
|||
// Copyright 2018 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package authn
|
||||
|
||||
// AuthConfig contains authorization information for connecting to a Registry.
// Inlined what we use from github.com/docker/cli/cli/config/types.
type AuthConfig struct {
	// Username/Password hold plain credentials; Auth is the base64-encoded
	// "username:password" pair from docker's config file (see the package
	// README for the config-file format).
	Username string `json:"username,omitempty"`
	Password string `json:"password,omitempty"`
	Auth     string `json:"auth,omitempty"`

	// IdentityToken is used to authenticate the user and get
	// an access token for the registry.
	IdentityToken string `json:"identitytoken,omitempty"`

	// RegistryToken is a bearer token to be sent to a registry
	RegistryToken string `json:"registrytoken,omitempty"`
}

// Authenticator is used to authenticate Docker transports.
type Authenticator interface {
	// Authorization returns the value to use in an http transport's Authorization header.
	Authorization() (*AuthConfig, error)
}
|
|
@ -0,0 +1,29 @@
|
|||
// Copyright 2018 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package authn
|
||||
|
||||
// Basic implements Authenticator for basic authentication.
|
||||
type Basic struct {
|
||||
Username string
|
||||
Password string
|
||||
}
|
||||
|
||||
// Authorization implements Authenticator.
|
||||
func (b *Basic) Authorization() (*AuthConfig, error) {
|
||||
return &AuthConfig{
|
||||
Username: b.Username,
|
||||
Password: b.Password,
|
||||
}, nil
|
||||
}
|
|
@ -0,0 +1,27 @@
|
|||
// Copyright 2018 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package authn
|
||||
|
||||
// Bearer implements Authenticator for bearer authentication.
|
||||
type Bearer struct {
|
||||
Token string `json:"token"`
|
||||
}
|
||||
|
||||
// Authorization implements Authenticator.
|
||||
func (b *Bearer) Authorization() (*AuthConfig, error) {
|
||||
return &AuthConfig{
|
||||
RegistryToken: b.Token,
|
||||
}, nil
|
||||
}
|
|
@ -0,0 +1,17 @@
|
|||
// Copyright 2018 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package authn defines different methods of authentication for
|
||||
// talking to a container registry.
|
||||
package authn
|
97
vendor/github.com/google/go-containerregistry/pkg/authn/keychain.go
generated
vendored
Normal file
97
vendor/github.com/google/go-containerregistry/pkg/authn/keychain.go
generated
vendored
Normal file
|
@ -0,0 +1,97 @@
|
|||
// Copyright 2018 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package authn
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"os"
|
||||
|
||||
"github.com/docker/cli/cli/config"
|
||||
"github.com/docker/cli/cli/config/types"
|
||||
"github.com/google/go-containerregistry/pkg/logs"
|
||||
"github.com/google/go-containerregistry/pkg/name"
|
||||
)
|
||||
|
||||
// Resource represents a registry or repository that can be authenticated against.
|
||||
type Resource interface {
|
||||
// String returns the full string representation of the target, e.g.
|
||||
// gcr.io/my-project or just gcr.io.
|
||||
String() string
|
||||
|
||||
// RegistryStr returns just the registry portion of the target, e.g. for
|
||||
// gcr.io/my-project, this should just return gcr.io. This is needed to
|
||||
// pull out an appropriate hostname.
|
||||
RegistryStr() string
|
||||
}
|
||||
|
||||
// Keychain is an interface for resolving an image reference to a credential.
|
||||
type Keychain interface {
|
||||
// Resolve looks up the most appropriate credential for the specified target.
|
||||
Resolve(Resource) (Authenticator, error)
|
||||
}
|
||||
|
||||
// defaultKeychain implements Keychain with the semantics of the standard Docker
|
||||
// credential keychain.
|
||||
type defaultKeychain struct{}
|
||||
|
||||
var (
|
||||
// DefaultKeychain implements Keychain by interpreting the docker config file.
|
||||
DefaultKeychain Keychain = &defaultKeychain{}
|
||||
)
|
||||
|
||||
const (
|
||||
// DefaultAuthKey is the key used for dockerhub in config files, which
|
||||
// is hardcoded for historical reasons.
|
||||
DefaultAuthKey = "https://" + name.DefaultRegistry + "/v1/"
|
||||
)
|
||||
|
||||
// Resolve implements Keychain.
|
||||
func (dk *defaultKeychain) Resolve(target Resource) (Authenticator, error) {
|
||||
cf, err := config.Load(os.Getenv("DOCKER_CONFIG"))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// See:
|
||||
// https://github.com/google/ko/issues/90
|
||||
// https://github.com/moby/moby/blob/fc01c2b481097a6057bec3cd1ab2d7b4488c50c4/registry/config.go#L397-L404
|
||||
key := target.RegistryStr()
|
||||
if key == name.DefaultRegistry {
|
||||
key = DefaultAuthKey
|
||||
}
|
||||
|
||||
cfg, err := cf.GetAuthConfig(key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if logs.Enabled(logs.Debug) {
|
||||
b, err := json.Marshal(cfg)
|
||||
if err == nil {
|
||||
logs.Debug.Printf("defaultKeychain.Resolve(%q) = %s", key, string(b))
|
||||
}
|
||||
}
|
||||
|
||||
empty := types.AuthConfig{}
|
||||
if cfg == empty {
|
||||
return Anonymous, nil
|
||||
}
|
||||
return FromConfig(AuthConfig{
|
||||
Username: cfg.Username,
|
||||
Password: cfg.Password,
|
||||
Auth: cfg.Auth,
|
||||
IdentityToken: cfg.IdentityToken,
|
||||
RegistryToken: cfg.RegistryToken,
|
||||
}), nil
|
||||
}
|
41
vendor/github.com/google/go-containerregistry/pkg/authn/multikeychain.go
generated
vendored
Normal file
41
vendor/github.com/google/go-containerregistry/pkg/authn/multikeychain.go
generated
vendored
Normal file
|
@ -0,0 +1,41 @@
|
|||
// Copyright 2018 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package authn
|
||||
|
||||
type multiKeychain struct {
|
||||
keychains []Keychain
|
||||
}
|
||||
|
||||
// Assert that our multi-keychain implements Keychain.
|
||||
var _ (Keychain) = (*multiKeychain)(nil)
|
||||
|
||||
// NewMultiKeychain composes a list of keychains into one new keychain.
|
||||
func NewMultiKeychain(kcs ...Keychain) Keychain {
|
||||
return &multiKeychain{keychains: kcs}
|
||||
}
|
||||
|
||||
// Resolve implements Keychain.
|
||||
func (mk *multiKeychain) Resolve(target Resource) (Authenticator, error) {
|
||||
for _, kc := range mk.keychains {
|
||||
auth, err := kc.Resolve(target)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if auth != Anonymous {
|
||||
return auth, nil
|
||||
}
|
||||
}
|
||||
return Anonymous, nil
|
||||
}
|
77
vendor/github.com/google/go-containerregistry/pkg/internal/retry/retry.go
generated
vendored
Normal file
77
vendor/github.com/google/go-containerregistry/pkg/internal/retry/retry.go
generated
vendored
Normal file
|
@ -0,0 +1,77 @@
|
|||
// Copyright 2019 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package retry provides methods for retrying operations. It is a thin wrapper
|
||||
// around k8s.io/apimachinery/pkg/util/wait to make certain operations easier.
|
||||
package retry
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/google/go-containerregistry/pkg/internal/retry/wait"
|
||||
)
|
||||
|
||||
// Backoff is an alias of our own wait.Backoff to avoid name conflicts with
|
||||
// the kubernetes wait package. Typing retry.Backoff is aesier than fixing
|
||||
// the wrong import every time you use wait.Backoff.
|
||||
type Backoff = wait.Backoff
|
||||
|
||||
// This is implemented by several errors in the net package as well as our
|
||||
// transport.Error.
|
||||
type temporary interface {
|
||||
Temporary() bool
|
||||
}
|
||||
|
||||
// IsTemporary returns true if err implements Temporary() and it returns true.
|
||||
func IsTemporary(err error) bool {
|
||||
if err == context.DeadlineExceeded {
|
||||
return false
|
||||
}
|
||||
if te, ok := err.(temporary); ok && te.Temporary() {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// IsNotNil returns true if err is not nil.
|
||||
func IsNotNil(err error) bool {
|
||||
return err != nil
|
||||
}
|
||||
|
||||
// Predicate determines whether an error should be retried.
|
||||
type Predicate func(error) (retry bool)
|
||||
|
||||
// Retry retries a given function, f, until a predicate is satisfied, using
|
||||
// exponential backoff. If the predicate is never satisfied, it will return the
|
||||
// last error returned by f.
|
||||
func Retry(f func() error, p Predicate, backoff wait.Backoff) (err error) {
|
||||
if f == nil {
|
||||
return fmt.Errorf("nil f passed to retry")
|
||||
}
|
||||
if p == nil {
|
||||
return fmt.Errorf("nil p passed to retry")
|
||||
}
|
||||
|
||||
condition := func() (bool, error) {
|
||||
err = f()
|
||||
if p(err) {
|
||||
return false, nil
|
||||
}
|
||||
return true, err
|
||||
}
|
||||
|
||||
wait.ExponentialBackoff(backoff, condition)
|
||||
return
|
||||
}
|
123
vendor/github.com/google/go-containerregistry/pkg/internal/retry/wait/kubernetes_apimachinery_wait.go
generated
vendored
Normal file
123
vendor/github.com/google/go-containerregistry/pkg/internal/retry/wait/kubernetes_apimachinery_wait.go
generated
vendored
Normal file
|
@ -0,0 +1,123 @@
|
|||
/*
|
||||
Copyright 2014 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Package wait is a subset of k8s.io/apimachinery to avoid conflicts
|
||||
// in dependencies (specifically, logging).
|
||||
package wait
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"math/rand"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Jitter returns a time.Duration between duration and duration + maxFactor *
// duration.
//
// This allows clients to avoid converging on periodic behavior. If maxFactor
// is 0.0, a suggested default value will be chosen.
func Jitter(duration time.Duration, maxFactor float64) time.Duration {
	if maxFactor <= 0.0 {
		maxFactor = 1.0
	}
	return duration + time.Duration(rand.Float64()*maxFactor*float64(duration))
}

// ErrWaitTimeout is returned when the condition exited without success.
var ErrWaitTimeout = errors.New("timed out waiting for the condition")

// ConditionFunc returns true if the condition is satisfied, or an error
// if the loop should be aborted.
type ConditionFunc func() (done bool, err error)

// Backoff holds parameters applied to a Backoff function.
type Backoff struct {
	// The initial duration.
	Duration time.Duration
	// Duration is multiplied by factor each iteration, if factor is not zero
	// and the limits imposed by Steps and Cap have not been reached.
	// Should not be negative.
	// The jitter does not contribute to the updates to the duration parameter.
	Factor float64
	// The sleep at each iteration is the duration plus an additional
	// amount chosen uniformly at random from the interval between
	// zero and `jitter*duration`.
	Jitter float64
	// The remaining number of iterations in which the duration
	// parameter may change (but progress can be stopped earlier by
	// hitting the cap). If not positive, the duration is not
	// changed. Used for exponential backoff in combination with
	// Factor and Cap.
	Steps int
	// A limit on revised values of the duration parameter. If a
	// multiplication by the factor parameter would make the duration
	// exceed the cap then the duration is set to the cap and the
	// steps parameter is set to zero.
	Cap time.Duration
}

// Step (1) returns an amount of time to sleep determined by the
// original Duration and Jitter and (2) mutates the provided Backoff
// to update its Steps and Duration.
func (b *Backoff) Step() time.Duration {
	if b.Steps < 1 {
		// Out of steps: the duration no longer grows, only jitter applies.
		if b.Jitter > 0 {
			return Jitter(b.Duration, b.Jitter)
		}
		return b.Duration
	}
	b.Steps--

	duration := b.Duration

	// Grow the duration for the next call, clamping at Cap.
	if b.Factor != 0 {
		b.Duration = time.Duration(float64(b.Duration) * b.Factor)
		if b.Cap > 0 && b.Duration > b.Cap {
			b.Duration = b.Cap
			b.Steps = 0
		}
	}

	if b.Jitter > 0 {
		duration = Jitter(duration, b.Jitter)
	}
	return duration
}

// ExponentialBackoff repeats a condition check with exponential backoff.
//
// It repeatedly checks the condition and then sleeps, using `backoff.Step()`
// to determine the length of the sleep and adjust Duration and Steps.
// Stops and returns as soon as:
// 1. the condition check returns true or an error,
// 2. `backoff.Steps` checks of the condition have been done, or
// 3. a sleep truncated by the cap on duration has been completed.
// In case (1) the returned error is what the condition function returned.
// In all other cases, ErrWaitTimeout is returned.
func ExponentialBackoff(backoff Backoff, condition ConditionFunc) error {
	for backoff.Steps > 0 {
		ok, err := condition()
		if ok || err != nil {
			return err
		}
		if backoff.Steps == 1 {
			break
		}
		time.Sleep(backoff.Step())
	}
	return ErrWaitTimeout
}
|
|
@ -0,0 +1,39 @@
|
|||
// Copyright 2018 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package logs exposes the loggers used by this library.
|
||||
package logs
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"log"
|
||||
)
|
||||
|
||||
var (
	// Warn is used to log non-fatal errors.
	Warn = log.New(ioutil.Discard, "", log.LstdFlags)

	// Progress is used to log notable, successful events.
	Progress = log.New(ioutil.Discard, "", log.LstdFlags)

	// Debug is used to log information that is useful for debugging.
	Debug = log.New(ioutil.Discard, "", log.LstdFlags)
)

// Enabled reports whether the given logger writes anywhere other than
// ioutil.Discard. Callers can use it to skip building expensive log
// messages that would end up in /dev/null anyway.
func Enabled(l *log.Logger) bool {
	return l.Writer() != ioutil.Discard
}
|
|
@ -0,0 +1,3 @@
|
|||
# `name`
|
||||
|
||||
[![GoDoc](https://godoc.org/github.com/google/go-containerregistry/pkg/name?status.svg)](https://godoc.org/github.com/google/go-containerregistry/pkg/name)
|
|
@ -0,0 +1,43 @@
|
|||
// Copyright 2018 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package name
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
// stripRunesFn returns a strings.Map callback that deletes (maps to -1)
// every rune contained in runes and passes all other runes through
// unchanged.
func stripRunesFn(runes string) func(rune) rune {
	return func(r rune) rune {
		if strings.IndexRune(runes, r) >= 0 {
			return -1
		}
		return r
	}
}
|
||||
|
||||
// checkElement checks a given named element matches character and length restrictions.
|
||||
// Returns true if the given element adheres to the given restrictions, false otherwise.
|
||||
func checkElement(name, element, allowedRunes string, minRunes, maxRunes int) error {
|
||||
numRunes := utf8.RuneCountInString(element)
|
||||
if (numRunes < minRunes) || (maxRunes < numRunes) {
|
||||
return NewErrBadName("%s must be between %d and %d runes in length: %s", name, minRunes, maxRunes, element)
|
||||
} else if len(strings.Map(stripRunesFn(allowedRunes), element)) != 0 {
|
||||
return NewErrBadName("%s can only contain the runes `%s`: %s", name, allowedRunes, element)
|
||||
}
|
||||
return nil
|
||||
}
|
|
@ -0,0 +1,96 @@
|
|||
// Copyright 2018 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package name
|
||||
|
||||
import (
|
||||
"strings"
|
||||
)
|
||||
|
||||
const (
|
||||
// These have the form: sha256:<hex string>
|
||||
// TODO(dekkagaijin): replace with opencontainers/go-digest or docker/distribution's validation.
|
||||
digestChars = "sh:0123456789abcdef"
|
||||
digestDelim = "@"
|
||||
)
|
||||
|
||||
// Digest stores a digest name in a structured form.
|
||||
type Digest struct {
|
||||
Repository
|
||||
digest string
|
||||
original string
|
||||
}
|
||||
|
||||
// Ensure Digest implements Reference
|
||||
var _ Reference = (*Digest)(nil)
|
||||
|
||||
// Context implements Reference.
|
||||
func (d Digest) Context() Repository {
|
||||
return d.Repository
|
||||
}
|
||||
|
||||
// Identifier implements Reference.
|
||||
func (d Digest) Identifier() string {
|
||||
return d.DigestStr()
|
||||
}
|
||||
|
||||
// DigestStr returns the digest component of the Digest.
|
||||
func (d Digest) DigestStr() string {
|
||||
return d.digest
|
||||
}
|
||||
|
||||
// Name returns the name from which the Digest was derived.
|
||||
func (d Digest) Name() string {
|
||||
return d.Repository.Name() + digestDelim + d.DigestStr()
|
||||
}
|
||||
|
||||
// String returns the original input string.
|
||||
func (d Digest) String() string {
|
||||
return d.original
|
||||
}
|
||||
|
||||
func checkDigest(name string) error {
|
||||
return checkElement("digest", name, digestChars, 7+64, 7+64)
|
||||
}
|
||||
|
||||
// NewDigest returns a new Digest representing the given name.
|
||||
func NewDigest(name string, opts ...Option) (Digest, error) {
|
||||
// Split on "@"
|
||||
parts := strings.Split(name, digestDelim)
|
||||
if len(parts) != 2 {
|
||||
return Digest{}, NewErrBadName("a digest must contain exactly one '@' separator (e.g. registry/repository@digest) saw: %s", name)
|
||||
}
|
||||
base := parts[0]
|
||||
digest := parts[1]
|
||||
|
||||
// Always check that the digest is valid.
|
||||
if err := checkDigest(digest); err != nil {
|
||||
return Digest{}, err
|
||||
}
|
||||
|
||||
tag, err := NewTag(base, opts...)
|
||||
if err == nil {
|
||||
base = tag.Repository.Name()
|
||||
}
|
||||
|
||||
repo, err := NewRepository(base, opts...)
|
||||
if err != nil {
|
||||
return Digest{}, err
|
||||
}
|
||||
return Digest{
|
||||
Repository: repo,
|
||||
digest: digest,
|
||||
original: name,
|
||||
}, nil
|
||||
}
|
|
@ -0,0 +1,42 @@
|
|||
// Copyright 2018 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package name defines structured types for representing image references.
|
||||
//
|
||||
// What's in a name? For image references, not nearly enough!
|
||||
//
|
||||
// Image references look a lot like URLs, but they differ in that they don't
|
||||
// contain the scheme (http or https), they can end with a :tag or a @digest
|
||||
// (the latter being validated), and they perform defaulting for missing
|
||||
// components.
|
||||
//
|
||||
// Since image references don't contain the scheme, we do our best to infer
|
||||
// if we use http or https from the given hostname. We allow http fallback for
|
||||
// any host that looks like localhost (localhost, 127.0.0.1, ::1), ends in
|
||||
// ".local", or is in the "private" address space per RFC 1918. For everything
|
||||
// else, we assume https only. To override this heuristic, use the Insecure
|
||||
// option.
|
||||
//
|
||||
// Image references with a digest signal to us that we should verify the content
|
||||
// of the image matches the digest. E.g. when pulling a Digest reference, we'll
|
||||
// calculate the sha256 of the manifest returned by the registry and error out
|
||||
// if it doesn't match what we asked for.
|
||||
//
|
||||
// For defaulting, we interpret "ubuntu" as
|
||||
// "index.docker.io/library/ubuntu:latest" because we add the missing repo
|
||||
// "library", the missing registry "index.docker.io", and the missing tag
|
||||
// "latest". To disable this defaulting, use the StrictValidation option. This
|
||||
// is useful e.g. to only allow image references that explicitly set a tag or
|
||||
// digest, so that you don't accidentally pull "latest".
|
||||
package name
|
|
@ -0,0 +1,37 @@
|
|||
// Copyright 2018 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package name
|
||||
|
||||
import "fmt"
|
||||
|
||||
// ErrBadName is an error for when a bad docker name is supplied.
type ErrBadName struct {
	info string
}

// Error implements the error interface.
func (e *ErrBadName) Error() string {
	return e.info
}

// NewErrBadName returns a ErrBadName which returns the given formatted string from Error().
func NewErrBadName(fmtStr string, args ...interface{}) *ErrBadName {
	return &ErrBadName{info: fmt.Sprintf(fmtStr, args...)}
}

// IsErrBadName returns true if the given error is an ErrBadName.
func IsErrBadName(err error) bool {
	_, ok := err.(*ErrBadName)
	return ok
}
|
|
@ -0,0 +1,49 @@
|
|||
// Copyright 2018 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package name
|
||||
|
||||
// options collects the parsing knobs accumulated from functional Options.
type options struct {
	strict   bool // weak by default
	insecure bool // secure by default
}

// makeOptions folds the given functional options into a single options value.
func makeOptions(opts ...Option) options {
	var opt options
	for _, o := range opts {
		o(&opt)
	}
	return opt
}

// Option is a functional option for name parsing.
type Option func(*options)

// StrictValidation is an Option that requires image references to be fully
// specified; i.e. no defaulting for registry (dockerhub), repo (library),
// or tag (latest).
func StrictValidation(opts *options) {
	opts.strict = true
}

// WeakValidation is an Option that sets defaults when parsing names, see
// StrictValidation.
func WeakValidation(opts *options) {
	opts.strict = false
}

// Insecure is an Option that allows image references to be fetched without TLS.
func Insecure(opts *options) {
	opts.insecure = true
}
|
|
@ -0,0 +1,49 @@
|
|||
// Copyright 2018 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package name
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// Reference defines the interface that consumers use when they can
|
||||
// take either a tag or a digest.
|
||||
type Reference interface {
|
||||
fmt.Stringer
|
||||
|
||||
// Context accesses the Repository context of the reference.
|
||||
Context() Repository
|
||||
|
||||
// Identifier accesses the type-specific portion of the reference.
|
||||
Identifier() string
|
||||
|
||||
// Name is the fully-qualified reference name.
|
||||
Name() string
|
||||
|
||||
// Scope is the scope needed to access this reference.
|
||||
Scope(string) string
|
||||
}
|
||||
|
||||
// ParseReference parses the string as a reference, either by tag or digest.
|
||||
func ParseReference(s string, opts ...Option) (Reference, error) {
|
||||
if t, err := NewTag(s, opts...); err == nil {
|
||||
return t, nil
|
||||
}
|
||||
if d, err := NewDigest(s, opts...); err == nil {
|
||||
return d, nil
|
||||
}
|
||||
return nil, NewErrBadName("could not parse reference: " + s)
|
||||
|
||||
}
|
142
vendor/github.com/google/go-containerregistry/pkg/name/registry.go
generated
vendored
Normal file
142
vendor/github.com/google/go-containerregistry/pkg/name/registry.go
generated
vendored
Normal file
|
@ -0,0 +1,142 @@
|
|||
// Copyright 2018 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package name
|
||||
|
||||
import (
|
||||
"net"
|
||||
"net/url"
|
||||
"regexp"
|
||||
"strings"
|
||||
)
|
||||
|
||||
const (
	// DefaultRegistry is Docker Hub, assumed when a hostname is omitted.
	DefaultRegistry = "index.docker.io"
	// defaultRegistryAlias is rewritten to DefaultRegistry by NewRegistry.
	defaultRegistryAlias = "docker.io"
)

// Detect more complex forms of local references.
var reLocal = regexp.MustCompile(`.*\.local(?:host)?(?::\d{1,5})?$`)

// Detect the loopback IP (127.0.0.1)
var reLoopback = regexp.MustCompile(regexp.QuoteMeta("127.0.0.1"))

// Detect the loopback IPV6 (::1)
var reipv6Loopback = regexp.MustCompile(regexp.QuoteMeta("::1"))

// Registry stores a docker registry name in a structured form.
type Registry struct {
	insecure bool   // when true, Scheme() returns "http"
	registry string // hostname[:port]; empty means DefaultRegistry
}
|
||||
|
||||
// RegistryStr returns the registry component of the Registry.
|
||||
func (r Registry) RegistryStr() string {
|
||||
if r.registry != "" {
|
||||
return r.registry
|
||||
}
|
||||
return DefaultRegistry
|
||||
}
|
||||
|
||||
// Name returns the name from which the Registry was derived.
|
||||
func (r Registry) Name() string {
|
||||
return r.RegistryStr()
|
||||
}
|
||||
|
||||
func (r Registry) String() string {
|
||||
return r.Name()
|
||||
}
|
||||
|
||||
// Scope returns the scope required to access the registry.
|
||||
func (r Registry) Scope(string) string {
|
||||
// The only resource under 'registry' is 'catalog'. http://goo.gl/N9cN9Z
|
||||
return "registry:catalog:*"
|
||||
}
|
||||
|
||||
func (r Registry) isRFC1918() bool {
|
||||
ipStr := strings.Split(r.Name(), ":")[0]
|
||||
ip := net.ParseIP(ipStr)
|
||||
if ip == nil {
|
||||
return false
|
||||
}
|
||||
for _, cidr := range []string{"10.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16"} {
|
||||
_, block, _ := net.ParseCIDR(cidr)
|
||||
if block.Contains(ip) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Scheme returns https scheme for all the endpoints except localhost or when explicitly defined.
|
||||
func (r Registry) Scheme() string {
|
||||
if r.insecure {
|
||||
return "http"
|
||||
}
|
||||
if r.isRFC1918() {
|
||||
return "http"
|
||||
}
|
||||
if strings.HasPrefix(r.Name(), "localhost:") {
|
||||
return "http"
|
||||
}
|
||||
if reLocal.MatchString(r.Name()) {
|
||||
return "http"
|
||||
}
|
||||
if reLoopback.MatchString(r.Name()) {
|
||||
return "http"
|
||||
}
|
||||
if reipv6Loopback.MatchString(r.Name()) {
|
||||
return "http"
|
||||
}
|
||||
return "https"
|
||||
}
|
||||
|
||||
func checkRegistry(name string) error {
|
||||
// Per RFC 3986, registries (authorities) are required to be prefixed with "//"
|
||||
// url.Host == hostname[:port] == authority
|
||||
if url, err := url.Parse("//" + name); err != nil || url.Host != name {
|
||||
return NewErrBadName("registries must be valid RFC 3986 URI authorities: %s", name)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// NewRegistry returns a Registry based on the given name.
|
||||
// Strict validation requires explicit, valid RFC 3986 URI authorities to be given.
|
||||
func NewRegistry(name string, opts ...Option) (Registry, error) {
|
||||
opt := makeOptions(opts...)
|
||||
if opt.strict && len(name) == 0 {
|
||||
return Registry{}, NewErrBadName("strict validation requires the registry to be explicitly defined")
|
||||
}
|
||||
|
||||
if err := checkRegistry(name); err != nil {
|
||||
return Registry{}, err
|
||||
}
|
||||
|
||||
// Rewrite "docker.io" to "index.docker.io".
|
||||
// See: https://github.com/google/go-containerregistry/issues/68
|
||||
if name == defaultRegistryAlias {
|
||||
name = DefaultRegistry
|
||||
}
|
||||
|
||||
return Registry{registry: name, insecure: opt.insecure}, nil
|
||||
}
|
||||
|
||||
// NewInsecureRegistry returns an Insecure Registry based on the given name.
|
||||
//
|
||||
// Deprecated: Use the Insecure Option with NewRegistry instead.
|
||||
func NewInsecureRegistry(name string, opts ...Option) (Registry, error) {
|
||||
opts = append(opts, Insecure)
|
||||
return NewRegistry(name, opts...)
|
||||
}
|
121
vendor/github.com/google/go-containerregistry/pkg/name/repository.go
generated
vendored
Normal file
121
vendor/github.com/google/go-containerregistry/pkg/name/repository.go
generated
vendored
Normal file
|
@ -0,0 +1,121 @@
|
|||
// Copyright 2018 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package name
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
const (
	// defaultNamespace is prepended to bare Docker Hub names ("ubuntu" -> "library/ubuntu").
	defaultNamespace = "library"
	// repositoryChars is the set of characters allowed in a repository path.
	// TODO(dekkagaijin): use the docker/distribution regexes for validation.
	repositoryChars = "abcdefghijklmnopqrstuvwxyz0123456789_-./"
	// regRepoDelimiter separates the registry host from the repository path.
	regRepoDelimiter = "/"
)

// Repository stores a docker repository name in a structured form.
type Repository struct {
	Registry
	repository string // path within the registry, without the host prefix
}
|
||||
|
||||
// See https://docs.docker.com/docker-hub/official_repos
|
||||
func hasImplicitNamespace(repo string, reg Registry) bool {
|
||||
return !strings.ContainsRune(repo, '/') && reg.RegistryStr() == DefaultRegistry
|
||||
}
|
||||
|
||||
// RepositoryStr returns the repository component of the Repository.
|
||||
func (r Repository) RepositoryStr() string {
|
||||
if hasImplicitNamespace(r.repository, r.Registry) {
|
||||
return fmt.Sprintf("%s/%s", defaultNamespace, r.repository)
|
||||
}
|
||||
return r.repository
|
||||
}
|
||||
|
||||
// Name returns the name from which the Repository was derived.
|
||||
func (r Repository) Name() string {
|
||||
regName := r.Registry.Name()
|
||||
if regName != "" {
|
||||
return regName + regRepoDelimiter + r.RepositoryStr()
|
||||
}
|
||||
// TODO: As far as I can tell, this is unreachable.
|
||||
return r.RepositoryStr()
|
||||
}
|
||||
|
||||
func (r Repository) String() string {
|
||||
return r.Name()
|
||||
}
|
||||
|
||||
// Scope returns the scope required to perform the given action on the registry.
|
||||
// TODO(jonjohnsonjr): consider moving scopes to a separate package.
|
||||
func (r Repository) Scope(action string) string {
|
||||
return fmt.Sprintf("repository:%s:%s", r.RepositoryStr(), action)
|
||||
}
|
||||
|
||||
func checkRepository(repository string) error {
|
||||
return checkElement("repository", repository, repositoryChars, 2, 255)
|
||||
}
|
||||
|
||||
// NewRepository returns a new Repository representing the given name, according to the given strictness.
|
||||
func NewRepository(name string, opts ...Option) (Repository, error) {
|
||||
opt := makeOptions(opts...)
|
||||
if len(name) == 0 {
|
||||
return Repository{}, NewErrBadName("a repository name must be specified")
|
||||
}
|
||||
|
||||
var registry string
|
||||
repo := name
|
||||
parts := strings.SplitN(name, regRepoDelimiter, 2)
|
||||
if len(parts) == 2 && (strings.ContainsRune(parts[0], '.') || strings.ContainsRune(parts[0], ':')) {
|
||||
// The first part of the repository is treated as the registry domain
|
||||
// iff it contains a '.' or ':' character, otherwise it is all repository
|
||||
// and the domain defaults to Docker Hub.
|
||||
registry = parts[0]
|
||||
repo = parts[1]
|
||||
}
|
||||
|
||||
if err := checkRepository(repo); err != nil {
|
||||
return Repository{}, err
|
||||
}
|
||||
|
||||
reg, err := NewRegistry(registry, opts...)
|
||||
if err != nil {
|
||||
return Repository{}, err
|
||||
}
|
||||
if hasImplicitNamespace(repo, reg) && opt.strict {
|
||||
return Repository{}, NewErrBadName("strict validation requires the full repository path (missing 'library')")
|
||||
}
|
||||
return Repository{reg, repo}, nil
|
||||
}
|
||||
|
||||
// Tag returns a Tag in this Repository.
|
||||
func (r Repository) Tag(identifier string) Tag {
|
||||
t := Tag{
|
||||
tag: identifier,
|
||||
Repository: r,
|
||||
}
|
||||
t.original = t.Name()
|
||||
return t
|
||||
}
|
||||
|
||||
// Digest returns a Digest in this Repository.
|
||||
func (r Repository) Digest(identifier string) Digest {
|
||||
d := Digest{
|
||||
digest: identifier,
|
||||
Repository: r,
|
||||
}
|
||||
d.original = d.Name()
|
||||
return d
|
||||
}
|
|
@ -0,0 +1,108 @@
|
|||
// Copyright 2018 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package name
|
||||
|
||||
import (
|
||||
"strings"
|
||||
)
|
||||
|
||||
const (
	// defaultTag is assumed when no tag component is present.
	defaultTag = "latest"
	// TODO(dekkagaijin): use the docker/distribution regexes for validation.
	tagChars = "abcdefghijklmnopqrstuvwxyz0123456789_-.ABCDEFGHIJKLMNOPQRSTUVWXYZ"
	// tagDelim separates the repository from the tag.
	tagDelim = ":"
)

// Tag stores a docker tag name in a structured form.
type Tag struct {
	Repository
	tag      string // tag component; empty means defaultTag
	original string // the exact input string this Tag was parsed from
}

// Ensure Tag implements Reference
var _ Reference = (*Tag)(nil)
|
||||
|
||||
// Context implements Reference.
|
||||
func (t Tag) Context() Repository {
|
||||
return t.Repository
|
||||
}
|
||||
|
||||
// Identifier implements Reference.
|
||||
func (t Tag) Identifier() string {
|
||||
return t.TagStr()
|
||||
}
|
||||
|
||||
// TagStr returns the tag component of the Tag.
|
||||
func (t Tag) TagStr() string {
|
||||
if t.tag != "" {
|
||||
return t.tag
|
||||
}
|
||||
return defaultTag
|
||||
}
|
||||
|
||||
// Name returns the name from which the Tag was derived.
|
||||
func (t Tag) Name() string {
|
||||
return t.Repository.Name() + tagDelim + t.TagStr()
|
||||
}
|
||||
|
||||
// String returns the original input string.
|
||||
func (t Tag) String() string {
|
||||
return t.original
|
||||
}
|
||||
|
||||
// Scope returns the scope required to perform the given action on the tag.
|
||||
func (t Tag) Scope(action string) string {
|
||||
return t.Repository.Scope(action)
|
||||
}
|
||||
|
||||
func checkTag(name string) error {
|
||||
return checkElement("tag", name, tagChars, 1, 127)
|
||||
}
|
||||
|
||||
// NewTag returns a new Tag representing the given name, according to the given strictness.
|
||||
func NewTag(name string, opts ...Option) (Tag, error) {
|
||||
opt := makeOptions(opts...)
|
||||
base := name
|
||||
tag := ""
|
||||
|
||||
// Split on ":"
|
||||
parts := strings.Split(name, tagDelim)
|
||||
// Verify that we aren't confusing a tag for a hostname w/ port for the purposes of weak validation.
|
||||
if len(parts) > 1 && !strings.Contains(parts[len(parts)-1], regRepoDelimiter) {
|
||||
base = strings.Join(parts[:len(parts)-1], tagDelim)
|
||||
tag = parts[len(parts)-1]
|
||||
}
|
||||
|
||||
// We don't require a tag, but if we get one check it's valid,
|
||||
// even when not being strict.
|
||||
// If we are being strict, we want to validate the tag regardless in case
|
||||
// it's empty.
|
||||
if tag != "" || opt.strict {
|
||||
if err := checkTag(tag); err != nil {
|
||||
return Tag{}, err
|
||||
}
|
||||
}
|
||||
|
||||
repo, err := NewRepository(base, opts...)
|
||||
if err != nil {
|
||||
return Tag{}, err
|
||||
}
|
||||
return Tag{
|
||||
Repository: repo,
|
||||
tag: tag,
|
||||
original: name,
|
||||
}, nil
|
||||
}
|
129
vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/README.md
generated
vendored
Normal file
129
vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/README.md
generated
vendored
Normal file
|
@ -0,0 +1,129 @@
|
|||
# `transport`
|
||||
|
||||
[![GoDoc](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/transport?status.svg)](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/transport)
|
||||
|
||||
The [distribution protocol](https://github.com/opencontainers/distribution-spec) is fairly simple, but correctly [implementing authentication](../../../authn/README.md) is **hard**.
|
||||
|
||||
This package [implements](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/remote/transport#New) an [`http.RoundTripper`](https://godoc.org/net/http#RoundTripper)
|
||||
that transparently performs:
|
||||
* [Token
|
||||
Authentication](https://docs.docker.com/registry/spec/auth/token/) and
|
||||
* [OAuth2
|
||||
Authentication](https://docs.docker.com/registry/spec/auth/oauth/)
|
||||
|
||||
for registry clients.
|
||||
|
||||
## Raison d'être
|
||||
|
||||
> Why not just use the [`docker/distribution`](https://godoc.org/github.com/docker/distribution/registry/client/auth) client?
|
||||
|
||||
Great question! Mostly, because I don't want to depend on [`prometheus/client_golang`](https://github.com/prometheus/client_golang).
|
||||
|
||||
As a performance optimization, that client uses [a cache](https://github.com/docker/distribution/blob/a8371794149d1d95f1e846744b05c87f2f825e5a/registry/client/repository.go#L173) to keep track of a mapping between blob digests and their [descriptors](https://github.com/docker/distribution/blob/a8371794149d1d95f1e846744b05c87f2f825e5a/blobs.go#L57-L86). Unfortunately, the cache [uses prometheus](https://github.com/docker/distribution/blob/a8371794149d1d95f1e846744b05c87f2f825e5a/registry/storage/cache/cachedblobdescriptorstore.go#L44) to track hits and misses, so if you want to use that client you have to pull in all of prometheus, which is pretty large.
|
||||
|
||||
![docker/distribution](../../../../images/docker.dot.svg)
|
||||
|
||||
> Why does it matter if you depend on prometheus? Who cares?
|
||||
|
||||
It's generally polite to your downstream to reduce the number of dependencies your package requires:
|
||||
|
||||
* Downloading your package is faster, which helps our Australian friends and people on airplanes.
|
||||
* There is less code to compile, which speeds up builds and saves the planet from global warming.
|
||||
* You reduce the likelihood of inflicting dependency hell upon your consumers.
|
||||
* [Tim Hockin](https://twitter.com/thockin/status/958606077456654336) prefers it based on his experience working on Kubernetes, and he's a pretty smart guy.
|
||||
|
||||
> Okay, what about [`containerd/containerd`](https://godoc.org/github.com/containerd/containerd/remotes/docker)?
|
||||
|
||||
Similar reasons! That ends up pulling in grpc, protobuf, and logrus.
|
||||
|
||||
![containerd/containerd](../../../../images/containerd.dot.svg)
|
||||
|
||||
> Well... what about [`containers/image`](https://godoc.org/github.com/containers/image/docker)?
|
||||
|
||||
That just uses the `docker/distribution` client... and more!
|
||||
|
||||
![containers/image](../../../../images/containers.dot.svg)
|
||||
|
||||
> Wow, what about this package?
|
||||
|
||||
Of course, this package isn't perfect either. `transport` depends on `authn`,
|
||||
which in turn depends on docker's config file parsing and handling package,
|
||||
which you don't strictly need but almost certainly want if you're going to be
|
||||
interacting with a registry.
|
||||
|
||||
![google/go-containerregistry](../../../../images/ggcr.dot.svg)
|
||||
|
||||
*These graphs were generated by
|
||||
[`kisielk/godepgraph`](https://github.com/kisielk/godepgraph).*
|
||||
|
||||
## Usage
|
||||
|
||||
This is heavily used by the
|
||||
[`remote`](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/remote)
|
||||
package, which implements higher level image-centric functionality, but this
|
||||
package is useful if you want to interact directly with the registry to do
|
||||
something that `remote` doesn't support, e.g. [to handle schema 1
|
||||
images](https://github.com/google/go-containerregistry/pull/509).
|
||||
|
||||
This package also includes some [error
|
||||
handling](https://github.com/opencontainers/distribution-spec/blob/60be706c34ee7805bdd1d3d11affec53b0dfb8fb/spec.md#errors)
|
||||
facilities in the form of
|
||||
[`CheckError`](https://godoc.org/github.com/google/go-containerregistry/pkg/v1/remote/transport#CheckError),
|
||||
which will parse the response body into a structured error for unexpected http
|
||||
status codes.
|
||||
|
||||
Here's a "simple" program that writes the result of
|
||||
[listing tags](https://github.com/opencontainers/distribution-spec/blob/60be706c34ee7805bdd1d3d11affec53b0dfb8fb/spec.md#tags)
|
||||
for [`gcr.io/google-containers/pause`](https://gcr.io/google-containers/pause)
|
||||
to stdout.
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"io"
|
||||
"net/http"
|
||||
"os"
|
||||
|
||||
"github.com/google/go-containerregistry/pkg/authn"
|
||||
"github.com/google/go-containerregistry/pkg/name"
|
||||
"github.com/google/go-containerregistry/pkg/v1/remote/transport"
|
||||
)
|
||||
|
||||
func main() {
|
||||
repo, err := name.NewRepository("gcr.io/google-containers/pause")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
// Fetch credentials based on your docker config file, which is $HOME/.docker/config.json or $DOCKER_CONFIG.
|
||||
auth, err := authn.DefaultKeychain.Resolve(repo.Registry)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
// Construct an http.Client that is authorized to pull from gcr.io/google-containers/pause.
|
||||
scopes := []string{repo.Scope(transport.PullScope)}
|
||||
t, err := transport.New(repo.Registry, auth, http.DefaultTransport, scopes)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
client := &http.Client{Transport: t}
|
||||
|
||||
// Make the actual request.
|
||||
resp, err := client.Get("https://gcr.io/v2/google-containers/pause/tags/list")
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
// Assert that we get a 200, otherwise attempt to parse body as a structured error.
|
||||
if err := transport.CheckError(resp, http.StatusOK); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
// Write the response to stdout.
|
||||
if _, err := io.Copy(os.Stdout, resp.Body); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
```
|
63
vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/basic.go
generated
vendored
Normal file
63
vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/basic.go
generated
vendored
Normal file
|
@ -0,0 +1,63 @@
|
|||
// Copyright 2018 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package transport
|
||||
|
||||
import (
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"net/http"
|
||||
|
||||
"github.com/google/go-containerregistry/pkg/authn"
|
||||
)
|
||||
|
||||
// basicTransport decorates an inner RoundTripper with an Authorization
// header derived from the configured Authenticator.
type basicTransport struct {
	inner  http.RoundTripper
	auth   authn.Authenticator
	target string // host we are authenticating against; credentials are only sent here
}

var _ http.RoundTripper = (*basicTransport)(nil)
|
||||
|
||||
// RoundTrip implements http.RoundTripper
func (bt *basicTransport) RoundTrip(in *http.Request) (*http.Response, error) {
	if bt.auth != authn.Anonymous {
		auth, err := bt.auth.Authorization()
		if err != nil {
			return nil, err
		}

		// http.Client handles redirects at a layer above the http.RoundTripper
		// abstraction, so to avoid forwarding Authorization headers to places
		// we are redirected, only set it when the authorization header matches
		// the host with which we are interacting.
		// In case of redirect http.Client can use an empty Host, check URL too.
		if in.Host == bt.target || in.URL.Host == bt.target {
			// Credential precedence: registry token (Bearer), then
			// username/password (Basic), then a pre-encoded auth string (Basic).
			if bearer := auth.RegistryToken; bearer != "" {
				hdr := fmt.Sprintf("Bearer %s", bearer)
				in.Header.Set("Authorization", hdr)
			} else if user, pass := auth.Username, auth.Password; user != "" && pass != "" {
				delimited := fmt.Sprintf("%s:%s", user, pass)
				encoded := base64.StdEncoding.EncodeToString([]byte(delimited))
				hdr := fmt.Sprintf("Basic %s", encoded)
				in.Header.Set("Authorization", hdr)
			} else if token := auth.Auth; token != "" {
				hdr := fmt.Sprintf("Basic %s", token)
				in.Header.Set("Authorization", hdr)
			}
		}
	}
	// NOTE(review): this mutates the incoming request rather than cloning it,
	// which the http.RoundTripper contract discourages — confirm callers
	// don't reuse the request.
	in.Header.Set("User-Agent", transportName)
	return bt.inner.RoundTrip(in)
}
|
257
vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/bearer.go
generated
vendored
Normal file
257
vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/bearer.go
generated
vendored
Normal file
|
@ -0,0 +1,257 @@
|
|||
// Copyright 2018 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package transport
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
|
||||
"github.com/google/go-containerregistry/pkg/authn"
|
||||
"github.com/google/go-containerregistry/pkg/name"
|
||||
)
|
||||
|
||||
// bearerTransport performs token (Bearer) authentication against a registry,
// exchanging basic credentials for bearer tokens and refreshing them on 401.
type bearerTransport struct {
	// Wrapped by bearerTransport.
	inner http.RoundTripper
	// Basic credentials that we exchange for bearer tokens.
	basic authn.Authenticator
	// Holds the bearer response from the token service.
	bearer authn.AuthConfig
	// Registry to which we send bearer tokens.
	registry name.Registry
	// See https://tools.ietf.org/html/rfc6750#section-3
	realm string
	// See https://docs.docker.com/registry/spec/auth/token/
	service string
	scopes  []string
	// Scheme we should use, determined by ping response.
	scheme string
}

var _ http.RoundTripper = (*bearerTransport)(nil)

// portMap maps a URL scheme to its default port, used when canonicalizing
// host addresses for comparison.
var portMap = map[string]string{
	"http":  "80",
	"https": "443",
}
|
||||
|
||||
// RoundTrip implements http.RoundTripper
func (bt *bearerTransport) RoundTrip(in *http.Request) (*http.Response, error) {
	sendRequest := func() (*http.Response, error) {
		// http.Client handles redirects at a layer above the http.RoundTripper
		// abstraction, so to avoid forwarding Authorization headers to places
		// we are redirected, only set it when the authorization header matches
		// the registry with which we are interacting.
		// In case of redirect http.Client can use an empty Host, check URL too.
		if matchesHost(bt.registry, in, bt.scheme) {
			hdr := fmt.Sprintf("Bearer %s", bt.bearer.RegistryToken)
			in.Header.Set("Authorization", hdr)
		}
		return bt.inner.RoundTrip(in)
	}

	res, err := sendRequest()
	if err != nil {
		return nil, err
	}

	// Perform a token refresh() and retry the request in case the token has expired
	// NOTE(review): the 401 response body is not drained/closed before the
	// retry — confirm whether that prevents connection reuse.
	if res.StatusCode == http.StatusUnauthorized {
		if err = bt.refresh(); err != nil {
			return nil, err
		}
		return sendRequest()
	}

	return res, err
}
|
||||
|
||||
// It's unclear which authentication flow to use based purely on the protocol,
// so we rely on heuristics and fallbacks to support as many registries as possible.
// The basic token exchange is attempted first, falling back to the oauth flow.
// If the IdentityToken is set, this indicates that we should start with the oauth flow.
func (bt *bearerTransport) refresh() error {
	auth, err := bt.basic.Authorization()
	if err != nil {
		return err
	}

	// A static registry token requires no exchange; use it as-is.
	if auth.RegistryToken != "" {
		bt.bearer.RegistryToken = auth.RegistryToken
		return nil
	}

	var content []byte
	if auth.IdentityToken != "" {
		// If the secret being stored is an identity token,
		// the Username should be set to <token>, which indicates
		// we are using an oauth flow.
		content, err = bt.refreshOauth()
		if terr, ok := err.(*Error); ok && terr.StatusCode == http.StatusNotFound {
			// Note: Not all token servers implement oauth2.
			// If the request to the endpoint returns 404 using the HTTP POST method,
			// refer to Token Documentation for using the HTTP GET method supported by all token servers.
			content, err = bt.refreshBasic()
		}
	} else {
		content, err = bt.refreshBasic()
	}
	if err != nil {
		return err
	}

	// Some registries don't have "token" in the response. See #54.
	type tokenResponse struct {
		Token        string `json:"token"`
		AccessToken  string `json:"access_token"`
		RefreshToken string `json:"refresh_token"`
		// TODO: handle expiry?
	}

	var response tokenResponse
	if err := json.Unmarshal(content, &response); err != nil {
		return err
	}

	// Some registries set access_token instead of token.
	if response.AccessToken != "" {
		response.Token = response.AccessToken
	}

	// Find a token to turn into a Bearer authenticator
	if response.Token != "" {
		bt.bearer.RegistryToken = response.Token
	} else {
		return fmt.Errorf("no token in bearer response:\n%s", content)
	}

	// If we obtained a refresh token from the oauth flow, use that for refresh() now.
	if response.RefreshToken != "" {
		bt.basic = authn.FromConfig(authn.AuthConfig{
			IdentityToken: response.RefreshToken,
		})
	}

	return nil
}
|
||||
|
||||
func matchesHost(reg name.Registry, in *http.Request, scheme string) bool {
|
||||
canonicalHeaderHost := canonicalAddress(in.Host, scheme)
|
||||
canonicalURLHost := canonicalAddress(in.URL.Host, scheme)
|
||||
canonicalRegistryHost := canonicalAddress(reg.RegistryStr(), scheme)
|
||||
return canonicalHeaderHost == canonicalRegistryHost || canonicalURLHost == canonicalRegistryHost
|
||||
}
|
||||
|
||||
func canonicalAddress(host, scheme string) (address string) {
|
||||
// The host may be any one of:
|
||||
// - hostname
|
||||
// - hostname:port
|
||||
// - ipv4
|
||||
// - ipv4:port
|
||||
// - ipv6
|
||||
// - [ipv6]:port
|
||||
// As net.SplitHostPort returns an error if the host does not contain a port, we should only attempt
|
||||
// to call it when we know that the address contains a port
|
||||
if strings.Count(host, ":") == 1 || (strings.Count(host, ":") >= 2 && strings.Contains(host, "]:")) {
|
||||
hostname, port, err := net.SplitHostPort(host)
|
||||
if err != nil {
|
||||
return host
|
||||
}
|
||||
if port == "" {
|
||||
port = portMap[scheme]
|
||||
}
|
||||
|
||||
return net.JoinHostPort(hostname, port)
|
||||
}
|
||||
|
||||
return net.JoinHostPort(host, portMap[scheme])
|
||||
}
|
||||
|
||||
// refreshOauth exchanges credentials for a bearer token via the oauth2 flow
// (HTTP POST to the realm) and returns the raw token-service response body.
// https://docs.docker.com/registry/spec/auth/oauth/
func (bt *bearerTransport) refreshOauth() ([]byte, error) {
	auth, err := bt.basic.Authorization()
	if err != nil {
		return nil, err
	}

	u, err := url.Parse(bt.realm)
	if err != nil {
		return nil, err
	}

	v := url.Values{}
	v.Set("scope", strings.Join(bt.scopes, " "))
	v.Set("service", bt.service)
	v.Set("client_id", transportName)
	if auth.IdentityToken != "" {
		v.Set("grant_type", "refresh_token")
		v.Set("refresh_token", auth.IdentityToken)
	} else if auth.Username != "" && auth.Password != "" {
		// TODO(#629): This is unreachable.
		v.Set("grant_type", "password")
		v.Set("username", auth.Username)
		v.Set("password", auth.Password)
		v.Set("access_type", "offline")
	}

	client := http.Client{Transport: bt.inner}
	resp, err := client.PostForm(u.String(), v)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	if err := CheckError(resp, http.StatusOK); err != nil {
		return nil, err
	}

	return ioutil.ReadAll(resp.Body)
}

// refreshBasic exchanges basic credentials for a bearer token via an HTTP GET
// to the realm (the flow all token servers support) and returns the raw body.
// https://docs.docker.com/registry/spec/auth/token/
func (bt *bearerTransport) refreshBasic() ([]byte, error) {
	u, err := url.Parse(bt.realm)
	if err != nil {
		return nil, err
	}
	// Authenticate the token request itself with the basic credentials.
	b := &basicTransport{
		inner:  bt.inner,
		auth:   bt.basic,
		target: u.Host,
	}
	client := http.Client{Transport: b}

	u.RawQuery = url.Values{
		"scope":   bt.scopes,
		"service": []string{bt.service},
	}.Encode()

	resp, err := client.Get(u.String())
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	if err := CheckError(resp, http.StatusOK); err != nil {
		return nil, err
	}

	return ioutil.ReadAll(resp.Body)
}
|
18
vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/doc.go
generated
vendored
Normal file
18
vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/doc.go
generated
vendored
Normal file
|
@ -0,0 +1,18 @@
|
|||
// Copyright 2018 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package transport provides facilities for setting up an authenticated
|
||||
// http.RoundTripper given an Authenticator and base RoundTripper. See
|
||||
// transport.New for more information.
|
||||
package transport
|
175
vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/error.go
generated
vendored
Normal file
175
vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/error.go
generated
vendored
Normal file
|
@ -0,0 +1,175 @@
|
|||
// Copyright 2018 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package transport
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// The set of query string keys that we expect to send as part of the registry
|
||||
// protocol. Anything else is potentially dangerous to leak, as it's probably
|
||||
// from a redirect. These redirects often included tokens or signed URLs.
|
||||
var paramWhitelist = map[string]struct{}{
|
||||
// Token exchange
|
||||
"scope": struct{}{},
|
||||
"service": struct{}{},
|
||||
// Cross-repo mounting
|
||||
"mount": struct{}{},
|
||||
"from": struct{}{},
|
||||
// Layer PUT
|
||||
"digest": struct{}{},
|
||||
// Listing tags and catalog
|
||||
"n": struct{}{},
|
||||
"last": struct{}{},
|
||||
}
|
||||
|
||||
// Error implements error to support the following error specification:
|
||||
// https://github.com/docker/distribution/blob/master/docs/spec/api.md#errors
|
||||
type Error struct {
|
||||
Errors []Diagnostic `json:"errors,omitempty"`
|
||||
// The http status code returned.
|
||||
StatusCode int
|
||||
// The raw body if we couldn't understand it.
|
||||
rawBody string
|
||||
// The request that failed.
|
||||
request *http.Request
|
||||
}
|
||||
|
||||
// Check that Error implements error
|
||||
var _ error = (*Error)(nil)
|
||||
|
||||
// Error implements error
|
||||
func (e *Error) Error() string {
|
||||
prefix := ""
|
||||
if e.request != nil {
|
||||
prefix = fmt.Sprintf("%s %s: ", e.request.Method, redact(e.request.URL))
|
||||
}
|
||||
return prefix + e.responseErr()
|
||||
}
|
||||
|
||||
func (e *Error) responseErr() string {
|
||||
switch len(e.Errors) {
|
||||
case 0:
|
||||
if len(e.rawBody) == 0 {
|
||||
return fmt.Sprintf("unsupported status code %d", e.StatusCode)
|
||||
}
|
||||
return fmt.Sprintf("unsupported status code %d; body: %s", e.StatusCode, e.rawBody)
|
||||
case 1:
|
||||
return e.Errors[0].String()
|
||||
default:
|
||||
var errors []string
|
||||
for _, d := range e.Errors {
|
||||
errors = append(errors, d.String())
|
||||
}
|
||||
return fmt.Sprintf("multiple errors returned: %s",
|
||||
strings.Join(errors, "; "))
|
||||
}
|
||||
}
|
||||
|
||||
// Temporary returns whether the request that preceded the error is temporary.
|
||||
func (e *Error) Temporary() bool {
|
||||
if len(e.Errors) == 0 {
|
||||
return false
|
||||
}
|
||||
for _, d := range e.Errors {
|
||||
// TODO: Include other error types.
|
||||
if d.Code != BlobUploadInvalidErrorCode {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func redact(original *url.URL) *url.URL {
|
||||
qs := original.Query()
|
||||
for k, v := range qs {
|
||||
for i := range v {
|
||||
if _, ok := paramWhitelist[k]; !ok {
|
||||
// key is not in the whitelist
|
||||
v[i] = "REDACTED"
|
||||
}
|
||||
}
|
||||
}
|
||||
redacted := *original
|
||||
redacted.RawQuery = qs.Encode()
|
||||
return &redacted
|
||||
}
|
||||
|
||||
// Diagnostic represents a single error returned by a Docker registry interaction.
|
||||
type Diagnostic struct {
|
||||
Code ErrorCode `json:"code"`
|
||||
Message string `json:"message,omitempty"`
|
||||
Detail interface{} `json:"detail,omitempty"`
|
||||
}
|
||||
|
||||
// String stringifies the Diagnostic in the form: $Code: $Message[; $Detail]
|
||||
func (d Diagnostic) String() string {
|
||||
msg := fmt.Sprintf("%s: %s", d.Code, d.Message)
|
||||
if d.Detail != nil {
|
||||
msg = fmt.Sprintf("%s; %v", msg, d.Detail)
|
||||
}
|
||||
return msg
|
||||
}
|
||||
|
||||
// ErrorCode is an enumeration of supported error codes.
|
||||
type ErrorCode string
|
||||
|
||||
// The set of error conditions a registry may return:
|
||||
// https://github.com/docker/distribution/blob/master/docs/spec/api.md#errors-2
|
||||
const (
|
||||
BlobUnknownErrorCode ErrorCode = "BLOB_UNKNOWN"
|
||||
BlobUploadInvalidErrorCode ErrorCode = "BLOB_UPLOAD_INVALID"
|
||||
BlobUploadUnknownErrorCode ErrorCode = "BLOB_UPLOAD_UNKNOWN"
|
||||
DigestInvalidErrorCode ErrorCode = "DIGEST_INVALID"
|
||||
ManifestBlobUnknownErrorCode ErrorCode = "MANIFEST_BLOB_UNKNOWN"
|
||||
ManifestInvalidErrorCode ErrorCode = "MANIFEST_INVALID"
|
||||
ManifestUnknownErrorCode ErrorCode = "MANIFEST_UNKNOWN"
|
||||
ManifestUnverifiedErrorCode ErrorCode = "MANIFEST_UNVERIFIED"
|
||||
NameInvalidErrorCode ErrorCode = "NAME_INVALID"
|
||||
NameUnknownErrorCode ErrorCode = "NAME_UNKNOWN"
|
||||
SizeInvalidErrorCode ErrorCode = "SIZE_INVALID"
|
||||
TagInvalidErrorCode ErrorCode = "TAG_INVALID"
|
||||
UnauthorizedErrorCode ErrorCode = "UNAUTHORIZED"
|
||||
DeniedErrorCode ErrorCode = "DENIED"
|
||||
UnsupportedErrorCode ErrorCode = "UNSUPPORTED"
|
||||
)
|
||||
|
||||
// CheckError returns a structured error if the response status is not in codes.
|
||||
func CheckError(resp *http.Response, codes ...int) error {
|
||||
for _, code := range codes {
|
||||
if resp.StatusCode == code {
|
||||
// This is one of the supported status codes.
|
||||
return nil
|
||||
}
|
||||
}
|
||||
b, err := ioutil.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// https://github.com/docker/distribution/blob/master/docs/spec/api.md#errors
|
||||
structuredError := &Error{}
|
||||
if err := json.Unmarshal(b, structuredError); err != nil {
|
||||
structuredError.rawBody = string(b)
|
||||
}
|
||||
structuredError.StatusCode = resp.StatusCode
|
||||
structuredError.request = resp.Request
|
||||
return structuredError
|
||||
}
|
44
vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/logger.go
generated
vendored
Normal file
44
vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/logger.go
generated
vendored
Normal file
|
@ -0,0 +1,44 @@
|
|||
package transport
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/http/httputil"
|
||||
|
||||
"github.com/google/go-containerregistry/pkg/logs"
|
||||
)
|
||||
|
||||
type logTransport struct {
|
||||
inner http.RoundTripper
|
||||
}
|
||||
|
||||
// NewLogger returns a transport that logs requests and responses to
|
||||
// github.com/google/go-containerregistry/pkg/logs.Debug.
|
||||
func NewLogger(inner http.RoundTripper) http.RoundTripper {
|
||||
return &logTransport{inner}
|
||||
}
|
||||
|
||||
func (t *logTransport) RoundTrip(in *http.Request) (out *http.Response, err error) {
|
||||
// Inspired by: github.com/motemen/go-loghttp
|
||||
logs.Debug.Printf("--> %s %s", in.Method, in.URL)
|
||||
b, err := httputil.DumpRequestOut(in, true)
|
||||
if err == nil {
|
||||
logs.Debug.Println(string(b))
|
||||
}
|
||||
out, err = t.inner.RoundTrip(in)
|
||||
if err != nil {
|
||||
logs.Debug.Printf("<-- %v %s", err, in.URL)
|
||||
}
|
||||
if out != nil {
|
||||
msg := fmt.Sprintf("<-- %d", out.StatusCode)
|
||||
if out.Request != nil {
|
||||
msg = fmt.Sprintf("%s %s", msg, out.Request.URL)
|
||||
}
|
||||
logs.Debug.Print(msg)
|
||||
b, err := httputil.DumpResponse(out, true)
|
||||
if err == nil {
|
||||
logs.Debug.Println(string(b))
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
122
vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/ping.go
generated
vendored
Normal file
122
vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/ping.go
generated
vendored
Normal file
|
@ -0,0 +1,122 @@
|
|||
// Copyright 2018 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package transport
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"strings"
|
||||
|
||||
"github.com/google/go-containerregistry/pkg/name"
|
||||
)
|
||||
|
||||
type challenge string
|
||||
|
||||
const (
|
||||
anonymous challenge = "anonymous"
|
||||
basic challenge = "basic"
|
||||
bearer challenge = "bearer"
|
||||
)
|
||||
|
||||
type pingResp struct {
|
||||
challenge challenge
|
||||
|
||||
// Following the challenge there are often key/value pairs
|
||||
// e.g. Bearer service="gcr.io",realm="https://auth.gcr.io/v36/tokenz"
|
||||
parameters map[string]string
|
||||
|
||||
// The registry's scheme to use. Communicates whether we fell back to http.
|
||||
scheme string
|
||||
}
|
||||
|
||||
func (c challenge) Canonical() challenge {
|
||||
return challenge(strings.ToLower(string(c)))
|
||||
}
|
||||
|
||||
func parseChallenge(suffix string) map[string]string {
|
||||
kv := make(map[string]string)
|
||||
for _, token := range strings.Split(suffix, ",") {
|
||||
// Trim any whitespace around each token.
|
||||
token = strings.Trim(token, " ")
|
||||
|
||||
// Break the token into a key/value pair
|
||||
if parts := strings.SplitN(token, "=", 2); len(parts) == 2 {
|
||||
// Unquote the value, if it is quoted.
|
||||
kv[parts[0]] = strings.Trim(parts[1], `"`)
|
||||
} else {
|
||||
// If there was only one part, treat is as a key with an empty value
|
||||
kv[token] = ""
|
||||
}
|
||||
}
|
||||
return kv
|
||||
}
|
||||
|
||||
func ping(reg name.Registry, t http.RoundTripper) (*pingResp, error) {
|
||||
client := http.Client{Transport: t}
|
||||
|
||||
// This first attempts to use "https" for every request, falling back to http
|
||||
// if the registry matches our localhost heuristic or if it is intentionally
|
||||
// set to insecure via name.NewInsecureRegistry.
|
||||
schemes := []string{"https"}
|
||||
if reg.Scheme() == "http" {
|
||||
schemes = append(schemes, "http")
|
||||
}
|
||||
|
||||
var connErr error
|
||||
for _, scheme := range schemes {
|
||||
url := fmt.Sprintf("%s://%s/v2/", scheme, reg.Name())
|
||||
resp, err := client.Get(url)
|
||||
if err != nil {
|
||||
connErr = err
|
||||
// Potentially retry with http.
|
||||
continue
|
||||
}
|
||||
defer func() {
|
||||
// By draining the body, make sure to reuse the connection made by
|
||||
// the ping for the following access to the registry
|
||||
io.Copy(ioutil.Discard, resp.Body)
|
||||
resp.Body.Close()
|
||||
}()
|
||||
|
||||
switch resp.StatusCode {
|
||||
case http.StatusOK:
|
||||
// If we get a 200, then no authentication is needed.
|
||||
return &pingResp{
|
||||
challenge: anonymous,
|
||||
scheme: scheme,
|
||||
}, nil
|
||||
case http.StatusUnauthorized:
|
||||
wac := resp.Header.Get("WWW-Authenticate")
|
||||
if parts := strings.SplitN(wac, " ", 2); len(parts) == 2 {
|
||||
// If there are two parts, then parse the challenge parameters.
|
||||
return &pingResp{
|
||||
challenge: challenge(parts[0]).Canonical(),
|
||||
parameters: parseChallenge(parts[1]),
|
||||
scheme: scheme,
|
||||
}, nil
|
||||
}
|
||||
// Otherwise, just return the challenge without parameters.
|
||||
return &pingResp{
|
||||
challenge: challenge(wac).Canonical(),
|
||||
scheme: scheme,
|
||||
}, nil
|
||||
default:
|
||||
return nil, CheckError(resp, http.StatusOK, http.StatusUnauthorized)
|
||||
}
|
||||
}
|
||||
return nil, connErr
|
||||
}
|
88
vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/retry.go
generated
vendored
Normal file
88
vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/retry.go
generated
vendored
Normal file
|
@ -0,0 +1,88 @@
|
|||
// Copyright 2018 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package transport
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"github.com/google/go-containerregistry/pkg/internal/retry"
|
||||
)
|
||||
|
||||
// Sleep for 0.1, 0.3, 0.9, 2.7 seconds. This should cover networking blips.
|
||||
var defaultBackoff = retry.Backoff{
|
||||
Duration: 100 * time.Millisecond,
|
||||
Factor: 3.0,
|
||||
Jitter: 0.1,
|
||||
Steps: 5,
|
||||
}
|
||||
|
||||
var _ http.RoundTripper = (*retryTransport)(nil)
|
||||
|
||||
// retryTransport wraps a RoundTripper and retries temporary network errors.
|
||||
type retryTransport struct {
|
||||
inner http.RoundTripper
|
||||
backoff retry.Backoff
|
||||
predicate retry.Predicate
|
||||
}
|
||||
|
||||
// Option is a functional option for retryTransport.
|
||||
type Option func(*options)
|
||||
|
||||
type options struct {
|
||||
backoff retry.Backoff
|
||||
predicate retry.Predicate
|
||||
}
|
||||
|
||||
// WithRetryBackoff sets the backoff for retry operations.
|
||||
func WithRetryBackoff(backoff retry.Backoff) Option {
|
||||
return func(o *options) {
|
||||
o.backoff = backoff
|
||||
}
|
||||
}
|
||||
|
||||
// WithRetryPredicate sets the predicate for retry operations.
|
||||
func WithRetryPredicate(predicate func(error) bool) Option {
|
||||
return func(o *options) {
|
||||
o.predicate = predicate
|
||||
}
|
||||
}
|
||||
|
||||
// NewRetry returns a transport that retries errors.
|
||||
func NewRetry(inner http.RoundTripper, opts ...Option) http.RoundTripper {
|
||||
o := &options{
|
||||
backoff: defaultBackoff,
|
||||
predicate: retry.IsTemporary,
|
||||
}
|
||||
|
||||
for _, opt := range opts {
|
||||
opt(o)
|
||||
}
|
||||
|
||||
return &retryTransport{
|
||||
inner: inner,
|
||||
backoff: o.backoff,
|
||||
predicate: o.predicate,
|
||||
}
|
||||
}
|
||||
|
||||
func (t *retryTransport) RoundTrip(in *http.Request) (out *http.Response, err error) {
|
||||
roundtrip := func() error {
|
||||
out, err = t.inner.RoundTrip(in)
|
||||
return err
|
||||
}
|
||||
retry.Retry(roundtrip, t.predicate, t.backoff)
|
||||
return
|
||||
}
|
44
vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/schemer.go
generated
vendored
Normal file
44
vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/schemer.go
generated
vendored
Normal file
|
@ -0,0 +1,44 @@
|
|||
// Copyright 2019 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package transport
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
|
||||
"github.com/google/go-containerregistry/pkg/name"
|
||||
)
|
||||
|
||||
type schemeTransport struct {
|
||||
// Scheme we should use, determined by ping response.
|
||||
scheme string
|
||||
|
||||
// Registry we're talking to.
|
||||
registry name.Registry
|
||||
|
||||
// Wrapped by schemeTransport.
|
||||
inner http.RoundTripper
|
||||
}
|
||||
|
||||
// RoundTrip implements http.RoundTripper
|
||||
func (st *schemeTransport) RoundTrip(in *http.Request) (*http.Response, error) {
|
||||
// When we ping() the registry, we determine whether to use http or https
|
||||
// based on which scheme was successful. That is only valid for the
|
||||
// registry server and not e.g. a separate token server or blob storage,
|
||||
// so we should only override the scheme if the host is the registry.
|
||||
if matchesHost(st.registry, in, st.scheme) {
|
||||
in.URL.Scheme = st.scheme
|
||||
}
|
||||
return st.inner.RoundTrip(in)
|
||||
}
|
24
vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/scope.go
generated
vendored
Normal file
24
vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/scope.go
generated
vendored
Normal file
|
@ -0,0 +1,24 @@
|
|||
// Copyright 2018 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package transport
|
||||
|
||||
// Scopes suitable to qualify each Repository
|
||||
const (
|
||||
PullScope string = "pull"
|
||||
PushScope string = "push,pull"
|
||||
// For now DELETE is PUSH, which is the read/write ACL.
|
||||
DeleteScope string = PushScope
|
||||
CatalogScope string = "catalog"
|
||||
)
|
91
vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/transport.go
generated
vendored
Normal file
91
vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/transport.go
generated
vendored
Normal file
|
@ -0,0 +1,91 @@
|
|||
// Copyright 2018 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package transport
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
|
||||
"github.com/google/go-containerregistry/pkg/authn"
|
||||
"github.com/google/go-containerregistry/pkg/name"
|
||||
)
|
||||
|
||||
// New returns a new RoundTripper based on the provided RoundTripper that has been
|
||||
// setup to authenticate with the remote registry "reg", in the capacity
|
||||
// laid out by the specified scopes.
|
||||
func New(reg name.Registry, auth authn.Authenticator, t http.RoundTripper, scopes []string) (http.RoundTripper, error) {
|
||||
// The handshake:
|
||||
// 1. Use "t" to ping() the registry for the authentication challenge.
|
||||
//
|
||||
// 2a. If we get back a 200, then simply use "t".
|
||||
//
|
||||
// 2b. If we get back a 401 with a Basic challenge, then use a transport
|
||||
// that just attachs auth each roundtrip.
|
||||
//
|
||||
// 2c. If we get back a 401 with a Bearer challenge, then use a transport
|
||||
// that attaches a bearer token to each request, and refreshes is on 401s.
|
||||
// Perform an initial refresh to seed the bearer token.
|
||||
|
||||
// First we ping the registry to determine the parameters of the authentication handshake
|
||||
// (if one is even necessary).
|
||||
pr, err := ping(reg, t)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Wrap the given transport in transports that use an appropriate scheme,
|
||||
// (based on the ping response) and set the user agent.
|
||||
t = &useragentTransport{
|
||||
inner: &schemeTransport{
|
||||
scheme: pr.scheme,
|
||||
registry: reg,
|
||||
inner: t,
|
||||
},
|
||||
}
|
||||
|
||||
switch pr.challenge.Canonical() {
|
||||
case anonymous:
|
||||
return t, nil
|
||||
case basic:
|
||||
return &basicTransport{inner: t, auth: auth, target: reg.RegistryStr()}, nil
|
||||
case bearer:
|
||||
// We require the realm, which tells us where to send our Basic auth to turn it into Bearer auth.
|
||||
realm, ok := pr.parameters["realm"]
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("malformed www-authenticate, missing realm: %v", pr.parameters)
|
||||
}
|
||||
service, ok := pr.parameters["service"]
|
||||
if !ok {
|
||||
// If the service parameter is not specified, then default it to the registry
|
||||
// with which we are talking.
|
||||
service = reg.String()
|
||||
}
|
||||
bt := &bearerTransport{
|
||||
inner: t,
|
||||
basic: auth,
|
||||
realm: realm,
|
||||
registry: reg,
|
||||
service: service,
|
||||
scopes: scopes,
|
||||
scheme: pr.scheme,
|
||||
}
|
||||
if err := bt.refresh(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return bt, nil
|
||||
default:
|
||||
return nil, fmt.Errorf("unrecognized challenge: %s", pr.challenge)
|
||||
}
|
||||
}
|
32
vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/useragent.go
generated
vendored
Normal file
32
vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/useragent.go
generated
vendored
Normal file
|
@ -0,0 +1,32 @@
|
|||
// Copyright 2019 Google LLC All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package transport
|
||||
|
||||
import "net/http"
|
||||
|
||||
const (
|
||||
transportName = "go-containerregistry"
|
||||
)
|
||||
|
||||
type useragentTransport struct {
|
||||
// Wrapped by useragentTransport.
|
||||
inner http.RoundTripper
|
||||
}
|
||||
|
||||
// RoundTrip implements http.RoundTripper
|
||||
func (ut *useragentTransport) RoundTrip(in *http.Request) (*http.Response, error) {
|
||||
in.Header.Set("User-Agent", transportName)
|
||||
return ut.inner.RoundTrip(in)
|
||||
}
|
|
@ -0,0 +1,20 @@
|
|||
Adam H. Leventhal <adam.leventhal@gmail.com>
|
||||
Daniel Martà <mvdan@mvdan.cc>
|
||||
Fazlul Shahriar <fshahriar@gmail.com>
|
||||
Frederick Akalin <akalin@gmail.com>
|
||||
Google Inc.
|
||||
Haitao Li <lihaitao@gmail.com>
|
||||
Jakob Unterwurzacher <jakobunt@gmail.com>
|
||||
James D. Nurmi <james@abneptis.com>
|
||||
Jeff <leterip@me.com>
|
||||
Kaoet Ibe <kaoet.ibe@outlook.com>
|
||||
Kirill Smelkov <kirr@nexedi.com>
|
||||
Logan Hanks <logan@bitcasa.com>
|
||||
Maria Shaldibina <mshaldibina@pivotal.io>
|
||||
Nick Cooper <gh@smoogle.org>
|
||||
Patrick Crosby <pcrosby@gmail.com>
|
||||
Paul Jolly <paul@myitcv.org.uk>
|
||||
Paul Warren <paul.warren@emc.com>
|
||||
Shayan Pooya <shayan@arista.com>
|
||||
Valient Gough <vgough@pobox.com>
|
||||
Yongwoo Park <nnnlife@gmail.com>
|
|
@ -0,0 +1,30 @@
|
|||
// New BSD License
|
||||
//
|
||||
// Copyright (c) 2010 the Go-FUSE Authors. All rights reserved.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Ivan Krasin nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
//
|
|
@ -0,0 +1,60 @@
|
|||
|
||||
Objective
|
||||
=========
|
||||
|
||||
A high-performance FUSE API that minimizes pitfalls with writing
|
||||
correct filesystems.
|
||||
|
||||
Decisions
|
||||
=========
|
||||
|
||||
* Nodes contain references to their children. This is useful
|
||||
because most filesystems will need to construct tree-like
|
||||
structures.
|
||||
|
||||
* Nodes contain references to their parents. As a result, we can
|
||||
derive the path for each Inode, and there is no need for a
|
||||
separate PathFS.
|
||||
|
||||
* Nodes can be "persistent", meaning their lifetime is not under
|
||||
control of the kernel. This is useful for constructing FS trees
|
||||
in advance, rather than driven by LOOKUP.
|
||||
|
||||
* The NodeID for FS tree node must be defined on creation and are
|
||||
immutable. By contrast, reusing NodeIds (eg. rsc/bazil FUSE, as
|
||||
well as old go-fuse/fuse/nodefs) needs extra synchronization to
|
||||
avoid races with notify and FORGET, and makes handling the inode
|
||||
Generation more complicated.
|
||||
|
||||
* The mode of an Inode is defined on creation. Files cannot change
|
||||
type during their lifetime. This also prevents the common error
|
||||
of forgetting to return the filetype in Lookup/GetAttr.
|
||||
|
||||
* The NodeID (used for communicating with kernel) is equal to
|
||||
Attr.Ino (value shown in Stat and Lstat return values.).
|
||||
|
||||
* No global treelock, to ensure scalability.
|
||||
|
||||
* Support for hard links. libfuse doesn't support this in the
|
||||
high-level API. Extra care for race conditions is needed when
|
||||
looking up the same file through different paths.
|
||||
|
||||
* do not issue Notify{Entry,Delete} as part of
|
||||
AddChild/RmChild/MvChild: because NodeIDs are unique and
|
||||
immutable, there is no confusion about which nodes are
|
||||
invalidated, and the notification doesn't have to happen under
|
||||
lock.
|
||||
|
||||
* Directory reading uses the DirStream. Semantics for rewinding
|
||||
directory reads, and adding files after opening (but before
|
||||
reading) are handled automatically. No support for directory
|
||||
seeks.
|
||||
|
||||
* Method names are based on syscall names. Where there is no
|
||||
syscall (eg. "open directory"), we bias towards writing
|
||||
everything together (Opendir)
|
||||
|
||||
To do/To decide
|
||||
=========
|
||||
|
||||
* Symlink []byte vs string.
|
|
@ -0,0 +1,613 @@
|
|||
// Copyright 2019 the Go-FUSE Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package fs provides infrastructure to build tree-organized filesystems.
|
||||
//
|
||||
// Structure of a file system implementation
|
||||
//
|
||||
// To create a file system, you should first define types for the
|
||||
// nodes of the file system tree.
|
||||
//
|
||||
// struct myNode {
|
||||
// fs.Inode
|
||||
// }
|
||||
//
|
||||
// // Node types must be InodeEmbedders
|
||||
// var _ = (fs.InodeEmbedder)((*myNode)(nil))
|
||||
//
|
||||
// // Node types should implement some file system operations, eg. Lookup
|
||||
// var _ = (fs.NodeLookuper)((*myNode)(nil))
|
||||
//
|
||||
// func (n *myNode) Lookup(ctx context.Context, name string, ... ) (*Inode, syscall.Errno) {
|
||||
// ops := myNode{}
|
||||
// return n.NewInode(ctx, &ops, fs.StableAttr{Mode: syscall.S_IFDIR}), 0
|
||||
// }
|
||||
//
|
||||
// The method names are inspired on the system call names, so we have
|
||||
// Listxattr rather than ListXAttr.
|
||||
//
|
||||
// the file system is mounted by calling mount on the root of the tree,
|
||||
//
|
||||
// server, err := fs.Mount("/tmp/mnt", &myNode{}, &fs.Options{})
|
||||
// ..
|
||||
// // start serving the file system
|
||||
// server.Wait()
|
||||
//
|
||||
// Error handling
|
||||
//
|
||||
// All error reporting must use the syscall.Errno type. This is an
|
||||
// integer with predefined error codes, where the value 0 (`OK`)
|
||||
// should be used to indicate success.
|
||||
//
|
||||
// File system concepts
|
||||
//
|
||||
// The FUSE API is very similar to Linux' internal VFS API for
|
||||
// defining file systems in the kernel. It is therefore useful to
|
||||
// understand some terminology.
|
||||
//
|
||||
// File content: the raw bytes that we store inside regular files.
|
||||
//
|
||||
// Path: a /-separated string path that describes location of a node
|
||||
// in the file system tree. For example
|
||||
//
|
||||
// dir1/file
|
||||
//
|
||||
// describes path root → dir1 → file.
|
||||
//
|
||||
// There can be several paths leading from tree root to a particular node,
|
||||
// known as hard-linking, for example
|
||||
//
|
||||
// root
|
||||
// / \
|
||||
// dir1 dir2
|
||||
// \ /
|
||||
// file
|
||||
//
|
||||
// Inode: ("index node") points to the file content, and stores
|
||||
// metadata (size, timestamps) about a file or directory. Each
|
||||
// inode has a type (directory, symlink, regular file, etc.) and
|
||||
// an identity (a 64-bit number, unique to the file
|
||||
// system). Directories can have children.
|
||||
//
|
||||
// The inode in the kernel is represented in Go-FUSE as the Inode
|
||||
// type.
|
||||
//
|
||||
// While common OS APIs are phrased in terms of paths (strings), the
|
||||
// precise semantics of a file system are better described in terms of
|
||||
// Inodes. This allows us to specify what happens in corner cases,
|
||||
// such as writing data to deleted files.
|
||||
//
|
||||
// File descriptor: a handle returned to opening a file. File
|
||||
// descriptors always refer to a single inode.
|
||||
//
|
||||
// Dirent: a dirent maps (parent inode number, name string) tuple to
|
||||
// child inode, thus representing a parent/child relation (or the
|
||||
// absence thereof). Dirents do not have an equivalent type inside
|
||||
// Go-FUSE, but the result of Lookup operation essentially is a
|
||||
// dirent, which the kernel puts in a cache.
|
||||
//
|
||||
//
|
||||
// Kernel caching
|
||||
//
|
||||
// The kernel caches several pieces of information from the FUSE process:
|
||||
//
|
||||
// 1. File contents: enabled with the fuse.FOPEN_KEEP_CACHE return flag
|
||||
// in Open, manipulated with ReadCache and WriteCache, and invalidated
|
||||
// with Inode.NotifyContent
|
||||
//
|
||||
// 2. File Attributes (size, mtime, etc.): controlled with the
|
||||
// attribute timeout fields in fuse.AttrOut and fuse.EntryOut, which
|
||||
// can be populated from Getattr and Lookup
|
||||
//
|
||||
// 3. Directory entries (parent/child relations in the FS tree):
|
||||
// controlled with the timeout fields in fuse.EntryOut, and
|
||||
// invalidated with Inode.NotifyEntry and Inode.NotifyDelete.
|
||||
//
|
||||
// Without Directory Entry timeouts, every operation on file "a/b/c"
|
||||
// must first do lookups for "a", "a/b" and "a/b/c", which is
|
||||
// expensive because of context switches between the kernel and the
|
||||
// FUSE process.
|
||||
//
|
||||
// Unsuccessful entry lookups can also be cached by setting an entry
|
||||
// timeout when Lookup returns ENOENT.
|
||||
//
|
||||
// The libfuse C library specifies 1 second timeouts for both
|
||||
// attribute and directory entries, but no timeout for negative
|
||||
// entries by default. This can be achieved in go-fuse by setting
|
||||
// options on mount, eg.
|
||||
//
|
||||
// sec := time.Second
|
||||
// opts := fs.Options{
|
||||
// EntryTimeout: &sec,
|
||||
// AttrTimeout: &sec,
|
||||
// }
|
||||
//
|
||||
// Locking
|
||||
//
|
||||
// Locks for networked filesystems are supported through the suite of
|
||||
// Getlk, Setlk and Setlkw methods. They allow locks on regions of
|
||||
// regular files.
|
||||
//
|
||||
// Parallelism
|
||||
//
|
||||
// The VFS layer in the kernel is optimized to be highly parallel, and
|
||||
// this parallelism also affects FUSE file systems: many FUSE
|
||||
// operations can run in parallel, and this invites race
|
||||
// conditions. It is strongly recommended to test your FUSE file
|
||||
// system issuing file operations in parallel, and using the race
|
||||
// detector to weed out data races.
|
||||
//
|
||||
// Dynamically discovered file systems
|
||||
//
|
||||
// File system data usually cannot fit all in RAM, so the kernel must
|
||||
// discover the file system dynamically: as you are entering and list
|
||||
// directory contents, the kernel asks the FUSE server about the files
|
||||
// and directories you are busy reading/writing, and forgets parts of
|
||||
// your file system when it is low on memory.
|
||||
//
|
||||
// The two important operations for dynamic file systems are:
|
||||
// 1. Lookup, part of the NodeLookuper interface for discovering
|
||||
// individual children of directories, and 2. Readdir, part of the
|
||||
// NodeReaddirer interface for listing the contents of a directory.
|
||||
//
|
||||
// Static in-memory file systems
|
||||
//
|
||||
// For small, read-only file systems, getting the locking mechanics of
|
||||
// Lookup correct is tedious, so Go-FUSE provides a feature to
|
||||
// simplify building such file systems.
|
||||
//
|
||||
// Instead of discovering the FS tree on the fly, you can construct
|
||||
// the entire tree from an OnAdd method. Then, that in-memory tree
|
||||
// structure becomes the source of truth. This means Go-FUSE must
|
||||
// remember Inodes even if the kernel is no longer interested in
|
||||
// them. This is done by instantiating "persistent" inodes from the
|
||||
// OnAdd method of the root node. See the ZipFS example for a
|
||||
// runnable example of how to do this.
|
||||
package fs
|
||||
|
||||
import (
|
||||
"context"
|
||||
"log"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/hanwen/go-fuse/v2/fuse"
|
||||
)
|
||||
|
||||
// InodeEmbedder is an interface for structs that embed Inode.
|
||||
//
|
||||
// InodeEmbedder objects usually should implement some of the NodeXxxx
|
||||
// interfaces, to provide user-defined file system behaviors.
|
||||
//
|
||||
// In general, if an InodeEmbedder does not implement specific
|
||||
// filesystem methods, the filesystem will react as if it is a
|
||||
// read-only filesystem with a predefined tree structure.
|
||||
type InodeEmbedder interface {
|
||||
// populateInode and inode are used internally to link Inode
|
||||
// to a Node.
|
||||
//
|
||||
// See Inode() for the public API to retrieve an inode from Node.
|
||||
embed() *Inode
|
||||
|
||||
// EmbeddedInode returns a pointer to the embedded inode.
|
||||
EmbeddedInode() *Inode
|
||||
}
|
||||
|
||||
// Statfs implements statistics for the filesystem that holds this
|
||||
// Inode. If not defined, the `out` argument will zeroed with an OK
|
||||
// result. This is because OSX filesystems must Statfs, or the mount
|
||||
// will not work.
|
||||
type NodeStatfser interface {
|
||||
Statfs(ctx context.Context, out *fuse.StatfsOut) syscall.Errno
|
||||
}
|
||||
|
||||
// Access should return if the caller can access the file with the
|
||||
// given mode. This is used for two purposes: to determine if a user
|
||||
// may enter a directory, and to answer to implement the access system
|
||||
// call. In the latter case, the context has data about the real
|
||||
// UID. For example, a root-SUID binary called by user susan gets the
|
||||
// UID and GID for susan here.
|
||||
//
|
||||
// If not defined, a default implementation will check traditional
|
||||
// unix permissions of the Getattr result agains the caller. If so, it
|
||||
// is necessary to either return permissions from GetAttr/Lookup or
|
||||
// set Options.DefaultPermissions in order to allow chdir into the
|
||||
// FUSE mount.
|
||||
type NodeAccesser interface {
|
||||
Access(ctx context.Context, mask uint32) syscall.Errno
|
||||
}
|
||||
|
||||
// GetAttr reads attributes for an Inode. The library will ensure that
|
||||
// Mode and Ino are set correctly. For files that are not opened with
|
||||
// FOPEN_DIRECTIO, Size should be set so it can be read correctly. If
|
||||
// returning zeroed permissions, the default behavior is to change the
|
||||
// mode of 0755 (directory) or 0644 (files). This can be switched off
|
||||
// with the Options.NullPermissions setting. If blksize is unset, 4096
|
||||
// is assumed, and the 'blocks' field is set accordingly.
|
||||
type NodeGetattrer interface {
|
||||
Getattr(ctx context.Context, f FileHandle, out *fuse.AttrOut) syscall.Errno
|
||||
}
|
||||
|
||||
// SetAttr sets attributes for an Inode.
|
||||
type NodeSetattrer interface {
|
||||
Setattr(ctx context.Context, f FileHandle, in *fuse.SetAttrIn, out *fuse.AttrOut) syscall.Errno
|
||||
}
|
||||
|
||||
// OnAdd is called when this InodeEmbedder is initialized.
|
||||
type NodeOnAdder interface {
|
||||
OnAdd(ctx context.Context)
|
||||
}
|
||||
|
||||
// Getxattr should read data for the given attribute into
// `dest` and return the number of bytes read. If `dest` is too
// small, it should return ERANGE and the size of the attribute.
// If not defined, Getxattr will return ENOATTR.
type NodeGetxattrer interface {
	Getxattr(ctx context.Context, attr string, dest []byte) (uint32, syscall.Errno)
}

// Setxattr should store data for the given attribute. See
// setxattr(2) for information about flags.
// If not defined, Setxattr will return ENOATTR.
type NodeSetxattrer interface {
	Setxattr(ctx context.Context, attr string, data []byte, flags uint32) syscall.Errno
}

// Removexattr should delete the given attribute.
// If not defined, Removexattr will return ENOATTR.
type NodeRemovexattrer interface {
	Removexattr(ctx context.Context, attr string) syscall.Errno
}

// Listxattr should read all attribute names (null terminated) into
// `dest`. If the `dest` buffer is too small, it should return ERANGE
// and the correct size. If not defined, return an empty list and
// success.
type NodeListxattrer interface {
	Listxattr(ctx context.Context, dest []byte) (uint32, syscall.Errno)
}
|
||||
|
||||
// Readlink reads the content of a symlink.
|
||||
type NodeReadlinker interface {
|
||||
Readlink(ctx context.Context) ([]byte, syscall.Errno)
|
||||
}
|
||||
|
||||
// Open opens an Inode (of regular file type) for reading. It
|
||||
// is optional but recommended to return a FileHandle.
|
||||
type NodeOpener interface {
|
||||
Open(ctx context.Context, flags uint32) (fh FileHandle, fuseFlags uint32, errno syscall.Errno)
|
||||
}
|
||||
|
||||
// Reads data from a file. The data should be returned as
|
||||
// ReadResult, which may be constructed from the incoming
|
||||
// `dest` buffer. If the file was opened without FileHandle,
|
||||
// the FileHandle argument here is nil. The default
|
||||
// implementation forwards to the FileHandle.
|
||||
type NodeReader interface {
|
||||
Read(ctx context.Context, f FileHandle, dest []byte, off int64) (fuse.ReadResult, syscall.Errno)
|
||||
}
|
||||
|
||||
// Writes the data into the file handle at given offset. After
|
||||
// returning, the data will be reused and may not referenced.
|
||||
// The default implementation forwards to the FileHandle.
|
||||
type NodeWriter interface {
|
||||
Write(ctx context.Context, f FileHandle, data []byte, off int64) (written uint32, errno syscall.Errno)
|
||||
}
|
||||
|
||||
// Fsync is a signal to ensure writes to the Inode are flushed
|
||||
// to stable storage.
|
||||
type NodeFsyncer interface {
|
||||
Fsync(ctx context.Context, f FileHandle, flags uint32) syscall.Errno
|
||||
}
|
||||
|
||||
// Flush is called for the close(2) call on a file descriptor. In case
|
||||
// of a descriptor that was duplicated using dup(2), it may be called
|
||||
// more than once for the same FileHandle. The default implementation
|
||||
// forwards to the FileHandle, or if the handle does not support
|
||||
// FileFlusher, returns OK.
|
||||
type NodeFlusher interface {
|
||||
Flush(ctx context.Context, f FileHandle) syscall.Errno
|
||||
}
|
||||
|
||||
// This is called to before a FileHandle is forgotten. The
|
||||
// kernel ignores the return value of this method,
|
||||
// so any cleanup that requires specific synchronization or
|
||||
// could fail with I/O errors should happen in Flush instead.
|
||||
// The default implementation forwards to the FileHandle.
|
||||
type NodeReleaser interface {
|
||||
Release(ctx context.Context, f FileHandle) syscall.Errno
|
||||
}
|
||||
|
||||
// Allocate preallocates space for future writes, so they will
|
||||
// never encounter ESPACE.
|
||||
type NodeAllocater interface {
|
||||
Allocate(ctx context.Context, f FileHandle, off uint64, size uint64, mode uint32) syscall.Errno
|
||||
}
|
||||
|
||||
// CopyFileRange copies data between sections of two files,
|
||||
// without the data having to pass through the calling process.
|
||||
type NodeCopyFileRanger interface {
|
||||
CopyFileRange(ctx context.Context, fhIn FileHandle,
|
||||
offIn uint64, out *Inode, fhOut FileHandle, offOut uint64,
|
||||
len uint64, flags uint64) (uint32, syscall.Errno)
|
||||
}
|
||||
|
||||
// Lseek is used to implement holes: it should return the
|
||||
// first offset beyond `off` where there is data (SEEK_DATA)
|
||||
// or where there is a hole (SEEK_HOLE).
|
||||
type NodeLseeker interface {
|
||||
Lseek(ctx context.Context, f FileHandle, Off uint64, whence uint32) (uint64, syscall.Errno)
|
||||
}
|
||||
|
||||
// Getlk returns locks that would conflict with the given input
|
||||
// lock. If no locks conflict, the output has type L_UNLCK. See
|
||||
// fcntl(2) for more information.
|
||||
// If not defined, returns ENOTSUP
|
||||
type NodeGetlker interface {
|
||||
Getlk(ctx context.Context, f FileHandle, owner uint64, lk *fuse.FileLock, flags uint32, out *fuse.FileLock) syscall.Errno
|
||||
}
|
||||
|
||||
// Setlk obtains a lock on a file, or fail if the lock could not
|
||||
// obtained. See fcntl(2) for more information. If not defined,
|
||||
// returns ENOTSUP
|
||||
type NodeSetlker interface {
|
||||
Setlk(ctx context.Context, f FileHandle, owner uint64, lk *fuse.FileLock, flags uint32) syscall.Errno
|
||||
}
|
||||
|
||||
// Setlkw obtains a lock on a file, waiting if necessary. See fcntl(2)
|
||||
// for more information. If not defined, returns ENOTSUP
|
||||
type NodeSetlkwer interface {
|
||||
Setlkw(ctx context.Context, f FileHandle, owner uint64, lk *fuse.FileLock, flags uint32) syscall.Errno
|
||||
}
|
||||
|
||||
// DirStream lists directory entries.
|
||||
type DirStream interface {
|
||||
// HasNext indicates if there are further entries. HasNext
|
||||
// might be called on already closed streams.
|
||||
HasNext() bool
|
||||
|
||||
// Next retrieves the next entry. It is only called if HasNext
|
||||
// has previously returned true. The Errno return may be used to
|
||||
// indicate I/O errors
|
||||
Next() (fuse.DirEntry, syscall.Errno)
|
||||
|
||||
// Close releases resources related to this directory
|
||||
// stream.
|
||||
Close()
|
||||
}
|
||||
|
||||
// Lookup should find a direct child of a directory by the child's name. If
|
||||
// the entry does not exist, it should return ENOENT and optionally
|
||||
// set a NegativeTimeout in `out`. If it does exist, it should return
|
||||
// attribute data in `out` and return the Inode for the child. A new
|
||||
// inode can be created using `Inode.NewInode`. The new Inode will be
|
||||
// added to the FS tree automatically if the return status is OK.
|
||||
//
|
||||
// If a directory does not implement NodeLookuper, the library looks
|
||||
// for an existing child with the given name.
|
||||
//
|
||||
// The input to a Lookup is {parent directory, name string}.
|
||||
//
|
||||
// Lookup, if successful, must return an *Inode. Once the Inode is
|
||||
// returned to the kernel, the kernel can issue further operations,
|
||||
// such as Open or Getxattr on that node.
|
||||
//
|
||||
// A successful Lookup also returns an EntryOut. Among others, this
|
||||
// contains file attributes (mode, size, mtime, etc.).
|
||||
//
|
||||
// FUSE supports other operations that modify the namespace. For
|
||||
// example, the Symlink, Create, Mknod, Link methods all create new
|
||||
// children in directories. Hence, they also return *Inode and must
|
||||
// populate their fuse.EntryOut arguments.
|
||||
|
||||
//
|
||||
type NodeLookuper interface {
|
||||
Lookup(ctx context.Context, name string, out *fuse.EntryOut) (*Inode, syscall.Errno)
|
||||
}
|
||||
|
||||
// OpenDir opens a directory Inode for reading its
|
||||
// contents. The actual reading is driven from ReadDir, so
|
||||
// this method is just for performing sanity/permission
|
||||
// checks. The default is to return success.
|
||||
type NodeOpendirer interface {
|
||||
Opendir(ctx context.Context) syscall.Errno
|
||||
}
|
||||
|
||||
// ReadDir opens a stream of directory entries.
|
||||
//
|
||||
// Readdir essentiallly returns a list of strings, and it is allowed
|
||||
// for Readdir to return different results from Lookup. For example,
|
||||
// you can return nothing for Readdir ("ls my-fuse-mount" is empty),
|
||||
// while still implementing Lookup ("ls my-fuse-mount/a-specific-file"
|
||||
// shows a single file).
|
||||
//
|
||||
// If a directory does not implement NodeReaddirer, a list of
|
||||
// currently known children from the tree is returned. This means that
|
||||
// static in-memory file systems need not implement NodeReaddirer.
|
||||
type NodeReaddirer interface {
|
||||
Readdir(ctx context.Context) (DirStream, syscall.Errno)
|
||||
}
|
||||
|
||||
// Mkdir is similar to Lookup, but must create a directory entry and Inode.
|
||||
// Default is to return EROFS.
|
||||
type NodeMkdirer interface {
|
||||
Mkdir(ctx context.Context, name string, mode uint32, out *fuse.EntryOut) (*Inode, syscall.Errno)
|
||||
}
|
||||
|
||||
// Mknod is similar to Lookup, but must create a device entry and Inode.
|
||||
// Default is to return EROFS.
|
||||
type NodeMknoder interface {
|
||||
Mknod(ctx context.Context, name string, mode uint32, dev uint32, out *fuse.EntryOut) (*Inode, syscall.Errno)
|
||||
}
|
||||
|
||||
// Link is similar to Lookup, but must create a new link to an existing Inode.
|
||||
// Default is to return EROFS.
|
||||
type NodeLinker interface {
|
||||
Link(ctx context.Context, target InodeEmbedder, name string, out *fuse.EntryOut) (node *Inode, errno syscall.Errno)
|
||||
}
|
||||
|
||||
// Symlink is similar to Lookup, but must create a new symbolic link.
|
||||
// Default is to return EROFS.
|
||||
type NodeSymlinker interface {
|
||||
Symlink(ctx context.Context, target, name string, out *fuse.EntryOut) (node *Inode, errno syscall.Errno)
|
||||
}
|
||||
|
||||
// Create is similar to Lookup, but should create a new
|
||||
// child. It typically also returns a FileHandle as a
|
||||
// reference for future reads/writes.
|
||||
// Default is to return EROFS.
|
||||
type NodeCreater interface {
|
||||
Create(ctx context.Context, name string, flags uint32, mode uint32, out *fuse.EntryOut) (node *Inode, fh FileHandle, fuseFlags uint32, errno syscall.Errno)
|
||||
}
|
||||
|
||||
// Unlink should remove a child from this directory. If the
|
||||
// return status is OK, the Inode is removed as child in the
|
||||
// FS tree automatically. Default is to return EROFS.
|
||||
type NodeUnlinker interface {
|
||||
Unlink(ctx context.Context, name string) syscall.Errno
|
||||
}
|
||||
|
||||
// Rmdir is like Unlink but for directories.
|
||||
// Default is to return EROFS.
|
||||
type NodeRmdirer interface {
|
||||
Rmdir(ctx context.Context, name string) syscall.Errno
|
||||
}
|
||||
|
||||
// Rename should move a child from one directory to a different
|
||||
// one. The change is effected in the FS tree if the return status is
|
||||
// OK. Default is to return EROFS.
|
||||
type NodeRenamer interface {
|
||||
Rename(ctx context.Context, name string, newParent InodeEmbedder, newName string, flags uint32) syscall.Errno
|
||||
}
|
||||
|
||||
// FileHandle is a resource identifier for opened files. Usually, a
// FileHandle should implement some of the FileXxxx interfaces.
//
// All of the FileXxxx operations can also be implemented at the
// InodeEmbedder level, for example, one can implement NodeReader
// instead of FileReader.
//
// FileHandles are useful in two cases: First, if the underlying
// storage system needs a handle for reading/writing. This is the
// case with Unix system calls, which need a file descriptor (see
// also the function `NewLoopbackFile`). Second, it is useful for
// implementing files whose contents are not tied to an inode. For
// example, a file like `/proc/interrupts` has no fixed content, but
// changes on each open call. This means that each file handle must
// have its own view of the content; this view can be tied to a
// FileHandle. Files that have such dynamic content should return the
// FOPEN_DIRECT_IO flag from their `Open` method. See directio_test.go
// for an example.
type FileHandle interface {
}
|
||||
|
||||
// See NodeReleaser.
|
||||
type FileReleaser interface {
|
||||
Release(ctx context.Context) syscall.Errno
|
||||
}
|
||||
|
||||
// See NodeGetattrer.
|
||||
type FileGetattrer interface {
|
||||
Getattr(ctx context.Context, out *fuse.AttrOut) syscall.Errno
|
||||
}
|
||||
|
||||
// See NodeReader.
|
||||
type FileReader interface {
|
||||
Read(ctx context.Context, dest []byte, off int64) (fuse.ReadResult, syscall.Errno)
|
||||
}
|
||||
|
||||
// See NodeWriter.
|
||||
type FileWriter interface {
|
||||
Write(ctx context.Context, data []byte, off int64) (written uint32, errno syscall.Errno)
|
||||
}
|
||||
|
||||
// See NodeGetlker.
|
||||
type FileGetlker interface {
|
||||
Getlk(ctx context.Context, owner uint64, lk *fuse.FileLock, flags uint32, out *fuse.FileLock) syscall.Errno
|
||||
}
|
||||
|
||||
// See NodeSetlker.
|
||||
type FileSetlker interface {
|
||||
Setlk(ctx context.Context, owner uint64, lk *fuse.FileLock, flags uint32) syscall.Errno
|
||||
}
|
||||
|
||||
// See NodeSetlkwer.
|
||||
type FileSetlkwer interface {
|
||||
Setlkw(ctx context.Context, owner uint64, lk *fuse.FileLock, flags uint32) syscall.Errno
|
||||
}
|
||||
|
||||
// See NodeLseeker.
|
||||
type FileLseeker interface {
|
||||
Lseek(ctx context.Context, off uint64, whence uint32) (uint64, syscall.Errno)
|
||||
}
|
||||
|
||||
// See NodeFlusher.
|
||||
type FileFlusher interface {
|
||||
Flush(ctx context.Context) syscall.Errno
|
||||
}
|
||||
|
||||
// See NodeFsync.
|
||||
type FileFsyncer interface {
|
||||
Fsync(ctx context.Context, flags uint32) syscall.Errno
|
||||
}
|
||||
|
||||
// See NodeFsync.
|
||||
type FileSetattrer interface {
|
||||
Setattr(ctx context.Context, in *fuse.SetAttrIn, out *fuse.AttrOut) syscall.Errno
|
||||
}
|
||||
|
||||
// See NodeAllocater.
|
||||
type FileAllocater interface {
|
||||
Allocate(ctx context.Context, off uint64, size uint64, mode uint32) syscall.Errno
|
||||
}
|
||||
|
||||
// Options sets options for the entire filesystem
|
||||
type Options struct {
|
||||
// MountOptions contain the options for mounting the fuse server
|
||||
fuse.MountOptions
|
||||
|
||||
// If set to nonnil, this defines the overall entry timeout
|
||||
// for the file system. See fuse.EntryOut for more information.
|
||||
EntryTimeout *time.Duration
|
||||
|
||||
// If set to nonnil, this defines the overall attribute
|
||||
// timeout for the file system. See fuse.EntryOut for more
|
||||
// information.
|
||||
AttrTimeout *time.Duration
|
||||
|
||||
// If set to nonnil, this defines the overall entry timeout
|
||||
// for failed lookups (fuse.ENOENT). See fuse.EntryOut for
|
||||
// more information.
|
||||
NegativeTimeout *time.Duration
|
||||
|
||||
// Automatic inode numbers are handed out sequentially
|
||||
// starting from this number. If unset, use 2^63.
|
||||
FirstAutomaticIno uint64
|
||||
|
||||
// OnAdd is an alternative way to specify the OnAdd
|
||||
// functionality of the root node.
|
||||
OnAdd func(ctx context.Context)
|
||||
|
||||
// NullPermissions if set, leaves null file permissions
|
||||
// alone. Otherwise, they are set to 755 (dirs) or 644 (other
|
||||
// files.), which is necessary for doing a chdir into the FUSE
|
||||
// directories.
|
||||
NullPermissions bool
|
||||
|
||||
// If nonzero, replace default (zero) UID with the given UID
|
||||
UID uint32
|
||||
|
||||
// If nonzero, replace default (zero) GID with the given GID
|
||||
GID uint32
|
||||
|
||||
// ServerCallbacks can be provided to stub out notification
|
||||
// functions for testing a filesystem without mounting it.
|
||||
ServerCallbacks ServerCallbacks
|
||||
|
||||
// Logger is a sink for diagnostic messages. Diagnostic
|
||||
// messages are printed under conditions where we cannot
|
||||
// return error, but want to signal something seems off
|
||||
// anyway. If unset, no messages are printed.
|
||||
Logger *log.Logger
|
||||
}
|
File diff suppressed because it is too large
Load Diff
|
@ -0,0 +1,29 @@
|
|||
// Copyright 2019 the Go-FUSE Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package fs
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
|
||||
"github.com/hanwen/go-fuse/v2/fuse"
|
||||
)
|
||||
|
||||
// OK is the Errno return value to indicate absense of errors.
|
||||
var OK = syscall.Errno(0)
|
||||
|
||||
// ToErrno exhumes the syscall.Errno error from wrapped error values.
|
||||
func ToErrno(err error) syscall.Errno {
|
||||
s := fuse.ToStatus(err)
|
||||
return syscall.Errno(s)
|
||||
}
|
||||
|
||||
// RENAME_EXCHANGE is a flag argument for renameat2().
const RENAME_EXCHANGE = 0x2

const (
	// _SEEK_DATA seeks to the next data.
	_SEEK_DATA = 3

	// _SEEK_HOLE seeks to the next hole.
	_SEEK_HOLE = 4
)
|
|
@ -0,0 +1,10 @@
|
|||
// Copyright 2019 the Go-FUSE Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package fs
|
||||
|
||||
import "syscall"
|
||||
|
||||
// ENOATTR indicates that an extended attribute was not present.
|
||||
var ENOATTR = syscall.ENOATTR
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue