commit
56c6ea2827
|
@ -69,7 +69,7 @@ Different versions of the example scripts show different ways of describing the
|
|||
|
||||
#### Supported runc version
|
||||
|
||||
During development buildkit is tested with the version of runc that is being used by the containerd repository. Please refer to [runc.md](https://github.com/containerd/containerd/blob/8095244c26fa2daaef850be862e5b1b56d7cec66/RUNC.md) for more information.
|
||||
During development buildkit is tested with the version of runc that is being used by the containerd repository. Please refer to [runc.md](https://github.com/containerd/containerd/blob/d1e11f17ec7b325f89608dd46c128300b8727d50/RUNC.md) for more information.
|
||||
|
||||
|
||||
#### Contributing
|
||||
|
|
|
@ -186,7 +186,7 @@ func (w *bufferedWriter) Digest() digest.Digest {
|
|||
return w.digester.Digest()
|
||||
}
|
||||
|
||||
func (w *bufferedWriter) Commit(size int64, expected digest.Digest, opt ...content.Opt) error {
|
||||
func (w *bufferedWriter) Commit(ctx context.Context, size int64, expected digest.Digest, opt ...content.Opt) error {
|
||||
if w.buffer == nil {
|
||||
return errors.Errorf("can't commit already committed or closed")
|
||||
}
|
||||
|
|
|
@ -40,7 +40,7 @@ func NewContainerd(root, address string) (*Controller, error) {
|
|||
func newContainerdPullDeps(client *containerd.Client) *pullDeps {
|
||||
diff := client.DiffService()
|
||||
return &pullDeps{
|
||||
Snapshotter: client.SnapshotService(""),
|
||||
Snapshotter: client.SnapshotService(containerd.DefaultSnapshotter),
|
||||
ContentStore: client.ContentStore(),
|
||||
Applier: diff,
|
||||
Differ: diff,
|
||||
|
|
|
@ -55,7 +55,7 @@ func newStandalonePullDeps(root string) (*pullDeps, error) {
|
|||
return nil, err
|
||||
}
|
||||
|
||||
differ, err := differ.NewBaseDiff(c)
|
||||
df, err := differ.NewWalkingDiff(c)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -63,8 +63,8 @@ func newStandalonePullDeps(root string) (*pullDeps, error) {
|
|||
return &pullDeps{
|
||||
Snapshotter: &nsSnapshotter{s},
|
||||
ContentStore: c,
|
||||
Applier: differ,
|
||||
Differ: differ,
|
||||
Applier: df,
|
||||
Differ: df,
|
||||
}, nil
|
||||
}
|
||||
|
||||
|
|
|
@ -117,7 +117,9 @@ func TestControl(t *testing.T) {
|
|||
|
||||
err = w.Exec(ctx, meta, snap, nil, nil, &nopCloser{stderr})
|
||||
assert.Error(t, err) // Read-only root
|
||||
assert.Contains(t, stderr.String(), "Read-only file system")
|
||||
// typical error is like `mkdir /.../rootfs/proc: read-only file system`.
|
||||
// make sure the error is caused before running `echo foo > /bar`.
|
||||
assert.Contains(t, stderr.String(), "read-only file system")
|
||||
|
||||
root, err := cm.New(ctx, snap)
|
||||
assert.NoError(t, err)
|
||||
|
|
|
@ -115,7 +115,7 @@ func copyFrom(src llb.State, srcPath, destPath string) llb.StateOption {
|
|||
// copy copies files between 2 states using cp until there is no copyOp
|
||||
func copy(src llb.State, srcPath string, dest llb.State, destPath string) llb.State {
|
||||
cpImage := llb.Image("docker.io/library/alpine:latest")
|
||||
cp := cpImage.Run(llb.Shlexf("cp -a /src%s /dest%s", srcPath, destPath), llb.ReadonlyRootFS)
|
||||
cp := cpImage.Run(llb.Shlexf("cp -a /src%s /dest%s", srcPath, destPath))
|
||||
cp.AddMount("/src", src, llb.Readonly)
|
||||
return cp.AddMount("/dest", dest)
|
||||
}
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
ARG RUNC_VERSION=e775f0fba3ea329b8b766451c892c41a3d49594d
|
||||
ARG CONTAINERD_VERSION=8095244c26fa2daaef850be862e5b1b56d7cec66
|
||||
ARG CONTAINERD_VERSION=d1e11f17ec7b325f89608dd46c128300b8727d50
|
||||
|
||||
FROM golang:1.8-alpine AS gobuild-base
|
||||
RUN apk add --no-cache g++ linux-headers
|
||||
|
|
|
@ -78,7 +78,7 @@ func (e *execOp) Run(ctx context.Context, inputs []Reference) ([]Reference, erro
|
|||
mountable = ref
|
||||
}
|
||||
if m.Output != pb.SkipOutput {
|
||||
if m.Readonly && ref != nil {
|
||||
if m.Readonly && ref != nil && m.Dest != pb.RootMount { // exclude read-only rootfs
|
||||
outputs = append(outputs, newSharedRef(ref).Clone())
|
||||
} else {
|
||||
active, err := e.cm.New(ctx, ref, cache.WithDescription(fmt.Sprintf("mount %s from exec %s", m.Dest, strings.Join(e.op.Meta.Args, " ")))) // TODO: should be method
|
||||
|
|
|
@ -2,7 +2,6 @@ package containerimage
|
|||
|
||||
import (
|
||||
gocontext "context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"sync"
|
||||
|
@ -12,6 +11,7 @@ import (
|
|||
"github.com/containerd/containerd/content"
|
||||
"github.com/containerd/containerd/errdefs"
|
||||
"github.com/containerd/containerd/images"
|
||||
"github.com/containerd/containerd/platforms"
|
||||
"github.com/containerd/containerd/remotes"
|
||||
"github.com/containerd/containerd/remotes/docker"
|
||||
"github.com/containerd/containerd/rootfs"
|
||||
|
@ -213,21 +213,17 @@ func (is *imageSource) fillBlobMapping(ctx context.Context, layers []rootfs.Laye
|
|||
}
|
||||
|
||||
func getLayers(ctx context.Context, provider content.Provider, desc ocispec.Descriptor) ([]rootfs.Layer, error) {
|
||||
p, err := content.ReadBlob(ctx, provider, desc.Digest)
|
||||
manifest, err := images.Manifest(ctx, provider, desc, platforms.Format(platforms.Default()))
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to read manifest blob")
|
||||
}
|
||||
var manifest ocispec.Manifest
|
||||
if err := json.Unmarshal(p, &manifest); err != nil {
|
||||
return nil, errors.Wrap(err, "failed to unmarshal manifest")
|
||||
return nil, errors.WithStack(err)
|
||||
}
|
||||
image := images.Image{Target: desc}
|
||||
diffIDs, err := image.RootFS(ctx, provider)
|
||||
diffIDs, err := image.RootFS(ctx, provider, platforms.Format(platforms.Default()))
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to resolve rootfs")
|
||||
}
|
||||
if len(diffIDs) != len(manifest.Layers) {
|
||||
return nil, errors.Errorf("mismatched image rootfs and manifest layers")
|
||||
return nil, errors.Errorf("mismatched image rootfs and manifest layers %+v %+v", diffIDs, manifest.Layers)
|
||||
}
|
||||
layers := make([]rootfs.Layer, len(diffIDs))
|
||||
for i := range diffIDs {
|
||||
|
|
|
@ -35,7 +35,7 @@ func Config(ctx context.Context, ref string, resolver remotes.Resolver, ingester
|
|||
if err := images.Dispatch(ctx, images.Handlers(handlers...), desc); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
config, err := images.Config(ctx, ingester, desc)
|
||||
config, err := images.Config(ctx, ingester, desc, "")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
|
|
@ -6,7 +6,7 @@ github.com/davecgh/go-spew v1.1.0
|
|||
github.com/pmezard/go-difflib v1.0.0
|
||||
golang.org/x/sys 739734461d1c916b6c72a63d7efda2b27edb369f
|
||||
|
||||
github.com/containerd/containerd 8095244c26fa2daaef850be862e5b1b56d7cec66
|
||||
github.com/containerd/containerd d1e11f17ec7b325f89608dd46c128300b8727d50
|
||||
golang.org/x/sync f52d1811a62927559de87708c8913c1650ce4f26
|
||||
github.com/sirupsen/logrus v1.0.0
|
||||
google.golang.org/grpc v1.3.0
|
||||
|
|
|
@ -60,25 +60,25 @@ image, err := client.Pull(context, "docker.io/library/redis:latest")
|
|||
err := client.Push(context, "docker.io/library/redis:latest", image.Target())
|
||||
```
|
||||
|
||||
### OCI Runtime Specification
|
||||
|
||||
containerd fully supports the OCI runtime specification for running containers. We have built in functions to help you generate runtime specifications based on images as well as custom parameters.
|
||||
|
||||
```go
|
||||
spec, err := containerd.GenerateSpec(containerd.WithImageConfig(context, image))
|
||||
```
|
||||
|
||||
### Containers
|
||||
|
||||
In containerd, a container is a metadata object. Resources such as an OCI runtime specification, image, root filesystem, and other metadata can be attached to a container.
|
||||
|
||||
```go
|
||||
redis, err := client.NewContainer(context, "redis-master",
|
||||
containerd.WithSpec(spec),
|
||||
)
|
||||
redis, err := client.NewContainer(context, "redis-master")
|
||||
defer redis.Delete(context)
|
||||
```
|
||||
|
||||
### OCI Runtime Specification
|
||||
|
||||
containerd fully supports the OCI runtime specification for running containers. We have built in functions to help you generate runtime specifications based on images as well as custom parameters.
|
||||
|
||||
You can specify options when creating a container about how to modify the specification.
|
||||
|
||||
```go
|
||||
redis, err := client.NewContainer(context, "redis-master", containerd.WithNewSpec(containerd.WithImageConfig(image)))
|
||||
```
|
||||
|
||||
## Root Filesystems
|
||||
|
||||
containerd allows you to use overlay or snapshot filesystems with your containers. It comes with builtin support for overlayfs and btrfs.
|
||||
|
@ -89,16 +89,17 @@ image, err := client.Pull(context, "docker.io/library/redis:latest", containerd.
|
|||
|
||||
// allocate a new RW root filesystem for a container based on the image
|
||||
redis, err := client.NewContainer(context, "redis-master",
|
||||
containerd.WithSpec(spec),
|
||||
containerd.WithNewSnapshot("redis-rootfs", image),
|
||||
containerd.WithNewSpec(containerd.WithImageConfig(image)),
|
||||
|
||||
)
|
||||
|
||||
// use a readonly filesystem with multiple containers
|
||||
for i := 0; i < 10; i++ {
|
||||
id := fmt.Sprintf("id-%s", i)
|
||||
container, err := client.NewContainer(ctx, id,
|
||||
containerd.WithSpec(spec),
|
||||
containerd.WithNewSnapshotView(id, image),
|
||||
containerd.WithNewSpec(containerd.WithImageConfig(image)),
|
||||
)
|
||||
}
|
||||
```
|
||||
|
@ -191,6 +192,11 @@ For sync communication we have a community slack with a #containerd channel that
|
|||
|
||||
**Slack:** https://dockr.ly/community
|
||||
|
||||
### Reporting security issues
|
||||
|
||||
__If you are reporting a security issue, please follow the responsible
|
||||
disclosure guidelines and reach out discreetly at containerd-security@googlegroups.com__.
|
||||
|
||||
## Copyright and license
|
||||
|
||||
Copyright ©2016-2017 Docker, Inc. All rights reserved, except as follows. Code
|
||||
|
|
115
vendor/github.com/containerd/containerd/api/services/containers/v1/containers.pb.go
generated
vendored
115
vendor/github.com/containerd/containerd/api/services/containers/v1/containers.pb.go
generated
vendored
|
@ -79,15 +79,17 @@ type Container struct {
|
|||
Spec *google_protobuf1.Any `protobuf:"bytes,5,opt,name=spec" json:"spec,omitempty"`
|
||||
// Snapshotter specifies the snapshotter name used for rootfs
|
||||
Snapshotter string `protobuf:"bytes,6,opt,name=snapshotter,proto3" json:"snapshotter,omitempty"`
|
||||
// RootFS specifies the snapshot key to use for the container's root
|
||||
// SnapshotKey specifies the snapshot key to use for the container's root
|
||||
// filesystem. When starting a task from this container, a caller should
|
||||
// look up the mounts from the snapshot service and include those on the
|
||||
// task create request.
|
||||
//
|
||||
// Snapshots referenced in this field will not be garbage collected.
|
||||
//
|
||||
// This field is set to empty when the rootfs is not a snapshot.
|
||||
//
|
||||
// This field may be updated.
|
||||
RootFS string `protobuf:"bytes,7,opt,name=rootfs,proto3" json:"rootfs,omitempty"`
|
||||
SnapshotKey string `protobuf:"bytes,7,opt,name=snapshot_key,json=snapshotKey,proto3" json:"snapshot_key,omitempty"`
|
||||
// CreatedAt is the time the container was first created.
|
||||
CreatedAt time.Time `protobuf:"bytes,8,opt,name=created_at,json=createdAt,stdtime" json:"created_at"`
|
||||
// UpdatedAt is the last time the container was mutated.
|
||||
|
@ -494,11 +496,11 @@ func (m *Container) MarshalTo(dAtA []byte) (int, error) {
|
|||
i = encodeVarintContainers(dAtA, i, uint64(len(m.Snapshotter)))
|
||||
i += copy(dAtA[i:], m.Snapshotter)
|
||||
}
|
||||
if len(m.RootFS) > 0 {
|
||||
if len(m.SnapshotKey) > 0 {
|
||||
dAtA[i] = 0x3a
|
||||
i++
|
||||
i = encodeVarintContainers(dAtA, i, uint64(len(m.RootFS)))
|
||||
i += copy(dAtA[i:], m.RootFS)
|
||||
i = encodeVarintContainers(dAtA, i, uint64(len(m.SnapshotKey)))
|
||||
i += copy(dAtA[i:], m.SnapshotKey)
|
||||
}
|
||||
dAtA[i] = 0x42
|
||||
i++
|
||||
|
@ -862,7 +864,7 @@ func (m *Container) Size() (n int) {
|
|||
if l > 0 {
|
||||
n += 1 + l + sovContainers(uint64(l))
|
||||
}
|
||||
l = len(m.RootFS)
|
||||
l = len(m.SnapshotKey)
|
||||
if l > 0 {
|
||||
n += 1 + l + sovContainers(uint64(l))
|
||||
}
|
||||
|
@ -1009,7 +1011,7 @@ func (this *Container) String() string {
|
|||
`Runtime:` + strings.Replace(fmt.Sprintf("%v", this.Runtime), "Container_Runtime", "Container_Runtime", 1) + `,`,
|
||||
`Spec:` + strings.Replace(fmt.Sprintf("%v", this.Spec), "Any", "google_protobuf1.Any", 1) + `,`,
|
||||
`Snapshotter:` + fmt.Sprintf("%v", this.Snapshotter) + `,`,
|
||||
`RootFS:` + fmt.Sprintf("%v", this.RootFS) + `,`,
|
||||
`SnapshotKey:` + fmt.Sprintf("%v", this.SnapshotKey) + `,`,
|
||||
`CreatedAt:` + strings.Replace(strings.Replace(this.CreatedAt.String(), "Timestamp", "google_protobuf4.Timestamp", 1), `&`, ``, 1) + `,`,
|
||||
`UpdatedAt:` + strings.Replace(strings.Replace(this.UpdatedAt.String(), "Timestamp", "google_protobuf4.Timestamp", 1), `&`, ``, 1) + `,`,
|
||||
`}`,
|
||||
|
@ -1426,7 +1428,7 @@ func (m *Container) Unmarshal(dAtA []byte) error {
|
|||
iNdEx = postIndex
|
||||
case 7:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field RootFS", wireType)
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field SnapshotKey", wireType)
|
||||
}
|
||||
var stringLen uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
|
@ -1451,7 +1453,7 @@ func (m *Container) Unmarshal(dAtA []byte) error {
|
|||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.RootFS = string(dAtA[iNdEx:postIndex])
|
||||
m.SnapshotKey = string(dAtA[iNdEx:postIndex])
|
||||
iNdEx = postIndex
|
||||
case 8:
|
||||
if wireType != 2 {
|
||||
|
@ -2507,52 +2509,51 @@ func init() {
|
|||
}
|
||||
|
||||
var fileDescriptorContainers = []byte{
|
||||
// 738 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x56, 0xcb, 0x92, 0xd2, 0x40,
|
||||
0x14, 0x9d, 0x40, 0x26, 0x0c, 0x97, 0x8d, 0xd5, 0x22, 0xc6, 0x58, 0x05, 0x98, 0x15, 0x0b, 0x0d,
|
||||
0x0e, 0x5a, 0x3a, 0x8f, 0xd5, 0x30, 0xaf, 0xb2, 0x6a, 0xc6, 0x9a, 0x6a, 0x75, 0xa3, 0x8b, 0x31,
|
||||
0x40, 0xc3, 0x44, 0x92, 0x74, 0x4c, 0x37, 0x54, 0x51, 0x2e, 0xf4, 0x13, 0xfc, 0x0b, 0x7f, 0x65,
|
||||
0x96, 0x2e, 0x5d, 0xcd, 0x83, 0x2f, 0xb1, 0xd2, 0x49, 0x26, 0xc8, 0xa3, 0x84, 0x51, 0x76, 0x7d,
|
||||
0xe9, 0x7b, 0xee, 0x3d, 0x39, 0xf7, 0xdc, 0x10, 0x38, 0xea, 0x58, 0xfc, 0xac, 0xd7, 0x30, 0x9a,
|
||||
0xd4, 0xa9, 0x36, 0xa9, 0xcb, 0x4d, 0xcb, 0x25, 0x7e, 0x6b, 0xf4, 0x68, 0x7a, 0x56, 0x95, 0x11,
|
||||
0xbf, 0x6f, 0x35, 0x09, 0x4b, 0x7e, 0x67, 0xd5, 0xfe, 0xfa, 0x48, 0x64, 0x78, 0x3e, 0xe5, 0x14,
|
||||
0x3d, 0x4a, 0x70, 0x46, 0x8c, 0x31, 0x46, 0xb2, 0xfa, 0xeb, 0x5a, 0xbe, 0x43, 0x3b, 0x54, 0x64,
|
||||
0x57, 0x83, 0x53, 0x08, 0xd4, 0x1e, 0x74, 0x28, 0xed, 0xd8, 0xa4, 0x2a, 0xa2, 0x46, 0xaf, 0x5d,
|
||||
0x35, 0xdd, 0x41, 0x74, 0xf5, 0x70, 0xfc, 0x8a, 0x38, 0x1e, 0x8f, 0x2f, 0xcb, 0xe3, 0x97, 0x6d,
|
||||
0x8b, 0xd8, 0xad, 0x53, 0xc7, 0x64, 0xdd, 0x28, 0xa3, 0x34, 0x9e, 0xc1, 0x2d, 0x87, 0x30, 0x6e,
|
||||
0x3a, 0x5e, 0x98, 0xa0, 0x5f, 0xca, 0x90, 0xdd, 0x8d, 0x29, 0xa2, 0x02, 0xa4, 0xac, 0x96, 0x2a,
|
||||
0x95, 0xa5, 0x4a, 0xb6, 0xae, 0x0c, 0x2f, 0x4a, 0xa9, 0x57, 0x7b, 0x38, 0x65, 0xb5, 0xd0, 0x09,
|
||||
0x28, 0xb6, 0xd9, 0x20, 0x36, 0x53, 0x53, 0xe5, 0x74, 0x25, 0x57, 0xdb, 0x30, 0xfe, 0xfa, 0xa8,
|
||||
0xc6, 0x4d, 0x55, 0xe3, 0x48, 0x40, 0xf7, 0x5d, 0xee, 0x0f, 0x70, 0x54, 0x07, 0xe5, 0x61, 0xd5,
|
||||
0x72, 0xcc, 0x0e, 0x51, 0xd3, 0x41, 0x33, 0x1c, 0x06, 0xe8, 0x35, 0x64, 0xfc, 0x9e, 0x1b, 0x70,
|
||||
0x54, 0xe5, 0xb2, 0x54, 0xc9, 0xd5, 0x9e, 0x2f, 0xd4, 0x08, 0x87, 0x58, 0x1c, 0x17, 0x41, 0x15,
|
||||
0x90, 0x99, 0x47, 0x9a, 0xea, 0xaa, 0x28, 0x96, 0x37, 0x42, 0x35, 0x8c, 0x58, 0x0d, 0x63, 0xc7,
|
||||
0x1d, 0x60, 0x91, 0x81, 0xca, 0x90, 0x63, 0xae, 0xe9, 0xb1, 0x33, 0xca, 0x39, 0xf1, 0x55, 0x45,
|
||||
0xb0, 0x1a, 0xfd, 0x09, 0xe9, 0xa0, 0xf8, 0x94, 0xf2, 0x36, 0x53, 0x33, 0x42, 0x1f, 0x18, 0x5e,
|
||||
0x94, 0x14, 0x4c, 0x29, 0x3f, 0x78, 0x83, 0xa3, 0x1b, 0xb4, 0x0b, 0xd0, 0xf4, 0x89, 0xc9, 0x49,
|
||||
0xeb, 0xd4, 0xe4, 0xea, 0x9a, 0xe8, 0xaa, 0x4d, 0x74, 0x7d, 0x1b, 0xcf, 0xa0, 0xbe, 0x76, 0x7e,
|
||||
0x51, 0x5a, 0xf9, 0x7e, 0x59, 0x92, 0x70, 0x36, 0xc2, 0xed, 0xf0, 0xa0, 0x48, 0xcf, 0x6b, 0xc5,
|
||||
0x45, 0xb2, 0x8b, 0x14, 0x89, 0x70, 0x3b, 0x5c, 0xdb, 0x84, 0xdc, 0x88, 0xec, 0xe8, 0x0e, 0xa4,
|
||||
0xbb, 0x64, 0x10, 0x4e, 0x16, 0x07, 0xc7, 0x60, 0x00, 0x7d, 0xd3, 0xee, 0x11, 0x35, 0x15, 0x0e,
|
||||
0x40, 0x04, 0x5b, 0xa9, 0x0d, 0x49, 0x3b, 0x86, 0x4c, 0x24, 0x24, 0x42, 0x20, 0xbb, 0xa6, 0x43,
|
||||
0x22, 0x9c, 0x38, 0x23, 0x03, 0x32, 0xd4, 0xe3, 0x16, 0x75, 0x99, 0x80, 0xce, 0x92, 0x35, 0x4e,
|
||||
0xd2, 0x9f, 0xc0, 0xdd, 0x43, 0xc2, 0x6f, 0x86, 0x84, 0xc9, 0xe7, 0x1e, 0x61, 0x7c, 0x96, 0xd5,
|
||||
0xf4, 0x33, 0xc8, 0xff, 0x99, 0xce, 0x3c, 0xea, 0x32, 0x82, 0x4e, 0x20, 0x7b, 0x33, 0x76, 0x01,
|
||||
0xcb, 0xd5, 0x1e, 0x2f, 0x62, 0x8e, 0xba, 0x1c, 0xc8, 0x84, 0x93, 0x22, 0xfa, 0x3a, 0xdc, 0x3b,
|
||||
0xb2, 0x58, 0xd2, 0x8a, 0xc5, 0xd4, 0x54, 0xc8, 0xb4, 0x2d, 0x9b, 0x13, 0x9f, 0xa9, 0x52, 0x39,
|
||||
0x5d, 0xc9, 0xe2, 0x38, 0xd4, 0x6d, 0x28, 0x8c, 0x43, 0x22, 0x7a, 0x18, 0x20, 0x69, 0x2c, 0x60,
|
||||
0xb7, 0xe3, 0x37, 0x52, 0x45, 0xff, 0x04, 0x85, 0x5d, 0xe1, 0x8a, 0x09, 0xf1, 0xfe, 0xbf, 0x18,
|
||||
0x5d, 0xb8, 0x3f, 0xd1, 0x6b, 0x69, 0xca, 0xff, 0x90, 0xa0, 0xf0, 0x4e, 0x58, 0x75, 0xf9, 0x4f,
|
||||
0x86, 0xb6, 0x21, 0x17, 0xae, 0x85, 0x78, 0x2f, 0x46, 0x9e, 0x9d, 0xdc, 0xa7, 0x83, 0xe0, 0xd5,
|
||||
0x79, 0x6c, 0xb2, 0x2e, 0x8e, 0xb6, 0x2f, 0x38, 0x07, 0xb2, 0x4c, 0x10, 0x5d, 0x9a, 0x2c, 0x4f,
|
||||
0xa1, 0xb0, 0x47, 0x6c, 0x32, 0x45, 0x95, 0x19, 0xcb, 0x52, 0xbb, 0x92, 0x01, 0x12, 0x33, 0xa2,
|
||||
0x3e, 0xa4, 0x0f, 0x09, 0x47, 0x2f, 0xe6, 0xa0, 0x31, 0x65, 0x25, 0xb5, 0x97, 0x0b, 0xe3, 0x22,
|
||||
0x29, 0xbe, 0x80, 0x1c, 0xac, 0x05, 0x9a, 0xe7, 0x6f, 0x61, 0xea, 0xca, 0x69, 0x9b, 0xb7, 0x40,
|
||||
0x46, 0xcd, 0xbf, 0x82, 0x12, 0x3a, 0x17, 0xcd, 0x53, 0x64, 0xfa, 0x42, 0x69, 0x5b, 0xb7, 0x81,
|
||||
0x26, 0x04, 0x42, 0x8f, 0xcc, 0x45, 0x60, 0xba, 0xef, 0xe7, 0x22, 0x30, 0xcb, 0x89, 0x1f, 0x40,
|
||||
0x09, 0x7d, 0x33, 0x17, 0x81, 0xe9, 0x16, 0xd3, 0x0a, 0x13, 0x1b, 0xb1, 0x1f, 0x7c, 0x69, 0xd4,
|
||||
0x3f, 0x9e, 0x5f, 0x17, 0x57, 0x7e, 0x5d, 0x17, 0x57, 0xbe, 0x0d, 0x8b, 0xd2, 0xf9, 0xb0, 0x28,
|
||||
0xfd, 0x1c, 0x16, 0xa5, 0xab, 0x61, 0x51, 0x7a, 0x7f, 0xf0, 0x0f, 0x1f, 0x4f, 0xdb, 0x49, 0xd4,
|
||||
0x50, 0x44, 0xc7, 0x67, 0xbf, 0x03, 0x00, 0x00, 0xff, 0xff, 0xef, 0xc2, 0x41, 0x7b, 0x8d, 0x09,
|
||||
0x00, 0x00,
|
||||
// 730 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x56, 0xcb, 0x72, 0x12, 0x41,
|
||||
0x14, 0xcd, 0xc0, 0x04, 0xc2, 0xc5, 0x85, 0xd5, 0x22, 0x8e, 0x63, 0x15, 0x10, 0x56, 0x2c, 0x74,
|
||||
0x30, 0x68, 0x69, 0x1e, 0xab, 0x90, 0x57, 0x59, 0x26, 0x56, 0xaa, 0x4b, 0x37, 0xba, 0x88, 0x0d,
|
||||
0x74, 0xc8, 0xc8, 0xbc, 0x9c, 0x6e, 0xa8, 0xa2, 0x5c, 0xe8, 0x27, 0xf8, 0x17, 0xfe, 0x4a, 0x96,
|
||||
0x2e, 0x5d, 0xc5, 0x84, 0x2f, 0xb1, 0xba, 0x67, 0x26, 0x43, 0x78, 0x94, 0x10, 0x65, 0x77, 0x2f,
|
||||
0x7d, 0xcf, 0xbd, 0x67, 0x4e, 0x9f, 0x3b, 0x0c, 0x1c, 0xb6, 0x4d, 0x7e, 0xd6, 0x6d, 0x18, 0x4d,
|
||||
0xd7, 0xae, 0x36, 0x5d, 0x87, 0x13, 0xd3, 0xa1, 0x7e, 0x6b, 0x38, 0x24, 0x9e, 0x59, 0x65, 0xd4,
|
||||
0xef, 0x99, 0x4d, 0xca, 0xe2, 0xdf, 0x59, 0xb5, 0xb7, 0x36, 0x94, 0x19, 0x9e, 0xef, 0x72, 0x17,
|
||||
0xad, 0xc6, 0x38, 0x23, 0xc2, 0x18, 0x43, 0x55, 0xbd, 0x35, 0x3d, 0xd7, 0x76, 0xdb, 0xae, 0xac,
|
||||
0xae, 0x8a, 0x28, 0x00, 0xea, 0x0f, 0xdb, 0xae, 0xdb, 0xb6, 0x68, 0x55, 0x66, 0x8d, 0xee, 0x69,
|
||||
0x95, 0x38, 0xfd, 0xf0, 0xe8, 0xd1, 0xe8, 0x11, 0xb5, 0x3d, 0x1e, 0x1d, 0x96, 0x46, 0x0f, 0x4f,
|
||||
0x4d, 0x6a, 0xb5, 0x4e, 0x6c, 0xc2, 0x3a, 0x61, 0x45, 0x71, 0xb4, 0x82, 0x9b, 0x36, 0x65, 0x9c,
|
||||
0xd8, 0x5e, 0x50, 0x50, 0xbe, 0x50, 0x21, 0xb3, 0x13, 0x51, 0x44, 0x79, 0x48, 0x98, 0x2d, 0x4d,
|
||||
0x29, 0x29, 0x95, 0x4c, 0x3d, 0x35, 0xb8, 0x28, 0x26, 0x5e, 0xed, 0xe2, 0x84, 0xd9, 0x42, 0xc7,
|
||||
0x90, 0xb2, 0x48, 0x83, 0x5a, 0x4c, 0x4b, 0x94, 0x92, 0x95, 0x6c, 0x6d, 0xdd, 0xf8, 0xeb, 0xa3,
|
||||
0x1a, 0xd7, 0x5d, 0x8d, 0x43, 0x09, 0xdd, 0x73, 0xb8, 0xdf, 0xc7, 0x61, 0x1f, 0x94, 0x83, 0x65,
|
||||
0xd3, 0x26, 0x6d, 0xaa, 0x25, 0xc5, 0x30, 0x1c, 0x24, 0xe8, 0x0d, 0xa4, 0xfd, 0xae, 0x23, 0x38,
|
||||
0x6a, 0x6a, 0x49, 0xa9, 0x64, 0x6b, 0xcf, 0xe7, 0x1a, 0x84, 0x03, 0x2c, 0x8e, 0x9a, 0xa0, 0x0a,
|
||||
0xa8, 0xcc, 0xa3, 0x4d, 0x6d, 0x59, 0x36, 0xcb, 0x19, 0x81, 0x1a, 0x46, 0xa4, 0x86, 0xb1, 0xed,
|
||||
0xf4, 0xb1, 0xac, 0x40, 0x25, 0xc8, 0x32, 0x87, 0x78, 0xec, 0xcc, 0xe5, 0x9c, 0xfa, 0x5a, 0x4a,
|
||||
0xb2, 0x1a, 0xfe, 0x09, 0xad, 0xc2, 0x9d, 0x28, 0x3d, 0xe9, 0xd0, 0xbe, 0x96, 0xbe, 0x59, 0xf2,
|
||||
0x9a, 0xf6, 0xd1, 0x0e, 0x40, 0xd3, 0xa7, 0x84, 0xd3, 0xd6, 0x09, 0xe1, 0xda, 0x8a, 0x1c, 0xaa,
|
||||
0x8f, 0x0d, 0x7d, 0x1b, 0x5d, 0x41, 0x7d, 0xe5, 0xfc, 0xa2, 0xb8, 0xf4, 0xfd, 0x77, 0x51, 0xc1,
|
||||
0x99, 0x10, 0xb7, 0xcd, 0x45, 0x93, 0xae, 0xd7, 0x8a, 0x9a, 0x64, 0xe6, 0x69, 0x12, 0xe2, 0xb6,
|
||||
0xb9, 0xbe, 0x01, 0xd9, 0x21, 0xd5, 0xd1, 0x5d, 0x48, 0x0a, 0xca, 0xf2, 0x62, 0xb1, 0x08, 0x85,
|
||||
0xfe, 0x3d, 0x62, 0x75, 0xa9, 0x96, 0x08, 0xf4, 0x97, 0xc9, 0x66, 0x62, 0x5d, 0xd1, 0x8f, 0x20,
|
||||
0x1d, 0xea, 0x88, 0x10, 0xa8, 0x0e, 0xb1, 0x69, 0x88, 0x93, 0x31, 0x32, 0x20, 0xed, 0x7a, 0xdc,
|
||||
0x74, 0x1d, 0x26, 0xa1, 0xd3, 0x54, 0x8d, 0x8a, 0xca, 0x4f, 0xe0, 0xde, 0x01, 0xe5, 0xd7, 0x77,
|
||||
0x84, 0xe9, 0xe7, 0x2e, 0x65, 0x7c, 0x9a, 0xd3, 0xca, 0x67, 0x90, 0xbb, 0x59, 0xce, 0x3c, 0xd7,
|
||||
0x61, 0x14, 0x1d, 0x43, 0xe6, 0xfa, 0xd6, 0x25, 0x2c, 0x5b, 0x7b, 0x3c, 0x8f, 0x37, 0xea, 0xaa,
|
||||
0x90, 0x09, 0xc7, 0x4d, 0xca, 0x6b, 0x70, 0xff, 0xd0, 0x64, 0xf1, 0x28, 0x16, 0x51, 0xd3, 0x20,
|
||||
0x7d, 0x6a, 0x5a, 0x9c, 0xfa, 0x4c, 0x53, 0x4a, 0xc9, 0x4a, 0x06, 0x47, 0x69, 0xd9, 0x82, 0xfc,
|
||||
0x28, 0x24, 0xa4, 0x87, 0x01, 0xe2, 0xc1, 0x12, 0x76, 0x3b, 0x7e, 0x43, 0x5d, 0xca, 0x9f, 0x20,
|
||||
0xbf, 0x23, 0x5d, 0x31, 0x26, 0xde, 0xff, 0x17, 0xa3, 0x03, 0x0f, 0xc6, 0x66, 0x2d, 0x4c, 0xf9,
|
||||
0x1f, 0x0a, 0xe4, 0xdf, 0x49, 0xab, 0x2e, 0xfe, 0xc9, 0xd0, 0x16, 0x64, 0x83, 0xb5, 0x90, 0xaf,
|
||||
0xc5, 0xd0, 0xb3, 0xe3, 0xfb, 0xb4, 0x2f, 0xde, 0x9c, 0x47, 0x84, 0x75, 0x70, 0xb8, 0x7d, 0x22,
|
||||
0x16, 0xb2, 0x8c, 0x11, 0x5d, 0x98, 0x2c, 0x4f, 0x21, 0xbf, 0x4b, 0x2d, 0x3a, 0x41, 0x95, 0x29,
|
||||
0xcb, 0x52, 0xbb, 0x54, 0x01, 0x62, 0x33, 0xa2, 0x1e, 0x24, 0x0f, 0x28, 0x47, 0x2f, 0x66, 0xa0,
|
||||
0x31, 0x61, 0x25, 0xf5, 0x97, 0x73, 0xe3, 0x42, 0x29, 0xbe, 0x80, 0x2a, 0xd6, 0x02, 0xcd, 0xf2,
|
||||
0xaf, 0x30, 0x71, 0xe5, 0xf4, 0x8d, 0x5b, 0x20, 0xc3, 0xe1, 0x5f, 0x21, 0x15, 0x38, 0x17, 0xcd,
|
||||
0xd2, 0x64, 0xf2, 0x42, 0xe9, 0x9b, 0xb7, 0x81, 0xc6, 0x04, 0x02, 0x8f, 0xcc, 0x44, 0x60, 0xb2,
|
||||
0xef, 0x67, 0x22, 0x30, 0xcd, 0x89, 0x1f, 0x20, 0x15, 0xf8, 0x66, 0x26, 0x02, 0x93, 0x2d, 0xa6,
|
||||
0xe7, 0xc7, 0x36, 0x62, 0x4f, 0x7c, 0x68, 0xd4, 0x3f, 0x9e, 0x5f, 0x15, 0x96, 0x7e, 0x5d, 0x15,
|
||||
0x96, 0xbe, 0x0d, 0x0a, 0xca, 0xf9, 0xa0, 0xa0, 0xfc, 0x1c, 0x14, 0x94, 0xcb, 0x41, 0x41, 0x79,
|
||||
0xbf, 0xff, 0x0f, 0xdf, 0x4e, 0x5b, 0x71, 0xd6, 0x48, 0xc9, 0x89, 0xcf, 0xfe, 0x04, 0x00, 0x00,
|
||||
0xff, 0xff, 0x7e, 0x6d, 0xca, 0xbd, 0x8c, 0x09, 0x00, 0x00,
|
||||
}
|
||||
|
|
6
vendor/github.com/containerd/containerd/api/services/containers/v1/containers.proto
generated
vendored
6
vendor/github.com/containerd/containerd/api/services/containers/v1/containers.proto
generated
vendored
|
@ -67,15 +67,17 @@ message Container {
|
|||
// Snapshotter specifies the snapshotter name used for rootfs
|
||||
string snapshotter = 6;
|
||||
|
||||
// RootFS specifies the snapshot key to use for the container's root
|
||||
// SnapshotKey specifies the snapshot key to use for the container's root
|
||||
// filesystem. When starting a task from this container, a caller should
|
||||
// look up the mounts from the snapshot service and include those on the
|
||||
// task create request.
|
||||
//
|
||||
// Snapshots referenced in this field will not be garbage collected.
|
||||
//
|
||||
// This field is set to empty when the rootfs is not a snapshot.
|
||||
//
|
||||
// This field may be updated.
|
||||
string rootfs = 7 [(gogoproto.customname) = "RootFS"];
|
||||
string snapshot_key = 7;
|
||||
|
||||
// CreatedAt is the time the container was first created.
|
||||
google.protobuf.Timestamp created_at = 8 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
|
||||
|
|
82
vendor/github.com/containerd/containerd/api/services/events/v1/container.pb.go
generated
vendored
82
vendor/github.com/containerd/containerd/api/services/events/v1/container.pb.go
generated
vendored
|
@ -94,10 +94,10 @@ func (*ContainerCreate_Runtime) Descriptor() ([]byte, []int) {
|
|||
}
|
||||
|
||||
type ContainerUpdate struct {
|
||||
ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
|
||||
Image string `protobuf:"bytes,2,opt,name=image,proto3" json:"image,omitempty"`
|
||||
Labels map[string]string `protobuf:"bytes,3,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
|
||||
RootFS string `protobuf:"bytes,4,opt,name=rootfs,proto3" json:"rootfs,omitempty"`
|
||||
ID string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
|
||||
Image string `protobuf:"bytes,2,opt,name=image,proto3" json:"image,omitempty"`
|
||||
Labels map[string]string `protobuf:"bytes,3,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
|
||||
SnapshotKey string `protobuf:"bytes,4,opt,name=snapshot_key,json=snapshotKey,proto3" json:"snapshot_key,omitempty"`
|
||||
}
|
||||
|
||||
func (m *ContainerUpdate) Reset() { *m = ContainerUpdate{} }
|
||||
|
@ -194,8 +194,8 @@ func (m *ContainerUpdate) Field(fieldpath []string) (string, bool) {
|
|||
}
|
||||
value, ok := m.Labels[strings.Join(fieldpath[1:], ".")]
|
||||
return value, ok
|
||||
case "rootfs":
|
||||
return string(m.RootFS), len(m.RootFS) > 0
|
||||
case "snapshot_key":
|
||||
return string(m.SnapshotKey), len(m.SnapshotKey) > 0
|
||||
}
|
||||
return "", false
|
||||
}
|
||||
|
@ -331,11 +331,11 @@ func (m *ContainerUpdate) MarshalTo(dAtA []byte) (int, error) {
|
|||
i += copy(dAtA[i:], v)
|
||||
}
|
||||
}
|
||||
if len(m.RootFS) > 0 {
|
||||
if len(m.SnapshotKey) > 0 {
|
||||
dAtA[i] = 0x22
|
||||
i++
|
||||
i = encodeVarintContainer(dAtA, i, uint64(len(m.RootFS)))
|
||||
i += copy(dAtA[i:], m.RootFS)
|
||||
i = encodeVarintContainer(dAtA, i, uint64(len(m.SnapshotKey)))
|
||||
i += copy(dAtA[i:], m.SnapshotKey)
|
||||
}
|
||||
return i, nil
|
||||
}
|
||||
|
@ -442,7 +442,7 @@ func (m *ContainerUpdate) Size() (n int) {
|
|||
n += mapEntrySize + 1 + sovContainer(uint64(mapEntrySize))
|
||||
}
|
||||
}
|
||||
l = len(m.RootFS)
|
||||
l = len(m.SnapshotKey)
|
||||
if l > 0 {
|
||||
n += 1 + l + sovContainer(uint64(l))
|
||||
}
|
||||
|
@ -513,7 +513,7 @@ func (this *ContainerUpdate) String() string {
|
|||
`ID:` + fmt.Sprintf("%v", this.ID) + `,`,
|
||||
`Image:` + fmt.Sprintf("%v", this.Image) + `,`,
|
||||
`Labels:` + mapStringForLabels + `,`,
|
||||
`RootFS:` + fmt.Sprintf("%v", this.RootFS) + `,`,
|
||||
`SnapshotKey:` + fmt.Sprintf("%v", this.SnapshotKey) + `,`,
|
||||
`}`,
|
||||
}, "")
|
||||
return s
|
||||
|
@ -994,7 +994,7 @@ func (m *ContainerUpdate) Unmarshal(dAtA []byte) error {
|
|||
iNdEx = postIndex
|
||||
case 4:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field RootFS", wireType)
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field SnapshotKey", wireType)
|
||||
}
|
||||
var stringLen uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
|
@ -1019,7 +1019,7 @@ func (m *ContainerUpdate) Unmarshal(dAtA []byte) error {
|
|||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.RootFS = string(dAtA[iNdEx:postIndex])
|
||||
m.SnapshotKey = string(dAtA[iNdEx:postIndex])
|
||||
iNdEx = postIndex
|
||||
default:
|
||||
iNdEx = preIndex
|
||||
|
@ -1231,32 +1231,32 @@ func init() {
|
|||
}
|
||||
|
||||
var fileDescriptorContainer = []byte{
|
||||
// 429 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x52, 0x4d, 0x8b, 0xd4, 0x40,
|
||||
0x10, 0xdd, 0xce, 0xac, 0x19, 0xec, 0x39, 0x28, 0xcd, 0x20, 0x71, 0xc0, 0xec, 0x30, 0xa7, 0xf1,
|
||||
0xd2, 0xcd, 0x8e, 0x20, 0xba, 0x82, 0xe8, 0xee, 0xaa, 0x08, 0x0a, 0xd2, 0xe2, 0x45, 0xbc, 0xf4,
|
||||
0x4c, 0x6a, 0xb2, 0x8d, 0x49, 0x77, 0x48, 0x3a, 0x81, 0xdc, 0xfc, 0x29, 0xfe, 0x9c, 0x3d, 0x7a,
|
||||
0xf4, 0xb4, 0xec, 0xe6, 0x27, 0x88, 0x3f, 0x40, 0xd2, 0x9d, 0xec, 0x06, 0xc1, 0xcf, 0xdb, 0xab,
|
||||
0xd4, 0x7b, 0x55, 0xef, 0x55, 0x1a, 0xbf, 0x88, 0xa5, 0x39, 0x29, 0xd7, 0x74, 0xa3, 0x53, 0xb6,
|
||||
0xd1, 0xca, 0x08, 0xa9, 0x20, 0x8f, 0x86, 0x50, 0x64, 0x92, 0x15, 0x90, 0x57, 0x72, 0x03, 0x05,
|
||||
0x83, 0x0a, 0x94, 0x29, 0x58, 0xb5, 0x7f, 0xc5, 0xa0, 0x59, 0xae, 0x8d, 0x26, 0x77, 0xae, 0x24,
|
||||
0xb4, 0xa7, 0x53, 0x47, 0xa7, 0xd5, 0xfe, 0x6c, 0x1a, 0xeb, 0x58, 0x5b, 0x26, 0x6b, 0x91, 0x13,
|
||||
0xcd, 0x6e, 0xc7, 0x5a, 0xc7, 0x09, 0x30, 0x5b, 0xad, 0xcb, 0x2d, 0x13, 0xaa, 0xee, 0x5a, 0x4f,
|
||||
0xfe, 0x68, 0xec, 0x52, 0x94, 0x25, 0x65, 0x2c, 0x15, 0xdb, 0x4a, 0x48, 0xa2, 0x4c, 0x98, 0x13,
|
||||
0x37, 0x61, 0x71, 0x8e, 0xf0, 0x8d, 0xa3, 0x9e, 0x7e, 0x94, 0x83, 0x30, 0x40, 0x6e, 0x61, 0x4f,
|
||||
0x46, 0x01, 0x9a, 0xa3, 0xe5, 0xf5, 0x43, 0xbf, 0x39, 0xdb, 0xf3, 0x5e, 0x1e, 0x73, 0x4f, 0x46,
|
||||
0x64, 0x8a, 0xaf, 0xc9, 0x54, 0xc4, 0x10, 0x78, 0x6d, 0x8b, 0xbb, 0x82, 0xbc, 0xc1, 0xe3, 0xbc,
|
||||
0x54, 0x46, 0xa6, 0x10, 0x8c, 0xe6, 0x68, 0x39, 0x59, 0xdd, 0xa7, 0xbf, 0x4d, 0x49, 0x7f, 0x5a,
|
||||
0x47, 0xb9, 0x53, 0xf3, 0x7e, 0xcc, 0xec, 0x35, 0x1e, 0x77, 0xdf, 0x08, 0xc1, 0xbb, 0x4a, 0xa4,
|
||||
0xe0, 0xcc, 0x70, 0x8b, 0x09, 0xc5, 0x63, 0x9d, 0x19, 0xa9, 0x55, 0x61, 0x8d, 0x4c, 0x56, 0x53,
|
||||
0xea, 0x2e, 0x44, 0xfb, 0xb0, 0xf4, 0xa9, 0xaa, 0x79, 0x4f, 0x5a, 0x7c, 0x1b, 0x46, 0x7c, 0x97,
|
||||
0x45, 0xff, 0x1e, 0x91, 0x63, 0x3f, 0x11, 0x6b, 0x48, 0x8a, 0x60, 0x34, 0x1f, 0x2d, 0x27, 0xab,
|
||||
0x83, 0xbf, 0x4d, 0xe8, 0xb6, 0xd1, 0x57, 0x56, 0xfc, 0x4c, 0x99, 0xbc, 0xe6, 0xdd, 0x24, 0xb2,
|
||||
0xc0, 0x7e, 0xae, 0xb5, 0xd9, 0x16, 0xc1, 0xae, 0x75, 0x81, 0x9b, 0xb3, 0x3d, 0x9f, 0x6b, 0x6d,
|
||||
0x9e, 0xbf, 0xe5, 0x5d, 0x67, 0xf6, 0x10, 0x4f, 0x06, 0x52, 0x72, 0x13, 0x8f, 0x3e, 0x42, 0xdd,
|
||||
0xdd, 0xa2, 0x85, 0xad, 0xdd, 0x4a, 0x24, 0xe5, 0xa5, 0x5d, 0x5b, 0x1c, 0x78, 0x0f, 0xd0, 0xe2,
|
||||
0xee, 0x20, 0xf3, 0x31, 0x24, 0xf0, 0xeb, 0xcc, 0x87, 0x1f, 0x4e, 0x2f, 0xc2, 0x9d, 0xaf, 0x17,
|
||||
0xe1, 0xce, 0xa7, 0x26, 0x44, 0xa7, 0x4d, 0x88, 0xbe, 0x34, 0x21, 0x3a, 0x6f, 0x42, 0xf4, 0xf9,
|
||||
0x7b, 0x88, 0xde, 0x3f, 0xfe, 0xcf, 0xb7, 0xff, 0xc8, 0xa1, 0xb5, 0x6f, 0x7f, 0xca, 0xbd, 0x1f,
|
||||
0x01, 0x00, 0x00, 0xff, 0xff, 0x8e, 0xae, 0x41, 0x3c, 0x44, 0x03, 0x00, 0x00,
|
||||
// 427 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x52, 0x5d, 0x8b, 0xd4, 0x30,
|
||||
0x14, 0xdd, 0x74, 0xd6, 0x19, 0x4c, 0x05, 0x25, 0x0c, 0x52, 0x07, 0xac, 0xe3, 0x3c, 0x8d, 0x2f,
|
||||
0x09, 0x3b, 0x82, 0xe8, 0x0a, 0xa2, 0xbb, 0x2b, 0x22, 0x2a, 0x48, 0xc0, 0x17, 0x11, 0x24, 0x9d,
|
||||
0xde, 0xed, 0x04, 0xdb, 0xa4, 0xb4, 0x69, 0xa1, 0x6f, 0xfe, 0x14, 0x7f, 0xce, 0x3e, 0xfa, 0xe8,
|
||||
0x93, 0xec, 0xf6, 0x1f, 0x08, 0xfe, 0x00, 0x69, 0x32, 0xdd, 0x2d, 0x82, 0x9f, 0x6f, 0xe7, 0xe6,
|
||||
0x9e, 0x73, 0xef, 0x39, 0xb7, 0xc5, 0xcf, 0x12, 0x69, 0x36, 0x55, 0x44, 0xd7, 0x3a, 0x63, 0x6b,
|
||||
0xad, 0x8c, 0x90, 0x0a, 0x8a, 0x78, 0x08, 0x45, 0x2e, 0x59, 0x09, 0x45, 0x2d, 0xd7, 0x50, 0x32,
|
||||
0xa8, 0x41, 0x99, 0x92, 0xd5, 0x7b, 0x17, 0x0c, 0x9a, 0x17, 0xda, 0x68, 0x72, 0xf3, 0x42, 0x42,
|
||||
0x7b, 0x3a, 0x75, 0x74, 0x5a, 0xef, 0xcd, 0xa6, 0x89, 0x4e, 0xb4, 0x65, 0xb2, 0x0e, 0x39, 0xd1,
|
||||
0xec, 0x46, 0xa2, 0x75, 0x92, 0x02, 0xb3, 0x55, 0x54, 0x1d, 0x33, 0xa1, 0x9a, 0x6d, 0xeb, 0xf1,
|
||||
0x1f, 0x8d, 0x9d, 0x8b, 0xf2, 0xb4, 0x4a, 0xa4, 0x62, 0xc7, 0x12, 0xd2, 0x38, 0x17, 0x66, 0xe3,
|
||||
0x26, 0x2c, 0x4e, 0x11, 0xbe, 0x7a, 0xd8, 0xd3, 0x0f, 0x0b, 0x10, 0x06, 0xc8, 0x75, 0xec, 0xc9,
|
||||
0x38, 0x40, 0x73, 0xb4, 0xbc, 0x7c, 0x30, 0x6e, 0xbf, 0xde, 0xf2, 0x9e, 0x1f, 0x71, 0x4f, 0xc6,
|
||||
0x64, 0x8a, 0x2f, 0xc9, 0x4c, 0x24, 0x10, 0x78, 0x5d, 0x8b, 0xbb, 0x82, 0xbc, 0xc6, 0x93, 0xa2,
|
||||
0x52, 0x46, 0x66, 0x10, 0x8c, 0xe6, 0x68, 0xe9, 0xaf, 0xee, 0xd1, 0xdf, 0xa6, 0xa4, 0x3f, 0xad,
|
||||
0xa3, 0xdc, 0xa9, 0x79, 0x3f, 0x66, 0xf6, 0x0a, 0x4f, 0xb6, 0x6f, 0x84, 0xe0, 0x5d, 0x25, 0x32,
|
||||
0x70, 0x66, 0xb8, 0xc5, 0x84, 0xe2, 0x89, 0xce, 0x8d, 0xd4, 0xaa, 0xb4, 0x46, 0xfc, 0xd5, 0x94,
|
||||
0xba, 0x0b, 0xd1, 0x3e, 0x2c, 0x7d, 0xa2, 0x1a, 0xde, 0x93, 0x16, 0xdf, 0x86, 0x11, 0xdf, 0xe4,
|
||||
0xf1, 0xbf, 0x47, 0xe4, 0x78, 0x9c, 0x8a, 0x08, 0xd2, 0x32, 0x18, 0xcd, 0x47, 0x4b, 0x7f, 0xb5,
|
||||
0xff, 0xb7, 0x09, 0xdd, 0x36, 0xfa, 0xd2, 0x8a, 0x9f, 0x2a, 0x53, 0x34, 0x7c, 0x3b, 0x89, 0xdc,
|
||||
0xc6, 0x57, 0x4a, 0x25, 0xf2, 0x72, 0xa3, 0xcd, 0xfb, 0x0f, 0xd0, 0x04, 0xbb, 0x76, 0xa1, 0xdf,
|
||||
0xbf, 0xbd, 0x80, 0x66, 0xf6, 0x00, 0xfb, 0x03, 0x25, 0xb9, 0x86, 0x47, 0x1d, 0xd1, 0x9d, 0xa2,
|
||||
0x83, 0x9d, 0xdb, 0x5a, 0xa4, 0xd5, 0xb9, 0x5b, 0x5b, 0xec, 0x7b, 0xf7, 0xd1, 0xe2, 0xce, 0x20,
|
||||
0xf2, 0x11, 0xa4, 0xf0, 0xeb, 0xc8, 0x07, 0xef, 0x4e, 0xce, 0xc2, 0x9d, 0x2f, 0x67, 0xe1, 0xce,
|
||||
0xc7, 0x36, 0x44, 0x27, 0x6d, 0x88, 0x3e, 0xb7, 0x21, 0x3a, 0x6d, 0x43, 0xf4, 0xe9, 0x7b, 0x88,
|
||||
0xde, 0x3e, 0xfa, 0xcf, 0x5f, 0xff, 0xa1, 0x43, 0xd1, 0xd8, 0x7e, 0x93, 0xbb, 0x3f, 0x02, 0x00,
|
||||
0x00, 0xff, 0xff, 0x26, 0x0d, 0x55, 0x1c, 0x43, 0x03, 0x00, 0x00,
|
||||
}
|
||||
|
|
|
@ -23,7 +23,7 @@ message ContainerUpdate {
|
|||
string id = 1;
|
||||
string image = 2;
|
||||
map<string, string> labels = 3;
|
||||
string rootfs = 4 [(gogoproto.customname) = "RootFS"];
|
||||
string snapshot_key = 4;
|
||||
}
|
||||
|
||||
message ContainerDelete {
|
||||
|
|
|
@ -32,6 +32,8 @@
|
|||
CheckpointTaskRequest
|
||||
CheckpointTaskResponse
|
||||
UpdateTaskRequest
|
||||
MetricsRequest
|
||||
MetricsResponse
|
||||
*/
|
||||
package tasks
|
||||
|
||||
|
@ -43,6 +45,7 @@ import google_protobuf1 "github.com/gogo/protobuf/types"
|
|||
import _ "github.com/gogo/protobuf/gogoproto"
|
||||
import containerd_types "github.com/containerd/containerd/api/types"
|
||||
import containerd_types1 "github.com/containerd/containerd/api/types"
|
||||
import containerd_types2 "github.com/containerd/containerd/api/types"
|
||||
import containerd_v1_types "github.com/containerd/containerd/api/types/task"
|
||||
import _ "github.com/gogo/protobuf/types"
|
||||
|
||||
|
@ -86,7 +89,7 @@ type CreateTaskRequest struct {
|
|||
Stdout string `protobuf:"bytes,5,opt,name=stdout,proto3" json:"stdout,omitempty"`
|
||||
Stderr string `protobuf:"bytes,6,opt,name=stderr,proto3" json:"stderr,omitempty"`
|
||||
Terminal bool `protobuf:"varint,7,opt,name=terminal,proto3" json:"terminal,omitempty"`
|
||||
Checkpoint *containerd_types1.Descriptor `protobuf:"bytes,8,opt,name=checkpoint" json:"checkpoint,omitempty"`
|
||||
Checkpoint *containerd_types2.Descriptor `protobuf:"bytes,8,opt,name=checkpoint" json:"checkpoint,omitempty"`
|
||||
Options *google_protobuf1.Any `protobuf:"bytes,9,opt,name=options" json:"options,omitempty"`
|
||||
}
|
||||
|
||||
|
@ -281,7 +284,7 @@ func (*CheckpointTaskRequest) ProtoMessage() {}
|
|||
func (*CheckpointTaskRequest) Descriptor() ([]byte, []int) { return fileDescriptorTasks, []int{20} }
|
||||
|
||||
type CheckpointTaskResponse struct {
|
||||
Descriptors []*containerd_types1.Descriptor `protobuf:"bytes,1,rep,name=descriptors" json:"descriptors,omitempty"`
|
||||
Descriptors []*containerd_types2.Descriptor `protobuf:"bytes,1,rep,name=descriptors" json:"descriptors,omitempty"`
|
||||
}
|
||||
|
||||
func (m *CheckpointTaskResponse) Reset() { *m = CheckpointTaskResponse{} }
|
||||
|
@ -297,6 +300,22 @@ func (m *UpdateTaskRequest) Reset() { *m = UpdateTaskRequest{
|
|||
func (*UpdateTaskRequest) ProtoMessage() {}
|
||||
func (*UpdateTaskRequest) Descriptor() ([]byte, []int) { return fileDescriptorTasks, []int{22} }
|
||||
|
||||
type MetricsRequest struct {
|
||||
Filters []string `protobuf:"bytes,1,rep,name=filters" json:"filters,omitempty"`
|
||||
}
|
||||
|
||||
func (m *MetricsRequest) Reset() { *m = MetricsRequest{} }
|
||||
func (*MetricsRequest) ProtoMessage() {}
|
||||
func (*MetricsRequest) Descriptor() ([]byte, []int) { return fileDescriptorTasks, []int{23} }
|
||||
|
||||
type MetricsResponse struct {
|
||||
Metrics []*containerd_types1.Metric `protobuf:"bytes,1,rep,name=metrics" json:"metrics,omitempty"`
|
||||
}
|
||||
|
||||
func (m *MetricsResponse) Reset() { *m = MetricsResponse{} }
|
||||
func (*MetricsResponse) ProtoMessage() {}
|
||||
func (*MetricsResponse) Descriptor() ([]byte, []int) { return fileDescriptorTasks, []int{24} }
|
||||
|
||||
func init() {
|
||||
proto.RegisterType((*CreateTaskRequest)(nil), "containerd.services.tasks.v1.CreateTaskRequest")
|
||||
proto.RegisterType((*CreateTaskResponse)(nil), "containerd.services.tasks.v1.CreateTaskResponse")
|
||||
|
@ -321,6 +340,8 @@ func init() {
|
|||
proto.RegisterType((*CheckpointTaskRequest)(nil), "containerd.services.tasks.v1.CheckpointTaskRequest")
|
||||
proto.RegisterType((*CheckpointTaskResponse)(nil), "containerd.services.tasks.v1.CheckpointTaskResponse")
|
||||
proto.RegisterType((*UpdateTaskRequest)(nil), "containerd.services.tasks.v1.UpdateTaskRequest")
|
||||
proto.RegisterType((*MetricsRequest)(nil), "containerd.services.tasks.v1.MetricsRequest")
|
||||
proto.RegisterType((*MetricsResponse)(nil), "containerd.services.tasks.v1.MetricsResponse")
|
||||
}
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
|
@ -353,6 +374,7 @@ type TasksClient interface {
|
|||
ListPids(ctx context.Context, in *ListPidsRequest, opts ...grpc.CallOption) (*ListPidsResponse, error)
|
||||
Checkpoint(ctx context.Context, in *CheckpointTaskRequest, opts ...grpc.CallOption) (*CheckpointTaskResponse, error)
|
||||
Update(ctx context.Context, in *UpdateTaskRequest, opts ...grpc.CallOption) (*google_protobuf.Empty, error)
|
||||
Metrics(ctx context.Context, in *MetricsRequest, opts ...grpc.CallOption) (*MetricsResponse, error)
|
||||
}
|
||||
|
||||
type tasksClient struct {
|
||||
|
@ -498,6 +520,15 @@ func (c *tasksClient) Update(ctx context.Context, in *UpdateTaskRequest, opts ..
|
|||
return out, nil
|
||||
}
|
||||
|
||||
func (c *tasksClient) Metrics(ctx context.Context, in *MetricsRequest, opts ...grpc.CallOption) (*MetricsResponse, error) {
|
||||
out := new(MetricsResponse)
|
||||
err := grpc.Invoke(ctx, "/containerd.services.tasks.v1.Tasks/Metrics", in, out, c.cc, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
// Server API for Tasks service
|
||||
|
||||
type TasksServer interface {
|
||||
|
@ -520,6 +551,7 @@ type TasksServer interface {
|
|||
ListPids(context.Context, *ListPidsRequest) (*ListPidsResponse, error)
|
||||
Checkpoint(context.Context, *CheckpointTaskRequest) (*CheckpointTaskResponse, error)
|
||||
Update(context.Context, *UpdateTaskRequest) (*google_protobuf.Empty, error)
|
||||
Metrics(context.Context, *MetricsRequest) (*MetricsResponse, error)
|
||||
}
|
||||
|
||||
func RegisterTasksServer(s *grpc.Server, srv TasksServer) {
|
||||
|
@ -796,6 +828,24 @@ func _Tasks_Update_Handler(srv interface{}, ctx context.Context, dec func(interf
|
|||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _Tasks_Metrics_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(MetricsRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(TasksServer).Metrics(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/containerd.services.tasks.v1.Tasks/Metrics",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(TasksServer).Metrics(ctx, req.(*MetricsRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
var _Tasks_serviceDesc = grpc.ServiceDesc{
|
||||
ServiceName: "containerd.services.tasks.v1.Tasks",
|
||||
HandlerType: (*TasksServer)(nil),
|
||||
|
@ -860,6 +910,10 @@ var _Tasks_serviceDesc = grpc.ServiceDesc{
|
|||
MethodName: "Update",
|
||||
Handler: _Tasks_Update_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "Metrics",
|
||||
Handler: _Tasks_Metrics_Handler,
|
||||
},
|
||||
},
|
||||
Streams: []grpc.StreamDesc{},
|
||||
Metadata: "github.com/containerd/containerd/api/services/tasks/v1/tasks.proto",
|
||||
|
@ -1661,6 +1715,69 @@ func (m *UpdateTaskRequest) MarshalTo(dAtA []byte) (int, error) {
|
|||
return i, nil
|
||||
}
|
||||
|
||||
func (m *MetricsRequest) Marshal() (dAtA []byte, err error) {
|
||||
size := m.Size()
|
||||
dAtA = make([]byte, size)
|
||||
n, err := m.MarshalTo(dAtA)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return dAtA[:n], nil
|
||||
}
|
||||
|
||||
func (m *MetricsRequest) MarshalTo(dAtA []byte) (int, error) {
|
||||
var i int
|
||||
_ = i
|
||||
var l int
|
||||
_ = l
|
||||
if len(m.Filters) > 0 {
|
||||
for _, s := range m.Filters {
|
||||
dAtA[i] = 0xa
|
||||
i++
|
||||
l = len(s)
|
||||
for l >= 1<<7 {
|
||||
dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
|
||||
l >>= 7
|
||||
i++
|
||||
}
|
||||
dAtA[i] = uint8(l)
|
||||
i++
|
||||
i += copy(dAtA[i:], s)
|
||||
}
|
||||
}
|
||||
return i, nil
|
||||
}
|
||||
|
||||
func (m *MetricsResponse) Marshal() (dAtA []byte, err error) {
|
||||
size := m.Size()
|
||||
dAtA = make([]byte, size)
|
||||
n, err := m.MarshalTo(dAtA)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return dAtA[:n], nil
|
||||
}
|
||||
|
||||
func (m *MetricsResponse) MarshalTo(dAtA []byte) (int, error) {
|
||||
var i int
|
||||
_ = i
|
||||
var l int
|
||||
_ = l
|
||||
if len(m.Metrics) > 0 {
|
||||
for _, msg := range m.Metrics {
|
||||
dAtA[i] = 0xa
|
||||
i++
|
||||
i = encodeVarintTasks(dAtA, i, uint64(msg.Size()))
|
||||
n, err := msg.MarshalTo(dAtA[i:])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
i += n
|
||||
}
|
||||
}
|
||||
return i, nil
|
||||
}
|
||||
|
||||
func encodeFixed64Tasks(dAtA []byte, offset int, v uint64) int {
|
||||
dAtA[offset] = uint8(v)
|
||||
dAtA[offset+1] = uint8(v >> 8)
|
||||
|
@ -2034,6 +2151,30 @@ func (m *UpdateTaskRequest) Size() (n int) {
|
|||
return n
|
||||
}
|
||||
|
||||
func (m *MetricsRequest) Size() (n int) {
|
||||
var l int
|
||||
_ = l
|
||||
if len(m.Filters) > 0 {
|
||||
for _, s := range m.Filters {
|
||||
l = len(s)
|
||||
n += 1 + l + sovTasks(uint64(l))
|
||||
}
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func (m *MetricsResponse) Size() (n int) {
|
||||
var l int
|
||||
_ = l
|
||||
if len(m.Metrics) > 0 {
|
||||
for _, e := range m.Metrics {
|
||||
l = e.Size()
|
||||
n += 1 + l + sovTasks(uint64(l))
|
||||
}
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func sovTasks(x uint64) (n int) {
|
||||
for {
|
||||
n++
|
||||
|
@ -2058,7 +2199,7 @@ func (this *CreateTaskRequest) String() string {
|
|||
`Stdout:` + fmt.Sprintf("%v", this.Stdout) + `,`,
|
||||
`Stderr:` + fmt.Sprintf("%v", this.Stderr) + `,`,
|
||||
`Terminal:` + fmt.Sprintf("%v", this.Terminal) + `,`,
|
||||
`Checkpoint:` + strings.Replace(fmt.Sprintf("%v", this.Checkpoint), "Descriptor", "containerd_types1.Descriptor", 1) + `,`,
|
||||
`Checkpoint:` + strings.Replace(fmt.Sprintf("%v", this.Checkpoint), "Descriptor", "containerd_types2.Descriptor", 1) + `,`,
|
||||
`Options:` + strings.Replace(fmt.Sprintf("%v", this.Options), "Any", "google_protobuf1.Any", 1) + `,`,
|
||||
`}`,
|
||||
}, "")
|
||||
|
@ -2291,7 +2432,7 @@ func (this *CheckpointTaskResponse) String() string {
|
|||
return "nil"
|
||||
}
|
||||
s := strings.Join([]string{`&CheckpointTaskResponse{`,
|
||||
`Descriptors:` + strings.Replace(fmt.Sprintf("%v", this.Descriptors), "Descriptor", "containerd_types1.Descriptor", 1) + `,`,
|
||||
`Descriptors:` + strings.Replace(fmt.Sprintf("%v", this.Descriptors), "Descriptor", "containerd_types2.Descriptor", 1) + `,`,
|
||||
`}`,
|
||||
}, "")
|
||||
return s
|
||||
|
@ -2307,6 +2448,26 @@ func (this *UpdateTaskRequest) String() string {
|
|||
}, "")
|
||||
return s
|
||||
}
|
||||
func (this *MetricsRequest) String() string {
|
||||
if this == nil {
|
||||
return "nil"
|
||||
}
|
||||
s := strings.Join([]string{`&MetricsRequest{`,
|
||||
`Filters:` + fmt.Sprintf("%v", this.Filters) + `,`,
|
||||
`}`,
|
||||
}, "")
|
||||
return s
|
||||
}
|
||||
func (this *MetricsResponse) String() string {
|
||||
if this == nil {
|
||||
return "nil"
|
||||
}
|
||||
s := strings.Join([]string{`&MetricsResponse{`,
|
||||
`Metrics:` + strings.Replace(fmt.Sprintf("%v", this.Metrics), "Metric", "containerd_types1.Metric", 1) + `,`,
|
||||
`}`,
|
||||
}, "")
|
||||
return s
|
||||
}
|
||||
func valueToStringTasks(v interface{}) string {
|
||||
rv := reflect.ValueOf(v)
|
||||
if rv.IsNil() {
|
||||
|
@ -2538,7 +2699,7 @@ func (m *CreateTaskRequest) Unmarshal(dAtA []byte) error {
|
|||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
if m.Checkpoint == nil {
|
||||
m.Checkpoint = &containerd_types1.Descriptor{}
|
||||
m.Checkpoint = &containerd_types2.Descriptor{}
|
||||
}
|
||||
if err := m.Checkpoint.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||
return err
|
||||
|
@ -4822,7 +4983,7 @@ func (m *CheckpointTaskResponse) Unmarshal(dAtA []byte) error {
|
|||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.Descriptors = append(m.Descriptors, &containerd_types1.Descriptor{})
|
||||
m.Descriptors = append(m.Descriptors, &containerd_types2.Descriptor{})
|
||||
if err := m.Descriptors[len(m.Descriptors)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -4960,6 +5121,166 @@ func (m *UpdateTaskRequest) Unmarshal(dAtA []byte) error {
|
|||
}
|
||||
return nil
|
||||
}
|
||||
func (m *MetricsRequest) Unmarshal(dAtA []byte) error {
|
||||
l := len(dAtA)
|
||||
iNdEx := 0
|
||||
for iNdEx < l {
|
||||
preIndex := iNdEx
|
||||
var wire uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowTasks
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
fieldNum := int32(wire >> 3)
|
||||
wireType := int(wire & 0x7)
|
||||
if wireType == 4 {
|
||||
return fmt.Errorf("proto: MetricsRequest: wiretype end group for non-group")
|
||||
}
|
||||
if fieldNum <= 0 {
|
||||
return fmt.Errorf("proto: MetricsRequest: illegal tag %d (wire type %d)", fieldNum, wire)
|
||||
}
|
||||
switch fieldNum {
|
||||
case 1:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType)
|
||||
}
|
||||
var stringLen uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowTasks
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLen |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
intStringLen := int(stringLen)
|
||||
if intStringLen < 0 {
|
||||
return ErrInvalidLengthTasks
|
||||
}
|
||||
postIndex := iNdEx + intStringLen
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.Filters = append(m.Filters, string(dAtA[iNdEx:postIndex]))
|
||||
iNdEx = postIndex
|
||||
default:
|
||||
iNdEx = preIndex
|
||||
skippy, err := skipTasks(dAtA[iNdEx:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if skippy < 0 {
|
||||
return ErrInvalidLengthTasks
|
||||
}
|
||||
if (iNdEx + skippy) > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
iNdEx += skippy
|
||||
}
|
||||
}
|
||||
|
||||
if iNdEx > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func (m *MetricsResponse) Unmarshal(dAtA []byte) error {
|
||||
l := len(dAtA)
|
||||
iNdEx := 0
|
||||
for iNdEx < l {
|
||||
preIndex := iNdEx
|
||||
var wire uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowTasks
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
fieldNum := int32(wire >> 3)
|
||||
wireType := int(wire & 0x7)
|
||||
if wireType == 4 {
|
||||
return fmt.Errorf("proto: MetricsResponse: wiretype end group for non-group")
|
||||
}
|
||||
if fieldNum <= 0 {
|
||||
return fmt.Errorf("proto: MetricsResponse: illegal tag %d (wire type %d)", fieldNum, wire)
|
||||
}
|
||||
switch fieldNum {
|
||||
case 1:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Metrics", wireType)
|
||||
}
|
||||
var msglen int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowTasks
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
msglen |= (int(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if msglen < 0 {
|
||||
return ErrInvalidLengthTasks
|
||||
}
|
||||
postIndex := iNdEx + msglen
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.Metrics = append(m.Metrics, &containerd_types1.Metric{})
|
||||
if err := m.Metrics[len(m.Metrics)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||
return err
|
||||
}
|
||||
iNdEx = postIndex
|
||||
default:
|
||||
iNdEx = preIndex
|
||||
skippy, err := skipTasks(dAtA[iNdEx:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if skippy < 0 {
|
||||
return ErrInvalidLengthTasks
|
||||
}
|
||||
if (iNdEx + skippy) > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
iNdEx += skippy
|
||||
}
|
||||
}
|
||||
|
||||
if iNdEx > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func skipTasks(dAtA []byte) (n int, err error) {
|
||||
l := len(dAtA)
|
||||
iNdEx := 0
|
||||
|
@ -5070,81 +5391,85 @@ func init() {
|
|||
}
|
||||
|
||||
var fileDescriptorTasks = []byte{
|
||||
// 1207 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x58, 0xcd, 0x6f, 0x1b, 0x45,
|
||||
0x14, 0xef, 0xfa, 0x63, 0x6d, 0x3f, 0x37, 0x6d, 0x32, 0xa4, 0xc1, 0x2c, 0x95, 0x1d, 0x16, 0x09,
|
||||
0x99, 0x42, 0x77, 0xa9, 0x8b, 0x7a, 0xa0, 0x15, 0x52, 0x13, 0x87, 0xc8, 0x02, 0xd4, 0x74, 0x53,
|
||||
0x10, 0xca, 0xc5, 0x6c, 0xbc, 0x13, 0x67, 0x14, 0x7b, 0x77, 0xbb, 0x33, 0x4e, 0x13, 0x38, 0xc0,
|
||||
0x9f, 0xd0, 0x0b, 0x07, 0x2e, 0xfc, 0x3d, 0x39, 0x72, 0x44, 0x08, 0x05, 0xea, 0xff, 0x82, 0x1b,
|
||||
0x9a, 0x0f, 0x6f, 0x36, 0x76, 0xfc, 0x91, 0xba, 0xe1, 0x92, 0xcc, 0xcc, 0xbe, 0xaf, 0xf9, 0xbd,
|
||||
0x37, 0xbf, 0xf7, 0x12, 0x58, 0x6b, 0x13, 0xb6, 0xdf, 0xdb, 0xb5, 0x5a, 0x41, 0xd7, 0x6e, 0x05,
|
||||
0x3e, 0x73, 0x89, 0x8f, 0x23, 0x2f, 0xb9, 0x74, 0x43, 0x62, 0x53, 0x1c, 0x1d, 0x92, 0x16, 0xa6,
|
||||
0x36, 0x73, 0xe9, 0x01, 0xb5, 0x0f, 0xef, 0xc9, 0x85, 0x15, 0x46, 0x01, 0x0b, 0xd0, 0xed, 0x33,
|
||||
0x69, 0x6b, 0x20, 0x69, 0x49, 0x81, 0xc3, 0x7b, 0xc6, 0xbb, 0xed, 0x20, 0x68, 0x77, 0xb0, 0x2d,
|
||||
0x64, 0x77, 0x7b, 0x7b, 0x36, 0xee, 0x86, 0xec, 0x58, 0xaa, 0x1a, 0xef, 0x0c, 0x7f, 0x74, 0xfd,
|
||||
0xc1, 0xa7, 0xe5, 0x76, 0xd0, 0x0e, 0xc4, 0xd2, 0xe6, 0x2b, 0x75, 0xfa, 0x60, 0xa6, 0x78, 0xd9,
|
||||
0x71, 0x88, 0xa9, 0xdd, 0x0d, 0x7a, 0x3e, 0x53, 0x7a, 0x0f, 0x2f, 0xa1, 0xe7, 0x61, 0xda, 0x8a,
|
||||
0x48, 0xc8, 0x82, 0x48, 0x29, 0x7f, 0x76, 0x09, 0x65, 0x7e, 0x6f, 0xf1, 0x43, 0xe9, 0x56, 0x86,
|
||||
0x6f, 0xc8, 0x48, 0x17, 0x53, 0xe6, 0x76, 0x43, 0x29, 0x60, 0x9e, 0xa4, 0x60, 0x69, 0x3d, 0xc2,
|
||||
0x2e, 0xc3, 0xcf, 0x5c, 0x7a, 0xe0, 0xe0, 0xe7, 0x3d, 0x4c, 0x19, 0xaa, 0xc1, 0xf5, 0xd8, 0x7c,
|
||||
0x93, 0x78, 0x25, 0x6d, 0x55, 0xab, 0x16, 0xd6, 0x6e, 0xf6, 0x4f, 0x2b, 0xc5, 0xf5, 0xc1, 0x79,
|
||||
0xa3, 0xee, 0x14, 0x63, 0xa1, 0x86, 0x87, 0x6c, 0xd0, 0xa3, 0x20, 0x60, 0x7b, 0xb4, 0x94, 0x5e,
|
||||
0x4d, 0x57, 0x8b, 0xb5, 0xb7, 0xad, 0x44, 0x62, 0x44, 0x74, 0xd6, 0xd7, 0x1c, 0x12, 0x47, 0x89,
|
||||
0xa1, 0x65, 0xc8, 0x52, 0xe6, 0x11, 0xbf, 0x94, 0xe1, 0xd6, 0x1d, 0xb9, 0x41, 0x2b, 0xa0, 0x53,
|
||||
0xe6, 0x05, 0x3d, 0x56, 0xca, 0x8a, 0x63, 0xb5, 0x53, 0xe7, 0x38, 0x8a, 0x4a, 0x7a, 0x7c, 0x8e,
|
||||
0xa3, 0x08, 0x19, 0x90, 0x67, 0x38, 0xea, 0x12, 0xdf, 0xed, 0x94, 0x72, 0xab, 0x5a, 0x35, 0xef,
|
||||
0xc4, 0x7b, 0xf4, 0x08, 0xa0, 0xb5, 0x8f, 0x5b, 0x07, 0x61, 0x40, 0x7c, 0x56, 0xca, 0xaf, 0x6a,
|
||||
0xd5, 0x62, 0xed, 0xf6, 0x68, 0x58, 0xf5, 0x18, 0x71, 0x27, 0x21, 0x8f, 0x2c, 0xc8, 0x05, 0x21,
|
||||
0x23, 0x81, 0x4f, 0x4b, 0x05, 0xa1, 0xba, 0x6c, 0x49, 0x34, 0xad, 0x01, 0x9a, 0xd6, 0x63, 0xff,
|
||||
0xd8, 0x19, 0x08, 0x99, 0x3b, 0x80, 0x92, 0x48, 0xd2, 0x30, 0xf0, 0x29, 0x7e, 0x2d, 0x28, 0x17,
|
||||
0x21, 0x1d, 0x12, 0xaf, 0x94, 0x5a, 0xd5, 0xaa, 0x0b, 0x0e, 0x5f, 0x9a, 0x6d, 0xb8, 0xbe, 0xcd,
|
||||
0xdc, 0x88, 0xcd, 0x93, 0xa0, 0xf7, 0x21, 0x87, 0x8f, 0x70, 0xab, 0xa9, 0x2c, 0x17, 0xd6, 0xa0,
|
||||
0x7f, 0x5a, 0xd1, 0x37, 0x8e, 0x70, 0xab, 0x51, 0x77, 0x74, 0xfe, 0xa9, 0xe1, 0x99, 0xef, 0xc1,
|
||||
0x82, 0x72, 0xa4, 0xe2, 0x57, 0xb1, 0x68, 0x67, 0xb1, 0x6c, 0xc2, 0x52, 0x1d, 0x77, 0xf0, 0xdc,
|
||||
0x15, 0x63, 0xfe, 0xa6, 0xc1, 0x0d, 0x69, 0x29, 0xf6, 0xb6, 0x02, 0xa9, 0x58, 0x59, 0xef, 0x9f,
|
||||
0x56, 0x52, 0x8d, 0xba, 0x93, 0x22, 0x17, 0x20, 0x82, 0x2a, 0x50, 0xc4, 0x47, 0x84, 0x35, 0x29,
|
||||
0x73, 0x59, 0x8f, 0xd7, 0x1c, 0xff, 0x02, 0xfc, 0x68, 0x5b, 0x9c, 0xa0, 0xc7, 0x50, 0xe0, 0x3b,
|
||||
0xec, 0x35, 0x5d, 0x26, 0x4a, 0xac, 0x58, 0x33, 0x46, 0x12, 0xf8, 0x6c, 0xf0, 0x1c, 0xd6, 0xf2,
|
||||
0x27, 0xa7, 0x95, 0x6b, 0x2f, 0xff, 0xae, 0x68, 0x4e, 0x5e, 0xaa, 0x3d, 0x66, 0x66, 0x00, 0xcb,
|
||||
0x32, 0xbe, 0xad, 0x28, 0x68, 0x61, 0x4a, 0xaf, 0x1c, 0x7d, 0x0c, 0xb0, 0x89, 0xaf, 0x3e, 0xc9,
|
||||
0x1b, 0x50, 0x14, 0x6e, 0x14, 0xe8, 0x0f, 0x20, 0x17, 0xca, 0x0b, 0x0a, 0x17, 0x43, 0x6f, 0xe4,
|
||||
0xf0, 0x9e, 0x7a, 0x26, 0x03, 0x10, 0x06, 0xc2, 0xe6, 0x1d, 0x58, 0xfc, 0x8a, 0x50, 0xc6, 0xcb,
|
||||
0x20, 0x86, 0x66, 0x05, 0xf4, 0x3d, 0xd2, 0x61, 0x38, 0x92, 0xd1, 0x3a, 0x6a, 0xc7, 0x8b, 0x26,
|
||||
0x21, 0x1b, 0xbf, 0x8d, 0xac, 0x20, 0xea, 0x92, 0x26, 0x18, 0x63, 0xb2, 0x5b, 0x29, 0x6a, 0xbe,
|
||||
0xd4, 0xa0, 0xf8, 0x25, 0xe9, 0x74, 0xae, 0x1a, 0x24, 0x41, 0x38, 0xa4, 0xcd, 0x69, 0x45, 0xd6,
|
||||
0x96, 0xda, 0xf1, 0x52, 0x74, 0x3b, 0x1d, 0x51, 0x51, 0x79, 0x87, 0x2f, 0xcd, 0x7f, 0x35, 0x40,
|
||||
0x5c, 0xf9, 0x0d, 0x54, 0x49, 0xcc, 0x89, 0xa9, 0x8b, 0x39, 0x31, 0x3d, 0x86, 0x13, 0x33, 0x63,
|
||||
0x39, 0x31, 0x3b, 0xc4, 0x89, 0x55, 0xc8, 0xd0, 0x10, 0xb7, 0x04, 0x8b, 0x8e, 0xa3, 0x34, 0x21,
|
||||
0x91, 0x44, 0x29, 0x37, 0xb6, 0x94, 0x6e, 0xc1, 0x5b, 0xe7, 0xae, 0x2e, 0x33, 0x6b, 0xfe, 0xaa,
|
||||
0xc1, 0xa2, 0x83, 0x29, 0xf9, 0x01, 0x6f, 0xb1, 0xe3, 0x2b, 0x4f, 0xd5, 0x32, 0x64, 0x5f, 0x10,
|
||||
0x8f, 0xed, 0xab, 0x4c, 0xc9, 0x0d, 0x47, 0x67, 0x1f, 0x93, 0xf6, 0xbe, 0x7c, 0xfd, 0x0b, 0x8e,
|
||||
0xda, 0x99, 0x3f, 0xc1, 0x8d, 0xf5, 0x4e, 0x40, 0x71, 0xe3, 0xc9, 0xff, 0x11, 0x98, 0x4c, 0x67,
|
||||
0x5a, 0x64, 0x41, 0x6e, 0xcc, 0x2f, 0x60, 0x71, 0xcb, 0xed, 0xd1, 0xb9, 0xf9, 0x73, 0x13, 0x96,
|
||||
0x1c, 0x4c, 0x7b, 0xdd, 0xb9, 0x0d, 0x6d, 0xc0, 0x4d, 0xfe, 0x38, 0xb7, 0x88, 0x37, 0x4f, 0xf1,
|
||||
0x9a, 0x1f, 0x48, 0x3e, 0x90, 0x66, 0xd4, 0x13, 0x47, 0x90, 0x09, 0x89, 0x27, 0x5f, 0xf8, 0x82,
|
||||
0x23, 0xd6, 0xe6, 0x5f, 0x1a, 0xdc, 0x5a, 0x8f, 0xfb, 0xec, 0xbc, 0x73, 0x47, 0x13, 0x96, 0x42,
|
||||
0x37, 0xc2, 0x3e, 0x6b, 0x26, 0x7a, 0xbd, 0x4c, 0x49, 0x8d, 0x73, 0xfa, 0x9f, 0xa7, 0x95, 0x3b,
|
||||
0x89, 0x09, 0x2a, 0x08, 0xb1, 0x1f, 0xab, 0x53, 0xbb, 0x1d, 0xdc, 0xf5, 0x48, 0x1b, 0x53, 0x66,
|
||||
0xd5, 0xc5, 0x2f, 0x67, 0x51, 0x1a, 0x5b, 0xbf, 0x70, 0x0e, 0x48, 0xcf, 0x32, 0x07, 0x7c, 0x07,
|
||||
0x2b, 0xc3, 0xb7, 0x53, 0x60, 0x7c, 0x0e, 0xc5, 0xb3, 0xe9, 0xee, 0x42, 0xd6, 0x1b, 0x19, 0x48,
|
||||
0x92, 0x0a, 0xe6, 0x8f, 0xb0, 0xf4, 0x4d, 0xe8, 0xbd, 0x81, 0x59, 0xad, 0x06, 0x85, 0x08, 0xd3,
|
||||
0xa0, 0x17, 0xb5, 0x30, 0x15, 0x58, 0x8d, 0xbb, 0xd4, 0x99, 0x58, 0xed, 0x97, 0x22, 0x64, 0x05,
|
||||
0x7d, 0xa3, 0x03, 0xd0, 0xe5, 0xa0, 0x83, 0x6c, 0x6b, 0xd2, 0xf0, 0x6d, 0x8d, 0x0c, 0x96, 0xc6,
|
||||
0x27, 0xb3, 0x2b, 0x28, 0xcc, 0xbe, 0x87, 0xac, 0x18, 0x48, 0xd0, 0x9d, 0xc9, 0xaa, 0xc9, 0xf1,
|
||||
0xc8, 0xf8, 0x68, 0x26, 0x59, 0xe5, 0xa1, 0x0d, 0xba, 0xec, 0xf2, 0xd3, 0xae, 0x33, 0x32, 0xf5,
|
||||
0x18, 0x1f, 0xcf, 0xa2, 0x10, 0x3b, 0x7a, 0x0e, 0x0b, 0xe7, 0xc6, 0x09, 0x54, 0x9b, 0x45, 0xfd,
|
||||
0x7c, 0x57, 0xb9, 0xa4, 0xcb, 0x1d, 0x48, 0x6f, 0x62, 0x86, 0xaa, 0x93, 0x95, 0xce, 0x66, 0x0e,
|
||||
0xe3, 0xc3, 0x19, 0x24, 0x63, 0xdc, 0x32, 0xfc, 0xb9, 0x23, 0x6b, 0xb2, 0xca, 0xf0, 0x88, 0x60,
|
||||
0xd8, 0x33, 0xcb, 0x2b, 0x47, 0x0d, 0xc8, 0xf0, 0x8e, 0x8f, 0xa6, 0xc4, 0x96, 0x98, 0x0a, 0x8c,
|
||||
0x95, 0x91, 0x6a, 0xde, 0xe0, 0x7f, 0xf7, 0xa1, 0x2d, 0xc8, 0x70, 0x8a, 0x46, 0x53, 0xea, 0x70,
|
||||
0xb4, 0x9b, 0x8f, 0xb5, 0xb8, 0x0d, 0x85, 0xb8, 0xd1, 0x4d, 0x83, 0x62, 0xb8, 0x23, 0x8e, 0x35,
|
||||
0xfa, 0x04, 0x72, 0xaa, 0x45, 0xa1, 0x29, 0xf9, 0x3e, 0xdf, 0xc9, 0x26, 0x18, 0xcc, 0x8a, 0x96,
|
||||
0x33, 0x2d, 0xc2, 0xe1, 0xbe, 0x34, 0xd6, 0xe0, 0x53, 0xd0, 0x65, 0xef, 0x99, 0xf6, 0x68, 0x46,
|
||||
0x3a, 0xd4, 0x58, 0x93, 0x04, 0xf2, 0x83, 0xf6, 0x81, 0xee, 0x4e, 0xaf, 0x91, 0x44, 0xb7, 0x32,
|
||||
0xac, 0x59, 0xc5, 0x55, 0x45, 0xbd, 0x00, 0x48, 0x10, 0xfc, 0xfd, 0x29, 0x10, 0x5f, 0xd4, 0xaa,
|
||||
0x8c, 0x4f, 0x2f, 0xa7, 0xa4, 0x1c, 0x3f, 0x05, 0x5d, 0x32, 0xf8, 0x34, 0xd8, 0x46, 0x78, 0x7e,
|
||||
0x1c, 0x6c, 0x6b, 0xdf, 0x9e, 0xbc, 0x2a, 0x5f, 0xfb, 0xe3, 0x55, 0xf9, 0xda, 0xcf, 0xfd, 0xb2,
|
||||
0x76, 0xd2, 0x2f, 0x6b, 0xbf, 0xf7, 0xcb, 0xda, 0x3f, 0xfd, 0xb2, 0xb6, 0xf3, 0xe8, 0xf5, 0xfe,
|
||||
0xbb, 0xf2, 0x50, 0x2c, 0x76, 0x75, 0xe1, 0xe7, 0xfe, 0x7f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x17,
|
||||
0xec, 0xf8, 0x44, 0xa4, 0x11, 0x00, 0x00,
|
||||
// 1274 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x58, 0x4f, 0x6f, 0x1b, 0x45,
|
||||
0x14, 0xef, 0xc6, 0xf6, 0xda, 0x7e, 0x6e, 0xda, 0x64, 0x48, 0xc3, 0xb2, 0x54, 0x71, 0x58, 0x24,
|
||||
0x64, 0x02, 0xdd, 0xa5, 0x2e, 0xaa, 0x10, 0xad, 0x90, 0x9a, 0x3f, 0x44, 0x16, 0x54, 0x4d, 0xb7,
|
||||
0x05, 0xa1, 0x5e, 0xc2, 0x76, 0x77, 0xe2, 0x8c, 0x62, 0xef, 0x6e, 0x77, 0xc6, 0x69, 0x03, 0x07,
|
||||
0xf8, 0x08, 0xbd, 0x72, 0xe1, 0xf3, 0xe4, 0xc8, 0x11, 0x21, 0x14, 0xa8, 0xbf, 0x05, 0x07, 0x24,
|
||||
0x34, 0x7f, 0x76, 0xb3, 0xb1, 0xe3, 0xd8, 0xa9, 0x1b, 0x2e, 0xed, 0xcc, 0xec, 0xef, 0xbd, 0x79,
|
||||
0xf3, 0x9b, 0x37, 0xef, 0xfd, 0x1c, 0x58, 0x6d, 0x13, 0xb6, 0xdb, 0x7b, 0x6a, 0xfb, 0x51, 0xd7,
|
||||
0xf1, 0xa3, 0x90, 0x79, 0x24, 0xc4, 0x49, 0x90, 0x1f, 0x7a, 0x31, 0x71, 0x28, 0x4e, 0xf6, 0x89,
|
||||
0x8f, 0xa9, 0xc3, 0x3c, 0xba, 0x47, 0x9d, 0xfd, 0x9b, 0x72, 0x60, 0xc7, 0x49, 0xc4, 0x22, 0x74,
|
||||
0xfd, 0x18, 0x6d, 0xa7, 0x48, 0x5b, 0x02, 0xf6, 0x6f, 0x9a, 0xef, 0xb6, 0xa3, 0xa8, 0xdd, 0xc1,
|
||||
0x8e, 0xc0, 0x3e, 0xed, 0xed, 0x38, 0xb8, 0x1b, 0xb3, 0x03, 0x69, 0x6a, 0xbe, 0x33, 0xf8, 0xd1,
|
||||
0x0b, 0xd3, 0x4f, 0x0b, 0xed, 0xa8, 0x1d, 0x89, 0xa1, 0xc3, 0x47, 0x6a, 0xf5, 0xf6, 0x44, 0xf1,
|
||||
0xb2, 0x83, 0x18, 0x53, 0xa7, 0x1b, 0xf5, 0x42, 0xa6, 0xec, 0x3e, 0x3b, 0x8f, 0x1d, 0x66, 0x09,
|
||||
0xf1, 0xd5, 0xe9, 0xcc, 0x3b, 0xe7, 0xb0, 0x0c, 0x30, 0xf5, 0x13, 0x12, 0xb3, 0x28, 0x51, 0xc6,
|
||||
0x9f, 0x9f, 0xc3, 0x98, 0x33, 0x26, 0xfe, 0x51, 0xb6, 0xf5, 0x41, 0x6e, 0x18, 0xe9, 0x62, 0xca,
|
||||
0xbc, 0x6e, 0x2c, 0x01, 0xd6, 0xe1, 0x0c, 0xcc, 0xaf, 0x25, 0xd8, 0x63, 0xf8, 0xb1, 0x47, 0xf7,
|
||||
0x5c, 0xfc, 0xac, 0x87, 0x29, 0x43, 0x4d, 0xb8, 0x9c, 0xb9, 0xdf, 0x26, 0x81, 0xa1, 0x2d, 0x6b,
|
||||
0x8d, 0xea, 0xea, 0xd5, 0xfe, 0x51, 0xbd, 0xb6, 0x96, 0xae, 0xb7, 0xd6, 0xdd, 0x5a, 0x06, 0x6a,
|
||||
0x05, 0xc8, 0x01, 0x3d, 0x89, 0x22, 0xb6, 0x43, 0x8d, 0xc2, 0x72, 0xa1, 0x51, 0x6b, 0xbe, 0x6d,
|
||||
0xe7, 0xae, 0x54, 0x44, 0x67, 0xdf, 0xe7, 0x64, 0xba, 0x0a, 0x86, 0x16, 0xa0, 0x44, 0x59, 0x40,
|
||||
0x42, 0xa3, 0xc8, 0xbd, 0xbb, 0x72, 0x82, 0x16, 0x41, 0xa7, 0x2c, 0x88, 0x7a, 0xcc, 0x28, 0x89,
|
||||
0x65, 0x35, 0x53, 0xeb, 0x38, 0x49, 0x0c, 0x3d, 0x5b, 0xc7, 0x49, 0x82, 0x4c, 0xa8, 0x30, 0x9c,
|
||||
0x74, 0x49, 0xe8, 0x75, 0x8c, 0xf2, 0xb2, 0xd6, 0xa8, 0xb8, 0xd9, 0x1c, 0xdd, 0x05, 0xf0, 0x77,
|
||||
0xb1, 0xbf, 0x17, 0x47, 0x24, 0x64, 0x46, 0x65, 0x59, 0x6b, 0xd4, 0x9a, 0xd7, 0x87, 0xc3, 0x5a,
|
||||
0xcf, 0x18, 0x77, 0x73, 0x78, 0x64, 0x43, 0x39, 0x8a, 0x19, 0x89, 0x42, 0x6a, 0x54, 0x85, 0xe9,
|
||||
0x82, 0x2d, 0xd9, 0xb4, 0x53, 0x36, 0xed, 0x7b, 0xe1, 0x81, 0x9b, 0x82, 0xac, 0x27, 0x80, 0xf2,
|
||||
0x4c, 0xd2, 0x38, 0x0a, 0x29, 0x7e, 0x2d, 0x2a, 0xe7, 0xa0, 0x10, 0x93, 0xc0, 0x98, 0x59, 0xd6,
|
||||
0x1a, 0xb3, 0x2e, 0x1f, 0x5a, 0x6d, 0xb8, 0xfc, 0x88, 0x79, 0x09, 0x9b, 0xe6, 0x82, 0xde, 0x87,
|
||||
0x32, 0x7e, 0x81, 0xfd, 0x6d, 0xe5, 0xb9, 0xba, 0x0a, 0xfd, 0xa3, 0xba, 0xbe, 0xf1, 0x02, 0xfb,
|
||||
0xad, 0x75, 0x57, 0xe7, 0x9f, 0x5a, 0x81, 0xf5, 0x1e, 0xcc, 0xaa, 0x8d, 0x54, 0xfc, 0x2a, 0x16,
|
||||
0xed, 0x38, 0x96, 0x4d, 0x98, 0x5f, 0xc7, 0x1d, 0x3c, 0x75, 0xc6, 0x58, 0xbf, 0x6a, 0x70, 0x45,
|
||||
0x7a, 0xca, 0x76, 0x5b, 0x84, 0x99, 0xcc, 0x58, 0xef, 0x1f, 0xd5, 0x67, 0x5a, 0xeb, 0xee, 0x0c,
|
||||
0x39, 0x85, 0x11, 0x54, 0x87, 0x1a, 0x7e, 0x41, 0xd8, 0x36, 0x65, 0x1e, 0xeb, 0xf1, 0x9c, 0xe3,
|
||||
0x5f, 0x80, 0x2f, 0x3d, 0x12, 0x2b, 0xe8, 0x1e, 0x54, 0xf9, 0x0c, 0x07, 0xdb, 0x1e, 0x13, 0x29,
|
||||
0x56, 0x6b, 0x9a, 0x43, 0x17, 0xf8, 0x38, 0x7d, 0x0e, 0xab, 0x95, 0xc3, 0xa3, 0xfa, 0xa5, 0x97,
|
||||
0x7f, 0xd5, 0x35, 0xb7, 0x22, 0xcd, 0xee, 0x31, 0x2b, 0x82, 0x05, 0x19, 0xdf, 0x56, 0x12, 0xf9,
|
||||
0x98, 0xd2, 0x0b, 0x67, 0x1f, 0x03, 0x6c, 0xe2, 0x8b, 0xbf, 0xe4, 0x0d, 0xa8, 0x89, 0x6d, 0x14,
|
||||
0xe9, 0xb7, 0xa1, 0x1c, 0xcb, 0x03, 0x8a, 0x2d, 0x06, 0xde, 0xc8, 0xfe, 0x4d, 0xf5, 0x4c, 0x52,
|
||||
0x12, 0x52, 0xb0, 0xb5, 0x02, 0x73, 0x5f, 0x13, 0xca, 0x78, 0x1a, 0x64, 0xd4, 0x2c, 0x82, 0xbe,
|
||||
0x43, 0x3a, 0x0c, 0x27, 0x32, 0x5a, 0x57, 0xcd, 0x78, 0xd2, 0xe4, 0xb0, 0xd9, 0xdb, 0x28, 0x89,
|
||||
0x12, 0x6f, 0x68, 0xa2, 0x62, 0x9c, 0xbd, 0xad, 0x84, 0x5a, 0x2f, 0x35, 0xa8, 0x7d, 0x45, 0x3a,
|
||||
0x9d, 0x8b, 0x26, 0x49, 0x14, 0x1c, 0xd2, 0xe6, 0x65, 0x45, 0xe6, 0x96, 0x9a, 0xf1, 0x54, 0xf4,
|
||||
0x3a, 0x1d, 0x91, 0x51, 0x15, 0x97, 0x0f, 0xad, 0x7f, 0x34, 0x40, 0xdc, 0xf8, 0x0d, 0x64, 0x49,
|
||||
0x56, 0x13, 0x67, 0x4e, 0xaf, 0x89, 0x85, 0x11, 0x35, 0xb1, 0x38, 0xb2, 0x26, 0x96, 0x06, 0x6a,
|
||||
0x62, 0x03, 0x8a, 0x34, 0xc6, 0xbe, 0xa8, 0xa2, 0xa3, 0x4a, 0x9a, 0x40, 0xe4, 0x59, 0x2a, 0x8f,
|
||||
0x4c, 0xa5, 0x6b, 0xf0, 0xd6, 0x89, 0xa3, 0xcb, 0x9b, 0xb5, 0x7e, 0xd1, 0x60, 0xce, 0xc5, 0x94,
|
||||
0xfc, 0x80, 0xb7, 0xd8, 0xc1, 0x85, 0x5f, 0xd5, 0x02, 0x94, 0x9e, 0x93, 0x80, 0xed, 0xaa, 0x9b,
|
||||
0x92, 0x13, 0xce, 0xce, 0x2e, 0x26, 0xed, 0x5d, 0xf9, 0xfa, 0x67, 0x5d, 0x35, 0xb3, 0x7e, 0x82,
|
||||
0x2b, 0x6b, 0x9d, 0x88, 0xe2, 0xd6, 0x83, 0xff, 0x23, 0x30, 0x79, 0x9d, 0x05, 0x71, 0x0b, 0x72,
|
||||
0x62, 0x7d, 0x09, 0x73, 0x5b, 0x5e, 0x8f, 0x4e, 0x5d, 0x3f, 0x37, 0x61, 0xde, 0xc5, 0xb4, 0xd7,
|
||||
0x9d, 0xda, 0xd1, 0x06, 0x5c, 0xe5, 0x8f, 0x73, 0x8b, 0x04, 0xd3, 0x24, 0xaf, 0xf5, 0x81, 0xac,
|
||||
0x07, 0xd2, 0x8d, 0x7a, 0xe2, 0x08, 0x8a, 0x31, 0x09, 0xe4, 0x0b, 0x9f, 0x75, 0xc5, 0xd8, 0xfa,
|
||||
0x53, 0x83, 0x6b, 0x6b, 0x59, 0x9f, 0x9d, 0x56, 0x77, 0x6c, 0xc3, 0x7c, 0xec, 0x25, 0x38, 0x64,
|
||||
0xdb, 0xb9, 0x5e, 0x2f, 0xaf, 0xa4, 0xc9, 0x6b, 0xfa, 0x1f, 0x47, 0xf5, 0x95, 0x9c, 0x82, 0x8a,
|
||||
0x62, 0x1c, 0x66, 0xe6, 0xd4, 0x69, 0x47, 0x37, 0x02, 0xd2, 0xc6, 0x94, 0xd9, 0xeb, 0xe2, 0x3f,
|
||||
0x77, 0x4e, 0x3a, 0x5b, 0x3b, 0x55, 0x07, 0x14, 0x26, 0xd1, 0x01, 0xdf, 0xc1, 0xe2, 0xe0, 0xe9,
|
||||
0x14, 0x19, 0x5f, 0x40, 0xed, 0x58, 0xdd, 0x9d, 0x5a, 0xf5, 0x86, 0x04, 0x49, 0xde, 0xc0, 0xfa,
|
||||
0x11, 0xe6, 0xbf, 0x89, 0x83, 0x37, 0xa0, 0xd5, 0x9a, 0x50, 0x4d, 0x30, 0x8d, 0x7a, 0x89, 0x8f,
|
||||
0xa9, 0xe0, 0x6a, 0xd4, 0xa1, 0x8e, 0x61, 0xd6, 0x0a, 0x5c, 0xb9, 0x2f, 0x45, 0x6d, 0xba, 0xb3,
|
||||
0x01, 0x65, 0x59, 0xdd, 0xe5, 0x51, 0xaa, 0x6e, 0x3a, 0xe5, 0x09, 0x95, 0x61, 0xb3, 0x5a, 0x5f,
|
||||
0x56, 0x9a, 0x58, 0x9d, 0xdb, 0x38, 0x45, 0x1f, 0x0a, 0x80, 0x9b, 0x02, 0x9b, 0xff, 0xd6, 0xa0,
|
||||
0x24, 0x3a, 0x06, 0xda, 0x03, 0x5d, 0x6a, 0x2b, 0xe4, 0xd8, 0x67, 0xfd, 0x52, 0xb0, 0x87, 0xb4,
|
||||
0xac, 0xf9, 0xc9, 0xe4, 0x06, 0x2a, 0xd4, 0xef, 0xa1, 0x24, 0x34, 0x10, 0x5a, 0x39, 0xdb, 0x34,
|
||||
0xaf, 0xc8, 0xcc, 0x8f, 0x26, 0xc2, 0xaa, 0x1d, 0xda, 0xa0, 0x4b, 0x61, 0x31, 0xee, 0x38, 0x43,
|
||||
0x42, 0xcb, 0xfc, 0x78, 0x12, 0x83, 0x6c, 0xa3, 0x67, 0x30, 0x7b, 0x42, 0xc1, 0xa0, 0xe6, 0x24,
|
||||
0xe6, 0x27, 0x1b, 0xd9, 0x39, 0xb7, 0x7c, 0x02, 0x85, 0x4d, 0xcc, 0x50, 0xe3, 0x6c, 0xa3, 0x63,
|
||||
0x99, 0x63, 0x7e, 0x38, 0x01, 0x32, 0xe3, 0xad, 0xc8, 0x2b, 0x0c, 0xb2, 0xcf, 0x36, 0x19, 0x54,
|
||||
0x25, 0xa6, 0x33, 0x31, 0x5e, 0x6d, 0xd4, 0x82, 0x22, 0x17, 0x19, 0x68, 0x4c, 0x6c, 0x39, 0x21,
|
||||
0x62, 0x2e, 0x0e, 0x3d, 0xa0, 0x0d, 0xfe, 0x23, 0x15, 0x6d, 0x41, 0x91, 0x77, 0x05, 0x34, 0x26,
|
||||
0x0f, 0x87, 0x05, 0xc4, 0x48, 0x8f, 0x8f, 0xa0, 0x9a, 0xf5, 0xd6, 0x71, 0x54, 0x0c, 0x36, 0xe1,
|
||||
0x91, 0x4e, 0x1f, 0x40, 0x59, 0x75, 0x45, 0x34, 0xe6, 0xbe, 0x4f, 0x36, 0xcf, 0x33, 0x1c, 0x96,
|
||||
0x44, 0x97, 0x1b, 0x17, 0xe1, 0x60, 0x2b, 0x1c, 0xe9, 0xf0, 0x21, 0xe8, 0xb2, 0xdd, 0x8d, 0x7b,
|
||||
0x34, 0x43, 0x4d, 0x71, 0xa4, 0x4b, 0x02, 0x95, 0xb4, 0x63, 0xa1, 0x1b, 0xe3, 0x73, 0x24, 0xd7,
|
||||
0x20, 0x4d, 0x7b, 0x52, 0xb8, 0xca, 0xa8, 0xe7, 0x00, 0xb9, 0x9e, 0x72, 0x6b, 0x0c, 0xc5, 0xa7,
|
||||
0x75, 0x47, 0xf3, 0xd3, 0xf3, 0x19, 0xa9, 0x8d, 0x1f, 0x82, 0x2e, 0x9b, 0xc6, 0x38, 0xda, 0x86,
|
||||
0x5a, 0xcb, 0x48, 0xda, 0x76, 0xa0, 0xac, 0xca, 0xfb, 0xb8, 0x5c, 0x39, 0xd9, 0x31, 0xcc, 0x1b,
|
||||
0x13, 0xa2, 0x65, 0xe8, 0xab, 0xdf, 0x1e, 0xbe, 0x5a, 0xba, 0xf4, 0xfb, 0xab, 0xa5, 0x4b, 0x3f,
|
||||
0xf7, 0x97, 0xb4, 0xc3, 0xfe, 0x92, 0xf6, 0x5b, 0x7f, 0x49, 0xfb, 0xbb, 0xbf, 0xa4, 0x3d, 0xb9,
|
||||
0xfb, 0x7a, 0x7f, 0x72, 0xba, 0x23, 0x06, 0x4f, 0x75, 0x71, 0x9e, 0x5b, 0xff, 0x05, 0x00, 0x00,
|
||||
0xff, 0xff, 0x5e, 0xf6, 0xae, 0x85, 0xb9, 0x12, 0x00, 0x00,
|
||||
}
|
||||
|
|
|
@ -6,6 +6,7 @@ import "google/protobuf/empty.proto";
import "google/protobuf/any.proto";
import "gogoproto/gogo.proto";
import "github.com/containerd/containerd/api/types/mount.proto";
import "github.com/containerd/containerd/api/types/metrics.proto";
import "github.com/containerd/containerd/api/types/descriptor.proto";
import "github.com/containerd/containerd/api/types/task/task.proto";
import "google/protobuf/timestamp.proto";

@ -46,6 +47,8 @@ service Tasks {
	rpc Checkpoint(CheckpointTaskRequest) returns (CheckpointTaskResponse);

	rpc Update(UpdateTaskRequest) returns (google.protobuf.Empty);

	rpc Metrics(MetricsRequest) returns (MetricsResponse);
}

message CreateTaskRequest {

@ -183,3 +186,11 @@ message UpdateTaskRequest {
	string container_id = 1;
	google.protobuf.Any resources = 2;
}

message MetricsRequest {
	repeated string filters = 1;
}

message MetricsResponse {
	repeated types.Metric metrics = 1;
}

@ -7,10 +7,12 @@

It is generated from these files:
	github.com/containerd/containerd/api/types/descriptor.proto
	github.com/containerd/containerd/api/types/metrics.proto
	github.com/containerd/containerd/api/types/mount.proto

It has these top-level messages:
	Descriptor
	Metric
	Mount
*/
package types

@ -0,0 +1,429 @@
|
|||
// Code generated by protoc-gen-gogo.
|
||||
// source: github.com/containerd/containerd/api/types/metrics.proto
|
||||
// DO NOT EDIT!
|
||||
|
||||
package types
|
||||
|
||||
import proto "github.com/gogo/protobuf/proto"
|
||||
import fmt "fmt"
|
||||
import math "math"
|
||||
import _ "github.com/gogo/protobuf/gogoproto"
|
||||
import google_protobuf1 "github.com/gogo/protobuf/types"
|
||||
import _ "github.com/gogo/protobuf/types"
|
||||
|
||||
import time "time"
|
||||
|
||||
import github_com_gogo_protobuf_types "github.com/gogo/protobuf/types"
|
||||
|
||||
import strings "strings"
|
||||
import reflect "reflect"
|
||||
|
||||
import io "io"
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
var _ = time.Kitchen
|
||||
|
||||
type Metric struct {
|
||||
Timestamp time.Time `protobuf:"bytes,1,opt,name=timestamp,stdtime" json:"timestamp"`
|
||||
ID string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"`
|
||||
Data *google_protobuf1.Any `protobuf:"bytes,3,opt,name=data" json:"data,omitempty"`
|
||||
}
|
||||
|
||||
func (m *Metric) Reset() { *m = Metric{} }
|
||||
func (*Metric) ProtoMessage() {}
|
||||
func (*Metric) Descriptor() ([]byte, []int) { return fileDescriptorMetrics, []int{0} }
|
||||
|
||||
func init() {
|
||||
proto.RegisterType((*Metric)(nil), "containerd.types.Metric")
|
||||
}
|
||||
func (m *Metric) Marshal() (dAtA []byte, err error) {
|
||||
size := m.Size()
|
||||
dAtA = make([]byte, size)
|
||||
n, err := m.MarshalTo(dAtA)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return dAtA[:n], nil
|
||||
}
|
||||
|
||||
func (m *Metric) MarshalTo(dAtA []byte) (int, error) {
|
||||
var i int
|
||||
_ = i
|
||||
var l int
|
||||
_ = l
|
||||
dAtA[i] = 0xa
|
||||
i++
|
||||
i = encodeVarintMetrics(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp)))
|
||||
n1, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Timestamp, dAtA[i:])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
i += n1
|
||||
if len(m.ID) > 0 {
|
||||
dAtA[i] = 0x12
|
||||
i++
|
||||
i = encodeVarintMetrics(dAtA, i, uint64(len(m.ID)))
|
||||
i += copy(dAtA[i:], m.ID)
|
||||
}
|
||||
if m.Data != nil {
|
||||
dAtA[i] = 0x1a
|
||||
i++
|
||||
i = encodeVarintMetrics(dAtA, i, uint64(m.Data.Size()))
|
||||
n2, err := m.Data.MarshalTo(dAtA[i:])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
i += n2
|
||||
}
|
||||
return i, nil
|
||||
}
|
||||
|
||||
func encodeFixed64Metrics(dAtA []byte, offset int, v uint64) int {
|
||||
dAtA[offset] = uint8(v)
|
||||
dAtA[offset+1] = uint8(v >> 8)
|
||||
dAtA[offset+2] = uint8(v >> 16)
|
||||
dAtA[offset+3] = uint8(v >> 24)
|
||||
dAtA[offset+4] = uint8(v >> 32)
|
||||
dAtA[offset+5] = uint8(v >> 40)
|
||||
dAtA[offset+6] = uint8(v >> 48)
|
||||
dAtA[offset+7] = uint8(v >> 56)
|
||||
return offset + 8
|
||||
}
|
||||
func encodeFixed32Metrics(dAtA []byte, offset int, v uint32) int {
|
||||
dAtA[offset] = uint8(v)
|
||||
dAtA[offset+1] = uint8(v >> 8)
|
||||
dAtA[offset+2] = uint8(v >> 16)
|
||||
dAtA[offset+3] = uint8(v >> 24)
|
||||
return offset + 4
|
||||
}
|
||||
func encodeVarintMetrics(dAtA []byte, offset int, v uint64) int {
|
||||
for v >= 1<<7 {
|
||||
dAtA[offset] = uint8(v&0x7f | 0x80)
|
||||
v >>= 7
|
||||
offset++
|
||||
}
|
||||
dAtA[offset] = uint8(v)
|
||||
return offset + 1
|
||||
}
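encodeVarintMetrics above writes the standard protobuf base-128 varint: seven payload bits per byte, least-significant group first, with the continuation bit (0x80) set on every byte but the last. A small self-contained sketch of the same loop (the putVarint name and the example value are illustrative, not part of the generated file):

package main

import "fmt"

// putVarint mirrors encodeVarintMetrics: emit 7 bits at a time, low bits
// first, setting the continuation bit on every byte except the last.
func putVarint(buf []byte, offset int, v uint64) int {
	for v >= 1<<7 {
		buf[offset] = uint8(v&0x7f | 0x80)
		v >>= 7
		offset++
	}
	buf[offset] = uint8(v)
	return offset + 1
}

func main() {
	buf := make([]byte, 10)
	n := putVarint(buf, 0, 300)
	fmt.Printf("% x\n", buf[:n]) // prints "ac 02": low 7 bits of 300 plus a continuation byte
}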
|
||||
func (m *Metric) Size() (n int) {
|
||||
var l int
|
||||
_ = l
|
||||
l = github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp)
|
||||
n += 1 + l + sovMetrics(uint64(l))
|
||||
l = len(m.ID)
|
||||
if l > 0 {
|
||||
n += 1 + l + sovMetrics(uint64(l))
|
||||
}
|
||||
if m.Data != nil {
|
||||
l = m.Data.Size()
|
||||
n += 1 + l + sovMetrics(uint64(l))
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func sovMetrics(x uint64) (n int) {
|
||||
for {
|
||||
n++
|
||||
x >>= 7
|
||||
if x == 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
return n
|
||||
}
|
||||
func sozMetrics(x uint64) (n int) {
|
||||
return sovMetrics(uint64((x << 1) ^ uint64((int64(x) >> 63))))
|
||||
}
|
||||
func (this *Metric) String() string {
|
||||
if this == nil {
|
||||
return "nil"
|
||||
}
|
||||
s := strings.Join([]string{`&Metric{`,
|
||||
`Timestamp:` + strings.Replace(strings.Replace(this.Timestamp.String(), "Timestamp", "google_protobuf2.Timestamp", 1), `&`, ``, 1) + `,`,
|
||||
`ID:` + fmt.Sprintf("%v", this.ID) + `,`,
|
||||
`Data:` + strings.Replace(fmt.Sprintf("%v", this.Data), "Any", "google_protobuf1.Any", 1) + `,`,
|
||||
`}`,
|
||||
}, "")
|
||||
return s
|
||||
}
|
||||
func valueToStringMetrics(v interface{}) string {
|
||||
rv := reflect.ValueOf(v)
|
||||
if rv.IsNil() {
|
||||
return "nil"
|
||||
}
|
||||
pv := reflect.Indirect(rv).Interface()
|
||||
return fmt.Sprintf("*%v", pv)
|
||||
}
|
||||
func (m *Metric) Unmarshal(dAtA []byte) error {
|
||||
l := len(dAtA)
|
||||
iNdEx := 0
|
||||
for iNdEx < l {
|
||||
preIndex := iNdEx
|
||||
var wire uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowMetrics
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
fieldNum := int32(wire >> 3)
|
||||
wireType := int(wire & 0x7)
|
||||
if wireType == 4 {
|
||||
return fmt.Errorf("proto: Metric: wiretype end group for non-group")
|
||||
}
|
||||
if fieldNum <= 0 {
|
||||
return fmt.Errorf("proto: Metric: illegal tag %d (wire type %d)", fieldNum, wire)
|
||||
}
|
||||
switch fieldNum {
|
||||
case 1:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType)
|
||||
}
|
||||
var msglen int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowMetrics
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
msglen |= (int(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if msglen < 0 {
|
||||
return ErrInvalidLengthMetrics
|
||||
}
|
||||
postIndex := iNdEx + msglen
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.Timestamp, dAtA[iNdEx:postIndex]); err != nil {
|
||||
return err
|
||||
}
|
||||
iNdEx = postIndex
|
||||
case 2:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
|
||||
}
|
||||
var stringLen uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowMetrics
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLen |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
intStringLen := int(stringLen)
|
||||
if intStringLen < 0 {
|
||||
return ErrInvalidLengthMetrics
|
||||
}
|
||||
postIndex := iNdEx + intStringLen
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.ID = string(dAtA[iNdEx:postIndex])
|
||||
iNdEx = postIndex
|
||||
case 3:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType)
|
||||
}
|
||||
var msglen int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowMetrics
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
msglen |= (int(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if msglen < 0 {
|
||||
return ErrInvalidLengthMetrics
|
||||
}
|
||||
postIndex := iNdEx + msglen
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
if m.Data == nil {
|
||||
m.Data = &google_protobuf1.Any{}
|
||||
}
|
||||
if err := m.Data.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||
return err
|
||||
}
|
||||
iNdEx = postIndex
|
||||
default:
|
||||
iNdEx = preIndex
|
||||
skippy, err := skipMetrics(dAtA[iNdEx:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if skippy < 0 {
|
||||
return ErrInvalidLengthMetrics
|
||||
}
|
||||
if (iNdEx + skippy) > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
iNdEx += skippy
|
||||
}
|
||||
}
|
||||
|
||||
if iNdEx > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func skipMetrics(dAtA []byte) (n int, err error) {
|
||||
l := len(dAtA)
|
||||
iNdEx := 0
|
||||
for iNdEx < l {
|
||||
var wire uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return 0, ErrIntOverflowMetrics
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
wireType := int(wire & 0x7)
|
||||
switch wireType {
|
||||
case 0:
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return 0, ErrIntOverflowMetrics
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
iNdEx++
|
||||
if dAtA[iNdEx-1] < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
return iNdEx, nil
|
||||
case 1:
|
||||
iNdEx += 8
|
||||
return iNdEx, nil
|
||||
case 2:
|
||||
var length int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return 0, ErrIntOverflowMetrics
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
length |= (int(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
iNdEx += length
|
||||
if length < 0 {
|
||||
return 0, ErrInvalidLengthMetrics
|
||||
}
|
||||
return iNdEx, nil
|
||||
case 3:
|
||||
for {
|
||||
var innerWire uint64
|
||||
var start int = iNdEx
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return 0, ErrIntOverflowMetrics
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
innerWire |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
innerWireType := int(innerWire & 0x7)
|
||||
if innerWireType == 4 {
|
||||
break
|
||||
}
|
||||
next, err := skipMetrics(dAtA[start:])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
iNdEx = start + next
|
||||
}
|
||||
return iNdEx, nil
|
||||
case 4:
|
||||
return iNdEx, nil
|
||||
case 5:
|
||||
iNdEx += 4
|
||||
return iNdEx, nil
|
||||
default:
|
||||
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
|
||||
}
|
||||
}
|
||||
panic("unreachable")
|
||||
}
|
||||
|
||||
var (
|
||||
ErrInvalidLengthMetrics = fmt.Errorf("proto: negative length found during unmarshaling")
|
||||
ErrIntOverflowMetrics = fmt.Errorf("proto: integer overflow")
|
||||
)
|
||||
|
||||
func init() {
|
||||
proto.RegisterFile("github.com/containerd/containerd/api/types/metrics.proto", fileDescriptorMetrics)
|
||||
}
|
||||
|
||||
var fileDescriptorMetrics = []byte{
|
||||
// 256 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xb2, 0x48, 0xcf, 0x2c, 0xc9,
|
||||
0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xcc, 0xcc, 0x4b, 0x2d,
|
||||
0x4a, 0x41, 0x66, 0x26, 0x16, 0x64, 0xea, 0x97, 0x54, 0x16, 0xa4, 0x16, 0xeb, 0xe7, 0xa6, 0x96,
|
||||
0x14, 0x65, 0x26, 0x17, 0xeb, 0x15, 0x14, 0xe5, 0x97, 0xe4, 0x0b, 0x09, 0x20, 0xd4, 0xe8, 0x81,
|
||||
0xe5, 0xa5, 0x44, 0xd2, 0xf3, 0xd3, 0xf3, 0xc1, 0x92, 0xfa, 0x20, 0x16, 0x44, 0x9d, 0x94, 0x64,
|
||||
0x7a, 0x7e, 0x7e, 0x7a, 0x4e, 0xaa, 0x3e, 0x98, 0x97, 0x54, 0x9a, 0xa6, 0x9f, 0x98, 0x57, 0x09,
|
||||
0x95, 0x92, 0x47, 0x97, 0x2a, 0xc9, 0xcc, 0x4d, 0x2d, 0x2e, 0x49, 0xcc, 0x2d, 0x80, 0x28, 0x50,
|
||||
0xea, 0x63, 0xe4, 0x62, 0xf3, 0x05, 0xdb, 0x2a, 0xe4, 0xc4, 0xc5, 0x09, 0x97, 0x95, 0x60, 0x54,
|
||||
0x60, 0xd4, 0xe0, 0x36, 0x92, 0xd2, 0x83, 0xe8, 0xd7, 0x83, 0xe9, 0xd7, 0x0b, 0x81, 0xa9, 0x70,
|
||||
0xe2, 0x38, 0x71, 0x4f, 0x9e, 0x61, 0xc2, 0x7d, 0x79, 0xc6, 0x20, 0x84, 0x36, 0x21, 0x31, 0x2e,
|
||||
0xa6, 0xcc, 0x14, 0x09, 0x26, 0x05, 0x46, 0x0d, 0x4e, 0x27, 0xb6, 0x47, 0xf7, 0xe4, 0x99, 0x3c,
|
||||
0x5d, 0x82, 0x98, 0x32, 0x53, 0x84, 0x34, 0xb8, 0x58, 0x52, 0x12, 0x4b, 0x12, 0x25, 0x98, 0xc1,
|
||||
0xc6, 0x8a, 0x60, 0x18, 0xeb, 0x98, 0x57, 0x19, 0x04, 0x56, 0xe1, 0xe4, 0x75, 0xe2, 0xa1, 0x1c,
|
||||
0xc3, 0x8d, 0x87, 0x72, 0x0c, 0x0d, 0x8f, 0xe4, 0x18, 0x4f, 0x3c, 0x92, 0x63, 0xbc, 0xf0, 0x48,
|
||||
0x8e, 0xf1, 0xc1, 0x23, 0x39, 0xc6, 0x28, 0x03, 0xe2, 0x03, 0xd2, 0x1a, 0x4c, 0x26, 0xb1, 0x81,
|
||||
0xcd, 0x37, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0xf8, 0x51, 0x36, 0x74, 0x83, 0x01, 0x00, 0x00,
|
||||
}
|
|
@ -0,0 +1,15 @@
syntax = "proto3";

package containerd.types;

import "gogoproto/gogo.proto";
import "google/protobuf/any.proto";
import "google/protobuf/timestamp.proto";

option go_package = "github.com/containerd/containerd/api/types;types";

message Metric {
	google.protobuf.Timestamp timestamp = 1 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
	string id = 2;
	google.protobuf.Any data = 3;
}

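The data field above is a google.protobuf.Any, so the concrete metrics payload travels by type URL and has to be unpacked by the consumer. A hedged sketch, assuming the containerd/typeurl helper imported elsewhere in this change and a hypothetical received *types.Metric value m:

// v holds the runtime-specific metrics message once the Any is resolved;
// the concrete type depends on which runtime produced the metric.
v, err := typeurl.UnmarshalAny(m.Data)
if err != nil {
	return err
}
fmt.Printf("metrics for %s at %s: %#v\n", m.ID, m.Timestamp, v)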
@ -17,6 +17,11 @@ import proto "github.com/gogo/protobuf/proto"
|
|||
import fmt "fmt"
|
||||
import math "math"
|
||||
import _ "github.com/gogo/protobuf/gogoproto"
|
||||
import _ "github.com/gogo/protobuf/types"
|
||||
|
||||
import time "time"
|
||||
|
||||
import github_com_gogo_protobuf_types "github.com/gogo/protobuf/types"
|
||||
|
||||
import strings "strings"
|
||||
import reflect "reflect"
|
||||
|
@ -27,6 +32,7 @@ import io "io"
|
|||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
var _ = time.Kitchen
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
|
@ -68,15 +74,16 @@ func (x Status) String() string {
|
|||
func (Status) EnumDescriptor() ([]byte, []int) { return fileDescriptorTask, []int{0} }
|
||||
|
||||
type Process struct {
|
||||
ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
|
||||
ID string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"`
|
||||
Pid uint32 `protobuf:"varint,3,opt,name=pid,proto3" json:"pid,omitempty"`
|
||||
Status Status `protobuf:"varint,4,opt,name=status,proto3,enum=containerd.v1.types.Status" json:"status,omitempty"`
|
||||
Stdin string `protobuf:"bytes,5,opt,name=stdin,proto3" json:"stdin,omitempty"`
|
||||
Stdout string `protobuf:"bytes,6,opt,name=stdout,proto3" json:"stdout,omitempty"`
|
||||
Stderr string `protobuf:"bytes,7,opt,name=stderr,proto3" json:"stderr,omitempty"`
|
||||
Terminal bool `protobuf:"varint,8,opt,name=terminal,proto3" json:"terminal,omitempty"`
|
||||
ExitStatus uint32 `protobuf:"varint,9,opt,name=exit_status,json=exitStatus,proto3" json:"exit_status,omitempty"`
|
||||
ContainerID string `protobuf:"bytes,1,opt,name=container_id,json=containerId,proto3" json:"container_id,omitempty"`
|
||||
ID string `protobuf:"bytes,2,opt,name=id,proto3" json:"id,omitempty"`
|
||||
Pid uint32 `protobuf:"varint,3,opt,name=pid,proto3" json:"pid,omitempty"`
|
||||
Status Status `protobuf:"varint,4,opt,name=status,proto3,enum=containerd.v1.types.Status" json:"status,omitempty"`
|
||||
Stdin string `protobuf:"bytes,5,opt,name=stdin,proto3" json:"stdin,omitempty"`
|
||||
Stdout string `protobuf:"bytes,6,opt,name=stdout,proto3" json:"stdout,omitempty"`
|
||||
Stderr string `protobuf:"bytes,7,opt,name=stderr,proto3" json:"stderr,omitempty"`
|
||||
Terminal bool `protobuf:"varint,8,opt,name=terminal,proto3" json:"terminal,omitempty"`
|
||||
ExitStatus uint32 `protobuf:"varint,9,opt,name=exit_status,json=exitStatus,proto3" json:"exit_status,omitempty"`
|
||||
ExitedAt time.Time `protobuf:"bytes,10,opt,name=exited_at,json=exitedAt,stdtime" json:"exited_at"`
|
||||
}
|
||||
|
||||
func (m *Process) Reset() { *m = Process{} }
|
||||
|
@ -157,6 +164,14 @@ func (m *Process) MarshalTo(dAtA []byte) (int, error) {
|
|||
i++
|
||||
i = encodeVarintTask(dAtA, i, uint64(m.ExitStatus))
|
||||
}
|
||||
dAtA[i] = 0x52
|
||||
i++
|
||||
i = encodeVarintTask(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.ExitedAt)))
|
||||
n1, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.ExitedAt, dAtA[i:])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
i += n1
|
||||
return i, nil
|
||||
}
|
||||
|
||||
|
@ -222,6 +237,8 @@ func (m *Process) Size() (n int) {
|
|||
if m.ExitStatus != 0 {
|
||||
n += 1 + sovTask(uint64(m.ExitStatus))
|
||||
}
|
||||
l = github_com_gogo_protobuf_types.SizeOfStdTime(m.ExitedAt)
|
||||
n += 1 + l + sovTask(uint64(l))
|
||||
return n
|
||||
}
|
||||
|
||||
|
@ -252,6 +269,7 @@ func (this *Process) String() string {
|
|||
`Stderr:` + fmt.Sprintf("%v", this.Stderr) + `,`,
|
||||
`Terminal:` + fmt.Sprintf("%v", this.Terminal) + `,`,
|
||||
`ExitStatus:` + fmt.Sprintf("%v", this.ExitStatus) + `,`,
|
||||
`ExitedAt:` + strings.Replace(strings.Replace(this.ExitedAt.String(), "Timestamp", "google_protobuf1.Timestamp", 1), `&`, ``, 1) + `,`,
|
||||
`}`,
|
||||
}, "")
|
||||
return s
|
||||
|
@ -515,6 +533,36 @@ func (m *Process) Unmarshal(dAtA []byte) error {
|
|||
break
|
||||
}
|
||||
}
|
||||
case 10:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field ExitedAt", wireType)
|
||||
}
|
||||
var msglen int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowTask
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
msglen |= (int(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if msglen < 0 {
|
||||
return ErrInvalidLengthTask
|
||||
}
|
||||
postIndex := iNdEx + msglen
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.ExitedAt, dAtA[iNdEx:postIndex]); err != nil {
|
||||
return err
|
||||
}
|
||||
iNdEx = postIndex
|
||||
default:
|
||||
iNdEx = preIndex
|
||||
skippy, err := skipTask(dAtA[iNdEx:])
|
||||
|
@ -646,33 +694,37 @@ func init() {
|
|||
}
|
||||
|
||||
var fileDescriptorTask = []byte{
|
||||
// 447 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x92, 0x4f, 0x6f, 0xd3, 0x30,
|
||||
0x18, 0xc6, 0xeb, 0x74, 0x4d, 0x3b, 0x77, 0x1b, 0xc1, 0x4c, 0x93, 0x55, 0x90, 0x1b, 0x71, 0xaa,
|
||||
0x38, 0xa4, 0x62, 0xbb, 0x71, 0xdb, 0x9a, 0x0a, 0x55, 0x48, 0x59, 0xe4, 0xae, 0xe2, 0x38, 0x65,
|
||||
0xb5, 0x15, 0xac, 0x31, 0x3b, 0x72, 0x1c, 0xfe, 0xdc, 0x38, 0xa2, 0x7d, 0x87, 0x89, 0x03, 0x7c,
|
||||
0x0a, 0x3e, 0xc1, 0x8e, 0x9c, 0x10, 0xa7, 0x89, 0xe5, 0x93, 0x20, 0x27, 0xa1, 0xe4, 0xc0, 0x25,
|
||||
0x7a, 0xde, 0xe7, 0xf7, 0xbc, 0x6f, 0x9e, 0x48, 0x81, 0x2f, 0x52, 0x61, 0xde, 0x14, 0x17, 0xc1,
|
||||
0x5a, 0x5d, 0x4d, 0xd7, 0x4a, 0x9a, 0x44, 0x48, 0xae, 0x59, 0x5b, 0x26, 0x99, 0x98, 0x9a, 0x8f,
|
||||
0x19, 0xcf, 0xa7, 0x26, 0xc9, 0x2f, 0xab, 0x47, 0x90, 0x69, 0x65, 0x14, 0x7a, 0xf4, 0x2f, 0x15,
|
||||
0xbc, 0x7b, 0x1e, 0x54, 0xa1, 0xd1, 0x7e, 0xaa, 0x52, 0x55, 0xf1, 0xa9, 0x55, 0x75, 0xf4, 0xe9,
|
||||
0x17, 0x07, 0xf6, 0x63, 0xad, 0xd6, 0x3c, 0xcf, 0xd1, 0x21, 0xdc, 0xd9, 0x2c, 0x9e, 0x0b, 0x86,
|
||||
0x81, 0x0f, 0x26, 0xdb, 0x27, 0x0f, 0xca, 0xbb, 0xf1, 0x70, 0xf6, 0xd7, 0x5f, 0x84, 0x74, 0xb8,
|
||||
0x09, 0x2d, 0x18, 0x3a, 0x80, 0x8e, 0x60, 0xd8, 0xa9, 0x92, 0x6e, 0x79, 0x37, 0x76, 0x16, 0x21,
|
||||
0x75, 0x04, 0x43, 0x1e, 0xec, 0x66, 0x82, 0xe1, 0xae, 0x0f, 0x26, 0xbb, 0xd4, 0x4a, 0x74, 0x04,
|
||||
0xdd, 0xdc, 0x24, 0xa6, 0xc8, 0xf1, 0x96, 0x0f, 0x26, 0x7b, 0x87, 0x8f, 0x83, 0xff, 0xb4, 0x0c,
|
||||
0x96, 0x55, 0x84, 0x36, 0x51, 0xb4, 0x0f, 0x7b, 0xb9, 0x61, 0x42, 0xe2, 0x9e, 0x7d, 0x03, 0xad,
|
||||
0x07, 0x74, 0x60, 0x4f, 0x31, 0x55, 0x18, 0xec, 0x56, 0x76, 0x33, 0x35, 0x3e, 0xd7, 0x1a, 0xf7,
|
||||
0x37, 0x3e, 0xd7, 0x1a, 0x8d, 0xe0, 0xc0, 0x70, 0x7d, 0x25, 0x64, 0xf2, 0x16, 0x0f, 0x7c, 0x30,
|
||||
0x19, 0xd0, 0xcd, 0x8c, 0xc6, 0x70, 0xc8, 0x3f, 0x08, 0x73, 0xde, 0x74, 0xdb, 0xae, 0x0a, 0x43,
|
||||
0x6b, 0xd5, 0x55, 0x9e, 0xfd, 0x04, 0xd0, 0xad, 0x25, 0x22, 0xb0, 0xbf, 0x8a, 0x5e, 0x45, 0xa7,
|
||||
0xaf, 0x23, 0xaf, 0x33, 0x7a, 0x78, 0x7d, 0xe3, 0xef, 0xd6, 0x60, 0x25, 0x2f, 0xa5, 0x7a, 0x2f,
|
||||
0x2d, 0x9f, 0xd1, 0xf9, 0xf1, 0xd9, 0x3c, 0xf4, 0x40, 0x9b, 0xcf, 0x34, 0x4f, 0x0c, 0x67, 0x96,
|
||||
0xd3, 0x55, 0x14, 0x2d, 0xa2, 0x97, 0x9e, 0xd3, 0xe6, 0xb4, 0x90, 0x52, 0xc8, 0xd4, 0xf2, 0xe5,
|
||||
0xd9, 0x69, 0x1c, 0xcf, 0x43, 0xaf, 0xdb, 0xe6, 0x4b, 0xa3, 0xb2, 0x8c, 0x33, 0xf4, 0x04, 0xba,
|
||||
0xf1, 0xf1, 0x6a, 0x39, 0x0f, 0xbd, 0xad, 0x91, 0x77, 0x7d, 0xe3, 0xef, 0xd4, 0x38, 0x4e, 0x8a,
|
||||
0xbc, 0xbe, 0x6e, 0xa9, 0xbd, 0xde, 0x6b, 0x6f, 0x5b, 0x2c, 0x64, 0x3a, 0xda, 0xfb, 0xfc, 0x95,
|
||||
0x74, 0xbe, 0x7f, 0x23, 0xcd, 0xd7, 0x9c, 0xe0, 0xdb, 0x7b, 0xd2, 0xf9, 0x75, 0x4f, 0x3a, 0x9f,
|
||||
0x4a, 0x02, 0x6e, 0x4b, 0x02, 0x7e, 0x94, 0x04, 0xfc, 0x2e, 0x09, 0xb8, 0x70, 0xab, 0x7f, 0xe3,
|
||||
0xe8, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xf6, 0x46, 0x63, 0xaf, 0x84, 0x02, 0x00, 0x00,
|
||||
// 503 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x90, 0x3d, 0x6f, 0xd3, 0x40,
|
||||
0x1c, 0xc6, 0x73, 0x6e, 0xe3, 0x24, 0x97, 0xb6, 0x98, 0xa3, 0xaa, 0x2c, 0x83, 0x6c, 0x8b, 0x29,
|
||||
0x62, 0xb0, 0x45, 0xba, 0xb1, 0xe5, 0x4d, 0x28, 0x42, 0x72, 0xa3, 0x4b, 0x22, 0xc6, 0xc8, 0xc9,
|
||||
0x1d, 0xe6, 0xd4, 0xe6, 0xce, 0xb2, 0xcf, 0xbc, 0x6c, 0x8c, 0xa8, 0x13, 0x5f, 0xa0, 0x13, 0x7c,
|
||||
0x0a, 0x3e, 0x41, 0x46, 0x26, 0xc4, 0x14, 0xa8, 0x3f, 0x09, 0x3a, 0xdb, 0x09, 0x19, 0x58, 0xac,
|
||||
0x7b, 0x9e, 0xdf, 0x73, 0x8f, 0xff, 0xf7, 0x87, 0x2f, 0x22, 0x26, 0xdf, 0x66, 0x4b, 0x6f, 0x25,
|
||||
0xd6, 0xfe, 0x4a, 0x70, 0x19, 0x32, 0x4e, 0x13, 0x72, 0x78, 0x0c, 0x63, 0xe6, 0xcb, 0x8f, 0x31,
|
||||
0x4d, 0x7d, 0x19, 0xa6, 0xd7, 0xc5, 0xc7, 0x8b, 0x13, 0x21, 0x05, 0x7a, 0xf4, 0x2f, 0xe5, 0xbd,
|
||||
0x7b, 0xee, 0x15, 0x21, 0xeb, 0x3c, 0x12, 0x91, 0x28, 0xb8, 0xaf, 0x4e, 0x65, 0xd4, 0x72, 0x22,
|
||||
0x21, 0xa2, 0x1b, 0xea, 0x17, 0x6a, 0x99, 0xbd, 0xf1, 0x25, 0x5b, 0xd3, 0x54, 0x86, 0xeb, 0xb8,
|
||||
0x0c, 0x3c, 0xcd, 0x35, 0xd8, 0x98, 0x24, 0x62, 0x45, 0xd3, 0x14, 0x75, 0xe1, 0xc9, 0xbe, 0x79,
|
||||
0xc1, 0x88, 0x09, 0x5c, 0xd0, 0x69, 0xf5, 0x1f, 0xe4, 0x5b, 0xa7, 0x3d, 0xd8, 0xf9, 0xe3, 0x21,
|
||||
0x6e, 0xef, 0x43, 0x63, 0x82, 0x2e, 0xa0, 0xc6, 0x88, 0xa9, 0x15, 0x49, 0x3d, 0xdf, 0x3a, 0xda,
|
||||
0x78, 0x88, 0x35, 0x46, 0x90, 0x01, 0x8f, 0x62, 0x46, 0xcc, 0x23, 0x17, 0x74, 0x4e, 0xb1, 0x3a,
|
||||
0xa2, 0x4b, 0xa8, 0xa7, 0x32, 0x94, 0x59, 0x6a, 0x1e, 0xbb, 0xa0, 0x73, 0xd6, 0x7d, 0xec, 0xfd,
|
||||
0xe7, 0x19, 0xde, 0xb4, 0x88, 0xe0, 0x2a, 0x8a, 0xce, 0x61, 0x3d, 0x95, 0x84, 0x71, 0xb3, 0xae,
|
||||
0xfe, 0x80, 0x4b, 0x81, 0x2e, 0x54, 0x15, 0x11, 0x99, 0x34, 0xf5, 0xc2, 0xae, 0x54, 0xe5, 0xd3,
|
||||
0x24, 0x31, 0x1b, 0x7b, 0x9f, 0x26, 0x09, 0xb2, 0x60, 0x53, 0xd2, 0x64, 0xcd, 0x78, 0x78, 0x63,
|
||||
0x36, 0x5d, 0xd0, 0x69, 0xe2, 0xbd, 0x46, 0x0e, 0x6c, 0xd3, 0x0f, 0x4c, 0x2e, 0xaa, 0xd9, 0x5a,
|
||||
0xc5, 0xc0, 0x50, 0x59, 0xe5, 0x28, 0xa8, 0x07, 0x5b, 0x4a, 0x51, 0xb2, 0x08, 0xa5, 0x09, 0x5d,
|
||||
0xd0, 0x69, 0x77, 0x2d, 0xaf, 0x5c, 0xab, 0xb7, 0x5b, 0xab, 0x37, 0xdb, 0xad, 0xb5, 0xdf, 0xdc,
|
||||
0x6c, 0x9d, 0xda, 0x97, 0xdf, 0x0e, 0xc0, 0xcd, 0xf2, 0x5a, 0x4f, 0x3e, 0xfb, 0x09, 0xa0, 0x5e,
|
||||
0xb5, 0xd9, 0xb0, 0x31, 0x0f, 0x5e, 0x05, 0x57, 0xaf, 0x03, 0xa3, 0x66, 0x3d, 0xbc, 0xbd, 0x73,
|
||||
0x4f, 0x4b, 0x30, 0xe7, 0xd7, 0x5c, 0xbc, 0xe7, 0x8a, 0x0f, 0xf0, 0xa8, 0x37, 0x1b, 0x0d, 0x0d,
|
||||
0x70, 0xc8, 0x07, 0x09, 0x0d, 0x25, 0x25, 0x8a, 0xe3, 0x79, 0x10, 0x8c, 0x83, 0x97, 0x86, 0x76,
|
||||
0xc8, 0x71, 0xc6, 0x39, 0xe3, 0x91, 0xe2, 0xd3, 0xd9, 0xd5, 0x64, 0x32, 0x1a, 0x1a, 0x47, 0x87,
|
||||
0x7c, 0x2a, 0x45, 0x1c, 0x53, 0x82, 0x9e, 0x40, 0x7d, 0xd2, 0x9b, 0x4f, 0x47, 0x43, 0xe3, 0xd8,
|
||||
0x32, 0x6e, 0xef, 0xdc, 0x93, 0x12, 0x4f, 0xc2, 0x2c, 0x2d, 0xdb, 0x15, 0x55, 0xed, 0xf5, 0xc3,
|
||||
0xdb, 0x0a, 0x33, 0x1e, 0x59, 0x67, 0x9f, 0xbf, 0xda, 0xb5, 0xef, 0xdf, 0xec, 0xea, 0x35, 0x7d,
|
||||
0x73, 0x73, 0x6f, 0xd7, 0x7e, 0xdd, 0xdb, 0xb5, 0x4f, 0xb9, 0x0d, 0x36, 0xb9, 0x0d, 0x7e, 0xe4,
|
||||
0x36, 0xf8, 0x93, 0xdb, 0x60, 0xa9, 0x17, 0xab, 0xb9, 0xfc, 0x1b, 0x00, 0x00, 0xff, 0xff, 0x14,
|
||||
0xf3, 0xee, 0x85, 0xe8, 0x02, 0x00, 0x00,
|
||||
}
|
||||
|
|
|
@ -3,6 +3,7 @@ syntax = "proto3";
package containerd.v1.types;

import "gogoproto/gogo.proto";
import "google/protobuf/timestamp.proto";

enum Status {
	option (gogoproto.goproto_enum_prefix) = false;

@ -26,4 +27,5 @@ message Process {
	string stderr = 7;
	bool terminal = 8;
	uint32 exit_status = 9;
	google.protobuf.Timestamp exited_at = 10 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
}

@ -1,13 +0,0 @@
// +build linux

package containerd

import specs "github.com/opencontainers/runtime-spec/specs-go"

// WithApparmor sets the provided apparmor profile to the spec
func WithApparmorProfile(profile string) SpecOpts {
	return func(s *specs.Spec) error {
		s.Process.ApparmorProfile = profile
		return nil
	}
}

@ -123,7 +123,7 @@ func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error {

func getxattr(path, attr string) ([]byte, error) {
	b, err := sysx.LGetxattr(path, attr)
	if err == unix.ENOTSUP || err == unix.ENODATA {
	if err == unix.ENOTSUP || err == sysx.ENODATA {
		return nil, nil
	}
	return b, err

@ -192,6 +192,7 @@ func defaultRemoteContext() *RemoteContext {
|
|||
Resolver: docker.NewResolver(docker.ResolverOptions{
|
||||
Client: http.DefaultClient,
|
||||
}),
|
||||
Snapshotter: DefaultSnapshotter,
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -352,6 +353,52 @@ func (c *Client) ListImages(ctx context.Context) ([]Image, error) {
|
|||
return images, nil
|
||||
}
|
||||
|
||||
// Subscribe to events that match one or more of the provided filters.
|
||||
//
|
||||
// Callers should listen on both the envelope channel and errs channel. If the
|
||||
// errs channel returns nil or an error, the subscriber should terminate.
|
||||
//
|
||||
// To cancel receipt of events, cancel the provided context. The errs
|
||||
// channel will be closed and return a nil error.
|
||||
func (c *Client) Subscribe(ctx context.Context, filters ...string) (ch <-chan *eventsapi.Envelope, errs <-chan error) {
|
||||
var (
|
||||
evq = make(chan *eventsapi.Envelope)
|
||||
errq = make(chan error, 1)
|
||||
)
|
||||
|
||||
errs = errq
|
||||
ch = evq
|
||||
|
||||
session, err := c.EventService().Subscribe(ctx, &eventsapi.SubscribeRequest{
|
||||
Filters: filters,
|
||||
})
|
||||
if err != nil {
|
||||
errq <- err
|
||||
close(errq)
|
||||
return
|
||||
}
|
||||
|
||||
go func() {
|
||||
defer close(errq)
|
||||
|
||||
for {
|
||||
ev, err := session.Recv()
|
||||
if err != nil {
|
||||
errq <- err
|
||||
return
|
||||
}
|
||||
|
||||
select {
|
||||
case evq <- ev:
|
||||
case <-ctx.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
return ch, errs
|
||||
}
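A minimal consumer sketch for the Subscribe API above; the client construction, the socket address, the empty filter list, and the logging are assumptions for illustration and not part of this change. It drains the envelope channel until the errs channel yields (a nil value there means the context was cancelled and the channel closed).

package main

import (
	"context"
	"log"

	"github.com/containerd/containerd"
)

func main() {
	client, err := containerd.New("/run/containerd/containerd.sock")
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	ch, errs := client.Subscribe(ctx)
	for {
		select {
		case env := <-ch:
			// env is an *eventsapi.Envelope published by the daemon
			log.Printf("event: %v", env)
		case err := <-errs:
			if err != nil {
				log.Printf("subscription failed: %v", err)
			}
			return
		}
	}
}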
|
||||
|
||||
// Close closes the client's connection to containerd
|
||||
func (c *Client) Close() error {
|
||||
return c.conn.Close()
|
||||
|
|
|
@ -173,9 +173,13 @@ func (c *container) NewTask(ctx context.Context, ioCreate IOCreation, opts ...Ne
|
|||
Stdout: cfg.Stdout,
|
||||
Stderr: cfg.Stderr,
|
||||
}
|
||||
if c.c.RootFS != "" {
|
||||
if c.c.SnapshotKey != "" {
|
||||
if c.c.Snapshotter == "" {
|
||||
return nil, errors.Wrapf(errdefs.ErrInvalidArgument, "unable to resolve rootfs mounts without snapshotter on container")
|
||||
}
|
||||
|
||||
// get the rootfs from the snapshotter and add it to the request
|
||||
mounts, err := c.client.SnapshotService(c.c.Snapshotter).Mounts(ctx, c.c.RootFS)
|
||||
mounts, err := c.client.SnapshotService(c.c.Snapshotter).Mounts(ctx, c.c.SnapshotKey)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -241,19 +245,7 @@ func (c *container) loadTask(ctx context.Context, ioAttach IOAttach) (Task, erro
|
|||
}
|
||||
var i IO
|
||||
if ioAttach != nil {
|
||||
// get the existing fifo paths from the task information stored by the daemon
|
||||
paths := &FIFOSet{
|
||||
Dir: getFifoDir([]string{
|
||||
response.Process.Stdin,
|
||||
response.Process.Stdout,
|
||||
response.Process.Stderr,
|
||||
}),
|
||||
In: response.Process.Stdin,
|
||||
Out: response.Process.Stdout,
|
||||
Err: response.Process.Stderr,
|
||||
Terminal: response.Process.Terminal,
|
||||
}
|
||||
if i, err = ioAttach(paths); err != nil {
|
||||
if i, err = attachExistingIO(response, ioAttach); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
@ -266,6 +258,22 @@ func (c *container) loadTask(ctx context.Context, ioAttach IOAttach) (Task, erro
|
|||
return t, nil
|
||||
}
|
||||
|
||||
func attachExistingIO(response *tasks.GetResponse, ioAttach IOAttach) (IO, error) {
|
||||
// get the existing fifo paths from the task information stored by the daemon
|
||||
paths := &FIFOSet{
|
||||
Dir: getFifoDir([]string{
|
||||
response.Process.Stdin,
|
||||
response.Process.Stdout,
|
||||
response.Process.Stderr,
|
||||
}),
|
||||
In: response.Process.Stdin,
|
||||
Out: response.Process.Stdout,
|
||||
Err: response.Process.Stderr,
|
||||
Terminal: response.Process.Terminal,
|
||||
}
|
||||
return ioAttach(paths)
|
||||
}
|
||||
|
||||
// getFifoDir looks for any non-empty path for a stdio fifo
|
||||
// and returns the directory where it is located
|
||||
func getFifoDir(paths []string) string {
|
||||
|
|
|
@ -4,7 +4,12 @@ import (
|
|||
"context"
|
||||
|
||||
"github.com/containerd/containerd/containers"
|
||||
"github.com/containerd/containerd/errdefs"
|
||||
"github.com/containerd/containerd/platforms"
|
||||
"github.com/containerd/containerd/typeurl"
|
||||
"github.com/gogo/protobuf/types"
|
||||
"github.com/opencontainers/image-spec/identity"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// NewContainerOpts allows the caller to set additional options when creating a container
|
||||
|
@ -12,10 +17,21 @@ type NewContainerOpts func(ctx context.Context, client *Client, c *containers.Co
|
|||
|
||||
// WithRuntime allows a user to specify the runtime name and additional options that should
|
||||
// be used to create tasks for the container
|
||||
func WithRuntime(name string) NewContainerOpts {
|
||||
func WithRuntime(name string, options interface{}) NewContainerOpts {
|
||||
return func(ctx context.Context, client *Client, c *containers.Container) error {
|
||||
var (
|
||||
any *types.Any
|
||||
err error
|
||||
)
|
||||
if options != nil {
|
||||
any, err = typeurl.MarshalAny(options)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
c.Runtime = containers.RuntimeInfo{
|
||||
Name: name,
|
||||
Name: name,
|
||||
Options: any,
|
||||
}
|
||||
return nil
|
||||
}
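WithRuntime now stores arbitrary runtime options next to the runtime name; a non-nil options value is marshalled with typeurl.MarshalAny, so its type has to be registered with typeurl beforehand. A hedged usage sketch; the client.NewContainer call, the container ID, and the "linux" runtime name are assumptions here, not taken from the diff:

// passing nil keeps only the runtime name; a non-nil value would be
// marshalled into an Any and stored on the container record
container, err := client.NewContainer(ctx, "example",
	containerd.WithRuntime("linux", nil),
)
if err != nil {
	return err
}
_ = container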
|
||||
|
@ -38,6 +54,8 @@ func WithContainerLabels(labels map[string]string) NewContainerOpts {
|
|||
}
|
||||
|
||||
// WithSnapshotter sets the provided snapshotter for use by the container
|
||||
//
|
||||
// This option must appear before other snapshotter options to have an effect.
|
||||
func WithSnapshotter(name string) NewContainerOpts {
|
||||
return func(ctx context.Context, client *Client, c *containers.Container) error {
|
||||
c.Snapshotter = name
|
||||
|
@ -48,11 +66,12 @@ func WithSnapshotter(name string) NewContainerOpts {
|
|||
// WithSnapshot uses an existing root filesystem for the container
|
||||
func WithSnapshot(id string) NewContainerOpts {
|
||||
return func(ctx context.Context, client *Client, c *containers.Container) error {
|
||||
setSnapshotterIfEmpty(c)
|
||||
// check that the snapshot exists; if not, fail on creation
|
||||
if _, err := client.SnapshotService(c.Snapshotter).Mounts(ctx, id); err != nil {
|
||||
return err
|
||||
}
|
||||
c.RootFS = id
|
||||
c.SnapshotKey = id
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
@ -61,23 +80,27 @@ func WithSnapshot(id string) NewContainerOpts {
|
|||
// root filesystem in read-write mode
|
||||
func WithNewSnapshot(id string, i Image) NewContainerOpts {
|
||||
return func(ctx context.Context, client *Client, c *containers.Container) error {
|
||||
diffIDs, err := i.(*image).i.RootFS(ctx, client.ContentStore())
|
||||
diffIDs, err := i.(*image).i.RootFS(ctx, client.ContentStore(), platforms.Format(platforms.Default()))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
setSnapshotterIfEmpty(c)
|
||||
if _, err := client.SnapshotService(c.Snapshotter).Prepare(ctx, id, identity.ChainID(diffIDs).String()); err != nil {
|
||||
return err
|
||||
}
|
||||
c.RootFS = id
|
||||
c.SnapshotKey = id
|
||||
c.Image = i.Name()
|
||||
return nil
|
||||
}
|
||||
}
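Combined with WithSnapshotter above, option order matters: the snapshotter must be selected before WithNewSnapshot runs so the Prepare call lands on the intended snapshot service. A hedged sketch; the client, image value, container ID, and the "btrfs" snapshotter name are assumptions for illustration:

container, err := client.NewContainer(ctx, "example",
	containerd.WithSnapshotter("btrfs"),                  // must come before snapshot options
	containerd.WithNewSnapshot("example-rootfs", image),  // prepares rootfs from the image's chain ID
)
if err != nil {
	return err
}
_ = container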
|
||||
|
||||
// WithSnapshotCleanup deletes the rootfs allocated for the container
|
||||
// WithSnapshotCleanup deletes the rootfs snapshot allocated for the container
|
||||
func WithSnapshotCleanup(ctx context.Context, client *Client, c containers.Container) error {
|
||||
if c.RootFS != "" {
|
||||
return client.SnapshotService(c.Snapshotter).Remove(ctx, c.RootFS)
|
||||
if c.SnapshotKey != "" {
|
||||
if c.Snapshotter == "" {
|
||||
return errors.Wrapf(errdefs.ErrInvalidArgument, "container.Snapshotter must be set to cleanup rootfs snapshot")
|
||||
}
|
||||
return client.SnapshotService(c.Snapshotter).Remove(ctx, c.SnapshotKey)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
@ -86,15 +109,22 @@ func WithSnapshotCleanup(ctx context.Context, client *Client, c containers.Conta
|
|||
// root filesystem in read-only mode
|
||||
func WithNewSnapshotView(id string, i Image) NewContainerOpts {
|
||||
return func(ctx context.Context, client *Client, c *containers.Container) error {
|
||||
diffIDs, err := i.(*image).i.RootFS(ctx, client.ContentStore())
|
||||
diffIDs, err := i.(*image).i.RootFS(ctx, client.ContentStore(), platforms.Format(platforms.Default()))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
setSnapshotterIfEmpty(c)
|
||||
if _, err := client.SnapshotService(c.Snapshotter).View(ctx, id, identity.ChainID(diffIDs).String()); err != nil {
|
||||
return err
|
||||
}
|
||||
c.RootFS = id
|
||||
c.SnapshotKey = id
|
||||
c.Image = i.Name()
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func setSnapshotterIfEmpty(c *containers.Container) {
|
||||
if c.Snapshotter == "" {
|
||||
c.Snapshotter = DefaultSnapshotter
|
||||
}
|
||||
}
|
||||
|
|
|
@ -12,6 +12,7 @@ import (
|
|||
"github.com/containerd/containerd/content"
|
||||
"github.com/containerd/containerd/errdefs"
|
||||
"github.com/containerd/containerd/images"
|
||||
"github.com/containerd/containerd/platforms"
|
||||
"github.com/gogo/protobuf/proto"
|
||||
protobuf "github.com/gogo/protobuf/types"
|
||||
digest "github.com/opencontainers/go-digest"
|
||||
|
@ -22,7 +23,7 @@ import (
|
|||
// WithCheckpoint allows a container to be created from the checkpointed information
|
||||
// provided by the descriptor. The image, snapshot, and runtime specifications are
|
||||
// restored on the container
|
||||
func WithCheckpoint(desc v1.Descriptor, rootfsID string) NewContainerOpts {
|
||||
func WithCheckpoint(desc v1.Descriptor, snapshotKey string) NewContainerOpts {
|
||||
// set image and rw, and spec
|
||||
return func(ctx context.Context, client *Client, c *containers.Container) error {
|
||||
id := desc.Digest
|
||||
|
@ -38,7 +39,7 @@ func WithCheckpoint(desc v1.Descriptor, rootfsID string) NewContainerOpts {
|
|||
fk := m
|
||||
rw = &fk
|
||||
case images.MediaTypeDockerSchema2Manifest:
|
||||
config, err := images.Config(ctx, store, m)
|
||||
config, err := images.Config(ctx, store, m, platforms.Format(platforms.Default()))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -46,7 +47,8 @@ func WithCheckpoint(desc v1.Descriptor, rootfsID string) NewContainerOpts {
|
|||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := client.SnapshotService(c.Snapshotter).Prepare(ctx, rootfsID, identity.ChainID(diffIDs).String()); err != nil {
|
||||
setSnapshotterIfEmpty(c)
|
||||
if _, err := client.SnapshotService(c.Snapshotter).Prepare(ctx, snapshotKey, identity.ChainID(diffIDs).String()); err != nil {
|
||||
if !errdefs.IsAlreadyExists(err) {
|
||||
return err
|
||||
}
|
||||
|
@ -66,7 +68,7 @@ func WithCheckpoint(desc v1.Descriptor, rootfsID string) NewContainerOpts {
|
|||
}
|
||||
if rw != nil {
|
||||
// apply the rw snapshot to the new rw layer
|
||||
mounts, err := client.SnapshotService(c.Snapshotter).Mounts(ctx, rootfsID)
|
||||
mounts, err := client.SnapshotService(c.Snapshotter).Mounts(ctx, snapshotKey)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -74,7 +76,7 @@ func WithCheckpoint(desc v1.Descriptor, rootfsID string) NewContainerOpts {
|
|||
return err
|
||||
}
|
||||
}
|
||||
c.RootFS = rootfsID
|
||||
c.SnapshotKey = snapshotKey
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
|
|
@ -12,15 +12,51 @@ import (
|
|||
//
|
||||
// The resources specified in this object are used to create tasks from the container.
|
||||
type Container struct {
|
||||
ID string
|
||||
Labels map[string]string
|
||||
Image string
|
||||
Runtime RuntimeInfo
|
||||
Spec *types.Any
|
||||
RootFS string
|
||||
// ID uniquely identifies the container in a namespace.
|
||||
//
|
||||
// This property is required and cannot be changed after creation.
|
||||
ID string
|
||||
|
||||
// Labels provide metadata extension for a container.
|
||||
//
|
||||
// These are optional and fully mutable.
|
||||
Labels map[string]string
|
||||
|
||||
// Image specifies the image reference used for a container.
|
||||
//
|
||||
// This property is optional but immutable.
|
||||
Image string
|
||||
|
||||
// Runtime specifies which runtime should be used when launching container
|
||||
// tasks.
|
||||
//
|
||||
// This property is required and immutable.
|
||||
Runtime RuntimeInfo
|
||||
|
||||
// Spec should carry the runtime specification used to implement the
|
||||
// container.
|
||||
//
|
||||
// This field is required but mutable.
|
||||
Spec *types.Any
|
||||
|
||||
// SnapshotKey specifies the snapshot key to use for the container's root
|
||||
// filesystem. When starting a task from this container, a caller should
|
||||
// look up the mounts from the snapshot service and include those on the
|
||||
// task create request.
|
||||
//
|
||||
// This field is not required but immutable.
|
||||
SnapshotKey string
|
||||
|
||||
// Snapshotter specifies the snapshotter name used for rootfs
|
||||
//
|
||||
// This field is not required but immutable.
|
||||
Snapshotter string
|
||||
CreatedAt time.Time
|
||||
UpdatedAt time.Time
|
||||
|
||||
// CreatedAt is the time at which the container was created.
|
||||
CreatedAt time.Time
|
||||
|
||||
// UpdatedAt is the time at which the container was updated.
|
||||
UpdatedAt time.Time
|
||||
}
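The SnapshotKey comment above describes the lookup a task creator is expected to perform. A minimal sketch of that step, mirroring the NewTask change earlier in this commit; the client, ctx, and container value c are assumptions:

// resolve the container's rootfs mounts before building the task create request
if c.SnapshotKey != "" {
	mounts, err := client.SnapshotService(c.Snapshotter).Mounts(ctx, c.SnapshotKey)
	if err != nil {
		return err
	}
	// attach mounts to the create request (request construction omitted here)
	_ = mounts
}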
|
||||
|
||||
type RuntimeInfo struct {
|
||||
|
@ -34,6 +70,7 @@ type Store interface {
|
|||
// List returns containers that match one or more of the provided filters.
|
||||
List(ctx context.Context, filters ...string) ([]Container, error)
|
||||
|
||||
// Create a container in the store from the provided container.
|
||||
Create(ctx context.Context, container Container) (Container, error)
|
||||
|
||||
// Update the container with the provided container object. ID must be set.
|
||||
|
@ -42,5 +79,9 @@ type Store interface {
|
|||
// the fieldpaths will be mutated.
|
||||
Update(ctx context.Context, container Container, fieldpaths ...string) (Container, error)
|
||||
|
||||
// Delete a container using the id.
|
||||
//
|
||||
// nil will be returned on success. If the container is not known to the
|
||||
// store, ErrNotFound will be returned.
|
||||
Delete(ctx context.Context, id string) error
|
||||
}
|
||||
|
|
|
@ -96,7 +96,7 @@ func containerToProto(container *containers.Container) containersapi.Container {
|
|||
},
|
||||
Spec: container.Spec,
|
||||
Snapshotter: container.Snapshotter,
|
||||
RootFS: container.RootFS,
|
||||
SnapshotKey: container.SnapshotKey,
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -115,7 +115,7 @@ func containerFromProto(containerpb *containersapi.Container) containers.Contain
|
|||
Runtime: runtime,
|
||||
Spec: containerpb.Spec,
|
||||
Snapshotter: containerpb.Snapshotter,
|
||||
RootFS: containerpb.RootFS,
|
||||
SnapshotKey: containerpb.SnapshotKey,
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -90,7 +90,7 @@ type Writer interface {
|
|||
|
||||
// Commit commits the blob (but no roll-back is guaranteed on an error).
|
||||
// size and expected can be zero-value when unknown.
|
||||
Commit(size int64, expected digest.Digest, opts ...Opt) error
|
||||
Commit(ctx context.Context, size int64, expected digest.Digest, opts ...Opt) error
|
||||
|
||||
// Status returns the current state of write
|
||||
Status() (Status, error)
|
||||
|
|
|
@ -58,7 +58,7 @@ func WriteBlob(ctx context.Context, cs Ingester, ref string, r io.Reader, size i
|
|||
}
|
||||
defer cw.Close()
|
||||
|
||||
return Copy(cw, r, size, expected)
|
||||
return Copy(ctx, cw, r, size, expected)
|
||||
}
|
||||
|
||||
// Copy copies data with the expected digest from the reader into the
|
||||
|
@ -68,7 +68,7 @@ func WriteBlob(ctx context.Context, cs Ingester, ref string, r io.Reader, size i
|
|||
// the size or digest is unknown, these values may be empty.
|
||||
//
|
||||
// Copy is buffered, so no need to wrap reader in buffered io.
|
||||
func Copy(cw Writer, r io.Reader, size int64, expected digest.Digest) error {
|
||||
func Copy(ctx context.Context, cw Writer, r io.Reader, size int64, expected digest.Digest) error {
|
||||
ws, err := cw.Status()
|
||||
if err != nil {
|
||||
return err
|
||||
|
@ -95,7 +95,7 @@ func Copy(cw Writer, r io.Reader, size int64, expected digest.Digest) error {
|
|||
return err
|
||||
}
|
||||
|
||||
if err := cw.Commit(size, expected); err != nil {
|
||||
if err := cw.Commit(ctx, size, expected); err != nil {
|
||||
if !errdefs.IsAlreadyExists(err) {
|
||||
return errors.Wrapf(err, "failed commit on ref %q", ws.Ref)
|
||||
}
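With the context now threaded through Commit, a caller that streams its own data follows the same shape as Copy above. A hedged sketch; cw is assumed to be an open content.Writer and data the blob contents, neither of which comes from the diff:

// write the payload, then commit against its size and digest; Commit now
// takes the caller's context in addition to the expected size and digest
if _, err := io.Copy(cw, bytes.NewReader(data)); err != nil {
	return err
}
if err := cw.Commit(ctx, int64(len(data)), digest.FromBytes(data)); err != nil {
	if !errdefs.IsAlreadyExists(err) {
		return err
	}
}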
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
package local
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
|
@ -54,7 +55,11 @@ func (w *writer) Write(p []byte) (n int, err error) {
|
|||
return n, err
|
||||
}
|
||||
|
||||
func (w *writer) Commit(size int64, expected digest.Digest, opts ...content.Opt) error {
|
||||
func (w *writer) Commit(ctx context.Context, size int64, expected digest.Digest, opts ...content.Opt) error {
|
||||
if w.fp == nil {
|
||||
return errors.Wrap(errdefs.ErrFailedPrecondition, "cannot commit on closed writer")
|
||||
}
|
||||
|
||||
if err := w.fp.Sync(); err != nil {
|
||||
return errors.Wrap(err, "sync failed")
|
||||
}
|
||||
|
@ -115,8 +120,8 @@ func (w *writer) Commit(size int64, expected digest.Digest, opts ...content.Opt)
|
|||
return err
|
||||
}
|
||||
|
||||
unlock(w.ref)
|
||||
w.fp = nil
|
||||
unlock(w.ref)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
@ -130,12 +135,13 @@ func (w *writer) Commit(size int64, expected digest.Digest, opts ...content.Opt)
|
|||
//
|
||||
// To abandon a transaction completely, first call close then `Store.Remove` to
|
||||
// clean up the associated resources.
|
||||
func (cw *writer) Close() (err error) {
|
||||
unlock(cw.ref)
|
||||
|
||||
if cw.fp != nil {
|
||||
cw.fp.Sync()
|
||||
return cw.fp.Close()
|
||||
func (w *writer) Close() (err error) {
|
||||
if w.fp != nil {
|
||||
w.fp.Sync()
|
||||
err = w.fp.Close()
|
||||
w.fp = nil
|
||||
unlock(w.ref)
|
||||
return
|
||||
}
|
||||
|
||||
return nil
|
||||
|
|
|
@ -4,11 +4,14 @@ import (
    "io"
    "io/ioutil"
    "os"
    "strings"

    "github.com/boltdb/bolt"
    "github.com/containerd/containerd/archive"
    "github.com/containerd/containerd/archive/compression"
    "github.com/containerd/containerd/content"
    "github.com/containerd/containerd/errdefs"
    "github.com/containerd/containerd/images"
    "github.com/containerd/containerd/metadata"
    "github.com/containerd/containerd/mount"
    "github.com/containerd/containerd/plugin"

@ -21,7 +24,7 @@ import (
func init() {
    plugin.Register(&plugin.Registration{
        Type: plugin.DiffPlugin,
        ID: "base-diff",
        ID: "walking",
        Requires: []plugin.PluginType{
            plugin.ContentPlugin,
            plugin.MetadataPlugin,

@ -35,27 +38,43 @@ func init() {
            if err != nil {
                return nil, err
            }
            return NewBaseDiff(metadata.NewContentStore(md.(*bolt.DB), c.(content.Store)))
            return NewWalkingDiff(metadata.NewContentStore(md.(*bolt.DB), c.(content.Store)))
        },
    })
}

type BaseDiff struct {
type walkingDiff struct {
    store content.Store
}

var _ plugin.Differ = &BaseDiff{}

var emptyDesc = ocispec.Descriptor{}

func NewBaseDiff(store content.Store) (*BaseDiff, error) {
    return &BaseDiff{
// NewWalkingDiff is a generic implementation of plugin.Differ.
// NewWalkingDiff is expected to work with any filesystem.
func NewWalkingDiff(store content.Store) (plugin.Differ, error) {
    return &walkingDiff{
        store: store,
    }, nil
}
|
||||
|
||||
func (s *BaseDiff) Apply(ctx context.Context, desc ocispec.Descriptor, mounts []mount.Mount) (ocispec.Descriptor, error) {
|
||||
// TODO: Check for supported media types
|
||||
// Apply applies the content associated with the provided digests onto the
|
||||
// provided mounts. Archive content will be extracted and decompressed if
|
||||
// necessary.
|
||||
func (s *walkingDiff) Apply(ctx context.Context, desc ocispec.Descriptor, mounts []mount.Mount) (ocispec.Descriptor, error) {
|
||||
var isCompressed bool
|
||||
switch desc.MediaType {
|
||||
case ocispec.MediaTypeImageLayer, images.MediaTypeDockerSchema2Layer:
|
||||
case ocispec.MediaTypeImageLayerGzip, images.MediaTypeDockerSchema2LayerGzip:
|
||||
isCompressed = true
|
||||
default:
|
||||
// Still apply all generic media types *.tar[.+]gzip and *.tar
|
||||
if strings.HasSuffix(desc.MediaType, ".tar.gzip") || strings.HasSuffix(desc.MediaType, ".tar+gzip") {
|
||||
isCompressed = true
|
||||
} else if !strings.HasSuffix(desc.MediaType, ".tar") {
|
||||
return emptyDesc, errors.Wrapf(errdefs.ErrNotImplemented, "unsupported diff media type: %v", desc.MediaType)
|
||||
}
|
||||
}
|
||||
|
||||
dir, err := ioutil.TempDir("", "extract-")
|
||||
if err != nil {
|
||||
return emptyDesc, errors.Wrap(err, "failed to create temporary directory")
|
||||
|
@ -67,22 +86,25 @@ func (s *BaseDiff) Apply(ctx context.Context, desc ocispec.Descriptor, mounts []
|
|||
}
|
||||
defer mount.Unmount(dir, 0)
|
||||
|
||||
r, err := s.store.ReaderAt(ctx, desc.Digest)
|
||||
ra, err := s.store.ReaderAt(ctx, desc.Digest)
|
||||
if err != nil {
|
||||
return emptyDesc, errors.Wrap(err, "failed to get reader from content store")
|
||||
}
|
||||
defer r.Close()
|
||||
defer ra.Close()
|
||||
|
||||
// TODO: only decompress stream if media type is compressed
|
||||
ds, err := compression.DecompressStream(content.NewReader(r))
|
||||
if err != nil {
|
||||
return emptyDesc, err
|
||||
r := content.NewReader(ra)
|
||||
if isCompressed {
|
||||
ds, err := compression.DecompressStream(r)
|
||||
if err != nil {
|
||||
return emptyDesc, err
|
||||
}
|
||||
defer ds.Close()
|
||||
r = ds
|
||||
}
|
||||
defer ds.Close()
|
||||
|
||||
digester := digest.Canonical.Digester()
|
||||
rc := &readCounter{
|
||||
r: io.TeeReader(ds, digester.Hash()),
|
||||
r: io.TeeReader(r, digester.Hash()),
|
||||
}
|
||||
|
||||
if _, err := archive.Apply(ctx, dir, rc); err != nil {
|
||||
|
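The media-type handling in `Apply` above can be isolated into a small helper; this sketch mirrors the switch shown in the diff (it is not part of the diff itself), reporting whether a layer type is supported and whether it needs decompression:

```go
package main

import (
    "strings"

    "github.com/containerd/containerd/images"
    ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

// layerCompression mirrors the media-type switch used by Apply: known
// OCI/Docker layer types are matched explicitly, anything else is accepted
// only if it looks like a generic *.tar or *.tar(+|.)gzip type.
func layerCompression(mediaType string) (compressed bool, ok bool) {
    switch mediaType {
    case ocispec.MediaTypeImageLayer, images.MediaTypeDockerSchema2Layer:
        return false, true
    case ocispec.MediaTypeImageLayerGzip, images.MediaTypeDockerSchema2LayerGzip:
        return true, true
    default:
        if strings.HasSuffix(mediaType, ".tar.gzip") || strings.HasSuffix(mediaType, ".tar+gzip") {
            return true, true
        }
        if strings.HasSuffix(mediaType, ".tar") {
            return false, true
        }
        return false, false
    }
}
```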
@ -101,7 +123,9 @@ func (s *BaseDiff) Apply(ctx context.Context, desc ocispec.Descriptor, mounts []
|
|||
}, nil
|
||||
}
|
||||
|
||||
func (s *BaseDiff) DiffMounts(ctx context.Context, lower, upper []mount.Mount, media, ref string) (ocispec.Descriptor, error) {
|
||||
// DiffMounts creates a diff between the given mounts and uploads the result
|
||||
// to the content store.
|
||||
func (s *walkingDiff) DiffMounts(ctx context.Context, lower, upper []mount.Mount, media, ref string) (ocispec.Descriptor, error) {
|
||||
var isCompressed bool
|
||||
switch media {
|
||||
case ocispec.MediaTypeImageLayer:
|
||||
|
@ -111,7 +135,7 @@ func (s *BaseDiff) DiffMounts(ctx context.Context, lower, upper []mount.Mount, m
|
|||
media = ocispec.MediaTypeImageLayerGzip
|
||||
isCompressed = true
|
||||
default:
|
||||
return emptyDesc, errors.Errorf("unsupported diff media type: %v", media)
|
||||
return emptyDesc, errors.Wrapf(errdefs.ErrNotImplemented, "unsupported diff media type: %v", media)
|
||||
}
|
||||
aDir, err := ioutil.TempDir("", "left-")
|
||||
if err != nil {
|
||||
|
@ -162,7 +186,7 @@ func (s *BaseDiff) DiffMounts(ctx context.Context, lower, upper []mount.Mount, m
|
|||
}
|
||||
|
||||
dgst := cw.Digest()
|
||||
if err := cw.Commit(0, dgst, opts...); err != nil {
|
||||
if err := cw.Commit(ctx, 0, dgst, opts...); err != nil {
|
||||
return emptyDesc, errors.Wrap(err, "failed to commit")
|
||||
}
|
||||
|
||||
|
|
|
@ -26,6 +26,7 @@ var (
    ErrAlreadyExists = errors.New("already exists")
    ErrFailedPrecondition = errors.New("failed precondition")
    ErrUnavailable = errors.New("unavailable")
    ErrNotImplemented = errors.New("not implemented") // represents not supported and unimplemented
)

func IsInvalidArgument(err error) bool {

@ -52,3 +53,7 @@ func IsFailedPrecondition(err error) bool {
func IsUnavailable(err error) bool {
    return errors.Cause(err) == ErrUnavailable
}

func IsNotImplemented(err error) bool {
    return errors.Cause(err) == ErrNotImplemented
}

@ -38,6 +38,8 @@ func ToGRPC(err error) error {
        return grpc.Errorf(codes.FailedPrecondition, err.Error())
    case IsUnavailable(err):
        return grpc.Errorf(codes.Unavailable, err.Error())
    case IsNotImplemented(err):
        return grpc.Errorf(codes.Unimplemented, err.Error())
    }

    return err

@ -69,17 +71,17 @@ func FromGRPC(err error) error {
        cls = ErrUnavailable
    case codes.FailedPrecondition:
        cls = ErrFailedPrecondition
    case codes.Unimplemented:
        cls = ErrNotImplemented
    default:
        cls = ErrUnknown
    }

    if cls != nil {
        msg := rebaseMessage(cls, err)
        if msg != "" {
            err = errors.Wrapf(cls, msg)
        } else {
            err = cls
        }
    msg := rebaseMessage(cls, err)
    if msg != "" {
        err = errors.Wrapf(cls, msg)
    } else {
        err = errors.WithStack(cls)
    }

    return err
|
|
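A round-trip sketch (not part of the diff) showing how the new `ErrNotImplemented` class survives the gRPC boundary via the `ToGRPC`/`FromGRPC` mappings added above:

```go
package main

import (
    "fmt"

    "github.com/containerd/containerd/errdefs"
    "github.com/pkg/errors"
)

func main() {
    // Classify an error, map it to a gRPC status, and recover the class on
    // the other side; Unimplemented maps back to ErrNotImplemented.
    err := errors.Wrap(errdefs.ErrNotImplemented, "cross-platform diff")

    grpcErr := errdefs.ToGRPC(err)         // codes.Unimplemented
    recovered := errdefs.FromGRPC(grpcErr) // errors.Cause == ErrNotImplemented

    fmt.Println(errdefs.IsNotImplemented(recovered)) // true
}
```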
|
@ -22,3 +22,7 @@ type publisherFunc func(ctx context.Context, topic string, event Event) error
func (fn publisherFunc) Publish(ctx context.Context, topic string, event Event) error {
    return fn(ctx, topic, event)
}

type Subscriber interface {
    Subscribe(ctx context.Context, filters ...string) (ch <-chan *events.Envelope, errs <-chan error)
}

@ -185,11 +185,11 @@ func validateTopic(topic string) error {
    }

    if topic[0] != '/' {
        return errors.Wrapf(errdefs.ErrInvalidArgument, "must start with '/'", topic)
        return errors.Wrapf(errdefs.ErrInvalidArgument, "must start with '/'")
    }

    if len(topic) == 1 {
        return errors.Wrapf(errdefs.ErrInvalidArgument, "must have at least one component", topic)
        return errors.Wrapf(errdefs.ErrInvalidArgument, "must have at least one component")
    }

    components := strings.Split(topic[1:], "/")
|
|
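A stand-alone sketch of the topic rules enforced by `validateTopic` above; the per-component check at the end is a guess at what the elided remainder of the function does, so treat it as an assumption:

```go
package main

import (
    "strings"

    "github.com/pkg/errors"
)

// validTopic: a topic must start with '/' and have at least one non-empty
// component, e.g. "/tasks/create" is valid, "tasks/create" and "/" are not.
func validTopic(topic string) error {
    if topic == "" || topic[0] != '/' {
        return errors.New("must start with '/'")
    }
    if len(topic) == 1 {
        return errors.New("must have at least one component")
    }
    for _, c := range strings.Split(topic[1:], "/") {
        if c == "" {
            return errors.New("empty component") // assumed check, not shown in the diff
        }
    }
    return nil
}
```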
|
@ -11,7 +11,6 @@ import (

    "github.com/containerd/continuity/sysx"
    "github.com/pkg/errors"
    "golang.org/x/sys/unix"
)

// whiteouts are files with a special meaning for the layered filesystem.

@ -84,11 +83,11 @@ func compareSysStat(s1, s2 interface{}) (bool, error) {

func compareCapabilities(p1, p2 string) (bool, error) {
    c1, err := sysx.LGetxattr(p1, "security.capability")
    if err != nil && err != unix.ENODATA {
    if err != nil && err != sysx.ENODATA {
        return false, errors.Wrapf(err, "failed to get xattr for %s", p1)
    }
    c2, err := sysx.LGetxattr(p2, "security.capability")
    if err != nil && err != unix.ENODATA {
    if err != nil && err != sysx.ENODATA {
        return false, errors.Wrapf(err, "failed to get xattr for %s", p2)
    }
    return bytes.Equal(c1, c2), nil
|
|
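The same xattr comparison can be written as a self-contained sketch using only the `sysx` calls that appear in the diff above (not part of the diff; treats a missing attribute the same as an empty one):

```go
package main

import (
    "bytes"

    "github.com/containerd/continuity/sysx"
)

// sameCapabilities compares the security.capability xattr of two paths,
// ignoring sysx.ENODATA so files without the attribute compare equal.
func sameCapabilities(p1, p2 string) (bool, error) {
    c1, err := sysx.LGetxattr(p1, "security.capability")
    if err != nil && err != sysx.ENODATA {
        return false, err
    }
    c2, err := sysx.LGetxattr(p2, "security.capability")
    if err != nil && err != sysx.ENODATA {
        return false, err
    }
    return bytes.Equal(c1, c2), nil
}
```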
|
@ -2,10 +2,9 @@ package containerd
|
|||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
|
||||
"github.com/containerd/containerd/content"
|
||||
"github.com/containerd/containerd/images"
|
||||
"github.com/containerd/containerd/platforms"
|
||||
"github.com/containerd/containerd/rootfs"
|
||||
digest "github.com/opencontainers/go-digest"
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
|
@ -20,10 +19,12 @@ type Image interface {
|
|||
Target() ocispec.Descriptor
|
||||
// Unpack unpacks the image's content into a snapshot
|
||||
Unpack(context.Context, string) error
|
||||
// RootFS returns the image digests
|
||||
// RootFS returns the unpacked diffids that make up images rootfs.
|
||||
RootFS(ctx context.Context) ([]digest.Digest, error)
|
||||
// Size returns the image size
|
||||
// Size returns the total size of the image's packed resources.
|
||||
Size(ctx context.Context) (int64, error)
|
||||
// Config descriptor for the image.
|
||||
Config(ctx context.Context) (ocispec.Descriptor, error)
|
||||
}
|
||||
|
||||
var _ = (Image)(&image{})
|
||||
|
@ -44,7 +45,7 @@ func (i *image) Target() ocispec.Descriptor {
|
|||
|
||||
func (i *image) RootFS(ctx context.Context) ([]digest.Digest, error) {
|
||||
provider := i.client.ContentStore()
|
||||
return i.i.RootFS(ctx, provider)
|
||||
return i.i.RootFS(ctx, provider, platforms.Format(platforms.Default()))
|
||||
}
|
||||
|
||||
func (i *image) Size(ctx context.Context) (int64, error) {
|
||||
|
@ -52,8 +53,13 @@ func (i *image) Size(ctx context.Context) (int64, error) {
|
|||
return i.i.Size(ctx, provider)
|
||||
}
|
||||
|
||||
func (i *image) Config(ctx context.Context) (ocispec.Descriptor, error) {
|
||||
provider := i.client.ContentStore()
|
||||
return i.i.Config(ctx, provider, platforms.Format(platforms.Default()))
|
||||
}
|
||||
|
||||
func (i *image) Unpack(ctx context.Context, snapshotterName string) error {
|
||||
layers, err := i.getLayers(ctx)
|
||||
layers, err := i.getLayers(ctx, platforms.Format(platforms.Default()))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -91,19 +97,15 @@ func (i *image) Unpack(ctx context.Context, snapshotterName string) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (i *image) getLayers(ctx context.Context) ([]rootfs.Layer, error) {
|
||||
func (i *image) getLayers(ctx context.Context, platform string) ([]rootfs.Layer, error) {
|
||||
cs := i.client.ContentStore()
|
||||
|
||||
// TODO: Support manifest list
|
||||
p, err := content.ReadBlob(ctx, cs, i.i.Target.Digest)
|
||||
manifest, err := images.Manifest(ctx, cs, i.i.Target, platform)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to read manifest blob")
|
||||
return nil, errors.Wrap(err, "")
|
||||
}
|
||||
var manifest ocispec.Manifest
|
||||
if err := json.Unmarshal(p, &manifest); err != nil {
|
||||
return nil, errors.Wrap(err, "failed to unmarshal manifest")
|
||||
}
|
||||
diffIDs, err := i.i.RootFS(ctx, cs)
|
||||
|
||||
diffIDs, err := i.i.RootFS(ctx, cs, platform)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to resolve rootfs")
|
||||
}
|
||||
|
|
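A usage sketch (not part of the diff) of the updated client `Image` interface shown above: `RootFS`, `Config`, and `Unpack` now resolve against the host's default platform internally, so callers only pass a snapshotter name. The snapshotter string here is illustrative:

```go
package main

import (
    "context"
    "fmt"

    "github.com/containerd/containerd"
)

// inspectAndUnpack prints the layer count and packed size of an image and
// unpacks it into the named snapshotter.
func inspectAndUnpack(ctx context.Context, img containerd.Image) error {
    diffIDs, err := img.RootFS(ctx)
    if err != nil {
        return err
    }
    fmt.Println("layers:", len(diffIDs))

    if err := img.Unpack(ctx, "overlayfs"); err != nil { // snapshotter name, e.g. overlayfs on Linux
        return err
    }
    size, err := img.Size(ctx)
    if err != nil {
        return err
    }
    fmt.Println("packed size:", size)
    return nil
}
```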
|
@ -67,7 +67,7 @@ func Walk(ctx context.Context, handler Handler, descs ...ocispec.Descriptor) err
        children, err := handler.Handle(ctx, desc)
        if err != nil {
            if errors.Cause(err) == SkipDesc {
                return nil // don't traverse the children.
                continue // don't traverse the children.
            }
            return err
        }
|
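With the `continue` change above, a handler that returns `SkipDesc` prunes only that descriptor's children while `Walk` keeps visiting its siblings. A sketch of such a handler (illustrative, not part of the diff):

```go
package main

import (
    "context"

    "github.com/containerd/containerd/images"
    ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

// stopAtManifests records manifest descriptors and returns SkipDesc so their
// children are not traversed, while other descriptors continue to be walked.
func stopAtManifests(collected *[]ocispec.Descriptor) images.HandlerFunc {
    return func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
        if desc.MediaType == ocispec.MediaTypeImageManifest {
            *collected = append(*collected, desc)
            return nil, images.SkipDesc
        }
        return nil, nil
    }
}
```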
|
|
@ -6,6 +6,8 @@ import (
|
|||
"time"
|
||||
|
||||
"github.com/containerd/containerd/content"
|
||||
"github.com/containerd/containerd/errdefs"
|
||||
"github.com/containerd/containerd/platforms"
|
||||
digest "github.com/opencontainers/go-digest"
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/pkg/errors"
|
||||
|
@ -39,16 +41,16 @@ type Store interface {
|
|||
//
|
||||
// The caller can then use the descriptor to resolve and process the
|
||||
// configuration of the image.
|
||||
func (image *Image) Config(ctx context.Context, provider content.Provider) (ocispec.Descriptor, error) {
|
||||
return Config(ctx, provider, image.Target)
|
||||
func (image *Image) Config(ctx context.Context, provider content.Provider, platform string) (ocispec.Descriptor, error) {
|
||||
return Config(ctx, provider, image.Target, platform)
|
||||
}
|
||||
|
||||
// RootFS returns the unpacked diffids that make up and images rootfs.
|
||||
//
|
||||
// These are used to verify that a set of layers unpacked to the expected
|
||||
// values.
|
||||
func (image *Image) RootFS(ctx context.Context, provider content.Provider) ([]digest.Digest, error) {
|
||||
desc, err := image.Config(ctx, provider)
|
||||
func (image *Image) RootFS(ctx context.Context, provider content.Provider, platform string) ([]digest.Digest, error) {
|
||||
desc, err := image.Config(ctx, provider, platform)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -67,17 +69,23 @@ func (image *Image) Size(ctx context.Context, provider content.Provider) (int64,
|
|||
}), ChildrenHandler(provider)), image.Target)
|
||||
}
|
||||
|
||||
// Config resolves the image configuration descriptor using a content provided
|
||||
// to resolve child resources on the image.
|
||||
//
|
||||
// The caller can then use the descriptor to resolve and process the
|
||||
// configuration of the image.
|
||||
func Config(ctx context.Context, provider content.Provider, image ocispec.Descriptor) (ocispec.Descriptor, error) {
|
||||
var configDesc ocispec.Descriptor
|
||||
return configDesc, Walk(ctx, HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
|
||||
switch image.MediaType {
|
||||
func Manifest(ctx context.Context, provider content.Provider, image ocispec.Descriptor, platform string) (ocispec.Manifest, error) {
|
||||
var (
|
||||
matcher platforms.Matcher
|
||||
m *ocispec.Manifest
|
||||
err error
|
||||
)
|
||||
if platform != "" {
|
||||
matcher, err = platforms.Parse(platform)
|
||||
if err != nil {
|
||||
return ocispec.Manifest{}, err
|
||||
}
|
||||
}
|
||||
|
||||
if err := Walk(ctx, HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
|
||||
switch desc.MediaType {
|
||||
case MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest:
|
||||
p, err := content.ReadBlob(ctx, provider, image.Digest)
|
||||
p, err := content.ReadBlob(ctx, provider, desc.Digest)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -87,14 +95,108 @@ func Config(ctx context.Context, provider content.Provider, image ocispec.Descri
|
|||
return nil, err
|
||||
}
|
||||
|
||||
configDesc = manifest.Config
|
||||
if platform != "" {
|
||||
if desc.Platform != nil && !matcher.Match(*desc.Platform) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
if desc.Platform == nil {
|
||||
p, err := content.ReadBlob(ctx, provider, manifest.Config.Digest)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var image ocispec.Image
|
||||
if err := json.Unmarshal(p, &image); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if !matcher.Match(platforms.Normalize(ocispec.Platform{OS: image.OS, Architecture: image.Architecture})) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
m = &manifest
|
||||
|
||||
return nil, nil
|
||||
default:
|
||||
return nil, errors.New("could not resolve config")
|
||||
case MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex:
|
||||
p, err := content.ReadBlob(ctx, provider, desc.Digest)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var idx ocispec.Index
|
||||
if err := json.Unmarshal(p, &idx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if platform == "" {
|
||||
return idx.Manifests, nil
|
||||
}
|
||||
|
||||
var descs []ocispec.Descriptor
|
||||
for _, d := range idx.Manifests {
|
||||
if d.Platform == nil || matcher.Match(*d.Platform) {
|
||||
descs = append(descs, d)
|
||||
}
|
||||
}
|
||||
|
||||
return descs, nil
|
||||
|
||||
}
|
||||
return nil, errors.New("could not resolve manifest")
|
||||
}), image); err != nil {
|
||||
return ocispec.Manifest{}, err
|
||||
}
|
||||
|
||||
if m == nil {
|
||||
return ocispec.Manifest{}, errors.Wrap(errdefs.ErrNotFound, "manifest not found")
|
||||
}
|
||||
|
||||
return *m, nil
|
||||
}
|
||||
|
||||
// Config resolves the image configuration descriptor using a content provided
|
||||
// to resolve child resources on the image.
|
||||
//
|
||||
// The caller can then use the descriptor to resolve and process the
|
||||
// configuration of the image.
|
||||
func Config(ctx context.Context, provider content.Provider, image ocispec.Descriptor, platform string) (ocispec.Descriptor, error) {
|
||||
manifest, err := Manifest(ctx, provider, image, platform)
|
||||
if err != nil {
|
||||
return ocispec.Descriptor{}, err
|
||||
}
|
||||
return manifest.Config, err
|
||||
}
|
||||
|
||||
// Platforms returns one or more platforms supported by the image.
|
||||
func Platforms(ctx context.Context, provider content.Provider, image ocispec.Descriptor) ([]ocispec.Platform, error) {
|
||||
var platformSpecs []ocispec.Platform
|
||||
return platformSpecs, Walk(ctx, Handlers(HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
|
||||
if desc.Platform != nil {
|
||||
platformSpecs = append(platformSpecs, *desc.Platform)
|
||||
return nil, SkipDesc
|
||||
}
|
||||
|
||||
}), image)
|
||||
switch desc.MediaType {
|
||||
case MediaTypeDockerSchema2Config, ocispec.MediaTypeImageConfig:
|
||||
p, err := content.ReadBlob(ctx, provider, desc.Digest)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var image ocispec.Image
|
||||
if err := json.Unmarshal(p, &image); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
platformSpecs = append(platformSpecs,
|
||||
platforms.Normalize(ocispec.Platform{OS: image.OS, Architecture: image.Architecture}))
|
||||
}
|
||||
return nil, nil
|
||||
}), ChildrenHandler(provider)), image)
|
||||
}
|
||||
|
||||
// RootFS returns the unpacked diffids that make up and images rootfs.
|
||||
|
|
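A short sketch (not part of the diff) of the platform-aware helpers introduced above: `Config` now takes a platform specifier string, and passing the formatted default platform filters manifest lists down to the host's entry, while an empty string skips filtering:

```go
package main

import (
    "context"

    "github.com/containerd/containerd/content"
    "github.com/containerd/containerd/images"
    "github.com/containerd/containerd/platforms"
    ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

// hostConfig resolves the image config descriptor for the host platform.
func hostConfig(ctx context.Context, provider content.Provider, target ocispec.Descriptor) (ocispec.Descriptor, error) {
    p := platforms.Format(platforms.Default()) // e.g. "linux/amd64"
    return images.Config(ctx, provider, target, p)
}
```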
|
@ -24,7 +24,6 @@ type IOConfig struct {
|
|||
type IO interface {
|
||||
// Config returns the IO configuration.
|
||||
Config() IOConfig
|
||||
|
||||
// Cancel aborts all current io operations
|
||||
Cancel()
|
||||
// Wait blocks until all io copy operations have completed
|
||||
|
|
|
@ -88,3 +88,85 @@ func copyIO(fifos *FIFOSet, ioset *ioSet, tty bool) (_ *wgCloser, err error) {
|
|||
cancel: cancel,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// NewDirectIO returns an IO implementation that exposes the pipes directly
|
||||
func NewDirectIO(ctx context.Context, terminal bool) (*DirectIO, error) {
|
||||
set, err := NewFifos("")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
f := &DirectIO{
|
||||
set: set,
|
||||
terminal: terminal,
|
||||
}
|
||||
defer func() {
|
||||
if err != nil {
|
||||
f.Delete()
|
||||
}
|
||||
}()
|
||||
if f.Stdin, err = fifo.OpenFifo(ctx, set.In, syscall.O_WRONLY|syscall.O_CREAT|syscall.O_NONBLOCK, 0700); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if f.Stdout, err = fifo.OpenFifo(ctx, set.Out, syscall.O_RDONLY|syscall.O_CREAT|syscall.O_NONBLOCK, 0700); err != nil {
|
||||
f.Stdin.Close()
|
||||
return nil, err
|
||||
}
|
||||
if f.Stderr, err = fifo.OpenFifo(ctx, set.Err, syscall.O_RDONLY|syscall.O_CREAT|syscall.O_NONBLOCK, 0700); err != nil {
|
||||
f.Stdin.Close()
|
||||
f.Stdout.Close()
|
||||
return nil, err
|
||||
}
|
||||
return f, nil
|
||||
}
|
||||
|
||||
type DirectIO struct {
|
||||
Stdin io.WriteCloser
|
||||
Stdout io.ReadCloser
|
||||
Stderr io.ReadCloser
|
||||
|
||||
set *FIFOSet
|
||||
terminal bool
|
||||
}
|
||||
|
||||
func (f *DirectIO) IOCreate(id string) (IO, error) {
|
||||
return f, nil
|
||||
}
|
||||
|
||||
func (f *DirectIO) IOAttach(set *FIFOSet) (IO, error) {
|
||||
return f, nil
|
||||
}
|
||||
|
||||
func (f *DirectIO) Config() IOConfig {
|
||||
return IOConfig{
|
||||
Terminal: f.terminal,
|
||||
Stdin: f.set.In,
|
||||
Stdout: f.set.Out,
|
||||
Stderr: f.set.Err,
|
||||
}
|
||||
}
|
||||
|
||||
func (f *DirectIO) Cancel() {
|
||||
// nothing to cancel as all operations are handled externally
|
||||
}
|
||||
|
||||
func (f *DirectIO) Wait() {
|
||||
// nothing to wait on as all operations are handled externally
|
||||
}
|
||||
|
||||
func (f *DirectIO) Close() error {
|
||||
err := f.Stdin.Close()
|
||||
if err2 := f.Stdout.Close(); err == nil {
|
||||
err = err2
|
||||
}
|
||||
if err2 := f.Stderr.Close(); err == nil {
|
||||
err = err2
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (f *DirectIO) Delete() error {
|
||||
if f.set.Dir == "" {
|
||||
return nil
|
||||
}
|
||||
return os.RemoveAll(f.set.Dir)
|
||||
}
|
||||
|
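A sketch (not part of the diff) of the new `DirectIO` type above: the FIFOs are exposed directly, so the caller pumps stdout itself instead of relying on `copyIO`; the returned value can then be handed to whatever IO-creation hook the task API expects:

```go
package main

import (
    "context"
    "io"
    "os"

    "github.com/containerd/containerd"
)

// attachDirect opens direct FIFOs for a non-terminal task and streams the
// container's stdout to the process stdout in the background.
func attachDirect(ctx context.Context) (*containerd.DirectIO, error) {
    dio, err := containerd.NewDirectIO(ctx, false) // false: no terminal
    if err != nil {
        return nil, err
    }
    go io.Copy(os.Stdout, dio.Stdout) // caller-managed copy; Cancel/Wait are no-ops
    return dio, nil
}
```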
|
|
@ -37,8 +37,10 @@ var _ = math.Inf
|
|||
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
|
||||
|
||||
type RuncOptions struct {
|
||||
CriuPath string `protobuf:"bytes,1,opt,name=criu_path,json=criuPath,proto3" json:"criu_path,omitempty"`
|
||||
SystemdCgroup string `protobuf:"bytes,2,opt,name=systemd_cgroup,json=systemdCgroup,proto3" json:"systemd_cgroup,omitempty"`
|
||||
Runtime string `protobuf:"bytes,1,opt,name=runtime,proto3" json:"runtime,omitempty"`
|
||||
RuntimeRoot string `protobuf:"bytes,2,opt,name=runtime_root,json=runtimeRoot,proto3" json:"runtime_root,omitempty"`
|
||||
CriuPath string `protobuf:"bytes,3,opt,name=criu_path,json=criuPath,proto3" json:"criu_path,omitempty"`
|
||||
SystemdCgroup bool `protobuf:"varint,4,opt,name=systemd_cgroup,json=systemdCgroup,proto3" json:"systemd_cgroup,omitempty"`
|
||||
}
|
||||
|
||||
func (m *RuncOptions) Reset() { *m = RuncOptions{} }
|
||||
|
@ -95,17 +97,33 @@ func (m *RuncOptions) MarshalTo(dAtA []byte) (int, error) {
|
|||
_ = i
|
||||
var l int
|
||||
_ = l
|
||||
if len(m.CriuPath) > 0 {
|
||||
if len(m.Runtime) > 0 {
|
||||
dAtA[i] = 0xa
|
||||
i++
|
||||
i = encodeVarintRunc(dAtA, i, uint64(len(m.Runtime)))
|
||||
i += copy(dAtA[i:], m.Runtime)
|
||||
}
|
||||
if len(m.RuntimeRoot) > 0 {
|
||||
dAtA[i] = 0x12
|
||||
i++
|
||||
i = encodeVarintRunc(dAtA, i, uint64(len(m.RuntimeRoot)))
|
||||
i += copy(dAtA[i:], m.RuntimeRoot)
|
||||
}
|
||||
if len(m.CriuPath) > 0 {
|
||||
dAtA[i] = 0x1a
|
||||
i++
|
||||
i = encodeVarintRunc(dAtA, i, uint64(len(m.CriuPath)))
|
||||
i += copy(dAtA[i:], m.CriuPath)
|
||||
}
|
||||
if len(m.SystemdCgroup) > 0 {
|
||||
dAtA[i] = 0x12
|
||||
if m.SystemdCgroup {
|
||||
dAtA[i] = 0x20
|
||||
i++
|
||||
if m.SystemdCgroup {
|
||||
dAtA[i] = 1
|
||||
} else {
|
||||
dAtA[i] = 0
|
||||
}
|
||||
i++
|
||||
i = encodeVarintRunc(dAtA, i, uint64(len(m.SystemdCgroup)))
|
||||
i += copy(dAtA[i:], m.SystemdCgroup)
|
||||
}
|
||||
return i, nil
|
||||
}
|
||||
|
@ -334,13 +352,20 @@ func encodeVarintRunc(dAtA []byte, offset int, v uint64) int {
|
|||
func (m *RuncOptions) Size() (n int) {
|
||||
var l int
|
||||
_ = l
|
||||
l = len(m.Runtime)
|
||||
if l > 0 {
|
||||
n += 1 + l + sovRunc(uint64(l))
|
||||
}
|
||||
l = len(m.RuntimeRoot)
|
||||
if l > 0 {
|
||||
n += 1 + l + sovRunc(uint64(l))
|
||||
}
|
||||
l = len(m.CriuPath)
|
||||
if l > 0 {
|
||||
n += 1 + l + sovRunc(uint64(l))
|
||||
}
|
||||
l = len(m.SystemdCgroup)
|
||||
if l > 0 {
|
||||
n += 1 + l + sovRunc(uint64(l))
|
||||
if m.SystemdCgroup {
|
||||
n += 2
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
@ -432,6 +457,8 @@ func (this *RuncOptions) String() string {
|
|||
return "nil"
|
||||
}
|
||||
s := strings.Join([]string{`&RuncOptions{`,
|
||||
`Runtime:` + fmt.Sprintf("%v", this.Runtime) + `,`,
|
||||
`RuntimeRoot:` + fmt.Sprintf("%v", this.RuntimeRoot) + `,`,
|
||||
`CriuPath:` + fmt.Sprintf("%v", this.CriuPath) + `,`,
|
||||
`SystemdCgroup:` + fmt.Sprintf("%v", this.SystemdCgroup) + `,`,
|
||||
`}`,
|
||||
|
@ -510,6 +537,64 @@ func (m *RuncOptions) Unmarshal(dAtA []byte) error {
|
|||
}
|
||||
switch fieldNum {
|
||||
case 1:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Runtime", wireType)
|
||||
}
|
||||
var stringLen uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowRunc
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLen |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
intStringLen := int(stringLen)
|
||||
if intStringLen < 0 {
|
||||
return ErrInvalidLengthRunc
|
||||
}
|
||||
postIndex := iNdEx + intStringLen
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.Runtime = string(dAtA[iNdEx:postIndex])
|
||||
iNdEx = postIndex
|
||||
case 2:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field RuntimeRoot", wireType)
|
||||
}
|
||||
var stringLen uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowRunc
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLen |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
intStringLen := int(stringLen)
|
||||
if intStringLen < 0 {
|
||||
return ErrInvalidLengthRunc
|
||||
}
|
||||
postIndex := iNdEx + intStringLen
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.RuntimeRoot = string(dAtA[iNdEx:postIndex])
|
||||
iNdEx = postIndex
|
||||
case 3:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field CriuPath", wireType)
|
||||
}
|
||||
|
@ -538,11 +623,11 @@ func (m *RuncOptions) Unmarshal(dAtA []byte) error {
|
|||
}
|
||||
m.CriuPath = string(dAtA[iNdEx:postIndex])
|
||||
iNdEx = postIndex
|
||||
case 2:
|
||||
if wireType != 2 {
|
||||
case 4:
|
||||
if wireType != 0 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field SystemdCgroup", wireType)
|
||||
}
|
||||
var stringLen uint64
|
||||
var v int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowRunc
|
||||
|
@ -552,21 +637,12 @@ func (m *RuncOptions) Unmarshal(dAtA []byte) error {
|
|||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLen |= (uint64(b) & 0x7F) << shift
|
||||
v |= (int(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
intStringLen := int(stringLen)
|
||||
if intStringLen < 0 {
|
||||
return ErrInvalidLengthRunc
|
||||
}
|
||||
postIndex := iNdEx + intStringLen
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.SystemdCgroup = string(dAtA[iNdEx:postIndex])
|
||||
iNdEx = postIndex
|
||||
m.SystemdCgroup = bool(v != 0)
|
||||
default:
|
||||
iNdEx = preIndex
|
||||
skippy, err := skipRunc(dAtA[iNdEx:])
|
||||
|
@ -1163,33 +1239,35 @@ func init() {
|
|||
}
|
||||
|
||||
var fileDescriptorRunc = []byte{
|
||||
// 438 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x93, 0xb1, 0x6f, 0xd4, 0x30,
|
||||
0x18, 0xc5, 0x9b, 0xb6, 0xb4, 0x89, 0xaf, 0x57, 0xc0, 0x50, 0x29, 0x14, 0x11, 0xca, 0x09, 0xa4,
|
||||
0xb2, 0xdc, 0x49, 0xb0, 0x20, 0xd8, 0xb8, 0x11, 0x28, 0x25, 0xc0, 0xc2, 0x62, 0xa5, 0xbe, 0x8f,
|
||||
0xc4, 0xba, 0xe4, 0xfb, 0x2c, 0xdb, 0xa1, 0xb9, 0x8d, 0x3f, 0xaf, 0x23, 0x62, 0x62, 0xa4, 0xf9,
|
||||
0x47, 0x40, 0x71, 0x2e, 0x85, 0x95, 0x95, 0xed, 0xf9, 0xf7, 0x9e, 0x9e, 0xa5, 0x27, 0x7d, 0xec,
|
||||
0x79, 0xae, 0x5c, 0x51, 0x9f, 0x4d, 0x25, 0x55, 0x33, 0x49, 0xe8, 0x32, 0x85, 0x60, 0x16, 0x7f,
|
||||
0xcb, 0x52, 0x61, 0xdd, 0xcc, 0x4c, 0x8d, 0x92, 0xb4, 0xb3, 0x5e, 0x4c, 0xb5, 0x21, 0x47, 0xfc,
|
||||
0xe0, 0x4f, 0x6a, 0xea, 0x53, 0xd3, 0xce, 0x3c, 0xbc, 0x9d, 0x53, 0x4e, 0x3e, 0x31, 0xeb, 0x54,
|
||||
0x1f, 0x9e, 0xbc, 0x63, 0xa3, 0xb4, 0x46, 0xf9, 0x56, 0x3b, 0x45, 0x68, 0xf9, 0x5d, 0x16, 0x49,
|
||||
0xa3, 0x6a, 0xa1, 0x33, 0x57, 0xc4, 0xc1, 0x51, 0x70, 0x1c, 0xa5, 0x61, 0x07, 0x4e, 0x33, 0x57,
|
||||
0xf0, 0x47, 0x6c, 0xdf, 0xae, 0xac, 0x83, 0x6a, 0x21, 0x64, 0x6e, 0xa8, 0xd6, 0xf1, 0xa6, 0x4f,
|
||||
0x8c, 0xd7, 0x74, 0xee, 0xe1, 0xe4, 0xfb, 0x26, 0x1b, 0xcf, 0x0d, 0x64, 0x0e, 0x86, 0xd6, 0x09,
|
||||
0x1b, 0x23, 0x09, 0xad, 0xbe, 0x90, 0x13, 0x86, 0xc8, 0xf9, 0xe6, 0x30, 0x1d, 0x21, 0x9d, 0x76,
|
||||
0x2c, 0x25, 0x72, 0xfc, 0x0e, 0x0b, 0x49, 0x03, 0x0a, 0x27, 0xfb, 0xda, 0x30, 0xdd, 0xed, 0xde,
|
||||
0x1f, 0xa4, 0xe6, 0x4f, 0xd8, 0x01, 0x34, 0x0e, 0x0c, 0x66, 0xa5, 0xa8, 0x51, 0x35, 0xc2, 0x92,
|
||||
0x5c, 0x82, 0xb3, 0xf1, 0x96, 0xcf, 0xdd, 0x1a, 0xcc, 0x8f, 0xa8, 0x9a, 0xf7, 0xbd, 0xc5, 0x0f,
|
||||
0x59, 0xe8, 0xc0, 0x54, 0x0a, 0xb3, 0x32, 0xde, 0xf6, 0xb1, 0xab, 0x37, 0xbf, 0xc7, 0xd8, 0x67,
|
||||
0x55, 0x82, 0x28, 0x49, 0x2e, 0x6d, 0x7c, 0xcd, 0xbb, 0x51, 0x47, 0x5e, 0x77, 0x80, 0x3f, 0x66,
|
||||
0x37, 0xa0, 0xd2, 0x6e, 0x25, 0x30, 0xab, 0xc0, 0xea, 0x4c, 0x82, 0x8d, 0x77, 0x8e, 0xb6, 0x8e,
|
||||
0xa3, 0xf4, 0xba, 0xe7, 0x27, 0x57, 0x98, 0x3f, 0x60, 0x7b, 0xfd, 0x12, 0x56, 0x54, 0xb4, 0x80,
|
||||
0x78, 0xd7, 0xef, 0x31, 0x5a, 0xb3, 0x37, 0xb4, 0x00, 0xfe, 0x90, 0xed, 0x23, 0x09, 0x84, 0x73,
|
||||
0xb1, 0x84, 0x95, 0x51, 0x98, 0xc7, 0xa1, 0xff, 0x70, 0x0f, 0xe9, 0x04, 0xce, 0x5f, 0xf5, 0x8c,
|
||||
0xdf, 0x67, 0x23, 0x5b, 0xa8, 0x6a, 0xd8, 0x35, 0xf2, 0x3d, 0xac, 0x43, 0xeb, 0x51, 0x7f, 0x05,
|
||||
0xec, 0xe6, 0xbc, 0x00, 0xb9, 0xd4, 0xa4, 0xd0, 0x0d, 0xc3, 0x72, 0xb6, 0x0d, 0x8d, 0x1a, 0xf6,
|
||||
0xf4, 0xfa, 0x7f, 0x1d, 0xf2, 0x65, 0x7a, 0x71, 0x99, 0x6c, 0xfc, 0xb8, 0x4c, 0x36, 0xbe, 0xb6,
|
||||
0x49, 0x70, 0xd1, 0x26, 0xc1, 0xb7, 0x36, 0x09, 0x7e, 0xb6, 0x49, 0xf0, 0xe9, 0xd9, 0x3f, 0x1e,
|
||||
0xcb, 0x8b, 0x41, 0x9c, 0xed, 0xf8, 0x23, 0x78, 0xfa, 0x3b, 0x00, 0x00, 0xff, 0xff, 0x00, 0x19,
|
||||
0xba, 0x8f, 0x6f, 0x03, 0x00, 0x00,
|
||||
// 467 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x93, 0x41, 0x6f, 0xd3, 0x40,
|
||||
0x10, 0x85, 0xbb, 0x6d, 0x69, 0x9d, 0x4d, 0x53, 0x60, 0xa1, 0x92, 0x29, 0xc2, 0x84, 0x08, 0xa4,
|
||||
0x70, 0x49, 0x24, 0xb8, 0x20, 0xb8, 0x91, 0x23, 0x50, 0x2a, 0x03, 0x17, 0x2e, 0x2b, 0x77, 0x33,
|
||||
0x24, 0xab, 0xd8, 0x33, 0xab, 0xdd, 0x35, 0x75, 0x6e, 0xfc, 0x02, 0x7e, 0x57, 0x8f, 0x88, 0x13,
|
||||
0x47, 0x9a, 0x3f, 0x02, 0xf2, 0xda, 0x2e, 0x5c, 0xb9, 0x72, 0x7b, 0xf3, 0xbd, 0xb1, 0xe7, 0xe9,
|
||||
0x49, 0xcb, 0x9f, 0x2f, 0xb4, 0x5f, 0x96, 0x67, 0x13, 0x45, 0xc5, 0x54, 0x11, 0xfa, 0x4c, 0x23,
|
||||
0xd8, 0xf9, 0xdf, 0x32, 0xd7, 0x58, 0x56, 0x53, 0x5b, 0xa2, 0x22, 0xe3, 0x5d, 0x10, 0x13, 0x63,
|
||||
0xc9, 0x93, 0x38, 0xfa, 0xb3, 0x35, 0x09, 0x5b, 0x93, 0xda, 0x3c, 0xbe, 0xbd, 0xa0, 0x05, 0x85,
|
||||
0x8d, 0x69, 0xad, 0x9a, 0xe5, 0xd1, 0x57, 0xc6, 0xfb, 0x69, 0x89, 0xea, 0xad, 0xf1, 0x9a, 0xd0,
|
||||
0x89, 0x98, 0xef, 0xdb, 0x12, 0xbd, 0x2e, 0x20, 0x66, 0x43, 0x36, 0xee, 0xa5, 0xdd, 0x28, 0x1e,
|
||||
0xf0, 0x83, 0x56, 0x4a, 0x4b, 0xe4, 0xe3, 0xed, 0x60, 0xf7, 0x5b, 0x96, 0x12, 0x79, 0x71, 0x97,
|
||||
0xf7, 0x94, 0xd5, 0xa5, 0x34, 0x99, 0x5f, 0xc6, 0x3b, 0xc1, 0x8f, 0x6a, 0x70, 0x9a, 0xf9, 0xa5,
|
||||
0x78, 0xc4, 0x0f, 0xdd, 0xda, 0x79, 0x28, 0xe6, 0x52, 0x2d, 0x2c, 0x95, 0x26, 0xde, 0x1d, 0xb2,
|
||||
0x71, 0x94, 0x0e, 0x5a, 0x3a, 0x0b, 0x70, 0xf4, 0x7d, 0x9b, 0x0f, 0x66, 0x16, 0x32, 0x0f, 0x5d,
|
||||
0xa4, 0x11, 0x1f, 0x20, 0x49, 0xa3, 0x3f, 0x93, 0x6f, 0x2e, 0xb3, 0xf0, 0x5d, 0x1f, 0xe9, 0xb4,
|
||||
0x66, 0xe1, 0xf2, 0x1d, 0x1e, 0x91, 0x01, 0x94, 0x5e, 0x99, 0x10, 0x2c, 0x4a, 0xf7, 0xeb, 0xf9,
|
||||
0xbd, 0x32, 0xe2, 0x09, 0x3f, 0x82, 0xca, 0x83, 0xc5, 0x2c, 0x97, 0x25, 0xea, 0x4a, 0x3a, 0x52,
|
||||
0x2b, 0xf0, 0x2e, 0x04, 0x8c, 0xd2, 0x5b, 0x9d, 0xf9, 0x01, 0x75, 0xf5, 0xae, 0xb1, 0xc4, 0x31,
|
||||
0x8f, 0x3c, 0xd8, 0x42, 0x63, 0x96, 0xb7, 0x29, 0xaf, 0x66, 0x71, 0x8f, 0xf3, 0x4f, 0x3a, 0x07,
|
||||
0x99, 0x93, 0x5a, 0xb9, 0xf8, 0x5a, 0x70, 0x7b, 0x35, 0x79, 0x5d, 0x03, 0xf1, 0x98, 0xdf, 0x80,
|
||||
0xc2, 0xf8, 0xb5, 0xc4, 0xac, 0x00, 0x67, 0x32, 0x05, 0x2e, 0xde, 0x1b, 0xee, 0x8c, 0x7b, 0xe9,
|
||||
0xf5, 0xc0, 0x4f, 0xae, 0x70, 0xdd, 0x68, 0xd3, 0x84, 0x93, 0x05, 0xcd, 0x21, 0xde, 0x6f, 0x1a,
|
||||
0x6d, 0xd9, 0x1b, 0x9a, 0x83, 0x78, 0xc8, 0x0f, 0x91, 0x24, 0xc2, 0xb9, 0x5c, 0xc1, 0xda, 0x6a,
|
||||
0x5c, 0xc4, 0x51, 0x38, 0x78, 0x80, 0x74, 0x02, 0xe7, 0xaf, 0x1a, 0x26, 0xee, 0xf3, 0xbe, 0x5b,
|
||||
0xea, 0xa2, 0xeb, 0xb5, 0x17, 0xfe, 0xc3, 0x6b, 0xd4, 0x96, 0xfa, 0x8b, 0xf1, 0x9b, 0xb3, 0x25,
|
||||
0xa8, 0x95, 0x21, 0x8d, 0xbe, 0x2b, 0x56, 0xf0, 0x5d, 0xa8, 0x74, 0xd7, 0x67, 0xd0, 0xff, 0x6b,
|
||||
0x91, 0x2f, 0xd3, 0x8b, 0xcb, 0x64, 0xeb, 0xc7, 0x65, 0xb2, 0xf5, 0x65, 0x93, 0xb0, 0x8b, 0x4d,
|
||||
0xc2, 0xbe, 0x6d, 0x12, 0xf6, 0x73, 0x93, 0xb0, 0x8f, 0xcf, 0xfe, 0xf1, 0xa9, 0xbd, 0xe8, 0xc4,
|
||||
0xd9, 0x5e, 0x78, 0x42, 0x4f, 0x7f, 0x07, 0x00, 0x00, 0xff, 0xff, 0xe6, 0x26, 0x29, 0x60, 0xad,
|
||||
0x03, 0x00, 0x00,
|
||||
}
|
||||
|
|
|
@ -7,8 +7,10 @@ import "gogoproto/gogo.proto";
option go_package = "github.com/containerd/containerd/linux/runcopts;runcopts";

message RuncOptions {
    string criu_path = 1;
    string systemd_cgroup = 2;
    string runtime = 1;
    string runtime_root = 2;
    string criu_path = 3;
    bool systemd_cgroup = 4;
}

message CreateOptions {
|
|
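The regenerated Go struct for the message above gains `Runtime` and `RuntimeRoot` strings and turns `SystemdCgroup` into a bool. A small sketch of constructing it (the path values are illustrative, not from this diff):

```go
package main

import "github.com/containerd/containerd/linux/runcopts"

// runcOpts shows the updated RuncOptions fields.
var runcOpts = &runcopts.RuncOptions{
    Runtime:       "runc",
    RuntimeRoot:   "/run/containerd/runc",      // illustrative
    CriuPath:      "/usr/local/sbin/criu",      // illustrative
    SystemdCgroup: true,
}
```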
|
@ -38,17 +38,18 @@ var (
|
|||
bucketKeyObjectBlob = []byte("blob") // stores content links
|
||||
bucketKeyObjectIngest = []byte("ingest") // stores ingest links
|
||||
|
||||
bucketKeyDigest = []byte("digest")
|
||||
bucketKeyMediaType = []byte("mediatype")
|
||||
bucketKeySize = []byte("size")
|
||||
bucketKeyImage = []byte("image")
|
||||
bucketKeyRuntime = []byte("runtime")
|
||||
bucketKeyName = []byte("name")
|
||||
bucketKeyParent = []byte("parent")
|
||||
bucketKeyOptions = []byte("options")
|
||||
bucketKeySpec = []byte("spec")
|
||||
bucketKeyRootFS = []byte("rootfs")
|
||||
bucketKeyTarget = []byte("target")
|
||||
bucketKeyDigest = []byte("digest")
|
||||
bucketKeyMediaType = []byte("mediatype")
|
||||
bucketKeySize = []byte("size")
|
||||
bucketKeyImage = []byte("image")
|
||||
bucketKeyRuntime = []byte("runtime")
|
||||
bucketKeyName = []byte("name")
|
||||
bucketKeyParent = []byte("parent")
|
||||
bucketKeyOptions = []byte("options")
|
||||
bucketKeySpec = []byte("spec")
|
||||
bucketKeySnapshotKey = []byte("snapshotKey")
|
||||
bucketKeySnapshotter = []byte("snapshotter")
|
||||
bucketKeyTarget = []byte("target")
|
||||
)
|
||||
|
||||
func getBucket(tx *bolt.Tx, keys ...[]byte) *bolt.Bucket {
|
||||
|
|
|
@ -91,8 +91,8 @@ func (s *containerStore) Create(ctx context.Context, container containers.Contai
|
|||
return containers.Container{}, err
|
||||
}
|
||||
|
||||
if err := identifiers.Validate(container.ID); err != nil {
|
||||
return containers.Container{}, err
|
||||
if err := validateContainer(&container); err != nil {
|
||||
return containers.Container{}, errors.Wrap(err, "create container failed validation")
|
||||
}
|
||||
|
||||
bkt, err := createContainersBucket(s.tx, namespace)
|
||||
|
@ -144,37 +144,55 @@ func (s *containerStore) Update(ctx context.Context, container containers.Contai
|
|||
createdat := updated.CreatedAt
|
||||
updated.ID = container.ID
|
||||
|
||||
if len(fieldpaths) == 0 {
|
||||
// only allow updates to these field on full replace.
|
||||
fieldpaths = []string{"labels", "spec"}
|
||||
|
||||
// Fields that are immutable must cause an error when no field paths
|
||||
// are provided. This allows these fields to become mutable in the
|
||||
// future.
|
||||
if updated.Image != container.Image {
|
||||
return containers.Container{}, errors.Wrapf(errdefs.ErrInvalidArgument, "container.Image field is immutable")
|
||||
}
|
||||
|
||||
if updated.SnapshotKey != container.SnapshotKey {
|
||||
return containers.Container{}, errors.Wrapf(errdefs.ErrInvalidArgument, "container.SnapshotKey field is immutable")
|
||||
}
|
||||
|
||||
if updated.Snapshotter != container.Snapshotter {
|
||||
return containers.Container{}, errors.Wrapf(errdefs.ErrInvalidArgument, "container.Snapshotter field is immutable")
|
||||
}
|
||||
|
||||
if updated.Runtime.Name != container.Runtime.Name {
|
||||
return containers.Container{}, errors.Wrapf(errdefs.ErrInvalidArgument, "container.Runtime.Name field is immutable")
|
||||
}
|
||||
}
|
||||
|
||||
// apply the field mask. If you update this code, you better follow the
|
||||
// field mask rules in field_mask.proto. If you don't know what this
|
||||
// is, do not update this code.
|
||||
if len(fieldpaths) > 0 {
|
||||
// TODO(stevvooe): Move this logic into the store itself.
|
||||
for _, path := range fieldpaths {
|
||||
if strings.HasPrefix(path, "labels.") {
|
||||
if updated.Labels == nil {
|
||||
updated.Labels = map[string]string{}
|
||||
}
|
||||
key := strings.TrimPrefix(path, "labels.")
|
||||
updated.Labels[key] = container.Labels[key]
|
||||
continue
|
||||
}
|
||||
|
||||
switch path {
|
||||
case "labels":
|
||||
updated.Labels = container.Labels
|
||||
case "image":
|
||||
updated.Image = container.Image
|
||||
case "spec":
|
||||
updated.Spec = container.Spec
|
||||
case "rootfs":
|
||||
updated.RootFS = container.RootFS
|
||||
default:
|
||||
return containers.Container{}, errors.Wrapf(errdefs.ErrInvalidArgument, "cannot update %q field on %q", path, container.ID)
|
||||
for _, path := range fieldpaths {
|
||||
if strings.HasPrefix(path, "labels.") {
|
||||
if updated.Labels == nil {
|
||||
updated.Labels = map[string]string{}
|
||||
}
|
||||
key := strings.TrimPrefix(path, "labels.")
|
||||
updated.Labels[key] = container.Labels[key]
|
||||
continue
|
||||
}
|
||||
} else {
|
||||
// no field mask present, just replace everything
|
||||
updated = container
|
||||
|
||||
switch path {
|
||||
case "labels":
|
||||
updated.Labels = container.Labels
|
||||
case "spec":
|
||||
updated.Spec = container.Spec
|
||||
default:
|
||||
return containers.Container{}, errors.Wrapf(errdefs.ErrInvalidArgument, "cannot update %q field on %q", path, container.ID)
|
||||
}
|
||||
}
|
||||
|
||||
if err := validateContainer(&updated); err != nil {
|
||||
return containers.Container{}, errors.Wrap(err, "update failed validation")
|
||||
}
|
||||
|
||||
updated.CreatedAt = createdat
|
||||
|
@ -183,7 +201,7 @@ func (s *containerStore) Update(ctx context.Context, container containers.Contai
|
|||
return containers.Container{}, errors.Wrap(err, "failed to write container")
|
||||
}
|
||||
|
||||
return container, nil
|
||||
return updated, nil
|
||||
}
|
||||
|
||||
func (s *containerStore) Delete(ctx context.Context, id string) error {
|
||||
|
@ -203,6 +221,27 @@ func (s *containerStore) Delete(ctx context.Context, id string) error {
|
|||
return err
|
||||
}
|
||||
|
||||
func validateContainer(container *containers.Container) error {
|
||||
if err := identifiers.Validate(container.ID); err != nil {
|
||||
return errors.Wrapf(err, "container.ID validation error")
|
||||
}
|
||||
|
||||
// labels and image have no validation
|
||||
if container.Runtime.Name == "" {
|
||||
return errors.Wrapf(errdefs.ErrInvalidArgument, "container.Runtime.Name must be set")
|
||||
}
|
||||
|
||||
if container.Spec == nil {
|
||||
return errors.Wrapf(errdefs.ErrInvalidArgument, "container.Spec must be set")
|
||||
}
|
||||
|
||||
if container.SnapshotKey != "" && container.Snapshotter == "" {
|
||||
return errors.Wrapf(errdefs.ErrInvalidArgument, "container.Snapshotter must be set if container.SnapshotKey is set")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
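A sketch of a container record that would pass `validateContainer` above: a valid ID, a runtime name, a non-nil spec, and a snapshotter whenever a snapshot key is set. The `RuntimeInfo` type name, `typeurl.MarshalAny`, and the runtime/snapshotter strings are assumptions based on upstream containerd of the same vintage, not taken from this diff:

```go
package main

import (
    "github.com/containerd/containerd/containers"
    "github.com/containerd/containerd/typeurl"
)

// minimalContainer fills in the fields the metadata store validation requires.
func minimalContainer(id string, spec interface{}) (containers.Container, error) {
    any, err := typeurl.MarshalAny(spec) // spec must be non-nil after marshalling
    if err != nil {
        return containers.Container{}, err
    }
    return containers.Container{
        ID:          id,
        Runtime:     containers.RuntimeInfo{Name: "io.containerd.runtime.v1.linux"}, // assumed name
        Spec:        any,
        Snapshotter: "overlayfs", // required because SnapshotKey is set
        SnapshotKey: id,
    }, nil
}
```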
func readContainer(container *containers.Container, bkt *bolt.Bucket) error {
|
||||
labels, err := boltutil.ReadLabels(bkt)
|
||||
if err != nil {
|
||||
|
@ -245,9 +284,10 @@ func readContainer(container *containers.Container, bkt *bolt.Bucket) error {
|
|||
return err
|
||||
}
|
||||
container.Spec = &any
|
||||
case string(bucketKeyRootFS):
|
||||
container.RootFS = string(v)
|
||||
|
||||
case string(bucketKeySnapshotKey):
|
||||
container.SnapshotKey = string(v)
|
||||
case string(bucketKeySnapshotter):
|
||||
container.Snapshotter = string(v)
|
||||
}
|
||||
|
||||
return nil
|
||||
|
@ -272,7 +312,8 @@ func writeContainer(bkt *bolt.Bucket, container *containers.Container) error {
|
|||
|
||||
for _, v := range [][2][]byte{
|
||||
{bucketKeyImage, []byte(container.Image)},
|
||||
{bucketKeyRootFS, []byte(container.RootFS)},
|
||||
{bucketKeySnapshotter, []byte(container.Snapshotter)},
|
||||
{bucketKeySnapshotKey, []byte(container.SnapshotKey)},
|
||||
} {
|
||||
if err := bkt.Put(v[0], v[1]); err != nil {
|
||||
return err
|
||||
|
@ -294,18 +335,13 @@ func writeContainer(bkt *bolt.Bucket, container *containers.Container) error {
|
|||
return err
|
||||
}
|
||||
|
||||
obkt, err := rbkt.CreateBucket(bucketKeyOptions)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if container.Runtime.Options != nil {
|
||||
data, err := proto.Marshal(container.Runtime.Options)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := obkt.Put(bucketKeyOptions, data); err != nil {
|
||||
if err := rbkt.Put(bucketKeyOptions, data); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
|
|
@ -352,19 +352,19 @@ type namespacedWriter struct {
|
|||
db *bolt.DB
|
||||
}
|
||||
|
||||
func (nw *namespacedWriter) Commit(size int64, expected digest.Digest, opts ...content.Opt) error {
|
||||
return nw.db.Update(func(tx *bolt.Tx) error {
|
||||
func (nw *namespacedWriter) Commit(ctx context.Context, size int64, expected digest.Digest, opts ...content.Opt) error {
|
||||
return update(ctx, nw.db, func(tx *bolt.Tx) error {
|
||||
bkt := getIngestBucket(tx, nw.namespace)
|
||||
if bkt != nil {
|
||||
if err := bkt.Delete([]byte(nw.ref)); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nw.commit(tx, size, expected, opts...)
|
||||
return nw.commit(ctx, tx, size, expected, opts...)
|
||||
})
|
||||
}
|
||||
|
||||
func (nw *namespacedWriter) commit(tx *bolt.Tx, size int64, expected digest.Digest, opts ...content.Opt) error {
|
||||
func (nw *namespacedWriter) commit(ctx context.Context, tx *bolt.Tx, size int64, expected digest.Digest, opts ...content.Opt) error {
|
||||
var base content.Info
|
||||
for _, opt := range opts {
|
||||
if err := opt(&base); err != nil {
|
||||
|
@ -382,7 +382,7 @@ func (nw *namespacedWriter) commit(tx *bolt.Tx, size int64, expected digest.Dige
|
|||
|
||||
actual := nw.Writer.Digest()
|
||||
|
||||
if err := nw.Writer.Commit(size, expected); err != nil {
|
||||
if err := nw.Writer.Commit(ctx, size, expected); err != nil {
|
||||
if !errdefs.IsAlreadyExists(err) {
|
||||
return err
|
||||
}
|
||||
|
|
|
@ -481,6 +481,9 @@ func (s *snapshotter) Walk(ctx context.Context, fn func(context.Context, snapsho
|
|||
for _, pair := range pairs {
|
||||
info, err := s.Snapshotter.Stat(ctx, pair.bkey)
|
||||
if err != nil {
|
||||
if errdefs.IsNotFound(err) {
|
||||
continue
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
|
|
|
@ -8,7 +8,36 @@ import (

func (m *Mount) Mount(target string) error {
    flags, data := parseMountOptions(m.Options)
    return unix.Mount(m.Source, target, m.Type, uintptr(flags), data)

    // propagation types.
    const ptypes = unix.MS_SHARED | unix.MS_PRIVATE | unix.MS_SLAVE | unix.MS_UNBINDABLE

    // Ensure propagation type change flags aren't included in other calls.
    oflags := flags &^ ptypes

    // In the case of remounting with changed data (data != ""), need to call mount (moby/moby#34077).
    if flags&unix.MS_REMOUNT == 0 || data != "" {
        // Initial call applying all non-propagation flags for mount
        // or remount with changed data
        if err := unix.Mount(m.Source, target, m.Type, uintptr(oflags), data); err != nil {
            return err
        }
    }

    if flags&ptypes != 0 {
        // Change the propagation type.
        const pflags = ptypes | unix.MS_REC | unix.MS_SILENT
        if err := unix.Mount("", target, "", uintptr(flags&pflags), ""); err != nil {
            return err
        }
    }

    const broflags = unix.MS_BIND | unix.MS_RDONLY
    if oflags&broflags == broflags {
        // Remount the bind to apply read only.
        return unix.Mount("", target, "", uintptr(oflags|unix.MS_REMOUNT), "")
    }
    return nil
}

func Unmount(mount string, flags int) error {
|
|
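A sketch (not part of the diff) of a mount that exercises the three stages above: the bind mount is created first, the propagation change is applied in a second call, and a final remount applies the read-only flag:

```go
package main

import "github.com/containerd/containerd/mount"

// bindReadonly mounts src at target as a recursive, read-only slave bind.
func bindReadonly(src, target string) error {
    m := mount.Mount{
        Type:    "bind",
        Source:  src,
        Options: []string{"rbind", "rslave", "ro"},
    }
    return m.Mount(target)
}
```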
|
@ -0,0 +1,77 @@
|
|||
package platforms
|
||||
|
||||
import (
|
||||
"runtime"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// These function are generated from from https://golang.org/src/go/build/syslist.go.
|
||||
//
|
||||
// We use switch statements because they are slightly faster than map lookups
|
||||
// and use a little less memory.
|
||||
|
||||
// isKnownOS returns true if we know about the operating system.
|
||||
//
|
||||
// The OS value should be normalized before calling this function.
|
||||
func isKnownOS(os string) bool {
|
||||
switch os {
|
||||
case "android", "darwin", "dragonfly", "freebsd", "linux", "nacl", "netbsd", "openbsd", "plan9", "solaris", "windows", "zos":
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// isKnownArch returns true if we know about the architecture.
|
||||
//
|
||||
// The arch value should be normalized before being passed to this function.
|
||||
func isKnownArch(arch string) bool {
|
||||
switch arch {
|
||||
case "386", "amd64", "amd64p32", "arm", "armbe", "arm64", "arm64be", "ppc64", "ppc64le", "mips", "mipsle", "mips64", "mips64le", "mips64p32", "mips64p32le", "ppc", "s390", "s390x", "sparc", "sparc64":
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func normalizeOS(os string) string {
|
||||
if os == "" {
|
||||
return runtime.GOOS
|
||||
}
|
||||
os = strings.ToLower(os)
|
||||
|
||||
switch os {
|
||||
case "macos":
|
||||
os = "darwin"
|
||||
}
|
||||
return os
|
||||
}
|
||||
|
||||
// normalizeArch normalizes the architecture.
|
||||
func normalizeArch(arch, variant string) (string, string) {
|
||||
arch, variant = strings.ToLower(arch), strings.ToLower(variant)
|
||||
switch arch {
|
||||
case "i386":
|
||||
arch = "386"
|
||||
variant = ""
|
||||
case "x86_64", "x86-64":
|
||||
arch = "amd64"
|
||||
variant = ""
|
||||
case "aarch64":
|
||||
arch = "arm64"
|
||||
variant = "" // v8 is implied
|
||||
case "armhf":
|
||||
arch = "arm"
|
||||
variant = ""
|
||||
case "armel":
|
||||
arch = "arm"
|
||||
variant = "v6"
|
||||
case "arm":
|
||||
switch variant {
|
||||
case "v7", "7":
|
||||
variant = "v7"
|
||||
case "5", "6", "8":
|
||||
variant = "v" + variant
|
||||
}
|
||||
}
|
||||
|
||||
return arch, variant
|
||||
}
|
|
@ -0,0 +1,16 @@
package platforms

import (
    "runtime"

    specs "github.com/opencontainers/image-spec/specs-go/v1"
)

// Default returns the current platform's default platform specification.
func Default() specs.Platform {
    return specs.Platform{
        OS: runtime.GOOS,
        Architecture: runtime.GOARCH,
        // TODO(stevvooe): Need to resolve GOARM for arm hosts.
    }
}
|
|
@ -0,0 +1,236 @@
|
|||
// Package platforms provides a toolkit for normalizing, matching and
|
||||
// specifying container platforms.
|
||||
//
|
||||
// Centered around OCI platform specifications, we define a string-based
|
||||
// specifier syntax that can be used for user input. With a specifier, users
|
||||
// only need to specify the parts of the platform that are relevant to their
|
||||
// context, providing an operating system or architecture or both.
|
||||
//
|
||||
// How do I use this package?
|
||||
//
|
||||
// The vast majority of use cases should simply use the match function with
|
||||
// user input. The first step is to parse a specifier into a matcher:
|
||||
//
|
||||
// m, err := Parse("linux")
|
||||
// if err != nil { ... }
|
||||
//
|
||||
// Once you have a matcher, use it to match against the platform declared by a
|
||||
// component, typically from an image or runtime. Since extracting an images
|
||||
// platform is a little more involved, we'll use an example against the
|
||||
// platform default:
|
||||
//
|
||||
// if ok := m.Match(Default()); !ok { /* doesn't match */ }
|
||||
//
|
||||
// This can be composed in loops for resolving runtimes or used as a filter for
|
||||
// fetch and select images.
|
||||
//
|
||||
// More details of the specifier syntax and platform spec follow.
|
||||
//
|
||||
// Declaring Platform Support
|
||||
//
|
||||
// Components that have strict platform requirements should use the OCI
|
||||
// platform specification to declare their support. Typically, this will be
|
||||
// images and runtimes that should make these declaring which platform they
|
||||
// support specifically. This looks roughly as follows:
|
||||
//
|
||||
// type Platform struct {
|
||||
// Architecture string
|
||||
// OS string
|
||||
// Variant string
|
||||
// }
|
||||
//
|
||||
// Most images and runtimes should at least set Architecture and OS, according
|
||||
// to their GOARCH and GOOS values, respectively (follow the OCI image
|
||||
// specification when in doubt). ARM should set variant under certain
|
||||
// discussions, which are outlined below.
|
||||
//
|
||||
// Platform Specifiers
|
||||
//
|
||||
// While the OCI platform specifications provide a tool for components to
|
||||
// specify structured information, user input typically doesn't need the full
|
||||
// context and much can be inferred. To solve this problem, we introduced
|
||||
// "specifiers". A specifier has the format
|
||||
// `<os>|<arch>|<os>/<arch>[/<variant>]`. The user can provide either the
|
||||
// operating system or the architecture or both.
|
||||
//
|
||||
// An example of a common specifier is `linux/amd64`. If the host has a default
|
||||
// of runtime that matches this, the user can simply provide the component that
|
||||
// matters. For example, if a image provides amd64 and arm64 support, the
|
||||
// operating system, `linux` can be inferred, so they only have to provide
|
||||
// `arm64` or `amd64`. Similar behavior is implemented for operating systems,
|
||||
// where the architecture may be known but a runtime may support images from
|
||||
// different operating systems.
|
||||
//
|
||||
// Normalization
|
||||
//
|
||||
// Because not all users are familiar with the way the Go runtime represents
|
||||
// platforms, several normalizations have been provided to make this package
|
||||
// easier to user.
|
||||
//
|
||||
// The following are performed for architectures:
|
||||
//
|
||||
// Value Normalized
|
||||
// aarch64 arm64
|
||||
// armhf arm
|
||||
// armel arm/v6
|
||||
// i386 386
|
||||
// x86_64 amd64
|
||||
// x86-64 amd64
|
||||
//
|
||||
// We also normalize the operating system `macos` to `darwin`.
|
||||
//
|
||||
// ARM Support
|
||||
//
|
||||
// To qualify ARM architecture, the Variant field is used to qualify the arm
|
||||
// version. The most common arm version, v7, is represented without the variant
|
||||
// unless it is explicitly provided. This is treated as equivalent to armhf. A
|
||||
// previous architecture, armel, will be normalized to arm/v6.
|
||||
//
|
||||
// While these normalizations are provided, their support on arm platforms has
|
||||
// not yet been fully implemented and tested.
|
||||
package platforms
|
||||
|
||||
import (
|
||||
"regexp"
|
||||
"runtime"
|
||||
"strings"
|
||||
|
||||
"github.com/containerd/containerd/errdefs"
|
||||
specs "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
var (
|
||||
specifierRe = regexp.MustCompile(`^[A-Za-z0-9_-]+$`)
|
||||
)
|
||||
|
||||
// Matcher matches platforms specifications, provided by an image or runtime.
|
||||
type Matcher interface {
|
||||
Spec() specs.Platform
|
||||
Match(platform specs.Platform) bool
|
||||
}
|
||||
|
||||
type matcher struct {
|
||||
specs.Platform
|
||||
}
|
||||
|
||||
func (m *matcher) Spec() specs.Platform {
|
||||
return m.Platform
|
||||
}
|
||||
|
||||
func (m *matcher) Match(platform specs.Platform) bool {
|
||||
normalized := Normalize(platform)
|
||||
return m.OS == normalized.OS &&
|
||||
m.Architecture == normalized.Architecture &&
|
||||
m.Variant == normalized.Variant
|
||||
}
|
||||
|
||||
func (m *matcher) String() string {
|
||||
return Format(m.Platform)
|
||||
}
|
||||
|
||||
// Parse parses the platform specifier syntax into a platform declaration.
|
||||
//
|
||||
// Platform specifiers are in the format `<os>|<arch>|<os>/<arch>[/<variant>]`.
|
||||
// The minimum required information for a platform specifier is the operating
|
||||
// system or architecture. If there is only a single string (no slashes), the
|
||||
// value will be matched against the known set of operating systems, then fall
|
||||
// back to the known set of architectures. The missing component will be
|
||||
// inferred based on the local environment.
|
||||
//
|
||||
// Applications should opt to use `Match` over directly parsing specifiers.
|
||||
func Parse(specifier string) (Matcher, error) {
|
||||
if strings.Contains(specifier, "*") {
|
||||
// TODO(stevvooe): need to work out exact wildcard handling
|
||||
return nil, errors.Wrapf(errdefs.ErrInvalidArgument, "%q: wildcards not yet supported", specifier)
|
||||
}
|
||||
|
||||
parts := strings.Split(specifier, "/")
|
||||
|
||||
for _, part := range parts {
|
||||
if !specifierRe.MatchString(part) {
|
||||
return nil, errors.Wrapf(errdefs.ErrInvalidArgument, "%q is an invalid component of %q: platform specifier component must match %q", part, specifier, specifierRe.String())
|
||||
}
|
||||
}
|
||||
|
||||
var p specs.Platform
|
||||
switch len(parts) {
|
||||
case 1:
|
||||
// in this case, we will test that the value might be an OS, then look
|
||||
// it up. If it is not known, we'll treat it as an architecture. Since
|
||||
// we have very little information about the platform here, we are
|
||||
// going to be a little more strict if we don't know about the argument
|
||||
// value.
|
||||
p.OS = normalizeOS(parts[0])
|
||||
if isKnownOS(p.OS) {
|
||||
// picks a default architecture
|
||||
p.Architecture = runtime.GOARCH
|
||||
if p.Architecture == "arm" {
|
||||
// TODO(stevvooe): Resolve arm variant, if not v6 (default)
|
||||
return nil, errors.Wrapf(errdefs.ErrNotImplemented, "arm support not fully implemented")
|
||||
}
|
||||
|
||||
return &matcher{p}, nil
|
||||
}
|
||||
|
||||
p.Architecture, p.Variant = normalizeArch(parts[0], "")
|
||||
if isKnownArch(p.Architecture) {
|
||||
p.OS = runtime.GOOS
|
||||
return &matcher{p}, nil
|
||||
}
|
||||
|
||||
return nil, errors.Wrapf(errdefs.ErrInvalidArgument, "%q: unknown operating system or architecture", specifier)
|
||||
case 2:
|
||||
// In this case, we treat as a regular os/arch pair. We don't care
|
||||
// about whether or not we know of the platform.
|
||||
p.OS = normalizeOS(parts[0])
|
||||
p.Architecture, p.Variant = normalizeArch(parts[1], "")
|
||||
|
||||
return &matcher{p}, nil
|
||||
case 3:
|
||||
// we have a fully specified variant, this is rare
|
||||
p.OS = normalizeOS(parts[0])
|
||||
p.Architecture, p.Variant = normalizeArch(parts[1], parts[2])
|
||||
|
||||
return &matcher{p}, nil
|
||||
}
|
||||
|
||||
return nil, errors.Wrapf(errdefs.ErrInvalidArgument, "%q: cannot parse platform specifier", specifier)
|
||||
}
|
||||
|
||||
// Format returns a string specifier from the provided platform specification.
|
||||
func Format(platform specs.Platform) string {
|
||||
if platform.OS == "" {
|
||||
return "unknown"
|
||||
}
|
||||
|
||||
return joinNotEmpty(platform.OS, platform.Architecture, platform.Variant)
|
||||
}
|
||||
|
||||
func joinNotEmpty(s ...string) string {
|
||||
var ss []string
|
||||
for _, s := range s {
|
||||
if s == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
ss = append(ss, s)
|
||||
}
|
||||
|
||||
return strings.Join(ss, "/")
|
||||
}
|
||||
|
||||
// Normalize validates and translate the platform to the canonical value.
|
||||
//
|
||||
// For example, if "Aarch64" is encountered, we change it to "arm64" or if
|
||||
// "x86_64" is encountered, it becomes "amd64".
|
||||
func Normalize(platform specs.Platform) specs.Platform {
|
||||
platform.OS = normalizeOS(platform.OS)
|
||||
platform.Architecture, platform.Variant = normalizeArch(platform.Architecture, platform.Variant)
|
||||
|
||||
// these fields are deprecated, remove them
|
||||
platform.OSFeatures = nil
|
||||
platform.OSVersion = ""
|
||||
|
||||
return platform
|
||||
}
|
|
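A usage sketch (not part of the diff) of the new platforms package: parse a specifier, then match it against the host; because specifiers are normalized, "x86_64" and "amd64" behave the same:

```go
package main

import (
    "fmt"

    "github.com/containerd/containerd/platforms"
)

func main() {
    m, err := platforms.Parse("linux/x86_64") // normalized to linux/amd64
    if err != nil {
        panic(err)
    }
    fmt.Println(m.Match(platforms.Default())) // true on a linux/amd64 host
}
```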
@ -4,13 +4,13 @@ import (
|
|||
"context"
|
||||
"strings"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
eventsapi "github.com/containerd/containerd/api/services/events/v1"
|
||||
"github.com/containerd/containerd/api/services/tasks/v1"
|
||||
"github.com/containerd/containerd/errdefs"
|
||||
"github.com/containerd/containerd/runtime"
|
||||
"github.com/containerd/containerd/typeurl"
|
||||
specs "github.com/opencontainers/runtime-spec/specs-go"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
|
@ -21,11 +21,11 @@ type Process interface {
|
|||
// Start starts the process executing the user's defined binary
|
||||
Start(context.Context) error
|
||||
// Delete removes the process and any resources allocated returning the exit status
|
||||
Delete(context.Context, ...ProcessDeleteOpts) (uint32, error)
|
||||
Delete(context.Context, ...ProcessDeleteOpts) (*ExitStatus, error)
|
||||
// Kill sends the provided signal to the process
|
||||
Kill(context.Context, syscall.Signal) error
|
||||
// Wait blocks until the process has exited returning the exit status
|
||||
Wait(context.Context) (uint32, error)
|
||||
Kill(context.Context, syscall.Signal, ...KillOpts) error
|
||||
// Wait asynchronously waits for the process to exit, and sends the exit code to the returned channel
|
||||
Wait(context.Context) (<-chan ExitStatus, error)
|
||||
// CloseIO allows various pipes to be closed on the process
|
||||
CloseIO(context.Context, ...IOCloserOpts) error
|
||||
// Resize changes the width and height of the process's terminal
|
||||
|
@ -36,12 +36,46 @@ type Process interface {
|
|||
Status(context.Context) (Status, error)
|
||||
}
|
||||
|
||||
// ExitStatus encapsulates a process' exit status.
|
||||
// It is used by `Wait()` to return either a process exit code or an error
|
||||
type ExitStatus struct {
|
||||
code uint32
|
||||
exitedAt time.Time
|
||||
err error
|
||||
}
|
||||
|
||||
// Result returns the exit code and time of the exit status.
|
||||
// An error may be returned here, which indicates there was an error
|
||||
// at some point while waiting for the exit status. It does not signify
|
||||
// an error with the process itself.
|
||||
// If an error is returned, the process may still be running.
|
||||
func (s ExitStatus) Result() (uint32, time.Time, error) {
|
||||
return s.code, s.exitedAt, s.err
|
||||
}
|
||||
|
||||
// ExitCode returns the exit code of the process.
|
||||
// This is only valid if Error() returns nil
|
||||
func (s ExitStatus) ExitCode() uint32 {
|
||||
return s.code
|
||||
}
|
||||
|
||||
// ExitTime returns the exit time of the process
|
||||
// This is only valid if Error() returns nil
|
||||
func (s ExitStatus) ExitTime() time.Time {
|
||||
return s.exitedAt
|
||||
}
|
||||
|
||||
// Error returns the error, if any, that occurred while waiting for the
|
||||
// process.
|
||||
func (s ExitStatus) Error() error {
|
||||
return s.err
|
||||
}
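A short sketch of how a caller consumes the channel-based Wait together with the ExitStatus accessors above; the Process value is assumed to have been created and started through the client API elsewhere.

package main

import (
	"context"
	"fmt"

	"github.com/containerd/containerd"
)

// waitFor subscribes to the exit status before doing any other work, then
// blocks on the channel and unpacks the result with ExitStatus.Result.
func waitFor(ctx context.Context, p containerd.Process) (uint32, error) {
	statusC, err := p.Wait(ctx)
	if err != nil {
		return 0, err
	}
	st := <-statusC
	code, exitedAt, err := st.Result()
	if err != nil {
		// an error while waiting; the process may still be running
		return 0, err
	}
	fmt.Println("process exited at", exitedAt)
	return code, nil
}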
|
||||
|
||||
type process struct {
|
||||
id string
|
||||
task *task
|
||||
pid uint32
|
||||
io IO
|
||||
spec *specs.Process
|
||||
}
|
||||
|
||||
func (p *process) ID() string {
|
||||
|
@ -70,48 +104,71 @@ func (p *process) Start(ctx context.Context) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (p *process) Kill(ctx context.Context, s syscall.Signal) error {
|
||||
func (p *process) Kill(ctx context.Context, s syscall.Signal, opts ...KillOpts) error {
|
||||
var i KillInfo
|
||||
for _, o := range opts {
|
||||
if err := o(ctx, p, &i); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
_, err := p.task.client.TaskService().Kill(ctx, &tasks.KillRequest{
|
||||
Signal: uint32(s),
|
||||
ContainerID: p.task.id,
|
||||
ExecID: p.id,
|
||||
All: i.All,
|
||||
})
|
||||
return errdefs.FromGRPC(err)
|
||||
}
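The KillOpts applied above are small functional options that fill in KillInfo before the request is built. A minimal sketch of one such option follows; the exact KillOpts signature is inferred from its call sites in this hunk, and the helper names are hypothetical.

package main

import (
	"context"
	"syscall"

	"github.com/containerd/containerd"
)

// withAll marks the KillInfo so that every process in the task receives the
// signal, not just the init process.
func withAll(_ context.Context, _ containerd.Process, i *containerd.KillInfo) error {
	i.All = true
	return nil
}

// killAll sends SIGKILL to the whole task using the option above.
func killAll(ctx context.Context, t containerd.Task) error {
	return t.Kill(ctx, syscall.SIGKILL, withAll)
}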
|
||||
|
||||
func (p *process) Wait(ctx context.Context) (uint32, error) {
|
||||
func (p *process) Wait(ctx context.Context) (<-chan ExitStatus, error) {
|
||||
cancellable, cancel := context.WithCancel(ctx)
|
||||
defer cancel()
|
||||
eventstream, err := p.task.client.EventService().Subscribe(cancellable, &eventsapi.SubscribeRequest{
|
||||
Filters: []string{"topic==" + runtime.TaskExitEventTopic},
|
||||
})
|
||||
if err != nil {
|
||||
return UnknownExitStatus, err
|
||||
cancel()
|
||||
return nil, err
|
||||
}
|
||||
// first check if the task has exited
|
||||
status, err := p.Status(ctx)
|
||||
if err != nil {
|
||||
return UnknownExitStatus, errdefs.FromGRPC(err)
|
||||
cancel()
|
||||
return nil, errdefs.FromGRPC(err)
|
||||
}
|
||||
|
||||
chStatus := make(chan ExitStatus, 1)
|
||||
if status.Status == Stopped {
|
||||
return status.ExitStatus, nil
|
||||
cancel()
|
||||
chStatus <- ExitStatus{code: status.ExitStatus, exitedAt: status.ExitTime}
|
||||
return chStatus, nil
|
||||
}
|
||||
for {
|
||||
evt, err := eventstream.Recv()
|
||||
if err != nil {
|
||||
return UnknownExitStatus, err
|
||||
}
|
||||
if typeurl.Is(evt.Event, &eventsapi.TaskExit{}) {
|
||||
v, err := typeurl.UnmarshalAny(evt.Event)
|
||||
|
||||
go func() {
|
||||
defer cancel()
|
||||
chStatus <- ExitStatus{} // signal that the goroutine is running
|
||||
for {
|
||||
evt, err := eventstream.Recv()
|
||||
if err != nil {
|
||||
return UnknownExitStatus, err
|
||||
chStatus <- ExitStatus{code: UnknownExitStatus, err: err}
|
||||
return
|
||||
}
|
||||
e := v.(*eventsapi.TaskExit)
|
||||
if e.ID == p.id && e.ContainerID == p.task.id {
|
||||
return e.ExitStatus, nil
|
||||
if typeurl.Is(evt.Event, &eventsapi.TaskExit{}) {
|
||||
v, err := typeurl.UnmarshalAny(evt.Event)
|
||||
if err != nil {
|
||||
chStatus <- ExitStatus{code: UnknownExitStatus, err: err}
|
||||
return
|
||||
}
|
||||
e := v.(*eventsapi.TaskExit)
|
||||
if e.ID == p.id && e.ContainerID == p.task.id {
|
||||
chStatus <- ExitStatus{code: e.ExitStatus, exitedAt: e.ExitedAt}
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
<-chStatus // wait for the goroutine to be running
|
||||
return chStatus, nil
|
||||
}
|
||||
|
||||
func (p *process) CloseIO(ctx context.Context, opts ...IOCloserOpts) error {
|
||||
|
@ -142,31 +199,32 @@ func (p *process) Resize(ctx context.Context, w, h uint32) error {
|
|||
return errdefs.FromGRPC(err)
|
||||
}
|
||||
|
||||
func (p *process) Delete(ctx context.Context, opts ...ProcessDeleteOpts) (uint32, error) {
|
||||
func (p *process) Delete(ctx context.Context, opts ...ProcessDeleteOpts) (*ExitStatus, error) {
|
||||
for _, o := range opts {
|
||||
if err := o(ctx, p); err != nil {
|
||||
return UnknownExitStatus, err
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
status, err := p.Status(ctx)
|
||||
if err != nil {
|
||||
return UnknownExitStatus, err
|
||||
return nil, err
|
||||
}
|
||||
if status.Status != Stopped {
|
||||
return UnknownExitStatus, errors.Wrapf(errdefs.ErrFailedPrecondition, "process must be stopped before deletion")
|
||||
}
|
||||
if p.io != nil {
|
||||
p.io.Wait()
|
||||
p.io.Close()
|
||||
switch status.Status {
|
||||
case Running, Paused, Pausing:
|
||||
return nil, errors.Wrapf(errdefs.ErrFailedPrecondition, "process must be stopped before deletion")
|
||||
}
|
||||
r, err := p.task.client.TaskService().DeleteProcess(ctx, &tasks.DeleteProcessRequest{
|
||||
ContainerID: p.task.id,
|
||||
ExecID: p.id,
|
||||
})
|
||||
if err != nil {
|
||||
return UnknownExitStatus, errdefs.FromGRPC(err)
|
||||
return nil, errdefs.FromGRPC(err)
|
||||
}
|
||||
return r.ExitStatus, nil
|
||||
if p.io != nil {
|
||||
p.io.Wait()
|
||||
p.io.Close()
|
||||
}
|
||||
return &ExitStatus{code: r.ExitStatus, exitedAt: r.ExitedAt}, nil
|
||||
}
|
||||
|
||||
func (p *process) Status(ctx context.Context) (Status, error) {
|
||||
|
|
|
@ -31,6 +31,11 @@ func (r dockerFetcher) Fetch(ctx context.Context, desc ocispec.Descriptor) (io.R
|
|||
return nil, err
|
||||
}
|
||||
|
||||
ctx, err = contextWithRepositoryScope(ctx, r.refspec, false)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, path := range paths {
|
||||
u := r.url(path)
|
||||
|
||||
|
|
|
@ -28,6 +28,10 @@ type dockerPusher struct {
|
|||
}
|
||||
|
||||
func (p dockerPusher) Push(ctx context.Context, desc ocispec.Descriptor) (content.Writer, error) {
|
||||
ctx, err := contextWithRepositoryScope(ctx, p.refspec, true)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ref := remotes.MakeRefKey(ctx, desc)
|
||||
status, err := p.tracker.GetStatus(ref)
|
||||
if err == nil {
|
||||
|
@ -109,7 +113,10 @@ func (p dockerPusher) Push(ctx context.Context, desc ocispec.Descriptor) (conten
|
|||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if resp.StatusCode != http.StatusAccepted {
|
||||
|
||||
switch resp.StatusCode {
|
||||
case http.StatusOK, http.StatusAccepted, http.StatusNoContent:
|
||||
default:
|
||||
// TODO: log error
|
||||
return nil, errors.Errorf("unexpected response: %s", resp.Status)
|
||||
}
|
||||
|
@ -155,7 +162,10 @@ func (p dockerPusher) Push(ctx context.Context, desc ocispec.Descriptor) (conten
|
|||
pr.CloseWithError(err)
|
||||
return
|
||||
}
|
||||
if resp.StatusCode != http.StatusCreated {
|
||||
|
||||
switch resp.StatusCode {
|
||||
case http.StatusOK, http.StatusCreated, http.StatusNoContent:
|
||||
default:
|
||||
// TODO: log error
|
||||
pr.CloseWithError(errors.Errorf("unexpected response: %s", resp.Status))
|
||||
}
|
||||
|
@ -215,7 +225,7 @@ func (pw *pushWriter) Digest() digest.Digest {
|
|||
return pw.expected
|
||||
}
|
||||
|
||||
func (pw *pushWriter) Commit(size int64, expected digest.Digest, opts ...content.Opt) error {
|
||||
func (pw *pushWriter) Commit(ctx context.Context, size int64, expected digest.Digest, opts ...content.Opt) error {
|
||||
// Check whether read has already thrown an error
|
||||
if _, err := pw.pipe.Write([]byte{}); err != nil && err != io.ErrClosedPipe {
|
||||
return errors.Wrap(err, "pipe error before commit")
|
||||
|
@ -232,6 +242,14 @@ func (pw *pushWriter) Commit(size int64, expected digest.Digest, opts ...content
|
|||
return errors.New("no response")
|
||||
}
|
||||
|
||||
// 201 is the specified return status; some registries return
|
||||
// 200 or 204.
|
||||
switch resp.StatusCode {
|
||||
case http.StatusOK, http.StatusCreated, http.StatusNoContent:
|
||||
default:
|
||||
return errors.Errorf("unexpected status: %s", resp.Status)
|
||||
}
|
||||
|
||||
status, err := pw.tracker.GetStatus(pw.ref)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to get status")
|
||||
|
|
|
@ -116,6 +116,10 @@ func (r *dockerResolver) Resolve(ctx context.Context, ref string) (string, ocisp
|
|||
urls = append(urls, fetcher.url("manifests", refspec.Object))
|
||||
}
|
||||
|
||||
ctx, err = contextWithRepositoryScope(ctx, refspec, false)
|
||||
if err != nil {
|
||||
return "", ocispec.Descriptor{}, err
|
||||
}
|
||||
for _, u := range urls {
|
||||
req, err := http.NewRequest(http.MethodHead, u, nil)
|
||||
if err != nil {
|
||||
|
@ -228,8 +232,9 @@ func (r *dockerResolver) Pusher(ctx context.Context, ref string) (remotes.Pusher
|
|||
}
|
||||
|
||||
type dockerBase struct {
|
||||
base url.URL
|
||||
token string
|
||||
refspec reference.Spec
|
||||
base url.URL
|
||||
token string
|
||||
|
||||
client *http.Client
|
||||
useBasic bool
|
||||
|
@ -268,6 +273,7 @@ func (r *dockerResolver) base(refspec reference.Spec) (*dockerBase, error) {
|
|||
base.Path = path.Join("/v2", prefix)
|
||||
|
||||
return &dockerBase{
|
||||
refspec: refspec,
|
||||
base: base,
|
||||
client: r.client,
|
||||
username: username,
|
||||
|
@ -313,9 +319,11 @@ func (r *dockerBase) doRequestWithRetries(ctx context.Context, req *http.Request
|
|||
responses = append(responses, resp)
|
||||
req, err = r.retryRequest(ctx, req, responses)
|
||||
if err != nil {
|
||||
resp.Body.Close()
|
||||
return nil, err
|
||||
}
|
||||
if req != nil {
|
||||
resp.Body.Close()
|
||||
return r.doRequestWithRetries(ctx, req, responses)
|
||||
}
|
||||
return resp, err
|
||||
|
@ -428,14 +436,10 @@ func (r *dockerBase) setTokenAuth(ctx context.Context, params map[string]string)
|
|||
service: params["service"],
|
||||
}
|
||||
|
||||
scope, ok := params["scope"]
|
||||
if !ok {
|
||||
to.scopes = getTokenScopes(ctx, params)
|
||||
if len(to.scopes) == 0 {
|
||||
return errors.Errorf("no scope specified for token auth challenge")
|
||||
}
|
||||
|
||||
// TODO: Get added scopes from context
|
||||
to.scopes = []string{scope}
|
||||
|
||||
if r.secret != "" {
|
||||
// Credential information is provided, use oauth POST endpoint
|
||||
r.token, err = r.fetchTokenWithOAuth(ctx, to)
|
||||
|
@ -489,8 +493,9 @@ func (r *dockerBase) fetchTokenWithOAuth(ctx context.Context, to tokenOptions) (
|
|||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode == 405 && r.username != "" {
|
||||
// It would be nice if registries would implement the specifications
|
||||
// Registries without support for POST may return 404 for POST /v2/token.
|
||||
// As of September 2017, GCR is known to return 404.
|
||||
if (resp.StatusCode == 405 && r.username != "") || resp.StatusCode == 404 {
|
||||
return r.getToken(ctx, to)
|
||||
} else if resp.StatusCode < 200 || resp.StatusCode >= 400 {
|
||||
b, _ := ioutil.ReadAll(resp.Body)
|
||||
|
|
|
@ -252,7 +252,7 @@ func (c *Converter) fetchBlob(ctx context.Context, desc ocispec.Descriptor) erro
|
|||
|
||||
eg.Go(func() error {
|
||||
defer pw.Close()
|
||||
return content.Copy(cw, io.TeeReader(rc, pw), desc.Size, desc.Digest)
|
||||
return content.Copy(ctx, cw, io.TeeReader(rc, pw), desc.Size, desc.Digest)
|
||||
})
|
||||
|
||||
if err := eg.Wait(); err != nil {
|
||||
|
|
|
@ -0,0 +1,60 @@
|
|||
package docker
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/url"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/containerd/containerd/reference"
|
||||
)
|
||||
|
||||
// repositoryScope returns a repository scope string such as "repository:foo/bar:pull"
|
||||
// for "host/foo/bar:baz".
|
||||
// When push is true, both pull and push are added to the scope.
|
||||
func repositoryScope(refspec reference.Spec, push bool) (string, error) {
|
||||
u, err := url.Parse("dummy://" + refspec.Locator)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
s := "repository:" + strings.TrimPrefix(u.Path, "/") + ":pull"
|
||||
if push {
|
||||
s += ",push"
|
||||
}
|
||||
return s, nil
|
||||
}
|
||||
|
||||
// tokenScopesKey is used for the key for context.WithValue().
|
||||
// value: []string (e.g. {"registry:foo/bar:pull"})
|
||||
type tokenScopesKey struct{}
|
||||
|
||||
// contextWithRepositoryScope returns a context with tokenScopesKey{} and the repository scope value.
|
||||
func contextWithRepositoryScope(ctx context.Context, refspec reference.Spec, push bool) (context.Context, error) {
|
||||
s, err := repositoryScope(refspec, push)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return context.WithValue(ctx, tokenScopesKey{}, []string{s}), nil
|
||||
}
|
||||
|
||||
// getTokenScopes returns deduplicated and sorted scopes from ctx.Value(tokenScopesKey{}) and params["scope"].
|
||||
func getTokenScopes(ctx context.Context, params map[string]string) []string {
|
||||
var scopes []string
|
||||
if x := ctx.Value(tokenScopesKey{}); x != nil {
|
||||
scopes = append(scopes, x.([]string)...)
|
||||
}
|
||||
if scope, ok := params["scope"]; ok {
|
||||
for _, s := range scopes {
|
||||
// Note: this comparison is unaware of the scope grammar (https://docs.docker.com/registry/spec/auth/scope/)
|
||||
// So, "repository:foo/bar:pull,push" != "repository:foo/bar:push,pull", although semantically they are equal.
|
||||
if s == scope {
|
||||
// already appended
|
||||
goto Sort
|
||||
}
|
||||
}
|
||||
scopes = append(scopes, scope)
|
||||
}
|
||||
Sort:
|
||||
sort.Strings(scopes)
|
||||
return scopes
|
||||
}
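A test-style sketch of how these helpers behave, written as if it lived in the same package (the functions are unexported); the reference string and expected values follow from the parsing logic above.

package docker

import (
	"context"
	"testing"

	"github.com/containerd/containerd/reference"
)

func TestRepositoryScopeSketch(t *testing.T) {
	refspec, err := reference.Parse("docker.io/library/alpine:3.6")
	if err != nil {
		t.Fatal(err)
	}

	// The locator's path component becomes the repository in the scope string.
	s, err := repositoryScope(refspec, true)
	if err != nil {
		t.Fatal(err)
	}
	if s != "repository:library/alpine:pull,push" {
		t.Fatalf("unexpected scope %q", s)
	}

	// getTokenScopes merges the context scopes with the challenge's "scope"
	// parameter, deduplicating exact string matches and sorting the result.
	ctx, err := contextWithRepositoryScope(context.Background(), refspec, false)
	if err != nil {
		t.Fatal(err)
	}
	got := getTokenScopes(ctx, map[string]string{"scope": "repository:library/alpine:pull"})
	if len(got) != 1 || got[0] != "repository:library/alpine:pull" {
		t.Fatalf("unexpected scopes %v", got)
	}
}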
|
|
@ -101,7 +101,7 @@ func fetch(ctx context.Context, ingester content.Ingester, fetcher Fetcher, desc
|
|||
}
|
||||
defer rc.Close()
|
||||
|
||||
return content.Copy(cw, rc, desc.Size, desc.Digest)
|
||||
return content.Copy(ctx, cw, rc, desc.Size, desc.Digest)
|
||||
}
|
||||
|
||||
// PushHandler returns a handler that will push all content from the provider
|
||||
|
@ -139,5 +139,5 @@ func push(ctx context.Context, provider content.Provider, pusher Pusher, desc oc
|
|||
defer ra.Close()
|
||||
|
||||
rd := io.NewSectionReader(ra, 0, desc.Size)
|
||||
return content.Copy(cw, rd, desc.Size, desc.Digest)
|
||||
return content.Copy(ctx, cw, rd, desc.Size, desc.Digest)
|
||||
}
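For context, a hedged sketch of driving the context-aware Copy/Commit pair that these handlers now use; the Ingester.Writer signature (ref, size, expected digest) is the one implied by this vendoring snapshot, and the blob and ref values are illustrative.

package main

import (
	"bytes"
	"context"

	"github.com/containerd/containerd/content"
	digest "github.com/opencontainers/go-digest"
)

// writeBlob ingests a blob through an open content.Writer. content.Copy now
// threads ctx through to Commit, so cancellation reaches the underlying store.
func writeBlob(ctx context.Context, ingester content.Ingester, ref string, blob []byte) error {
	expected := digest.FromBytes(blob)
	cw, err := ingester.Writer(ctx, ref, int64(len(blob)), expected)
	if err != nil {
		return err
	}
	defer cw.Close()
	return content.Copy(ctx, cw, bytes.NewReader(blob), int64(len(blob)), expected)
}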
|
||||
|
|
|
@ -1,7 +1,10 @@
|
|||
package rootfs
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/containerd/containerd/errdefs"
|
||||
"github.com/containerd/containerd/log"
|
||||
|
@ -63,7 +66,7 @@ func ApplyLayer(ctx context.Context, layer Layer, chain []digest.Digest, sn snap
|
|||
return false, errors.Wrap(err, "failed to stat snapshot")
|
||||
}
|
||||
|
||||
key := fmt.Sprintf("extract %s", chainID)
|
||||
key := fmt.Sprintf("extract-%s %s", uniquePart(), chainID)
|
||||
|
||||
// Prepare snapshot with from parent
|
||||
mounts, err := sn.Prepare(ctx, key, parent.String())
|
||||
|
@ -90,8 +93,25 @@ func ApplyLayer(ctx context.Context, layer Layer, chain []digest.Digest, sn snap
|
|||
}
|
||||
|
||||
if err = sn.Commit(ctx, chainID.String(), key); err != nil {
|
||||
return false, errors.Wrapf(err, "failed to commit snapshot %s", parent)
|
||||
if !errdefs.IsAlreadyExists(err) {
|
||||
return false, errors.Wrapf(err, "failed to commit snapshot %s", parent)
|
||||
}
|
||||
|
||||
// Destination already exists, cleanup key and return without error
|
||||
err = nil
|
||||
if err := sn.Remove(ctx, key); err != nil {
|
||||
return false, errors.Wrapf(err, "failed to cleanup aborted apply %s", key)
|
||||
}
|
||||
return false, nil
|
||||
}
|
||||
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func uniquePart() string {
|
||||
t := time.Now()
|
||||
var b [3]byte
|
||||
// Ignore read failures, just decreases uniqueness
|
||||
rand.Read(b[:])
|
||||
return fmt.Sprintf("%d-%s", t.Nanosecond(), base64.URLEncoding.EncodeToString(b[:]))
|
||||
}
|
||||
|
|
|
@ -0,0 +1,99 @@
|
|||
package rootfs
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
|
||||
"github.com/containerd/containerd/log"
|
||||
"github.com/containerd/containerd/mount"
|
||||
"github.com/containerd/containerd/snapshot"
|
||||
digest "github.com/opencontainers/go-digest"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
var (
|
||||
initializers = map[string]initializerFunc{}
|
||||
)
|
||||
|
||||
type initializerFunc func(string) error
|
||||
|
||||
type Mounter interface {
|
||||
Mount(target string, mounts ...mount.Mount) error
|
||||
Unmount(target string) error
|
||||
}
|
||||
|
||||
func InitRootFS(ctx context.Context, name string, parent digest.Digest, readonly bool, snapshotter snapshot.Snapshotter, mounter Mounter) ([]mount.Mount, error) {
|
||||
_, err := snapshotter.Stat(ctx, name)
|
||||
if err == nil {
|
||||
return nil, errors.Errorf("rootfs already exists")
|
||||
}
|
||||
// TODO: ensure not exist error once added to snapshot package
|
||||
|
||||
parentS := parent.String()
|
||||
|
||||
initName := defaultInitializer
|
||||
initFn := initializers[initName]
|
||||
if initFn != nil {
|
||||
parentS, err = createInitLayer(ctx, parentS, initName, initFn, snapshotter, mounter)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
if readonly {
|
||||
return snapshotter.View(ctx, name, parentS)
|
||||
}
|
||||
|
||||
return snapshotter.Prepare(ctx, name, parentS)
|
||||
}
|
||||
|
||||
func createInitLayer(ctx context.Context, parent, initName string, initFn func(string) error, snapshotter snapshot.Snapshotter, mounter Mounter) (string, error) {
|
||||
initS := fmt.Sprintf("%s %s", parent, initName)
|
||||
if _, err := snapshotter.Stat(ctx, initS); err == nil {
|
||||
return initS, nil
|
||||
}
|
||||
// TODO: ensure not exist error once added to snapshot package
|
||||
|
||||
// Create tempdir
|
||||
td, err := ioutil.TempDir("", "create-init-")
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
defer os.RemoveAll(td)
|
||||
|
||||
mounts, err := snapshotter.Prepare(ctx, td, parent)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
defer func() {
|
||||
if err != nil {
|
||||
// TODO: once implemented uncomment
|
||||
//if rerr := snapshotter.Remove(ctx, td); rerr != nil {
|
||||
// log.G(ctx).Errorf("Failed to remove snapshot %s: %v", td, merr)
|
||||
//}
|
||||
}
|
||||
}()
|
||||
|
||||
if err = mounter.Mount(td, mounts...); err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
if err = initFn(td); err != nil {
|
||||
if merr := mounter.Unmount(td); merr != nil {
|
||||
log.G(ctx).Errorf("Failed to unmount %s: %v", td, merr)
|
||||
}
|
||||
return "", err
|
||||
}
|
||||
|
||||
if err = mounter.Unmount(td); err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
if err := snapshotter.Commit(ctx, initS, td); err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return initS, nil
|
||||
}
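InitRootFS and createInitLayer only depend on the small Mounter interface above; a minimal sketch of an implementation follows, mirroring how the spec options elsewhere in this change mount and unmount snapshots (unix.Unmount with zero flags is an assumption carried over from there).

package rootfs

import (
	"github.com/containerd/containerd/mount"
	"golang.org/x/sys/unix"
)

// allMounter applies each mount.Mount onto the target in order and tears the
// target down again with a plain unmount.
type allMounter struct{}

func (allMounter) Mount(target string, mounts ...mount.Mount) error {
	for _, m := range mounts {
		if err := m.Mount(target); err != nil {
			return err
		}
	}
	return nil
}

func (allMounter) Unmount(target string) error {
	return unix.Unmount(target, 0)
}

// usage sketch: InitRootFS(ctx, "my-container", parent, false, snapshotter, allMounter{})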
|
|
@ -0,0 +1,114 @@
|
|||
package rootfs
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
const (
|
||||
defaultInitializer = "linux-init"
|
||||
)
|
||||
|
||||
func init() {
|
||||
initializers[defaultInitializer] = initFS
|
||||
}
|
||||
|
||||
func createDirectory(name string, uid, gid int) initializerFunc {
|
||||
return func(root string) error {
|
||||
dname := filepath.Join(root, name)
|
||||
st, err := os.Stat(dname)
|
||||
if err != nil && !os.IsNotExist(err) {
|
||||
return err
|
||||
} else if err == nil {
|
||||
if st.IsDir() {
|
||||
stat := st.Sys().(*syscall.Stat_t)
|
||||
if int(stat.Gid) == gid && int(stat.Uid) == uid {
|
||||
return nil
|
||||
}
|
||||
} else {
|
||||
if err := os.Remove(dname); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := os.Mkdir(dname, 0755); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
} else {
|
||||
if err := os.Mkdir(dname, 0755); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return os.Chown(dname, uid, gid)
|
||||
}
|
||||
}
|
||||
|
||||
func touchFile(name string, uid, gid int) initializerFunc {
|
||||
return func(root string) error {
|
||||
fname := filepath.Join(root, name)
|
||||
|
||||
st, err := os.Stat(fname)
|
||||
if err != nil && !os.IsNotExist(err) {
|
||||
return err
|
||||
} else if err == nil {
|
||||
stat := st.Sys().(*syscall.Stat_t)
|
||||
if int(stat.Gid) == gid && int(stat.Uid) == uid {
|
||||
return nil
|
||||
}
|
||||
return os.Chown(fname, uid, gid)
|
||||
}
|
||||
|
||||
f, err := os.OpenFile(fname, os.O_CREATE, 0644)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
return f.Chown(uid, gid)
|
||||
}
|
||||
}
|
||||
|
||||
func symlink(oldname, newname string) initializerFunc {
|
||||
return func(root string) error {
|
||||
linkName := filepath.Join(root, newname)
|
||||
if _, err := os.Stat(linkName); err != nil && !os.IsNotExist(err) {
|
||||
return err
|
||||
} else if err == nil {
|
||||
return nil
|
||||
}
|
||||
return os.Symlink(oldname, linkName)
|
||||
}
|
||||
}
|
||||
|
||||
func initFS(root string) error {
|
||||
st, err := os.Stat(root)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
stat := st.Sys().(*syscall.Stat_t)
|
||||
uid := int(stat.Uid)
|
||||
gid := int(stat.Gid)
|
||||
|
||||
initFuncs := []initializerFunc{
|
||||
createDirectory("/dev", uid, gid),
|
||||
createDirectory("/dev/pts", uid, gid),
|
||||
createDirectory("/dev/shm", uid, gid),
|
||||
touchFile("/dev/console", uid, gid),
|
||||
createDirectory("/proc", uid, gid),
|
||||
createDirectory("/sys", uid, gid),
|
||||
createDirectory("/etc", uid, gid),
|
||||
touchFile("/etc/resolv.conf", uid, gid),
|
||||
touchFile("/etc/hosts", uid, gid),
|
||||
touchFile("/etc/hostname", uid, gid),
|
||||
symlink("/proc/mounts", "/etc/mtab"),
|
||||
}
|
||||
|
||||
for _, fn := range initFuncs {
|
||||
if err := fn(root); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
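The initializer above is simply a composition of the helper constructors (createDirectory, touchFile, symlink). A hypothetical in-package variant showing the same composition, here additionally ensuring /tmp exists with the root's ownership:

package rootfs

import (
	"os"
	"syscall"
)

// initFSWithTmp is a hypothetical initializer sketch: it reuses initFS and
// then applies one more helper built from createDirectory.
func initFSWithTmp(root string) error {
	if err := initFS(root); err != nil {
		return err
	}
	st, err := os.Stat(root)
	if err != nil {
		return err
	}
	stat := st.Sys().(*syscall.Stat_t)
	return createDirectory("/tmp", int(stat.Uid), int(stat.Gid))(root)
}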
|
|
@ -0,0 +1,7 @@
|
|||
// +build !linux
|
||||
|
||||
package rootfs
|
||||
|
||||
const (
|
||||
defaultInitializer = ""
|
||||
)
|
|
@ -7,7 +7,9 @@ const (
|
|||
TaskExitEventTopic = "/tasks/exit"
|
||||
TaskDeleteEventTopic = "/tasks/delete"
|
||||
TaskExecAddedEventTopic = "/tasks/exec-added"
|
||||
TaskExecStartedEventTopic = "/tasks/exec-started"
|
||||
TaskPausedEventTopic = "/tasks/paused"
|
||||
TaskResumedEventTopic = "/tasks/resumed"
|
||||
TaskCheckpointedEventTopic = "/tasks/checkpointed"
|
||||
TaskUnknownTopic = "/tasks/?"
|
||||
)
|
||||
|
|
|
@ -2,6 +2,7 @@ package runtime
|
|||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
"github.com/gogo/protobuf/types"
|
||||
)
|
||||
|
@ -48,6 +49,8 @@ type Task interface {
|
|||
Update(context.Context, *types.Any) error
|
||||
// Process returns a process within the task for the provided id
|
||||
Process(context.Context, string) (Process, error)
|
||||
// Metrics returns runtime specific metrics for a task
|
||||
Metrics(context.Context) (interface{}, error)
|
||||
}
|
||||
|
||||
type ExecOpts struct {
|
||||
|
@ -77,10 +80,13 @@ type State struct {
|
|||
// Pid is the main process id for the container
|
||||
Pid uint32
|
||||
// ExitStatus of the process
|
||||
// Only vaid if the Status is Stopped
|
||||
// Only valid if the Status is Stopped
|
||||
ExitStatus uint32
|
||||
Stdin string
|
||||
Stdout string
|
||||
Stderr string
|
||||
Terminal bool
|
||||
// ExitedAt is the time at which the process exited
|
||||
// Only valid if the Status is Stopped
|
||||
ExitedAt time.Time
|
||||
Stdin string
|
||||
Stdout string
|
||||
Stderr string
|
||||
Terminal bool
|
||||
}
|
||||
|
|
|
@ -2,10 +2,10 @@ package runtime
|
|||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"sync"
|
||||
|
||||
"github.com/containerd/containerd/namespaces"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
var (
|
||||
|
@ -75,7 +75,7 @@ func (l *TaskList) AddWithNamespace(namespace string, t Task) error {
|
|||
l.tasks[namespace] = make(map[string]Task)
|
||||
}
|
||||
if _, ok := l.tasks[namespace][id]; ok {
|
||||
return ErrTaskAlreadyExists
|
||||
return errors.Wrap(ErrTaskAlreadyExists, id)
|
||||
}
|
||||
l.tasks[namespace][id] = t
|
||||
return nil
|
||||
|
|
|
@ -424,7 +424,7 @@ func (s *Service) Write(session api.Content_WriteServer) (err error) {
|
|||
if req.Labels != nil {
|
||||
opts = append(opts, content.WithLabels(req.Labels))
|
||||
}
|
||||
if err := wr.Commit(total, expected, opts...); err != nil {
|
||||
if err := wr.Commit(ctx, total, expected, opts...); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
package content
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
|
||||
contentapi "github.com/containerd/containerd/api/services/content/v1"
|
||||
|
@ -80,7 +81,7 @@ func (rw *remoteWriter) Write(p []byte) (n int, err error) {
|
|||
return
|
||||
}
|
||||
|
||||
func (rw *remoteWriter) Commit(size int64, expected digest.Digest, opts ...content.Opt) error {
|
||||
func (rw *remoteWriter) Commit(ctx context.Context, size int64, expected digest.Digest, opts ...content.Opt) error {
|
||||
var base content.Info
|
||||
for _, opt := range opts {
|
||||
if err := opt(&base); err != nil {
|
||||
|
|
|
@ -6,10 +6,22 @@ import (
|
|||
"github.com/containerd/containerd/errdefs"
|
||||
"github.com/containerd/containerd/mount"
|
||||
"github.com/containerd/containerd/plugin"
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/pkg/errors"
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/grpc"
|
||||
)
|
||||
|
||||
type config struct {
|
||||
// Order is the order of preference in which to try diff algorithms, the
|
||||
// first differ which is supported is used.
|
||||
// Note when multiple differs may be supported, this order will be
|
||||
// respected for which is choosen. Each differ should return the same
|
||||
// correct output, allowing any ordering to be used to prefer
|
||||
// more optimimal implementations.
|
||||
Order []string `toml:"default,omitempty"`
|
||||
}
|
||||
|
||||
func init() {
|
||||
plugin.Register(&plugin.Registration{
|
||||
Type: plugin.GRPCPlugin,
|
||||
|
@ -17,20 +29,34 @@ func init() {
|
|||
Requires: []plugin.PluginType{
|
||||
plugin.DiffPlugin,
|
||||
},
|
||||
Config: &config{
|
||||
Order: []string{"walking"},
|
||||
},
|
||||
Init: func(ic *plugin.InitContext) (interface{}, error) {
|
||||
d, err := ic.Get(plugin.DiffPlugin)
|
||||
differs, err := ic.GetAll(plugin.DiffPlugin)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
orderedNames := ic.Config.(*config).Order
|
||||
ordered := make([]plugin.Differ, len(orderedNames))
|
||||
for i, n := range orderedNames {
|
||||
differ, ok := differs[n]
|
||||
if !ok {
|
||||
return nil, errors.Errorf("needed differ not loaded: %s", n)
|
||||
}
|
||||
ordered[i] = differ.(plugin.Differ)
|
||||
}
|
||||
|
||||
return &service{
|
||||
diff: d.(plugin.Differ),
|
||||
differs: ordered,
|
||||
}, nil
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
type service struct {
|
||||
diff plugin.Differ
|
||||
differs []plugin.Differ
|
||||
}
|
||||
|
||||
func (s *service) Register(gs *grpc.Server) error {
|
||||
|
@ -39,12 +65,20 @@ func (s *service) Register(gs *grpc.Server) error {
|
|||
}
|
||||
|
||||
func (s *service) Apply(ctx context.Context, er *diffapi.ApplyRequest) (*diffapi.ApplyResponse, error) {
|
||||
desc := toDescriptor(er.Diff)
|
||||
// TODO: Check for supported media types
|
||||
var (
|
||||
ocidesc ocispec.Descriptor
|
||||
err error
|
||||
desc = toDescriptor(er.Diff)
|
||||
mounts = toMounts(er.Mounts)
|
||||
)
|
||||
|
||||
mounts := toMounts(er.Mounts)
|
||||
for _, differ := range s.differs {
|
||||
ocidesc, err = differ.Apply(ctx, desc, mounts)
|
||||
if !errdefs.IsNotImplemented(err) {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
ocidesc, err := s.diff.Apply(ctx, desc, mounts)
|
||||
if err != nil {
|
||||
return nil, errdefs.ToGRPC(err)
|
||||
}
|
||||
|
@ -56,10 +90,19 @@ func (s *service) Apply(ctx context.Context, er *diffapi.ApplyRequest) (*diffapi
|
|||
}
|
||||
|
||||
func (s *service) Diff(ctx context.Context, dr *diffapi.DiffRequest) (*diffapi.DiffResponse, error) {
|
||||
aMounts := toMounts(dr.Left)
|
||||
bMounts := toMounts(dr.Right)
|
||||
var (
|
||||
ocidesc ocispec.Descriptor
|
||||
err error
|
||||
aMounts = toMounts(dr.Left)
|
||||
bMounts = toMounts(dr.Right)
|
||||
)
|
||||
|
||||
ocidesc, err := s.diff.DiffMounts(ctx, aMounts, bMounts, dr.MediaType, dr.Ref)
|
||||
for _, differ := range s.differs {
|
||||
ocidesc, err = differ.DiffMounts(ctx, aMounts, bMounts, dr.MediaType, dr.Ref)
|
||||
if !errdefs.IsNotImplemented(err) {
|
||||
break
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
return nil, errdefs.ToGRPC(err)
|
||||
}
|
||||
|
|
|
@ -1,5 +0,0 @@
|
|||
package snapshot
|
||||
|
||||
const (
|
||||
defaultSnapshotter = "overlayfs"
|
||||
)
|
|
@ -1,7 +0,0 @@
|
|||
// +build darwin freebsd solaris
|
||||
|
||||
package snapshot
|
||||
|
||||
const (
|
||||
defaultSnapshotter = "naive"
|
||||
)
|
|
@ -1,5 +0,0 @@
|
|||
package snapshot
|
||||
|
||||
const (
|
||||
defaultSnapshotter = "windows"
|
||||
)
|
|
@ -20,11 +20,6 @@ import (
|
|||
"google.golang.org/grpc"
|
||||
)
|
||||
|
||||
type config struct {
|
||||
// Default is the default snapshotter to use for the service
|
||||
Default string `toml:"default,omitempty"`
|
||||
}
|
||||
|
||||
func init() {
|
||||
plugin.Register(&plugin.Registration{
|
||||
Type: plugin.GRPCPlugin,
|
||||
|
@ -33,9 +28,6 @@ func init() {
|
|||
plugin.SnapshotPlugin,
|
||||
plugin.MetadataPlugin,
|
||||
},
|
||||
Config: &config{
|
||||
Default: defaultSnapshotter,
|
||||
},
|
||||
Init: newService,
|
||||
})
|
||||
}
|
||||
|
@ -43,9 +35,8 @@ func init() {
|
|||
var empty = &protoempty.Empty{}
|
||||
|
||||
type service struct {
|
||||
snapshotters map[string]snapshot.Snapshotter
|
||||
defaultSnapshotterName string
|
||||
publisher events.Publisher
|
||||
snapshotters map[string]snapshot.Snapshotter
|
||||
publisher events.Publisher
|
||||
}
|
||||
|
||||
func newService(ic *plugin.InitContext) (interface{}, error) {
|
||||
|
@ -62,26 +53,24 @@ func newService(ic *plugin.InitContext) (interface{}, error) {
|
|||
snapshotters[name] = metadata.NewSnapshotter(md.(*bolt.DB), name, sn.(snapshot.Snapshotter))
|
||||
}
|
||||
|
||||
cfg := ic.Config.(*config)
|
||||
_, ok := snapshotters[cfg.Default]
|
||||
if !ok {
|
||||
return nil, errors.Errorf("default snapshotter not loaded: %s", cfg.Default)
|
||||
if len(snapshotters) == 0 {
|
||||
return nil, errors.Errorf("failed to create snapshotter service: no snapshotters loaded")
|
||||
}
|
||||
|
||||
return &service{
|
||||
snapshotters: snapshotters,
|
||||
defaultSnapshotterName: cfg.Default,
|
||||
publisher: ic.Events,
|
||||
snapshotters: snapshotters,
|
||||
publisher: ic.Events,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (s *service) getSnapshotter(name string) (snapshot.Snapshotter, error) {
|
||||
if name == "" {
|
||||
name = s.defaultSnapshotterName
|
||||
return nil, errdefs.ToGRPCf(errdefs.ErrInvalidArgument, "snapshotter argument missing")
|
||||
}
|
||||
|
||||
sn, ok := s.snapshotters[name]
|
||||
if !ok {
|
||||
return nil, errors.Errorf("snapshotter not loaded: %s", name)
|
||||
return nil, errdefs.ToGRPCf(errdefs.ErrInvalidArgument, "snapshotter not loaded: %s", name)
|
||||
}
|
||||
return sn, nil
|
||||
}
|
||||
|
|
|
@ -2,6 +2,8 @@ package snapshot
|
|||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/containerd/containerd/mount"
|
||||
|
@ -12,11 +14,26 @@ type Kind uint8
|
|||
|
||||
// definitions of snapshot kinds
|
||||
const (
|
||||
KindView Kind = iota + 1
|
||||
KindUnknown Kind = iota
|
||||
KindView
|
||||
KindActive
|
||||
KindCommitted
|
||||
)
|
||||
|
||||
func ParseKind(s string) Kind {
|
||||
s = strings.ToLower(s)
|
||||
switch s {
|
||||
case "view":
|
||||
return KindView
|
||||
case "active":
|
||||
return KindActive
|
||||
case "committed":
|
||||
return KindCommitted
|
||||
}
|
||||
|
||||
return KindUnknown
|
||||
}
|
||||
|
||||
func (k Kind) String() string {
|
||||
switch k {
|
||||
case KindView:
|
||||
|
@ -25,19 +42,34 @@ func (k Kind) String() string {
|
|||
return "Active"
|
||||
case KindCommitted:
|
||||
return "Committed"
|
||||
default:
|
||||
return "Unknown"
|
||||
}
|
||||
|
||||
return "Unknown"
|
||||
}
|
||||
|
||||
func (k Kind) MarshalJSON() ([]byte, error) {
|
||||
return json.Marshal(k.String())
|
||||
}
|
||||
|
||||
func (k *Kind) UnmarshalJSON(b []byte) error {
|
||||
var s string
|
||||
if err := json.Unmarshal(b, &s); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
*k = ParseKind(s)
|
||||
return nil
|
||||
}
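A quick sketch of the JSON round-trip these methods provide; the values follow String and ParseKind above.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/containerd/containerd/snapshot"
)

func main() {
	// MarshalJSON goes through String(), so an active snapshot serializes as "Active".
	b, err := json.Marshal(snapshot.KindActive)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // "Active"

	// UnmarshalJSON goes through ParseKind, which is case-insensitive; unknown
	// strings decode to KindUnknown rather than returning an error.
	var k snapshot.Kind
	if err := json.Unmarshal([]byte(`"committed"`), &k); err != nil {
		panic(err)
	}
	fmt.Println(k == snapshot.KindCommitted) // true
}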
|
||||
|
||||
// Info provides information about a particular snapshot.
|
||||
// JSON marshallability is supported for interaction with tools like ctr.
|
||||
type Info struct {
|
||||
Kind Kind // active or committed snapshot
|
||||
Name string // name or key of snapshot
|
||||
Parent string // name of parent snapshot
|
||||
Labels map[string]string // Labels for snapshot
|
||||
Created time.Time // Created time
|
||||
Updated time.Time // Last update time
|
||||
Parent string `json:",omitempty"` // name of parent snapshot
|
||||
Labels map[string]string `json:",omitempty"` // Labels for snapshot
|
||||
Created time.Time `json:",omitempty"` // Created time
|
||||
Updated time.Time `json:",omitempty"` // Last update time
|
||||
}
|
||||
|
||||
// Usage defines statistics for disk resources consumed by the snapshot.
|
||||
|
|
vendor/github.com/containerd/containerd/snapshotter_default_linux.go (new file, generated, vendored, 8 lines)
|
@ -0,0 +1,8 @@
|
|||
package containerd
|
||||
|
||||
const (
|
||||
// DefaultSnapshotter will set the default snapshotter for the platform.
|
||||
// This will be based on the client compilation target, so take that into
|
||||
// account when choosing this value.
|
||||
DefaultSnapshotter = "overlayfs"
|
||||
)
|
vendor/github.com/containerd/containerd/snapshotter_default_unix.go (new file, generated, vendored, 10 lines)
|
@ -0,0 +1,10 @@
|
|||
// +build darwin freebsd solaris
|
||||
|
||||
package containerd
|
||||
|
||||
const (
|
||||
// DefaultSnapshotter will set the default snapshotter for the platform.
|
||||
// This will be based on the client compilation target, so take that into
|
||||
// account when choosing this value.
|
||||
DefaultSnapshotter = "naive"
|
||||
)
|
vendor/github.com/containerd/containerd/snapshotter_default_windows.go (new file, generated, vendored, 8 lines)
|
@ -0,0 +1,8 @@
|
|||
package containerd
|
||||
|
||||
const (
|
||||
// DefaultSnapshotter will set the default snapshotter for the platform.
|
||||
// This will be based on the client compilation target, so take that into
|
||||
// account when choosing this value.
|
||||
DefaultSnapshotter = "windows"
|
||||
)
|
|
@ -1,16 +1,21 @@
|
|||
package containerd
|
||||
|
||||
import specs "github.com/opencontainers/runtime-spec/specs-go"
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/containerd/containerd/containers"
|
||||
specs "github.com/opencontainers/runtime-spec/specs-go"
|
||||
)
|
||||
|
||||
// GenerateSpec will generate a default spec from the provided image
|
||||
// for use as a containerd container
|
||||
func GenerateSpec(opts ...SpecOpts) (*specs.Spec, error) {
|
||||
func GenerateSpec(ctx context.Context, client *Client, c *containers.Container, opts ...SpecOpts) (*specs.Spec, error) {
|
||||
s, err := createDefaultSpec()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for _, o := range opts {
|
||||
if err := o(s); err != nil {
|
||||
if err := o(ctx, client, c, s); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1,13 +1,19 @@
|
|||
package containerd
|
||||
|
||||
import specs "github.com/opencontainers/runtime-spec/specs-go"
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/containerd/containerd/containers"
|
||||
"github.com/containerd/containerd/typeurl"
|
||||
specs "github.com/opencontainers/runtime-spec/specs-go"
|
||||
)
|
||||
|
||||
// SpecOpts sets spec specific information to a newly generated OCI spec
|
||||
type SpecOpts func(s *specs.Spec) error
|
||||
type SpecOpts func(context.Context, *Client, *containers.Container, *specs.Spec) error
|
||||
|
||||
// WithProcessArgs replaces the args on the generated spec
|
||||
func WithProcessArgs(args ...string) SpecOpts {
|
||||
return func(s *specs.Spec) error {
|
||||
return func(_ context.Context, _ *Client, _ *containers.Container, s *specs.Spec) error {
|
||||
s.Process.Args = args
|
||||
return nil
|
||||
}
|
||||
|
@ -15,8 +21,46 @@ func WithProcessArgs(args ...string) SpecOpts {
|
|||
|
||||
// WithHostname sets the container's hostname
|
||||
func WithHostname(name string) SpecOpts {
|
||||
return func(s *specs.Spec) error {
|
||||
return func(_ context.Context, _ *Client, _ *containers.Container, s *specs.Spec) error {
|
||||
s.Hostname = name
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithNewSpec generates a new spec for a new container
|
||||
func WithNewSpec(opts ...SpecOpts) NewContainerOpts {
|
||||
return func(ctx context.Context, client *Client, c *containers.Container) error {
|
||||
s, err := createDefaultSpec()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, o := range opts {
|
||||
if err := o(ctx, client, c, s); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
any, err := typeurl.MarshalAny(s)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
c.Spec = any
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithSpec sets the provided spec on the container
|
||||
func WithSpec(s *specs.Spec, opts ...SpecOpts) NewContainerOpts {
|
||||
return func(ctx context.Context, client *Client, c *containers.Container) error {
|
||||
for _, o := range opts {
|
||||
if err := o(ctx, client, c, s); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
any, err := typeurl.MarshalAny(s)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
c.Spec = any
|
||||
return nil
|
||||
}
|
||||
}
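With the reworked SpecOpts signature, container creation threads the client and container record through every option; a hedged sketch of the calling side (image pull and error handling omitted, WithNewSnapshot assumed from the client API of this era, names illustrative):

package main

import (
	"context"

	"github.com/containerd/containerd"
	"github.com/containerd/containerd/namespaces"
)

// newRedisContainer shows the new option flow: WithNewSpec builds a default
// spec and applies each SpecOpts with (ctx, client, container, spec), which is
// how options such as WithImageConfig reach the content store via the client.
func newRedisContainer(client *containerd.Client, image containerd.Image) (containerd.Container, error) {
	ctx := namespaces.WithNamespace(context.Background(), "example")
	return client.NewContainer(ctx, "redis-1",
		containerd.WithNewSnapshot("redis-1-rootfs", image),
		containerd.WithNewSpec(
			containerd.WithImageConfig(image),
			containerd.WithHostname("redis"),
		),
	)
}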
|
||||
|
|
|
@ -6,23 +6,30 @@ import (
|
|||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
|
||||
"github.com/containerd/containerd/containers"
|
||||
"github.com/containerd/containerd/content"
|
||||
"github.com/containerd/containerd/fs"
|
||||
"github.com/containerd/containerd/images"
|
||||
"github.com/containerd/containerd/namespaces"
|
||||
"github.com/containerd/containerd/typeurl"
|
||||
"github.com/containerd/containerd/platforms"
|
||||
"github.com/opencontainers/image-spec/identity"
|
||||
"github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/opencontainers/runc/libcontainer/user"
|
||||
specs "github.com/opencontainers/runtime-spec/specs-go"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// WithTTY sets the information on the spec as well as the environment variables for
|
||||
// using a TTY
|
||||
func WithTTY(s *specs.Spec) error {
|
||||
func WithTTY(_ context.Context, _ *Client, _ *containers.Container, s *specs.Spec) error {
|
||||
s.Process.Terminal = true
|
||||
s.Process.Env = append(s.Process.Env, "TERM=xterm")
|
||||
return nil
|
||||
|
@ -30,7 +37,7 @@ func WithTTY(s *specs.Spec) error {
|
|||
|
||||
// WithHostNamespace allows a task to run inside the host's linux namespace
|
||||
func WithHostNamespace(ns specs.LinuxNamespaceType) SpecOpts {
|
||||
return func(s *specs.Spec) error {
|
||||
return func(_ context.Context, _ *Client, _ *containers.Container, s *specs.Spec) error {
|
||||
for i, n := range s.Linux.Namespaces {
|
||||
if n.Type == ns {
|
||||
s.Linux.Namespaces = append(s.Linux.Namespaces[:i], s.Linux.Namespaces[i+1:]...)
|
||||
|
@ -44,7 +51,7 @@ func WithHostNamespace(ns specs.LinuxNamespaceType) SpecOpts {
|
|||
// WithLinuxNamespace uses the passed in namespace for the spec. If a namespace of the same type already exists in the
|
||||
// spec, the existing namespace is replaced by the one provided.
|
||||
func WithLinuxNamespace(ns specs.LinuxNamespace) SpecOpts {
|
||||
return func(s *specs.Spec) error {
|
||||
return func(_ context.Context, _ *Client, _ *containers.Container, s *specs.Spec) error {
|
||||
for i, n := range s.Linux.Namespaces {
|
||||
if n.Type == ns.Type {
|
||||
before := s.Linux.Namespaces[:i]
|
||||
|
@ -60,13 +67,13 @@ func WithLinuxNamespace(ns specs.LinuxNamespace) SpecOpts {
|
|||
}
|
||||
|
||||
// WithImageConfig configures the spec from the configuration of an Image
|
||||
func WithImageConfig(ctx context.Context, i Image) SpecOpts {
|
||||
return func(s *specs.Spec) error {
|
||||
func WithImageConfig(i Image) SpecOpts {
|
||||
return func(ctx context.Context, client *Client, c *containers.Container, s *specs.Spec) error {
|
||||
var (
|
||||
image = i.(*image)
|
||||
store = image.client.ContentStore()
|
||||
store = client.ContentStore()
|
||||
)
|
||||
ic, err := image.i.Config(ctx, store)
|
||||
ic, err := image.i.Config(ctx, store, platforms.Format(platforms.Default()))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -89,9 +96,6 @@ func WithImageConfig(ctx context.Context, i Image) SpecOpts {
|
|||
return fmt.Errorf("unknown image config media type %s", ic.MediaType)
|
||||
}
|
||||
s.Process.Env = append(s.Process.Env, config.Env...)
|
||||
var (
|
||||
uid, gid uint32
|
||||
)
|
||||
cmd := config.Cmd
|
||||
s.Process.Args = append(config.Entrypoint, cmd...)
|
||||
if config.User != "" {
|
||||
|
@ -100,24 +104,30 @@ func WithImageConfig(ctx context.Context, i Image) SpecOpts {
|
|||
case 1:
|
||||
v, err := strconv.ParseUint(parts[0], 0, 10)
|
||||
if err != nil {
|
||||
// if we cannot parse as a uint they try to see if it is a username
|
||||
if err := WithUsername(config.User)(ctx, client, c, s); err != nil {
|
||||
return err
|
||||
}
|
||||
return err
|
||||
}
|
||||
if err := WithUserID(uint32(v))(ctx, client, c, s); err != nil {
|
||||
return err
|
||||
}
|
||||
uid, gid = uint32(v), uint32(v)
|
||||
case 2:
|
||||
v, err := strconv.ParseUint(parts[0], 0, 10)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
uid = uint32(v)
|
||||
uid := uint32(v)
|
||||
if v, err = strconv.ParseUint(parts[1], 0, 10); err != nil {
|
||||
return err
|
||||
}
|
||||
gid = uint32(v)
|
||||
gid := uint32(v)
|
||||
s.Process.User.UID, s.Process.User.GID = uid, gid
|
||||
default:
|
||||
return fmt.Errorf("invalid USER value %s", config.User)
|
||||
}
|
||||
}
|
||||
s.Process.User.UID, s.Process.User.GID = uid, gid
|
||||
cwd := config.WorkingDir
|
||||
if cwd == "" {
|
||||
cwd = "/"
|
||||
|
@ -129,7 +139,7 @@ func WithImageConfig(ctx context.Context, i Image) SpecOpts {
|
|||
|
||||
// WithRootFSPath specifies unmanaged rootfs path.
|
||||
func WithRootFSPath(path string, readonly bool) SpecOpts {
|
||||
return func(s *specs.Spec) error {
|
||||
return func(_ context.Context, _ *Client, _ *containers.Container, s *specs.Spec) error {
|
||||
s.Root = &specs.Root{
|
||||
Path: path,
|
||||
Readonly: readonly,
|
||||
|
@ -139,18 +149,6 @@ func WithRootFSPath(path string, readonly bool) SpecOpts {
|
|||
}
|
||||
}
|
||||
|
||||
// WithSpec sets the provided spec for a new container
|
||||
func WithSpec(spec *specs.Spec) NewContainerOpts {
|
||||
return func(ctx context.Context, client *Client, c *containers.Container) error {
|
||||
any, err := typeurl.MarshalAny(spec)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
c.Spec = any
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithResources sets the provided resources on the spec for task updates
|
||||
func WithResources(resources *specs.LinuxResources) UpdateTaskOpts {
|
||||
return func(ctx context.Context, client *Client, r *UpdateTaskInfo) error {
|
||||
|
@ -160,13 +158,13 @@ func WithResources(resources *specs.LinuxResources) UpdateTaskOpts {
|
|||
}
|
||||
|
||||
// WithNoNewPrivileges sets no_new_privileges on the process for the container
|
||||
func WithNoNewPrivileges(s *specs.Spec) error {
|
||||
func WithNoNewPrivileges(_ context.Context, _ *Client, _ *containers.Container, s *specs.Spec) error {
|
||||
s.Process.NoNewPrivileges = true
|
||||
return nil
|
||||
}
|
||||
|
||||
// WithHostHostsFile bind-mounts the host's /etc/hosts into the container as readonly
|
||||
func WithHostHostsFile(s *specs.Spec) error {
|
||||
func WithHostHostsFile(_ context.Context, _ *Client, _ *containers.Container, s *specs.Spec) error {
|
||||
s.Mounts = append(s.Mounts, specs.Mount{
|
||||
Destination: "/etc/hosts",
|
||||
Type: "bind",
|
||||
|
@ -177,7 +175,7 @@ func WithHostHostsFile(s *specs.Spec) error {
|
|||
}
|
||||
|
||||
// WithHostResolvconf bind-mounts the host's /etc/resolv.conf into the container as readonly
|
||||
func WithHostResolvconf(s *specs.Spec) error {
|
||||
func WithHostResolvconf(_ context.Context, _ *Client, _ *containers.Container, s *specs.Spec) error {
|
||||
s.Mounts = append(s.Mounts, specs.Mount{
|
||||
Destination: "/etc/resolv.conf",
|
||||
Type: "bind",
|
||||
|
@ -188,7 +186,7 @@ func WithHostResolvconf(s *specs.Spec) error {
|
|||
}
|
||||
|
||||
// WithHostLocaltime bind-mounts the host's /etc/localtime into the container as readonly
|
||||
func WithHostLocaltime(s *specs.Spec) error {
|
||||
func WithHostLocaltime(_ context.Context, _ *Client, _ *containers.Container, s *specs.Spec) error {
|
||||
s.Mounts = append(s.Mounts, specs.Mount{
|
||||
Destination: "/etc/localtime",
|
||||
Type: "bind",
|
||||
|
@ -201,7 +199,7 @@ func WithHostLocaltime(s *specs.Spec) error {
|
|||
// WithUserNamespace sets the uid and gid mappings for the task
|
||||
// this can be called multiple times to add more mappings to the generated spec
|
||||
func WithUserNamespace(container, host, size uint32) SpecOpts {
|
||||
return func(s *specs.Spec) error {
|
||||
return func(_ context.Context, _ *Client, _ *containers.Container, s *specs.Spec) error {
|
||||
var hasUserns bool
|
||||
for _, ns := range s.Linux.Namespaces {
|
||||
if ns.Type == specs.UserNamespace {
|
||||
|
@ -228,11 +226,23 @@ func WithUserNamespace(container, host, size uint32) SpecOpts {
|
|||
// WithRemappedSnapshot creates a new snapshot and remaps the uid/gid for the
|
||||
// filesystem to be used by a container with user namespaces
|
||||
func WithRemappedSnapshot(id string, i Image, uid, gid uint32) NewContainerOpts {
|
||||
return withRemappedSnapshotBase(id, i, uid, gid, false)
|
||||
}
|
||||
|
||||
// WithRemappedSnapshotView is similar to WithRemappedSnapshot but rootfs is mounted as read-only.
|
||||
func WithRemappedSnapshotView(id string, i Image, uid, gid uint32) NewContainerOpts {
|
||||
return withRemappedSnapshotBase(id, i, uid, gid, true)
|
||||
}
|
||||
|
||||
func withRemappedSnapshotBase(id string, i Image, uid, gid uint32, readonly bool) NewContainerOpts {
|
||||
return func(ctx context.Context, client *Client, c *containers.Container) error {
|
||||
diffIDs, err := i.(*image).i.RootFS(ctx, client.ContentStore())
|
||||
diffIDs, err := i.(*image).i.RootFS(ctx, client.ContentStore(), platforms.Format(platforms.Default()))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
setSnapshotterIfEmpty(c)
|
||||
|
||||
var (
|
||||
snapshotter = client.SnapshotService(c.Snapshotter)
|
||||
parent = identity.ChainID(diffIDs).String()
|
||||
|
@ -242,7 +252,7 @@ func WithRemappedSnapshot(id string, i Image, uid, gid uint32) NewContainerOpts
|
|||
if _, err := snapshotter.Prepare(ctx, id, usernsID); err != nil {
|
||||
return err
|
||||
}
|
||||
c.RootFS = id
|
||||
c.SnapshotKey = id
|
||||
c.Image = i.Name()
|
||||
return nil
|
||||
}
|
||||
|
@ -257,10 +267,15 @@ func WithRemappedSnapshot(id string, i Image, uid, gid uint32) NewContainerOpts
|
|||
if err := snapshotter.Commit(ctx, usernsID, usernsID+"-remap"); err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := snapshotter.Prepare(ctx, id, usernsID); err != nil {
|
||||
if readonly {
|
||||
_, err = snapshotter.View(ctx, id, usernsID)
|
||||
} else {
|
||||
_, err = snapshotter.Prepare(ctx, id, usernsID)
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
c.RootFS = id
|
||||
c.SnapshotKey = id
|
||||
c.Image = i.Name()
|
||||
return nil
|
||||
}
|
||||
|
@ -268,7 +283,7 @@ func WithRemappedSnapshot(id string, i Image, uid, gid uint32) NewContainerOpts
|
|||
|
||||
// WithCgroup sets the container's cgroup path
|
||||
func WithCgroup(path string) SpecOpts {
|
||||
return func(s *specs.Spec) error {
|
||||
return func(_ context.Context, _ *Client, _ *containers.Container, s *specs.Spec) error {
|
||||
s.Linux.CgroupsPath = path
|
||||
return nil
|
||||
}
|
||||
|
@ -276,13 +291,131 @@ func WithCgroup(path string) SpecOpts {
|
|||
|
||||
// WithNamespacedCgroup uses the namespace set on the context to create a
|
||||
// root directory for containers in the cgroup with the id as the subcgroup
|
||||
func WithNamespacedCgroup(ctx context.Context, id string) SpecOpts {
|
||||
return func(s *specs.Spec) error {
|
||||
func WithNamespacedCgroup() SpecOpts {
|
||||
return func(ctx context.Context, _ *Client, c *containers.Container, s *specs.Spec) error {
|
||||
namespace, err := namespaces.NamespaceRequired(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
s.Linux.CgroupsPath = filepath.Join("/", namespace, id)
|
||||
s.Linux.CgroupsPath = filepath.Join("/", namespace, c.ID)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithUidGid allows the UID and GID for the Process to be set
|
||||
func WithUidGid(uid, gid uint32) SpecOpts {
|
||||
return func(_ context.Context, _ *Client, _ *containers.Container, s *specs.Spec) error {
|
||||
s.Process.User.UID = uid
|
||||
s.Process.User.GID = gid
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithUserID sets the correct UID and GID for the container based
|
||||
// on the image's /etc/passwd contents. If /etc/passwd does not exist,
|
||||
// or uid is not found in /etc/passwd, it sets gid to be the same as
|
||||
// uid, and does not return an error.
|
||||
func WithUserID(uid uint32) SpecOpts {
|
||||
return func(ctx context.Context, client *Client, c *containers.Container, s *specs.Spec) error {
|
||||
if c.Snapshotter == "" {
|
||||
return errors.Errorf("no snapshotter set for container")
|
||||
}
|
||||
if c.SnapshotKey == "" {
|
||||
return errors.Errorf("rootfs snapshot not created for container")
|
||||
}
|
||||
snapshotter := client.SnapshotService(c.Snapshotter)
|
||||
mounts, err := snapshotter.Mounts(ctx, c.SnapshotKey)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
root, err := ioutil.TempDir("", "ctd-username")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer os.RemoveAll(root)
|
||||
for _, m := range mounts {
|
||||
if err := m.Mount(root); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
defer unix.Unmount(root, 0)
|
||||
ppath, err := fs.RootPath(root, "/etc/passwd")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
f, err := os.Open(ppath)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
s.Process.User.UID, s.Process.User.GID = uid, uid
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
defer f.Close()
|
||||
users, err := user.ParsePasswdFilter(f, func(u user.User) bool {
|
||||
return u.Uid == int(uid)
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(users) == 0 {
|
||||
s.Process.User.UID, s.Process.User.GID = uid, uid
|
||||
return nil
|
||||
}
|
||||
u := users[0]
|
||||
s.Process.User.UID, s.Process.User.GID = uint32(u.Uid), uint32(u.Gid)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithUsername sets the correct UID and GID for the container
|
||||
// based on the image's /etc/passwd contents. If /etc/passwd
|
||||
// does not exist, or the username is not found in /etc/passwd,
|
||||
// it returns an error.
|
||||
func WithUsername(username string) SpecOpts {
|
||||
return func(ctx context.Context, client *Client, c *containers.Container, s *specs.Spec) error {
|
||||
if c.Snapshotter == "" {
|
||||
return errors.Errorf("no snapshotter set for container")
|
||||
}
|
||||
if c.SnapshotKey == "" {
|
||||
return errors.Errorf("rootfs snapshot not created for container")
|
||||
}
|
||||
snapshotter := client.SnapshotService(c.Snapshotter)
|
||||
mounts, err := snapshotter.Mounts(ctx, c.SnapshotKey)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
root, err := ioutil.TempDir("", "ctd-username")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer os.RemoveAll(root)
|
||||
for _, m := range mounts {
|
||||
if err := m.Mount(root); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
defer unix.Unmount(root, 0)
|
||||
ppath, err := fs.RootPath(root, "/etc/passwd")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
f, err := os.Open(ppath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer f.Close()
|
||||
users, err := user.ParsePasswdFilter(f, func(u user.User) bool {
|
||||
return u.Name == username
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(users) == 0 {
|
||||
return errors.Errorf("no users found for %s", username)
|
||||
}
|
||||
u := users[0]
|
||||
s.Process.User.UID, s.Process.User.GID = uint32(u.Uid), uint32(u.Gid)
|
||||
return nil
|
||||
}
|
||||
}
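Both user options need to mount the container's rootfs snapshot to read /etc/passwd, so the container record must already carry Snapshotter and SnapshotKey when they run. A hedged sketch using GenerateSpec directly (identifiers illustrative):

package main

import (
	"context"

	"github.com/containerd/containerd"
	"github.com/containerd/containerd/containers"
	specs "github.com/opencontainers/runtime-spec/specs-go"
)

// specForUID builds a spec for an existing container record whose rootfs
// snapshot has already been prepared; WithUserID falls back to uid/gid
// 1000:1000 if /etc/passwd has no matching entry, while WithUsername would
// return an error instead.
func specForUID(ctx context.Context, client *containerd.Client, c *containers.Container) (*specs.Spec, error) {
	return containerd.GenerateSpec(ctx, client, c,
		containerd.WithUserID(1000),
	)
}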
|
||||
|
|
|
@ -10,18 +10,18 @@ import (
    "github.com/containerd/containerd/containers"
    "github.com/containerd/containerd/content"
    "github.com/containerd/containerd/images"
    "github.com/containerd/containerd/typeurl"
    "github.com/containerd/containerd/platforms"
    "github.com/opencontainers/image-spec/specs-go/v1"
    specs "github.com/opencontainers/runtime-spec/specs-go"
)

func WithImageConfig(ctx context.Context, i Image) SpecOpts {
    return func(s *specs.Spec) error {
func WithImageConfig(i Image) SpecOpts {
    return func(ctx context.Context, client *Client, _ *containers.Container, s *specs.Spec) error {
        var (
            image = i.(*image)
            store = image.client.ContentStore()
            store = client.ContentStore()
        )
        ic, err := image.i.Config(ctx, store)
        ic, err := image.i.Config(ctx, store, platforms.Format(platforms.Default()))
        if err != nil {
            return err
        }

@ -52,25 +52,17 @@ func WithImageConfig(ctx context.Context, i Image) SpecOpts {
}

func WithTTY(width, height int) SpecOpts {
    return func(s *specs.Spec) error {
    return func(_ context.Context, _ *Client, _ *containers.Container, s *specs.Spec) error {
        s.Process.Terminal = true
        if s.Process.ConsoleSize == nil {
            s.Process.ConsoleSize = &specs.Box{}
        }
        s.Process.ConsoleSize.Width = uint(width)
        s.Process.ConsoleSize.Height = uint(height)
        return nil
    }
}

func WithSpec(spec *specs.Spec) NewContainerOpts {
    return func(ctx context.Context, client *Client, c *containers.Container) error {
        any, err := typeurl.MarshalAny(spec)
        if err != nil {
            return err
        }
        c.Spec = any
        return nil
    }
}

func WithResources(resources *specs.WindowsResources) UpdateTaskOpts {
    return func(ctx context.Context, client *Client, r *UpdateTaskInfo) error {
        r.Resources = resources
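A hedged usage sketch, assuming a connected *containerd.Client and an already pulled Image (both names are placeholders): with the reworked signatures, the image config and TTY options are applied through the same context- and client-aware GenerateSpec call path.

// Illustrative only; error handling is trimmed and `client`/`image` are assumed
// to exist. The nil container argument mirrors how the buildkit worker further
// below calls GenerateSpec when no container record is involved.
spec, err := containerd.GenerateSpec(ctx, client, nil,
    containerd.WithImageConfig(image),
    containerd.WithTTY(80, 24),
)
if err != nil {
    return err
}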
@ -25,7 +25,7 @@ var (
    }
)

func defaltCaps() []string {
func defaultCaps() []string {
    return []string{
        "CAP_CHOWN",
        "CAP_DAC_OVERRIDE",

@ -79,10 +79,10 @@ func createDefaultSpec() (*specs.Spec, error) {
            GID: 0,
        },
        Capabilities: &specs.LinuxCapabilities{
            Bounding:    defaltCaps(),
            Permitted:   defaltCaps(),
            Inheritable: defaltCaps(),
            Effective:   defaltCaps(),
            Bounding:    defaultCaps(),
            Permitted:   defaultCaps(),
            Inheritable: defaultCaps(),
            Effective:   defaultCaps(),
        },
        Rlimits: []specs.POSIXRlimit{
            {
@ -10,6 +10,7 @@ import (
    "strings"
    "sync"
    "syscall"
    "time"

    eventsapi "github.com/containerd/containerd/api/services/events/v1"
    "github.com/containerd/containerd/api/services/tasks/v1"

@ -38,6 +39,8 @@ type Status struct {
    Status ProcessStatus
    // ExitStatus returned by the process
    ExitStatus uint32
    // ExitedTime is the time at which the process died
    ExitTime time.Time
}

type ProcessStatus string

@ -113,13 +116,20 @@ type Task interface {
    Checkpoint(context.Context, ...CheckpointTaskOpts) (v1.Descriptor, error)
    // Update modifies executing tasks with updated settings
    Update(context.Context, ...UpdateTaskOpts) error
    // LoadProcess loads a previously created exec'd process
    LoadProcess(context.Context, string, IOAttach) (Process, error)
    // Metrics returns task metrics for runtime specific metrics
    //
    // The metric types are generic to containerd and change depending on the runtime
    // For the built in Linux runtime, github.com/containerd/cgroups.Metrics
    // are returned in protobuf format
    Metrics(context.Context) (*types.Metric, error)
}

var _ = (Task)(&task{})

type task struct {
    client    *Client
    container Container
    client *Client

    io IO
    id string

@ -159,10 +169,17 @@ func (t *task) Start(ctx context.Context) error {
    return errdefs.FromGRPC(err)
}

func (t *task) Kill(ctx context.Context, s syscall.Signal) error {
func (t *task) Kill(ctx context.Context, s syscall.Signal, opts ...KillOpts) error {
    var i KillInfo
    for _, o := range opts {
        if err := o(ctx, t, &i); err != nil {
            return err
        }
    }
    _, err := t.client.TaskService().Kill(ctx, &tasks.KillRequest{
        Signal:      uint32(s),
        ContainerID: t.id,
        All:         i.All,
    })
    if err != nil {
        return errdefs.FromGRPC(err)

@ -194,18 +211,22 @@ func (t *task) Status(ctx context.Context) (Status, error) {
    return Status{
        Status:     ProcessStatus(strings.ToLower(r.Process.Status.String())),
        ExitStatus: r.Process.ExitStatus,
        ExitTime:   r.Process.ExitedAt,
    }, nil
}

func (t *task) Wait(ctx context.Context) (uint32, error) {
func (t *task) Wait(ctx context.Context) (<-chan ExitStatus, error) {
    cancellable, cancel := context.WithCancel(ctx)
    defer cancel()
    eventstream, err := t.client.EventService().Subscribe(cancellable, &eventsapi.SubscribeRequest{
        Filters: []string{"topic==" + runtime.TaskExitEventTopic},
    })
    if err != nil {
        return UnknownExitStatus, errdefs.FromGRPC(err)
        cancel()
        return nil, errdefs.FromGRPC(err)
    }

    chStatus := make(chan ExitStatus, 1)

    t.mu.Lock()
    checkpoint := t.deferred != nil
    t.mu.Unlock()

@ -213,42 +234,56 @@ func (t *task) Wait(ctx context.Context) (uint32, error) {
        // first check if the task has exited
        status, err := t.Status(ctx)
        if err != nil {
            return UnknownExitStatus, errdefs.FromGRPC(err)
            cancel()
            return nil, errdefs.FromGRPC(err)
        }
        if status.Status == Stopped {
            return status.ExitStatus, nil
            cancel()
            chStatus <- ExitStatus{code: status.ExitStatus, exitedAt: status.ExitTime}
            return chStatus, nil
        }
    }
    for {
        evt, err := eventstream.Recv()
        if err != nil {
            return UnknownExitStatus, errdefs.FromGRPC(err)
        }
        if typeurl.Is(evt.Event, &eventsapi.TaskExit{}) {
            v, err := typeurl.UnmarshalAny(evt.Event)

    go func() {
        defer cancel()
        chStatus <- ExitStatus{} // signal that goroutine is running
        for {
            evt, err := eventstream.Recv()
            if err != nil {
                return UnknownExitStatus, err
                chStatus <- ExitStatus{code: UnknownExitStatus, err: errdefs.FromGRPC(err)}
                return
            }
            e := v.(*eventsapi.TaskExit)
            if e.ContainerID == t.id && e.Pid == t.pid {
                return e.ExitStatus, nil
            if typeurl.Is(evt.Event, &eventsapi.TaskExit{}) {
                v, err := typeurl.UnmarshalAny(evt.Event)
                if err != nil {
                    chStatus <- ExitStatus{code: UnknownExitStatus, err: err}
                    return
                }
                e := v.(*eventsapi.TaskExit)
                if e.ContainerID == t.id && e.Pid == t.pid {
                    chStatus <- ExitStatus{code: e.ExitStatus, exitedAt: e.ExitedAt}
                    return
                }
            }
        }
    }()

    <-chStatus // wait for the goroutine to be running
    return chStatus, nil
}

// Delete deletes the task and its runtime state
// it returns the exit status of the task and any errors that were encountered
// during cleanup
func (t *task) Delete(ctx context.Context, opts ...ProcessDeleteOpts) (uint32, error) {
func (t *task) Delete(ctx context.Context, opts ...ProcessDeleteOpts) (*ExitStatus, error) {
    for _, o := range opts {
        if err := o(ctx, t); err != nil {
            return UnknownExitStatus, err
            return nil, err
        }
    }
    status, err := t.Status(ctx)
    if err != nil && errdefs.IsNotFound(err) {
        return UnknownExitStatus, err
        return nil, err
    }
    switch status.Status {
    case Stopped, Unknown, "":

@ -259,7 +294,7 @@ func (t *task) Delete(ctx context.Context, opts ...ProcessDeleteOpts) (uint32, e
        }
        fallthrough
    default:
        return UnknownExitStatus, errors.Wrapf(errdefs.ErrFailedPrecondition, "task must be stopped before deletion: %s", status.Status)
        return nil, errors.Wrapf(errdefs.ErrFailedPrecondition, "task must be stopped before deletion: %s", status.Status)
    }
    if t.io != nil {
        t.io.Cancel()

@ -270,9 +305,9 @@ func (t *task) Delete(ctx context.Context, opts ...ProcessDeleteOpts) (uint32, e
        ContainerID: t.id,
    })
    if err != nil {
        return UnknownExitStatus, errdefs.FromGRPC(err)
        return nil, errdefs.FromGRPC(err)
    }
    return r.ExitStatus, nil
    return &ExitStatus{code: r.ExitStatus, exitedAt: r.ExitedAt}, nil
}

func (t *task) Exec(ctx context.Context, id string, spec *specs.Process, ioCreate IOCreation) (Process, error) {

@ -307,7 +342,6 @@ func (t *task) Exec(ctx context.Context, id string, spec *specs.Process, ioCreat
        id:   id,
        task: t,
        io:   i,
        spec: spec,
    }, nil
}

@ -381,7 +415,7 @@ func (t *task) Checkpoint(ctx context.Context, opts ...CheckpointTaskOpts) (d v1
    if err := t.checkpointImage(ctx, &index, cr.Image); err != nil {
        return d, err
    }
    if err := t.checkpointRWSnapshot(ctx, &index, cr.Snapshotter, cr.RootFS); err != nil {
    if err := t.checkpointRWSnapshot(ctx, &index, cr.Snapshotter, cr.SnapshotKey); err != nil {
        return d, err
    }
    index.Annotations = make(map[string]string)

@ -419,6 +453,43 @@ func (t *task) Update(ctx context.Context, opts ...UpdateTaskOpts) error {
    return errdefs.FromGRPC(err)
}

func (t *task) LoadProcess(ctx context.Context, id string, ioAttach IOAttach) (Process, error) {
    response, err := t.client.TaskService().Get(ctx, &tasks.GetRequest{
        ContainerID: t.id,
        ExecID:      id,
    })
    if err != nil {
        err = errdefs.FromGRPC(err)
        if errdefs.IsNotFound(err) {
            return nil, errors.Wrapf(err, "no running process found")
        }
        return nil, err
    }
    var i IO
    if ioAttach != nil {
        if i, err = attachExistingIO(response, ioAttach); err != nil {
            return nil, err
        }
    }
    return &process{
        id:   id,
        task: t,
        io:   i,
    }, nil
}

func (t *task) Metrics(ctx context.Context) (*types.Metric, error) {
    response, err := t.client.TaskService().Metrics(ctx, &tasks.MetricsRequest{
        Filters: []string{
            "id==" + t.id,
        },
    })
    if err != nil {
        return nil, errdefs.FromGRPC(err)
    }
    return response.Metrics[0], nil
}

func (t *task) checkpointTask(ctx context.Context, index *v1.Index, request *tasks.CheckpointTaskRequest) error {
    response, err := t.client.TaskService().Checkpoint(ctx, request)
    if err != nil {

@ -482,7 +553,7 @@ func writeContent(ctx context.Context, store content.Store, mediaType, ref strin
    if err != nil {
        return d, err
    }
    if err := writer.Commit(size, ""); err != nil {
    if err := writer.Commit(ctx, size, ""); err != nil {
        return d, err
    }
    return v1.Descriptor{
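A brief migration sketch for the Wait change above: callers now receive a channel up front and read an ExitStatus from it once the task finishes, instead of blocking for a bare uint32 (the buildkit worker change further below does exactly this). Names are illustrative and error handling is trimmed.

// Subscribe to the exit status before starting, then block on the channel.
statusCh, err := task.Wait(ctx)
if err != nil {
    return err
}
if err := task.Start(ctx); err != nil {
    return err
}
status := <-statusCh
if status.ExitCode() != 0 {
    return errors.Errorf("process exited with code %d", status.ExitCode())
}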
@ -33,16 +33,15 @@ type ProcessDeleteOpts func(context.Context, Process) error

// WithProcessKill will forcefully kill and delete a process
func WithProcessKill(ctx context.Context, p Process) error {
    s := make(chan struct{}, 1)
    ctx, cancel := context.WithCancel(ctx)
    defer cancel()
    // ignore errors to wait and kill as we are forcefully killing
    // the process and don't care about the exit status
    go func() {
        p.Wait(ctx)
        close(s)
    }()
    if err := p.Kill(ctx, syscall.SIGKILL); err != nil {
    s, err := p.Wait(ctx)
    if err != nil {
        return err
    }
    if err := p.Kill(ctx, syscall.SIGKILL, WithKillAll); err != nil {
        if errdefs.IsFailedPrecondition(err) || errdefs.IsNotFound(err) {
            return nil
        }

@ -52,3 +51,17 @@ func WithProcessKill(ctx context.Context, p Process) error {
    <-s
    return nil
}

type KillInfo struct {
    // All kills all processes inside the task
    // only valid on tasks, ignored on processes
    All bool
}

type KillOpts func(context.Context, Process, *KillInfo) error

// WithKillAll kills all processes for a task
func WithKillAll(ctx context.Context, p Process, i *KillInfo) error {
    i.All = true
    return nil
}
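For completeness, an illustrative call against the new variadic Kill signature; WithProcessKill above relies on the same option to take down every process in the task before deletion.

// Forcefully signal all processes in the task, not just the init process.
if err := task.Kill(ctx, syscall.SIGKILL, containerd.WithKillAll); err != nil {
    return err
}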
@ -1,7 +1,7 @@
github.com/coreos/go-systemd 48702e0da86bd25e76cfef347e2adeb434a0d0a6
github.com/containerd/go-runc b85ac701de5065a66918203dd18f057433290807
github.com/containerd/go-runc b3c048c028ddd789c6f9510c597f8b9c62f25359
github.com/containerd/console 76d18fd1d66972718ab2284449591db0b3cdb4de
github.com/containerd/cgroups 4fd64a776f25b5540cddcb72eea6e35e58baca6e
github.com/containerd/cgroups 5933ab4dc4f7caa3a73a1dc141bd11f42b5c9163
github.com/docker/go-metrics 8fd5772bf1584597834c6f7961a530f06cbfbb87
github.com/docker/go-events 9461782956ad83b30282bf90e31fa6a70c255ba9
github.com/godbus/dbus c7fdd8b5cd55e87b4e1f4e372cdb1db61dd6c66f

@ -15,7 +15,7 @@ github.com/docker/go-units v0.3.1
github.com/gogo/protobuf d2e1ade2d719b78fe5b061b4c18a9f7111b5bdc8
github.com/golang/protobuf 5a0f697c9ed9d68fef0116532c6e05cfeae00e55
github.com/opencontainers/runtime-spec v1.0.0
github.com/opencontainers/runc e775f0fba3ea329b8b766451c892c41a3d49594d
github.com/opencontainers/runc 593914b8bd5448a93f7c3e4902a03408b6d5c0ce
github.com/sirupsen/logrus v1.0.0
github.com/containerd/btrfs cc52c4dea2ce11a44e6639e561bb5c2af9ada9e3
github.com/stretchr/testify v1.1.4

@ -27,15 +27,16 @@ golang.org/x/net 7dcfb8076726a3fdd9353b6b8a1f1b6be6811bd6
google.golang.org/grpc v1.3.0
github.com/pkg/errors v0.8.0
github.com/opencontainers/go-digest 21dfd564fd89c944783d00d069f33e3e7123c448
golang.org/x/sys b892924b68aa53038e8a55b255ee0d8391e8eec5 https://github.com/golang/sys
golang.org/x/sys 7ddbeae9ae08c6a06a59597f0c9edbc5ff2444ce https://github.com/golang/sys
github.com/opencontainers/image-spec v1.0.0
github.com/containerd/continuity cf279e6ac893682272b4479d4c67fd3abf878b4e
golang.org/x/sync 450f422ab23cf9881c94e2db30cac0eb1b7cf80c
github.com/BurntSushi/toml v0.2.0-21-g9906417
github.com/grpc-ecosystem/go-grpc-prometheus 6b7015e65d366bf3f19b2b2a000a831940f0f7e0
github.com/Microsoft/go-winio v0.4.4
github.com/Microsoft/hcsshim v0.6.3
github.com/Microsoft/opengcs v0.3.2
github.com/boltdb/bolt e9cf4fae01b5a8ff89d0ec6b32f0d9c9f79aefdd
github.com/Microsoft/hcsshim v0.6.1
github.com/Azure/go-ansiterm 19f72df4d05d31cbe1c56bfc8045c96babff6c7e
google.golang.org/genproto d80a6e20e776b0b17a324d0ba1ab50a39c8e8944
golang.org/x/text 19e51611da83d6be54ddafce4a4af510cb3e9ea4
111 vendor/github.com/opencontainers/runc/libcontainer/user/lookup.go generated vendored Normal file

@ -0,0 +1,111 @@
package user

import (
    "errors"

    "golang.org/x/sys/unix"
)

var (
    // The current operating system does not provide the required data for user lookups.
    ErrUnsupported = errors.New("user lookup: operating system does not provide passwd-formatted data")
    // No matching entries found in file.
    ErrNoPasswdEntries = errors.New("no matching entries in passwd file")
    ErrNoGroupEntries  = errors.New("no matching entries in group file")
)

func lookupUser(filter func(u User) bool) (User, error) {
    // Get operating system-specific passwd reader-closer.
    passwd, err := GetPasswd()
    if err != nil {
        return User{}, err
    }
    defer passwd.Close()

    // Get the users.
    users, err := ParsePasswdFilter(passwd, filter)
    if err != nil {
        return User{}, err
    }

    // No user entries found.
    if len(users) == 0 {
        return User{}, ErrNoPasswdEntries
    }

    // Assume the first entry is the "correct" one.
    return users[0], nil
}

// CurrentUser looks up the current user by their user id in /etc/passwd. If the
// user cannot be found (or there is no /etc/passwd file on the filesystem),
// then CurrentUser returns an error.
func CurrentUser() (User, error) {
    return LookupUid(unix.Getuid())
}

// LookupUser looks up a user by their username in /etc/passwd. If the user
// cannot be found (or there is no /etc/passwd file on the filesystem), then
// LookupUser returns an error.
func LookupUser(username string) (User, error) {
    return lookupUser(func(u User) bool {
        return u.Name == username
    })
}

// LookupUid looks up a user by their user id in /etc/passwd. If the user cannot
// be found (or there is no /etc/passwd file on the filesystem), then LookupId
// returns an error.
func LookupUid(uid int) (User, error) {
    return lookupUser(func(u User) bool {
        return u.Uid == uid
    })
}

func lookupGroup(filter func(g Group) bool) (Group, error) {
    // Get operating system-specific group reader-closer.
    group, err := GetGroup()
    if err != nil {
        return Group{}, err
    }
    defer group.Close()

    // Get the users.
    groups, err := ParseGroupFilter(group, filter)
    if err != nil {
        return Group{}, err
    }

    // No user entries found.
    if len(groups) == 0 {
        return Group{}, ErrNoGroupEntries
    }

    // Assume the first entry is the "correct" one.
    return groups[0], nil
}

// CurrentGroup looks up the current user's group by their primary group id's
// entry in /etc/passwd. If the group cannot be found (or there is no
// /etc/group file on the filesystem), then CurrentGroup returns an error.
func CurrentGroup() (Group, error) {
    return LookupGid(unix.Getgid())
}

// LookupGroup looks up a group by its name in /etc/group. If the group cannot
// be found (or there is no /etc/group file on the filesystem), then LookupGroup
// returns an error.
func LookupGroup(groupname string) (Group, error) {
    return lookupGroup(func(g Group) bool {
        return g.Name == groupname
    })
}

// LookupGid looks up a group by its group id in /etc/group. If the group cannot
// be found (or there is no /etc/group file on the filesystem), then LookupGid
// returns an error.
func LookupGid(gid int) (Group, error) {
    return lookupGroup(func(g Group) bool {
        return g.Gid == gid
    })
}
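A small sketch of the lookup helpers in this new file; they read the host's /etc/passwd via GetPasswd, so the result depends on the machine this runs on (error handling and output are illustrative only).

// Look up the "root" entry from the local passwd database.
u, err := user.LookupUser("root")
if err != nil {
    return err // e.g. ErrNoPasswdEntries when nothing matches
}
fmt.Printf("uid=%d gid=%d home=%s\n", u.Uid, u.Gid, u.Home)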
30 vendor/github.com/opencontainers/runc/libcontainer/user/lookup_unix.go generated vendored Normal file

@ -0,0 +1,30 @@
// +build darwin dragonfly freebsd linux netbsd openbsd solaris

package user

import (
    "io"
    "os"
)

// Unix-specific path to the passwd and group formatted files.
const (
    unixPasswdPath = "/etc/passwd"
    unixGroupPath  = "/etc/group"
)

func GetPasswdPath() (string, error) {
    return unixPasswdPath, nil
}

func GetPasswd() (io.ReadCloser, error) {
    return os.Open(unixPasswdPath)
}

func GetGroupPath() (string, error) {
    return unixGroupPath, nil
}

func GetGroup() (io.ReadCloser, error) {
    return os.Open(unixGroupPath)
}
21 vendor/github.com/opencontainers/runc/libcontainer/user/lookup_unsupported.go generated vendored Normal file

@ -0,0 +1,21 @@
// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris

package user

import "io"

func GetPasswdPath() (string, error) {
    return "", ErrUnsupported
}

func GetPasswd() (io.ReadCloser, error) {
    return nil, ErrUnsupported
}

func GetGroupPath() (string, error) {
    return "", ErrUnsupported
}

func GetGroup() (io.ReadCloser, error) {
    return nil, ErrUnsupported
}
@ -0,0 +1,441 @@
package user

import (
    "bufio"
    "fmt"
    "io"
    "os"
    "strconv"
    "strings"
)

const (
    minId = 0
    maxId = 1<<31 - 1 //for 32-bit systems compatibility
)

var (
    ErrRange = fmt.Errorf("uids and gids must be in range %d-%d", minId, maxId)
)

type User struct {
    Name  string
    Pass  string
    Uid   int
    Gid   int
    Gecos string
    Home  string
    Shell string
}

type Group struct {
    Name string
    Pass string
    Gid  int
    List []string
}

func parseLine(line string, v ...interface{}) {
    if line == "" {
        return
    }

    parts := strings.Split(line, ":")
    for i, p := range parts {
        // Ignore cases where we don't have enough fields to populate the arguments.
        // Some configuration files like to misbehave.
        if len(v) <= i {
            break
        }

        // Use the type of the argument to figure out how to parse it, scanf() style.
        // This is legit.
        switch e := v[i].(type) {
        case *string:
            *e = p
        case *int:
            // "numbers", with conversion errors ignored because of some misbehaving configuration files.
            *e, _ = strconv.Atoi(p)
        case *[]string:
            // Comma-separated lists.
            if p != "" {
                *e = strings.Split(p, ",")
            } else {
                *e = []string{}
            }
        default:
            // Someone goof'd when writing code using this function. Scream so they can hear us.
            panic(fmt.Sprintf("parseLine only accepts {*string, *int, *[]string} as arguments! %#v is not a pointer!", e))
        }
    }
}

func ParsePasswdFile(path string) ([]User, error) {
    passwd, err := os.Open(path)
    if err != nil {
        return nil, err
    }
    defer passwd.Close()
    return ParsePasswd(passwd)
}

func ParsePasswd(passwd io.Reader) ([]User, error) {
    return ParsePasswdFilter(passwd, nil)
}

func ParsePasswdFileFilter(path string, filter func(User) bool) ([]User, error) {
    passwd, err := os.Open(path)
    if err != nil {
        return nil, err
    }
    defer passwd.Close()
    return ParsePasswdFilter(passwd, filter)
}

func ParsePasswdFilter(r io.Reader, filter func(User) bool) ([]User, error) {
    if r == nil {
        return nil, fmt.Errorf("nil source for passwd-formatted data")
    }

    var (
        s   = bufio.NewScanner(r)
        out = []User{}
    )

    for s.Scan() {
        if err := s.Err(); err != nil {
            return nil, err
        }

        line := strings.TrimSpace(s.Text())
        if line == "" {
            continue
        }

        // see: man 5 passwd
        //  name:password:UID:GID:GECOS:directory:shell
        //  Name:Pass:Uid:Gid:Gecos:Home:Shell
        //  root:x:0:0:root:/root:/bin/bash
        //  adm:x:3:4:adm:/var/adm:/bin/false
        p := User{}
        parseLine(line, &p.Name, &p.Pass, &p.Uid, &p.Gid, &p.Gecos, &p.Home, &p.Shell)

        if filter == nil || filter(p) {
            out = append(out, p)
        }
    }

    return out, nil
}

func ParseGroupFile(path string) ([]Group, error) {
    group, err := os.Open(path)
    if err != nil {
        return nil, err
    }

    defer group.Close()
    return ParseGroup(group)
}

func ParseGroup(group io.Reader) ([]Group, error) {
    return ParseGroupFilter(group, nil)
}

func ParseGroupFileFilter(path string, filter func(Group) bool) ([]Group, error) {
    group, err := os.Open(path)
    if err != nil {
        return nil, err
    }
    defer group.Close()
    return ParseGroupFilter(group, filter)
}

func ParseGroupFilter(r io.Reader, filter func(Group) bool) ([]Group, error) {
    if r == nil {
        return nil, fmt.Errorf("nil source for group-formatted data")
    }

    var (
        s   = bufio.NewScanner(r)
        out = []Group{}
    )

    for s.Scan() {
        if err := s.Err(); err != nil {
            return nil, err
        }

        text := s.Text()
        if text == "" {
            continue
        }

        // see: man 5 group
        //  group_name:password:GID:user_list
        //  Name:Pass:Gid:List
        //  root:x:0:root
        //  adm:x:4:root,adm,daemon
        p := Group{}
        parseLine(text, &p.Name, &p.Pass, &p.Gid, &p.List)

        if filter == nil || filter(p) {
            out = append(out, p)
        }
    }

    return out, nil
}

type ExecUser struct {
    Uid   int
    Gid   int
    Sgids []int
    Home  string
}

// GetExecUserPath is a wrapper for GetExecUser. It reads data from each of the
// given file paths and uses that data as the arguments to GetExecUser. If the
// files cannot be opened for any reason, the error is ignored and a nil
// io.Reader is passed instead.
func GetExecUserPath(userSpec string, defaults *ExecUser, passwdPath, groupPath string) (*ExecUser, error) {
    var passwd, group io.Reader

    if passwdFile, err := os.Open(passwdPath); err == nil {
        passwd = passwdFile
        defer passwdFile.Close()
    }

    if groupFile, err := os.Open(groupPath); err == nil {
        group = groupFile
        defer groupFile.Close()
    }

    return GetExecUser(userSpec, defaults, passwd, group)
}

// GetExecUser parses a user specification string (using the passwd and group
// readers as sources for /etc/passwd and /etc/group data, respectively). In
// the case of blank fields or missing data from the sources, the values in
// defaults is used.
//
// GetExecUser will return an error if a user or group literal could not be
// found in any entry in passwd and group respectively.
//
// Examples of valid user specifications are:
//     * ""
//     * "user"
//     * "uid"
//     * "user:group"
//     * "uid:gid
//     * "user:gid"
//     * "uid:group"
//
// It should be noted that if you specify a numeric user or group id, they will
// not be evaluated as usernames (only the metadata will be filled). So attempting
// to parse a user with user.Name = "1337" will produce the user with a UID of
// 1337.
func GetExecUser(userSpec string, defaults *ExecUser, passwd, group io.Reader) (*ExecUser, error) {
    if defaults == nil {
        defaults = new(ExecUser)
    }

    // Copy over defaults.
    user := &ExecUser{
        Uid:   defaults.Uid,
        Gid:   defaults.Gid,
        Sgids: defaults.Sgids,
        Home:  defaults.Home,
    }

    // Sgids slice *cannot* be nil.
    if user.Sgids == nil {
        user.Sgids = []int{}
    }

    // Allow for userArg to have either "user" syntax, or optionally "user:group" syntax
    var userArg, groupArg string
    parseLine(userSpec, &userArg, &groupArg)

    // Convert userArg and groupArg to be numeric, so we don't have to execute
    // Atoi *twice* for each iteration over lines.
    uidArg, uidErr := strconv.Atoi(userArg)
    gidArg, gidErr := strconv.Atoi(groupArg)

    // Find the matching user.
    users, err := ParsePasswdFilter(passwd, func(u User) bool {
        if userArg == "" {
            // Default to current state of the user.
            return u.Uid == user.Uid
        }

        if uidErr == nil {
            // If the userArg is numeric, always treat it as a UID.
            return uidArg == u.Uid
        }

        return u.Name == userArg
    })

    // If we can't find the user, we have to bail.
    if err != nil && passwd != nil {
        if userArg == "" {
            userArg = strconv.Itoa(user.Uid)
        }
        return nil, fmt.Errorf("unable to find user %s: %v", userArg, err)
    }

    var matchedUserName string
    if len(users) > 0 {
        // First match wins, even if there's more than one matching entry.
        matchedUserName = users[0].Name
        user.Uid = users[0].Uid
        user.Gid = users[0].Gid
        user.Home = users[0].Home
    } else if userArg != "" {
        // If we can't find a user with the given username, the only other valid
        // option is if it's a numeric username with no associated entry in passwd.

        if uidErr != nil {
            // Not numeric.
            return nil, fmt.Errorf("unable to find user %s: %v", userArg, ErrNoPasswdEntries)
        }
        user.Uid = uidArg

        // Must be inside valid uid range.
        if user.Uid < minId || user.Uid > maxId {
            return nil, ErrRange
        }

        // Okay, so it's numeric. We can just roll with this.
    }

    // On to the groups. If we matched a username, we need to do this because of
    // the supplementary group IDs.
    if groupArg != "" || matchedUserName != "" {
        groups, err := ParseGroupFilter(group, func(g Group) bool {
            // If the group argument isn't explicit, we'll just search for it.
            if groupArg == "" {
                // Check if user is a member of this group.
                for _, u := range g.List {
                    if u == matchedUserName {
                        return true
                    }
                }
                return false
            }

            if gidErr == nil {
                // If the groupArg is numeric, always treat it as a GID.
                return gidArg == g.Gid
            }

            return g.Name == groupArg
        })
        if err != nil && group != nil {
            return nil, fmt.Errorf("unable to find groups for spec %v: %v", matchedUserName, err)
        }

        // Only start modifying user.Gid if it is in explicit form.
        if groupArg != "" {
            if len(groups) > 0 {
                // First match wins, even if there's more than one matching entry.
                user.Gid = groups[0].Gid
            } else {
                // If we can't find a group with the given name, the only other valid
                // option is if it's a numeric group name with no associated entry in group.

                if gidErr != nil {
                    // Not numeric.
                    return nil, fmt.Errorf("unable to find group %s: %v", groupArg, ErrNoGroupEntries)
                }
                user.Gid = gidArg

                // Must be inside valid gid range.
                if user.Gid < minId || user.Gid > maxId {
                    return nil, ErrRange
                }

                // Okay, so it's numeric. We can just roll with this.
            }
        } else if len(groups) > 0 && uidErr != nil {
            // Supplementary group ids only make sense if in the implicit form for non-numeric users.
            user.Sgids = make([]int, len(groups))
            for i, group := range groups {
                user.Sgids[i] = group.Gid
            }
        }
    }

    return user, nil
}

// GetAdditionalGroups looks up a list of groups by name or group id
// against the given /etc/group formatted data. If a group name cannot
// be found, an error will be returned. If a group id cannot be found,
// or the given group data is nil, the id will be returned as-is
// provided it is in the legal range.
func GetAdditionalGroups(additionalGroups []string, group io.Reader) ([]int, error) {
    var groups = []Group{}
    if group != nil {
        var err error
        groups, err = ParseGroupFilter(group, func(g Group) bool {
            for _, ag := range additionalGroups {
                if g.Name == ag || strconv.Itoa(g.Gid) == ag {
                    return true
                }
            }
            return false
        })
        if err != nil {
            return nil, fmt.Errorf("Unable to find additional groups %v: %v", additionalGroups, err)
        }
    }

    gidMap := make(map[int]struct{})
    for _, ag := range additionalGroups {
        var found bool
        for _, g := range groups {
            // if we found a matched group either by name or gid, take the
            // first matched as correct
            if g.Name == ag || strconv.Itoa(g.Gid) == ag {
                if _, ok := gidMap[g.Gid]; !ok {
                    gidMap[g.Gid] = struct{}{}
                    found = true
                    break
                }
            }
        }
        // we asked for a group but didn't find it. let's check to see
        // if we wanted a numeric group
        if !found {
            gid, err := strconv.Atoi(ag)
            if err != nil {
                return nil, fmt.Errorf("Unable to find group %s", ag)
            }
            // Ensure gid is inside gid range.
            if gid < minId || gid > maxId {
                return nil, ErrRange
            }
            gidMap[gid] = struct{}{}
        }
    }
    gids := []int{}
    for gid := range gidMap {
        gids = append(gids, gid)
    }
    return gids, nil
}

// GetAdditionalGroupsPath is a wrapper around GetAdditionalGroups
// that opens the groupPath given and gives it as an argument to
// GetAdditionalGroups.
func GetAdditionalGroupsPath(additionalGroups []string, groupPath string) ([]int, error) {
    var group io.Reader

    if groupFile, err := os.Open(groupPath); err == nil {
        group = groupFile
        defer groupFile.Close()
    }
    return GetAdditionalGroups(additionalGroups, group)
}
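To make the user-spec parsing above concrete, a hedged example of GetExecUser driven by in-memory passwd/group data (the readers normally come from the container's /etc files; the entries here are invented):

// "daemon:wheel" resolves the uid from the passwd entry and the gid from the
// explicitly named group, so the sample below yields Uid=2, Gid=10.
passwd := strings.NewReader("daemon:x:2:2:daemon:/sbin:/sbin/nologin\n")
group := strings.NewReader("wheel:x:10:daemon\n")
execUser, err := user.GetExecUser("daemon:wheel", nil, passwd, group)
if err != nil {
    return err
}
fmt.Printf("uid=%d gid=%d sgids=%v\n", execUser.Uid, execUser.Gid, execUser.Sgids)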
@ -58,12 +58,13 @@ func (w containerdWorker) Exec(ctx context.Context, meta worker.Meta, root cache
        return err
    }

    status, err := task.Wait(ctx)
    statusCh, err := task.Wait(ctx)
    if err != nil {
        return err
    }
    if status != 0 {
        return errors.Errorf("process returned non-zero status: %d", status)
    status := <-statusCh
    if status.ExitCode() != 0 {
        return errors.Errorf("process returned non-zero exit code: %d", status.ExitCode())
    }

    return nil
@ -18,7 +18,7 @@ import (
// Ideally we don't have to import whole containerd just for the default spec

func GenerateSpec(ctx context.Context, meta worker.Meta, mounts []worker.Mount) (*specs.Spec, func(), error) {
    s, err := containerd.GenerateSpec(
    s, err := containerd.GenerateSpec(ctx, nil, nil,
        containerd.WithHostNamespace(specs.NetworkNamespace),
        containerd.WithHostResolvconf,
        containerd.WithHostHostsFile,